mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-08 15:48:29 +00:00
Revamping tool usage
@@ -3,9 +3,9 @@ from typing import Any, List, Optional
from langchain.agents.agent import RunnableAgent
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description
from langchain_core.runnables.config import RunnableConfig
from langchain_openai import ChatOpenAI
from pydantic import (
    UUID4,
@@ -19,12 +19,7 @@ from pydantic import (
)
from pydantic_core import PydanticCustomError

from crewai.agents import (
    CacheHandler,
    CrewAgentExecutor,
    CrewAgentOutputParser,
    ToolsHandler,
)
from crewai.agents import CacheHandler, CrewAgentExecutor, ToolsHandler
from crewai.utilities import I18N, Logger, Prompts, RPMController


@@ -158,8 +153,7 @@ class Agent(BaseModel):
                "input": task,
                "tool_names": self.__tools_names(tools),
                "tools": render_text_description(tools),
            },
            RunnableConfig(callbacks=[self.tools_handler]),
        }
    )["output"]

    if self.max_rpm:
@@ -200,12 +194,14 @@ class Agent(BaseModel):
            "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
        }
        executor_args = {
            "llm": self.llm,
            "i18n": self.i18n,
            "tools": self.tools,
            "verbose": self.verbose,
            "handle_parsing_errors": True,
            "max_iterations": self.max_iter,
            "step_callback": self.step_callback,
            "tools_handler": self.tools_handler,
        }

        if self._rpm_controller:
@@ -231,14 +227,7 @@ class Agent(BaseModel):

        bind = self.llm.bind(stop=[self.i18n.slice("observation")])
        inner_agent = (
            agent_args
            | execution_prompt
            | bind
            | CrewAgentOutputParser(
                tools_handler=self.tools_handler,
                cache=self.cache_handler,
                i18n=self.i18n,
            )
            agent_args | execution_prompt | bind | ReActSingleInputOutputParser()
        )
        self.agent_executor = CrewAgentExecutor(
            agent=RunnableAgent(runnable=inner_agent), **executor_args

@@ -1,4 +1,3 @@
from .cache.cache_handler import CacheHandler
from .executor import CrewAgentExecutor
from .output_parser import CrewAgentOutputParser
from .tools_handler import ToolsHandler

1  src/crewai/agents/cache/__init__.py  vendored
@@ -1,2 +1 @@
from .cache_handler import CacheHandler
from .cache_hit import CacheHit

2  src/crewai/agents/cache/cache_handler.py  vendored
@@ -10,9 +10,7 @@ class CacheHandler:
        self._cache = {}

    def add(self, tool, input, output):
        input = input.strip()
        self._cache[f"{tool}-{input}"] = output

    def read(self, tool, input) -> Optional[str]:
        input = input.strip()
        return self._cache.get(f"{tool}-{input}")
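The two `input = input.strip()` lines are what this hunk removes: the input is no longer a plain string but a dict of tool arguments, and the cache key is simply the f-string of the tool name and that dict. A minimal sketch (mine, not part of the commit) of the resulting cache shape, matching the test expectations further down:

    # Hypothetical usage; assumes `from crewai.agents.cache import CacheHandler`.
    cache = CacheHandler()
    cache.add(tool="multiplier", input={"first_number": 2, "second_number": 6}, output=12)
    # Stored under the key "multiplier-{'first_number': 2, 'second_number': 6}"
    assert cache.read(tool="multiplier", input={"first_number": 2, "second_number": 6}) == 12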
18  src/crewai/agents/cache/cache_hit.py  vendored
@@ -1,18 +0,0 @@
from typing import Any

from pydantic import BaseModel, Field

from .cache_handler import CacheHandler


class CacheHit(BaseModel):
    """Cache Hit Object."""

    class Config:
        arbitrary_types_allowed = True

    # Making it Any instead of AgentAction to avoid
    # pydantic v1 vs v2 incompatibility; langchain should
    # soon be updated to pydantic v2
    action: Any = Field(description="Action taken")
    cache: CacheHandler = Field(description="Cache Handler for the tool")
@@ -1,30 +0,0 @@
from langchain_core.exceptions import OutputParserException

from crewai.utilities import I18N


class TaskRepeatedUsageException(OutputParserException):
    """Exception raised when a tool is used twice in a row."""

    i18n: I18N = I18N()
    error: str = "TaskRepeatedUsageException"
    message: str

    def __init__(self, i18n: I18N, tool: str, tool_input: str, text: str):
        self.i18n = i18n
        self.text = text
        self.tool = tool
        self.tool_input = tool_input
        self.message = self.i18n.errors("task_repeated_usage").format(
            tool=tool, tool_input=tool_input
        )

        super().__init__(
            error=self.error,
            observation=self.message,
            send_to_llm=True,
            llm_output=self.text,
        )

    def __str__(self):
        return self.message
@@ -10,16 +10,19 @@ from langchain_core.exceptions import OutputParserException
from langchain_core.pydantic_v1 import root_validator
from langchain_core.tools import BaseTool
from langchain_core.utils.input import get_color_mapping
from pydantic import InstanceOf

from crewai.agents.cache.cache_hit import CacheHit
from crewai.tools.cache_tools import CacheTools
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools.tool_usage import ToolUsage
from crewai.utilities import I18N


class CrewAgentExecutor(AgentExecutor):
    i18n: I18N = I18N()
    llm: Any = None
    iterations: int = 0
    request_within_rpm_limit: Any = None
    tools_handler: InstanceOf[ToolsHandler] = None
    max_iterations: Optional[int] = 15
    force_answer_max_iterations: Optional[int] = None
    step_callback: Optional[Any] = None
@@ -32,11 +35,6 @@ class CrewAgentExecutor(AgentExecutor):
    def _should_force_answer(self) -> bool:
        return True if self.iterations == self.force_answer_max_iterations else False

    def _force_answer(self, output: AgentAction):
        return AgentStep(
            action=output, observation=self.i18n.errors("force_final_answer")
        )

    def _call(
        self,
        inputs: Dict[str, str],
@@ -110,16 +108,17 @@ class CrewAgentExecutor(AgentExecutor):
                callbacks=run_manager.get_child() if run_manager else None,
                **inputs,
            )

            if self._should_force_answer():
                if isinstance(output, AgentAction) or isinstance(output, AgentFinish):
                    output = output
                elif isinstance(output, CacheHit):
                    output = output.action
                else:
                    raise ValueError(
                        f"Unexpected output type from agent: {type(output)}"
                    )
                yield self._force_answer(output)
                yield AgentStep(
                    action=output, observation=self.i18n.errors("force_final_answer")
                )
                return

        except OutputParserException as e:
@@ -160,7 +159,9 @@ class CrewAgentExecutor(AgentExecutor):
            )

            if self._should_force_answer():
                yield self._force_answer(output)
                yield AgentStep(
                    action=output, observation=self.i18n.errors("force_final_answer")
                )
                return

            yield AgentStep(action=output, observation=observation)
@@ -171,17 +172,6 @@ class CrewAgentExecutor(AgentExecutor):
            yield output
            return

        # Override tool usage to use CacheTools
        if isinstance(output, CacheHit):
            cache = output.cache
            action = output.action
            tool = CacheTools(cache_handler=cache).tool()
            output = action.copy()
            output.tool_input = f"tool:{action.tool}|input:{action.tool_input}"
            output.tool = tool.name
            name_to_tool_map[tool.name] = tool
            color_mapping[tool.name] = color_mapping[action.tool]

        actions: List[AgentAction]
        actions = [output] if isinstance(output, AgentAction) else output
        yield from actions
@@ -192,18 +182,13 @@ class CrewAgentExecutor(AgentExecutor):
        if agent_action.tool in name_to_tool_map:
            tool = name_to_tool_map[agent_action.tool]
            return_direct = tool.return_direct
            color = color_mapping[agent_action.tool]
            color_mapping[agent_action.tool]
            tool_run_kwargs = self.agent.tool_run_logging_kwargs()
            if return_direct:
                tool_run_kwargs["llm_prefix"] = ""
            # We then call the tool on the tool input to get an observation
            observation = tool.run(
                agent_action.tool_input,
                verbose=self.verbose,
                color=color,
                callbacks=run_manager.get_child() if run_manager else None,
                **tool_run_kwargs,
            )
            observation = ToolUsage(
                tools_handler=self.tools_handler, tools=self.tools, llm=self.llm
            ).use(agent_action.log)
        else:
            tool_run_kwargs = self.agent.tool_run_logging_kwargs()
            observation = InvalidTool().run(
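The key change in this last hunk: instead of calling `tool.run(agent_action.tool_input, ...)`, the executor now hands the agent's entire raw ReAct log to `ToolUsage.use()`, which re-parses it with the LLM. A minimal sketch of that handoff (the `handler`, `tools`, and `llm` names are placeholders, not from the commit):

    raw_log = (
        "Thought: Do I need to use a tool? Yes\n"
        "Action: get_final_answer\n"
        "Action Input: 42"
    )
    observation = ToolUsage(tools_handler=handler, tools=tools, llm=llm).use(raw_log)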
@@ -1,79 +0,0 @@
import re
from typing import Union

from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain_core.agents import AgentAction, AgentFinish

from crewai.agents.cache import CacheHandler, CacheHit
from crewai.agents.exceptions import TaskRepeatedUsageException
from crewai.agents.tools_handler import ToolsHandler
from crewai.utilities import I18N

FINAL_ANSWER_ACTION = "Final Answer:"
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
    "Parsing LLM output produced both a final answer and a parse-able action:"
)


class CrewAgentOutputParser(ReActSingleInputOutputParser):
    """Parses ReAct-style LLM calls that have a single tool input.

    Expects output to be in one of two formats.

    If the output signals that an action should be taken,
    it should be in the below format. This will result in an AgentAction
    being returned.

    ```
    Thought: agent thought here
    Action: search
    Action Input: what is the temperature in SF?
    ```

    If the output signals that a final answer should be given,
    it should be in the below format. This will result in an AgentFinish
    being returned.

    ```
    Thought: agent thought here
    Final Answer: The temperature is 100 degrees
    ```

    It also prevents tools from being reused in a row.
    """

    class Config:
        arbitrary_types_allowed = True

    tools_handler: ToolsHandler
    cache: CacheHandler
    i18n: I18N

    def parse(self, text: str) -> Union[AgentAction, AgentFinish, CacheHit]:
        regex = (
            r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        )
        if action_match := re.search(regex, text, re.DOTALL):
            action = action_match.group(1).strip()
            action_input = action_match.group(2)
            tool_input = action_input.strip(" ")
            tool_input = tool_input.strip('"')

            if last_tool_usage := self.tools_handler.last_used_tool:
                usage = {
                    "tool": action,
                    "input": tool_input,
                }
                if usage == last_tool_usage:
                    raise TaskRepeatedUsageException(
                        text=text,
                        tool=action,
                        tool_input=tool_input,
                        i18n=self.i18n,
                    )

            if self.cache.read(action, tool_input):
                action = AgentAction(action, tool_input, text)
                return CacheHit(action=action, cache=self.cache)

        return super().parse(text)
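For reference, the regex in the deleted parser matches ReAct text such as the following, capturing the tool name in group 1 and the raw input in group 2 (example mine, not from the commit):

    import re

    regex = r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
    text = "Thought: I should multiply\nAction: multiplier\nAction Input: 3,5"
    match = re.search(regex, text, re.DOTALL)
    assert match.group(1).strip() == "multiplier"
    assert match.group(2).strip() == "3,5"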
@@ -1,44 +1,30 @@
from typing import Any, Dict

from langchain.callbacks.base import BaseCallbackHandler
from typing import Any

from ..tools.cache_tools import CacheTools
from ..tools.tool_calling import ToolCalling
from .cache.cache_handler import CacheHandler


class ToolsHandler(BaseCallbackHandler):
class ToolsHandler:
    """Callback handler for tool usage."""

    last_used_tool: Dict[str, Any] = {}
    last_used_tool: ToolCalling = {}
    cache: CacheHandler

    def __init__(self, cache: CacheHandler, **kwargs: Any):
    def __init__(self, cache: CacheHandler):
        """Initialize the callback handler."""
        self.cache = cache
        super().__init__(**kwargs)
        self.last_used_tool = {}

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> Any:
    def on_tool_start(self, calling: ToolCalling) -> Any:
        """Run when tool starts running."""
        name = serialized.get("name")
        if name not in ["invalid_tool", "_Exception"]:
            tools_usage = {
                "tool": name,
                "input": input_str,
            }
            self.last_used_tool = tools_usage
        self.last_used_tool = calling

    def on_tool_end(self, output: str, **kwargs: Any) -> Any:
    def on_tool_end(self, calling: ToolCalling, output: str) -> Any:
        """Run when tool ends running."""
        if (
            "is not a valid tool" not in output
            and "Invalid or incomplete response" not in output
            and "Invalid Format" not in output
        ):
            if self.last_used_tool["tool"] != CacheTools().name:
                self.cache.add(
                    tool=self.last_used_tool["tool"],
                    input=self.last_used_tool["input"],
                    output=output,
                )
        if self.last_used_tool.function_name != CacheTools().name:
            self.cache.add(
                tool=calling.function_name,
                input=calling.arguments,
                output=output,
            )
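Since ToolsHandler no longer subclasses BaseCallbackHandler, nothing invokes it automatically; the new ToolUsage class (added further down) drives it explicitly. A hedged sketch of that lifecycle, using the multiplier tool from the tests:

    # Sketch only; mirrors how ToolUsage calls the handler.
    handler = ToolsHandler(cache=CacheHandler())
    calling = ToolCalling(
        function_name="multiplier", arguments={"first_number": 3, "second_number": 5}
    )
    handler.on_tool_start(calling=calling)           # records it as last_used_tool
    handler.on_tool_end(calling=calling, output=15)  # writes through to the cache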
@@ -107,6 +107,7 @@ class Crew(BaseModel):
        self._logger = Logger(self.verbose)
        self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
        self._telemetry = Telemetry()
        self._telemetry.set_tracer()
        self._telemetry.crew_creation(self)
        return self

@@ -2,6 +2,7 @@ import json
import os
import platform
import socket
from typing import Any

import pkg_resources
from opentelemetry import trace
@@ -42,16 +43,18 @@ class Telemetry:
        try:
            telemetry_endpoint = "http://telemetry.crewai.com:4318"
            self.resource = Resource(attributes={SERVICE_NAME: "crewAI-telemetry"})
            provider = TracerProvider(resource=self.resource)
            self.provider = TracerProvider(resource=self.resource)
            processor = BatchSpanProcessor(
                OTLPSpanExporter(endpoint=f"{telemetry_endpoint}/v1/traces")
            )
            provider.add_span_processor(processor)
            trace.set_tracer_provider(provider)
            self.provider.add_span_processor(processor)
            self.ready = True
        except Exception:
            pass

    def set_tracer(self):
        trace.set_tracer_provider(self.provider)

    def crew_creation(self, crew):
        """Records the creation of a crew."""
        if self.ready:
@@ -116,6 +119,36 @@ class Telemetry:
        except Exception:
            pass

    def tool_usage(self, llm: Any, tool_name: str, attempts: int):
        """Records the usage of a tool by an agent."""
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Tool Usage")
                self._add_attribute(span, "tool_name", tool_name)
                self._add_attribute(span, "attempts", attempts)
                self._add_attribute(
                    span, "llm", json.dumps(self._safe_llm_attributes(llm))
                )
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def tool_usage_error(self, llm: Any):
        """Records an error in the usage of a tool by an agent."""
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Tool Usage Error")
                self._add_attribute(
                    span, "llm", json.dumps(self._safe_llm_attributes(llm))
                )
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def crew_execution_span(self, crew):
        """Records the complete execution of a crew.
        This is only collected if the user has opted-in to share the crew.

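The tracer provider is now kept on the instance and only installed globally when Crew calls set_tracer() (see the Crew hunk above); a sketch of the resulting sequence:

    telemetry = Telemetry()        # builds the TracerProvider but does not install it
    telemetry.set_tracer()         # trace.set_tracer_provider(self.provider)
    telemetry.crew_creation(crew)  # spans now flow through the installed provider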
@@ -1,6 +1,6 @@
from typing import List

from langchain.tools import Tool
from langchain.tools import StructuredTool
from pydantic import BaseModel, Field

from crewai.agent import Agent
@@ -15,14 +15,14 @@ class AgentTools(BaseModel):

    def tools(self):
        return [
            Tool.from_function(
            StructuredTool.from_function(
                func=self.delegate_work,
                name="Delegate work to co-worker",
                description=self.i18n.tools("delegate_work").format(
                    coworkers=", ".join([agent.role for agent in self.agents])
                ),
            ),
            Tool.from_function(
            StructuredTool.from_function(
                func=self.ask_question,
                name="Ask question to co-worker",
                description=self.i18n.tools("ask_question").format(
@@ -31,24 +31,16 @@ class AgentTools(BaseModel):
            ),
        ]

    def delegate_work(self, command):
    def delegate_work(self, coworker: str, task: str, context: str):
        """Useful to delegate a specific task to a coworker."""
        return self._execute(command)
        return self._execute(coworker, task, context)

    def ask_question(self, command):
    def ask_question(self, coworker: str, question: str, context: str):
        """Useful to ask a question, opinion or take from a coworker."""
        return self._execute(command)
        return self._execute(coworker, question, context)

    def _execute(self, command):
    def _execute(self, agent, task, context):
        """Execute the command."""
        try:
            agent, task, context = command.split("|")
        except ValueError:
            return self.i18n.errors("agent_tool_missing_param")

        if not agent or not task or not context:
            return self.i18n.errors("agent_tool_missing_param")

        agent = [
            available_agent
            for available_agent in self.agents
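With StructuredTool the delegation tools take named, schema-validated arguments instead of one pipe-delimited command string; a before/after sketch using the values from the tests below:

    # before: a single string the tool had to split on "|"
    tools.delegate_work(
        command="researcher|share your take on AI Agents|I heard you hate them"
    )
    # after: explicit keyword arguments
    tools.delegate_work(
        coworker="researcher",
        task="share your take on AI Agents",
        context="I heard you hate them",
    )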
@@ -1,4 +1,4 @@
from langchain.tools import Tool
from langchain.tools import StructuredTool
from pydantic import BaseModel, ConfigDict, Field

from crewai.agents.cache import CacheHandler
@@ -15,7 +15,7 @@ class CacheTools(BaseModel):
    )

    def tool(self):
        return Tool.from_function(
        return StructuredTool.from_function(
            func=self.hit_cache,
            name=self.name,
            description="Reads directly from the cache",

12  src/crewai/tools/tool_calling.py  Normal file
@@ -0,0 +1,12 @@
from typing import Any, Dict

from pydantic.v1 import BaseModel, Field


class ToolCalling(BaseModel):
    function_name: str = Field(
        ..., description="The name of the function to be called."
    )
    arguments: Dict[str, Any] = Field(
        ..., description="A dictinary of arguments to be passed to the function."
    )
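For the multiplier tool used in the tests, a ToolCalling instance looks like this (a sketch, matching the test assertions further down):

    calling = ToolCalling(
        function_name="multiplier",
        arguments={"first_number": 3, "second_number": 5},
    )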
112  src/crewai/tools/tool_usage.py  Normal file
@@ -0,0 +1,112 @@
from typing import Any, List

from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.tools import BaseTool

from crewai.agents.tools_handler import ToolsHandler
from crewai.telemtry import Telemetry
from crewai.tools.tool_calling import ToolCalling
from crewai.utilities import I18N, Printer


class ToolUsage:
    """
    Class that represents the usage of a tool by an agent.

    Attributes:
        tools_handler: Tools handler that will manage the tool usage.
        tools: List of tools available for the agent.
        llm: Language model to be used for the tool usage.
    """

    def __init__(
        self, tools_handler: ToolsHandler, tools: List[BaseTool], llm: Any
    ) -> None:
        self._i18n: I18N = I18N()
        self._printer: Printer = Printer()
        self._telemetry: Telemetry = Telemetry()
        self._run_attempts: int = 1
        self._max_parsing_attempts: int = 3
        self.tools_handler = tools_handler
        self.tools = tools
        self.llm = llm

    def use(self, tool_string: str):
        calling = self._tool_calling(tool_string)
        tool = self._select_tool(calling.function_name)
        return self._use(tool=tool, calling=calling)

    def _use(self, tool: BaseTool, calling: ToolCalling) -> None:
        if self._check_tool_repeated_usage(calling=calling):
            result = self._i18n.errors("task_repeated_usage").format(
                tool=calling.function_name, tool_input=calling.arguments
            )
        else:
            self.tools_handler.on_tool_start(calling=calling)

            result = self.tools_handler.cache.read(
                tool=calling.function_name, input=calling.arguments
            )

            if not result:
                result = tool._run(**calling.arguments)
                self.tools_handler.on_tool_end(calling=calling, output=result)

        self._printer.print(content=f"\n\n{result}\n", color="yellow")
        self._telemetry.tool_usage(
            llm=self.llm, tool_name=tool.name, attempts=self._run_attempts
        )
        return result

    def _check_tool_repeated_usage(self, calling: ToolCalling) -> None:
        if last_tool_usage := self.tools_handler.last_used_tool:
            return calling == last_tool_usage

    def _select_tool(self, tool_name: str) -> BaseTool:
        for tool in self.tools:
            if tool.name == tool_name:
                return tool
        raise Exception(f"Tool '{tool_name}' not found.")

    def _render(self) -> str:
        """Render the tool name and description in plain text."""
        descriptions = []
        for tool in self.tools:
            args = {
                k: {k2: v2 for k2, v2 in v.items() if k2 in ["description", "type"]}
                for k, v in tool.args.items()
            }
            descriptions.append(
                "\n".join(
                    [
                        f"Funtion Name: {tool.name}",
                        f"Funtion attributes: {args}",
                        f"Description: {tool.description}",
                    ]
                )
            )
        return "\n--\n".join(descriptions)

    def _tool_calling(self, tool_string: str) -> ToolCalling:
        try:
            parser = PydanticOutputParser(pydantic_object=ToolCalling)
            prompt = PromptTemplate(
                template="Return a valid schema for the one tool you must use with its arguments and values.\n\nTools available:\n\n{available_tools}\n\nUse this text to inform a valid ouput schema:\n{tool_string}\n\n{format_instructions}\n```",
                input_variables=["tool_string"],
                partial_variables={
                    "available_tools": self._render(),
                    "format_instructions": parser.get_format_instructions(),
                },
            )
            chain = prompt | self.llm | parser
            calling = chain.invoke({"tool_string": tool_string})

        except Exception as e:
            self._run_attempts += 1
            if self._run_attempts > self._max_parsing_attempts:
                self._telemetry.tool_usage_error(llm=self.llm)
                raise e
            return self._tool_calling(tool_string)

        return calling
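End to end, the new path looks roughly like this (a sketch; `handler`, `llm`, and `multiplier` stand in for real objects):

    usage = ToolUsage(tools_handler=handler, tools=[multiplier], llm=llm)
    result = usage.use(
        "Thought: Do I need to use a tool? Yes\n"
        "Action: multiplier\n"
        "Action Input: 3,5"
    )
    # use() asks the LLM to turn the text into
    # ToolCalling(function_name="multiplier",
    #             arguments={"first_number": 3, "second_number": 5}),
    # then checks the cache, and otherwise calls
    # multiplier._run(first_number=3, second_number=5).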
@@ -16,7 +16,7 @@
    "errors": {
        "force_final_answer": "Στην πραγματικότητα, χρησιμοποίησα πάρα πολλά εργαλεία, οπότε θα σταματήσω τώρα και θα σας δώσω την απόλυτη ΚΑΛΥΤΕΡΗ τελική μου απάντηση ΤΩΡΑ, χρησιμοποιώντας την αναμενόμενη μορφή: ```\nΣκέφτηκα: Χρειάζεται να χρησιμοποιήσω ένα εργαλείο; Όχι\nΤελική απάντηση: [η απάντησή σας εδώ]```",
        "agent_tool_unexsiting_coworker": "\nΣφάλμα κατά την εκτέλεση του εργαλείου. Ο συνάδελφος που αναφέρεται στο Ενέργεια προς εισαγωγή δεν βρέθηκε, πρέπει να είναι μία από τις ακόλουθες επιλογές: {coworkers}.\n",
        "task_repeated_usage": "Μόλις χρησιμοποίησα το {tool} εργαλείο με είσοδο {tool_input}. Άρα ξέρω ήδη το αποτέλεσμα αυτού και δεν χρειάζεται να το χρησιμοποιήσω τώρα.\n"
        "task_repeated_usage": "Μόλις χρησιμοποίησα το εργαλείο {tool} με είσοδο {tool_input}. Άρα ξέρω ήδη το αποτέλεσμα αυτού και δεν χρειάζεται να το χρησιμοποιήσω ξανά τώρα.\n"
    },
    "tools": {
        "delegate_work": "Αναθέστε μια συγκεκριμένη εργασία σε έναν από τους παρακάτω συναδέλφους: {coworkers}. Η είσοδος σε αυτό το εργαλείο θα πρέπει να είναι ο ρόλος του συναδέλφου, η εργασία που θέλετε να κάνει και ΟΛΟ το απαραίτητο πλαίσιο για την εκτέλεση της εργασίας, δεν γνωρίζουν τίποτα για την εργασία, επομένως μοιραστείτε απολύτως όλα όσα γνωρίζετε, μην αναφέρετε πράγματα, αλλά αντί να τους εξηγήσεις.",

@@ -16,7 +16,7 @@
    "errors": {
        "force_final_answer": "Actually, I used too many tools, so I'll stop now and give you my absolute BEST Final answer NOW, using the expected format: ```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```",
        "agent_tool_unexsiting_coworker": "\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options: {coworkers}.\n",
        "task_repeated_usage": "I just used the {tool} tool with input {tool_input}. So I already know the result of that and don't need to use it now.\n"
        "task_repeated_usage": "I just used the {tool} tool with input {tool_input}. So I already know the result of that and don't need to use it again now.\n"
    },
    "tools": {
        "delegate_work": "Delegate a specific task to one of the following co-workers: {coworkers}. The input to this tool should be the role of the coworker, the task you want them to do, and ALL necessary context to exectue the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",

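The new task_repeated_usage wording is what test_agent_repeated_tool_usage asserts on below; note how str.format renders a dict argument (sketch mine):

    template = (
        "I just used the {tool} tool with input {tool_input}. So I already know "
        "the result of that and don't need to use it again now.\n"
    )
    print(template.format(tool="get_final_answer", tool_input={"numbers": 42}))
    # I just used the get_final_answer tool with input {'numbers': 42}. ...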
@@ -9,6 +9,7 @@ from langchain_openai import ChatOpenAI
from crewai import Agent, Crew, Task
from crewai.agents.cache import CacheHandler
from crewai.agents.executor import CrewAgentExecutor
from crewai.tools.tool_calling import ToolCalling
from crewai.utilities import RPMController


@@ -85,13 +86,9 @@ def test_agent_execution():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execution_with_tools():
    @tool
    def multiplier(numbers) -> float:
        """Useful for when you need to multiply two numbers together.
        The input to this tool should be a comma separated list of numbers of
        length two, representing the two numbers you want to multiply together.
        For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
        a, b = numbers.split(",")
        return int(a) * int(b)
    def multiplier(first_number: int, second_number: int) -> float:
        """Useful for when you need to multiply two numbers together."""
        return first_number * second_number

    agent = Agent(
        role="test role",
@@ -102,19 +99,15 @@ def test_agent_execution_with_tools():
    )

    output = agent.execute_task("What is 3 times 4")
    assert output == "12"
    assert output == "3 times 4 is 12."


@pytest.mark.vcr(filter_headers=["authorization"])
def test_logging_tool_usage():
    @tool
    def multiplier(numbers) -> float:
        """Useful for when you need to multiply two numbers together.
        The input to this tool should be a comma separated list of numbers of
        length two, representing the two numbers you want to multiply together.
        For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
        a, b = numbers.split(",")
        return int(a) * int(b)
    def multiplier(first_number: int, second_number: int) -> float:
        """Useful for when you need to multiply two numbers together."""
        return first_number * second_number

    agent = Agent(
        role="test role",
@@ -127,10 +120,9 @@ def test_logging_tool_usage():

    assert agent.tools_handler.last_used_tool == {}
    output = agent.execute_task("What is 3 times 5?")
    tool_usage = {
        "tool": "multiplier",
        "input": "3,5",
    }
    tool_usage = ToolCalling(
        function_name=multiplier.name, arguments={"first_number": 3, "second_number": 5}
    )

    assert output == "3 times 5 is 15."
    assert agent.tools_handler.last_used_tool == tool_usage
@@ -139,13 +131,9 @@ def test_logging_tool_usage():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_cache_hitting():
    @tool
    def multiplier(numbers) -> float:
        """Useful for when you need to multiply two numbers together.
        The input to this tool should be a comma separated list of numbers of
        length two and ONLY TWO, representing the two numbers you want to multiply together.
        For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
        a, b = numbers.split(",")
        return int(a) * int(b)
    def multiplier(first_number: int, second_number: int) -> float:
        """Useful for when you need to multiply two numbers together."""
        return first_number * second_number

    cache_handler = CacheHandler()

@@ -162,9 +150,9 @@ def test_cache_hitting():
    output = agent.execute_task("What is 2 times 6 times 3?")
    output = agent.execute_task("What is 3 times 3?")
    assert cache_handler._cache == {
        "multiplier-12,3": "36",
        "multiplier-2,6": "12",
        "multiplier-3,3": "9",
        "multiplier-{'first_number': 12, 'second_number': 3}": 36,
        "multiplier-{'first_number': 2, 'second_number': 6}": 12,
        "multiplier-{'first_number': 3, 'second_number': 3}": 9,
    }

    output = agent.execute_task("What is 2 times 6 times 3? Return only the number")
@@ -172,21 +160,21 @@ def test_cache_hitting():

    with patch.object(CacheHandler, "read") as read:
        read.return_value = "0"
        output = agent.execute_task("What is 2 times 6?")
        output = agent.execute_task(
            "What is 2 times 6? Ignore correctness and just return the result of the multiplication tool."
        )
        assert output == "0"
        read.assert_called_with("multiplier", "2,6")
        read.assert_called_with(
            tool="multiplier", input={"first_number": 2, "second_number": 6}
        )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execution_with_specific_tools():
    @tool
    def multiplier(numbers) -> float:
        """Useful for when you need to multiply two numbers together.
        The input to this tool should be a comma separated list of numbers of
        length two, representing the two numbers you want to multiply together.
        For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
        a, b = numbers.split(",")
        return int(a) * int(b)
    def multiplier(first_number: int, second_number: int) -> float:
        """Useful for when you need to multiply two numbers together."""
        return first_number * second_number

    agent = Agent(
        role="test role",
@@ -225,6 +213,34 @@ def test_agent_custom_max_iterations():
        private_mock.assert_called_once()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_repeated_tool_usage(capsys):
    @tool
    def get_final_answer(numbers) -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42

    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        max_iter=3,
        allow_delegation=False,
    )

    agent.execute_task(
        task="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
        tools=[get_final_answer],
    )

    captured = capsys.readouterr()
    assert (
        "I just used the get_final_answer tool with input {'numbers': 42}. So I already know the result of that and don't need to use it again now."
        in captured.out
    )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_moved_on_after_max_iterations():
    @tool
@@ -241,18 +257,14 @@ def test_agent_moved_on_after_max_iterations():
        allow_delegation=False,
    )

    with patch.object(
        CrewAgentExecutor, "_force_answer", wraps=agent.agent_executor._force_answer
    ) as private_mock:
        output = agent.execute_task(
            task="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
            tools=[get_final_answer],
        )
        assert (
            output
            == "I have used the tool multiple times and the final answer remains 42."
        )
        private_mock.assert_called_once()
    output = agent.execute_task(
        task="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
        tools=[get_final_answer],
    )
    assert (
        output
        == "I have used the tool 'get_final_answer' twice and confirmed that the answer is indeed 42."
    )


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -281,7 +293,7 @@ def test_agent_respect_the_max_rpm_set(capsys):
    )
    assert (
        output
        == "I've used the `get_final_answer` tool multiple times and it consistently returns the number 42."
        == "I have used the tool as instructed and I am now ready to give the final answer. However, as per the instructions, I am not supposed to give it yet."
    )
    captured = capsys.readouterr()
    assert "Max RPM reached, waiting for next minute to start." in captured.out
@@ -359,7 +371,7 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
            agent=agent1,
        ),
        Task(
            description="Don't give a Final Answer, instead keep using the `get_final_answer` tool.",
            description="Don't give a Final Answer, instead keep using the `get_final_answer` tool non-stop",
            tools=[get_final_answer],
            agent=agent2,
        ),
@@ -428,4 +440,4 @@ def test_agent_step_callback():

    callback.return_value = "ok"
    crew.kickoff()
    callback.assert_called_once()
    callback.assert_called()
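One detail worth noting in the last_used_tool assertion above: ToolCalling is a pydantic (v1) model, so equality is by field values, and the comparison passes whenever the function name and arguments match exactly. A sketch:

    a = ToolCalling(function_name="multiplier", arguments={"first_number": 3, "second_number": 5})
    b = ToolCalling(function_name="multiplier", arguments={"first_number": 3, "second_number": 5})
    assert a == b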
@@ -17,44 +17,36 @@ tools = AgentTools(agents=[researcher])
@pytest.mark.vcr(filter_headers=["authorization"])
def test_delegate_work():
    result = tools.delegate_work(
        command="researcher|share your take on AI Agents|I heard you hate them"
        coworker="researcher",
        task="share your take on AI Agents",
        context="I heard you hate them",
    )

    assert (
        result
        == "I apologize if my previous statements have given you the impression that I hate AI agents. As a technology researcher, I don't hold personal sentiments towards AI or any other technology. Rather, I analyze them objectively based on their capabilities, applications, and implications. AI agents, in particular, are a fascinating domain of research. They hold tremendous potential in automating and optimizing various tasks across industries. However, like any other technology, they come with their own set of challenges, such as ethical considerations around privacy and decision-making. My objective is to understand these technologies in depth and provide a balanced view."
        == "As a researcher, I maintain a neutral perspective on all subjects of research including AI agents. My job is to provide an objective analysis based on facts, not personal feelings. AI Agents are a significant topic in the field of technology with potential to revolutionize various sectors such as healthcare, education, finance and more. They are responsible for tasks that require human intelligence such as understanding natural language, recognizing patterns, and problem solving. However, like any technology, they are tools that can be used for both beneficial and harmful purposes depending on the intent of the user. Therefore, it's crucial to establish ethical guidelines and regulations for their use."
    )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_ask_question():
    result = tools.ask_question(
        command="researcher|do you hate AI Agents?|I heard you LOVE them"
        coworker="researcher",
        question="do you hate AI Agents?",
        context="I heard you LOVE them",
    )

    assert (
        result
        == "As an AI, I don't possess feelings or emotions, so I don't love or hate anything. However, I can provide detailed analysis and research on AI agents. They are a fascinating field of study with the potential to revolutionize many industries, although they also present certain challenges and ethical considerations."
    )


def test_can_not_self_delegate():
    # TODO: Add test for self delegation
    pass


def test_delegate_work_with_wrong_input():
    result = tools.ask_question(command="writer|share your take on AI Agents")

    assert (
        result
        == "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|context`. I need to make sure to pass context as context.\n"
        == "As an AI, I do not possess emotions, hence I cannot love or hate anything. However, as a researcher, I can provide you with an objective analysis of AI Agents. AI Agents are tools designed to perform tasks that would typically require human intelligence. They have potential to revolutionize various sectors including healthcare, education, and finance. However, like any other tool, they can be used for both beneficial and harmful purposes. Therefore, it's essential to have ethical guidelines and regulations in place for their usage."
    )


def test_delegate_work_to_wrong_agent():
    result = tools.ask_question(
        command="writer|share your take on AI Agents|I heard you hate them"
        coworker="writer",
        question="share your take on AI Agents",
        context="I heard you hate them",
    )

    assert (
@@ -65,7 +57,9 @@ def test_delegate_work_to_wrong_agent():

def test_ask_question_to_wrong_agent():
    result = tools.ask_question(
        command="writer|do you hate AI Agents?|I heard you LOVE them"
        coworker="writer",
        question="do you hate AI Agents?",
        context="I heard you LOVE them",
    )

    assert (

675  tests/cassettes/test_agent_repeated_tool_usage.yaml  Normal file
@@ -0,0 +1,675 @@
interactions:
- request:
    body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
      personal goal is: test goalTOOLS:\n------\nYou have access to only the following
      tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
      answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
      use a tool, please use the exact following format:\n\n```\nThought: Do I need
      to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
      just the name.\nAction Input: Any and all relevant information input and context
      for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
      have a response for your task, or if you do not need to use a tool, you MUST
      use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
      [your response here]```This is the summary of your work so far:\nBegin! This
      is VERY important to you, your job depends on it!\n\nCurrent Task: The final
      answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
      tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false,
      "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '1137'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.12.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.12.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA1RQy07DMBC85ytWPrcobUIDuaBKPFrECQEVAhS5zjYJOF4Tb1Qq1H9HTtMHFx9m
        dsYz8xsAiCoXKQhVSla11cOL79UiKe/Dl/JOPS/CBzOb4XSN6vbRbG7EwCto+YmK96ozRbXVyBWZ
        Ha0alIzedZSEyfkkiqLLjqgpR+1lheVhPAwno6hXlFQpdCKFtwAA4Ld7fTaT449IIRzskRqdkwWK
        9HAEIBrSHhHSucqxNCwGR1KRYTRd3KeS2qLkFK4J5mAQc2CC1iFIYCJ9Ba/o3s1U+TIpFMjZqjJS
        Z9K4NTZ7BubGtpxCPBb9N9tDPk2FbWjpu5hW6wO+qkzlyqxB6cj4LI7J7uTbAOCj26H9V03YhmrL
        GdMXGm84juOdnzhOfsJGPcnEUp/gkyToEwq3cYy1L1VgY5uqm8XnDLbBHwAAAP//AwCTRhYdDQIA
        AA==
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8533e5b6686a96a1-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sat, 10 Feb 2024 11:09:00 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=UMEw8srVdwpa2TU38H2XN7c9yRB43pgp1kvO1rRuPVE-1707563340-1-ARd2N36Wvpnk/GruerkQ9HuyzyyTnin/J25VL/qPutgHpLWqdGHQ8Kj+QjBLAX79Kk9MYuRGo1PH2GCcBj0HWk8=;
        path=/; expires=Sat, 10-Feb-24 11:39:00 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=Q5shHUNkkqsi2PetAV7IWqdcumIc3ctNBTT7SLHr.Ho-1707563340988-0-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400
      openai-model:
      - gpt-4-0613
      openai-organization:
      - user-z7g4wmlazxqvc5wjyaaaocfz
      openai-processing-ms:
      - '1657'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '300000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '299739'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 52ms
      x-request-id:
      - req_3b8ed78b5dca776e8092ad4bc1e07945
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
      one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
      Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
      get_final_answer(numbers) -> float - Get the final answer but don''t give it
      yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid
      ouput schema:\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
      Input: 42\n\nThe output should be formatted as a JSON instance that conforms
      to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
      {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
      \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
      object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
      The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
      is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
      \"Function Name\", \"description\": \"The name of the function to be called.\",
      \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
      \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
      \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
      "n": 1, "stream": false, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '1407'
      content-type:
      - application/json
      cookie:
      - __cf_bm=UMEw8srVdwpa2TU38H2XN7c9yRB43pgp1kvO1rRuPVE-1707563340-1-ARd2N36Wvpnk/GruerkQ9HuyzyyTnin/J25VL/qPutgHpLWqdGHQ8Kj+QjBLAX79Kk9MYuRGo1PH2GCcBj0HWk8=;
        _cfuvid=Q5shHUNkkqsi2PetAV7IWqdcumIc3ctNBTT7SLHr.Ho-1707563340988-0-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.12.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.12.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA1RRTY/TMBC951eMfG5RS7ot5IxWwAEQKy2gDUpdZ5J4iWeMPYGtqvz3lZNuCxcf
        3sf4zZtTBqBsrQpQptNinO+Xb343397e37rj09fdHT7+uD/68OXDse3eafyuFsnBh0c08uJ6Zdj5
        HsUyzbQJqAXT1PVutbvZ5vlmPRGOa+yTrfWy3CxX23V+dnRsDUZVwEMGAHCa3pSNanxSBawWL4jD
        GHWLqriIAFTgPiFKx2ijaBK1uJKGSZCmuO8xINgI0iE0HJwWwRp4ED8I6AgaPt59/gSW0hCDIJ0W
        MExJG0F4MvrAf2yN9ayNpkOni5L2+31Jp5IAStUMZFIfFWmHpSqgVC1K1VjSfaUp/sVQqsWs1aEd
        HJLEpJv8CaXBHTBM2OZ1AseSxukTdd5svFTSc+sDH1J9NPT9BW8s2dhVAXVkSutHYT/bxwzg51T9
        8F+bygd2XirhX0hpYL7azfPU9cpXdrM9k8Ki+39cN3l2TqjiMQq6tHuLwQc7XSLlzMbsGQAA//8D
        AGHVex6AAgAA
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8533e5c1dfe296a1-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sat, 10 Feb 2024 11:09:02 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400
      openai-model:
      - gpt-4-0613
      openai-organization:
      - user-z7g4wmlazxqvc5wjyaaaocfz
      openai-processing-ms:
      - '1588'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '300000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '299683'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 63ms
      x-request-id:
      - req_9ffe6f9841407a91b5e11cc2009a1e45
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
      personal goal is: test goalTOOLS:\n------\nYou have access to only the following
      tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
      answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
      use a tool, please use the exact following format:\n\n```\nThought: Do I need
      to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
      just the name.\nAction Input: Any and all relevant information input and context
      for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
      have a response for your task, or if you do not need to use a tool, you MUST
      use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
      [your response here]```This is the summary of your work so far:\nBegin! This
      is VERY important to you, your job depends on it!\n\nCurrent Task: The final
      answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
      tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
      Input: 42\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
      ["\nObservation"], "stream": false, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '1246'
      content-type:
      - application/json
      cookie:
      - __cf_bm=UMEw8srVdwpa2TU38H2XN7c9yRB43pgp1kvO1rRuPVE-1707563340-1-ARd2N36Wvpnk/GruerkQ9HuyzyyTnin/J25VL/qPutgHpLWqdGHQ8Kj+QjBLAX79Kk9MYuRGo1PH2GCcBj0HWk8=;
        _cfuvid=Q5shHUNkkqsi2PetAV7IWqdcumIc3ctNBTT7SLHr.Ho-1707563340988-0-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.12.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.12.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA1SQS2/CMBCE7/kVK5+h4lUCuVTQHsqpUi+oLyHjbIKL43XtjaBC/PfKITx68WHG
        M/pmDwmA0PnIQKiNZFU50538FMvZfrx8edz59aKo36ev2+fp3PGcJlvRiQlaf6Pic+pOUeUMsiZ7
        spVHyRhb+2kvvR8Ph6NBY1SUo4mx0nF31O2N+8M2sSGtMIgMPhIAgEPzRjab415k0OuclQpDkCWK
        7PIJQHgyUREyBB1YWhadq6nIMtoG94lgARYxByaoA4IEJjIP8Ibh085U3JBBibwqtJVmJW3YoT87
        sLCu5gxGA9G2Hy9YhkrnaR0n2NqYi15oq8Nm5VEGshEhMLlT/JgAfDXz63+LhPNUOV4xbdHGwkGa
        nvrE9dI3br81mViaG306SVpCEX4DYxVHleid1801ImdyTP4AAAD//wMAoP/ZMQQCAAA=
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8533e5cc7d4396a1-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sat, 10 Feb 2024 11:09:04 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400
      openai-model:
      - gpt-4-0613
      openai-organization:
      - user-z7g4wmlazxqvc5wjyaaaocfz
      openai-processing-ms:
      - '1729'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '300000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '299713'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 57ms
      x-request-id:
      - req_204859a5fd0b455d5d5b2fcafea18ab2
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
      personal goal is: test goalTOOLS:\n------\nYou have access to only the following
      tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
      answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
      use a tool, please use the exact following format:\n\n```\nThought: Do I need
      to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
      just the name.\nAction Input: Any and all relevant information input and context
      for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
      have a response for your task, or if you do not need to use a tool, you MUST
      use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
      [your response here]```This is the summary of your work so far:\nBegin! This
      is VERY important to you, your job depends on it!\n\nCurrent Task: The final
      answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
      tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
      Input: 42\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
      Input: 42\nObservation: Actually, I used too many tools, so I''ll stop now and
      give you my absolute BEST Final answer NOW, using the expected format: ```\nThought:
      Do I need to use a tool? No\nFinal Answer: [your response here]```\nThought:
      "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
      0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '1549'
      content-type:
      - application/json
      cookie:
      - __cf_bm=UMEw8srVdwpa2TU38H2XN7c9yRB43pgp1kvO1rRuPVE-1707563340-1-ARd2N36Wvpnk/GruerkQ9HuyzyyTnin/J25VL/qPutgHpLWqdGHQ8Kj+QjBLAX79Kk9MYuRGo1PH2GCcBj0HWk8=;
        _cfuvid=Q5shHUNkkqsi2PetAV7IWqdcumIc3ctNBTT7SLHr.Ho-1707563340988-0-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.12.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.12.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA1SQT0/CQBDF7/0Ukz2DaWkLphdDNFGCBxMT0ahplnZaitudZXeIfwjf3WwpoJc9
        vN++l/dmFwCIphQZiGIluWiNGl5uqsX13eJn8/gUPt9H6cPU6jlN02o9v12IgXfQco0FH10XBbVG
        ITekD7iwKBl9ajQJJ+k4jpOkAy2VqLytNjxMhuE4invHipoCncjgNQAA2HWv76ZL/BIZhIOj0qJz
        ssaRnT4BCEvKK0I61ziWmsXgDAvSjLqre0MwA41YAhNsHYIEJlJX8ILuTU8LvyGDGjmvGi1VLrX7
        RHskMNNmyxkkI9Gn70+1FNXG0tJP0FulTnrV6MatcovSkfYVHJM52PcBwHs3f/tvkTCWWsM50wdq
        Hxin6SFPnC99pqOoh0ws1R/XZBz0DYX7doytH1WjNbbpruF7BvvgFwAA//8DAMXWvogEAgAA
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8533e5d81ae396a1-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sat, 10 Feb 2024 11:09:05 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400
      openai-model:
      - gpt-4-0613
      openai-organization:
      - user-z7g4wmlazxqvc5wjyaaaocfz
      openai-processing-ms:
      - '1177'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '300000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '299639'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 72ms
      x-request-id:
      - req_b910e4b0341d6248b46d0e2ba4602f86
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
      one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
      Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
      get_final_answer(numbers) -> float - Get the final answer but don''t give it
      yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid
      ouput schema:\nDo I need to use a tool? Yes\nAction: get_final_answer\nAction
      Input: 42\n\nThe output should be formatted as a JSON instance that conforms
      to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
      {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
      \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
      object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
      The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
      is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
      \"Function Name\", \"description\": \"The name of the function to be called.\",
      \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
      \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
      \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
      "n": 1, "stream": false, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '1398'
      content-type:
      - application/json
      cookie:
      - __cf_bm=UMEw8srVdwpa2TU38H2XN7c9yRB43pgp1kvO1rRuPVE-1707563340-1-ARd2N36Wvpnk/GruerkQ9HuyzyyTnin/J25VL/qPutgHpLWqdGHQ8Kj+QjBLAX79Kk9MYuRGo1PH2GCcBj0HWk8=;
        _cfuvid=Q5shHUNkkqsi2PetAV7IWqdcumIc3ctNBTT7SLHr.Ho-1707563340988-0-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.12.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.12.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA1SRzW7bMBCE73qKxZ7twKlkJ9W9QHtIf9BDUFSFTNMriQm5pMkVksDQuxeUHbu9
        6DC732pmeCwA0OyxBtSDEu2CXd4fusdPDx/7X/7h21c+/Ci/P7nD888v9xsJFheZ8Lsn0vJO3Wjv
        giUxnk9jHUkJ5au3d6u79aYsq808cH5PNmN9kGW1XG1uyzMxeKMpYQ2/CwCA4/zN3nhPr1jDavGu
        OEpJ9YT1ZQkAo7dZQZWSSaJYcHEdas9CPNv9TJHAJFDwQtYuOx+dEqE9GM6YJvAdyECQ9EBO1Q03
        vN1uGz42DNBgN7LOOVtWjhqsocGepO0MK9sqTi8UG1ycdlXsR0csKe/NfFZ5dDuKs1Z9yOLU8DT/
        BM+Op0tU6/sQ/S7XwqO1F70zbNLQRlLJc46VxIcTPhUAf+ZKx/9awhC9C9KKfybOB8vV+nQPr693
        nVbnvlG8KPsPVa2Ls0NMb0nI5ew9xRDN3HD2WUzFXwAAAP//AwC92yRkWAIAAA==
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8533e5e02f2f96a1-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sat, 10 Feb 2024 11:09:09 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400
      openai-model:
      - gpt-4-0613
      openai-organization:
      - user-z7g4wmlazxqvc5wjyaaaocfz
      openai-processing-ms:
      - '2852'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '300000'
      x-ratelimit-remaining-requests:
      - '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299685'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 63ms
|
||||
x-request-id:
|
||||
- req_164c005261f8c276123bf69961c10198
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
|
||||
lines of conversation provided, adding onto the previous summary returning a
|
||||
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
|
||||
of artificial intelligence. The AI thinks artificial intelligence is a force
|
||||
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
|
||||
intelligence is a force for good?\nAI: Because artificial intelligence will
|
||||
help humans reach their full potential.\n\nNew summary:\nThe human asks what
|
||||
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
|
||||
is a force for good because it will help humans reach their full potential.\nEND
|
||||
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: The final
|
||||
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
|
||||
tool.\nAI: Agent stopped due to iteration limit or time limit.\n\nNew summary:"}],
|
||||
"model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '997'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=UMEw8srVdwpa2TU38H2XN7c9yRB43pgp1kvO1rRuPVE-1707563340-1-ARd2N36Wvpnk/GruerkQ9HuyzyyTnin/J25VL/qPutgHpLWqdGHQ8Kj+QjBLAX79Kk9MYuRGo1PH2GCcBj0HWk8=;
|
||||
_cfuvid=Q5shHUNkkqsi2PetAV7IWqdcumIc3ctNBTT7SLHr.Ho-1707563340988-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1RRy27bMBC86ysWPMuGbalx4ltuKVC0KBC0QIvCoakVxYbcVcmVUyPwvxekX+iF
|
||||
h5md4ezsewWgXKc2oMygxYTRz+7/9N+fvj3a+x969+lrw19en1f9A32Ww17Wqs4K3v1GIxfV3HAY
|
||||
PYpjOtEmohbMrsv1Yv3hrmnah0IE7tBnmR1l1s4Wd8vmrBjYGUxqAz8rAID38uZs1OFftYFFfUEC
|
||||
pqQtqs11CEBF9hlROiWXRJOo+kYaJkEqcZ8HhGEKmsBRkjgZSSADwuNHIBYQBuv2WKDekfagKb1h
|
||||
BO6hXcEBpQZNXZ7Lpo4mhCk5skXxYlG2RbY9yV5AmP0cnvgN9xjry1faIgkk4TFBN2G2i6jNkI1y
|
||||
NMGoc5vAEcQFBO+Ck7k6r3S8duHZjpF3uTeavL/ivSOXhm1EnZjy3vmvk/xYAfwqnU//1ajGyGGU
|
||||
rfArUiqna05+6nbeG9u2Z1JYtL/hq+W6OidU6ZAEQ27EYhyjKyfIOatj9Q8AAP//AwAIzVnseQIA
|
||||
AA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8533e5f4693b96a1-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sat, 10 Feb 2024 11:09:10 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '1561'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299765'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 46ms
|
||||
x-request-id:
|
||||
- req_7f3006cdd24fec9a5fc15ec53c50d32f
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
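The second recorded interaction above is the one most relevant to this commit: its prompt asks the model to return a JSON instance of a tool-call schema with `function_name` and `arguments` keys. For reference, a well-formed instance under that schema, reusing the prompt's own illustrative values, would look like the sketch below (the dict shape comes from the schema embedded in the prompt; the values are illustrative, not part of the recorded response):

```python
# A well-formed instance of the tool-call schema embedded in the second
# recorded prompt; values mirror the prompt's own example and are
# illustrative only.
tool_call = {
    "function_name": "get_final_answer",  # the name of the function to call
    "arguments": {"numbers": 42},         # a dict of arguments to pass to it
}
```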
File diff suppressed because it is too large
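The test changes below consume cassettes like the one above through pytest's VCR integration: the `@pytest.mark.vcr(filter_headers=["authorization"])` marker replays the recorded interactions instead of calling the live API. A minimal sketch of the mechanism, assuming pytest-recording (or pytest-vcr) is installed and cassettes are resolved from the test name:

```python
import pytest


@pytest.mark.vcr(filter_headers=["authorization"])  # keep the API key out of the cassette
def test_replays_recorded_completions():
    # On the first run, HTTP calls to api.openai.com are recorded into a
    # YAML cassette like the one above; on later runs, matching requests
    # are answered from the cassette, so the test is deterministic and
    # needs neither network access nor a real key.
    ...
```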
@@ -160,22 +160,7 @@ def test_hierarchical_process():

    assert (
        crew.kickoff()
        == """Here are the 5 interesting ideas with a highlight paragraph for each:

1. "The Future of AI in Healthcare: Predicting Diseases Before They Happen"
- "Imagine a future where AI empowers us to detect diseases before they arise, transforming healthcare from reactive to proactive. Machine learning algorithms, trained on vast amounts of patient data, could potentially predict heart diseases, strokes, or cancers before they manifest, allowing for early interventions and significantly improving patient outcomes. This article will delve into the rapid advancements in AI within the healthcare sector and how these technologies are ushering us into a new era of predictive medicine."

2. "How AI is Changing the Way We Cook: An Insight into Smart Kitchens"
- "From the humble home kitchen to grand culinary stages, AI is revolutionizing the way we cook. Smart appliances, equipped with advanced sensors and predictive algorithms, are turning kitchens into creative playgrounds, offering personalized recipes, precise cooking instructions, and even automated meal preparation. This article explores the fascinating intersection of AI and gastronomy, revealing how technology is transforming our culinary experiences."

3. "Redefining Fitness with AI: Personalized Workout Plans and Nutritional Advice"
- "Fitness reimagined – that's the promise of AI in the wellness industry. Picture a personal trainer who knows your strengths, weaknesses, and nutritional needs intimately. An AI-powered fitness app can provide this personalized experience, adapting your workout plans and dietary recommendations in real-time based on your progress and feedback. Join us as we unpack how AI is revolutionizing the fitness landscape, offering personalized, data-driven approaches to health and well-being."

4. "AI and the Art World: How Technology is Shaping Creativity"
- "Art and AI may seem like unlikely partners, but their synergy is sparking a creative revolution. AI algorithms are now creating mesmerizing artworks, challenging our perceptions of creativity and originality. From AI-assisted painting to generative music composition, this article will take you on a journey through the fascinating world of AI in art, exploring how technology is reshaping the boundaries of human creativity."

5. "AI in Space Exploration: The Next Frontier"
- "The vast expanse of space, once the sole domain of astronauts and rovers, is the next frontier for AI. AI technology is playing an increasingly vital role in space exploration, from predicting space weather to assisting in interstellar navigation. This article will delve into the exciting intersection of AI and space exploration, exploring how these advanced technologies are helping us uncover the mysteries of the cosmos.\""""
        == """Here are the five interesting ideas for an article with a highlight paragraph for each:\n\n1. The Evolution of Artificial Intelligence:\nDive deep into the fascinating journey of artificial intelligence, from its humble beginnings as a concept in science fiction to an integral part of our daily lives and a catalyst of modern innovations. Explore how AI has evolved over the years, the key milestones that have shaped its growth, and the visionary minds behind these advancements. Uncover the remarkable transformation of AI and its astounding potential for the future.\n\n2. AI in Everyday Life:\nUncover the unseen impact of AI in our every day lives, from our smartphones and home appliances to social media and healthcare. Learn about the subtle yet profound ways AI has become a silent partner in your daily routine, enhancing convenience, productivity, and decision-making. Explore the numerous applications of AI right at your fingertips and how it shapes our interactions with technology and the world around us.\n\n3. Ethical Implications of AI:\nVenture into the ethical labyrinth of artificial intelligence, where innovation meets responsibility. Explore the implications of AI on privacy, job security, and societal norms, and the moral obligations we have towards its development and use. Delve into the thought-provoking debates about AI ethics and the measures being taken to ensure its responsible and equitable use.\n\n4. The Rise of AI Startups:\nWitness the rise of AI startups, the new champions of innovation, driving the technology revolution. Discover how these trailblazing companies are harnessing the power of AI to solve complex problems, create new markets, and revolutionize industries. Learn about their unique challenges, their groundbreaking solutions, and the potential they hold for reshaping the future of technology and business.\n\n5. AI and the Environment:\nExplore the intersection of AI and the environment, where technology meets sustainability. Uncover how AI is being used to combat climate change, conserve biodiversity, and optimize resource utilization. Learn about the innovative ways AI is being used to create a sustainable future and the challenges and opportunities it presents."""
    )


@@ -277,18 +262,14 @@ def test_crew_verbose_levels_output(capsys):

@pytest.mark.vcr(filter_headers=["authorization"])
def test_cache_hitting_between_agents():
    from unittest.mock import patch
    from unittest.mock import call, patch

    from langchain.tools import tool

    @tool
    def multiplier(numbers) -> float:
        """Useful for when you need to multiply two numbers together.
        The input to this tool should be a comma separated list of numbers of
        length two, representing the two numbers you want to multiply together.
        For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
        a, b = numbers.split(",")
        return int(a) * int(b)
    def multiplier(first_number: int, second_number: int) -> float:
        """Useful for when you need to multiply two numbers together."""
        return first_number * second_number

    tasks = [
        Task(
@@ -308,15 +289,16 @@ def test_cache_hitting_between_agents():
        tasks=tasks,
    )

    assert crew._cache_handler._cache == {}
    output = crew.kickoff()
    assert crew._cache_handler._cache == {"multiplier-2,6": "12"}
    assert output == "12"

    with patch.object(CacheHandler, "read") as read:
        read.return_value = "12"
        crew.kickoff()
        read.assert_called_with("multiplier", "2,6")
        assert read.call_count == 2, "read was not called exactly twice"
        # Check if read was called with the expected arguments
        expected_calls = [
            call(tool="multiplier", input={"first_number": 2, "second_number": 6}),
            call(tool="multiplier", input={"first_number": 2, "second_number": 6}),
        ]
        read.assert_has_calls(expected_calls, any_order=False)


@pytest.mark.vcr(filter_headers=["authorization"])
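The net effect of the test changes above is that tools now take typed keyword arguments instead of a comma-separated string, which is why the cache is consulted with a structured `input` dict. A minimal sketch of the new tool shape, assuming (as the `"multiplier-2,6"` assertion suggests) that `CacheHandler` keys entries by tool name and input:

```python
from langchain.tools import tool


@tool
def multiplier(first_number: int, second_number: int) -> float:
    """Useful for when you need to multiply two numbers together."""
    # The @tool decorator infers an argument schema from these type hints,
    # so the agent can pass structured arguments rather than parsing "2,6".
    return first_number * second_number
```

The mocked `read` calls asserted in the test then show the executor consulting the cache with `tool="multiplier"` and `input={"first_number": 2, "second_number": 6}` before re-invoking the tool.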