diff --git a/crewai/agent.py b/crewai/agent.py index 7fe480c81..6759dfdc9 100644 --- a/crewai/agent.py +++ b/crewai/agent.py @@ -1,14 +1,14 @@ -"""Generic agent.""" from typing import Any, List, Optional from langchain.agents import AgentExecutor from langchain.agents.format_scratchpad import format_log_to_str -from langchain.agents.output_parsers import ReActSingleInputOutputParser from langchain.chat_models import ChatOpenAI from langchain.memory import ConversationSummaryMemory from langchain.tools.render import render_text_description -from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator +from langchain_core.runnables.config import RunnableConfig +from pydantic import BaseModel, Field, InstanceOf, model_validator +from .agents import CacheHandler, CrewAgentOutputParser, ToolsHandler from .prompts import Prompts @@ -29,9 +29,9 @@ class Agent(BaseModel): allow_delegation: Whether the agent is allowed to delegate tasks to other agents. """ - agent_executor: Optional[InstanceOf[AgentExecutor]] = Field( - default=None, description="An instance of the AgentExecutor class." - ) + class Config: + arbitrary_types_allowed = True + role: str = Field(description="Role of the agent") goal: str = Field(description="Objective of the agent") backstory: str = Field(description="Backstory of the agent") @@ -54,15 +54,59 @@ class Agent(BaseModel): tools: List[Any] = Field( default_factory=list, description="Tools at agents disposal" ) - _task_calls: List[Any] = PrivateAttr() + agent_executor: Optional[InstanceOf[AgentExecutor]] = Field( + default=None, description="An instance of the AgentExecutor class." + ) + tools_handler: Optional[InstanceOf[ToolsHandler]] = Field( + default=None, description="An instance of the ToolsHandler class." + ) + cache_handler: Optional[InstanceOf[CacheHandler]] = Field( + default=CacheHandler(), description="An instance of the CacheHandler class." 
+ ) @model_validator(mode="after") def check_agent_executor(self) -> "Agent": if not self.agent_executor: - self.agent_executor = self._create_agent_executor() + self.set_cache_handler(self.cache_handler) return self - def _create_agent_executor(self) -> AgentExecutor: + def execute_task( + self, task: str, context: str = None, tools: List[Any] = None + ) -> str: + """Execute a task with the agent. + + Args: + task: Task to execute. + context: Context to execute the task in. + tools: Tools to use for the task. + + Returns: + Output of the agent + """ + if context: + task = "\n".join( + [task, "\nThis is the context you are working with:", context] + ) + + tools = tools or self.tools + self.agent_executor.tools = tools + + return self.agent_executor.invoke( + { + "input": task, + "tool_names": self.__tools_names(tools), + "tools": render_text_description(tools), + }, + RunnableConfig(callbacks=[self.tools_handler]), + )["output"] + + def set_cache_handler(self, cache_handler) -> None: + print(f"cache_handler: {cache_handler}") + self.cache_handler = cache_handler + self.tools_handler = ToolsHandler(cache=self.cache_handler) + self.__create_agent_executor() + + def __create_agent_executor(self) -> AgentExecutor: """Create an agent executor for the agent. Returns: @@ -98,38 +142,14 @@ class Agent(BaseModel): bind = self.llm.bind(stop=["\nObservation"]) inner_agent = ( - agent_args | execution_prompt | bind | ReActSingleInputOutputParser() - ) - - return AgentExecutor(agent=inner_agent, **executor_args) - - def execute_task( - self, task: str, context: str = None, tools: List[Any] = None - ) -> str: - """Execute a task with the agent. - - Args: - task: Task to execute. - context: Context to execute the task in. - tools: Tools to use for the task. 
- - Returns: - Output of the agent - """ - if context: - task = "\n".join( - [task, "\nThis is the context you are working with:", context] + agent_args + | execution_prompt + | bind + | CrewAgentOutputParser( + tools_handler=self.tools_handler, cache=self.cache_handler ) - - tools = tools or self.tools - self.agent_executor.tools = tools - return self.agent_executor.invoke( - { - "input": task, - "tool_names": self.__tools_names(tools), - "tools": render_text_description(tools), - } - )["output"] + ) + self.agent_executor = AgentExecutor(agent=inner_agent, **executor_args) @staticmethod def __tools_names(tools) -> str: diff --git a/crewai/agents/__init__.py b/crewai/agents/__init__.py index e69de29bb..0851c13d6 100644 --- a/crewai/agents/__init__.py +++ b/crewai/agents/__init__.py @@ -0,0 +1,3 @@ +from .cache_handler import CacheHandler +from .output_parser import CrewAgentOutputParser +from .tools_handler import ToolsHandler diff --git a/crewai/agents/cache_handler.py b/crewai/agents/cache_handler.py new file mode 100644 index 000000000..def5684a9 --- /dev/null +++ b/crewai/agents/cache_handler.py @@ -0,0 +1,20 @@ +from typing import Optional + +from pydantic import PrivateAttr + + +class CacheHandler: + """Callback handler for tool usage.""" + + _cache: PrivateAttr = {} + + def __init__(self): + self._cache = {} + + def add(self, tool, input, output): + input = input.strip() + self._cache[f"{tool}-{input}"] = output + + def read(self, tool, input) -> Optional[str]: + input = input.strip() + return self._cache.get(f"{tool}-{input}") diff --git a/crewai/agents/output_parser.py b/crewai/agents/output_parser.py new file mode 100644 index 000000000..2b7ab8269 --- /dev/null +++ b/crewai/agents/output_parser.py @@ -0,0 +1,81 @@ +import re +from typing import Union + +from langchain.agents.output_parsers import ReActSingleInputOutputParser +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.exceptions import OutputParserException + +from 
.cache_handler import CacheHandler
+from .tools_handler import ToolsHandler
+
+FINAL_ANSWER_ACTION = "Final Answer:"
+FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
+    "Parsing LLM output produced both a final answer and a parse-able action:"
+)
+
+
+class CrewAgentOutputParser(ReActSingleInputOutputParser):
+    """Parses ReAct-style LLM calls that have a single tool input.
+
+    Expects output to be in one of two formats.
+
+    If the output signals that an action should be taken,
+    should be in the below format. This will result in an AgentAction
+    being returned.
+
+    ```
+    Thought: agent thought here
+    Action: search
+    Action Input: what is the temperature in SF?
+    ```
+
+    If the output signals that a final answer should be given,
+    should be in the below format. This will result in an AgentFinish
+    being returned.
+
+    ```
+    Thought: agent thought here
+    Final Answer: The temperature is 100 degrees
+    ```
+
+    It also prevents tools from being reused in a row.
+    """
+
+    class Config:
+        arbitrary_types_allowed = True
+
+    tools_handler: ToolsHandler
+    cache: CacheHandler
+
+    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
+        includes_answer = FINAL_ANSWER_ACTION in text
+        regex = (
+            r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
+        )
+        action_match = re.search(regex, text, re.DOTALL)
+        if action_match:
+            if includes_answer:
+                raise OutputParserException(
+                    f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
+                )
+            action = action_match.group(1).strip()
+            action_input = action_match.group(2)
+            tool_input = action_input.strip(" ")
+            tool_input = tool_input.strip('"')
+
+            last_tool_usage = self.tools_handler.last_used_tool
+            if last_tool_usage:
+                usage = {
+                    "tool": action,
+                    "input": tool_input,
+                }
+                if usage == last_tool_usage:
+                    raise OutputParserException(
+                        f"""\nI just used the {action} tool with input {tool_input}. 
So I already know the result of that.""" 
+                    )
+
+            result = self.cache.read(action, tool_input)
+            if result:
+                return AgentFinish({"output": result}, text)
+
+        return super().parse(text)
diff --git a/crewai/agents/tools_handler.py b/crewai/agents/tools_handler.py
new file mode 100644
index 000000000..1b6ecc13d
--- /dev/null
+++ b/crewai/agents/tools_handler.py
@@ -0,0 +1,42 @@
+from typing import Any, Dict
+
+from langchain.callbacks.base import BaseCallbackHandler
+
+from .cache_handler import CacheHandler
+
+
+class ToolsHandler(BaseCallbackHandler):
+    """Callback handler for tool usage."""
+
+    last_used_tool: Dict[str, Any] = {}
+    cache: CacheHandler = None
+
+    def __init__(self, cache: CacheHandler = None, **kwargs: Any):
+        """Initialize the callback handler."""
+        self.cache = cache
+        super().__init__(**kwargs)
+
+    def on_tool_start(
+        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
+    ) -> Any:
+        """Run when tool starts running."""
+        name = serialized.get("name")
+        if name not in ["invalid_tool", "_Exception"]:
+            tools_usage = {
+                "tool": name,
+                "input": input_str,
+            }
+            self.last_used_tool = tools_usage
+
+    def on_tool_end(self, output: str, **kwargs: Any) -> Any:
+        """Run when tool ends running."""
+        if (
+            "is not a valid tool" not in output
+            and "Invalid or incomplete response" not in output
+            and "Invalid Format" not in output
+        ):
+            self.cache.add(
+                tool=self.last_used_tool["tool"],
+                input=self.last_used_tool["input"],
+                output=output,
+            )
diff --git a/crewai/crew.py b/crewai/crew.py
index 0415f512f..dab14b2e7 100644
--- a/crewai/crew.py
+++ b/crewai/crew.py
@@ -1,10 +1,18 @@
 import json
 from typing import Any, Dict, List, Optional, Union
 
-from pydantic import BaseModel, Field, Json, field_validator, model_validator
+from pydantic import (
+    BaseModel,
+    Field,
+    InstanceOf,
+    Json,
+    field_validator,
+    model_validator,
+)
 from pydantic_core import PydanticCustomError
 
 from .agent import Agent
+from .agents import CacheHandler
 from 
.process import Process from .task import Task from .tools.agent_tools import AgentTools @@ -13,6 +21,9 @@ from .tools.agent_tools import AgentTools class Crew(BaseModel): """Class that represents a group of agents, how they should work together and their tasks.""" + class Config: + arbitrary_types_allowed = True + tasks: List[Task] = Field(description="List of tasks", default_factory=list) agents: List[Agent] = Field( description="List of agents in this crew.", default_factory=list @@ -26,6 +37,9 @@ class Crew(BaseModel): config: Optional[Union[Json, Dict[str, Any]]] = Field( description="Configuration of the crew.", default=None ) + cache_handler: Optional[InstanceOf[CacheHandler]] = Field( + default=CacheHandler(), description="An instance of the CacheHandler class." + ) @classmethod @field_validator("config", mode="before") @@ -58,6 +72,10 @@ class Crew(BaseModel): tasks.append(Task(**task, agent=task_agent)) self.tasks = tasks + + if self.agents: + for agent in self.agents: + agent.set_cache_handler(self.cache_handler) return self def kickoff(self) -> str: @@ -66,6 +84,9 @@ class Crew(BaseModel): Returns: Output of the crew for each task. """ + for agent in self.agents: + agent.cache_handler = self.cache_handler + if self.process == Process.sequential: return self.__sequential_loop() diff --git a/crewai/prompts.py b/crewai/prompts.py index 6868072b5..8923f8788 100644 --- a/crewai/prompts.py +++ b/crewai/prompts.py @@ -48,7 +48,7 @@ class Prompts(BaseModel): ``` Thought: Do I need to use a tool? Yes - Action: the action to take, should be one of [{tool_names}] + Action: the action to take, should be one of [{tool_names}], just the name. 
Action Input: the input to the action Observation: the result of the action ``` diff --git a/crewai/tools/agent_tools.py b/crewai/tools/agent_tools.py index ee91519e4..3c443261b 100644 --- a/crewai/tools/agent_tools.py +++ b/crewai/tools/agent_tools.py @@ -16,7 +16,7 @@ class AgentTools(BaseModel): return [ Tool.from_function( func=self.delegate_work, - name="Delegate Work to Co-Worker", + name="Delegate work to co-worker", description=dedent( f"""Useful to delegate a specific task to one of the following co-workers: [{', '.join([agent.role for agent in self.agents])}]. @@ -28,7 +28,7 @@ class AgentTools(BaseModel): ), Tool.from_function( func=self.ask_question, - name="Ask Question to Co-Worker", + name="Ask question to co-worker", description=dedent( f"""Useful to ask a question, opinion or take from on of the following co-workers: [{', '.join([agent.role for agent in self.agents])}]. @@ -53,10 +53,10 @@ class AgentTools(BaseModel): try: agent, task, information = command.split("|") except ValueError: - return "Error executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|information`." + return "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|information`." if not agent or not task or not information: - return "Error executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|question|information`." + return "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|question|information`." agent = [ available_agent @@ -65,9 +65,7 @@ class AgentTools(BaseModel): ] if len(agent) == 0: - return ( - "Error executing tool. Co-worker not found, double check the co-worker." - ) + return f"\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options: {', '.join([agent.role for agent in self.agents])}." 
agent = agent[0] result = agent.execute_task(task, information) diff --git a/tests/agent_test.py b/tests/agent_test.py index 2504c8aa3..80e7cebc8 100644 --- a/tests/agent_test.py +++ b/tests/agent_test.py @@ -4,6 +4,7 @@ import pytest from langchain.chat_models import ChatOpenAI as OpenAI from crewai.agent import Agent +from crewai.agents import CacheHandler def test_agent_creation(): @@ -101,6 +102,77 @@ def test_agent_execution_with_tools(): assert output == "12" +@pytest.mark.vcr(filter_headers=["authorization"]) +def test_logging_tool_usage(): + from langchain.tools import tool + + @tool + def multiplier(numbers) -> float: + """Useful for when you need to multiply two numbers together. + The input to this tool should be a comma separated list of numbers of + length two, representing the two numbers you want to multiply together. + For example, `1,2` would be the input if you wanted to multiply 1 by 2.""" + a, b = numbers.split(",") + return int(a) * int(b) + + agent = Agent( + role="test role", + goal="test goal", + backstory="test backstory", + tools=[multiplier], + allow_delegation=False, + verbose=True, + ) + + assert agent.tools_handler.last_used_tool == {} + output = agent.execute_task("What is 3 times 5?") + tool_usage = { + "tool": "multiplier", + "input": "3,5", + } + + assert output == "3 times 5 is 15." + assert agent.tools_handler.last_used_tool == tool_usage + + +@pytest.mark.vcr(filter_headers=["authorization"]) +def test_cache_hitting(): + from unittest.mock import patch + + from langchain.tools import tool + + @tool + def multiplier(numbers) -> float: + """Useful for when you need to multiply two numbers together. + The input to this tool should be a comma separated list of numbers of + length two, representing the two numbers you want to multiply together. 
+ For example, `1,2` would be the input if you wanted to multiply 1 by 2.""" + a, b = numbers.split(",") + return int(a) * int(b) + + cache_handler = CacheHandler() + + agent = Agent( + role="test role", + goal="test goal", + backstory="test backstory", + tools=[multiplier], + allow_delegation=False, + cache_handler=cache_handler, + verbose=True, + ) + + output = agent.execute_task("What is 2 times 6?") + output = agent.execute_task("What is 3 times 3?") + assert cache_handler._cache == {"multiplier-2,6": "12", "multiplier-3,3": "9"} + + with patch.object(CacheHandler, "read") as read: + read.return_value = "0" + output = agent.execute_task("What is 2 times 6?") + assert output == "0" + read.assert_called_once_with("multiplier", "2,6") + + @pytest.mark.vcr(filter_headers=["authorization"]) def test_agent_execution_with_specific_tools(): from langchain.tools import tool @@ -122,4 +194,4 @@ def test_agent_execution_with_specific_tools(): ) output = agent.execute_task(task="What is 3 times 4", tools=[multiplier]) - assert output == "3 times 4 is 12." + assert output == "12" diff --git a/tests/agent_tools/agent_tools_test.py b/tests/agent_tools/agent_tools_test.py index 291b4973c..ffac3298b 100644 --- a/tests/agent_tools/agent_tools_test.py +++ b/tests/agent_tools/agent_tools_test.py @@ -48,7 +48,7 @@ def test_delegate_work_with_wrong_input(): assert ( result - == "Error executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|information`." + == "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|information`." ) @@ -59,7 +59,7 @@ def test_delegate_work_to_wrong_agent(): assert ( result - == "Error executing tool. Co-worker not found, double check the co-worker." + == "\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options: researcher." 
) @@ -70,5 +70,5 @@ def test_ask_question_to_wrong_agent(): assert ( result - == "Error executing tool. Co-worker not found, double check the co-worker." + == "\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options: researcher." ) diff --git a/tests/cassettes/test_cache_hitting.yaml b/tests/cassettes/test_cache_hitting.yaml new file mode 100644 index 000000000..865322a6d --- /dev/null +++ b/tests/cassettes/test_cache_hitting.yaml @@ -0,0 +1,916 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following + tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need + to multiply two numbers together.\n The input to this tool should be + a comma separated list of numbers of\n length two, representing the two + numbers you want to multiply together.\n For example, `1,2` would be + the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: + the action to take, should be one of [multiplier], just the name.\nAction Input: + the input to the action\nObservation: the result of the action\n```\n\nWhen + you have a response for your task, or if you do not need to use a tool, you + MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]\n```\n\t\tThis is the summary of your work so far:\n \nBegin! 
+ This is VERY important to you, your job depends on it!\n\nCurrent Task: What + is 2 times 6?\n\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": + false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1219' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8b8NsTNQTMzHe8yjfHP8Eo31DhwBW\",\n \"object\": + \"chat.completion\",\n \"created\": 1703861056,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: + multiplier\\nAction Input: 2,6\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 261,\n \"completion_tokens\": + 23,\n \"total_tokens\": 284\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d2d1f00d1101c3-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 14:44:17 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=rd8TZZP3aKU.V0S.99_vKXMEF_oekbKasFEgEg.qrEw-1703861057-1-Ae5v8FUgqHuVy0hGrL9fTE3F2zt2n2q4pKK0tOpJh+mIw5ffYrGKfd68LoLAGihdbod6ucY4PYvsBE/6WSrp3sQ=; + path=/; expires=Fri, 29-Dec-23 15:14:17 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=M5Tgs1dNQjihNi4NUGpcOR42zHumBRUVwL214O3LaPA-1703861057944-0-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1750' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299720' + x-ratelimit-remaining-tokens_usage_based: + - '299720' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 55ms + x-ratelimit-reset-tokens_usage_based: + - 55ms + x-request-id: + - 907b4e1c5dddb9abb47843bbab76cd7a + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following + 
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need + to multiply two numbers together.\n The input to this tool should be + a comma separated list of numbers of\n length two, representing the two + numbers you want to multiply together.\n For example, `1,2` would be + the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: + the action to take, should be one of [multiplier], just the name.\nAction Input: + the input to the action\nObservation: the result of the action\n```\n\nWhen + you have a response for your task, or if you do not need to use a tool, you + MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]\n```\n\t\tThis is the summary of your work so far:\n \nBegin! + This is VERY important to you, your job depends on it!\n\nCurrent Task: What + is 2 times 6?\nThought: Do I need to use a tool? Yes\nAction: multiplier\nAction + Input: 2,6\nObservation: 12\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": + ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1323' + content-type: + - application/json + cookie: + - __cf_bm=rd8TZZP3aKU.V0S.99_vKXMEF_oekbKasFEgEg.qrEw-1703861057-1-Ae5v8FUgqHuVy0hGrL9fTE3F2zt2n2q4pKK0tOpJh+mIw5ffYrGKfd68LoLAGihdbod6ucY4PYvsBE/6WSrp3sQ=; + _cfuvid=M5Tgs1dNQjihNi4NUGpcOR42zHumBRUVwL214O3LaPA-1703861057944-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": 
\"chatcmpl-8b8NuuCivtT2524QvfyDjrA3XkXLB\",\n \"object\": + \"chat.completion\",\n \"created\": 1703861058,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: + 12\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": + 15,\n \"total_tokens\": 309\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d2d1fc79ad01c3-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 14:44:20 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2779' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299695' + x-ratelimit-remaining-tokens_usage_based: + - '299695' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 60ms + x-ratelimit-reset-tokens_usage_based: + - 60ms + x-request-id: + - 5db441cfcc97cfd1ab228c2c0d5234f6 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "Progressively summarize the + lines of conversation provided, adding onto the previous summary returning a + new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks + of artificial intelligence. 
The AI thinks artificial intelligence is a force + for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial + intelligence is a force for good?\nAI: Because artificial intelligence will + help humans reach their full potential.\n\nNew summary:\nThe human asks what + the AI thinks of artificial intelligence. The AI thinks artificial intelligence + is a force for good because it will help humans reach their full potential.\nEND + OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: What + is 2 times 6?\nAI: 12\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": + false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '872' + content-type: + - application/json + cookie: + - __cf_bm=rd8TZZP3aKU.V0S.99_vKXMEF_oekbKasFEgEg.qrEw-1703861057-1-Ae5v8FUgqHuVy0hGrL9fTE3F2zt2n2q4pKK0tOpJh+mIw5ffYrGKfd68LoLAGihdbod6ucY4PYvsBE/6WSrp3sQ=; + _cfuvid=M5Tgs1dNQjihNi4NUGpcOR42zHumBRUVwL214O3LaPA-1703861057944-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8b8Nxf7bPMYpEdt2JmUIWLV4maYXG\",\n \"object\": + \"chat.completion\",\n \"created\": 1703861061,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The human asks the AI to calculate 2 + times 6, and the AI responds with 12.\"\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 149,\n \"completion_tokens\": 21,\n \"total_tokens\": 170\n },\n \"system_fingerprint\": + null\n}\n" + 
headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d2d20fdf6f01c3-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 14:44:22 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1463' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299796' + x-ratelimit-remaining-tokens_usage_based: + - '299796' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 40ms + x-ratelimit-reset-tokens_usage_based: + - 40ms + x-request-id: + - 99bc213367b851b629eb5489381887b7 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following + tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need + to multiply two numbers together.\n The input to this tool should be + a comma separated list of numbers of\n length two, representing the two + numbers you want to multiply together.\n For example, `1,2` would be + the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: + the action to take, should be one of [multiplier], just the name.\nAction Input: + the input to the action\nObservation: the result of the action\n```\n\nWhen + you have a response for your task, or if you do not need to use a tool, you + MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]\n```\n\t\tThis is the summary of your work so far:\n The + human asks the AI to calculate 2 times 6, and the AI responds with 12.\nBegin! + This is VERY important to you, your job depends on it!\n\nCurrent Task: What + is 3 times 3?\n\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": + false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1293' + content-type: + - application/json + cookie: + - __cf_bm=rd8TZZP3aKU.V0S.99_vKXMEF_oekbKasFEgEg.qrEw-1703861057-1-Ae5v8FUgqHuVy0hGrL9fTE3F2zt2n2q4pKK0tOpJh+mIw5ffYrGKfd68LoLAGihdbod6ucY4PYvsBE/6WSrp3sQ=; + _cfuvid=M5Tgs1dNQjihNi4NUGpcOR42zHumBRUVwL214O3LaPA-1703861057944-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8b8NyS4i9PXuCQMD9p0Afb7msIFLA\",\n \"object\": + \"chat.completion\",\n \"created\": 1703861062,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: + multiplier\\nAction Input: 3,3\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 282,\n \"completion_tokens\": + 23,\n \"total_tokens\": 305\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d2d21a893d01c3-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 14:44:25 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2472' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299703' + x-ratelimit-remaining-tokens_usage_based: + - '299703' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 59ms + x-ratelimit-reset-tokens_usage_based: + - 59ms + x-request-id: + - ac6d5063cae237481f45c88406ee1134 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following + tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need + to multiply two numbers together.\n The input to this tool should be + a comma separated list of numbers of\n length two, representing the two + numbers you want to multiply together.\n For example, `1,2` would be + the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to 
use a tool? Yes\nAction: + the action to take, should be one of [multiplier], just the name.\nAction Input: + the input to the action\nObservation: the result of the action\n```\n\nWhen + you have a response for your task, or if you do not need to use a tool, you + MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]\n```\n\t\tThis is the summary of your work so far:\n The + human asks the AI to calculate 2 times 6, and the AI responds with 12.\nBegin! + This is VERY important to you, your job depends on it!\n\nCurrent Task: What + is 3 times 3?\nThought: Do I need to use a tool? Yes\nAction: multiplier\nAction + Input: 3,3\nObservation: 9\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": + ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1396' + content-type: + - application/json + cookie: + - __cf_bm=rd8TZZP3aKU.V0S.99_vKXMEF_oekbKasFEgEg.qrEw-1703861057-1-Ae5v8FUgqHuVy0hGrL9fTE3F2zt2n2q4pKK0tOpJh+mIw5ffYrGKfd68LoLAGihdbod6ucY4PYvsBE/6WSrp3sQ=; + _cfuvid=M5Tgs1dNQjihNi4NUGpcOR42zHumBRUVwL214O3LaPA-1703861057944-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8b8O1OvQNcMLn5JwCShOrcdUGDYTQ\",\n \"object\": + \"chat.completion\",\n \"created\": 1703861065,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Do I need to use a tool? 
No\\nFinal Answer: + 3 times 3 is 9.\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 315,\n \"completion_tokens\": + 22,\n \"total_tokens\": 337\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d2d22c1c5c01c3-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 14:44:27 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1885' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299678' + x-ratelimit-remaining-tokens_usage_based: + - '299678' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 64ms + x-ratelimit-reset-tokens_usage_based: + - 64ms + x-request-id: + - 34020a2529f99325e63ed40493268cd1 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "Progressively summarize the + lines of conversation provided, adding onto the previous summary returning a + new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks + of artificial intelligence. The AI thinks artificial intelligence is a force + for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial + intelligence is a force for good?\nAI: Because artificial intelligence will + help humans reach their full potential.\n\nNew summary:\nThe human asks what + the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence + is a force for good because it will help humans reach their full potential.\nEND + OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to calculate 2 times 6, + and the AI responds with 12.\n\nNew lines of conversation:\nHuman: What is 3 + times 3?\nAI: 3 times 3 is 9.\n\nNew summary:"}], "model": "gpt-4", "n": 1, + "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '959' + content-type: + - application/json + cookie: + - __cf_bm=rd8TZZP3aKU.V0S.99_vKXMEF_oekbKasFEgEg.qrEw-1703861057-1-Ae5v8FUgqHuVy0hGrL9fTE3F2zt2n2q4pKK0tOpJh+mIw5ffYrGKfd68LoLAGihdbod6ucY4PYvsBE/6WSrp3sQ=; + _cfuvid=M5Tgs1dNQjihNi4NUGpcOR42zHumBRUVwL214O3LaPA-1703861057944-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8b8O3IxMbwCemZSzKFeBXV2doGGt7\",\n \"object\": + \"chat.completion\",\n \"created\": 1703861067,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The human asks the AI to calculate 2 + times 6 and 3 times 3, and the AI responds with 12 and 9 respectively.\"\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 176,\n \"completion_tokens\": 31,\n + \ \"total_tokens\": 207\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d2d2395c4d01c3-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - 
application/json + Date: + - Fri, 29 Dec 2023 14:44:30 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '3030' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299775' + x-ratelimit-remaining-tokens_usage_based: + - '299775' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 44ms + x-ratelimit-reset-tokens_usage_based: + - 44ms + x-request-id: + - 6a043e22366e13d63ad963dc01964533 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following + tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need + to multiply two numbers together.\n The input to this tool should be + a comma separated list of numbers of\n length two, representing the two + numbers you want to multiply together.\n For example, `1,2` would be + the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: + the action to take, should be one of [multiplier], just the name.\nAction Input: + the input to the action\nObservation: the result of the action\n```\n\nWhen + you have a response for your task, or if you do not need to use a tool, you + MUST use the format:\n\n```\nThought: Do I need to use a tool? 
No\nFinal Answer: + [your response here]\n```\n\t\tThis is the summary of your work so far:\n The + human asks the AI to calculate 2 times 6 and 3 times 3, and the AI responds + with 12 and 9 respectively.\nBegin! This is VERY important to you, your job + depends on it!\n\nCurrent Task: What is 2 times 6?\n\n"}], "model": "gpt-4", + "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1326' + content-type: + - application/json + cookie: + - __cf_bm=rd8TZZP3aKU.V0S.99_vKXMEF_oekbKasFEgEg.qrEw-1703861057-1-Ae5v8FUgqHuVy0hGrL9fTE3F2zt2n2q4pKK0tOpJh+mIw5ffYrGKfd68LoLAGihdbod6ucY4PYvsBE/6WSrp3sQ=; + _cfuvid=M5Tgs1dNQjihNi4NUGpcOR42zHumBRUVwL214O3LaPA-1703861057944-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8b8O7429GQMpUZpPBK77gOoK8UnMY\",\n \"object\": + \"chat.completion\",\n \"created\": 1703861071,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: + multiplier\\nAction Input: 2,6\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 292,\n \"completion_tokens\": + 23,\n \"total_tokens\": 315\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d2d24e09bf01c3-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 14:44:32 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1369' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299695' + x-ratelimit-remaining-tokens_usage_based: + - '299695' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 61ms + x-ratelimit-reset-tokens_usage_based: + - 61ms + x-request-id: + - 7807e472ac55a3cb9201c4eadfe13ee2 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "Progressively summarize the + lines of conversation provided, adding onto the previous summary returning a + new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks + of artificial intelligence. The AI thinks artificial intelligence is a force + for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial + intelligence is a force for good?\nAI: Because artificial intelligence will + help humans reach their full potential.\n\nNew summary:\nThe human asks what + the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence + is a force for good because it will help humans reach their full potential.\nEND + OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to calculate 2 times 6 + and 3 times 3, and the AI responds with 12 and 9 respectively.\n\nNew lines + of conversation:\nHuman: What is 2 times 6?\nAI: 0\n\nNew summary:"}], "model": + "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '978' + content-type: + - application/json + cookie: + - __cf_bm=rd8TZZP3aKU.V0S.99_vKXMEF_oekbKasFEgEg.qrEw-1703861057-1-Ae5v8FUgqHuVy0hGrL9fTE3F2zt2n2q4pKK0tOpJh+mIw5ffYrGKfd68LoLAGihdbod6ucY4PYvsBE/6WSrp3sQ=; + _cfuvid=M5Tgs1dNQjihNi4NUGpcOR42zHumBRUVwL214O3LaPA-1703861057944-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8b8O8FQhcRqwPnGUxFziRReKvggG1\",\n \"object\": + \"chat.completion\",\n \"created\": 1703861072,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The human asks the AI to calculate 2 + times 6 and 3 times 3, and the AI responds with 12 and 9 respectively. 
However, + when asked again to calculate 2 times 6, the AI incorrectly responds with 0.\"\n + \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n + \ ],\n \"usage\": {\n \"prompt_tokens\": 180,\n \"completion_tokens\": + 52,\n \"total_tokens\": 232\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d2d2582ae901c3-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 14:44:37 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '4793' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299770' + x-ratelimit-remaining-tokens_usage_based: + - '299770' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 45ms + x-ratelimit-reset-tokens_usage_based: + - 45ms + x-request-id: + - feb3958ca226769ce85f1b4a6ba649ac + http_version: HTTP/1.1 + status_code: 200 +version: 1 diff --git a/tests/cassettes/test_cache_hitting_between_agents.yaml b/tests/cassettes/test_cache_hitting_between_agents.yaml new file mode 100644 index 000000000..f73bdd468 --- /dev/null +++ b/tests/cassettes/test_cache_hitting_between_agents.yaml @@ -0,0 +1,1084 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": "You are CEO.\nYou''re an long + time CEO of a content creation agency with a Senior Writer on the team. 
You''re + now working on a new project and want to make sure the content produced is amazing.\n\nYour + personal goal is: Make sure the writers in your company produce amazing content.\n\nTOOLS:\n------\nYou + have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float + - Useful for when you need to multiply two numbers together.\n The input + to this tool should be a comma separated list of numbers of\n length + two, representing the two numbers you want to multiply together.\n For + example, `1,2` would be the input if you wanted to multiply 1 by 2.\nDelegate + work to co-worker: Useful to delegate a specific task to one of the\n\t\t\t\tfollowing + co-workers: [CEO, Researcher].\n\t\t\t\tThe input to this tool should be a pipe + (|) separated text of length\n\t\t\t\tthree, representing the role you want + to delegate it to, the task and\n\t\t\t\tinformation necessary. For example, + `coworker|task|information`.\n\nAsk question to co-worker: Useful to ask a question, + opinion or take from on\n\t\t\t\tof the following co-workers: [CEO, Researcher].\n\t\t\t\tThe + input to this tool should be a pipe (|) separated text of length\n\t\t\t\tthree, + representing the role you want to ask it to, the question and\n\t\t\t\tinformation + necessary. For example, `coworker|question|information`.\n\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the action to take, should be one of [multiplier, Delegate work + to co-worker, Ask question to co-worker], just the name.\nAction Input: the + input to the action\nObservation: the result of the action\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]\n```\n\t\tThis is the summary of your work so far:\n \nBegin! 
+ This is VERY important to you, your job depends on it!\n\nCurrent Task: What + is 2 tims 6?\n\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": + false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '2215' + content-type: + - application/json + cookie: + - _cfuvid=ps.JYe.5DT5QaG1hDuUb70tnVJo8IPWJrGrGEO_HUYA-1703099776177-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8bFlFR80bKQQzwjFzAICpcBT2G3s0\",\n \"object\": + \"chat.completion\",\n \"created\": 1703889413,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: + multiplier\\nAction Input: 2,6\\n\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 484,\n \"completion_tokens\": + 24,\n \"total_tokens\": 508\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d5863e9a050301-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 22:36:54 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=6RpGZVaY4yiI4M4AoreJuJny2199LZXRkp8DDrcSK5k-1703889414-1-AdxFp5oHDyE1t+/uGSC6LyRh7n4+ats5oznbOnwXmT+G4BkBdSY9kVWLS2RSLndYHxVzz5ksSBcREfL/gr1HAAc=; + path=/; expires=Fri, 29-Dec-23 23:06:54 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=O8jSUEuY34NDEdF4z_HQGq8w7idZ7PZlcpp1vRj2y5E-1703889414566-0-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1227' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299483' + x-ratelimit-remaining-tokens_usage_based: + - '299483' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 103ms + x-ratelimit-reset-tokens_usage_based: + - 103ms + x-request-id: + - 938270aa00d199e6be67525b6e3d6eba + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "You are CEO.\nYou''re an long + time CEO of a content creation agency with a Senior Writer on the team. 
You''re + now working on a new project and want to make sure the content produced is amazing.\n\nYour + personal goal is: Make sure the writers in your company produce amazing content.\n\nTOOLS:\n------\nYou + have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float + - Useful for when you need to multiply two numbers together.\n The input + to this tool should be a comma separated list of numbers of\n length + two, representing the two numbers you want to multiply together.\n For + example, `1,2` would be the input if you wanted to multiply 1 by 2.\nDelegate + work to co-worker: Useful to delegate a specific task to one of the\n\t\t\t\tfollowing + co-workers: [CEO, Researcher].\n\t\t\t\tThe input to this tool should be a pipe + (|) separated text of length\n\t\t\t\tthree, representing the role you want + to delegate it to, the task and\n\t\t\t\tinformation necessary. For example, + `coworker|task|information`.\n\nAsk question to co-worker: Useful to ask a question, + opinion or take from on\n\t\t\t\tof the following co-workers: [CEO, Researcher].\n\t\t\t\tThe + input to this tool should be a pipe (|) separated text of length\n\t\t\t\tthree, + representing the role you want to ask it to, the question and\n\t\t\t\tinformation + necessary. For example, `coworker|question|information`.\n\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the action to take, should be one of [multiplier, Delegate work + to co-worker, Ask question to co-worker], just the name.\nAction Input: the + input to the action\nObservation: the result of the action\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]\n```\n\t\tThis is the summary of your work so far:\n \nBegin! 
+ This is VERY important to you, your job depends on it!\n\nCurrent Task: What + is 2 tims 6?\nThought: Do I need to use a tool? Yes\nAction: multiplier\nAction + Input: 2,6\n\nObservation: 12\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": + ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '2321' + content-type: + - application/json + cookie: + - _cfuvid=O8jSUEuY34NDEdF4z_HQGq8w7idZ7PZlcpp1vRj2y5E-1703889414566-0-604800000; + __cf_bm=6RpGZVaY4yiI4M4AoreJuJny2199LZXRkp8DDrcSK5k-1703889414-1-AdxFp5oHDyE1t+/uGSC6LyRh7n4+ats5oznbOnwXmT+G4BkBdSY9kVWLS2RSLndYHxVzz5ksSBcREfL/gr1HAAc= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8bFlGBZrFqbUBA6wWFIW1k7CrvOMc\",\n \"object\": + \"chat.completion\",\n \"created\": 1703889414,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Do I need to use a tool? 
No\\nFinal Answer: + 2 times 6 is 12.\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 517,\n \"completion_tokens\": + 22,\n \"total_tokens\": 539\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d586496c140301-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 22:36:59 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2377' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299458' + x-ratelimit-remaining-tokens_usage_based: + - '299458' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 108ms + x-ratelimit-reset-tokens_usage_based: + - 108ms + x-request-id: + - ab3bab4c142330d66869ccff39de5738 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "Progressively summarize the + lines of conversation provided, adding onto the previous summary returning a + new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks + of artificial intelligence. The AI thinks artificial intelligence is a force + for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial + intelligence is a force for good?\nAI: Because artificial intelligence will + help humans reach their full potential.\n\nNew summary:\nThe human asks what + the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence + is a force for good because it will help humans reach their full potential.\nEND + OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: What + is 2 tims 6?\nAI: 2 times 6 is 12.\n\nNew summary:"}], "model": "gpt-4", "n": + 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '885' + content-type: + - application/json + cookie: + - _cfuvid=O8jSUEuY34NDEdF4z_HQGq8w7idZ7PZlcpp1vRj2y5E-1703889414566-0-604800000; + __cf_bm=6RpGZVaY4yiI4M4AoreJuJny2199LZXRkp8DDrcSK5k-1703889414-1-AdxFp5oHDyE1t+/uGSC6LyRh7n4+ats5oznbOnwXmT+G4BkBdSY9kVWLS2RSLndYHxVzz5ksSBcREfL/gr1HAAc= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8bFlMHhxpCjHkQymfxQ5D8WBZkclA\",\n \"object\": + \"chat.completion\",\n \"created\": 1703889420,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The human asks the AI for the product + of 2 times 6, and the AI responds that it is 12.\"\n },\n \"logprobs\": + null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 156,\n \"completion_tokens\": 25,\n \"total_tokens\": 181\n },\n \"system_fingerprint\": + null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d5866a6c360301-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 22:37:01 GMT + Server: + - cloudflare + Transfer-Encoding: + 
- chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1468' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299794' + x-ratelimit-remaining-tokens_usage_based: + - '299794' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 41ms + x-ratelimit-reset-tokens_usage_based: + - 41ms + x-request-id: + - e6e04fb8a12fc2288d6a3bd0b4480ab0 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re + an expert researcher, specialized in technology, software engineering, AI and + startups. You work as a freelancer and is now working on doing research and + analysis for a new customer.\n\nYour personal goal is: Make the best research + and analysis on content about AI and AI agents\n\nTOOLS:\n------\nYou have access + to the following tools:\n\nmultiplier: multiplier(numbers) -> float - Useful + for when you need to multiply two numbers together.\n The input to this + tool should be a comma separated list of numbers of\n length two, representing + the two numbers you want to multiply together.\n For example, `1,2` would + be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use + the exact following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: + the action to take, should be one of [multiplier], just the name.\nAction Input: + the input to the action\nObservation: the result of the action\n```\n\nWhen + you have a response for your task, or if you do not need to use a tool, you + MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]\n```\n\t\tThis is the summary of your work so far:\n \nBegin! + This is VERY important to you, your job depends on it!\n\nCurrent Task: What + is 2 times 6?\n\nThis is the context you are working with:\n2 times 6 is 12.\n\n"}], + "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1518' + content-type: + - application/json + cookie: + - _cfuvid=u_fUK0MqnLcrbhMkg8kKMfrSZpALgzClvD984s71oU8-1703091698056-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8bFlOAG2g3FwravPGs5nQmnA8wpf1\",\n \"object\": + \"chat.completion\",\n \"created\": 1703889422,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: + multiplier\\nAction Input: 2,6\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 322,\n \"completion_tokens\": + 23,\n \"total_tokens\": 345\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d586773aea77c0-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 22:37:04 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=NTFlpt52iEoCg0Pkx5.1jxp.b3GIrmzDorPoCjgwA1c-1703889424-1-AcnqI2v1g/4ArTSHF6F4yC9cQd8KWJUiSRlY3mTNPF+6P6fectfBIvD544RQw/zhEY+z/eZ5pw4t/Baew653jtw=; + path=/; expires=Fri, 29-Dec-23 23:07:04 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=yTJHDMg7VYHMEKTSaPhpQp_5NZ3Xw1rIS8dQw4LmQMk-1703889424692-0-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2511' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299646' + x-ratelimit-remaining-tokens_usage_based: + - '299646' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 70ms + x-ratelimit-reset-tokens_usage_based: + - 70ms + x-request-id: + - b86341441fa500dc373240b329cf5446 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "Progressively summarize the + lines of conversation provided, adding onto the previous summary returning a + new 
summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks + of artificial intelligence. The AI thinks artificial intelligence is a force + for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial + intelligence is a force for good?\nAI: Because artificial intelligence will + help humans reach their full potential.\n\nNew summary:\nThe human asks what + the AI thinks of artificial intelligence. The AI thinks artificial intelligence + is a force for good because it will help humans reach their full potential.\nEND + OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: What + is 2 times 6?\n\nThis is the context you are working with:\n2 times 6 is 12.\nAI: + 12\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": + 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '935' + content-type: + - application/json + cookie: + - _cfuvid=yTJHDMg7VYHMEKTSaPhpQp_5NZ3Xw1rIS8dQw4LmQMk-1703889424692-0-604800000; + __cf_bm=NTFlpt52iEoCg0Pkx5.1jxp.b3GIrmzDorPoCjgwA1c-1703889424-1-AcnqI2v1g/4ArTSHF6F4yC9cQd8KWJUiSRlY3mTNPF+6P6fectfBIvD544RQw/zhEY+z/eZ5pw4t/Baew653jtw= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8bFlQVqyX43MFLq79thTQ8UaZZ1xD\",\n \"object\": + \"chat.completion\",\n \"created\": 1703889424,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The human asks the AI what is 2 times + 6, and the AI responds with 12.\"\n },\n \"logprobs\": null,\n \"finish_reason\": + 
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 166,\n \"completion_tokens\": + 21,\n \"total_tokens\": 187\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d58688eda777c0-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 22:37:06 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1152' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299782' + x-ratelimit-remaining-tokens_usage_based: + - '299782' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 43ms + x-ratelimit-reset-tokens_usage_based: + - 43ms + x-request-id: + - d86894b3054720d2053d5bfef884d985 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "You are CEO.\nYou''re an long + time CEO of a content creation agency with a Senior Writer on the team. 
You''re + now working on a new project and want to make sure the content produced is amazing.\n\nYour + personal goal is: Make sure the writers in your company produce amazing content.\n\nTOOLS:\n------\nYou + have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float + - Useful for when you need to multiply two numbers together.\n The input + to this tool should be a comma separated list of numbers of\n length + two, representing the two numbers you want to multiply together.\n For + example, `1,2` would be the input if you wanted to multiply 1 by 2.\nDelegate + work to co-worker: Useful to delegate a specific task to one of the\n\t\t\t\tfollowing + co-workers: [CEO, Researcher].\n\t\t\t\tThe input to this tool should be a pipe + (|) separated text of length\n\t\t\t\tthree, representing the role you want + to delegate it to, the task and\n\t\t\t\tinformation necessary. For example, + `coworker|task|information`.\n\nAsk question to co-worker: Useful to ask a question, + opinion or take from on\n\t\t\t\tof the following co-workers: [CEO, Researcher].\n\t\t\t\tThe + input to this tool should be a pipe (|) separated text of length\n\t\t\t\tthree, + representing the role you want to ask it to, the question and\n\t\t\t\tinformation + necessary. For example, `coworker|question|information`.\n\nDelegate work to + co-worker: Useful to delegate a specific task to one of the\n\t\t\t\tfollowing + co-workers: [CEO, Researcher].\n\t\t\t\tThe input to this tool should be a pipe + (|) separated text of length\n\t\t\t\tthree, representing the role you want + to delegate it to, the task and\n\t\t\t\tinformation necessary. 
For example, + `coworker|task|information`.\n\nAsk question to co-worker: Useful to ask a question, + opinion or take from on\n\t\t\t\tof the following co-workers: [CEO, Researcher].\n\t\t\t\tThe + input to this tool should be a pipe (|) separated text of length\n\t\t\t\tthree, + representing the role you want to ask it to, the question and\n\t\t\t\tinformation + necessary. For example, `coworker|question|information`.\n\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the action to take, should be one of [multiplier, Delegate work + to co-worker, Ask question to co-worker, Delegate work to co-worker, Ask question + to co-worker], just the name.\nAction Input: the input to the action\nObservation: + the result of the action\n```\n\nWhen you have a response for your task, or + if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis + is the summary of your work so far:\n The human asks the AI for the product + of 2 times 6, and the AI responds that it is 12.\nBegin! 
This is VERY important + to you, your job depends on it!\n\nCurrent Task: What is 2 tims 6?\n\n"}], "model": + "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '3088' + content-type: + - application/json + cookie: + - _cfuvid=O8jSUEuY34NDEdF4z_HQGq8w7idZ7PZlcpp1vRj2y5E-1703889414566-0-604800000; + __cf_bm=6RpGZVaY4yiI4M4AoreJuJny2199LZXRkp8DDrcSK5k-1703889414-1-AdxFp5oHDyE1t+/uGSC6LyRh7n4+ats5oznbOnwXmT+G4BkBdSY9kVWLS2RSLndYHxVzz5ksSBcREfL/gr1HAAc= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8bFlSjjmk16fcEx4qjS8HvNxbn3ky\",\n \"object\": + \"chat.completion\",\n \"created\": 1703889426,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: + multiplier\\nAction Input: 2,6\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 688,\n \"completion_tokens\": + 23,\n \"total_tokens\": 711\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d58691fc2e0301-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 22:37:07 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1307' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299275' + x-ratelimit-remaining-tokens_usage_based: + - '299275' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 144ms + x-ratelimit-reset-tokens_usage_based: + - 144ms + x-request-id: + - e25c77d8a956e2d403a80795d27446ed + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "Progressively summarize the + lines of conversation provided, adding onto the previous summary returning a + new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks + of artificial intelligence. The AI thinks artificial intelligence is a force + for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial + intelligence is a force for good?\nAI: Because artificial intelligence will + help humans reach their full potential.\n\nNew summary:\nThe human asks what + the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence + is a force for good because it will help humans reach their full potential.\nEND + OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI for the product of 2 times + 6, and the AI responds that it is 12.\n\nNew lines of conversation:\nHuman: + What is 2 tims 6?\nAI: 12\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": + false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '957' + content-type: + - application/json + cookie: + - _cfuvid=O8jSUEuY34NDEdF4z_HQGq8w7idZ7PZlcpp1vRj2y5E-1703889414566-0-604800000; + __cf_bm=6RpGZVaY4yiI4M4AoreJuJny2199LZXRkp8DDrcSK5k-1703889414-1-AdxFp5oHDyE1t+/uGSC6LyRh7n4+ats5oznbOnwXmT+G4BkBdSY9kVWLS2RSLndYHxVzz5ksSBcREfL/gr1HAAc= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8bFlTYmUO6Wh3mbgtPshKpQqqDxgd\",\n \"object\": + \"chat.completion\",\n \"created\": 1703889427,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The human asks the AI for the product + of 2 times 6, and the AI responds that it is 12, confirming this when asked + again.\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 175,\n \"completion_tokens\": + 31,\n \"total_tokens\": 206\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d5869baf8a0301-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + 
- application/json + Date: + - Fri, 29 Dec 2023 22:37:09 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1618' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299775' + x-ratelimit-remaining-tokens_usage_based: + - '299775' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 44ms + x-ratelimit-reset-tokens_usage_based: + - 44ms + x-request-id: + - b70f47e489a1fefedc547c87e41f7dae + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re + an expert researcher, specialized in technology, software engineering, AI and + startups. You work as a freelancer and is now working on doing research and + analysis for a new customer.\n\nYour personal goal is: Make the best research + and analysis on content about AI and AI agents\n\nTOOLS:\n------\nYou have access + to the following tools:\n\nmultiplier: multiplier(numbers) -> float - Useful + for when you need to multiply two numbers together.\n The input to this + tool should be a comma separated list of numbers of\n length two, representing + the two numbers you want to multiply together.\n For example, `1,2` would + be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use + the exact following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: + the action to take, should be one of [multiplier], just the name.\nAction Input: + the input to the action\nObservation: the result of the action\n```\n\nWhen + you have a response for your task, or if you do not need to use a tool, you + MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]\n```\n\t\tThis is the summary of your work so far:\n The + human asks the AI what is 2 times 6, and the AI responds with 12.\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 + times 6?\n\nThis is the context you are working with:\n12\n\n"}], "model": "gpt-4", + "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1573' + content-type: + - application/json + cookie: + - _cfuvid=yTJHDMg7VYHMEKTSaPhpQp_5NZ3Xw1rIS8dQw4LmQMk-1703889424692-0-604800000; + __cf_bm=NTFlpt52iEoCg0Pkx5.1jxp.b3GIrmzDorPoCjgwA1c-1703889424-1-AcnqI2v1g/4ArTSHF6F4yC9cQd8KWJUiSRlY3mTNPF+6P6fectfBIvD544RQw/zhEY+z/eZ5pw4t/Baew653jtw= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8bFlVgnFkN5XO7X3qLOx96HX1pUjS\",\n \"object\": + \"chat.completion\",\n \"created\": 1703889429,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: + multiplier\\nAction Input: 2,6\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 337,\n \"completion_tokens\": + 23,\n \"total_tokens\": 360\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d586a76ee577c0-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 22:37:11 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1612' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299634' + x-ratelimit-remaining-tokens_usage_based: + - '299634' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 73ms + x-ratelimit-reset-tokens_usage_based: + - 73ms + x-request-id: + - 739f95896087931128c8e260b7d3cf02 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "Progressively summarize the + lines of conversation provided, adding onto the previous summary returning a + new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks + of artificial intelligence. The AI thinks artificial intelligence is a force + for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial + intelligence is a force for good?\nAI: Because artificial intelligence will + help humans reach their full potential.\n\nNew summary:\nThe human asks what + the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence + is a force for good because it will help humans reach their full potential.\nEND + OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI what is 2 times 6, and + the AI responds with 12.\n\nNew lines of conversation:\nHuman: What is 2 times + 6?\n\nThis is the context you are working with:\n12\nAI: 12\n\nNew summary:"}], + "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '990' + content-type: + - application/json + cookie: + - _cfuvid=yTJHDMg7VYHMEKTSaPhpQp_5NZ3Xw1rIS8dQw4LmQMk-1703889424692-0-604800000; + __cf_bm=NTFlpt52iEoCg0Pkx5.1jxp.b3GIrmzDorPoCjgwA1c-1703889424-1-AcnqI2v1g/4ArTSHF6F4yC9cQd8KWJUiSRlY3mTNPF+6P6fectfBIvD544RQw/zhEY+z/eZ5pw4t/Baew653jtw= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8bFlXEDmlitriA40K77evvaqJrfZ4\",\n \"object\": + \"chat.completion\",\n \"created\": 1703889431,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The human asks the AI what is 2 times + 6, and the AI consistently responds with 12.\"\n },\n \"logprobs\": + null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 181,\n \"completion_tokens\": 22,\n \"total_tokens\": 203\n },\n \"system_fingerprint\": + null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d586b2fbd877c0-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - 
application/json + Date: + - Fri, 29 Dec 2023 22:37:13 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1836' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299769' + x-ratelimit-remaining-tokens_usage_based: + - '299769' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 46ms + x-ratelimit-reset-tokens_usage_based: + - 46ms + x-request-id: + - 501523f2159a6ae7b2dbb409cf5748a1 + http_version: HTTP/1.1 + status_code: 200 +version: 1 diff --git a/tests/cassettes/test_logging_tool_usage.yaml b/tests/cassettes/test_logging_tool_usage.yaml new file mode 100644 index 000000000..fa2efc153 --- /dev/null +++ b/tests/cassettes/test_logging_tool_usage.yaml @@ -0,0 +1,690 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew + test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou + have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float + - Useful for when you need to multiply two numbers together.\n The input + to this tool should be a comma separated list of numbers of\n length + two, representing the two numbers you want to multiply together.\n For + example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use + a tool, please use the exact following format:\n\n```\nThought: Do I need to + use a tool? 
Yes\nAction: the action to take, should be one of [multiplier], + just the name.\nAction Input: the input to the action\nObservation: the result + of the action\n```\n\nWhen you have a response for your task, or if you do not + need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use + a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary + of your work so far:\n \nBegin! This is VERY important to you, your job depends + on it!\n\nCurrent Task: What is 3 times 5?\n\n"}], "model": "gpt-4", "n": 1, + "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1231' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8b7eDhOwdMJw0817wIqiD9MoXVRU8\",\n \"object\": + \"chat.completion\",\n \"created\": 1703858225,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: + multiplier\\nAction Input: 3,5\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 264,\n \"completion_tokens\": + 23,\n \"total_tokens\": 287\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d28cd70f9f1cd9-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 13:57:07 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=; + path=/; expires=Fri, 29-Dec-23 14:27:07 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1725' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299717' + x-ratelimit-remaining-tokens_usage_based: + - '299717' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 56ms + x-ratelimit-reset-tokens_usage_based: + - 56ms + x-request-id: + - 689757a1e38baf0844ec69f20525c5d7 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew + test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou + have access to the 
following tools:\n\nmultiplier: multiplier(numbers) -> float + - Useful for when you need to multiply two numbers together.\n The input + to this tool should be a comma separated list of numbers of\n length + two, representing the two numbers you want to multiply together.\n For + example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use + a tool, please use the exact following format:\n\n```\nThought: Do I need to + use a tool? Yes\nAction: the action to take, should be one of [multiplier], + just the name.\nAction Input: the input to the action\nObservation: the result + of the action\n```\n\nWhen you have a response for your task, or if you do not + need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use + a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary + of your work so far:\n \nBegin! This is VERY important to you, your job depends + on it!\n\nCurrent Task: What is 3 times 5?\nThought: Do I need to use a tool? + Yes\nAction: multiplier\nAction Input: 3,5\nObservation: 15\nThought: \n"}], + "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1335' + content-type: + - application/json + cookie: + - __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=; + _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n 
\"id\": \"chatcmpl-8b7eG2qnUr6SZcankL5AqkwIJvYFc\",\n \"object\": + \"chat.completion\",\n \"created\": 1703858228,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: + 3 times 5 is 15.\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 297,\n \"completion_tokens\": + 22,\n \"total_tokens\": 319\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d28ce3ab3b1cd9-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 13:57:09 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1568' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299693' + x-ratelimit-remaining-tokens_usage_based: + - '299693' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 61ms + x-ratelimit-reset-tokens_usage_based: + - 61ms + x-request-id: + - 66a02e3e08e3f6b5b29fb04cd19f50a0 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "Progressively summarize the + lines of conversation provided, adding onto the previous summary returning a + new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks + of artificial intelligence. 
The AI thinks artificial intelligence is a force + for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial + intelligence is a force for good?\nAI: Because artificial intelligence will + help humans reach their full potential.\n\nNew summary:\nThe human asks what + the AI thinks of artificial intelligence. The AI thinks artificial intelligence + is a force for good because it will help humans reach their full potential.\nEND + OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: What + is 3 times 5?\nAI: 3 times 5 is 15.\n\nNew summary:"}], "model": "gpt-4", "n": + 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '886' + content-type: + - application/json + cookie: + - __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=; + _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8b7eIfmBgQoWGLW4jiQZTu3qzEBBL\",\n \"object\": + \"chat.completion\",\n \"created\": 1703858230,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The human asks the AI to calculate 3 + times 5 and the AI responds that 3 times 5 is 15.\"\n },\n \"logprobs\": + null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 155,\n \"completion_tokens\": 26,\n \"total_tokens\": 181\n },\n 
\"system_fingerprint\": + null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d28cf118221cd9-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 13:57:12 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2188' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299794' + x-ratelimit-remaining-tokens_usage_based: + - '299794' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 41ms + x-ratelimit-reset-tokens_usage_based: + - 41ms + x-request-id: + - a644c703d416b1e4b06586e96e6d7297 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew + test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou + have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float + - Useful for when you need to multiply two numbers together.\n The input + to this tool should be a comma separated list of numbers of\n length + two, representing the two numbers you want to multiply together.\n For + example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use + a tool, please use the exact following format:\n\n```\nThought: Do I need to + use a tool? 
Yes\nAction: the action to take, should be one of [multiplier], + just the name.\nAction Input: the input to the action\nObservation: the result + of the action\n```\n\nWhen you have a response for your task, or if you do not + need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use + a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary + of your work so far:\n The human asks the AI to calculate 3 times 5 and the + AI responds that 3 times 5 is 15.\nBegin! This is VERY important to you, your + job depends on it!\n\nCurrent Task: What is 3 times 5?\n\n"}], "model": "gpt-4", + "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1317' + content-type: + - application/json + cookie: + - __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=; + _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8b7eKehkNtTjrXJD0aFjiVUeXU7te\",\n \"object\": + \"chat.completion\",\n \"created\": 1703858232,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: + multiplier\\nAction Input: 3,5\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 290,\n \"completion_tokens\": + 23,\n \"total_tokens\": 313\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d28d007f461cd9-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 13:57:14 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1885' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299697' + x-ratelimit-remaining-tokens_usage_based: + - '299697' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 60ms + x-ratelimit-reset-tokens_usage_based: + - 60ms + x-request-id: + - 87959bfa6a029f7bcafb5095e66d4964 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew + test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou + have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float + - Useful for when you need to multiply two numbers together.\n The input + to this tool should be a comma separated list of numbers of\n length + two, representing the two numbers you want to multiply together.\n For + example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use + a tool, please use the exact following format:\n\n```\nThought: 
Do I need to + use a tool? Yes\nAction: the action to take, should be one of [multiplier], + just the name.\nAction Input: the input to the action\nObservation: the result + of the action\n```\n\nWhen you have a response for your task, or if you do not + need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use + a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary + of your work so far:\n The human asks the AI to calculate 3 times 5 and the + AI responds that 3 times 5 is 15.\nBegin! This is VERY important to you, your + job depends on it!\n\nCurrent Task: What is 3 times 5?\n\nI just used the multiplier + tool with input 3,5. So I already knwo the result of that.\nObservation: Invalid + or incomplete response\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], + "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1460' + content-type: + - application/json + cookie: + - __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=; + _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8b7eME5fg1w8QcXUnl0FBcqxJWy7u\",\n \"object\": + \"chat.completion\",\n \"created\": 1703858234,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Do I need to use a tool? 
No\\nFinal Answer: + 3 times 5 is 15.\"\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 324,\n \"completion_tokens\": + 22,\n \"total_tokens\": 346\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d28d0fcfaf1cd9-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 13:57:17 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2494' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299661' + x-ratelimit-remaining-tokens_usage_based: + - '299661' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 67ms + x-ratelimit-reset-tokens_usage_based: + - 67ms + x-request-id: + - eb9704531634d4cd3c6652dff09914bc + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"messages": [{"role": "user", "content": "Progressively summarize the + lines of conversation provided, adding onto the previous summary returning a + new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks + of artificial intelligence. The AI thinks artificial intelligence is a force + for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial + intelligence is a force for good?\nAI: Because artificial intelligence will + help humans reach their full potential.\n\nNew summary:\nThe human asks what + the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence + is a force for good because it will help humans reach their full potential.\nEND + OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to calculate 3 times 5 + and the AI responds that 3 times 5 is 15.\n\nNew lines of conversation:\nHuman: + What is 3 times 5?\nAI: 3 times 5 is 15.\n\nNew summary:"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '972' + content-type: + - application/json + cookie: + - __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=; + _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.6.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.6.1 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + content: "{\n \"id\": \"chatcmpl-8b7eP77MzprkbgjaMbeaEob0aDx39\",\n \"object\": + \"chat.completion\",\n \"created\": 1703858237,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The human asks the AI to calculate 3 + times 5 twice, and both times the AI responds that 3 times 5 is 15.\"\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 181,\n \"completion_tokens\": 30,\n + \ \"total_tokens\": 211\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 83d28d211f591cd9-GRU + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + 
Content-Type: + - application/json + Date: + - Fri, 29 Dec 2023 13:57:20 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2967' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-limit-tokens_usage_based: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299771' + x-ratelimit-remaining-tokens_usage_based: + - '299771' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 45ms + x-ratelimit-reset-tokens_usage_based: + - 45ms + x-request-id: + - e3bbd11b2e9b28f76f8b43dc7a4d2b6d + http_version: HTTP/1.1 + status_code: 200 +version: 1 diff --git a/tests/crew_test.py b/tests/crew_test.py index b962b6712..99499c1fe 100644 --- a/tests/crew_test.py +++ b/tests/crew_test.py @@ -8,6 +8,7 @@ from crewai.agent import Agent from crewai.crew import Crew from crewai.process import Process from crewai.task import Task +from crewai.agents import CacheHandler ceo = Agent( role="CEO", @@ -157,7 +158,7 @@ def test_crew_with_delegating_agents(): assert ( crew.kickoff() - == "The Senior Writer has created a compelling and engaging 1 paragraph draft about AI agents. The paragraph provides a brief yet comprehensive overview of AI agents, their uses, and implications in the current world. It emphasizes their potential and the role they can play in the future. The tone is informative but captivating, meeting the objectives of the task." + == '"AI agents, the digital masterminds at the heart of the 21st-century revolution, are shaping a new era of intelligence and innovation. 
They are autonomous entities, capable of observing their environment, making decisions, and acting on them, all in pursuit of a specific goal. From streamlining operations in logistics to personalizing customer experiences in retail, AI agents are transforming how businesses operate. But their potential extends far beyond the corporate world. They are the sentinels protecting our digital frontiers, the virtual assistants making our lives easier, and the unseen hands guiding autonomous vehicles. As this technology evolves, AI agents will play an increasingly central role in our world, ushering in an era of unprecedented efficiency, personalization, and productivity. But with great power comes great responsibility, and understanding and harnessing this potential responsibly will be one of our greatest challenges and opportunities in the coming years."' ) @@ -194,3 +195,47 @@ def test_crew_verbose_output(capsys): crew.kickoff() captured = capsys.readouterr() assert captured.out == "" + + +@pytest.mark.vcr(filter_headers=["authorization"]) +def test_cache_hitting_between_agents(): + from unittest.mock import patch + + from langchain.tools import tool + + @tool + def multiplier(numbers) -> float: + """Useful for when you need to multiply two numbers together. + The input to this tool should be a comma separated list of numbers of + length two, representing the two numbers you want to multiply together. 
+ For example, `1,2` would be the input if you wanted to multiply 1 by 2.""" + a, b = numbers.split(",") + return int(a) * int(b) + + tasks = [ + Task( + description="What is 2 tims 6?", + tools=[multiplier], + agent=ceo, + ), + Task( + description="What is 2 times 6?", + tools=[multiplier], + agent=researcher, + ), + ] + + crew = Crew( + agents=[ceo, researcher], + tasks=tasks, + ) + + assert crew.cache_handler._cache == {} + output = crew.kickoff() + assert crew.cache_handler._cache == {"multiplier-2,6": "12"} + assert output == "12" + + with patch.object(CacheHandler, "read") as read: + read.return_value = "12" + crew.kickoff() + read.assert_called_with("multiplier", "2,6")