Add RPM control to both agents and crews (#133)

* moving file into utilities
* creating Logger and RPMController
* Adding support for RPM to agents and crew
This commit is contained in:
João Moura
2024-01-14 00:22:11 -03:00
committed by GitHub
parent 3686804f7e
commit 2bf924b732
16 changed files with 2343 additions and 85 deletions

View File

@@ -24,8 +24,7 @@ from crewai.agents import (
CrewAgentOutputParser,
ToolsHandler,
)
from crewai.i18n import I18N
from crewai.prompts import Prompts
from crewai.utilities import I18N, Logger, Prompts, RPMController
class Agent(BaseModel):
@@ -42,11 +41,14 @@ class Agent(BaseModel):
llm: The language model that will run the agent.
max_iter: Maximum number of iterations for an agent to execute a task.
memory: Whether the agent should have memory or not.
max_rpm: Maximum number of requests per minute for the agent execution to be respected.
verbose: Whether the agent execution should be in verbose mode.
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
"""
__hash__ = object.__hash__
_logger: Logger = PrivateAttr()
_rpm_controller: RPMController = PrivateAttr(default=None)
_request_within_rpm_limit: Any = PrivateAttr(default=None)
model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -58,6 +60,10 @@ class Agent(BaseModel):
role: str = Field(description="Role of the agent")
goal: str = Field(description="Objective of the agent")
backstory: str = Field(description="Backstory of the agent")
max_rpm: Optional[int] = Field(
default=None,
description="Maximum number of requests per minute for the agent execution to be respected.",
)
memory: bool = Field(
default=True, description="Whether the agent should have memory or not"
)
@@ -101,6 +107,15 @@ class Agent(BaseModel):
"may_not_set_field", "This field is not to be set by the user.", {}
)
@model_validator(mode="after")
def set_private_attrs(self):
self._logger = Logger(self.verbose)
if self.max_rpm and not self._rpm_controller:
self._rpm_controller = RPMController(
max_rpm=self.max_rpm, logger=self._logger
)
return self
@model_validator(mode="after")
def check_agent_executor(self) -> "Agent":
if not self.agent_executor:
@@ -128,7 +143,7 @@ class Agent(BaseModel):
tools = tools or self.tools
self.agent_executor.tools = tools
return self.agent_executor.invoke(
result = self.agent_executor.invoke(
{
"input": task,
"tool_names": self.__tools_names(tools),
@@ -137,14 +152,20 @@ class Agent(BaseModel):
RunnableConfig(callbacks=[self.tools_handler]),
)["output"]
if self.max_rpm:
self._rpm_controller.stop_rpm_counter()
return result
def set_cache_handler(self, cache_handler) -> None:
self.cache_handler = cache_handler
self.tools_handler = ToolsHandler(cache=self.cache_handler)
self.__create_agent_executor()
def set_request_within_rpm_limit(self, ensure_function) -> None:
self._request_within_rpm_limit = ensure_function
self.__create_agent_executor()
def set_rpm_controller(self, rpm_controller) -> None:
if not self._rpm_controller:
self._rpm_controller = rpm_controller
self.__create_agent_executor()
def __create_agent_executor(self) -> CrewAgentExecutor:
"""Create an agent executor for the agent.
@@ -164,9 +185,13 @@ class Agent(BaseModel):
"verbose": self.verbose,
"handle_parsing_errors": True,
"max_iterations": self.max_iter,
"request_within_rpm_limit": self._request_within_rpm_limit,
}
if self._rpm_controller:
executor_args[
"request_within_rpm_limit"
] = self._rpm_controller.check_or_wait
if self.memory:
summary_memory = ConversationSummaryMemory(
llm=self.llm, input_key="input", memory_key="chat_history"

View File

@@ -1,6 +1,6 @@
from langchain_core.exceptions import OutputParserException
from crewai.i18n import I18N
from crewai.utilities import I18N
class TaskRepeatedUsageException(OutputParserException):

View File

@@ -12,8 +12,8 @@ from langchain_core.tools import BaseTool
from langchain_core.utils.input import get_color_mapping
from crewai.agents.cache.cache_hit import CacheHit
from crewai.i18n import I18N
from crewai.tools.cache_tools import CacheTools
from crewai.utilities import I18N
class CrewAgentExecutor(AgentExecutor):

View File

@@ -7,7 +7,7 @@ from langchain_core.agents import AgentAction, AgentFinish
from crewai.agents.cache import CacheHandler, CacheHit
from crewai.agents.exceptions import TaskRepeatedUsageException
from crewai.agents.tools_handler import ToolsHandler
from crewai.i18n import I18N
from crewai.utilities import I18N
FINAL_ANSWER_ACTION = "Final Answer:"
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (

View File

@@ -1,8 +1,6 @@
import json
import threading
import time
import uuid
from typing import Any, ClassVar, Dict, List, Optional, Union
from typing import Any, Dict, List, Optional, Union
from pydantic import (
UUID4,
@@ -19,10 +17,10 @@ from pydantic_core import PydanticCustomError
from crewai.agent import Agent
from crewai.agents.cache import CacheHandler
from crewai.i18n import I18N
from crewai.process import Process
from crewai.task import Task
from crewai.tools.agent_tools import AgentTools
from crewai.utilities import I18N, Logger, RPMController
class Crew(BaseModel):
@@ -37,23 +35,26 @@ class Crew(BaseModel):
config: Configuration settings for the crew.
cache_handler: Handles caching for the crew's operations.
max_rpm: Maximum number of requests per minute for the crew execution to be respected.
rpm: Current number of requests per minute for the crew execution.
id: A unique identifier for the crew instance.
"""
__hash__ = object.__hash__
_timer: Optional[threading.Timer] = PrivateAttr(default=None)
lock: ClassVar[threading.Lock] = threading.Lock()
rpm: ClassVar[int] = 0
max_rpm: Optional[int] = Field(default=None)
_rpm_controller: RPMController = PrivateAttr()
_logger: Logger = PrivateAttr()
_cache_handler: Optional[InstanceOf[CacheHandler]] = PrivateAttr(
default=CacheHandler()
)
model_config = ConfigDict(arbitrary_types_allowed=True)
tasks: List[Task] = Field(default_factory=list)
agents: List[Agent] = Field(default_factory=list)
process: Process = Field(default=Process.sequential)
verbose: Union[int, bool] = Field(default=0)
config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
cache_handler: Optional[InstanceOf[CacheHandler]] = Field(default=CacheHandler())
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
max_rpm: Optional[int] = Field(
default=None,
description="Maximum number of requests per minute for the crew execution to be respected.",
)
language: str = Field(
default="en",
description="Language used for the crew, defaults to English.",
@@ -74,9 +75,10 @@ class Crew(BaseModel):
return json.loads(v) if isinstance(v, Json) else v
@model_validator(mode="after")
def set_reset_counter(self):
if self.max_rpm:
self._reset_request_count()
def set_private_attrs(self):
self._cache_handler = CacheHandler()
self._logger = Logger(self.verbose)
self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
return self
@model_validator(mode="after")
@@ -94,8 +96,8 @@ class Crew(BaseModel):
if self.agents:
for agent in self.agents:
agent.set_cache_handler(self.cache_handler)
agent.set_request_within_rpm_limit(self.ensure_request_within_rpm_limit)
agent.set_cache_handler(self._cache_handler)
agent.set_rpm_controller(self._rpm_controller)
return self
def _setup_from_config(self):
@@ -116,28 +118,9 @@ class Crew(BaseModel):
del task_config["agent"]
return Task(**task_config, agent=task_agent)
def ensure_request_within_rpm_limit(self):
if not self.max_rpm:
return True
with Crew.lock:
if Crew.rpm < self.max_rpm:
Crew.rpm += 1
return True
self._log("info", "Max RPM reached, waiting for next minute to start.")
return self._wait_for_next_minute()
def _wait_for_next_minute(self):
time.sleep(60)
with Crew.lock:
Crew.rpm = 0
return True
def kickoff(self) -> str:
"""Starts the crew to work on its assigned tasks."""
for agent in self.agents:
agent.cache_handler = self.cache_handler
agent.i18n = I18N(language=self.language)
if self.process == Process.sequential:
@@ -149,8 +132,12 @@ class Crew(BaseModel):
for task in self.tasks:
self._prepare_and_execute_task(task)
task_output = task.execute(task_output)
self._log("debug", f"\n[{task.agent.role}] Task output: {task_output}\n\n")
self._stop_timer()
self._logger.log(
"debug", f"[{task.agent.role}] Task output: {task_output}\n\n"
)
if self.max_rpm:
self._rpm_controller.stop_rpm_counter()
return task_output
def _prepare_and_execute_task(self, task):
@@ -158,24 +145,5 @@ class Crew(BaseModel):
if task.agent.allow_delegation:
task.tools += AgentTools(agents=self.agents).tools()
self._log("debug", f"Working Agent: {task.agent.role}")
self._log("info", f"Starting Task: {task.description}")
def _log(self, level, message):
"""Logs a message at the specified verbosity level."""
level_map = {"debug": 1, "info": 2}
verbose_level = (
2 if isinstance(self.verbose, bool) and self.verbose else self.verbose
)
if verbose_level and level_map[level] <= verbose_level:
print(f"\n{message}")
def _stop_timer(self):
if self._timer:
self._timer.cancel()
def _reset_request_count(self):
self._stop_timer()
self._timer = threading.Timer(60.0, self._reset_request_count)
self._timer.start()
Crew.rpm = 0
self._logger.log("debug", f"Working Agent: {task.agent.role}")
self._logger.log("info", f"Starting Task: {task.description}")

View File

@@ -4,7 +4,7 @@ from langchain.tools import Tool
from pydantic import BaseModel, Field
from crewai.agent import Agent
from crewai.i18n import I18N
from crewai.utilities import I18N
class AgentTools(BaseModel):

View File

@@ -0,0 +1,4 @@
from .i18n import I18N
from .logger import Logger
from .prompts import Prompts
from .rpm_controller import RPMController

View File

@@ -17,7 +17,9 @@ class I18N(BaseModel):
"""Load translations from a JSON file based on the specified language."""
try:
dir_path = os.path.dirname(os.path.realpath(__file__))
prompts_path = os.path.join(dir_path, f"translations/{self.language}.json")
prompts_path = os.path.join(
dir_path, f"../translations/{self.language}.json"
)
with open(prompts_path, "r") as f:
self._translations = json.load(f)

View File

@@ -0,0 +1,11 @@
class Logger:
    """Tiny print-based logger gated by a numeric verbosity level.

    ``verbose_level`` semantics: ``0``/``False`` silences everything,
    ``1`` allows "debug" messages, ``2`` (or ``True``) allows both
    "debug" and "info".  Unknown level names map to 0 and are therefore
    printed whenever logging is enabled at all.
    """

    _LEVEL_MAP = {"debug": 1, "info": 2}

    def __init__(self, verbose_level=0):
        # A bare ``True`` means "maximum verbosity" (level 2); every other
        # value (ints, False) is stored unchanged.
        self.verbose_level = 2 if verbose_level is True else verbose_level

    def log(self, level, message):
        # Emit only when logging is enabled and the message's level does
        # not exceed the configured verbosity.
        threshold = self._LEVEL_MAP.get(level, 0)
        if self.verbose_level and threshold <= self.verbose_level:
            print(f"\n[{level.upper()}]: {message}")

View File

@@ -3,7 +3,7 @@ from typing import ClassVar
from langchain.prompts import PromptTemplate
from pydantic import BaseModel, Field
from .i18n import I18N
from crewai.utilities import I18N
class Prompts(BaseModel):

View File

@@ -0,0 +1,57 @@
import threading
import time
from typing import Union
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, model_validator
from crewai.utilities.logger import Logger
class RPMController(BaseModel):
    """Thread-safe requests-per-minute throttle.

    Counts requests in the current one-minute window and blocks callers
    that exceed ``max_rpm`` until the window resets.  ``max_rpm=None``
    disables throttling entirely (every check passes immediately).
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)
    max_rpm: Union[int, None] = Field(default=None)
    logger: Logger = Field(default=None)
    _current_rpm: int = PrivateAttr(default=0)
    _timer: threading.Timer = PrivateAttr(default=None)
    _lock: threading.Lock = PrivateAttr(default=None)

    @model_validator(mode="after")
    def reset_counter(self):
        # Only create the lock and arm the recurring reset timer when
        # throttling is actually enabled.
        if self.max_rpm:
            self._lock = threading.Lock()
            self._reset_request_count()
        return self

    def check_or_wait(self):
        """Register one request, sleeping out the window if the limit is hit.

        Returns True once the request is allowed to proceed.
        """
        if not self.max_rpm:
            return True
        with self._lock:
            if self._current_rpm < self.max_rpm:
                self._current_rpm += 1
                return True
            self.logger.log(
                "info", "Max RPM reached, waiting for next minute to start."
            )
        # BUGFIX: release the lock before sleeping. _wait_for_next_minute
        # re-acquires self._lock, and threading.Lock is not reentrant, so
        # calling it while holding the lock deadlocked the caller forever.
        self._wait_for_next_minute()
        with self._lock:
            # This call is the first request of the fresh window.
            self._current_rpm = 1
        return True

    def stop_rpm_counter(self):
        """Cancel the pending window-reset timer, if any."""
        if self._timer:
            self._timer.cancel()
            self._timer = None

    def _wait_for_next_minute(self):
        # Sleep out a full window, then clear the counter for the new minute.
        time.sleep(60)
        with self._lock:
            self._current_rpm = 0

    def _reset_request_count(self):
        # Clear the counter and re-arm the timer so the count resets every
        # 60 seconds for as long as the controller is alive.
        with self._lock:
            self._current_rpm = 0
        if self._timer:
            self._timer.cancel()
        self._timer = threading.Timer(60.0, self._reset_request_count)
        self._timer.start()

View File

@@ -6,9 +6,10 @@ import pytest
from langchain.tools import tool
from langchain_openai import ChatOpenAI as OpenAI
from crewai.agent import Agent
from crewai import Agent, Crew, Task
from crewai.agents.cache import CacheHandler
from crewai.agents.executor import CrewAgentExecutor
from crewai.utilities import RPMController
def test_agent_creation():
@@ -252,3 +253,124 @@ def test_agent_moved_on_after_max_iterations():
== "I have used the tool multiple times and the final answer remains 42."
)
private_mock.assert_called_once()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_respect_the_max_rpm_set(capsys):
    """With max_rpm=1 the agent must hit the RPM ceiling mid-task.

    Verifies that the limit-reached message is logged and that
    RPMController._wait_for_next_minute is invoked.  The wait is patched
    so the test never actually sleeps 60 seconds; LLM traffic is replayed
    from a VCR cassette, so the prompt and tool docstring below must stay
    byte-identical to the recording.
    """

    @tool
    def get_final_answer(numbers) -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42

    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        max_iter=5,
        max_rpm=1,  # limit of one request per minute: exceeded on the 2nd call
        verbose=True,
        allow_delegation=False,
    )

    # Patch the sleep so hitting the limit returns immediately.
    with patch.object(RPMController, "_wait_for_next_minute") as moveon:
        moveon.return_value = True
        output = agent.execute_task(
            task="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
            tools=[get_final_answer],
        )
        assert (
            output
            == "I've used the `get_final_answer` tool multiple times and it consistently returns the number 42."
        )
        captured = capsys.readouterr()
        assert "Max RPM reached, waiting for next minute to start." in captured.out
        moveon.assert_called()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
    """An agent's own max_rpm takes precedence over the crew's.

    The agent allows 10 requests/minute while the crew allows only 1;
    with max_iter=4 the agent never exceeds its own limit, so no RPM
    wait should ever be triggered.  LLM traffic is replayed from a VCR
    cassette.
    """
    from unittest.mock import patch

    from langchain.tools import tool

    @tool
    def get_final_answer(numbers) -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42

    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        max_iter=4,
        max_rpm=10,  # agent-level limit; overrides the crew's max_rpm=1
        verbose=True,
    )

    task = Task(
        description="Don't give a Final Answer, instead keep using the `get_final_answer` tool.",
        tools=[get_final_answer],
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task], max_rpm=1, verbose=2)

    with patch.object(RPMController, "_wait_for_next_minute") as moveon:
        moveon.return_value = True
        crew.kickoff()
        captured = capsys.readouterr()
        # The agent's limit (10) is never reached, so no waiting occurs.
        assert "Max RPM reached, waiting for next minute to start." not in captured.out
        moveon.assert_not_called()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_without_max_rpm_respet_crew_rpm(capsys):
    """An agent with no max_rpm of its own inherits the crew's limit.

    agent1 has its own limit (10) while agent2 has none; agent2 should
    therefore be throttled by the crew's max_rpm=1 and trigger exactly
    one RPM wait.  LLM traffic is replayed from a VCR cassette.

    NOTE(review): "respet" in the function name is a typo for "respect";
    left unchanged here to keep the test's identifier stable.
    """
    from unittest.mock import patch

    from langchain.tools import tool

    @tool
    def get_final_answer(numbers) -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42

    agent1 = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        max_rpm=10,  # has its own controller; not throttled by the crew
        verbose=True,
    )

    agent2 = Agent(
        role="test role2",
        goal="test goal2",
        backstory="test backstory2",
        max_iter=2,  # no max_rpm: falls back to the crew's controller
        verbose=True,
    )

    tasks = [
        Task(
            description="Just say hi.",
            agent=agent1,
        ),
        Task(
            description="Don't give a Final Answer, instead keep using the `get_final_answer` tool.",
            tools=[get_final_answer],
            agent=agent2,
        ),
    ]

    crew = Crew(agents=[agent1, agent2], tasks=tasks, max_rpm=1, verbose=2)

    with patch.object(RPMController, "_wait_for_next_minute") as moveon:
        moveon.return_value = True
        crew.kickoff()
        captured = capsys.readouterr()
        assert "Action: get_final_answer" in captured.out
        # agent2's second request exceeds the crew limit of 1 → one wait.
        assert "Max RPM reached, waiting for next minute to start." in captured.out
        moveon.assert_called_once()

View File

@@ -0,0 +1,663 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1080'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8ggfw8fvQxNLkF3ttE8Bm7s8rjYFq\",\n \"object\":
\"chat.completion\",\n \"created\": 1705184512,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
234,\n \"completion_tokens\": 24,\n \"total_tokens\": 258\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 845108e32c171abd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 13 Jan 2024 22:21:54 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=N43k0gHErA0PBuIXY9IHQyia.HGfApTZKpYsN4S4E8s-1705184514-1-AQrOw6bC8j+KAeTt3IquXJN8QBK/oqZwqkQ1DG2CFnb3u2VkEs58RczmbK2+3luwjUlNNK5xihxOzOe1hFlIkfc=;
path=/; expires=Sat, 13-Jan-24 22:51:54 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=tLKPNNCaE7nA1vSgk2uC9AN4gynXRvlaHmfacNL.ncY-1705184514404-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1681'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299754'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 49ms
x-request-id:
- a25e71b234c622c7dc8010927635a75c
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1191'
content-type:
- application/json
cookie:
- __cf_bm=N43k0gHErA0PBuIXY9IHQyia.HGfApTZKpYsN4S4E8s-1705184514-1-AQrOw6bC8j+KAeTt3IquXJN8QBK/oqZwqkQ1DG2CFnb3u2VkEs58RczmbK2+3luwjUlNNK5xihxOzOe1hFlIkfc=;
_cfuvid=tLKPNNCaE7nA1vSgk2uC9AN4gynXRvlaHmfacNL.ncY-1705184514404-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8ggfyCtYEPuuA9KzlWJko8Sc4ItXo\",\n \"object\":
\"chat.completion\",\n \"created\": 1705184514,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
267,\n \"completion_tokens\": 22,\n \"total_tokens\": 289\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 845108ef48b01abd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 13 Jan 2024 22:21:55 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1020'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299726'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 54ms
x-request-id:
- 2cb025cb28aa59d282888aa8daa8a745
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction:
get_final_answer\nAction Input: [42]\nObservation: 42\nThought: "}], "model":
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1293'
content-type:
- application/json
cookie:
- __cf_bm=N43k0gHErA0PBuIXY9IHQyia.HGfApTZKpYsN4S4E8s-1705184514-1-AQrOw6bC8j+KAeTt3IquXJN8QBK/oqZwqkQ1DG2CFnb3u2VkEs58RczmbK2+3luwjUlNNK5xihxOzOe1hFlIkfc=;
_cfuvid=tLKPNNCaE7nA1vSgk2uC9AN4gynXRvlaHmfacNL.ncY-1705184514404-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8ggfzAey5AALxtUqKehw6R7nBfyZv\",\n \"object\":
\"chat.completion\",\n \"created\": 1705184515,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
297,\n \"completion_tokens\": 22,\n \"total_tokens\": 319\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 845108f7794d1abd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 13 Jan 2024 22:21:57 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1662'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299702'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 59ms
x-request-id:
- 96578e7d658dcea71f927eb3700da70d
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction:
get_final_answer\nAction Input: [42]\nObservation: 42\nThought: Do I need to
use a tool? Yes\nAction: get_final_answer\nAction Input: [42]\nObservation:
42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1395'
content-type:
- application/json
cookie:
- __cf_bm=N43k0gHErA0PBuIXY9IHQyia.HGfApTZKpYsN4S4E8s-1705184514-1-AQrOw6bC8j+KAeTt3IquXJN8QBK/oqZwqkQ1DG2CFnb3u2VkEs58RczmbK2+3luwjUlNNK5xihxOzOe1hFlIkfc=;
_cfuvid=tLKPNNCaE7nA1vSgk2uC9AN4gynXRvlaHmfacNL.ncY-1705184514404-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8ggg2v1bp5IaLU8TDmqcDgXiqXOTd\",\n \"object\":
\"chat.completion\",\n \"created\": 1705184518,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
327,\n \"completion_tokens\": 22,\n \"total_tokens\": 349\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 845109052e951abd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 13 Jan 2024 22:22:00 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1840'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299678'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 64ms
x-request-id:
- 24c5fedfa1d1b427cc01a498f46fe60f
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction:
get_final_answer\nAction Input: [42]\nObservation: 42\nThought: Do I need to
use a tool? Yes\nAction: get_final_answer\nAction Input: [42]\nObservation:
42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: I''ve used too many tools for this task. I''m going
to give you my absolute BEST Final answer now and not use any more tools.\nThought:
"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1618'
content-type:
- application/json
cookie:
- __cf_bm=N43k0gHErA0PBuIXY9IHQyia.HGfApTZKpYsN4S4E8s-1705184514-1-AQrOw6bC8j+KAeTt3IquXJN8QBK/oqZwqkQ1DG2CFnb3u2VkEs58RczmbK2+3luwjUlNNK5xihxOzOe1hFlIkfc=;
_cfuvid=tLKPNNCaE7nA1vSgk2uC9AN4gynXRvlaHmfacNL.ncY-1705184514404-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8ggg4hc3SCoIfjLWgJVTHwj91kcWv\",\n \"object\":
\"chat.completion\",\n \"created\": 1705184520,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
I've used the `get_final_answer` tool multiple times and it consistently returns
the number 42.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 383,\n \"completion_tokens\":
34,\n \"total_tokens\": 417\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 845109124bdd1abd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 13 Jan 2024 22:22:03 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '3318'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299623'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 75ms
x-request-id:
- c4eb234f14d03df86e41f35ff90b9708
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\nAI: I''ve used the `get_final_answer` tool multiple times and it consistently
returns the number 42.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream":
false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1041'
content-type:
- application/json
cookie:
- __cf_bm=N43k0gHErA0PBuIXY9IHQyia.HGfApTZKpYsN4S4E8s-1705184514-1-AQrOw6bC8j+KAeTt3IquXJN8QBK/oqZwqkQ1DG2CFnb3u2VkEs58RczmbK2+3luwjUlNNK5xihxOzOe1hFlIkfc=;
_cfuvid=tLKPNNCaE7nA1vSgk2uC9AN4gynXRvlaHmfacNL.ncY-1705184514404-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8ggg8PkiU1uRkwllwEfuGqBztgSqW\",\n \"object\":
\"chat.completion\",\n \"created\": 1705184524,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human instructs the AI to keep using
the `get_final_answer` tool, stating that the final answer is 42. The AI confirms
that it has used the `get_final_answer` tool multiple times and it consistently
returns the number 42.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 184,\n \"completion_tokens\":
51,\n \"total_tokens\": 235\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8451092a2daf1abd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 13 Jan 2024 22:22:07 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '3505'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299755'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 49ms
x-request-id:
- 7db26cd36db9105a336c4975a978ad37
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -0,0 +1,591 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
work to co-worker: Useful to delegate a specific task to one of the following
co-workers: test role.\nThe input to this tool should be a pipe (|) separated
text of length 3 (three), representing the co-worker you want to ask it to (one
of the options), the task and all actual context you have for the task.\nFor
example, `coworker|task|context`.\nAsk question to co-worker: Useful to ask
a question, opinion or take from on of the following co-workers: test role.\nThe
input to this tool should be a pipe (|) separated text of length 3 (three),
representing the co-worker you want to ask it to (one of the options), the question
and all actual context you have for the question.\n For example, `coworker|question|context`.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer,
Delegate work to co-worker, Ask question to co-worker], just the name.\nAction
Input: the input to the action\nObservation: the result of the action\n```\n\nWhen
you have a response for your task, or if you do not need to use a tool, you
MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]This is the summary of your work so far:\nBegin! This is
VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give
a Final Answer, instead keep using the `get_final_answer` tool.\n"}], "model":
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1849'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8ggwdlPGCjfSTQ9UYZ6FscyUha1T1\",\n \"object\":
\"chat.completion\",\n \"created\": 1705185547,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [1, 2, 3]\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
410,\n \"completion_tokens\": 30,\n \"total_tokens\": 440\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8451222a0daa023d-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 13 Jan 2024 22:39:09 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=rVM3HcQbZEwpOTwcpe7kl9QQYRs6WfN.nkQmw.sY1xg-1705185549-1-ATyblmW1bvTVkY3o9M+aZOU5lnMZ0R5ThR8QblVINArZXsnS39YH+NGcdAiT04v1fq+OTnj7lrwBulOgZqYUxKY=;
path=/; expires=Sat, 13-Jan-24 23:09:09 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=3PdaxLthL4xZPggd16y46lBPKNh_Xt_xeL_2BrNr9xU-1705185549778-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1730'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299562'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 87ms
x-request-id:
- d2103c8ce4517885db35be6518e966cd
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
work to co-worker: Useful to delegate a specific task to one of the following
co-workers: test role.\nThe input to this tool should be a pipe (|) separated
text of length 3 (three), representing the co-worker you want to ask it to (one
of the options), the task and all actual context you have for the task.\nFor
example, `coworker|task|context`.\nAsk question to co-worker: Useful to ask
a question, opinion or take from on of the following co-workers: test role.\nThe
input to this tool should be a pipe (|) separated text of length 3 (three),
representing the co-worker you want to ask it to (one of the options), the question
and all actual context you have for the question.\n For example, `coworker|question|context`.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer,
Delegate work to co-worker, Ask question to co-worker], just the name.\nAction
Input: the input to the action\nObservation: the result of the action\n```\n\nWhen
you have a response for your task, or if you do not need to use a tool, you
MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]This is the summary of your work so far:\nBegin! This is
VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give
a Final Answer, instead keep using the `get_final_answer` tool.\nThought: Do
I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [1, 2, 3]\nObservation:
42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1965'
content-type:
- application/json
cookie:
- __cf_bm=rVM3HcQbZEwpOTwcpe7kl9QQYRs6WfN.nkQmw.sY1xg-1705185549-1-ATyblmW1bvTVkY3o9M+aZOU5lnMZ0R5ThR8QblVINArZXsnS39YH+NGcdAiT04v1fq+OTnj7lrwBulOgZqYUxKY=;
_cfuvid=3PdaxLthL4xZPggd16y46lBPKNh_Xt_xeL_2BrNr9xU-1705185549778-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8ggwghkojjlEuyvKZu4kPfOsqCGLh\",\n \"object\":
\"chat.completion\",\n \"created\": 1705185550,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [4, 5, 6]\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
449,\n \"completion_tokens\": 28,\n \"total_tokens\": 477\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 845122365b01023d-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 13 Jan 2024 22:39:11 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1718'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299535'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 93ms
x-request-id:
- 9578b64bcbc4f8b489a7ba4df8e10136
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
work to co-worker: Useful to delegate a specific task to one of the following
co-workers: test role.\nThe input to this tool should be a pipe (|) separated
text of length 3 (three), representing the co-worker you want to ask it to (one
of the options), the task and all actual context you have for the task.\nFor
example, `coworker|task|context`.\nAsk question to co-worker: Useful to ask
a question, opinion or take from on of the following co-workers: test role.\nThe
input to this tool should be a pipe (|) separated text of length 3 (three),
representing the co-worker you want to ask it to (one of the options), the question
and all actual context you have for the question.\n For example, `coworker|question|context`.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer,
Delegate work to co-worker, Ask question to co-worker], just the name.\nAction
Input: the input to the action\nObservation: the result of the action\n```\n\nWhen
you have a response for your task, or if you do not need to use a tool, you
MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]This is the summary of your work so far:\nBegin! This is
VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give
a Final Answer, instead keep using the `get_final_answer` tool.\nThought: Do
I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [1, 2, 3]\nObservation:
42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [4, 5, 6]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2072'
content-type:
- application/json
cookie:
- __cf_bm=rVM3HcQbZEwpOTwcpe7kl9QQYRs6WfN.nkQmw.sY1xg-1705185549-1-ATyblmW1bvTVkY3o9M+aZOU5lnMZ0R5ThR8QblVINArZXsnS39YH+NGcdAiT04v1fq+OTnj7lrwBulOgZqYUxKY=;
_cfuvid=3PdaxLthL4xZPggd16y46lBPKNh_Xt_xeL_2BrNr9xU-1705185549778-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8ggwiHUPtP0zST5G9y9gq6L8xn7gO\",\n \"object\":
\"chat.completion\",\n \"created\": 1705185552,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [7, 8, 9]\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
485,\n \"completion_tokens\": 28,\n \"total_tokens\": 513\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 84512244bdb0023d-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 13 Jan 2024 22:39:13 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1439'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299509'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 98ms
x-request-id:
- 3ee6b071c3a4c976c4955dc66589b96e
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
work to co-worker: Useful to delegate a specific task to one of the following
co-workers: test role.\nThe input to this tool should be a pipe (|) separated
text of length 3 (three), representing the co-worker you want to ask it to (one
of the options), the task and all actual context you have for the task.\nFor
example, `coworker|task|context`.\nAsk question to co-worker: Useful to ask
a question, opinion or take from on of the following co-workers: test role.\nThe
input to this tool should be a pipe (|) separated text of length 3 (three),
representing the co-worker you want to ask it to (one of the options), the question
and all actual context you have for the question.\n For example, `coworker|question|context`.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer,
Delegate work to co-worker, Ask question to co-worker], just the name.\nAction
Input: the input to the action\nObservation: the result of the action\n```\n\nWhen
you have a response for your task, or if you do not need to use a tool, you
MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]This is the summary of your work so far:\nBegin! This is
VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give
a Final Answer, instead keep using the `get_final_answer` tool.\nThought: Do
I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [1, 2, 3]\nObservation:
42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [4, 5, 6]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction:
get_final_answer\nAction Input: [7, 8, 9]\nObservation: I''ve used too many
tools for this task. I''m going to give you my absolute BEST Final answer now
and not use any more tools.\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2300'
content-type:
- application/json
cookie:
- __cf_bm=rVM3HcQbZEwpOTwcpe7kl9QQYRs6WfN.nkQmw.sY1xg-1705185549-1-ATyblmW1bvTVkY3o9M+aZOU5lnMZ0R5ThR8QblVINArZXsnS39YH+NGcdAiT04v1fq+OTnj7lrwBulOgZqYUxKY=;
_cfuvid=3PdaxLthL4xZPggd16y46lBPKNh_Xt_xeL_2BrNr9xU-1705185549778-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8ggwkNsNUjt0ChmyTHpqYq62HOuXV\",\n \"object\":
\"chat.completion\",\n \"created\": 1705185554,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
I've used too many tools for this task. I'm going to give you my absolute BEST
Final answer now and not use any more tools.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
547,\n \"completion_tokens\": 42,\n \"total_tokens\": 589\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 84512250bd7d023d-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 13 Jan 2024 22:39:16 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2057'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299452'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 109ms
x-request-id:
- 1e3a6677139fb5c15cbb33f1b3154845
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Don''t
give a Final Answer, instead keep using the `get_final_answer` tool.\nAI: I''ve
used too many tools for this task. I''m going to give you my absolute BEST Final
answer now and not use any more tools.\n\nNew summary:"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1049'
content-type:
- application/json
cookie:
- __cf_bm=rVM3HcQbZEwpOTwcpe7kl9QQYRs6WfN.nkQmw.sY1xg-1705185549-1-ATyblmW1bvTVkY3o9M+aZOU5lnMZ0R5ThR8QblVINArZXsnS39YH+NGcdAiT04v1fq+OTnj7lrwBulOgZqYUxKY=;
_cfuvid=3PdaxLthL4xZPggd16y46lBPKNh_Xt_xeL_2BrNr9xU-1705185549778-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8ggwmU48hiYTdhe5slzvm2Tuyuky3\",\n \"object\":
\"chat.completion\",\n \"created\": 1705185556,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human instructs the AI not to give
a final answer, but to continue using the 'get_final_answer' tool. Despite this,
the AI expresses that it has used many tools for the task and decides to provide
its best final answer without using any more tools.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
185,\n \"completion_tokens\": 54,\n \"total_tokens\": 239\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8451225f7ee6023d-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 13 Jan 2024 22:39:19 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2941'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299753'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 49ms
x-request-id:
- be72b34cee55b48a285458167d9a4909
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -0,0 +1,816 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nDelegate work to co-worker: Useful to delegate a specific task to
one of the following co-workers: test role, test role2.\nThe input to this tool
should be a pipe (|) separated text of length 3 (three), representing the co-worker
you want to ask it to (one of the options), the task and all actual context
you have for the task.\nFor example, `coworker|task|context`.\nAsk question
to co-worker: Useful to ask a question, opinion or take from on of the following
co-workers: test role, test role2.\nThe input to this tool should be a pipe
(|) separated text of length 3 (three), representing the co-worker you want
to ask it to (one of the options), the question and all actual context you have
for the question.\n For example, `coworker|question|context`.\n\nTo use a tool,
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
Yes\nAction: the action to take, should be one of [Delegate work to co-worker,
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
of your work so far:\nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: Just say hi.\n"}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1652'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8gkPA1vnW1g25Qr3SJLWB5zeOsUe4\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198848,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? No\\nFinal
Answer: Hi!\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 367,\n \"completion_tokens\":
17,\n \"total_tokens\": 384\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 845266dd5f1f1a96-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 14 Jan 2024 02:20:50 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=TbP3njfV8Qa2w4gTk3GfLx8EOChqceUdui85lv8w_0s-1705198850-1-AVv9rXKeGNOAPhhVrRvcK49dv9odkct+so0djAQM52Bfzf/nc8ZTJ3zhk2LlfNTWSQ9dcT4UwZHjEHyP8LH39Xg=;
path=/; expires=Sun, 14-Jan-24 02:50:50 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=y6p2l8rtoLMo6gBiQsUGfETrVWxPY83uCFZsS7Jozok-1705198850215-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1832'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299611'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 77ms
x-request-id:
- 12d633fe239a29022ea7206037da096a
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Just
say hi.\nAI: Hi!\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false,
"temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '867'
content-type:
- application/json
cookie:
- __cf_bm=TbP3njfV8Qa2w4gTk3GfLx8EOChqceUdui85lv8w_0s-1705198850-1-AVv9rXKeGNOAPhhVrRvcK49dv9odkct+so0djAQM52Bfzf/nc8ZTJ3zhk2LlfNTWSQ9dcT4UwZHjEHyP8LH39Xg=;
_cfuvid=y6p2l8rtoLMo6gBiQsUGfETrVWxPY83uCFZsS7Jozok-1705198850215-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8gkPCF34yiaqHuTbro7PMBexYagyU\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198850,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human asks the AI to simply say hi,
and the AI responds with \\\"Hi!\\\"\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
144,\n \"completion_tokens\": 18,\n \"total_tokens\": 162\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 845266ee78dc1a96-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 14 Jan 2024 02:20:52 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1287'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299799'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 40ms
x-request-id:
- 2a891eacb4940e36bec1689b1ab92ee0
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour
personal goal is: test goal2TOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
work to co-worker: Useful to delegate a specific task to one of the following
co-workers: test role, test role2.\nThe input to this tool should be a pipe
(|) separated text of length 3 (three), representing the co-worker you want
to ask it to (one of the options), the task and all actual context you have
for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker:
Useful to ask a question, opinion or take from on of the following co-workers:
test role, test role2.\nThe input to this tool should be a pipe (|) separated
text of length 3 (three), representing the co-worker you want to ask it to (one
of the options), the question and all actual context you have for the question.\n
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [get_final_answer, Delegate work to co-worker,
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
of your work so far:\nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer`
tool.\nThis is the context you''re working with:\nHi!\n"}], "model": "gpt-4",
"n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1923'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8gkPEml4Ifldq51knwwASO3nJroK3\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198852,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [1,2,3]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
432,\n \"completion_tokens\": 28,\n \"total_tokens\": 460\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 845266f9fc150316-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 14 Jan 2024 02:20:53 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
path=/; expires=Sun, 14-Jan-24 02:50:53 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1570'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299545'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 91ms
x-request-id:
- 133ee7e16a1491cfd7daa90ea3e42b74
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour
personal goal is: test goal2TOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
work to co-worker: Useful to delegate a specific task to one of the following
co-workers: test role, test role2.\nThe input to this tool should be a pipe
(|) separated text of length 3 (three), representing the co-worker you want
to ask it to (one of the options), the task and all actual context you have
for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker:
Useful to ask a question, opinion or take from on of the following co-workers:
test role, test role2.\nThe input to this tool should be a pipe (|) separated
text of length 3 (three), representing the co-worker you want to ask it to (one
of the options), the question and all actual context you have for the question.\n
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [get_final_answer, Delegate work to co-worker,
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
of your work so far:\nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer`
tool.\nThis is the context you''re working with:\nHi!\nThought: Do I need to
use a tool? Yes\nAction: get_final_answer\nAction Input: [1,2,3]\nObservation:
42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2037'
content-type:
- application/json
cookie:
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
_cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8gkPGwVu7CvxvQd3ZmBPHwz0WYbVQ\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198854,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [4,5,6]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
469,\n \"completion_tokens\": 26,\n \"total_tokens\": 495\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8452670589920316-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 14 Jan 2024 02:20:55 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '983'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299517'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 96ms
x-request-id:
- cb1d820e78376ed344c16e6051cab6bc
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour
personal goal is: test goal2TOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
work to co-worker: Useful to delegate a specific task to one of the following
co-workers: test role, test role2.\nThe input to this tool should be a pipe
(|) separated text of length 3 (three), representing the co-worker you want
to ask it to (one of the options), the task and all actual context you have
for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker:
Useful to ask a question, opinion or take from on of the following co-workers:
test role, test role2.\nThe input to this tool should be a pipe (|) separated
text of length 3 (three), representing the co-worker you want to ask it to (one
of the options), the question and all actual context you have for the question.\n
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [get_final_answer, Delegate work to co-worker,
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
of your work so far:\nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer`
tool.\nThis is the context you''re working with:\nHi!\nThought: Do I need to
use a tool? Yes\nAction: get_final_answer\nAction Input: [1,2,3]\nObservation:
42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [4,5,6]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2142'
content-type:
- application/json
cookie:
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
_cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8gkPHbI2gWbt2veTT8auCPCstOnZh\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198855,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [7,8,9]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
503,\n \"completion_tokens\": 26,\n \"total_tokens\": 529\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8452670dca090316-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 14 Jan 2024 02:20:57 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2350'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299492'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 101ms
x-request-id:
- b40ffecf3dc8f547453bfb166c8c52dd
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour
personal goal is: test goal2TOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
work to co-worker: Useful to delegate a specific task to one of the following
co-workers: test role, test role2.\nThe input to this tool should be a pipe
(|) separated text of length 3 (three), representing the co-worker you want
to ask it to (one of the options), the task and all actual context you have
for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker:
Useful to ask a question, opinion or take from on of the following co-workers:
test role, test role2.\nThe input to this tool should be a pipe (|) separated
text of length 3 (three), representing the co-worker you want to ask it to (one
of the options), the question and all actual context you have for the question.\n
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [get_final_answer, Delegate work to co-worker,
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
of your work so far:\nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer`
tool.\nThis is the context you''re working with:\nHi!\nThought: Do I need to
use a tool? Yes\nAction: get_final_answer\nAction Input: [1,2,3]\nObservation:
42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [4,5,6]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction:
get_final_answer\nAction Input: [7,8,9]\nObservation: I''ve used too many tools
for this task. I''m going to give you my absolute BEST Final answer now and
not use any more tools.\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2368'
content-type:
- application/json
cookie:
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
_cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8gkPKsrN7s3j8bLplwjkAD2c7E6RK\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198858,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
I've used too many tools for this task. I'm going to give you my absolute BEST
Final answer now and not use any more tools.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
563,\n \"completion_tokens\": 42,\n \"total_tokens\": 605\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8452671e0b500316-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 14 Jan 2024 02:21:00 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2080'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299436'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 112ms
x-request-id:
- b2b7142dc669054b82773d62077e9918
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Don''t
give a Final Answer, instead keep using the `get_final_answer` tool.\nThis is
the context you''re working with:\nHi!\nAI: I''ve used too many tools for this
task. I''m going to give you my absolute BEST Final answer now and not use any
more tools.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1096'
content-type:
- application/json
cookie:
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
_cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8gkPMD1hkXKA5DP8MyMJNpAxJXhzj\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198860,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human instructs the AI not to give
a final answer but to continue using the `get_final_answer` tool. However, the
AI responds that it has already used many tools for the task and chooses to
provide its best final answer without using any more tools.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
196,\n \"completion_tokens\": 53,\n \"total_tokens\": 249\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8452672d1bba0316-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 14 Jan 2024 02:21:03 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2683'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299742'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 51ms
x-request-id:
- c00193638e659df4a60ef9425649fe2c
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -9,6 +9,7 @@ from crewai.agents.cache import CacheHandler
from crewai.crew import Crew
from crewai.process import Process
from crewai.task import Task
from crewai.utilities import Logger, RPMController
ceo = Agent(
role="CEO",
@@ -179,19 +180,19 @@ def test_crew_verbose_output(capsys):
crew.kickoff()
captured = capsys.readouterr()
expected_strings = [
"Working Agent: Researcher",
"Starting Task: Research AI advancements.",
"[Researcher] Task output:",
"Working Agent: Senior Writer",
"Starting Task: Write about AI in healthcare.",
"[Senior Writer] Task output:",
"[DEBUG]: Working Agent: Researcher",
"[INFO]: Starting Task: Research AI advancements.",
"[DEBUG]: [Researcher] Task output:",
"[DEBUG]: Working Agent: Senior Writer",
"[INFO]: Starting Task: Write about AI in healthcare.",
"[DEBUG]: [Senior Writer] Task output:",
]
for expected_string in expected_strings:
assert expected_string in captured.out
# Now test with verbose set to False
crew.verbose = False
crew._logger = Logger(verbose_level=False)
crew.kickoff()
captured = capsys.readouterr()
assert captured.out == ""
@@ -211,7 +212,7 @@ def test_crew_verbose_levels_output(capsys):
assert expected_string in captured.out
# Now test with verbose set to 2
crew.verbose = 2
crew._logger = Logger(verbose_level=2)
crew.kickoff()
captured = capsys.readouterr()
expected_strings = [
@@ -257,9 +258,9 @@ def test_cache_hitting_between_agents():
tasks=tasks,
)
assert crew.cache_handler._cache == {}
assert crew._cache_handler._cache == {}
output = crew.kickoff()
assert crew.cache_handler._cache == {"multiplier-2,6": "12"}
assert crew._cache_handler._cache == {"multiplier-2,6": "12"}
assert output == "12"
with patch.object(CacheHandler, "read") as read:
@@ -295,11 +296,9 @@ def test_api_calls_throttling(capsys):
agent=agent,
)
tasks = task
crew = Crew(agents=[agent], tasks=[task], max_rpm=2, verbose=2)
crew = Crew(agents=[agent], tasks=[tasks], max_rpm=2, verbose=2)
with patch.object(Crew, "_wait_for_next_minute") as moveon:
with patch.object(RPMController, "_wait_for_next_minute") as moveon:
moveon.return_value = True
crew.kickoff()
captured = capsys.readouterr()