Mirror of https://github.com/crewAIInc/crewAI.git (synced 2025-12-16 04:18:35 +00:00)
Add ability to remember instructions after using too many tools
pyproject.toml
@@ -1,7 +1,7 @@
 [tool.poetry]
 name = "crewai"
-version = "0.8.0"
+version = "0.10.0"
 description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
 authors = ["Joao Moura <joao@crewai.com>"]
 readme = "README.md"
@@ -150,12 +150,14 @@ class Agent(BaseModel):
         tools = tools or self.tools
         self.agent_executor.tools = tools
+        self.agent_executor.task = task
+        self.agent_executor.tools_description = (render_text_description(tools),)
+        self.agent_executor.tools_names = self.__tools_names(tools)
 
         result = self.agent_executor.invoke(
             {
                 "input": task_prompt,
-                "tool_names": self.__tools_names(tools),
-                "tools": render_text_description(tools),
+                "tool_names": self.agent_executor.tools_names,
+                "tools": self.agent_executor.tools_description,
             }
         )["output"]
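The agent now renders the tool description and tool names once and caches them on the executor, so the same strings can be re-injected later when ToolUsage reminds the agent of the expected format. A rough illustration of what ends up stored, assuming LangChain's `render_text_description` helper (which agent.py already imports) and a hypothetical comma-join standing in for `Agent.__tools_names`:

```python
# Sketch only (not crewAI source): the kind of strings the executor ends up
# holding in tools_description / tools_names. Assumes LangChain's @tool
# decorator and render_text_description.
from langchain.tools import tool
from langchain.tools.render import render_text_description


@tool
def get_final_answer(numbers) -> float:
    """Get the final answer but don't give it yet, just re-use this tool non-stop."""
    return 42


tools = [get_final_answer]
tools_description = render_text_description(tools)
tools_names = ", ".join(t.name for t in tools)  # rough stand-in for Agent.__tools_names

print(tools_description)  # e.g. "get_final_answer: get_final_answer(numbers) -> float - Get the final answer ..."
print(tools_names)        # "get_final_answer"
```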
@@ -21,6 +21,9 @@ class CrewAgentExecutor(AgentExecutor):
     i18n: I18N = I18N()
     llm: Any = None
     iterations: int = 0
+    task: Any = None
+    tools_description: str = ""
+    tools_names: str = ""
     request_within_rpm_limit: Any = None
     tools_handler: InstanceOf[ToolsHandler] = None
     max_iterations: Optional[int] = 15
@@ -187,7 +190,12 @@ class CrewAgentExecutor(AgentExecutor):
             if return_direct:
                 tool_run_kwargs["llm_prefix"] = ""
             observation = ToolUsage(
-                tools_handler=self.tools_handler, tools=self.tools, llm=self.llm
+                tools_handler=self.tools_handler,
+                tools=self.tools,
+                tools_description=self.tools_description,
+                tools_names=self.tools_names,
+                llm=self.llm,
+                task=self.task,
             ).use(agent_action.log)
         else:
             tool_run_kwargs = self.agent.tool_run_logging_kwargs()
@@ -10,30 +10,55 @@ from crewai.tools.tool_calling import ToolCalling
 from crewai.utilities import I18N, Printer
 
 
+class ToolUsageErrorException(Exception):
+    """Exception raised for errors in the tool usage."""
+
+    def __init__(self, message: str) -> None:
+        self.message = message
+        super().__init__(self.message)
+
+
 class ToolUsage:
     """
     Class that represents the usage of a tool by an agent.
 
     Attributes:
+        task: Task being executed.
         tools_handler: Tools handler that will manage the tool usage.
         tools: List of tools available for the agent.
+        tools_description: Description of the tools available for the agent.
+        tools_names: Names of the tools available for the agent.
         llm: Language model to be used for the tool usage.
     """
 
     def __init__(
-        self, tools_handler: ToolsHandler, tools: List[BaseTool], llm: Any
+        self,
+        tools_handler: ToolsHandler,
+        tools: List[BaseTool],
+        tools_description: str,
+        tools_names: str,
+        task: Any,
+        llm: Any,
     ) -> None:
         self._i18n: I18N = I18N()
         self._printer: Printer = Printer()
         self._telemetry: Telemetry = Telemetry()
         self._run_attempts: int = 1
         self._max_parsing_attempts: int = 3
+        self._remeber_format_after_usages: int = 3
+        self.tools_description = tools_description
+        self.tools_names = tools_names
         self.tools_handler = tools_handler
         self.tools = tools
+        self.task = task
         self.llm = llm
 
     def use(self, tool_string: str):
         calling = self._tool_calling(tool_string)
+        if isinstance(calling, ToolUsageErrorException):
+            error = calling.message
+            self._printer.print(content=f"\n\n{error}\n", color="yellow")
+            return error
         tool = self._select_tool(calling.function_name)
         return self._use(tool=tool, calling=calling)
@@ -57,6 +82,24 @@ class ToolUsage:
         self._telemetry.tool_usage(
             llm=self.llm, tool_name=tool.name, attempts=self._run_attempts
         )
+
+        result = self._format_result(result=result)
         return result
 
+    def _format_result(self, result: Any) -> None:
+        self.task.used_tools += 1
+        if self._should_remember_format():
+            result = self._remember_format(result=result)
+        return result
+
+    def _should_remember_format(self) -> None:
+        return self.task.used_tools % self._remeber_format_after_usages == 0
+
+    def _remember_format(self, result: str) -> None:
+        result = str(result)
+        result += "\n\n" + self._i18n.slice("tools").format(
+            tools=self.tools_description, tool_names=self.tools_names
+        )
+        return result
+
     def _check_tool_repeated_usage(self, calling: ToolCalling) -> None:
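These three helpers are the heart of the change: every tool result increments `task.used_tools`, and whenever the count hits a multiple of `_remeber_format_after_usages` (3), the rendered tool descriptions and names are appended to the observation the agent sees, nudging it back toward the expected output format. A standalone sketch of that logic outside crewAI; the reminder template below is a hypothetical stand-in for the i18n "tools" slice, which lives in the translations file and takes the same two placeholders:

```python
# Minimal sketch of the remember-format mechanism, not the crewAI source.
REMIND_EVERY = 3  # mirrors _remeber_format_after_usages

REMINDER_TEMPLATE = (  # hypothetical wording for the i18n "tools" slice
    "\nYou only have access to these tools:\n{tools}\n"
    "Tool names: {tool_names}\n"
)


class FormatReminder:
    def __init__(self, tools_description: str, tools_names: str) -> None:
        self.tools_description = tools_description
        self.tools_names = tools_names
        self.used_tools = 0  # plays the role of task.used_tools

    def format_result(self, result: str) -> str:
        self.used_tools += 1
        if self.used_tools % REMIND_EVERY == 0:
            result = str(result) + "\n\n" + REMINDER_TEMPLATE.format(
                tools=self.tools_description, tool_names=self.tools_names
            )
        return result


reminder = FormatReminder("get_final_answer: ...", "get_final_answer")
for i in range(1, 5):
    out = reminder.format_result("42")
    # On the 3rd call the observation carries the tool instructions again.
    print(i, "reminded" if "Tool names" in out else "plain")
```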
@@ -102,11 +145,11 @@ class ToolUsage:
             chain = prompt | self.llm | parser
             calling = chain.invoke({"tool_string": tool_string})
 
-        except Exception as e:
+        except Exception:
             self._run_attempts += 1
             if self._run_attempts > self._max_parsing_attempts:
                 self._telemetry.tool_usage_error(llm=self.llm)
-                raise e
+                return ToolUsageErrorException(self._i18n.errors("tool_usage_error"))
             return self._tool_calling(tool_string)
 
         return calling
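The reworked exception handling retries the LLM-based parse up to `_max_parsing_attempts` and, once the budget is exhausted, records telemetry and returns a `ToolUsageErrorException` instead of raising; `use()` (earlier in this diff) then turns that object into a plain observation for the agent. A small sketch of the bounded-retry-then-return-error shape, with a generic callable standing in for the `prompt | llm | parser` chain:

```python
# Sketch (not crewAI source) of the retry pattern used by _tool_calling.
from typing import Any, Callable, Union


class ToolUsageErrorException(Exception):
    def __init__(self, message: str) -> None:
        self.message = message
        super().__init__(self.message)


def parse_with_retries(
    parse: Callable[[str], Any],
    tool_string: str,
    max_attempts: int = 3,
    _attempt: int = 1,
) -> Union[Any, ToolUsageErrorException]:
    try:
        return parse(tool_string)
    except Exception:
        if _attempt >= max_attempts:
            # In the real code this is where telemetry.tool_usage_error fires.
            return ToolUsageErrorException(
                "It seems we encountered an unexpected error while trying to use the tool."
            )
        return parse_with_retries(parse, tool_string, max_attempts, _attempt + 1)
```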
@@ -16,7 +16,8 @@
   "errors": {
     "force_final_answer": "Στην πραγματικότητα, χρησιμοποίησα πάρα πολλά εργαλεία, οπότε θα σταματήσω τώρα και θα σας δώσω την απόλυτη ΚΑΛΥΤΕΡΗ τελική μου απάντηση ΤΩΡΑ, χρησιμοποιώντας την αναμενόμενη μορφή: ```\nΣκέφτηκα: Χρειάζεται να χρησιμοποιήσω ένα εργαλείο; Όχι\nΤελική απάντηση: [η απάντησή σας εδώ]```",
     "agent_tool_unexsiting_coworker": "\nΣφάλμα κατά την εκτέλεση του εργαλείου. Ο συνάδελφος που αναφέρεται στο Ενέργεια προς εισαγωγή δεν βρέθηκε, πρέπει να είναι μία από τις ακόλουθες επιλογές: {coworkers}.\n",
-    "task_repeated_usage": "Μόλις χρησιμοποίησα το εργαλείο {tool} με είσοδο {tool_input}. Άρα ξέρω ήδη το αποτέλεσμα αυτού και δεν χρειάζεται να το χρησιμοποιήσω ξανά τώρα.\n"
+    "task_repeated_usage": "Μόλις χρησιμοποίησα το εργαλείο {tool} με είσοδο {tool_input}. Άρα ξέρω ήδη το αποτέλεσμα αυτού και δεν χρειάζεται να το χρησιμοποιήσω ξανά τώρα.\n",
+    "tool_usage_error": "Φαίνεται ότι αντιμετωπίσαμε ένα απροσδόκητο σφάλμα κατά την προσπάθεια χρήσης του εργαλείου."
   },
   "tools": {
     "delegate_work": "Αναθέστε μια συγκεκριμένη εργασία σε έναν από τους παρακάτω συναδέλφους: {coworkers}. Η είσοδος σε αυτό το εργαλείο θα πρέπει να είναι ο ρόλος του συναδέλφου, η εργασία που θέλετε να κάνει και ΟΛΟ το απαραίτητο πλαίσιο για την εκτέλεση της εργασίας, δεν γνωρίζουν τίποτα για την εργασία, επομένως μοιραστείτε απολύτως όλα όσα γνωρίζετε, μην αναφέρετε πράγματα, αλλά αντί να τους εξηγήσεις.",
@@ -14,9 +14,10 @@
     "expected_output": "Your final answer must be: {expected_output}"
   },
   "errors": {
-    "force_final_answer": "Actually, I used too many tools, so I'll stop now and give you my absolute BEST Final answer NOW, using the expected format: ```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```",
+    "force_final_answer": "Actually, I used too many tools, so I'll stop now and give you my absolute BEST Final answer NOW, using exaclty the expected format bellow: \n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```",
     "agent_tool_unexsiting_coworker": "\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options: {coworkers}.\n",
-    "task_repeated_usage": "I just used the {tool} tool with input {tool_input}. So I already know the result of that and don't need to use it again now.\n"
+    "task_repeated_usage": "I just used the {tool} tool with input {tool_input}. So I already know the result of that and don't need to use it again now. \nI could give my final answer if I'm ready, using exaclty the expected format bellow: \n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```\n",
+    "tool_usage_error": "It seems we encountered an unexpected error while trying to use the tool."
   },
   "tools": {
     "delegate_work": "Delegate a specific task to one of the following co-workers: {coworkers}. The input to this tool should be the role of the coworker, the task you want them to do, and ALL necessary context to exectue the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",
@@ -10,6 +10,7 @@ from crewai import Agent, Crew, Task
 from crewai.agents.cache import CacheHandler
 from crewai.agents.executor import CrewAgentExecutor
 from crewai.tools.tool_calling import ToolCalling
+from crewai.tools.tool_usage import ToolUsage
 from crewai.utilities import RPMController
 
 
@@ -256,7 +257,7 @@ def test_agent_repeated_tool_usage(capsys):
 
     captured = capsys.readouterr()
     assert (
-        "I just used the get_final_answer tool with input {'numbers': 42}. So I already know the result of that and don't need to use it again now."
+        "I just used the get_final_answer tool with input {'numbers': 42}. So I already know the result of that and don't need to use it again now. \nI could give my final answer if I'm ready, using exaclty the expected format bellow: \n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```\n"
         in captured.out
     )
@@ -414,6 +415,78 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
         moveon.assert_called_once()
 
 
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_agent_error_on_parsing_tool(capsys):
+    from unittest.mock import patch
+
+    from langchain.tools import tool
+
+    @tool
+    def get_final_answer(numbers) -> float:
+        """Get the final answer but don't give it yet, just re-use this
+        tool non-stop."""
+        return 42
+
+    agent1 = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+        verbose=True,
+    )
+    tasks = [
+        Task(
+            description="Use the get_final_answer tool.",
+            agent=agent1,
+            tools=[get_final_answer],
+        )
+    ]
+
+    crew = Crew(agents=[agent1], tasks=tasks, verbose=2)
+
+    with patch.object(ToolUsage, "_render") as force_exception:
+        force_exception.side_effect = Exception("Error on parsing tool.")
+        crew.kickoff()
+        captured = capsys.readouterr()
+        assert (
+            "It seems we encountered an unexpected error while trying to use the tool"
+            in captured.out
+        )
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_agent_remembers_output_format_after_using_tools_too_many_times():
+    from unittest.mock import patch
+
+    from langchain.tools import tool
+
+    @tool
+    def get_final_answer(numbers) -> float:
+        """Get the final answer but don't give it yet, just re-use this
+        tool non-stop."""
+        return 42
+
+    agent1 = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+        max_iter=4,
+        verbose=True,
+    )
+    tasks = [
+        Task(
+            description="Never give the final answer. Use the get_final_answer tool in a loop.",
+            agent=agent1,
+            tools=[get_final_answer],
+        )
+    ]
+
+    crew = Crew(agents=[agent1], tasks=tasks, verbose=2)
+
+    with patch.object(ToolUsage, "_remember_format") as remember_format:
+        crew.kickoff()
+        remember_format.assert_called()
+
+
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_agent_use_specific_tasks_output_as_context(capsys):
     agent1 = Agent(role="test role", goal="test goal", backstory="test backstory")
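Both new tests drive a real `Crew.kickoff()` against VCR-recorded responses and use `unittest.mock.patch.object` on ToolUsage: the first forces `_render` to raise so the new error path is exercised, the second only asserts that `_remember_format` was invoked. A stripped-down illustration of the two patch styles, using a hypothetical `Target` class with no crewAI involved:

```python
# Sketch of the two patch.object styles used by the new tests:
# 1) force a method to raise via side_effect, 2) assert a method was called.
from unittest.mock import patch


class Target:
    def render(self) -> str:
        return "ok"

    def remember(self) -> None:
        pass

    def run(self) -> str:
        try:
            out = self.render()
        except Exception:
            out = "It seems we encountered an unexpected error"
        self.remember()
        return out


def test_error_path():
    with patch.object(Target, "render") as render:
        render.side_effect = Exception("Error on parsing tool.")
        assert "unexpected error" in Target().run()


def test_remember_called():
    with patch.object(Target, "remember") as remember:
        Target().run()
        remember.assert_called()
```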
tests/cassettes/test_agent_error_on_parsing_tool.yaml (new file, 772 lines)
@@ -0,0 +1,772 @@
[VCR cassette contents: 6 recorded POST requests to https://api.openai.com/v1/chat/completions (model gpt-4, temperature 0.7, stop on "\nObservation"), each with full request/response headers, cookies, and gzip/base64-encoded response bodies.]
version: 1
@@ -0,0 +1,995 @@
[VCR cassette contents: recorded POST requests to https://api.openai.com/v1/chat/completions (model gpt-4, temperature 0.7) for the "Never give the final answer. Use the get_final_answer tool in a loop." task, with full request/response headers and gzip/base64-encoded response bodies.]
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRSU/DMBCF7/kVI59T1CXdckFURSwSiAMcCq0qJ52mAcdjPBMWVf3vyOkmLj68
|
||||
5zf+3ngbAahypVJQ+UZLXjnTGn1Sltx+zeg6u5+wcS9Pj9PJc/L6kNwIqzgkKHvHXI6pi5wqZ1BK
|
||||
sns796gFw9TOsD3sj0fdpNsYFa3QhFjhpJW02oNO75DYUJkjqxTeIgCAbXMGNrvCH5VCOz4qFTLr
|
||||
AlV6ugSgPJmgKM1csmgrKj6bOVlB2+BOCe7AIq5ACGpG0CBE5hJmyHN7lYcOKRQoy3VptVlqy9/o
|
||||
jw7cWVdLCtu5snWVoed5IB7EMIxhFMM4hk57sVOHp3cnZkOF85SFfrY25qSvS1vyZulRM9nAx0Ju
|
||||
H99FAItmN/W/usp5qpwshT7QhoHJaLyfp87fcHZ7w4MpJNqc9X53EB0IFf+yYBUaF+idL5tVBc5o
|
||||
F/0BAAD//wMAg8kFAyECAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 853739d99e49f93d-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sat, 10 Feb 2024 20:50:50 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '7534'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299484'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 103ms
|
||||
x-request-id:
|
||||
- req_e8de19a1404e0081e1db382d204a2679
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
|
||||
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
|
||||
Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
|
||||
get_final_answer(numbers) -> float - Get the final answer but don''t give it
|
||||
yet, just re-use this\n tool non-stop.\n--\nFuntion Name: Delegate work
|
||||
to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, ''task'':
|
||||
{''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: Delegate
|
||||
work to co-worker(coworker: str, task: str, context: str) - Delegate a specific
|
||||
task to one of the following co-workers: . The input to this tool should be
|
||||
the role of the coworker, the task you want them to do, and ALL necessary context
|
||||
to exectue the task, they know nothing about the task, so share absolute everything
|
||||
you know, don''t reference things but instead explain them.\n--\nFuntion Name:
|
||||
Ask question to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''},
|
||||
''question'': {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription:
|
||||
Ask question to co-worker(coworker: str, question: str, context: str) - Ask
|
||||
a specific question to one of the following co-workers: . The input to this
|
||||
tool should be the role of the coworker, the question you have for them, and
|
||||
ALL necessary context to ask the question properly, they know nothing about
|
||||
the question, so share absolute everything you know, don''t reference things
|
||||
but instead explain them.\n\nUse this text to inform a valid ouput schema:\nDo
|
||||
I need to use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\":
|
||||
[6, 7, 8, 9, 10]}\n\nThe output should be formatted as a JSON instance that
|
||||
conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
|
||||
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
|
||||
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
|
||||
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
|
||||
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
|
||||
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
|
||||
\"Function Name\", \"description\": \"The name of the function to be called.\",
|
||||
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
|
||||
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
|
||||
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
|
||||
"n": 1, "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2573'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
|
||||
_cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRPW/bMBCGd/2Kw81yIceVLWsLsmRqByfIYAUCTZ9lJeRRJU9tCkP/PSDt2MjC
|
||||
4f04PHc8ZQDY77EG1Ecl2g5mVv1xu4cXKcT91Zv7X7/N0/ruefMYqsKU95jHhtu9kZav1g/t7GBI
|
||||
esdnW3tSQnHqfFWsynV1VxbJsG5PJta6QWY/Z8Vyvrg0jq7XFLCGbQYAcEpvZOM9fWANqZ8USyGo
|
||||
jrC+hgDQOxMVVCH0QRQL5jdTOxbihHtqGKDBw8g64rasLDVYQ4MdSXvoWZlWcfhHvsH8nFW+Gy2x
|
||||
hJhL/ajyaHfkk7Zd5rDKocphncO8eI2RqeEJLwTTFd24bvBuF9fk0Zirfui5D8fWkwqOI2YQN5zr
|
||||
Uwbwmk40ftsaB+/sIK24d+I4sFyX53l4+42bu6gupjhR5qYvF4vsQojhfxCy8Qgd+cH36WKRM5uy
|
||||
TwAAAP//AwC0p3SrKAIAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 85373a09fbe1f93d-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sat, 10 Feb 2024 20:50:53 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2726'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299394'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 121ms
|
||||
x-request-id:
|
||||
- req_f0982e90897053fa7ea290c5fc976e43
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\n(\"get_final_answer: get_final_answer(numbers) -> float - Get the
|
||||
final answer but don''t give it yet, just re-use this\\n tool non-stop.\\nDelegate
|
||||
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
|
||||
str) - Delegate a specific task to one of the following co-workers: . The input
|
||||
to this tool should be the role of the coworker, the task you want them to do,
|
||||
and ALL necessary context to exectue the task, they know nothing about the task,
|
||||
so share absolute everything you know, don''t reference things but instead explain
|
||||
them.\\nAsk question to co-worker: Ask question to co-worker(coworker: str,
|
||||
question: str, context: str) - Ask a specific question to one of the following
|
||||
co-workers: . The input to this tool should be the role of the coworker, the
|
||||
question you have for them, and ALL necessary context to ask the question properly,
|
||||
they know nothing about the question, so share absolute everything you know,
|
||||
don''t reference things but instead explain them.\",)\n\nTo use a tool, please
|
||||
use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
|
||||
the tool you wanna use, should be one of [get_final_answer, Delegate work to
|
||||
co-worker, Ask question to co-worker], just the name.\nAction Input: Any and
|
||||
all relevant information input and context for using the tool\nObservation:
|
||||
the result of using the tool\n```\n\nWhen you have a response for your task,
|
||||
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
|
||||
summary of your work so far:\nBegin! This is VERY important to you, your job
|
||||
depends on it!\n\nCurrent Task: Never give the final answer. Use the get_final_answer
|
||||
tool in a loop.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: {\"numbers\": [1, 2, 3, 4, 5]}\nObservation: 42\nThought: Do I need to
|
||||
use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\": [6, 7,
|
||||
8, 9, 10]}\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
|
||||
["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2297'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
|
||||
_cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRS2/bMBCE7/oVC57lwIykOtElSFIUzS2XAnnYMGhpI7OhdhlyhTwM//eA8qu9
|
||||
8DDDWX6z3GQAyraqBtWsjTS9d5OLN1796m/L8vXr5okePh+r6o+l31K091NWeUrw6i82ckidNdx7
|
||||
h2KZdnYT0AimqXo2nVWXF+dVMRo9t+hSrPMyKSfTH7rYJ9ZsG4yqhucMAGAznomNWvxQNUzzg9Jj
|
||||
jKZDVR8vAajALinKxGijGBKVn8yGSZBG3J8Md0CILQjDEBEMCLO7gkeMc7puUocaOpTliyXjlobi
|
||||
O4aDA3fkB6lhM1c09CsMcZ6Itc5Bn+egixx0mYOuFlu1f3575Hbc+cCr1JEG5476iyUb18uAJjIl
|
||||
xijsd/FtBrAY9zP8V1n5wL2XpfArUhpYFeVunjp9xcktZntTWIz7JzXT2Z5Qxc8o2KfWHQYf7Liu
|
||||
xJlts28AAAD//wMANhlcFSUCAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 85373a1bc83cf93d-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sat, 10 Feb 2024 20:50:56 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '3406'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299453'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 109ms
|
||||
x-request-id:
|
||||
- req_a5a6994e55cdf125c20fd34abc6279f2
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\n(\"get_final_answer: get_final_answer(numbers) -> float - Get the
|
||||
final answer but don''t give it yet, just re-use this\\n tool non-stop.\\nDelegate
|
||||
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
|
||||
str) - Delegate a specific task to one of the following co-workers: . The input
|
||||
to this tool should be the role of the coworker, the task you want them to do,
|
||||
and ALL necessary context to exectue the task, they know nothing about the task,
|
||||
so share absolute everything you know, don''t reference things but instead explain
|
||||
them.\\nAsk question to co-worker: Ask question to co-worker(coworker: str,
|
||||
question: str, context: str) - Ask a specific question to one of the following
|
||||
co-workers: . The input to this tool should be the role of the coworker, the
|
||||
question you have for them, and ALL necessary context to ask the question properly,
|
||||
they know nothing about the question, so share absolute everything you know,
|
||||
don''t reference things but instead explain them.\",)\n\nTo use a tool, please
|
||||
use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
|
||||
the tool you wanna use, should be one of [get_final_answer, Delegate work to
|
||||
co-worker, Ask question to co-worker], just the name.\nAction Input: Any and
|
||||
all relevant information input and context for using the tool\nObservation:
|
||||
the result of using the tool\n```\n\nWhen you have a response for your task,
|
||||
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
|
||||
summary of your work so far:\nBegin! This is VERY important to you, your job
|
||||
depends on it!\n\nCurrent Task: Never give the final answer. Use the get_final_answer
|
||||
tool in a loop.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: {\"numbers\": [1, 2, 3, 4, 5]}\nObservation: 42\nThought: Do I need to
|
||||
use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\": [6, 7,
|
||||
8, 9, 10]}\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction:
|
||||
get_final_answer\nAction Input: {\"numbers\": [11, 12, 13, 14, 15]}\nObservation:
|
||||
Actually, I used too many tools, so I''ll stop now and give you my absolute
|
||||
BEST Final answer NOW, using exaclty the expected format bellow: \n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]```\nThought:
|
||||
"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
|
||||
0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2650'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
|
||||
_cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRS2/CMBCE7/kVK59DFfMK5FJV6gWuXPqgQibZhLSO19ibthTx3yuHAO3FhxnP
|
||||
+pv1MQIQdSEyEPlOcd5YPZjtabvUaVm2hyc9Wa7mrVx9zvY/8gWxEnFI0PYdc76k7nJqrEauyZzt
|
||||
3KFiDFNlmqST+Ww4STujoQJ1iFWWB+NBMpWjPrGjOkcvMniNAACO3RnYTIHfIoMkvigNeq8qFNn1
|
||||
EoBwpIMilPe1Z2VYxDczJ8NoOtxHggUYxAKYoPUICphI38Mz+rV5yEOHDCrkTVkbpTfK+C90FwcW
|
||||
xracwXEtTNts0fl1IJbTGGQag5zFIOcxDJO3k+ifP125NVXW0TZ0NK3WV72sTe13G4fKkwmMnsme
|
||||
46cI4K3bT/uvsrCOGssbpg80YeB0ND7PE7evuLmjtDeZWOk/qVRGPaHwB8/YhNYVOuvqbl2BMzpF
|
||||
vwAAAP//AwBfP5bvJQIAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 85373a323a04f93d-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sat, 10 Feb 2024 20:51:03 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '6786'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299367'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 126ms
|
||||
x-request-id:
|
||||
- req_3180621ea488c8926b943b613305b069
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
|
||||
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
|
||||
Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
|
||||
get_final_answer(numbers) -> float - Get the final answer but don''t give it
|
||||
yet, just re-use this\n tool non-stop.\n--\nFuntion Name: Delegate work
|
||||
to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, ''task'':
|
||||
{''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: Delegate
|
||||
work to co-worker(coworker: str, task: str, context: str) - Delegate a specific
|
||||
task to one of the following co-workers: . The input to this tool should be
|
||||
the role of the coworker, the task you want them to do, and ALL necessary context
|
||||
to exectue the task, they know nothing about the task, so share absolute everything
|
||||
you know, don''t reference things but instead explain them.\n--\nFuntion Name:
|
||||
Ask question to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''},
|
||||
''question'': {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription:
|
||||
Ask question to co-worker(coworker: str, question: str, context: str) - Ask
|
||||
a specific question to one of the following co-workers: . The input to this
|
||||
tool should be the role of the coworker, the question you have for them, and
|
||||
ALL necessary context to ask the question properly, they know nothing about
|
||||
the question, so share absolute everything you know, don''t reference things
|
||||
but instead explain them.\n\nUse this text to inform a valid ouput schema:\nDo
|
||||
I need to use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\":
|
||||
[16, 17, 18, 19, 20]}\n\nThe output should be formatted as a JSON instance that
|
||||
conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
|
||||
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
|
||||
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
|
||||
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
|
||||
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
|
||||
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
|
||||
\"Function Name\", \"description\": \"The name of the function to be called.\",
|
||||
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
|
||||
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
|
||||
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
|
||||
"n": 1, "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2577'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
|
||||
_cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1RRy27bMBC86ysWPMuFFNuyrVse6KmXIAGaIgpkilpLbMklQ1JOU0P/XlB27OTC
|
||||
wzyWs7OHBIDJlpXARM+D0FbN1q+muc+ebl9/PjwVt/N97v+9i7tfP75fdw83LI0O0/xGET5c34TR
|
||||
VmGQho60cMgDxqn5KlstN+urYjER2rSooq2zYbaYZUU+Pzl6IwV6VsJzAgBwmN6YjVr8y0rI0g9E
|
||||
o/e8Q1aeRQDMGRURxr2XPnAKLL2QwlBAmuI+9gh7rmQLZgh2COBFj5pDwz22YAhCj2Cd2csWW5AU
|
||||
JW9mUC00WFZU0Xa7rehQEUDFdgOJuHRNXGPFSqhYh6HeSeKq5uTf0FUsPWq56waNFHzUTf6I0qAb
|
||||
dBP2nBcp5KsU8nUK+SaFq+wlysaKxulbdlpoPDehTGedaWJrNCh1xneSpO9rh9wbilv7YOzRPiYA
|
||||
L1Pjw5cSmXVG21AH8wcpDlxulsd57HLcT+z8RAYTuLrgxWKdnBIy/+4D6thGh846OR0g5kzG5D8A
|
||||
AAD//wMAi96BtncCAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 85373a5dce03f93d-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sat, 10 Feb 2024 20:51:08 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '4076'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299393'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 121ms
|
||||
x-request-id:
|
||||
- req_5856f2fff5c6c31e202a692aa1f3168d
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
|
||||
lines of conversation provided, adding onto the previous summary returning a
|
||||
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
|
||||
of artificial intelligence. The AI thinks artificial intelligence is a force
|
||||
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
|
||||
intelligence is a force for good?\nAI: Because artificial intelligence will
|
||||
help humans reach their full potential.\n\nNew summary:\nThe human asks what
|
||||
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
|
||||
is a force for good because it will help humans reach their full potential.\nEND
|
||||
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Never
|
||||
give the final answer. Use the get_final_answer tool in a loop.\nAI: Agent stopped
|
||||
due to iteration limit or time limit.\n\nNew summary:"}], "model": "gpt-4",
|
||||
"n": 1, "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '972'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
|
||||
_cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1RRXUsDMRB8v1+x5Lkt/dC29k0EUaSKYJ9ESnq33qUmuzHZ+IH0v0vuzhZfApnZ
|
||||
mZ1MfgoAZSq1AlU2Wkrn7XD5zrvNo/+a36X1ZnFfL9P6ah3o4Xoz2yc1yAre7bGUP9WoZOctimHq
|
||||
6DKgFsyuk8V4cX6xnM6XLeG4QptltZfh2XA8n8x6RcOmxKhW8FwAAPy0Z85GFX6pFYwHf4jDGHWN
|
||||
anUcAlCBbUaUjtFE0SRqcCJLJkFq4z41CE1ymsBQlJBKiSAMhB8YoDYfCNIgvBrSFjTFTwygqcoj
|
||||
KXZcjbJt+W3PC7MFQ6DBMvsR3PBndhu005e3oGskgSjsI1QJs1deLxh0bgyscUaAA4hx2N1Gqg9/
|
||||
OL7acu0D73JDlKw94q+GTGy2AXVkyi/Mizr5oQB4adtN/wpTPrDzshV+Q8qGk/lZ56dOH3liZ4ue
|
||||
FBZtT/h0PCn6hCp+R0GXe6kx+GDasnPO4lD8AgAA//8DAJhMI05jAgAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 85373a78093ff93d-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sat, 10 Feb 2024 20:51:10 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2378'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299771'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 45ms
|
||||
x-request-id:
|
||||
- req_43b967457c1ba06279e8102f4651389b
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
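
Note: the cassette above records the loop this commit targets. The agent calls get_final_answer three times, the executor then injects the reminder observation ("Actually, I used too many tools, so I'll stop now and give you my absolute BEST Final answer..."), and the next recorded completion is the model's Final Answer. Below is a minimal sketch of how such a fixture could be inspected; it assumes the file is a vcrpy-style cassette readable with PyYAML, and the path used is an illustrative placeholder, not the repository's actual test code.

import gzip
import json

import yaml  # PyYAML

# Hypothetical cassette path, for illustration only.
with open("tests/cassettes/agent_tool_loop.yaml") as fh:
    cassette = yaml.safe_load(fh)

# Find the recorded interaction whose prompt contains the injected reminder.
for interaction in cassette["interactions"]:
    prompt = json.loads(interaction["request"]["body"])["messages"][0]["content"]
    if "I used too many tools" in prompt:
        # Response bodies are stored as gzip-compressed JSON (!!binary scalars).
        raw = interaction["response"]["body"]["string"]
        completion = json.loads(gzip.decompress(raw))
        print(completion["choices"][0]["message"]["content"])
        break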