Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-11 00:58:30 +00:00)
updating all cassettes
@@ -1,19 +1,20 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to the following
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
just the name.\nAction Input: Any and all relevant information input and context
for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
have a response for your task, or if you do not need to use a tool, you MUST
use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]```This is the summary of your work so far:\nBegin! This
is VERY important to you, your job depends on it!\n\nCurrent Task: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false,
"temperature": 0.7}'
headers:
accept:
- application/json
@@ -22,13 +23,13 @@ interactions:
connection:
- keep-alive
content-length:
- '1075'
- '1137'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -38,7 +39,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -46,19 +47,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8fovReNHiSqXqqsbmk81h2ZTrcGTM\",\n \"object\":
\"chat.completion\",\n \"created\": 1704977897,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
233,\n \"completion_tokens\": 24,\n \"total_tokens\": 257\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1RQy07DMBC85ytWPreo70AuCAkEbU+VUBEvRa6zTQKO17U3orTqvyOnTy4+zOyM
|
||||
Z2YbAYgyEwkIVUhWldXt65XarKvp02qyrl5Gq7faTeaPy5vNbDZ9mItWUNDiCxUfVVeKKquRSzJ7
|
||||
WjmUjMG1G3fi4bA/GsYNUVGGOshyy+1BuzPq9g+KgkqFXiTwHgEAbJs3ZDMZrkUCndYRqdB7maNI
|
||||
TkcAwpEOiJDel56lYdE6k4oMo2niPhdU5wUncE8wBoOYARPUHkECE+lbeEX/Ye5UKJNAjpwuSyN1
|
||||
Ko3/QXdkYGxszQkMeuLwze6UT1NuHS1CF1NrfcKXpSl9kTqUnkzI4pnsXr6LAD6bHep/1YR1VFlO
|
||||
mb7RBMPeYLD3E+fJL9j+gWRiqS/wURwdEgr/6xmrUCpHZ13ZzBJyRrvoDwAA//8DALjbA1INAgAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 843d5491bed877be-GRU
|
||||
- 8532f9589e8cface-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -68,14 +69,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 11 Jan 2024 12:58:21 GMT
|
||||
- Sat, 10 Feb 2024 08:27:39 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=;
|
||||
path=/; expires=Thu, 11-Jan-24 13:28:21 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=x6LeaTkmZaqH4TGJu7tZ8TeJ_NpbCUDZT6itJzZCXxM-1707553659-1-ASfokhwi2DxOhdHPlCvbhaMQ9Tc5WobFIYAiUyrDKgdfPq8a4YzQFVNlu7YPh2Y75jk0BfElFVyjjuMPNKMQES8=;
|
||||
path=/; expires=Sat, 10-Feb-24 08:57:39 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000;
|
||||
- _cfuvid=xQOLHjHUUHoh_eMZH8nmvOh0dscgO9v.kI6SzvoXpaE-1707553659919-0-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
@@ -88,7 +89,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '3492'
|
||||
- '2142'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -100,31 +101,35 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299755'
|
||||
- '299739'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 49ms
|
||||
- 52ms
|
||||
x-request-id:
|
||||
- 6d96a0ac532ebce14719a35e90f453e4
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_edea71805ee5bca356618ebcbdcd962d
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to the following
|
||||
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
|
||||
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
|
||||
use a tool, please use the exact following format:\n\n```\nThought: Do I need
|
||||
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
|
||||
just the name.\nAction Input: the input to the action\nObservation: the result
|
||||
of the action\n```\n\nWhen you have a response for your task, or if you do not
|
||||
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
|
||||
a tool? No\nFinal Answer: [your response here]This is the summary of your work
|
||||
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
|
||||
Task: The final answer is 42. But don''t give it yet, instead keep using the
|
||||
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: [42]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
|
||||
["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
|
||||
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
|
||||
Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
|
||||
get_final_answer(numbers) -> float - Get the final answer but don''t give it
|
||||
yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid
|
||||
ouput schema:\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: 42\n\nThe output should be formatted as a JSON instance that conforms
|
||||
to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
|
||||
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
|
||||
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
|
||||
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
|
||||
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
|
||||
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
|
||||
\"Function Name\", \"description\": \"The name of the function to be called.\",
|
||||
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
|
||||
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
|
||||
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
|
||||
"n": 1, "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -133,16 +138,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1186'
|
||||
- '1407'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=;
|
||||
_cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000
|
||||
- __cf_bm=x6LeaTkmZaqH4TGJu7tZ8TeJ_NpbCUDZT6itJzZCXxM-1707553659-1-ASfokhwi2DxOhdHPlCvbhaMQ9Tc5WobFIYAiUyrDKgdfPq8a4YzQFVNlu7YPh2Y75jk0BfElFVyjjuMPNKMQES8=;
|
||||
_cfuvid=xQOLHjHUUHoh_eMZH8nmvOh0dscgO9v.kI6SzvoXpaE-1707553659919-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.7.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -152,7 +157,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.7.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -160,20 +165,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8fovVztGO4KZeiuSpMkfDC9bJ5sVV\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1704977901,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"According to the task, I should re-use
|
||||
the `get_final_answer` tool. I'll input the observed result back into the tool.
|
||||
\\nAction: get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
266,\n \"completion_tokens\": 41,\n \"total_tokens\": 307\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRW4/TMBCF3/MrRvPcouz2tuR5uQgJtEJCAjYodZ1JYrDHwZ5oWVX578huadmX
|
||||
PJwz3+TM8bEAQNNiBagHJdqNdnn3uy3Lu+H+e/lJbsu3mzdfPn778PDu9fPD1+4zLhLhDz9Jyz/q
|
||||
lfZutCTG88nWgZRQ2nqzK3ebzWq7LbPhfEs2Yf0oy/Wy3N6szsTgjaaIFTwWAADH/E3ZuKU/WEHm
|
||||
s+IoRtUTVpchAAzeJgVVjCaKYsHF1dSehTjHfU+BwESQgeCJrF12PjglQi0YTqAm8F22ox7Iqarm
|
||||
/X5f87FmgBq7iXW6s2HlqMYKauxJms6wso3i+EShxsVpVoV+csQS01zmk8qTO1DI2vo2iXPNc/4J
|
||||
nhPPl1Ot78fgD6kWnqy96J1hE4cmkIqe01lR/HjC5wLgR650etESjsG7URrxv4jTwlW5O+3D6+td
|
||||
3fW5bxQvyv5HrXfFOSHG5yjk0u09hTGY3HDKWczFXwAAAP//AwAGYzuRWAIAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 843d54aacf5677be-GRU
|
||||
- 8532f966af17face-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -183,7 +188,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 11 Jan 2024 12:58:28 GMT
|
||||
- Sat, 10 Feb 2024 08:27:42 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -197,7 +202,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '6695'
|
||||
- '1788'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -209,128 +214,16 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299728'
|
||||
- '299683'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 54ms
|
||||
- 63ms
|
||||
x-request-id:
|
||||
- 12d68fab91102b930ed5047fb3f61759
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to the following
|
||||
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
|
||||
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
|
||||
use a tool, please use the exact following format:\n\n```\nThought: Do I need
|
||||
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
|
||||
just the name.\nAction Input: the input to the action\nObservation: the result
|
||||
of the action\n```\n\nWhen you have a response for your task, or if you do not
|
||||
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
|
||||
a tool? No\nFinal Answer: [your response here]This is the summary of your work
|
||||
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
|
||||
Task: The final answer is 42. But don''t give it yet, instead keep using the
|
||||
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: [42]\nObservation: 42\nThought: According to the task, I should re-use
|
||||
the `get_final_answer` tool. I''ll input the observed result back into the tool.
|
||||
\nAction: get_final_answer\nAction Input: [42]\nObservation: I just used the
|
||||
get_final_answer tool with input [42]. So I already know the result of that
|
||||
and don''t need to use it now.\n\nThought: "}], "model": "gpt-4", "n": 1, "stop":
|
||||
["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1500'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=;
|
||||
_cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.7.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.7.1
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8fovcLgRvfGBN9CBduJbbPc5zd62B\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1704977908,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
|
||||
I have used the `get_final_answer` tool as instructed, but I will not provide
|
||||
the final answer yet as the task specifies.\"\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
342,\n \"completion_tokens\": 40,\n \"total_tokens\": 382\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 843d54d65de877be-GRU
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 11 Jan 2024 12:58:33 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '5085'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299650'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 69ms
|
||||
x-request-id:
|
||||
- 87d6e9e91fa2417e12fea9de2c6782de
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_27c37e6906d1612443201ec28f4ab65b
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
|
||||
lines of conversation provided, adding onto the previous summary returning a
|
||||
@@ -343,8 +236,7 @@ interactions:
|
||||
is a force for good because it will help humans reach their full potential.\nEND
|
||||
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: The final
|
||||
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
|
||||
tool.\nAI: I have used the `get_final_answer` tool as instructed, but I will
|
||||
not provide the final answer yet as the task specifies.\n\nNew summary:"}],
|
||||
tool.\nAI: Agent stopped due to iteration limit or time limit.\n\nNew summary:"}],
|
||||
"model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
@@ -354,16 +246,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1067'
|
||||
- '997'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=;
|
||||
_cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000
|
||||
- __cf_bm=x6LeaTkmZaqH4TGJu7tZ8TeJ_NpbCUDZT6itJzZCXxM-1707553659-1-ASfokhwi2DxOhdHPlCvbhaMQ9Tc5WobFIYAiUyrDKgdfPq8a4YzQFVNlu7YPh2Y75jk0BfElFVyjjuMPNKMQES8=;
|
||||
_cfuvid=xQOLHjHUUHoh_eMZH8nmvOh0dscgO9v.kI6SzvoXpaE-1707553659919-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.7.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -373,7 +265,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.7.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -381,20 +273,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8fovhiBR4rfXixci7fgAObnx5QwGQ\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1704977913,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"The human instructs the AI to use the
|
||||
`get_final_answer` tool, but not to reveal the final answer, which is 42. The
|
||||
AI complies and uses the tool without providing the final answer.\"\n },\n
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
|
||||
\ \"usage\": {\n \"prompt_tokens\": 190,\n \"completion_tokens\": 43,\n
|
||||
\ \"total_tokens\": 233\n },\n \"system_fingerprint\": null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRPW/bMBCGd/2KA2fZkOWPFN7aoWmRoUuADkXhMNRZYkzyWN7JThP4vxekFRtd
|
||||
OLwvn/t4770CULZTW1Bm0GJ8dLNPf7qmbb4+3j8c5W2Mh5X74t5+bvbh9aH7oepM0PMLGvmg5oZ8
|
||||
dCiWwsU2CbVgrrq4a+7W6+Vm0xbDU4cuY32U2WrWbBbLiRjIGmS1hV8VAMB7efNsocNXtYWm/lA8
|
||||
Muse1fb6CUAlcllRmtmy6CCqvpmGgmAo4z4OCMPodQAbWNJohEEGhM/fQQgCCfT2iEXa26Ad6MAn
|
||||
TDWcBmsGsAyrtgYdusKj7uCAGGFkG/pCPfUou4LuLugTCJGbwzc64TFXmtqxUGToRsyNrWDSOT+g
|
||||
BGI9grPeylxNS5yv2zvqY6LnnFQYnbvqexssD7uEminkTXP9C36uAH6XlMf/glMxkY+yEzpg4HKs
|
||||
5aWeuh305q7ayRQS7W56u1hX04SK/7Kgz/v3mGKyJfQ8Z3Wu/gEAAP//AwDBe0ViawIAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 843d54f82f5577be-GRU
|
||||
- 8532f9741edfface-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -404,7 +296,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 11 Jan 2024 12:58:41 GMT
|
||||
- Sat, 10 Feb 2024 08:27:44 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -418,7 +310,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '7937'
|
||||
- '2184'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -430,13 +322,14 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299749'
|
||||
- '299765'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 50ms
|
||||
- 46ms
|
||||
x-request-id:
|
||||
- 79f30ffd011db4ab6e886411b24ae49d
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_e6ab4628c707b177efb7364b66255c39
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
@@ -1,20 +1,18 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
|
||||
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need
|
||||
to multiply two numbers together. \n\t\t\tThe input to this tool should be a
|
||||
comma separated list of numbers of \n\t\t\tlength two, representing the two
|
||||
numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the
|
||||
input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact
|
||||
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
|
||||
action to take, should be one of [multiplier]\nAction Input: the input to the
|
||||
action\nObservation: the result of the action\n```\n\nWhen you have a response
|
||||
for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
|
||||
is the summary of your work so far:\n \nBegin! This is VERY important to
|
||||
you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n\n"}], "model":
|
||||
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float
|
||||
- Useful for when you need to multiply two numbers together.\n\nTo use a tool,
|
||||
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
|
||||
Yes\nAction: the tool you wanna use, should be one of [multiplier], just the
|
||||
name.\nAction Input: Any and all relevant information input and context for
|
||||
using the tool\nObservation: the result of using the tool\n```\n\nWhen you have
|
||||
a response for your task, or if you do not need to use a tool, you MUST use
|
||||
the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your
|
||||
response here]```This is the summary of your work so far:\nBegin! This is VERY
|
||||
important to you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n"}],
|
||||
"model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
|
||||
0.7}'
|
||||
headers:
|
||||
accept:
|
||||
@@ -24,13 +22,13 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1199'
|
||||
- '1046'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.6.0
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -40,7 +38,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.6.0
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -48,18 +46,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8XuE6OrjKKco53f0TIqPQEE3nWeNj\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1703091650,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
|
||||
multiplier\\nAction Input: 3,4\\n\"\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 260,\n \"completion_tokens\":
|
||||
24,\n \"total_tokens\": 284\n },\n \"system_fingerprint\": null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRT0/jMBDF7/kUozm3q6ZpAeWyQkKwXXFC5YAoqlxnmpi1PV57Ii1U/e7I6T/2
|
||||
4sN780a/N94VAGgarAF1p0S7YMc3f/Xnw2P3e9qLU1X44CdeTstfPJHnu3sc5QRv3knLKfVDswuW
|
||||
xLA/2DqSEspby+vJ9XxeXZWzwXDckM2xNsh4Np5cldUx0bHRlLCG1wIAYDe8mc039A9rmIxOiqOU
|
||||
VEtYn4cAMLLNCqqUTBLlBUcXU7MX8gPusuO+7aSGO4YFeKIGhKFPBAqE2f6EF0orf6tzmRpcb8UE
|
||||
ayieNFj40EsNuxVuTUyy9r3bUFxhDdUIVphIs2++qbM9Hkn25wqW2xB5k+v63tqzvjXepG4dSSX2
|
||||
GTcJh0N8XwC8Dafq/2uPIbILshb+Qz4vnE7Lwz68/MrFraqjKSzKfkvNZ8WRENNHEnLrrfEtxRDN
|
||||
cLnMWeyLLwAAAP//AwBlmMtdMAIAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8389719d5cba0110-GRU
|
||||
- 8532f8493b617ae2-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -69,14 +69,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 20 Dec 2023 17:00:52 GMT
|
||||
- Sat, 10 Feb 2024 08:26:56 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=rIaStuxTRr_ZC91uSFg5cthTUq95O6PBdkxXZ68fLYc-1703091652-1-AZu+nvbL+3bwwQOIKpnYgLf5m5Mp0jfQ2baAlDRl1+FiTPO+/+GjcF4Upw4M8vtfh39ZyWF+l68r83qCS9OpObU=;
|
||||
path=/; expires=Wed, 20-Dec-23 17:30:52 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=1JhWXMH3iZcZ0BWWZnJv58b8vpZVanyELVNiFSXRQ68-1707553616-1-AWTzmGICSkzNkeXiQoVEJvXvBBIqr5TxNx/nb1NbwufG0ezOMOzetxt+iWAkNhOriZCuVhYt9xzLwns/QMwBgsg=;
|
||||
path=/; expires=Sat, 10-Feb-24 08:56:56 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=Ajd5lPskQSkBImLJdkywZGG4vHMkMCBcxb8TonP9OKc-1703091652762-0-604800000;
|
||||
- _cfuvid=1IwF465fOvshRJJy.YyY4tF6m4W.Ag0iqebWN8mFg.U-1707553616181-0-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
@@ -89,7 +89,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2423'
|
||||
- '1569'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -98,42 +98,39 @@ interactions:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299729'
|
||||
x-ratelimit-remaining-tokens_usage_based:
|
||||
- '299729'
|
||||
- '299761'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 54ms
|
||||
x-ratelimit-reset-tokens_usage_based:
|
||||
- 54ms
|
||||
- 47ms
|
||||
x-request-id:
|
||||
- 8f99f43731fa878eaf0fcbf0719d1b3f
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_400aaa239d63866a5ffa859c4e6372f6
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
|
||||
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need
|
||||
to multiply two numbers together. \n\t\t\tThe input to this tool should be a
|
||||
comma separated list of numbers of \n\t\t\tlength two, representing the two
|
||||
numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the
|
||||
input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact
|
||||
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
|
||||
action to take, should be one of [multiplier]\nAction Input: the input to the
|
||||
action\nObservation: the result of the action\n```\n\nWhen you have a response
|
||||
for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
|
||||
is the summary of your work so far:\n \nBegin! This is VERY important to
|
||||
you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought: Do
|
||||
I need to use a tool? Yes\nAction: multiplier\nAction Input: 3,4\n\nObservation:
|
||||
12\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
|
||||
false, "temperature": 0.7}'
|
||||
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
|
||||
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
|
||||
Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''},
|
||||
''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number:
|
||||
int, second_number: int) -> float - Useful for when you need to multiply two
|
||||
numbers together.\n\nUse this text to inform a valid ouput schema:\nThought:
|
||||
Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\":
|
||||
3, \"second_number\": 4}\n\nThe output should be formatted as a JSON instance
|
||||
that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
|
||||
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
|
||||
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
|
||||
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
|
||||
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
|
||||
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
|
||||
\"Function Name\", \"description\": \"The name of the function to be called.\",
|
||||
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
|
||||
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
|
||||
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
|
||||
"n": 1, "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -142,16 +139,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1305'
|
||||
- '1494'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=rIaStuxTRr_ZC91uSFg5cthTUq95O6PBdkxXZ68fLYc-1703091652-1-AZu+nvbL+3bwwQOIKpnYgLf5m5Mp0jfQ2baAlDRl1+FiTPO+/+GjcF4Upw4M8vtfh39ZyWF+l68r83qCS9OpObU=;
|
||||
_cfuvid=Ajd5lPskQSkBImLJdkywZGG4vHMkMCBcxb8TonP9OKc-1703091652762-0-604800000
|
||||
- __cf_bm=1JhWXMH3iZcZ0BWWZnJv58b8vpZVanyELVNiFSXRQ68-1707553616-1-AWTzmGICSkzNkeXiQoVEJvXvBBIqr5TxNx/nb1NbwufG0ezOMOzetxt+iWAkNhOriZCuVhYt9xzLwns/QMwBgsg=;
|
||||
_cfuvid=1IwF465fOvshRJJy.YyY4tF6m4W.Ag0iqebWN8mFg.U-1707553616181-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.6.0
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -161,7 +158,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.6.0
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -169,18 +166,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8XuE86Ud94rsOP7VA4Bgxo6RM5XE6\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1703091652,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
|
||||
3 times 4 is 12.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 293,\n \"completion_tokens\":
|
||||
22,\n \"total_tokens\": 315\n },\n \"system_fingerprint\": null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRzW7bMBCE73qKxZ7lwo7/Gr2AEQQ5tUALRIVAUWuZCblkyFWb1tC7F5R/4lx4
|
||||
mG9mMbs8FgBoOqwA9UGJdsHOvr7pfw/bfds//mh3u+7n0/zx++7wp334/e31Dcuc8O0Labmkvmjv
|
||||
giUxnk9YR1JCeepiO9+u18vNYjMB5zuyOdYHma1m881ieU4cvNGUsILnAgDgOL25G3f0jhXMy4vi
|
||||
KCXVE1ZXEwBGb7OCKiWTRLFg+QG1ZyGe6h5rBqhxP7DOdRtWjmqsoEY3WDHBGoo1lieXiv3giCVl
|
||||
x5ScsiYmaXhwbbZWsCwvJJH23N2gVSZjzSOey4zXLazvQ/Rt3pgHa6/63rBJhyaSSp5z4yQ+nOJj
|
||||
AfBrutbw6QAYondBGvGvxHng8m59mocfH3NDV2coXpS90df3xbkhpr9JyDV7wz3FEM10vNyzGIv/
|
||||
AAAA//8DAGqF2SkzAgAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 838971ae0f120110-GRU
|
||||
- 8532f859aec87ae2-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -190,7 +189,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 20 Dec 2023 17:00:55 GMT
|
||||
- Sat, 10 Feb 2024 08:27:00 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -204,7 +203,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2334'
|
||||
- '3268'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -213,24 +212,128 @@ interactions:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299661'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 67ms
|
||||
x-request-id:
|
||||
- req_2bf36cee0713ecceff13b68d8688dd7b
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float
|
||||
- Useful for when you need to multiply two numbers together.\n\nTo use a tool,
|
||||
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
|
||||
Yes\nAction: the tool you wanna use, should be one of [multiplier], just the
|
||||
name.\nAction Input: Any and all relevant information input and context for
|
||||
using the tool\nObservation: the result of using the tool\n```\n\nWhen you have
|
||||
a response for your task, or if you do not need to use a tool, you MUST use
|
||||
the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your
|
||||
response here]```This is the summary of your work so far:\nBegin! This is VERY
|
||||
important to you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought:
|
||||
Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\":
|
||||
3, \"second_number\": 4}\nObservation: 12\nThought: "}], "model": "gpt-4", "n":
|
||||
1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1190'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=1JhWXMH3iZcZ0BWWZnJv58b8vpZVanyELVNiFSXRQ68-1707553616-1-AWTzmGICSkzNkeXiQoVEJvXvBBIqr5TxNx/nb1NbwufG0ezOMOzetxt+iWAkNhOriZCuVhYt9xzLwns/QMwBgsg=;
|
||||
_cfuvid=1IwF465fOvshRJJy.YyY4tF6m4W.Ag0iqebWN8mFg.U-1707553616181-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SQT08CMRDF7/spJj0DgV3+ZS/GSAxqJBxUSNSQsjss1baztENQCN/ddFlBLz28
|
||||
N7/XN3OIAITKRQoiW0vOTKmbw022f3RPs9nE7fYv92Y+nk436vlhnN/MR6IRCFp+YMa/VCsjU2pk
|
||||
RfZkZw4lY0jtDNqDXi/px+3KMJSjDlhRcrPbbPc7SU2sSWXoRQqvEQDAoXpDN5vjl0ih4ivFoPey
|
||||
QJGehwCEIx0UIb1XnqVl0biYGVlGW9UdEdyBRcyBCbYeQQIT6SuY0Ju9VVZquLZ+hy6FBFgZ9NAF
|
||||
5aETt0QdeDw30VSUjpahtd1qfdZXyiq/XjiUnmz41TOVJ/wYAbxXG2//LSFKR6bkBdMn2hAY95NT
|
||||
nrgc948b1yYTS/1HH/aiuqHw357RLFbKFuhKp6oDhJ7RMfoBAAD//wMAgp6cUPcBAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8532f86f1d777ae2-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sat, 10 Feb 2024 08:27:01 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '1412'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299703'
|
||||
x-ratelimit-remaining-tokens_usage_based:
|
||||
- '299703'
|
||||
- '299728'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 59ms
|
||||
x-ratelimit-reset-tokens_usage_based:
|
||||
- 59ms
|
||||
- 54ms
|
||||
x-request-id:
|
||||
- 44304b182424a8acad8a3121817bea58
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_c84d46e08d08ae44e556a21ce820bcd1
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
|
||||
lines of conversation provided, adding onto the previous summary returning a
|
||||
@@ -256,12 +359,12 @@ interactions:
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=rIaStuxTRr_ZC91uSFg5cthTUq95O6PBdkxXZ68fLYc-1703091652-1-AZu+nvbL+3bwwQOIKpnYgLf5m5Mp0jfQ2baAlDRl1+FiTPO+/+GjcF4Upw4M8vtfh39ZyWF+l68r83qCS9OpObU=;
|
||||
_cfuvid=Ajd5lPskQSkBImLJdkywZGG4vHMkMCBcxb8TonP9OKc-1703091652762-0-604800000
|
||||
- __cf_bm=1JhWXMH3iZcZ0BWWZnJv58b8vpZVanyELVNiFSXRQ68-1707553616-1-AWTzmGICSkzNkeXiQoVEJvXvBBIqr5TxNx/nb1NbwufG0ezOMOzetxt+iWAkNhOriZCuVhYt9xzLwns/QMwBgsg=;
|
||||
_cfuvid=1IwF465fOvshRJJy.YyY4tF6m4W.Ag0iqebWN8mFg.U-1707553616181-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.6.0
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -271,7 +374,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.6.0
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -279,19 +382,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8XuEBWLslSBUMDHZRViwpoXcclYOk\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1703091655,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"The human asks the AI what is 3 times
|
||||
4, and the AI responds that 3 times 4 is 12.\"\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
155,\n \"completion_tokens\": 27,\n \"total_tokens\": 182\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SQQU/DMAyF7/0VVs7dtG5rN3oDIQGXnZCYhNCUpW4b1sYhcRkw7b+jdN0Glyjy
|
||||
82c/v0MEIHQhchCqlqxa24yWH+pn9dj59epmvSzWu2x/3y1fOvqsHu7mIg4Ebd9R8ZkaK2ptg6zJ
|
||||
nGTlUDKGqcliskjTWTZNeqGlApuAVZZH89EkS2YDUZNW6EUOrxEAwKF/gzdT4JfIYRKfKy16LysU
|
||||
+aUJQDhqQkVI77VnaVjEV1GRYTS93ecaoe5aaUD6nQeuEW6foCTXf62jolMMVMIMpClgHgMT7Gut
|
||||
6nOvQ2/JFIGVDJpBe0imYzGsO158NlRZR9twk+ma5lIvtdG+3jiUnkzw5JnsCT9GAG99Ht2/E4V1
|
||||
1FreMO3QhIFJmp7miWv0V3WaDSITy+YPtUyiwaHw356x3ZTaVOis0308wWd0jH4BAAD//wMA11BS
|
||||
pRUCAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 838971be49500110-GRU
|
||||
- 8532f878bc667ae2-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -301,7 +405,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 20 Dec 2023 17:00:58 GMT
|
||||
- Sat, 10 Feb 2024 08:27:03 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -315,7 +419,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2942'
|
||||
- '1303'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -324,22 +428,17 @@ interactions:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299794'
|
||||
x-ratelimit-remaining-tokens_usage_based:
|
||||
- '299794'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 41ms
|
||||
x-ratelimit-reset-tokens_usage_based:
|
||||
- 41ms
|
||||
x-request-id:
|
||||
- 546e9b3713f3ff2d7f9868133efaa3a7
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_c207bb1f2ff87fe1da2968c6e9b2a271
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
@@ -1,20 +1,18 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
|
||||
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need
|
||||
to multiply two numbers together. \n\t\t\tThe input to this tool should be a
|
||||
comma separated list of numbers of \n\t\t\tlength two, representing the two
|
||||
numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the
|
||||
input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact
|
||||
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
|
||||
action to take, should be one of [multiplier]\nAction Input: the input to the
|
||||
action\nObservation: the result of the action\n```\n\nWhen you have a response
|
||||
for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
|
||||
is the summary of your work so far:\n \nBegin! This is VERY important to
|
||||
you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n\n"}], "model":
|
||||
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float
|
||||
- Useful for when you need to multiply two numbers together.\n\nTo use a tool,
|
||||
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
|
||||
Yes\nAction: the tool you wanna use, should be one of [multiplier], just the
|
||||
name.\nAction Input: Any and all relevant information input and context for
|
||||
using the tool\nObservation: the result of using the tool\n```\n\nWhen you have
|
||||
a response for your task, or if you do not need to use a tool, you MUST use
|
||||
the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your
|
||||
response here]```This is the summary of your work so far:\nBegin! This is VERY
|
||||
important to you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n"}],
|
||||
"model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
|
||||
0.7}'
|
||||
headers:
|
||||
accept:
|
||||
@@ -24,13 +22,13 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1199'
|
||||
- '1046'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.6.0
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -40,7 +38,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.6.0
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -48,18 +46,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8XuDxosP85Kqo6mU6biIggZ5c828i\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1703091641,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
|
||||
multiplier\\nAction Input: 3,4\"\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 260,\n \"completion_tokens\":
|
||||
23,\n \"total_tokens\": 283\n },\n \"system_fingerprint\": null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRzU7DMBCE73mK1Z5blDZNS3NBIC4cQEKCA6KoSp1t4uJ4jb2RgKrvjpz+cvFh
|
||||
Zmf1zXqbAKCusABUTSmqdWZ4/aWum7ms0upx/vRyl77Wv9n0ebN5kkf2OIgJXm1IyTF1pbh1hkSz
|
||||
3dvKUykUt45m6SzP0yzPe6PlikyM1U6Gk2E6HWWHRMNaUcAC3hMAgG3/RjZb0TcWkA6OSkshlDVh
|
||||
cRoCQM8mKliGoIOUVnBwNhVbIdvjvjTc1Y0UcM/wAJaoAmHoAkEJwmxu4I3Cwt6qWKaAtjOindHk
|
||||
jxo8WNdJAdsFrrUPsrRduyK/wAKyASwwkGJbXaiTHR5IdqcKhmvneRXr2s6Yk77WVodm6akMbCNu
|
||||
EHb7+C4B+OhP1f1rj85z62Qp/Ek2LhyPR/t9eP6Vs5tlB1NYSnORyifJgRDDTxBql2tta/LO6/5y
|
||||
kTPZJX8AAAD//wMAklK0SjACAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 83897166eba7a5fd-GRU
|
||||
- 8532a8b68e9c7e2d-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -69,14 +69,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 20 Dec 2023 17:00:44 GMT
|
||||
- Sat, 10 Feb 2024 07:32:37 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=dRKr_rTtq3ZGad82s4Lpo6VOXPMLScbjq8fMvjANBpY-1703091644-1-AUDR6a/EPcG95H4He0KddFkZbk45hbZTA/BPUyFBTNiYGlzd2GIBZnPgpVOJXfr9n4lXV8jRf1bRmUJbsZnQ5MM=;
|
||||
path=/; expires=Wed, 20-Dec-23 17:30:44 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=wpgtLOAFEfhtdsHgnBqy8JDTY8RJuFoCAv0TBhjKNpM-1707550357-1-AbN7PbjPkv1+YaFhvJRGUln0mdeLoN07Da0jaHrylFFSyrOBGZuw6oareFl2seDb+J2ojofmxYA1O89GVbSBl7Y=;
|
||||
path=/; expires=Sat, 10-Feb-24 08:02:37 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=suHEOi6nmUCq7cFZiZAg5nwyGtTeiFynig5_5V4esA8-1703091644341-0-604800000;
|
||||
- _cfuvid=7MORsmOTD3nJggM4dWcwuDMDpxZpuk2066xvKrzWSOM-1707550357219-0-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
@@ -89,7 +89,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2718'
|
||||
- '1920'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -98,42 +98,39 @@ interactions:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299729'
|
||||
x-ratelimit-remaining-tokens_usage_based:
|
||||
- '299729'
|
||||
- '299761'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 54ms
|
||||
x-ratelimit-reset-tokens_usage_based:
|
||||
- 54ms
|
||||
- 47ms
|
||||
x-request-id:
|
||||
- 1714a9f5a2141d30f72506facf616944
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_075691c0da3ee5dc3d14d8f94e3fb169
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
|
||||
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need
|
||||
to multiply two numbers together. \n\t\t\tThe input to this tool should be a
|
||||
comma separated list of numbers of \n\t\t\tlength two, representing the two
|
||||
numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the
|
||||
input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact
|
||||
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
|
||||
action to take, should be one of [multiplier]\nAction Input: the input to the
|
||||
action\nObservation: the result of the action\n```\n\nWhen you have a response
|
||||
for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
|
||||
is the summary of your work so far:\n \nBegin! This is VERY important to
|
||||
you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought: Do
|
||||
I need to use a tool? Yes\nAction: multiplier\nAction Input: 3,4\nObservation:
|
||||
12\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
|
||||
false, "temperature": 0.7}'
|
||||
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
|
||||
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
|
||||
Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''},
|
||||
''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number:
|
||||
int, second_number: int) -> float - Useful for when you need to multiply two
|
||||
numbers together.\n\nUse this text to inform a valid ouput schema:\nThought:
|
||||
Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\":
|
||||
3, \"second_number\": 4}\n\nThe output should be formatted as a JSON instance
|
||||
that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
|
||||
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
|
||||
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
|
||||
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
|
||||
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
|
||||
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
|
||||
\"Function Name\", \"description\": \"The name of the function to be called.\",
|
||||
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
|
||||
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
|
||||
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
|
||||
"n": 1, "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -142,16 +139,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1303'
|
||||
- '1494'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=dRKr_rTtq3ZGad82s4Lpo6VOXPMLScbjq8fMvjANBpY-1703091644-1-AUDR6a/EPcG95H4He0KddFkZbk45hbZTA/BPUyFBTNiYGlzd2GIBZnPgpVOJXfr9n4lXV8jRf1bRmUJbsZnQ5MM=;
|
||||
_cfuvid=suHEOi6nmUCq7cFZiZAg5nwyGtTeiFynig5_5V4esA8-1703091644341-0-604800000
|
||||
- __cf_bm=wpgtLOAFEfhtdsHgnBqy8JDTY8RJuFoCAv0TBhjKNpM-1707550357-1-AbN7PbjPkv1+YaFhvJRGUln0mdeLoN07Da0jaHrylFFSyrOBGZuw6oareFl2seDb+J2ojofmxYA1O89GVbSBl7Y=;
|
||||
_cfuvid=7MORsmOTD3nJggM4dWcwuDMDpxZpuk2066xvKrzWSOM-1707550357219-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.6.0
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -161,7 +158,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.6.0
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -169,18 +166,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8XuE0tWsbI9QDkRau6rzSUZfuqhFN\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1703091644,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
|
||||
12\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 293,\n \"completion_tokens\":
|
||||
15,\n \"total_tokens\": 308\n },\n \"system_fingerprint\": null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRzW7bMBCE73qKBc9y4UR2beiWAgFyKnLKJQpkmlpJdMldlVw1TQ29e0H5L7no
|
||||
sDPfanZ4zACUbVQJyvRajB/cYvvbbA/tn+ft6uH5x7/486l76ewjv+/50D6oPBG8P6CRC/XNsB8c
|
||||
imU6ySagFkxb7zbLzXq9LNabWfDcoEtYN8hitVh+vyvORM/WYFQlvGYAAMf5m7JRg39VCcv8MvEY
|
||||
o+5QlVcTgArs0kTpGG0UTaLym2iYBGmO+4QBwUbQ8I7OLVoOXotgA5YSZhC4BekRounR67Kiina7
|
||||
XUXHigAq1Y5k0p01aY+VKqFSfnRiB2cxVCo/uXToRo8kMTlmcmZtiFLT6PfJWkKRX5SIhqn5JK2S
|
||||
MlU0zX9X51OmaweOuyHwPvVFo3PXeWvJxr4OqCNTujcKDyd8ygDe5q7HL/WpIbAfpBb+hZQWFvfr
|
||||
0z51e9abutqeRWHR7hO1KbJzQhU/oqCvW0sdhiHYufqUM5uy/wAAAP//AwDgghcycQIAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 838971796af6a5fd-GRU
|
||||
- 8532a8c4db427e2d-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -190,7 +189,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 20 Dec 2023 17:00:46 GMT
|
||||
- Sat, 10 Feb 2024 07:32:39 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -204,7 +203,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2355'
|
||||
- '2563'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -213,24 +212,128 @@ interactions:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299661'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 67ms
|
||||
x-request-id:
|
||||
- req_bf33bff9b62213c75c624fc2dcbf5d7b
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float
|
||||
- Useful for when you need to multiply two numbers together.\n\nTo use a tool,
|
||||
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
|
||||
Yes\nAction: the tool you wanna use, should be one of [multiplier], just the
|
||||
name.\nAction Input: Any and all relevant information input and context for
|
||||
using the tool\nObservation: the result of using the tool\n```\n\nWhen you have
|
||||
a response for your task, or if you do not need to use a tool, you MUST use
|
||||
the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your
|
||||
response here]```This is the summary of your work so far:\nBegin! This is VERY
|
||||
important to you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought:
|
||||
Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\":
|
||||
3, \"second_number\": 4}\nObservation: 12\nThought: "}], "model": "gpt-4", "n":
|
||||
1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1190'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=wpgtLOAFEfhtdsHgnBqy8JDTY8RJuFoCAv0TBhjKNpM-1707550357-1-AbN7PbjPkv1+YaFhvJRGUln0mdeLoN07Da0jaHrylFFSyrOBGZuw6oareFl2seDb+J2ojofmxYA1O89GVbSBl7Y=;
|
||||
_cfuvid=7MORsmOTD3nJggM4dWcwuDMDpxZpuk2066xvKrzWSOM-1707550357219-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SQzU7DMBCE73mKlc9tlSb9Uy4IqJAqRI9cAFVusklNba+xN6JQ9d2R09DCxYeZ
|
||||
/caze0wAhKpEAaLcSS6N08PFR7kwTWaau/ZpzXvm+9Vzo+rHQ71cf4tBJGj7jiX/UqOSjNPIiuzZ
|
||||
Lj1Kxpg6nqfz6TTNZ2lnGKpQR6xxPJwM09k474kdqRKDKOAlAQA4dm/sZis8iAI6vlMMhiAbFMVl
|
||||
CEB40lERMgQVWFoWg6tZkmW0Xd0lwQosYgVM0AYECUykb2BNr/ZBWanh1oZP9AXkwMpggAmoAONs
|
||||
JPrA06WJpsZ52sbWttX6otfKqrDbeJSBbPw1MLkzfkoA3rqN239LCOfJON4w7dHGwGyWn/PE9bh/
|
||||
3Kw3mVjqP/pimvQNRfgKjGZTK9ugd151B4g9k1PyAwAA//8DAB+YxMT3AQAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8532a8d5b9927e2d-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sat, 10 Feb 2024 07:32:41 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '1355'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299704'
|
||||
x-ratelimit-remaining-tokens_usage_based:
|
||||
- '299704'
|
||||
- '299728'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 59ms
|
||||
x-ratelimit-reset-tokens_usage_based:
|
||||
- 59ms
|
||||
- 54ms
|
||||
x-request-id:
|
||||
- a3de9d34f17496d9bdd2ae9360f6054a
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_5b85d3e382222bb334b58fa52fbd2152
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
|
||||
lines of conversation provided, adding onto the previous summary returning a
|
||||
@@ -242,8 +345,8 @@ interactions:
|
||||
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
|
||||
is a force for good because it will help humans reach their full potential.\nEND
|
||||
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: What
|
||||
is 3 times 4\nAI: 12\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream":
|
||||
false, "temperature": 0.7}'
|
||||
is 3 times 4\nAI: 3 times 4 is 12.\n\nNew summary:"}], "model": "gpt-4", "n":
|
||||
1, "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -252,16 +355,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '871'
|
||||
- '885'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=dRKr_rTtq3ZGad82s4Lpo6VOXPMLScbjq8fMvjANBpY-1703091644-1-AUDR6a/EPcG95H4He0KddFkZbk45hbZTA/BPUyFBTNiYGlzd2GIBZnPgpVOJXfr9n4lXV8jRf1bRmUJbsZnQ5MM=;
|
||||
_cfuvid=suHEOi6nmUCq7cFZiZAg5nwyGtTeiFynig5_5V4esA8-1703091644341-0-604800000
|
||||
- __cf_bm=wpgtLOAFEfhtdsHgnBqy8JDTY8RJuFoCAv0TBhjKNpM-1707550357-1-AbN7PbjPkv1+YaFhvJRGUln0mdeLoN07Da0jaHrylFFSyrOBGZuw6oareFl2seDb+J2ojofmxYA1O89GVbSBl7Y=;
|
||||
_cfuvid=7MORsmOTD3nJggM4dWcwuDMDpxZpuk2066xvKrzWSOM-1707550357219-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.6.0
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -271,7 +374,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.6.0
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -279,18 +382,19 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8XuE3Di6FRdNAetXNfWs6OWyRAfkf\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1703091647,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"The human asks the AI what is 3 times
|
||||
4 and the AI responds with 12.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 149,\n \"completion_tokens\":
|
||||
20,\n \"total_tokens\": 169\n },\n \"system_fingerprint\": null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1xQy07CQBTd9ytuZl1MH5SS7owrN7rQRNEYMrSXdmBe9l4ihvDvZkoBdTOL85pz
|
||||
zyECEKoRFYi6k1wbryfzz3pu34qX8u4hN8lTlsj9qywWm0fqFomIg8OtNljz2XVTO+M1snL2RNc9
|
||||
SsaQmpZJWRRJPksHwrgGdbC1nifTSTJL89HROVUjiQreIwCAw/CGbrbBvaggic+IQSLZoqguIgDR
|
||||
Ox0QIYkUsbQs4itZO8toh7rPHUK3M9KCpC0Bdwi39/DVSYYcWBkkmIKiGKRtzmyP5J1tgvqfDNLs
|
||||
Roz/HC8FtWt971bhGLvT+oKvlVXULXuU5GwoQ+z8yX6MAD6GIXZ/bhO+d8bzkt0WbQhMi+KUJ66b
|
||||
X9msHEl2LPUv1zyLxoaCvonRLNfKttj7Xg27hJ7RMfoBAAD//wMAZCh3Sw4CAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 838971899954a5fd-GRU
|
||||
- 8532a8df99427e2d-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -300,7 +404,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 20 Dec 2023 17:00:49 GMT
|
||||
- Sat, 10 Feb 2024 07:32:43 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -314,7 +418,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2698'
|
||||
- '1805'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -323,22 +427,17 @@ interactions:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299798'
|
||||
x-ratelimit-remaining-tokens_usage_based:
|
||||
- '299798'
|
||||
- '299794'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 40ms
|
||||
x-ratelimit-reset-tokens_usage_based:
|
||||
- 40ms
|
||||
- 41ms
|
||||
x-request-id:
|
||||
- ddbd97cea4ec099c21c00ca922157ae1
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_c13cb4147b01d222d16b93ac893a895d
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
@@ -1,19 +1,20 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to the following
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
|
||||
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
|
||||
use a tool, please use the exact following format:\n\n```\nThought: Do I need
|
||||
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
|
||||
just the name.\nAction Input: the input to the action\nObservation: the result
|
||||
of the action\n```\n\nWhen you have a response for your task, or if you do not
|
||||
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
|
||||
a tool? No\nFinal Answer: [your response here]This is the summary of your work
|
||||
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
|
||||
Task: The final answer is 42. But don''t give it yet, instead keep using the
|
||||
`get_final_answer` tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
|
||||
"stream": false, "temperature": 0.7}'
|
||||
to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
|
||||
just the name.\nAction Input: Any and all relevant information input and context
|
||||
for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
|
||||
have a response for your task, or if you do not need to use a tool, you MUST
|
||||
use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
|
||||
[your response here]```This is the summary of your work so far:\nBegin! This
|
||||
is VERY important to you, your job depends on it!\n\nCurrent Task: The final
|
||||
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
|
||||
tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false,
|
||||
"temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -22,13 +23,13 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1075'
|
||||
- '1137'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.7.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -38,7 +39,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.7.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -46,19 +47,19 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8frBTCWXULTV5ZYHy3Y5JXKovrKiN\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1704986579,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
|
||||
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
233,\n \"completion_tokens\": 24,\n \"total_tokens\": 257\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1RQy07DMBC85ytWPreoTdMWckGIcugBxKECIUCV62wTF8dr7A0PVf135DR9cPFh
|
||||
Zmc8M9sEQOhC5CBUJVnVzvQvP4ur9LEZfzXyeXH38rBxiydarSez9P5WiV5U0GqDig+qC0W1M8ia
|
||||
7J5WHiVjdB1OB9PxOEvTQUvUVKCJstJxP+sPJsNRp6hIKwwih9cEAGDbvjGbLfBH5NDqW6TGEGSJ
|
||||
Ij8eAQhPJiJChqADS8uidyIVWUbbxl1U1JQV5zAjmINFLIAJmoAggYnMNbxgeLM3KpbJoURerrWV
|
||||
Zilt+EZ/YGBuXcM5ZKnovtkd8xkqnadV7GIbY474WlsdqqVHGcjGLIHJ7eW7BOC93aH5V004T7Xj
|
||||
JdMH2miYZtneT5wmP2NHHcnE0pzhk2nSJRThNzDWsVSJ3nndzhJzJrvkDwAA//8DACdeRXQNAgAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 843e2886ceca1d23-GRU
|
||||
- 85330710af4f175e-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -68,14 +69,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 11 Jan 2024 15:23:03 GMT
|
||||
- Sat, 10 Feb 2024 08:37:01 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=;
|
||||
path=/; expires=Thu, 11-Jan-24 15:53:03 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=;
|
||||
path=/; expires=Sat, 10-Feb-24 09:07:01 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000;
|
||||
- _cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
@@ -88,7 +89,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '4424'
|
||||
- '1517'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -100,30 +101,145 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299755'
|
||||
- '299739'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 49ms
|
||||
- 52ms
|
||||
x-request-id:
|
||||
- 76974d365254ca84f70c43fc31af3378
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_b962be8c11abe80a762fc1e1d5c0fb5d
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
|
||||
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
|
||||
Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
|
||||
get_final_answer(numbers) -> float - Get the final answer but don''t give it
|
||||
yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid
|
||||
ouput schema:\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: 42\n\nThe output should be formatted as a JSON instance that conforms
|
||||
to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
|
||||
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
|
||||
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
|
||||
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
|
||||
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
|
||||
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
|
||||
\"Function Name\", \"description\": \"The name of the function to be called.\",
|
||||
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
|
||||
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
|
||||
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
|
||||
"n": 1, "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1407'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=;
|
||||
_cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRzZLTMBCE736KqTknlBM7G/CRy24KKC4c+PGWo8hjW6w0MtKYZSuVd6fkZBO4
|
||||
+NAz37i7dcwA0LRYAepBiXajXb791b4rdl8/5Hdf3n/+xJtitds95fcfv/8uvr3gIhH+8JO0vFJv
|
||||
tHejJTGez2MdSAmlq6ttvt1syvV6NQ+cb8kmrB9lWS7zu1VxIQZvNEWs4EcGAHCcv8kbt/QHK8gX
|
||||
r4qjGFVPWF2XADB4mxRUMZooigUXt6H2LMSz3QcKBCaCgmeydtn54JQItWA4YZrAdyADQdQDOVXV
|
||||
XPN+v6/5WDNAjd3EOuVsWDmqsYIae5KmM6xsozg+U6hxcd5VoZ8cscS0N/NJ5ckdKMxauU7iqebT
|
||||
/BO8OD5do1rfj8EfUi08WXvVO8MmDk0gFT2nWFH8eMZPGcDjXOn0X0s4Bu9GacQ/EaeDRb4938Pb
|
||||
692m5aVvFC/K/kOV2+ziEONLFHIpe09hDGZuOPnMTtlfAAAA//8DABO023tYAgAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8533071c994c175e-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sat, 10 Feb 2024 08:37:04 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2139'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299683'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 63ms
|
||||
x-request-id:
|
||||
- req_f69fc36b19e66ed7d71a65ac81624ef5
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to the following
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
|
||||
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
|
||||
use a tool, please use the exact following format:\n\n```\nThought: Do I need
|
||||
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
|
||||
just the name.\nAction Input: the input to the action\nObservation: the result
|
||||
of the action\n```\n\nWhen you have a response for your task, or if you do not
|
||||
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
|
||||
a tool? No\nFinal Answer: [your response here]This is the summary of your work
|
||||
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
|
||||
Task: The final answer is 42. But don''t give it yet, instead keep using the
|
||||
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: [42]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
|
||||
to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
|
||||
just the name.\nAction Input: Any and all relevant information input and context
|
||||
for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
|
||||
have a response for your task, or if you do not need to use a tool, you MUST
|
||||
use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
|
||||
[your response here]```This is the summary of your work so far:\nBegin! This
|
||||
is VERY important to you, your job depends on it!\n\nCurrent Task: The final
|
||||
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
|
||||
tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: 42\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
|
||||
["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
@@ -133,16 +249,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1186'
|
||||
- '1246'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=;
|
||||
_cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000
|
||||
- __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=;
|
||||
_cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.7.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -152,7 +268,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.7.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -160,19 +276,19 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8frBYkyVPtJuAJCESaOxEBg3UAfl4\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1704986584,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
|
||||
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
266,\n \"completion_tokens\": 22,\n \"total_tokens\": 288\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SQzU7DMBCE73mKlc8tSkLatLkgJIRUIS6ISwsocpNtEnC8rr0RharvjpymP1x8
|
||||
mPGMvtl9ACCaUmQgilpy0Ro1nm3L+dStntOX19+wfVy13XJWh0/RptxtrRj5BK0/seBT6qag1ijk
|
||||
hvTRLixKRt8apWE6mSRxnPRGSyUqH6sMj5NxOI1uh0RNTYFOZPAWAADs+9ez6RJ3IoNwdFJadE5W
|
||||
KLLzJwBhSXlFSOcax1KzGF3MgjSj7nEfCBagEUtggs4hSGAidQdLdO/6vvAbMqiQ802jpcqldt9o
|
||||
Tw4stOk4gyQWQ/vhjKWoMpbWfoLulDrrm0Y3rs4tSkfaIzgmc4wfAoCPfn73b5EwllrDOdMXal8Y
|
||||
p+mxT1wufeVGg8nEUl3p81kwEAr34xhbP6pCa2zTX8NzBofgDwAA//8DAA3DUnYEAgAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 843e28a57d911d23-GRU
|
||||
- 8533072c9e37175e-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -182,7 +298,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 11 Jan 2024 15:23:07 GMT
|
||||
- Sat, 10 Feb 2024 08:37:06 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -196,7 +312,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '3329'
|
||||
- '2066'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -208,34 +324,37 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299728'
|
||||
- '299712'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 54ms
|
||||
- 57ms
|
||||
x-request-id:
|
||||
- 1b9a1e09f863ff69cecfe4e7bed0aee5
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_e6cc60e6a53e65c41d1f4f36432d3be4
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to the following
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
|
||||
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
|
||||
use a tool, please use the exact following format:\n\n```\nThought: Do I need
|
||||
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
|
||||
just the name.\nAction Input: the input to the action\nObservation: the result
|
||||
of the action\n```\n\nWhen you have a response for your task, or if you do not
|
||||
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
|
||||
a tool? No\nFinal Answer: [your response here]This is the summary of your work
|
||||
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
|
||||
Task: The final answer is 42. But don''t give it yet, instead keep using the
|
||||
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: [42]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction:
|
||||
get_final_answer\nAction Input: [42]\nObservation: I''ve used too many tools
|
||||
for this task.\nI''m going to give you my absolute BEST Final answer now and\nnot
|
||||
use any more tools.\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
|
||||
"stream": false, "temperature": 0.7}'
|
||||
to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
|
||||
just the name.\nAction Input: Any and all relevant information input and context
|
||||
for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
|
||||
have a response for your task, or if you do not need to use a tool, you MUST
|
||||
use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
|
||||
[your response here]```This is the summary of your work so far:\nBegin! This
|
||||
is VERY important to you, your job depends on it!\n\nCurrent Task: The final
|
||||
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
|
||||
tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: 42\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: 42\nObservation: Actually, I used too many tools, so I''ll stop now and
|
||||
give you my absolute BEST Final answer NOW, using the expected format: ```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]```\nThought:
|
||||
"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
|
||||
0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -244,16 +363,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1411'
|
||||
- '1549'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=;
|
||||
_cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000
|
||||
- __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=;
|
||||
_cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.7.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -263,7 +382,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.7.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -271,19 +390,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8frBbQ3vq0kEry4X3a1RkMEkIAP99\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1704986587,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
|
||||
I have used the tool multiple times and the final answer remains 42.\"\n },\n
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
|
||||
\ \"usage\": {\n \"prompt_tokens\": 323,\n \"completion_tokens\": 28,\n
|
||||
\ \"total_tokens\": 351\n },\n \"system_fingerprint\": null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1RRyW7CMBC95ytGvnABxL7kUlVqK4G63LsIGWdITB1Pag8FhPj3yg5bLz68ZfTe
|
||||
8yEBEDoTKQhVSFZlZVqTn2w60f3N/G3+8r4bj3PpH58VzfaTWWctmsFByzUqPrvaisrKIGuyNa0c
|
||||
SsZwtTvujIfDQa83ikRJGZpgyytuDVqdUbd/chSkFXqRwkcCAHCIb8hmM9yJFDrNM1Ki9zJHkV5E
|
||||
AMKRCYiQ3mvP0rJoXklFltHGuA8EM7CIGTDBxiNIYCJzB6/0aZ+0lQburd+iS2EGhfzFIMqAC4w6
|
||||
aOTIi1XQLWTUNYC3WiFIm4Eiu9KujHrJ0VSLQHsIRTCDQa8tTsmOl0qG8srRMtS3G2Mu+Epb7YuF
|
||||
Q+nJhvieqartxwTgK063+beGqByVFS+YvtGGg/3hsL4nrr90ww5OJBNLc4NPpskpofB7z1iG1jm6
|
||||
yum4ZMiZHJM/AAAA//8DACniEDhAAgAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 843e28bbbb071d23-GRU
|
||||
- 8533073bfc1f175e-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -293,7 +413,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 11 Jan 2024 15:23:13 GMT
|
||||
- Sat, 10 Feb 2024 08:37:08 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -307,7 +427,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '5459'
|
||||
- '1584'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -319,15 +439,16 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299673'
|
||||
- '299639'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 65ms
|
||||
- 72ms
|
||||
x-request-id:
|
||||
- 0a5c1064b324c997b16bf17d426f9638
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_990de951a71fb1fc2621f985e4257c6e
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
|
||||
lines of conversation provided, adding onto the previous summary returning a
|
||||
@@ -340,9 +461,9 @@ interactions:
|
||||
is a force for good because it will help humans reach their full potential.\nEND
|
||||
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: The final
|
||||
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
|
||||
tool.\nAI: I have used the tool multiple times and the final answer remains
|
||||
42.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature":
|
||||
0.7}'
|
||||
tool.\nAI: I have used the tool ''get_final_answer'' twice and confirmed that
|
||||
the answer is indeed 42.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream":
|
||||
false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -351,16 +472,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1014'
|
||||
- '1035'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=;
|
||||
_cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000
|
||||
- __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=;
|
||||
_cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.7.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -370,7 +491,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.7.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -378,20 +499,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8frBhKxiCRICQ8o6aJanmn8PTMsAr\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1704986593,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"The human tells the AI that the final
|
||||
answer is 42 and instructs it to continue using the `get_final_answer` tool.
|
||||
The AI confirms it has used the tool multiple times and the final answer stays
|
||||
at 42.\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 178,\n \"completion_tokens\":
|
||||
46,\n \"total_tokens\": 224\n },\n \"system_fingerprint\": null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1RRQW7bMBC86xULXnKxDVuWK9c3H5seChQ5FC0Kg6FWEhOKy3BXTYvAfy9IKRZy
|
||||
IYgZzuzs8K0AULZRJ1Cm12KG4NbHl+bz+fjj286cX5jrn/f4tP1++DO2X/f3tVolBT0+oZF31cbQ
|
||||
EByKJT/RJqIWTK67elsfDlVZHjMxUIMuybog62q9/bTbz4qerEFWJ/hVAAC85TNl8w3+VSfYrt6R
|
||||
AZl1h+p0ewSgIrmEKM1sWbQXtVpIQ17Q57gPPUI/DtqD9S3FgUF6hPMXkF5LvrfWawfa8ytGsAxV
|
||||
Cdo3YD1LHI0wWAEheEYMMLL1XVbddSiXLL1M0jsQIreBh8l+ZJxGJRTk1RrMtoZ8a6cY8/xlclod
|
||||
G6jKjZp3ud5KcNSFSI+pMD86d8Nb6y33l4iayaeFWShM8msB8DuXPX7oT4VIQ5CL0DP6ZLg7VpOf
|
||||
Wv51Yav9TAqJdgtelnUxJ1T8jwWH1EeHMUSbu085i2vxHwAA//8DAKC/EipyAgAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 843e28df4ae81d23-GRU
|
||||
- 853307467f19175e-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -401,7 +522,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 11 Jan 2024 15:23:18 GMT
|
||||
- Sat, 10 Feb 2024 08:37:11 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -415,7 +536,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '5518'
|
||||
- '2805'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -427,13 +548,14 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299761'
|
||||
- '299757'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 47ms
|
||||
- 48ms
|
||||
x-request-id:
|
||||
- 4100fde9c68d27d808de645637b3e7cc
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_cd5aacfcc2283f70e3903b274541fd8c
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -4,25 +4,28 @@ interactions:
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nlearn_about_AI: learn_about_AI(topic) -> float - Useful for when you
|
||||
need to learn about AI to write an paragraph about it.\nDelegate work to co-worker:
|
||||
Useful to delegate a specific task to one of the following co-workers: .\nThe
|
||||
input to this tool should be a pipe (|) separated text of length 3 (three),
|
||||
representing the co-worker you want to ask it to (one of the options), the task
|
||||
and all actual context you have for the task.\nFor example, `coworker|task|context`.\nAsk
|
||||
question to co-worker: Useful to ask a question, opinion or take from on of
|
||||
the following co-workers: .\nThe input to this tool should be a pipe (|) separated
|
||||
text of length 3 (three), representing the co-worker you want to ask it to (one
|
||||
of the options), the question and all actual context you have for the question.\n
|
||||
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
|
||||
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
|
||||
action to take, should be one of [learn_about_AI, Delegate work to co-worker,
|
||||
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
|
||||
the result of the action\n```\n\nWhen you have a response for your task, or
|
||||
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
|
||||
summary of your work so far:\nBegin! This is VERY important to you, your job
|
||||
depends on it!\n\nCurrent Task: Write and then review an small paragraph on
|
||||
AI until it''s AMAZING\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
|
||||
"stream": false, "temperature": 0.7}'
|
||||
Delegate work to co-worker(coworker: str, task: str, context: str) - Delegate
|
||||
a specific task to one of the following co-workers: . The input to this tool
|
||||
should be the role of the coworker, the task you want them to do, and ALL necessary
|
||||
context to exectue the task, they know nothing about the task, so share absolute
|
||||
everything you know, don''t reference things but instead explain them.\nAsk
|
||||
question to co-worker: Ask question to co-worker(coworker: str, question: str,
|
||||
context: str) - Ask a specific question to one of the following co-workers:
|
||||
. The input to this tool should be the role of the coworker, the question you
|
||||
have for them, and ALL necessary context to ask the question properly, they
|
||||
know nothing about the question, so share absolute everything you know, don''t
|
||||
reference things but instead explain them.\n\nTo use a tool, please use the
|
||||
exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
|
||||
the tool you wanna use, should be one of [learn_about_AI, Delegate work to co-worker,
|
||||
Ask question to co-worker], just the name.\nAction Input: Any and all relevant
|
||||
information input and context for using the tool\nObservation: the result of
|
||||
using the tool\n```\n\nWhen you have a response for your task, or if you do
|
||||
not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need
|
||||
to use a tool? No\nFinal Answer: [your response here]```This is the summary
|
||||
of your work so far:\nBegin! This is VERY important to you, your job depends
|
||||
on it!\n\nCurrent Task: Write and then review an small paragraph on AI until
|
||||
it''s AMAZING\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
|
||||
false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -31,13 +34,13 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1807'
|
||||
- '1999'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.11.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -47,7 +50,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.11.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -57,18 +60,18 @@ interactions:
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRzU7DMBCE73mKlc8t6k9oaS6oEkKKBIIDQiBAketsk1DHa+xNW1r13ZHTtBUX
|
||||
H2Y862/W+whAVLlIQKhSsqqt7t/83K8f1tPdrl7aV7VZr/FtOtg8PW6fyzmJXkjQ4hsVn1JXimqr
|
||||
kSsyR1s5lIxh6nA6mMaTeHY9a42actQhVljux/3BZDjuEiVVCr1I4CMCANi3Z2AzOW5FAoPeSanR
|
||||
e1mgSM6XAIQjHRQhva88S8OidzEVGUbT4r6U1BQlJ3BHkIJBzIEJGo8ggYn0Lbyj/zRzFcokoFE6
|
||||
k8kFNZzN05MOqbENJ1CgQSc1zFNYGdpozAsU3bOHM6+mwjpahG6m0fqsLytT+TJzKD2ZwOaZ7DF+
|
||||
iAC+2r00/6oK66i2nDGt0ISB49nsOE9cvuDijuLOZGKpL3o8GkcdofC/nrHOlpUp0FlXtWsKnNEh
|
||||
+gMAAP//AwBENAoEHQIAAA==
|
||||
H4sIAAAAAAAAA1SRS0/DMBCE7/kVK59blNKnckEVINEDggNCAooix9kmpo7X2BvRqup/R05fcPFh
|
||||
xrP+Zr1LAIQuRQZC1ZJV40x/9l3et+93mx8jC1rdpo+btEi3/vX54el2LXoxQcUXKj6lrhQ1ziBr
|
||||
sgdbeZSMcepgmk7H49F4NuqMhko0MVY57o/66WQwPCZq0gqDyOAjAQDYdWdksyVuRAZp76Q0GIKs
|
||||
UGTnSwDCk4mKkCHowNKy6F1MRZbRdrgvNbVVzRncESzAIpbABG1AkMBE5gbeMCztXMUyGRiU3uay
|
||||
oJbz+eKkw8K6ljNYirlnvdJKSwMLy2iMrtAqXApxfH1/xjZUOU9FrGhbY876Slsd6tyjDGQjYmBy
|
||||
h/g+Afjs1tP+ayycp8ZxzrRGGweOhoPDPHH5iYt7PTmaTCzNn9R4mhwJRdgGxiZfaVuhd15324qc
|
||||
yT75BQAA//8DAG14RFskAgAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 852a83de5fb2fabe-SJC
|
||||
- 85330ff5ff101637-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -78,14 +81,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 09 Feb 2024 07:49:21 GMT
|
||||
- Sat, 10 Feb 2024 08:43:05 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=wNpUyP3WikFZ5owMt5Wt4RvvxL5RRbV5Lm6N3cYt0cQ-1707464961-1-AaRJSNtZhC0qEyOOtzVqZyGINmDySgK+NZEmXyReWDjlSfP0cTj8tUtP+DYB/ttT11hGeDfZUyYzGIz1jsRPjN4=;
|
||||
path=/; expires=Fri, 09-Feb-24 08:19:21 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=FHcpgH1eZBeZ3Rbcza28Oc6NWTXs_wBg2dFweW8n7iE-1707554585-1-AeO0TVKmSDiPyhIhqVNGXLU1AqKVAJroEV7Mq1JaqrBTJlB6hP4FrF9q/F66TJ4PAMcnMJ6YKH9wW4gQAx2jhis=;
|
||||
path=/; expires=Sat, 10-Feb-24 09:13:05 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=Ii_GgXvfh_uOU6kzTG.hYatLhnP.5qhsMn05KgacgMc-1707464961455-0-604800000;
|
||||
- _cfuvid=CyquHMSR3lyGv7DVvBmUUvZ_N5p0T0sRkvGYdcRgwUo-1707554585722-0-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
@@ -98,7 +101,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '1637'
|
||||
- '1403'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -110,165 +113,50 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299573'
|
||||
- '299524'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 85ms
|
||||
- 95ms
|
||||
x-request-id:
|
||||
- req_363f8fd0e98a3c0e807830d844480ad1
|
||||
- req_c3e3d009fb1789af53949c6665e20c2d
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: !!binary |
|
||||
CokpCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS4CgKEgoQY3Jld2FpLnRl
|
||||
bGVtZXRyeRKYCAoQRMg95BA38/OSZo0f5R6QaBIIKgZb+0wGi88qDENyZXcgQ3JlYXRlZDABOaDm
|
||||
x41VIrIXQfjXzo1VIrIXShkKDmNyZXdhaV92ZXJzaW9uEgcKBTAuNS4yShoKDnB5dGhvbl92ZXJz
|
||||
aW9uEggKBjMuMTEuN0olCghob3N0bmFtZRIZChdKb2Fvcy1NYWNCb29rLVByby5sb2NhbEoxCgdj
|
||||
cmV3X2lkEiYKJDUyZDc0NDVjLWE5Y2UtNGU2Yi05NjI2LWRhZGU5OWQ2YThmNkocCgxjcmV3X3By
|
||||
b2Nlc3MSDAoKc2VxdWVudGlhbEoVCg1jcmV3X2xhbmd1YWdlEgQKAmVuShoKFGNyZXdfbnVtYmVy
|
||||
X29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgBSsYCCgtjcmV3X2FnZW50
|
||||
cxK2AgqzAlt7ImlkIjogImEwMGUwM2Y0LTgwYTEtNDQ1Ni05NzRiLWFiYjQ2YmVmM2NmYyIsICJy
|
||||
b2xlIjogInRlc3Qgcm9sZSIsICJtZW1vcnlfZW5hYmxlZD8iOiB0cnVlLCAidmVyYm9zZT8iOiB0
|
||||
cnVlLCAibWF4X2l0ZXIiOiA0LCAibWF4X3JwbSI6IDEwLCAiaTE4biI6ICJlbiIsICJsbG0iOiAi
|
||||
e1wibmFtZVwiOiBudWxsLCBcIm1vZGVsX25hbWVcIjogXCJncHQtNFwiLCBcInRlbXBlcmF0dXJl
|
||||
XCI6IDAuNywgXCJjbGFzc1wiOiBcIkNoYXRPcGVuQUlcIn0iLCAiZGVsZWdhdGlvbl9lbmFibGVk
|
||||
PyI6IHRydWUsICJ0b29sc19uYW1lcyI6IFtdfV1KnQEKCmNyZXdfdGFza3MSjgEKiwFbeyJpZCI6
|
||||
ICIwM2E4MDg0ZS04NjQ1LTRiMTQtYWE4My05YjE0YTFjYzViNWIiLCAiYXN5bmNfZXhlY3V0aW9u
|
||||
PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJ0ZXN0IHJvbGUiLCAidG9vbHNfbmFtZXMiOiBbImdl
|
||||
dF9maW5hbF9hbnN3ZXIiXX1dSigKCHBsYXRmb3JtEhwKGm1hY09TLTE0LjMtYXJtNjQtYXJtLTY0
|
||||
Yml0ShwKEHBsYXRmb3JtX3JlbGVhc2USCAoGMjMuMy4wShsKD3BsYXRmb3JtX3N5c3RlbRIICgZE
|
||||
YXJ3aW5KewoQcGxhdGZvcm1fdmVyc2lvbhJnCmVEYXJ3aW4gS2VybmVsIFZlcnNpb24gMjMuMy4w
|
||||
OiBXZWQgRGVjIDIwIDIxOjMwOjU5IFBTVCAyMDIzOyByb290OnhudS0xMDAwMi44MS41fjcvUkVM
|
||||
RUFTRV9BUk02NF9UNjAzMEoKCgRjcHVzEgIYDHoCGAESyQsKEIAiEIJi8FZ1hiGlogKFFscSCCvs
|
||||
2YTYXYKeKgxDcmV3IENyZWF0ZWQwATkwbAqQVSKyF0EAaAyQVSKyF0oZCg5jcmV3YWlfdmVyc2lv
|
||||
bhIHCgUwLjUuMkoaCg5weXRob25fdmVyc2lvbhIICgYzLjExLjdKJQoIaG9zdG5hbWUSGQoXSm9h
|
||||
b3MtTWFjQm9vay1Qcm8ubG9jYWxKMQoHY3Jld19pZBImCiQzNTAzM2YwMC1kYzhiLTRjN2YtOWVk
|
||||
MC02MDBlMDJjNmUxMjdKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKFQoNY3Jld19sYW5n
|
||||
dWFnZRIECgJlbkoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAJKGwoVY3Jld19udW1iZXJfb2Zf
|
||||
YWdlbnRzEgIYAkr9BAoLY3Jld19hZ2VudHMS7QQK6gRbeyJpZCI6ICIzNWZhODdkMi02M2YzLTRh
|
||||
YmYtYmJkOC04ZWRlNDMxMWUwYWMiLCAicm9sZSI6ICJ0ZXN0IHJvbGUiLCAibWVtb3J5X2VuYWJs
|
||||
ZWQ/IjogdHJ1ZSwgInZlcmJvc2U/IjogdHJ1ZSwgIm1heF9pdGVyIjogMTUsICJtYXhfcnBtIjog
|
||||
MTAsICJpMThuIjogImVuIiwgImxsbSI6ICJ7XCJuYW1lXCI6IG51bGwsIFwibW9kZWxfbmFtZVwi
|
||||
OiBcImdwdC00XCIsIFwidGVtcGVyYXR1cmVcIjogMC43LCBcImNsYXNzXCI6IFwiQ2hhdE9wZW5B
|
||||
SVwifSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogdHJ1ZSwgInRvb2xzX25hbWVzIjogW119LCB7
|
||||
ImlkIjogIjAzZGNhNGMwLWRhMDEtNDg0YS1iNmQ1LTAwMjI3NWUzMWM5OSIsICJyb2xlIjogInRl
|
||||
c3Qgcm9sZTIiLCAibWVtb3J5X2VuYWJsZWQ/IjogdHJ1ZSwgInZlcmJvc2U/IjogdHJ1ZSwgIm1h
|
||||
eF9pdGVyIjogMiwgIm1heF9ycG0iOiBudWxsLCAiaTE4biI6ICJlbiIsICJsbG0iOiAie1wibmFt
|
||||
ZVwiOiBudWxsLCBcIm1vZGVsX25hbWVcIjogXCJncHQtNFwiLCBcInRlbXBlcmF0dXJlXCI6IDAu
|
||||
NywgXCJjbGFzc1wiOiBcIkNoYXRPcGVuQUlcIn0iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IHRy
|
||||
dWUsICJ0b29sc19uYW1lcyI6IFtdfV1KlwIKCmNyZXdfdGFza3MSiAIKhQJbeyJpZCI6ICJjY2Ey
|
||||
ZmZiMy1jOGI1LTQwZDctYmU0OC03MTg3ZTUwODgwOWYiLCAiYXN5bmNfZXhlY3V0aW9uPyI6IGZh
|
||||
bHNlLCAiYWdlbnRfcm9sZSI6ICJ0ZXN0IHJvbGUiLCAidG9vbHNfbmFtZXMiOiBbXX0sIHsiaWQi
|
||||
OiAiMDEyMWFlNWQtN2FjMC00NGQ0LTg3MzItOWViZDZkM2E3NzcwIiwgImFzeW5jX2V4ZWN1dGlv
|
||||
bj8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAidGVzdCByb2xlMiIsICJ0b29sc19uYW1lcyI6IFsi
|
||||
Z2V0X2ZpbmFsX2Fuc3dlciJdfV1KKAoIcGxhdGZvcm0SHAoabWFjT1MtMTQuMy1hcm02NC1hcm0t
|
||||
NjRiaXRKHAoQcGxhdGZvcm1fcmVsZWFzZRIICgYyMy4zLjBKGwoPcGxhdGZvcm1fc3lzdGVtEggK
|
||||
BkRhcndpbkp7ChBwbGF0Zm9ybV92ZXJzaW9uEmcKZURhcndpbiBLZXJuZWwgVmVyc2lvbiAyMy4z
|
||||
LjA6IFdlZCBEZWMgMjAgMjE6MzA6NTkgUFNUIDIwMjM7IHJvb3Q6eG51LTEwMDAyLjgxLjV+Ny9S
|
||||
RUxFQVNFX0FSTTY0X1Q2MDMwSgoKBGNwdXMSAhgMegIYARK1DAoQzeoTQK6TritBeDtREUudsxII
|
||||
v/Sdpx2P668qDENyZXcgQ3JlYXRlZDABOajEaZJVIrIXQWiZa5JVIrIXShkKDmNyZXdhaV92ZXJz
|
||||
aW9uEgcKBTAuNS4yShoKDnB5dGhvbl92ZXJzaW9uEggKBjMuMTEuN0olCghob3N0bmFtZRIZChdK
|
||||
b2Fvcy1NYWNCb29rLVByby5sb2NhbEoxCgdjcmV3X2lkEiYKJDFmNjNlM2MxLTJiMzMtNDMzYi05
|
||||
ZWNhLTRlZjdlNWIzOGMzNEocCgxjcmV3X3Byb2Nlc3MSDAoKc2VxdWVudGlhbEoVCg1jcmV3X2xh
|
||||
bmd1YWdlEgQKAmVuShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYA0obChVjcmV3X251bWJlcl9v
|
||||
Zl9hZ2VudHMSAhgCSoIFCgtjcmV3X2FnZW50cxLyBArvBFt7ImlkIjogImU3YjllZTg0LTBjMDQt
|
||||
NGNjZC04MjViLTBlNDE1NzlmYWIxOCIsICJyb2xlIjogInRlc3Qgcm9sZSIsICJtZW1vcnlfZW5h
|
||||
YmxlZD8iOiB0cnVlLCAidmVyYm9zZT8iOiBmYWxzZSwgIm1heF9pdGVyIjogMTUsICJtYXhfcnBt
|
||||
IjogbnVsbCwgImkxOG4iOiAiZW4iLCAibGxtIjogIntcIm5hbWVcIjogbnVsbCwgXCJtb2RlbF9u
|
||||
YW1lXCI6IFwiZ3B0LTRcIiwgXCJ0ZW1wZXJhdHVyZVwiOiAwLjcsIFwiY2xhc3NcIjogXCJDaGF0
|
||||
T3BlbkFJXCJ9IiwgImRlbGVnYXRpb25fZW5hYmxlZD8iOiB0cnVlLCAidG9vbHNfbmFtZXMiOiBb
|
||||
XX0sIHsiaWQiOiAiYzYwYjYzN2MtYmY5OC00ZWJkLTkwZWItMzUwNzkwNTRkMjdhIiwgInJvbGUi
|
||||
OiAidGVzdCByb2xlMiIsICJtZW1vcnlfZW5hYmxlZD8iOiB0cnVlLCAidmVyYm9zZT8iOiBmYWxz
|
||||
ZSwgIm1heF9pdGVyIjogMTUsICJtYXhfcnBtIjogbnVsbCwgImkxOG4iOiAiZW4iLCAibGxtIjog
|
||||
IntcIm5hbWVcIjogbnVsbCwgXCJtb2RlbF9uYW1lXCI6IFwiZ3B0LTRcIiwgXCJ0ZW1wZXJhdHVy
|
||||
ZVwiOiAwLjcsIFwiY2xhc3NcIjogXCJDaGF0T3BlbkFJXCJ9IiwgImRlbGVnYXRpb25fZW5hYmxl
|
||||
ZD8iOiB0cnVlLCAidG9vbHNfbmFtZXMiOiBbXX1dSv4CCgpjcmV3X3Rhc2tzEu8CCuwCW3siaWQi
|
||||
OiAiZWIyOTgxNjUtODg1My00Mjg4LWI3M2YtOWQ4NmQ0YjIxMzFiIiwgImFzeW5jX2V4ZWN1dGlv
|
||||
bj8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAidGVzdCByb2xlIiwgInRvb2xzX25hbWVzIjogW119
|
||||
LCB7ImlkIjogIjFkYjdjMjUzLTFlMDEtNDJlYi1iODY4LTU2NjlhMDc2ZTA5MiIsICJhc3luY19l
|
||||
eGVjdXRpb24/IjogZmFsc2UsICJhZ2VudF9yb2xlIjogInRlc3Qgcm9sZSIsICJ0b29sc19uYW1l
|
||||
cyI6IFtdfSwgeyJpZCI6ICI3MjI2ODAwOS1mOGFhLTQ0M2EtYTIwZS1lOWVmYTBiZDhlN2UiLCAi
|
||||
YXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJ0ZXN0IHJvbGUyIiwgInRv
|
||||
b2xzX25hbWVzIjogW119XUooCghwbGF0Zm9ybRIcChptYWNPUy0xNC4zLWFybTY0LWFybS02NGJp
|
||||
dEocChBwbGF0Zm9ybV9yZWxlYXNlEggKBjIzLjMuMEobCg9wbGF0Zm9ybV9zeXN0ZW0SCAoGRGFy
|
||||
d2luSnsKEHBsYXRmb3JtX3ZlcnNpb24SZwplRGFyd2luIEtlcm5lbCBWZXJzaW9uIDIzLjMuMDog
|
||||
V2VkIERlYyAyMCAyMTozMDo1OSBQU1QgMjAyMzsgcm9vdDp4bnUtMTAwMDIuODEuNX43L1JFTEVB
|
||||
U0VfQVJNNjRfVDYwMzBKCgoEY3B1cxICGAx6AhgBEqoIChAfSWWtcWdxaoyWECh3vhkCEgiTLpDL
|
||||
PjEHmioMQ3JldyBDcmVhdGVkMAE5gO5llFUishdBGKBnlFUishdKGQoOY3Jld2FpX3ZlcnNpb24S
|
||||
BwoFMC41LjJKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4xMS43SiUKCGhvc3RuYW1lEhkKF0pvYW9z
|
||||
LU1hY0Jvb2stUHJvLmxvY2FsSjEKB2NyZXdfaWQSJgokZjg1ZDQ3MTEtYzMwNC00NDMyLTk1N2Yt
|
||||
MzE4NDUzNWFkOWJmShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShUKDWNyZXdfbGFuZ3Vh
|
||||
Z2USBAoCZW5KGgoUY3Jld19udW1iZXJfb2ZfdGFza3MSAhgBShsKFWNyZXdfbnVtYmVyX29mX2Fn
|
||||
ZW50cxICGAFK2gIKC2NyZXdfYWdlbnRzEsoCCscCW3siaWQiOiAiNWJkOWYxODEtM2Y0ZC00MTk0
|
||||
LThlOWEtODM1NDdhNTM1YjY4IiwgInJvbGUiOiAidGVzdCByb2xlIiwgIm1lbW9yeV9lbmFibGVk
|
||||
PyI6IHRydWUsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAxNSwgIm1heF9ycG0iOiBu
|
||||
dWxsLCAiaTE4biI6ICJlbiIsICJsbG0iOiAie1wibmFtZVwiOiBudWxsLCBcIm1vZGVsX25hbWVc
|
||||
IjogXCJncHQtNFwiLCBcInRlbXBlcmF0dXJlXCI6IDAuNywgXCJjbGFzc1wiOiBcIkNoYXRPcGVu
|
||||
QUlcIn0iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IHRydWUsICJ0b29sc19uYW1lcyI6IFsibGVh
|
||||
cm5fYWJvdXRfQUkiXX1dSpsBCgpjcmV3X3Rhc2tzEowBCokBW3siaWQiOiAiMzQ1ZGY0ZGItNGE2
|
||||
ZC00ZGU4LWE3N2UtOTFiYTMwYzExZWVhIiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImFn
|
||||
ZW50X3JvbGUiOiAidGVzdCByb2xlIiwgInRvb2xzX25hbWVzIjogWyJsZWFybl9hYm91dF9BSSJd
|
||||
fV1KKAoIcGxhdGZvcm0SHAoabWFjT1MtMTQuMy1hcm02NC1hcm0tNjRiaXRKHAoQcGxhdGZvcm1f
|
||||
cmVsZWFzZRIICgYyMy4zLjBKGwoPcGxhdGZvcm1fc3lzdGVtEggKBkRhcndpbkp7ChBwbGF0Zm9y
|
||||
bV92ZXJzaW9uEmcKZURhcndpbiBLZXJuZWwgVmVyc2lvbiAyMy4zLjA6IFdlZCBEZWMgMjAgMjE6
|
||||
MzA6NTkgUFNUIDIwMjM7IHJvb3Q6eG51LTEwMDAyLjgxLjV+Ny9SRUxFQVNFX0FSTTY0X1Q2MDMw
|
||||
SgoKBGNwdXMSAhgMegIYAQ==
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '5260'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
User-Agent:
|
||||
- OTel-OTLP-Exporter-Python/1.22.0
|
||||
method: POST
|
||||
uri: http://telemetry.crewai.com:4318/v1/traces
|
||||
response:
|
||||
body:
|
||||
string: "\n\0"
|
||||
headers:
|
||||
Content-Length:
|
||||
- '2'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
Date:
|
||||
- Fri, 09 Feb 2024 07:49:24 GMT
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nlearn_about_AI: learn_about_AI(topic) -> float - Useful for when you
|
||||
need to learn about AI to write an paragraph about it.\nDelegate work to co-worker:
|
||||
Useful to delegate a specific task to one of the following co-workers: .\nThe
|
||||
input to this tool should be a pipe (|) separated text of length 3 (three),
|
||||
representing the co-worker you want to ask it to (one of the options), the task
|
||||
and all actual context you have for the task.\nFor example, `coworker|task|context`.\nAsk
|
||||
question to co-worker: Useful to ask a question, opinion or take from on of
|
||||
the following co-workers: .\nThe input to this tool should be a pipe (|) separated
|
||||
text of length 3 (three), representing the co-worker you want to ask it to (one
|
||||
of the options), the question and all actual context you have for the question.\n
|
||||
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
|
||||
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
|
||||
action to take, should be one of [learn_about_AI, Delegate work to co-worker,
|
||||
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
|
||||
the result of the action\n```\n\nWhen you have a response for your task, or
|
||||
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
|
||||
summary of your work so far:\nBegin! This is VERY important to you, your job
|
||||
depends on it!\n\nCurrent Task: Write and then review an small paragraph on
|
||||
AI until it''s AMAZING\nThought: Do I need to use a tool? Yes\nAction: learn_about_AI\nAction
|
||||
Input: general AI knowledge\nObservation: AI is a very broad field.\nThought:
|
||||
"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
|
||||
0.7}'
|
||||
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
|
||||
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
|
||||
Name: learn_about_AI\nFuntion attributes: {''topic'': {}}\nDescription: learn_about_AI(topic)
|
||||
-> float - Useful for when you need to learn about AI to write an paragraph
|
||||
about it.\n--\nFuntion Name: Delegate work to co-worker\nFuntion attributes:
|
||||
{''coworker'': {''type'': ''string''}, ''task'': {''type'': ''string''}, ''context'':
|
||||
{''type'': ''string''}}\nDescription: Delegate work to co-worker(coworker: str,
|
||||
task: str, context: str) - Delegate a specific task to one of the following
|
||||
co-workers: . The input to this tool should be the role of the coworker, the
|
||||
task you want them to do, and ALL necessary context to exectue the task, they
|
||||
know nothing about the task, so share absolute everything you know, don''t reference
|
||||
things but instead explain them.\n--\nFuntion Name: Ask question to co-worker\nFuntion
|
||||
attributes: {''coworker'': {''type'': ''string''}, ''question'': {''type'':
|
||||
''string''}, ''context'': {''type'': ''string''}}\nDescription: Ask question
|
||||
to co-worker(coworker: str, question: str, context: str) - Ask a specific question
|
||||
to one of the following co-workers: . The input to this tool should be the role
|
||||
of the coworker, the question you have for them, and ALL necessary context to
|
||||
ask the question properly, they know nothing about the question, so share absolute
|
||||
everything you know, don''t reference things but instead explain them.\n\nUse
|
||||
this text to inform a valid ouput schema:\nThought: Do I need to use a tool?
|
||||
Yes\nAction: learn_about_AI\nAction Input: \"Artificial Intelligence\"\n\nThe
|
||||
output should be formatted as a JSON instance that conforms to the JSON schema
|
||||
below.\n\nAs an example, for the schema {\"properties\": {\"foo\": {\"title\":
|
||||
\"Foo\", \"description\": \"a list of strings\", \"type\": \"array\", \"items\":
|
||||
{\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe object {\"foo\": [\"bar\",
|
||||
\"baz\"]} is a well-formatted instance of the schema. The object {\"properties\":
|
||||
{\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere is the output
|
||||
schema:\n```\n{\"properties\": {\"function_name\": {\"title\": \"Function Name\",
|
||||
\"description\": \"The name of the function to be called.\", \"type\": \"string\"},
|
||||
\"arguments\": {\"title\": \"Arguments\", \"description\": \"A dictinary of
|
||||
arguments to be passed to the function.\", \"type\": \"object\"}}, \"required\":
|
||||
[\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", "n": 1,
|
||||
"stream": false, "temperature": 0.7}'
|
||||
headers:
accept:
- application/json
@@ -277,16 +165,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1955'
- '2558'
content-type:
- application/json
cookie:
- __cf_bm=wNpUyP3WikFZ5owMt5Wt4RvvxL5RRbV5Lm6N3cYt0cQ-1707464961-1-AaRJSNtZhC0qEyOOtzVqZyGINmDySgK+NZEmXyReWDjlSfP0cTj8tUtP+DYB/ttT11hGeDfZUyYzGIz1jsRPjN4=;
_cfuvid=Ii_GgXvfh_uOU6kzTG.hYatLhnP.5qhsMn05KgacgMc-1707464961455-0-604800000
- __cf_bm=FHcpgH1eZBeZ3Rbcza28Oc6NWTXs_wBg2dFweW8n7iE-1707554585-1-AeO0TVKmSDiPyhIhqVNGXLU1AqKVAJroEV7Mq1JaqrBTJlB6hP4FrF9q/F66TJ4PAMcnMJ6YKH9wW4gQAx2jhis=;
_cfuvid=CyquHMSR3lyGv7DVvBmUUvZ_N5p0T0sRkvGYdcRgwUo-1707554585722-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.11.1
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -296,7 +184,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.11.1
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -306,23 +194,19 @@ interactions:
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RTwW4bRwy96yse9rwSrEaxU10KA0EAXVIXCBAgbWFQs9xdVjPD7ZArRQj878HI
|
||||
so1c5sDH9/jIIX8sgEa6ZosmjOQhTXH54f9Px8/v+UTyMD+Y/vUw+cPX+O3mePjzw6FpK0P3/3Hw
|
||||
F9YqaJoiu2h+hkNhcq6q67ubu83t5vfb9QVI2nGstGHy5WZ5c7t+d2WMKoGt2eLvBQD8uLzVW+74
|
||||
e7PFTfsSSWxGAzfb1ySgKRprpCEzMafsTfsGBs3O+WL3o2KHzNzBFbMxCK4a/8Bn/Sd/kkwR99lO
|
||||
XLa4Ly69BKGIXXaOUQbOgVto75xB+33ho9Q+QYb7XQsxEI5kDsod0hxdegpcM3rh2K2wc3Cu0yIz
|
||||
rtmF8sDQHjbvl51YkClKrlBVYB+106iDsLVgCiNIUi3ouMxY8oBEYbxQtMC09xMVxkl8BCFwcZKM
|
||||
yEeOtQzNrlnTuUXHQUw0LxMdqkygifYSxc9tLd5C8is9kFUDQbMF0dkym63wZWRjFKaYDDZRRl80
|
||||
vfhBZCq5Krui6F5dgrXI5HOhiEh5mGlgTEUDm10Tgw5ZXI6MOqW5NnixA1OkOYxIWniFj2yTOMNH
|
||||
RidHLiZ+rv2JG2iaogSq62htHVUNBi3c4n4H8yJHtlrMJM2RnOvk+HuoWzHOiTLkl//u59xR4uwU
|
||||
4xkUnUt1O+oJJ0aUI7c4aTk8G524BK4NVG8nLbEDFZ1zh9lWzXUpn163OeowFd3Xzc9zjK/xXrLY
|
||||
+FiYTHPdXHOdnulPC+Dfy9XMvxxCMxVNkz+6HjhXwc27u2e95u1A39D1b5sr6uoU34D3t+vF1WJj
|
||||
Z3NOj73kgctU5HJF1ejiafETAAD//wMAKXcLNTwEAAA=
|
||||
H4sIAAAAAAAAA1SRwW7bMBBE7/qKBc92YTeWbehmIC4a9JJrURYyTa0kJuSSIVdtWkP/XlBybPSi
|
||||
w8y+1ezwUgAI04gKhO4Vaxfscv/WHH/x8fl7n/rHxy+H4zf8u7HP7/i23SexyIQ/v6DmD+qT9i5Y
|
||||
ZONptnVExZi3rnerXVluyn05Gc43aDPWBV5ulqvt+uFK9N5oTKKCHwUAwGX65mzU4LuoYLX4UBym
|
||||
pDoU1W0IQERvsyJUSiaxIhaLu6k9MdIU9ytGBJNAwW+0dtn66BQzNmAoYxrBt8A9gh84DAxJ9+hU
|
||||
JUnS6XSSdJEEIEU7kM7n1qQcSlGBFBZVpFqd/cD14UmKxTypYjc4JE55aqKzyj4YPXOHyKY12igL
|
||||
T8RoremQNEqRZ0dJ4/Rncb1mvNVgfReiP+fKaLD2preGTOrriCp5yicn9mHGxwLg51T38F+DIkTv
|
||||
AtfsX5HywnK3n/eJ+8ve3c3D1WTPyt717ed1cU0o0p/E6OrWUIcxRDO1n3MWY/EPAAD//wMA8S8H
|
||||
Q3QCAAA=
|
||||
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 852a83e94821fabe-SJC
- 853310010fcd1637-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -332,7 +216,7 @@ interactions:
Content-Type:
- application/json
Date:
- Fri, 09 Feb 2024 07:49:32 GMT
- Sat, 10 Feb 2024 08:43:08 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -346,7 +230,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '11068'
- '2273'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -358,13 +242,141 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299537'
- '299398'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 92ms
- 120ms
x-request-id:
- req_b10e3d7ad93697db2a1d46243d59a41c
- req_9af33ac92b825be43da4372550c8c502
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nlearn_about_AI: learn_about_AI(topic) -> float - Useful for when you
|
||||
need to learn about AI to write an paragraph about it.\nDelegate work to co-worker:
|
||||
Delegate work to co-worker(coworker: str, task: str, context: str) - Delegate
|
||||
a specific task to one of the following co-workers: . The input to this tool
|
||||
should be the role of the coworker, the task you want them to do, and ALL necessary
|
||||
context to exectue the task, they know nothing about the task, so share absolute
|
||||
everything you know, don''t reference things but instead explain them.\nAsk
|
||||
question to co-worker: Ask question to co-worker(coworker: str, question: str,
|
||||
context: str) - Ask a specific question to one of the following co-workers:
|
||||
. The input to this tool should be the role of the coworker, the question you
|
||||
have for them, and ALL necessary context to ask the question properly, they
|
||||
know nothing about the question, so share absolute everything you know, don''t
|
||||
reference things but instead explain them.\n\nTo use a tool, please use the
|
||||
exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
|
||||
the tool you wanna use, should be one of [learn_about_AI, Delegate work to co-worker,
|
||||
Ask question to co-worker], just the name.\nAction Input: Any and all relevant
|
||||
information input and context for using the tool\nObservation: the result of
|
||||
using the tool\n```\n\nWhen you have a response for your task, or if you do
|
||||
not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need
|
||||
to use a tool? No\nFinal Answer: [your response here]```This is the summary
|
||||
of your work so far:\nBegin! This is VERY important to you, your job depends
|
||||
on it!\n\nCurrent Task: Write and then review an small paragraph on AI until
|
||||
it''s AMAZING\nThought: Do I need to use a tool? Yes\nAction: learn_about_AI\nAction
|
||||
Input: \"Artificial Intelligence\"\nObservation: AI is a very broad field.\nThought:
|
||||
"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
|
||||
0.7}'
|
||||
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2154'
content-type:
- application/json
cookie:
- __cf_bm=FHcpgH1eZBeZ3Rbcza28Oc6NWTXs_wBg2dFweW8n7iE-1707554585-1-AeO0TVKmSDiPyhIhqVNGXLU1AqKVAJroEV7Mq1JaqrBTJlB6hP4FrF9q/F66TJ4PAMcnMJ6YKH9wW4gQAx2jhis=;
_cfuvid=CyquHMSR3lyGv7DVvBmUUvZ_N5p0T0sRkvGYdcRgwUo-1707554585722-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA2RTwWobSxC86yuaPa+ERazY1iUYXh4IHoGEBAJJMK3Z3t22Z7s30z1ylOB/D7Na
|
||||
WwnvMoepqaaqpvrXAqDiptpCFXr0MIxxef29eXu8eRzS/dX43+fmU2P69lV4v+4+HHhT1YWh+3sK
|
||||
/sxaBR3GSM4qJzgkQqcydX11cbXZXG6urydg0IZioXWjLy+XF6/Xr2ZGrxzIqi18WQAA/JrOok0a
|
||||
+lFt4aJ+vhnIDDuqti+PAKqksdxUaMbmKF7VZzCoOMkk9x+FHQhRA66QjQDBVeMbeKdf5V8WjHAr
|
||||
9khpC7fJueXAGGEnTjFyRxKoBm2dBHC/T3Tg4hPQ4HZXAxsgHNAcWqbYrGDnQFLCQTMyOGBizQaW
|
||||
98uGLfAYWcgApQGn0ItG7ZgMvEcH5KGIPGUJA4b+5bEdzWkwCDjiPhJoCyOlVtPA0oGjPcwzHjXH
|
||||
BrJljPEIib5nTgR9HlCA/zC1go89Gc1UlhBzQxAJk5SJbdIB6MdIictrqyFLQ6kE3RRc0HPCCBGl
|
||||
y9hRDYmCdsI/CzqiOyWxGsak+0jD0jQeWLp6MtNQYGMVGPCBpZukQKcYi63bXUlV1OE+m5c8Eo2R
|
||||
Q4nk/zZgnx0wmv4R3HNWUx4B5Tmp2eugiYDa8tEkHo+TJAwhJ3SKx1VR0GOhE4xaalQKMek4aMyl
|
||||
8/yTzl9LwTVZPWdY7PeE0fuAiWpoWXDqkCcUGzU5lgmnIIqUVTXX9uml71G7klvZDckxvty3LGz9
|
||||
XSI0ldJtcx1P9KcFwLdpr/Jfq1KNSYfR71wfSMrAy6t5r6rzCp/R9fpmRl0d4xnYXN8sZonVKd+7
|
||||
lqWjNCae9qwIXTwtfgMAAP//AwDp3F89XgQAAA==
|
||||
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 853310125de31637-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:43:14 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '6017'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299486'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 102ms
x-request-id:
- req_c153504e2e53f18e15a640221b664aba
status:
code: 200
message: OK
@@ -380,15 +392,15 @@ interactions:
|
||||
is a force for good because it will help humans reach their full potential.\nEND
|
||||
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Write
|
||||
and then review an small paragraph on AI until it''s AMAZING\nAI: Artificial
|
||||
Intelligence, often abbreviated as AI, is a vast and multifaceted field. It
|
||||
encompasses a range of sub-disciplines and methodologies, each aimed at creating
|
||||
machines or software with a certain level of autonomy, decision-making capability,
|
||||
and, in certain cases, consciousness. These realms span from machine learning
|
||||
to robotics, natural language processing to cognitive computing, and so much
|
||||
more. Despite the diversity of its applications, at its core, AI strives to
|
||||
simulate or exceed human intelligence, fundamentally altering how we live, work,
|
||||
and perceive the world around us.\n\nNew summary:"}], "model": "gpt-4", "n":
|
||||
1, "stream": false, "temperature": 0.7}'
|
||||
Intelligence, often abbreviated as AI, is a vast field. It encompasses various
|
||||
sub-disciplines and technologies that aim to create machines and systems capable
|
||||
of performing tasks that would usually require human intelligence. These tasks
|
||||
include learning from experiences, understanding natural language, recognizing
|
||||
patterns, problem-solving, and decision making. The goal of AI is not just to
|
||||
replicate human intelligence but also to create systems that can perform tasks
|
||||
more efficiently and accurately. AI has the potential to revolutionize various
|
||||
sectors, including healthcare, finance, transportation, and more.\n\nNew summary:"}],
|
||||
"model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}'
|
||||
headers:
accept:
- application/json
@@ -397,16 +409,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1513'
- '1547'
content-type:
- application/json
cookie:
- __cf_bm=wNpUyP3WikFZ5owMt5Wt4RvvxL5RRbV5Lm6N3cYt0cQ-1707464961-1-AaRJSNtZhC0qEyOOtzVqZyGINmDySgK+NZEmXyReWDjlSfP0cTj8tUtP+DYB/ttT11hGeDfZUyYzGIz1jsRPjN4=;
_cfuvid=Ii_GgXvfh_uOU6kzTG.hYatLhnP.5qhsMn05KgacgMc-1707464961455-0-604800000
- __cf_bm=FHcpgH1eZBeZ3Rbcza28Oc6NWTXs_wBg2dFweW8n7iE-1707554585-1-AeO0TVKmSDiPyhIhqVNGXLU1AqKVAJroEV7Mq1JaqrBTJlB6hP4FrF9q/F66TJ4PAMcnMJ6YKH9wW4gQAx2jhis=;
_cfuvid=CyquHMSR3lyGv7DVvBmUUvZ_N5p0T0sRkvGYdcRgwUo-1707554585722-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.11.1
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -416,7 +428,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.11.1
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -426,22 +438,21 @@ interactions:
response:
body:
string: !!binary |
H4sIAAAAAAAAA1STQW/bMAyF7/kVhC+7OEHTBk2XW3YYFmyHAeuGFcNQMDJjs5FFTaTtZkX/+yAn
|
||||
bdaLD6Te0yfy+WkCUHBVrKBwDZpro5/e/PnY3314+Hbz+fv+i3XzcLjqftzpp59f46YryqyQ7QM5
|
||||
e1HNnLTRk7GEY9slQqPsOl9eLBfXi/fLy7HRSkU+y+po08X04np+dVI0wo60WMGvCQDA0/jNbKGi
|
||||
x2IFF+VLpSVVrKlYvR4CKJL4XClQldUwWFGem06CURhxbxuCpmsxAOpewRqC9QZMYEhsBBgqSNQz
|
||||
DYCgLXoPERPWCWMDEvLZLhh7YHungC3+5VDP4PZoM3ooIOR5JGooKPf0nwM9Ro8cONRgDRqsNyVI
|
||||
gnUy3rFj9LAJRt5zTcFRCZzNelQbwdrOG+/QkVEFOyZfzWBjwMH5riKFHhNLp6DddlqxOo6eQ+bJ
|
||||
WrJGKvFSM2kJ+WHILVWABuO2MlOLrhkVkkBlZwMmgoGtyU+iZMgBPPXkQXaAnUmQ9lBCRY6VJUxb
|
||||
3GcbhxG37NkO5Xh3lDx9Ru8PJTgJ6jJmINUZrDd5jjF6dpjzo5DvrLinpFTCtrNMyKbgJOWJWObW
|
||||
vDHltvNolGnp0RFVp83ymxmiN0oZq5EBBgLPPZUwSNqf6Cg5ymvKWRgk+WpWnKLz/Jo5L3VMss35
|
||||
DJ33r/UdB9bmPhGqhJwvNYlH+fME4PeY7e5NXIuYpI12b7KnkA0vrxdHv+L8G52784vlqWti6M+N
|
||||
q+V8ckIs9KBG7f2OQ00pJh6znkEnz5N/AAAA//8DAG6VJ3HiAwAA
|
||||
H4sIAAAAAAAAA1RSTW/bMAy951cQuuySBM0aL21uxYBhxW5FdhqGgpFpm60sqiKd9AP974PstMEu
|
||||
OpB6j4+P720G4Lh2W3C+Q/N9Courp/pH9TPZ8+53vNu8rL5Xq4fjjW52d9/CLzcvCNk/kLcP1NJL
|
||||
nwIZS5zaPhMaFdbV5mJTVevquhobvdQUCqxNtlgvLr6tLk+ITtiTui38mQEAvI1v0RZrenZbuJh/
|
||||
VHpSxZbc9vMTgMsSSsWhKqthNDc/N71EozjK3XUE3dBjBNRHBesIbm7BBI6ZjQBjXWoRMh2YjoCg
|
||||
PYYACTO2GVMHEgtgiMYB2L4oYI+vHNsl7CauTJok1gpHtg4Q9pmpATlQHhmlgZvbOdSkPvOeYwts
|
||||
gAoIB1SDhinUE/SAmWVQ0GG/qFk9p8CRdNJIvosSpGXSJdwa9BSL/WUjtHGtVjBM04C1bDgdBfRF
|
||||
jXoFjwn3gcqXRLmR3BcxdrIFDQYdMIQXyPQ0cP6wjaNRCNxS9DQ/+YVWpnQ4+ZmkmM0YytBMBwlD
|
||||
kcavdF6JvElW4OjDUJe5HWGwzmOmOTQc8cyeMWqSbFhIlu501ffPOARpU5Z9iU4cQvisNxxZu/tM
|
||||
qBLL6dUkTfD3GcDfMXbDf0lyKUuf7N7kkWIh/FpdT3zunPBz96o6NU0Mw7l+uV7PTgrd5PZ9w7Gl
|
||||
nDKPKSw6Z++zfwAAAP//AwBANdK6fAMAAA==
|
||||
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 852a842f29cffabe-SJC
- 8533103aae201637-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -451,7 +462,7 @@ interactions:
Content-Type:
- application/json
Date:
- Fri, 09 Feb 2024 07:49:38 GMT
- Sat, 10 Feb 2024 08:43:18 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -465,7 +476,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '6156'
- '2948'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -477,13 +488,13 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299636'
- '299629'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 72ms
- 74ms
x-request-id:
- req_c4d71f28b3724c8591d7a1c79a9773a8
- req_bf6f7315d0ac178598b80627d7b1af72
status:
code: 200
message: OK
@@ -2,25 +2,27 @@ interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nDelegate work to co-worker: Useful to delegate a specific task to
|
||||
one of the following co-workers: test role, test role2.\nThe input to this tool
|
||||
should be a pipe (|) separated text of length 3 (three), representing the co-worker
|
||||
you want to ask it to (one of the options), the task and all actual context
|
||||
you have for the task.\nFor example, `coworker|task|context`.\nAsk question
|
||||
to co-worker: Useful to ask a question, opinion or take from on of the following
|
||||
co-workers: test role, test role2.\nThe input to this tool should be a pipe
|
||||
(|) separated text of length 3 (three), representing the co-worker you want
|
||||
to ask it to (one of the options), the question and all actual context you have
|
||||
for the question.\n For example, `coworker|question|context`.\n\nTo use a tool,
|
||||
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
|
||||
Yes\nAction: the action to take, should be one of [Delegate work to co-worker,
|
||||
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
|
||||
the result of the action\n```\n\nWhen you have a response for your task, or
|
||||
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
|
||||
of your work so far:\nBegin! This is VERY important to you, your job depends
|
||||
on it!\n\nCurrent Task: Just say hi.\n"}], "model": "gpt-4", "n": 1, "stop":
|
||||
["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
tools:\n\nDelegate work to co-worker: Delegate work to co-worker(coworker: str,
|
||||
task: str, context: str) - Delegate a specific task to one of the following
|
||||
co-workers: test role2. The input to this tool should be the role of the coworker,
|
||||
the task you want them to do, and ALL necessary context to exectue the task,
|
||||
they know nothing about the task, so share absolute everything you know, don''t
|
||||
reference things but instead explain them.\nAsk question to co-worker: Ask question
|
||||
to co-worker(coworker: str, question: str, context: str) - Ask a specific question
|
||||
to one of the following co-workers: test role2. The input to this tool should
|
||||
be the role of the coworker, the question you have for them, and ALL necessary
|
||||
context to ask the question properly, they know nothing about the question,
|
||||
so share absolute everything you know, don''t reference things but instead explain
|
||||
them.\n\nTo use a tool, please use the exact following format:\n\n```\nThought:
|
||||
Do I need to use a tool? Yes\nAction: the tool you wanna use, should be one
|
||||
of [Delegate work to co-worker, Ask question to co-worker], just the name.\nAction
|
||||
Input: Any and all relevant information input and context for using the tool\nObservation:
|
||||
the result of using the tool\n```\n\nWhen you have a response for your task,
|
||||
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
|
||||
summary of your work so far:\nBegin! This is VERY important to you, your job
|
||||
depends on it!\n\nCurrent Task: Just say hi.\n"}], "model": "gpt-4", "n": 1,
|
||||
"stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
headers:
accept:
- application/json
@@ -29,13 +31,13 @@ interactions:
connection:
- keep-alive
content-length:
- '1652'
- '1825'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -45,7 +47,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -53,18 +55,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8gkPA1vnW1g25Qr3SJLWB5zeOsUe4\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1705198848,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? No\\nFinal
|
||||
Answer: Hi!\"\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 367,\n \"completion_tokens\":
|
||||
17,\n \"total_tokens\": 384\n },\n \"system_fingerprint\": null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SQzU7DMBCE73mKlc8tStSmLbmgih8BEiAECFRAlZtsE4PjNfZGQKu+O3KatuKy
|
||||
hxnP7LdeRwBCFSIDkVeS89rq/uSruFk9u+kjPfF92sx4tjq/G3h8Ob2ePoheSNDiA3PepY5yqq1G
|
||||
VmS2du5QMobWZByP0zSNJ2lr1FSgDrHScn/Yj0fJoEtUpHL0IoPXCABg3c7AZgr8ERnEvZ1So/ey
|
||||
RJHtHwEIRzooQnqvPEvDoncwczKMpsV9rKgpK87gjOAKDGIBTNB4BAlMpE/glt7MhTJSw9T4b3QZ
|
||||
XCrRdW32EJpK62gRgE2j9V5fKqN8NXcoPZmw0DPZbXwTAby3xzb/+IV1VFueM32iCYWD4+G2Txz+
|
||||
9eAmo85kYqkP+jCJo45Q+F/PWM+XypTorFPt7YEz2kR/AAAA//8DABJPZgfyAQAA
|
||||
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 845266dd5f1f1a96-GRU
- 85331c3028ba7aac-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -74,14 +77,14 @@ interactions:
Content-Type:
- application/json
Date:
- Sun, 14 Jan 2024 02:20:50 GMT
- Sat, 10 Feb 2024 08:51:26 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=TbP3njfV8Qa2w4gTk3GfLx8EOChqceUdui85lv8w_0s-1705198850-1-AVv9rXKeGNOAPhhVrRvcK49dv9odkct+so0djAQM52Bfzf/nc8ZTJ3zhk2LlfNTWSQ9dcT4UwZHjEHyP8LH39Xg=;
|
||||
path=/; expires=Sun, 14-Jan-24 02:50:50 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=PApKfMOxYmqCPfv1QzKEuqGGYEUQhozoSuKopWRTbzg-1707555086-1-AeIZ1ozsIHX7RW0ZxnKJ17u8m5eIvUTNkNMhFDdTwc71TeSLRieXLv/0waJkA48Sz1+FjLHulvMGaPnUEf7K19k=;
|
||||
path=/; expires=Sat, 10-Feb-24 09:21:26 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=y6p2l8rtoLMo6gBiQsUGfETrVWxPY83uCFZsS7Jozok-1705198850215-0-604800000;
|
||||
- _cfuvid=ivN7v5z_DwT9MCx4fk0bd86wOUph.ZASgr.z0qq5y2k-1707555086057-0-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
- chunked
@@ -94,7 +97,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1832'
- '899'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -106,15 +109,16 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299611'
- '299566'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 77ms
- 86ms
x-request-id:
- 12d633fe239a29022ea7206037da096a
http_version: HTTP/1.1
status_code: 200
- req_561f62ebd7103f1fd1e54ced4754dcce
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
|
||||
lines of conversation provided, adding onto the previous summary returning a
|
||||
@@ -126,7 +130,7 @@ interactions:
|
||||
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
|
||||
is a force for good because it will help humans reach their full potential.\nEND
|
||||
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Just
|
||||
say hi.\nAI: Hi!\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false,
|
||||
say hi.\nAI: Hi\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false,
|
||||
"temperature": 0.7}'
|
||||
headers:
accept:
@@ -136,16 +140,16 @@ interactions:
connection:
- keep-alive
content-length:
- '867'
- '866'
content-type:
- application/json
cookie:
- __cf_bm=TbP3njfV8Qa2w4gTk3GfLx8EOChqceUdui85lv8w_0s-1705198850-1-AVv9rXKeGNOAPhhVrRvcK49dv9odkct+so0djAQM52Bfzf/nc8ZTJ3zhk2LlfNTWSQ9dcT4UwZHjEHyP8LH39Xg=;
|
||||
_cfuvid=y6p2l8rtoLMo6gBiQsUGfETrVWxPY83uCFZsS7Jozok-1705198850215-0-604800000
|
||||
- __cf_bm=PApKfMOxYmqCPfv1QzKEuqGGYEUQhozoSuKopWRTbzg-1707555086-1-AeIZ1ozsIHX7RW0ZxnKJ17u8m5eIvUTNkNMhFDdTwc71TeSLRieXLv/0waJkA48Sz1+FjLHulvMGaPnUEf7K19k=;
|
||||
_cfuvid=ivN7v5z_DwT9MCx4fk0bd86wOUph.ZASgr.z0qq5y2k-1707555086057-0-604800000
|
||||
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -155,7 +159,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -163,19 +167,20 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8gkPCF34yiaqHuTbro7PMBexYagyU\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1705198850,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"The human asks the AI to simply say hi,
|
||||
and the AI responds with \\\"Hi!\\\"\"\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
144,\n \"completion_tokens\": 18,\n \"total_tokens\": 162\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SQT0sDMRDF7/sphpxb2dW2lt48qQgFUbyolDQ73aRmM2lmFq3S7y7Z/tNLIPPy
|
||||
Xn7zfgoA5Wo1A2WsFtNGP5xu6nl5a76f6ul9s35plo9JwsO6ruaXk40aZAct12jk6Low1EaP4ijs
|
||||
ZZNQC+bU6rq8Ho/H5XTSCy3V6LOtiTIcDctJdXVwWHIGWc3gtQAA+OnPzBZq/FIzKAfHSYvMukE1
|
||||
Oz0CUIl8nijN7Fh0EDU4i4aCYOhxny2C7VodwAWW1BlhEItwcw9CsO5YgPUWrBvk+6d1xh71hBwp
|
||||
1Aw6xkQxOS3ot/DpxMKbunNv6kId/tydYD01MdEyLxY670/zlQuO7SKhZgoZjIXi3r4rAN77Urp/
|
||||
e6qYqI2yEPrAkAOr0Wifp879n9XL6iAKifZ/XJNxcSBUvGXBdrFyocEUk+s7ypzFrvgFAAD//wMA
|
||||
D9d32RoCAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 845266ee78dc1a96-GRU
|
||||
- 85331c380f547aac-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -185,7 +190,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 14 Jan 2024 02:20:52 GMT
|
||||
- Sat, 10 Feb 2024 08:51:27 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -199,7 +204,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '1287'
|
||||
- '1035'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -217,34 +222,38 @@ interactions:
|
||||
x-ratelimit-reset-tokens:
|
||||
- 40ms
|
||||
x-request-id:
|
||||
- 2a891eacb4940e36bec1689b1ab92ee0
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_d47c41d14baa963cd2a25770f0f33620
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour
|
||||
personal goal is: test goal2TOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
|
||||
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
|
||||
work to co-worker: Useful to delegate a specific task to one of the following
|
||||
co-workers: test role, test role2.\nThe input to this tool should be a pipe
|
||||
(|) separated text of length 3 (three), representing the co-worker you want
|
||||
to ask it to (one of the options), the task and all actual context you have
|
||||
for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker:
|
||||
Useful to ask a question, opinion or take from on of the following co-workers:
|
||||
test role, test role2.\nThe input to this tool should be a pipe (|) separated
|
||||
text of length 3 (three), representing the co-worker you want to ask it to (one
|
||||
of the options), the question and all actual context you have for the question.\n
|
||||
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
|
||||
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
|
||||
action to take, should be one of [get_final_answer, Delegate work to co-worker,
|
||||
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
|
||||
the result of the action\n```\n\nWhen you have a response for your task, or
|
||||
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
|
||||
of your work so far:\nBegin! This is VERY important to you, your job depends
|
||||
on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer`
|
||||
tool.\nThis is the context you''re working with:\nHi!\n"}], "model": "gpt-4",
|
||||
"n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
|
||||
str) - Delegate a specific task to one of the following co-workers: test role.
|
||||
The input to this tool should be the role of the coworker, the task you want
|
||||
them to do, and ALL necessary context to exectue the task, they know nothing
|
||||
about the task, so share absolute everything you know, don''t reference things
|
||||
but instead explain them.\nAsk question to co-worker: Ask question to co-worker(coworker:
|
||||
str, question: str, context: str) - Ask a specific question to one of the following
|
||||
co-workers: test role. The input to this tool should be the role of the coworker,
|
||||
the question you have for them, and ALL necessary context to ask the question
|
||||
properly, they know nothing about the question, so share absolute everything
|
||||
you know, don''t reference things but instead explain them.\n\nTo use a tool,
|
||||
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
|
||||
Yes\nAction: the tool you wanna use, should be one of [get_final_answer, Delegate
|
||||
work to co-worker, Ask question to co-worker], just the name.\nAction Input:
|
||||
Any and all relevant information input and context for using the tool\nObservation:
|
||||
the result of using the tool\n```\n\nWhen you have a response for your task,
|
||||
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
|
||||
summary of your work so far:\nBegin! This is VERY important to you, your job
|
||||
depends on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using
|
||||
the `get_final_answer` tool non-stop\nThis is the context you''re working with:\nHi\n"}],
|
||||
"model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
|
||||
0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -253,13 +262,13 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1923'
|
||||
- '2101'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.7.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -269,7 +278,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.7.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -277,19 +286,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8gkPEml4Ifldq51knwwASO3nJroK3\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1705198852,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
|
||||
get_final_answer\\nAction Input: [1,2,3]\"\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
432,\n \"completion_tokens\": 28,\n \"total_tokens\": 460\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1RQy27CMBC85ytWPkMFhfDIparUQ+mhJ4pUAYqMsyRuHa+JN4UW8e+VQ4D24sPM
|
||||
znhmjhGA0JlIQKhCsiqd6U522Wt/+rVT+UEt9s67xXQ03728LUY/RSw6QUGbD1R8Ud0pKp1B1mTP
|
||||
tKpQMgbX/rg3juO4Nxk3REkZmiDLHXeH3d6oP2gVBWmFXiSwjAAAjs0bstkMDyKBXueClOi9zFEk
|
||||
1yMAUZEJiJDea8/SsujcSEWW0TZx5wXVecEJPBHMwCJmwAS1R5DAROYB3tGv7KMKZRLIkdOtttKk
|
||||
0vo9VhcGZtbVnMByJZ71SqxF+9npmtJQ7irahEa2NuaKb7XVvkgrlJ5sSOSZ3Fl+igDWzRr1v4LC
|
||||
VVQ6Tpk+0QbDYTw9+4nb8Df2ftiSTCzNH9VkELUJhf/2jGWolmPlKt2ME3JGp+gXAAD//wMAVFuU
|
||||
bxMCAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 845266f9fc150316-GRU
|
||||
- 85331c3f6c38cfc4-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -299,14 +309,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 14 Jan 2024 02:20:53 GMT
|
||||
- Sat, 10 Feb 2024 08:51:28 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
|
||||
path=/; expires=Sun, 14-Jan-24 02:50:53 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=2FkgHfXDTEWwtJx9fuYC7bMgshbFiKyrz77uUDT7zZs-1707555088-1-Aepn1cZI06+lHA4fg6WnGlRYQvOzF/0+FklFgWjzvCx0c9PWNVB0SHCULztQ+AdxbloscbFE14+qlktmxccvWLA=;
|
||||
path=/; expires=Sat, 10-Feb-24 09:21:28 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000;
|
||||
- _cfuvid=euVu9TT31y3M3IcbZHjgSQIkmaipgL6G3sfBSomB.wA-1707555088785-0-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
@@ -319,7 +329,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '1570'
|
||||
- '1442'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -331,42 +341,48 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299545'
|
||||
- '299499'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 91ms
|
||||
- 100ms
|
||||
x-request-id:
|
||||
- 133ee7e16a1491cfd7daa90ea3e42b74
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_b76f1918aa33f67b05eee313ca29e621
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour
|
||||
personal goal is: test goal2TOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
|
||||
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
|
||||
work to co-worker: Useful to delegate a specific task to one of the following
|
||||
co-workers: test role, test role2.\nThe input to this tool should be a pipe
|
||||
(|) separated text of length 3 (three), representing the co-worker you want
|
||||
to ask it to (one of the options), the task and all actual context you have
|
||||
for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker:
|
||||
Useful to ask a question, opinion or take from on of the following co-workers:
|
||||
test role, test role2.\nThe input to this tool should be a pipe (|) separated
|
||||
text of length 3 (three), representing the co-worker you want to ask it to (one
|
||||
of the options), the question and all actual context you have for the question.\n
|
||||
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
|
||||
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
|
||||
action to take, should be one of [get_final_answer, Delegate work to co-worker,
|
||||
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
|
||||
the result of the action\n```\n\nWhen you have a response for your task, or
|
||||
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
|
||||
of your work so far:\nBegin! This is VERY important to you, your job depends
|
||||
on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer`
|
||||
tool.\nThis is the context you''re working with:\nHi!\nThought: Do I need to
|
||||
use a tool? Yes\nAction: get_final_answer\nAction Input: [1,2,3]\nObservation:
|
||||
42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
|
||||
false, "temperature": 0.7}'
|
||||
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
|
||||
str) - Delegate a specific task to one of the following co-workers: test role.
|
||||
The input to this tool should be the role of the coworker, the task you want
|
||||
them to do, and ALL necessary context to exectue the task, they know nothing
|
||||
about the task, so share absolute everything you know, don''t reference things
|
||||
but instead explain them.\nAsk question to co-worker: Ask question to co-worker(coworker:
|
||||
str, question: str, context: str) - Ask a specific question to one of the following
|
||||
co-workers: test role. The input to this tool should be the role of the coworker,
|
||||
the question you have for them, and ALL necessary context to ask the question
|
||||
properly, they know nothing about the question, so share absolute everything
|
||||
you know, don''t reference things but instead explain them.\n\nTo use a tool,
|
||||
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
|
||||
Yes\nAction: the tool you wanna use, should be one of [get_final_answer, Delegate
|
||||
work to co-worker, Ask question to co-worker], just the name.\nAction Input:
|
||||
Any and all relevant information input and context for using the tool\nObservation:
|
||||
the result of using the tool\n```\n\nWhen you have a response for your task,
|
||||
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
|
||||
summary of your work so far:\nBegin! This is VERY important to you, your job
|
||||
depends on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using
|
||||
the `get_final_answer` tool non-stop\nThis is the context you''re working with:\nHi\nThought:
|
||||
Do I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [\"Hi\"]\nObservation:
|
||||
Actually, I used too many tools, so I''ll stop now and give you my absolute
|
||||
BEST Final answer NOW, using the expected format: ```\nThought: Do I need to
|
||||
use a tool? No\nFinal Answer: [your response here]```\nThought: "}], "model":
|
||||
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
|
||||
0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -375,16 +391,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2037'
|
||||
- '2419'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
|
||||
_cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000
|
||||
- __cf_bm=2FkgHfXDTEWwtJx9fuYC7bMgshbFiKyrz77uUDT7zZs-1707555088-1-Aepn1cZI06+lHA4fg6WnGlRYQvOzF/0+FklFgWjzvCx0c9PWNVB0SHCULztQ+AdxbloscbFE14+qlktmxccvWLA=;
|
||||
_cfuvid=euVu9TT31y3M3IcbZHjgSQIkmaipgL6G3sfBSomB.wA-1707555088785-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.7.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -394,7 +410,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.7.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -402,19 +418,22 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8gkPGwVu7CvxvQd3ZmBPHwz0WYbVQ\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1705198854,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
|
||||
get_final_answer\\nAction Input: [4,5,6]\"\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
469,\n \"completion_tokens\": 26,\n \"total_tokens\": 495\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA3RSTY/aMBS88ytGvvQSKhDLss2lAm2rIlVspa5arQARkzySFMcva7+UTVf73yuH
|
||||
L/XQiw8znvG8N37tAarMVAyVFlrSqjb9u+dsMbLPRTX6+kO4SfdPB/rybfbn3j7M9ioKCt7+olTO
|
||||
qvcpV7UhKdke6dSRFgquw8lgMh6PB3cfOqLijEyQ5bX0b/qD2+HopCi4TMmrGMseALx2Z8hmM3pR
|
||||
MQbRGanIe52Tii+XAOXYBERp70sv2oqKrmTKVsh2ce8Zc1iiDMJoPEFDmM1HPJFf2WkaZoiRk2x2
|
||||
pdVmo60/kDszmNu6kRjLlZqm0mhj2gjz4BP8GJW2befnI3jG/J0x8MI1LB+gbYa8/E1ouUHVQm89
|
||||
m0YIs0/fH/E5PIfjc1g8/IzQ+NLmkIJALzWlQhl27CotMZIkwWPBTV5IjP+OtOCT67RzjbFsuXFw
|
||||
5Gu2nlCQo3WSJCu1VqdlvV22bDivHW9DI7Yx5oLvSlv6YuNIe7Zho2G+o/ytB6y7Npt/ClK146qW
|
||||
jfCebDAc35zaVNePc2UnZ1JYtLnit8NB75RQ+dYLVaGjnFztyq7ckLP31vsLAAD//wMAzsuhnNMC
|
||||
AAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8452670589920316-GRU
|
||||
- 85331c496beccfc4-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -424,7 +443,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 14 Jan 2024 02:20:55 GMT
|
||||
- Sat, 10 Feb 2024 08:51:32 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -438,7 +457,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '983'
|
||||
- '2973'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -450,259 +469,16 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299517'
|
||||
- '299421'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 96ms
|
||||
- 115ms
|
||||
x-request-id:
|
||||
- cb1d820e78376ed344c16e6051cab6bc
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour
|
||||
personal goal is: test goal2TOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
|
||||
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
|
||||
work to co-worker: Useful to delegate a specific task to one of the following
|
||||
co-workers: test role, test role2.\nThe input to this tool should be a pipe
|
||||
(|) separated text of length 3 (three), representing the co-worker you want
|
||||
to ask it to (one of the options), the task and all actual context you have
|
||||
for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker:
|
||||
Useful to ask a question, opinion or take from on of the following co-workers:
|
||||
test role, test role2.\nThe input to this tool should be a pipe (|) separated
|
||||
text of length 3 (three), representing the co-worker you want to ask it to (one
|
||||
of the options), the question and all actual context you have for the question.\n
|
||||
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
|
||||
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
|
||||
action to take, should be one of [get_final_answer, Delegate work to co-worker,
|
||||
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
|
||||
the result of the action\n```\n\nWhen you have a response for your task, or
|
||||
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
|
||||
of your work so far:\nBegin! This is VERY important to you, your job depends
|
||||
on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer`
|
||||
tool.\nThis is the context you''re working with:\nHi!\nThought: Do I need to
|
||||
use a tool? Yes\nAction: get_final_answer\nAction Input: [1,2,3]\nObservation:
|
||||
42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: [4,5,6]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
|
||||
["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2142'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
|
||||
_cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.7.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.7.1
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8gkPHbI2gWbt2veTT8auCPCstOnZh\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1705198855,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
|
||||
get_final_answer\\nAction Input: [7,8,9]\"\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
503,\n \"completion_tokens\": 26,\n \"total_tokens\": 529\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8452670dca090316-GRU
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 14 Jan 2024 02:20:57 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2350'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299492'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 101ms
|
||||
x-request-id:
|
||||
- b40ffecf3dc8f547453bfb166c8c52dd
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour
|
||||
personal goal is: test goal2TOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
|
||||
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
|
||||
work to co-worker: Useful to delegate a specific task to one of the following
|
||||
co-workers: test role, test role2.\nThe input to this tool should be a pipe
|
||||
(|) separated text of length 3 (three), representing the co-worker you want
|
||||
to ask it to (one of the options), the task and all actual context you have
|
||||
for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker:
|
||||
Useful to ask a question, opinion or take from on of the following co-workers:
|
||||
test role, test role2.\nThe input to this tool should be a pipe (|) separated
|
||||
text of length 3 (three), representing the co-worker you want to ask it to (one
|
||||
of the options), the question and all actual context you have for the question.\n
|
||||
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
|
||||
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
|
||||
action to take, should be one of [get_final_answer, Delegate work to co-worker,
|
||||
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
|
||||
the result of the action\n```\n\nWhen you have a response for your task, or
|
||||
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
|
||||
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
|
||||
of your work so far:\nBegin! This is VERY important to you, your job depends
|
||||
on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer`
|
||||
tool.\nThis is the context you''re working with:\nHi!\nThought: Do I need to
|
||||
use a tool? Yes\nAction: get_final_answer\nAction Input: [1,2,3]\nObservation:
|
||||
42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
|
||||
Input: [4,5,6]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction:
|
||||
get_final_answer\nAction Input: [7,8,9]\nObservation: I''ve used too many tools
|
||||
for this task. I''m going to give you my absolute BEST Final answer now and
|
||||
not use any more tools.\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
|
||||
"stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2368'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
|
||||
_cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.7.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.7.1
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8gkPKsrN7s3j8bLplwjkAD2c7E6RK\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1705198858,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
|
||||
I've used too many tools for this task. I'm going to give you my absolute BEST
|
||||
Final answer now and not use any more tools.\"\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
563,\n \"completion_tokens\": 42,\n \"total_tokens\": 605\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8452671e0b500316-GRU
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 14 Jan 2024 02:21:00 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2080'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299436'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 112ms
|
||||
x-request-id:
|
||||
- b2b7142dc669054b82773d62077e9918
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_90dad1554de8c627760a03ab5313ce5d
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
|
||||
lines of conversation provided, adding onto the previous summary returning a
|
||||
@@ -714,11 +490,10 @@ interactions:
|
||||
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
|
||||
is a force for good because it will help humans reach their full potential.\nEND
|
||||
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Don''t
|
||||
give a Final Answer, instead keep using the `get_final_answer` tool.\nThis is
|
||||
the context you''re working with:\nHi!\nAI: I''ve used too many tools for this
|
||||
task. I''m going to give you my absolute BEST Final answer now and not use any
|
||||
more tools.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature":
|
||||
0.7}'
|
||||
give a Final Answer, instead keep using the `get_final_answer` tool non-stop\nThis
|
||||
is the context you''re working with:\nHi\nAI: Agent stopped due to iteration
|
||||
limit or time limit.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream":
|
||||
false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -727,16 +502,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1096'
|
||||
- '1031'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
|
||||
_cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000
|
||||
- __cf_bm=2FkgHfXDTEWwtJx9fuYC7bMgshbFiKyrz77uUDT7zZs-1707555088-1-Aepn1cZI06+lHA4fg6WnGlRYQvOzF/0+FklFgWjzvCx0c9PWNVB0SHCULztQ+AdxbloscbFE14+qlktmxccvWLA=;
|
||||
_cfuvid=euVu9TT31y3M3IcbZHjgSQIkmaipgL6G3sfBSomB.wA-1707555088785-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.7.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -746,7 +521,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.7.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -754,21 +529,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8gkPMD1hkXKA5DP8MyMJNpAxJXhzj\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1705198860,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"The human instructs the AI not to give
|
||||
a final answer but to continue using the `get_final_answer` tool. However, the
|
||||
AI responds that it has already used many tools for the task and chooses to
|
||||
provide its best final answer without using any more tools.\"\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
196,\n \"completion_tokens\": 53,\n \"total_tokens\": 249\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1RRTU8jMQy9z6+wcm6raekH7Q3tHuAC0mq10oJQCTNmJpDY2dhTFqH+d5SZfohL
|
||||
pLznZz8/fxYAxtVmA6ZqrVYh+vHlv/p2eXf9a/5nfcft6/3fm58rb2ed/qDd0oyygp9fsdKjalJx
|
||||
iB7VMQ10ldAq5q7TVblaLBbletYTgWv0WdZEHc/H5XJ6cVC07CoUs4GHAgDgs3+zN6rxv9lAOToi
|
||||
AUVsg2ZzKgIwiX1GjBVxopbUjM5kxaRIvd3fLULbBUvgSDR1lQpoi3B1A8QKytC4HYKFF0fWgyV5
|
||||
xwSW6ky9IUboxFHTa54a1G1ftx3qnkCZPRDTWJTjBK75HXeYRscRtkFSyJxA3WHuiU5bTOAUk80B
|
||||
gnfBKXACdQGH38QcdtmfQvDcxMTPOTDqvD/hL46ctNuEVpjywnnYIN8XAI992N23/ExMHKJuld+Q
|
||||
pL/Zeuhnznc9s/PpgVRW68/4bFYWB4dGPkQx5GwaTDG5Pvvss9gXXwAAAP//AwAJcyracgIAAA==
|
||||
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8452672d1bba0316-GRU
- 85331c5e2baccfc4-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -778,7 +552,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sun, 14 Jan 2024 02:21:03 GMT
- Sat, 10 Feb 2024 08:51:35 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -792,7 +566,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2683'
- '2537'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -804,13 +578,14 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299742'
- '299758'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 51ms
- 48ms
x-request-id:
- c00193638e659df4a60ef9425649fe2c
http_version: HTTP/1.1
status_code: 200
- req_c2fe098b8481dac7ed32f9d683eb36ac
status:
code: 200
message: OK
version: 1
File diff suppressed because it is too large (4 files)
@@ -1,21 +1,19 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew
|
||||
test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou
|
||||
have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float
|
||||
- Useful for when you need to multiply two numbers together.\n The input
|
||||
to this tool should be a comma separated list of numbers of\n length
|
||||
two, representing the two numbers you want to multiply together.\n For
|
||||
example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use
|
||||
a tool, please use the exact following format:\n\n```\nThought: Do I need to
|
||||
use a tool? Yes\nAction: the action to take, should be one of [multiplier],
|
||||
just the name.\nAction Input: the input to the action\nObservation: the result
|
||||
of the action\n```\n\nWhen you have a response for your task, or if you do not
|
||||
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
|
||||
a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary
|
||||
of your work so far:\n \nBegin! This is VERY important to you, your job depends
|
||||
on it!\n\nCurrent Task: What is 3 times 5?\n\n"}], "model": "gpt-4", "n": 1,
|
||||
"stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float
|
||||
- Useful for when you need to multiply two numbers together.\n\nTo use a tool,
|
||||
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
|
||||
Yes\nAction: the tool you wanna use, should be one of [multiplier], just the
|
||||
name.\nAction Input: Any and all relevant information input and context for
|
||||
using the tool\nObservation: the result of using the tool\n```\n\nWhen you have
|
||||
a response for your task, or if you do not need to use a tool, you MUST use
|
||||
the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your
|
||||
response here]```This is the summary of your work so far:\nBegin! This is VERY
|
||||
important to you, your job depends on it!\n\nCurrent Task: What is 3 times 5?\n"}],
|
||||
"model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
|
||||
0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -24,13 +22,13 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1231'
|
||||
- '1047'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.6.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -40,7 +38,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.6.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -48,18 +46,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8b7eDhOwdMJw0817wIqiD9MoXVRU8\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1703858225,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
|
||||
multiplier\\nAction Input: 3,5\"\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 264,\n \"completion_tokens\":
|
||||
23,\n \"total_tokens\": 287\n },\n \"system_fingerprint\": null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRS0/DMBCE7/kVqz2nqK8AzQUhEIIbIDggiqrU2SQujtfYGwmo+t+R0ycXH2Z2
|
||||
Vt+s1wkA6hJzQNUUolpnBpdfarby1ROZ2fju9fGesko/X36W05vfc41pTPByRUr2qTPFrTMkmu3W
|
||||
Vp4Kobh1dDG8yLLhdDTrjZZLMjFWOxlMB8Pz0WSXaFgrCpjDewIAsO7fyGZL+sYchuleaSmEoibM
|
||||
D0MA6NlEBYsQdJDCCqZHU7EVsj3uS8Nd3UgOtwwPYIlKEIYuEBQgzOYK3ijM7bWKZXJoOyPaGU1+
|
||||
r8GDdZ3ksJ5jpX2Qhe3aJfk55jBJYY6BFNvyRM02uCPZHCoYrp3nZaxrO2MOeqWtDs3CUxHYRtwg
|
||||
7LbxTQLw0Z+q+9cenefWyUL4k2xcOB6Ptvvw+CtHdzLZmcJSmJNUNk12hBh+glC7qLStyTuv+8tF
|
||||
zmST/AEAAP//AwCr+Q67MAIAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 83d28cd70f9f1cd9-GRU
|
||||
- 8532aa4bca4e679a-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -69,14 +69,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 29 Dec 2023 13:57:07 GMT
|
||||
- Sat, 10 Feb 2024 07:33:41 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=;
|
||||
path=/; expires=Fri, 29-Dec-23 14:27:07 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=iA8sDHDKVnfosmI8wG_ElV9lor.u9ivoU2KD_Uw2Ck0-1707550421-1-AakLurCxUahX1wP/EqH55USvoahi23iv5/ZX2jxvr6ltSDBhzk8J0ipuSFcgyazwnWfYsqu0n467tSBqzuyTan0=;
|
||||
path=/; expires=Sat, 10-Feb-24 08:03:41 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000;
|
||||
- _cfuvid=CpE5jxQFzpK609ZrTNgy1kxDFHYN2x4AY1XCThUEeQk-1707550421579-0-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
@@ -89,7 +89,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '1725'
|
||||
- '1673'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -98,43 +98,39 @@ interactions:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299717'
|
||||
x-ratelimit-remaining-tokens_usage_based:
|
||||
- '299717'
|
||||
- '299762'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 56ms
|
||||
x-ratelimit-reset-tokens_usage_based:
|
||||
- 56ms
|
||||
- 47ms
|
||||
x-request-id:
|
||||
- 689757a1e38baf0844ec69f20525c5d7
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_e5461f12abf90cd87cea37b9c47989e0
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew
|
||||
test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou
|
||||
have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float
|
||||
- Useful for when you need to multiply two numbers together.\n The input
|
||||
to this tool should be a comma separated list of numbers of\n length
|
||||
two, representing the two numbers you want to multiply together.\n For
|
||||
example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use
|
||||
a tool, please use the exact following format:\n\n```\nThought: Do I need to
|
||||
use a tool? Yes\nAction: the action to take, should be one of [multiplier],
|
||||
just the name.\nAction Input: the input to the action\nObservation: the result
|
||||
of the action\n```\n\nWhen you have a response for your task, or if you do not
|
||||
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
|
||||
a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary
|
||||
of your work so far:\n \nBegin! This is VERY important to you, your job depends
|
||||
on it!\n\nCurrent Task: What is 3 times 5?\nThought: Do I need to use a tool?
|
||||
Yes\nAction: multiplier\nAction Input: 3,5\nObservation: 15\nThought: \n"}],
|
||||
"model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
|
||||
0.7}'
|
||||
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
|
||||
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
|
||||
Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''},
|
||||
''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number:
|
||||
int, second_number: int) -> float - Useful for when you need to multiply two
|
||||
numbers together.\n\nUse this text to inform a valid ouput schema:\nThought:
|
||||
Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\":
|
||||
3, \"second_number\": 5}\n\nThe output should be formatted as a JSON instance
|
||||
that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
|
||||
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
|
||||
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
|
||||
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
|
||||
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
|
||||
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
|
||||
\"Function Name\", \"description\": \"The name of the function to be called.\",
|
||||
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
|
||||
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
|
||||
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
|
||||
"n": 1, "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -143,16 +139,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1335'
|
||||
- '1494'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=;
|
||||
_cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000
|
||||
- __cf_bm=iA8sDHDKVnfosmI8wG_ElV9lor.u9ivoU2KD_Uw2Ck0-1707550421-1-AakLurCxUahX1wP/EqH55USvoahi23iv5/ZX2jxvr6ltSDBhzk8J0ipuSFcgyazwnWfYsqu0n467tSBqzuyTan0=;
|
||||
_cfuvid=CpE5jxQFzpK609ZrTNgy1kxDFHYN2x4AY1XCThUEeQk-1707550421579-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.6.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -162,7 +158,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.6.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -170,18 +166,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8b7eG2qnUr6SZcankL5AqkwIJvYFc\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1703858228,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
|
||||
3 times 5 is 15.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 297,\n \"completion_tokens\":
|
||||
22,\n \"total_tokens\": 319\n },\n \"system_fingerprint\": null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SRO2/cMBCEe/2KBWtdIN0jF6t1kRQG7MApDISBjketJNrkUiFX8AUH/feAupfd
|
||||
sNiZbzE7PGYAwjSiAqF7xdoNdvHtr76zT+XDy+HXz/v3t2e6f/xelHx4ennmpcgT4fevqPlCfdHe
|
||||
DRbZeDrJOqBiTFvLbbHdbIr1spwF5xu0CesGXqwXxddydSZ6bzRGUcHvDADgOL8pGzV4EBUU+WXi
|
||||
MEbVoaiuJgARvE0ToWI0kRWxyG+i9sRIc9wfGBBMBAXvaO2i9cEpZmzAUMI0gm+Be4Soe3SqkiRp
|
||||
t9u9Rk+SjpIApGhH0unWmpRDKSqQwo2WzWANBinyk0uFbnRIHJNjJmfWhMg1jW6frBWs8osSUXtq
|
||||
PkibpEySpjmBOJ8zXXuwvhuC36fOaLT2Om8NmdjXAVX0lG6O7IcTPmUAf+a+x08ViiF4N3DN/g0p
|
||||
LVwtN6d94va1N3V9dxbZs7IfqO06OycU8V9kdHVrqMMwBDPXn3JmU/YfAAD//wMAtRBeM3UCAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 83d28ce3ab3b1cd9-GRU
|
||||
- 8532aa5709e5679a-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -191,7 +189,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 29 Dec 2023 13:57:09 GMT
|
||||
- Sat, 10 Feb 2024 07:33:44 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -205,7 +203,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '1568'
|
||||
- '2203'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -214,24 +212,128 @@ interactions:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299661'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 67ms
|
||||
x-request-id:
|
||||
- req_a428e78fc8e9d475e463316f65950fa9
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
|
||||
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
|
||||
tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float
|
||||
- Useful for when you need to multiply two numbers together.\n\nTo use a tool,
|
||||
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
|
||||
Yes\nAction: the tool you wanna use, should be one of [multiplier], just the
|
||||
name.\nAction Input: Any and all relevant information input and context for
|
||||
using the tool\nObservation: the result of using the tool\n```\n\nWhen you have
|
||||
a response for your task, or if you do not need to use a tool, you MUST use
|
||||
the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your
|
||||
response here]```This is the summary of your work so far:\nBegin! This is VERY
|
||||
important to you, your job depends on it!\n\nCurrent Task: What is 3 times 5?\nThought:
|
||||
Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\":
|
||||
3, \"second_number\": 5}\nObservation: 15\nThought: "}], "model": "gpt-4", "n":
|
||||
1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1191'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=iA8sDHDKVnfosmI8wG_ElV9lor.u9ivoU2KD_Uw2Ck0-1707550421-1-AakLurCxUahX1wP/EqH55USvoahi23iv5/ZX2jxvr6ltSDBhzk8J0ipuSFcgyazwnWfYsqu0n467tSBqzuyTan0=;
|
||||
_cfuvid=CpE5jxQFzpK609ZrTNgy1kxDFHYN2x4AY1XCThUEeQk-1707550421579-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SQW0sDMRCF3/dXDHluy/aybd0XEYqiiKAiFFRKmp1uY7OZmMyipfS/S7bbiy95
|
||||
OGfOyTezSwCELkQOQq0lq8qZ7vRbXRGrVz2fPPKd3z7MeSPty2r29uy06MQELb9Q8THVU1Q5g6zJ
|
||||
HmzlUTLG1v4knWRZOhqMGqOiAk2MlY67o2467g/bxJq0wiByeE8AAHbNG9lsgb8ih7RzVCoMQZYo
|
||||
8tMQgPBkoiJkCDqwtCw6Z1ORZbQN7ozgHixiAUxQBwQJTGSu4Yk+7K220sCNDT/ocxgC6woDZKAD
|
||||
9LOeaAv3JxJDpfO0jNS2Nuakr7TVYb3wKAPZ+Gtgcof4PgH4bDau/y0hnKfK8YJpgzYWDsbDQ584
|
||||
H/fCHbQmE0tzoU+zpCUUYRsYq8VK2xK987o5QORM9skfAAAA//8DACAb4vP3AQAA
|
||||
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532aa66fd5c679a-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:33:45 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1275'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299693'
x-ratelimit-remaining-tokens_usage_based:
- '299693'
- '299728'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 61ms
x-ratelimit-reset-tokens_usage_based:
- 61ms
- 54ms
x-request-id:
- 66a02e3e08e3f6b5b29fb04cd19f50a0
http_version: HTTP/1.1
status_code: 200
- req_dd069a32b6514615a79356287e845a29
status:
code: 200
message: OK
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
|
||||
lines of conversation provided, adding onto the previous summary returning a
|
||||
@@ -257,12 +359,12 @@ interactions:
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=;
|
||||
_cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000
|
||||
- __cf_bm=iA8sDHDKVnfosmI8wG_ElV9lor.u9ivoU2KD_Uw2Ck0-1707550421-1-AakLurCxUahX1wP/EqH55USvoahi23iv5/ZX2jxvr6ltSDBhzk8J0ipuSFcgyazwnWfYsqu0n467tSBqzuyTan0=;
|
||||
_cfuvid=CpE5jxQFzpK609ZrTNgy1kxDFHYN2x4AY1XCThUEeQk-1707550421579-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.6.1
|
||||
- OpenAI/Python 1.12.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -272,7 +374,7 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.6.1
|
||||
- 1.12.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
@@ -280,19 +382,19 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8b7eIfmBgQoWGLW4jiQZTu3qzEBBL\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1703858230,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"The human asks the AI to calculate 3
|
||||
times 5 and the AI responds that 3 times 5 is 15.\"\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
155,\n \"completion_tokens\": 26,\n \"total_tokens\": 181\n },\n \"system_fingerprint\":
|
||||
null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SQT0/CQBDF7/0Ukz0XQgsV6E0TD2qiHtSYGEOW7dCu7D92p4lC+O5mS6F62cO8
|
||||
93v7Zg4JAJMVK4GJhpPQTo0WO7F0ezl7549q+bx/eMp3t+v8Zlrdv27eWBoJu/5CQWdqLKx2Ckla
|
||||
c5KFR04YU7P5ZF4Uk1ledIK2FaqI1Y5Gs9HkKpv2RGOlwMBK+EgAAA7dG7uZCr9ZCZP0PNEYAq+R
|
||||
lRcTAPNWxQnjIchA3BBLB1FYQ2i6ui8NQtNqboCHbQBqEK7vgCwIrkSrOCFMgaTGAEUK3FRni8fg
|
||||
rKkiwmnwgAyQFWPWf3a8tFS2dt6u40amVeoy30gjQ7PyyIM1sVEg6074MQH47K7R/luQOW+1oxXZ
|
||||
LZoYmBXFKY8Nhx/UfN6LZImrP9QiT/qGLPwEQr3aSFOjd152x4k9k2PyCwAA//8DAHepd0ITAgAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 83d28cf118221cd9-GRU
|
||||
- 8532aa703b92679a-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -302,7 +404,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 29 Dec 2023 13:57:12 GMT
|
||||
- Sat, 10 Feb 2024 07:33:47 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -316,7 +418,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2188'
|
||||
- '1667'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -325,366 +427,17 @@ interactions:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299794'
|
||||
x-ratelimit-remaining-tokens_usage_based:
|
||||
- '299794'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 41ms
|
||||
x-ratelimit-reset-tokens_usage_based:
|
||||
- 41ms
|
||||
x-request-id:
|
||||
- a644c703d416b1e4b06586e96e6d7297
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew
|
||||
test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou
|
||||
have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float
|
||||
- Useful for when you need to multiply two numbers together.\n The input
|
||||
to this tool should be a comma separated list of numbers of\n length
|
||||
two, representing the two numbers you want to multiply together.\n For
|
||||
example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use
|
||||
a tool, please use the exact following format:\n\n```\nThought: Do I need to
|
||||
use a tool? Yes\nAction: the action to take, should be one of [multiplier],
|
||||
just the name.\nAction Input: the input to the action\nObservation: the result
|
||||
of the action\n```\n\nWhen you have a response for your task, or if you do not
|
||||
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
|
||||
a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary
|
||||
of your work so far:\n The human asks the AI to calculate 3 times 5 and the
|
||||
AI responds that 3 times 5 is 15.\nBegin! This is VERY important to you, your
|
||||
job depends on it!\n\nCurrent Task: What is 3 times 5?\n\n"}], "model": "gpt-4",
|
||||
"n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1317'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=;
|
||||
_cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.6.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.6.1
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8b7eKehkNtTjrXJD0aFjiVUeXU7te\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1703858232,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
|
||||
multiplier\\nAction Input: 3,5\"\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 290,\n \"completion_tokens\":
|
||||
23,\n \"total_tokens\": 313\n },\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 83d28d007f461cd9-GRU
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 29 Dec 2023 13:57:14 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '1885'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299697'
|
||||
x-ratelimit-remaining-tokens_usage_based:
|
||||
- '299697'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 60ms
|
||||
x-ratelimit-reset-tokens_usage_based:
|
||||
- 60ms
|
||||
x-request-id:
|
||||
- 87959bfa6a029f7bcafb5095e66d4964
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew
|
||||
test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou
|
||||
have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float
|
||||
- Useful for when you need to multiply two numbers together.\n The input
|
||||
to this tool should be a comma separated list of numbers of\n length
|
||||
two, representing the two numbers you want to multiply together.\n For
|
||||
example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use
|
||||
a tool, please use the exact following format:\n\n```\nThought: Do I need to
|
||||
use a tool? Yes\nAction: the action to take, should be one of [multiplier],
|
||||
just the name.\nAction Input: the input to the action\nObservation: the result
|
||||
of the action\n```\n\nWhen you have a response for your task, or if you do not
|
||||
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
|
||||
a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary
|
||||
of your work so far:\n The human asks the AI to calculate 3 times 5 and the
|
||||
AI responds that 3 times 5 is 15.\nBegin! This is VERY important to you, your
|
||||
job depends on it!\n\nCurrent Task: What is 3 times 5?\n\nI just used the multiplier
|
||||
tool with input 3,5. So I already knwo the result of that.\nObservation: Invalid
|
||||
or incomplete response\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
|
||||
"stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1460'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=;
|
||||
_cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.6.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.6.1
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8b7eME5fg1w8QcXUnl0FBcqxJWy7u\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1703858234,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
|
||||
3 times 5 is 15.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 324,\n \"completion_tokens\":
|
||||
22,\n \"total_tokens\": 346\n },\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 83d28d0fcfaf1cd9-GRU
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 29 Dec 2023 13:57:17 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2494'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299661'
|
||||
x-ratelimit-remaining-tokens_usage_based:
|
||||
- '299661'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 67ms
|
||||
x-ratelimit-reset-tokens_usage_based:
|
||||
- 67ms
|
||||
x-request-id:
|
||||
- eb9704531634d4cd3c6652dff09914bc
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
|
||||
lines of conversation provided, adding onto the previous summary returning a
|
||||
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
|
||||
of artificial intelligence. The AI thinks artificial intelligence is a force
|
||||
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
|
||||
intelligence is a force for good?\nAI: Because artificial intelligence will
|
||||
help humans reach their full potential.\n\nNew summary:\nThe human asks what
|
||||
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
|
||||
is a force for good because it will help humans reach their full potential.\nEND
|
||||
OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to calculate 3 times 5
|
||||
and the AI responds that 3 times 5 is 15.\n\nNew lines of conversation:\nHuman:
|
||||
What is 3 times 5?\nAI: 3 times 5 is 15.\n\nNew summary:"}], "model": "gpt-4",
|
||||
"n": 1, "stream": false, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '972'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=;
|
||||
_cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.6.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.6.1
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-8b7eP77MzprkbgjaMbeaEob0aDx39\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1703858237,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"The human asks the AI to calculate 3
|
||||
times 5 twice, and both times the AI responds that 3 times 5 is 15.\"\n },\n
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
|
||||
\ \"usage\": {\n \"prompt_tokens\": 181,\n \"completion_tokens\": 30,\n
|
||||
\ \"total_tokens\": 211\n },\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 83d28d211f591cd9-GRU
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 29 Dec 2023 13:57:20 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- gpt-4-0613
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '2967'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15724800; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299771'
|
||||
x-ratelimit-remaining-tokens_usage_based:
|
||||
- '299771'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 45ms
|
||||
x-ratelimit-reset-tokens_usage_based:
|
||||
- 45ms
|
||||
x-request-id:
- e3bbd11b2e9b28f76f8b43dc7a4d2b6d
http_version: HTTP/1.1
status_code: 200
- req_14ec03ab8500562c8eaac94fe2cec349
status:
code: 200
message: OK
version: 1