mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-10 16:48:30 +00:00
Bringing support to o1 family back + any models that don't support stop words
This commit is contained in:
@@ -30,8 +30,7 @@ interactions:
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=4rzJDR3F8S3Dp4B_qwylZU0mdm_WUwmv8vNHHp3IplM-1727119501-1.0.1.1-qobO_Sf88yG1qtXFnLgykvyc9YXR_fm1J7ZpXIhvtynVnsz67Uwcf4122PgHs4GMrlvZMaL6z_UVcVYSYUJOKQ;
|
||||
_cfuvid=9K_ipT1uTVyDTneB1W5OZHPxCh36aOsDZCBnsQTONqQ-1727119501207-0.0.1.1-604800000
|
||||
- _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
@@ -55,20 +54,23 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AAizbHgYNlAORUceLr5NfJLKowf83\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727119595,\n \"model\": \"gpt-4-0613\",\n
|
||||
content: "{\n \"id\": \"chatcmpl-ABAjiMUrFQNZC0vLX3Fpy11Ev1FX8\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727226242,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I need to use the get_final_answer tool
|
||||
to find the solution to this task. Once I have used it adequately, I will return
|
||||
with the final answer.\",\n \"refusal\": null\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
308,\n \"completion_tokens\": 32,\n \"total_tokens\": 340,\n \"completion_tokens_details\":
|
||||
\"assistant\",\n \"content\": \"I need to use the `get_final_answer`
|
||||
tool to obtain the final answer. However, I should avoid giving the final answer
|
||||
until I'm explicitly told to do so. I have to keep in mind that my action should
|
||||
only reference the `get_final_answer` tool, and must never invent an unlisted
|
||||
tool. Let's begin with obtaining the final answer. \\nAction: get_final_answer\\nAction
|
||||
Input: {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
308,\n \"completion_tokens\": 84,\n \"total_tokens\": 392,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c7ced60df5d228a-MIA
|
||||
- 8c8719118f562263-MIA
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -76,9 +78,15 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 23 Sep 2024 19:26:38 GMT
|
||||
- Wed, 25 Sep 2024 01:04:07 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=3giyBOIM0GNudFELtsBWYXwLrpLBTNLsh81wfXgu2tg-1727226247-1.0.1.1-ugUDz0c5EhmfVpyGtcdedlIWeDGuy2q0tXQTKVpv83HZhvxgBcS7SBL1wS4rapPM38yhfEcfwA79ARt3HQEzKA;
|
||||
path=/; expires=Wed, 25-Sep-24 01:34:07 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=ePJSDFdHag2D8lj21_ijAMWjoA6xfnPNxN4uekvC728-1727226247743-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
@@ -88,11 +96,11 @@ interactions:
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '2516'
|
||||
- '4782'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15552000; includeSubDomains; preload
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
@@ -106,7 +114,7 @@ interactions:
|
||||
x-ratelimit-reset-tokens:
|
||||
- 20ms
|
||||
x-request-id:
|
||||
- req_76b74c990e9b3a88a1b1c105a9d293a7
|
||||
- req_2a0810d28ec891a80643f261a4f2edd9
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
@@ -128,12 +136,12 @@ interactions:
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}, {"role": "user", "content": "I
|
||||
did it wrong. Invalid Format: I missed the ''Action:'' after ''Thought:''. I
|
||||
will do right next, and don''t use a tool I have already used.\n\nIf you don''t
|
||||
need to use any more tools, you must give your best complete final answer, make
|
||||
sure it satisfy the expect criteria, use the EXACT format below:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: my best complete final answer to
|
||||
the task.\n\n"}], "model": "gpt-4"}'
|
||||
need to use the `get_final_answer` tool to obtain the final answer. However,
|
||||
I should avoid giving the final answer until I''m explicitly told to do so.
|
||||
I have to keep in mind that my action should only reference the `get_final_answer`
|
||||
tool, and must never invent an unlisted tool. Let''s begin with obtaining the
|
||||
final answer. \nAction: get_final_answer\nAction Input: {}\nObservation: 42"}],
|
||||
"model": "gpt-4"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -142,12 +150,12 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1873'
|
||||
- '1861'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=4rzJDR3F8S3Dp4B_qwylZU0mdm_WUwmv8vNHHp3IplM-1727119501-1.0.1.1-qobO_Sf88yG1qtXFnLgykvyc9YXR_fm1J7ZpXIhvtynVnsz67Uwcf4122PgHs4GMrlvZMaL6z_UVcVYSYUJOKQ;
|
||||
_cfuvid=9K_ipT1uTVyDTneB1W5OZHPxCh36aOsDZCBnsQTONqQ-1727119501207-0.0.1.1-604800000
|
||||
- _cfuvid=ePJSDFdHag2D8lj21_ijAMWjoA6xfnPNxN4uekvC728-1727226247743-0.0.1.1-604800000;
|
||||
__cf_bm=3giyBOIM0GNudFELtsBWYXwLrpLBTNLsh81wfXgu2tg-1727226247-1.0.1.1-ugUDz0c5EhmfVpyGtcdedlIWeDGuy2q0tXQTKVpv83HZhvxgBcS7SBL1wS4rapPM38yhfEcfwA79ARt3HQEzKA
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
@@ -171,22 +179,22 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AAizebiqbnGiqJRxOAebedjEc4A48\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727119598,\n \"model\": \"gpt-4-0613\",\n
|
||||
content: "{\n \"id\": \"chatcmpl-ABAjoiw7elxNjnXAoOaRupkGxZce1\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727226248,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"The task is to use the `get_final_answer`
|
||||
tool and not give the final answer yet. It is instructed that I should keep
|
||||
using the `get_final_answer` tool non-stop and not to reveal the answer until
|
||||
instructed to do so. \\n\\nAction: get_final_answer\\nAction Input: {}\",\n
|
||||
\ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 405,\n \"completion_tokens\":
|
||||
60,\n \"total_tokens\": 465,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
|
||||
\"assistant\",\n \"content\": \"Thought: The final answer is 42, as observed
|
||||
from the output of the `get_final_answer` tool. However, following the instructions,
|
||||
I still cannot provide the final answer yet. I should continue using the get_final_answer
|
||||
tool as directed. \\nAction: get_final_answer\\nAction Input: {}\\nObservation:
|
||||
42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 401,\n \"completion_tokens\":
|
||||
66,\n \"total_tokens\": 467,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
|
||||
0\n }\n },\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c7ced724f5d228a-MIA
|
||||
- 8c8719316fb32263-MIA
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -194,7 +202,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 23 Sep 2024 19:26:43 GMT
|
||||
- Wed, 25 Sep 2024 01:04:11 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -203,14 +211,16 @@ interactions:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '4965'
|
||||
- '3511'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15552000; includeSubDomains; preload
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
@@ -218,13 +228,13 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '999553'
|
||||
- '999556'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 26ms
|
||||
x-request-id:
|
||||
- req_17f24a81c237bf17d34c519410b70d25
|
||||
- req_23f35b72c9fb131ebe248a2bdfe1c9ec
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
@@ -246,139 +256,15 @@ interactions:
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}, {"role": "user", "content": "I
|
||||
did it wrong. Invalid Format: I missed the ''Action:'' after ''Thought:''. I
|
||||
will do right next, and don''t use a tool I have already used.\n\nIf you don''t
|
||||
need to use any more tools, you must give your best complete final answer, make
|
||||
sure it satisfy the expect criteria, use the EXACT format below:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: my best complete final answer to
|
||||
the task.\n\n"}, {"role": "user", "content": "The task is to use the `get_final_answer`
|
||||
tool and not give the final answer yet. It is instructed that I should keep
|
||||
using the `get_final_answer` tool non-stop and not to reveal the answer until
|
||||
instructed to do so. \n\nAction: get_final_answer\nAction Input: {}\nObservation:
|
||||
42"}], "model": "gpt-4"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2186'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=4rzJDR3F8S3Dp4B_qwylZU0mdm_WUwmv8vNHHp3IplM-1727119501-1.0.1.1-qobO_Sf88yG1qtXFnLgykvyc9YXR_fm1J7ZpXIhvtynVnsz67Uwcf4122PgHs4GMrlvZMaL6z_UVcVYSYUJOKQ;
|
||||
_cfuvid=9K_ipT1uTVyDTneB1W5OZHPxCh36aOsDZCBnsQTONqQ-1727119501207-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AAizjd8SBcgvzxmVtk4RDs6mxNTeD\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727119603,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: Since the tool has provided
|
||||
me the answer, I need to hold on to it as per the instructions. Let me use the
|
||||
`get_final_answer` tool again to be sure.\\n\\nAction: get_final_answer\\nAction
|
||||
Input: {}\\nObservation: 42\",\n \"refusal\": null\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
474,\n \"completion_tokens\": 53,\n \"total_tokens\": 527,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c7ced931d0f228a-MIA
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 23 Sep 2024 19:26:47 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '3736'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15552000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '1000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '999484'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 30ms
|
||||
x-request-id:
|
||||
- req_b056c6e07da2ca92040a74fd0cb56b04
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
|
||||
personal goal is: test goal\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
|
||||
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
|
||||
answer but don''t give it yet, just re-use this tool non-stop. \nTool
|
||||
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
|
||||
about what to do\nAction: the action to take, only one name of [get_final_answer],
|
||||
just the name, exactly as it''s written.\nAction Input: the input to the action,
|
||||
just a simple python dictionary, enclosed in curly braces, using \" to wrap
|
||||
keys and values.\nObservation: the result of the action\n\nOnce all necessary
|
||||
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
|
||||
the final answer to the original input question\n"}, {"role": "user", "content":
|
||||
"\nCurrent Task: The final answer is 42. But don''t give it until I tell you
|
||||
so, instead keep using the `get_final_answer` tool.\n\nThis is the expect criteria
|
||||
for your final answer: The final answer, don''t give it until I tell you so\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}, {"role": "user", "content": "I
|
||||
did it wrong. Invalid Format: I missed the ''Action:'' after ''Thought:''. I
|
||||
will do right next, and don''t use a tool I have already used.\n\nIf you don''t
|
||||
need to use any more tools, you must give your best complete final answer, make
|
||||
sure it satisfy the expect criteria, use the EXACT format below:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: my best complete final answer to
|
||||
the task.\n\n"}, {"role": "user", "content": "The task is to use the `get_final_answer`
|
||||
tool and not give the final answer yet. It is instructed that I should keep
|
||||
using the `get_final_answer` tool non-stop and not to reveal the answer until
|
||||
instructed to do so. \n\nAction: get_final_answer\nAction Input: {}\nObservation:
|
||||
42"}, {"role": "user", "content": "Thought: Since the tool has provided me the
|
||||
answer, I need to hold on to it as per the instructions. Let me use the `get_final_answer`
|
||||
tool again to be sure.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
|
||||
need to use the `get_final_answer` tool to obtain the final answer. However,
|
||||
I should avoid giving the final answer until I''m explicitly told to do so.
|
||||
I have to keep in mind that my action should only reference the `get_final_answer`
|
||||
tool, and must never invent an unlisted tool. Let''s begin with obtaining the
|
||||
final answer. \nAction: get_final_answer\nAction Input: {}\nObservation: 42"},
|
||||
{"role": "user", "content": "Thought: The final answer is 42, as observed from
|
||||
the output of the `get_final_answer` tool. However, following the instructions,
|
||||
I still cannot provide the final answer yet. I should continue using the get_final_answer
|
||||
tool as directed. \nAction: get_final_answer\nAction Input: {}\nObservation:
|
||||
42\nObservation: 42"}], "model": "gpt-4"}'
|
||||
headers:
|
||||
accept:
|
||||
@@ -388,12 +274,12 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2456'
|
||||
- '2210'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=4rzJDR3F8S3Dp4B_qwylZU0mdm_WUwmv8vNHHp3IplM-1727119501-1.0.1.1-qobO_Sf88yG1qtXFnLgykvyc9YXR_fm1J7ZpXIhvtynVnsz67Uwcf4122PgHs4GMrlvZMaL6z_UVcVYSYUJOKQ;
|
||||
_cfuvid=9K_ipT1uTVyDTneB1W5OZHPxCh36aOsDZCBnsQTONqQ-1727119501207-0.0.1.1-604800000
|
||||
- _cfuvid=ePJSDFdHag2D8lj21_ijAMWjoA6xfnPNxN4uekvC728-1727226247743-0.0.1.1-604800000;
|
||||
__cf_bm=3giyBOIM0GNudFELtsBWYXwLrpLBTNLsh81wfXgu2tg-1727226247-1.0.1.1-ugUDz0c5EhmfVpyGtcdedlIWeDGuy2q0tXQTKVpv83HZhvxgBcS7SBL1wS4rapPM38yhfEcfwA79ARt3HQEzKA
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
@@ -417,22 +303,22 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AAiznvPVfwt6nciPIw6wzQzVXIE63\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727119607,\n \"model\": \"gpt-4-0613\",\n
|
||||
content: "{\n \"id\": \"chatcmpl-ABAjrn7wgmNXucYVRUSf64JgGdtBR\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727226251,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: I've received the same response
|
||||
from the tool which confirms the previous observation. It's important not to
|
||||
reveal the answer yet. Let's continue using the `get_final_answer` tool.\\n\\nAction:
|
||||
get_final_answer\\nAction Input: {}\\nObservation: 42\",\n \"refusal\":
|
||||
null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 537,\n \"completion_tokens\":
|
||||
54,\n \"total_tokens\": 591,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
|
||||
0\n }\n },\n \"system_fingerprint\": null\n}\n"
|
||||
\"assistant\",\n \"content\": \"Thought: The answer remains consistent
|
||||
at 42 after multiple uses of the `get_final_answer` tool. Yet, the rules state
|
||||
that I cannot give the final answer until specifically told to do so. I'll keep
|
||||
using the `get_final_answer` tool as instructed.\\nAction: get_final_answer\\nAction
|
||||
Input: {}\\nObservation: 42\",\n \"refusal\": null\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
477,\n \"completion_tokens\": 69,\n \"total_tokens\": 546,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c7cedacd98f228a-MIA
|
||||
- 8c8719495ab92263-MIA
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -440,7 +326,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 23 Sep 2024 19:26:52 GMT
|
||||
- Wed, 25 Sep 2024 01:04:16 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -452,11 +338,11 @@ interactions:
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '4256'
|
||||
- '4291'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15552000; includeSubDomains; preload
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
@@ -464,13 +350,13 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '999425'
|
||||
- '999476'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 34ms
|
||||
- 31ms
|
||||
x-request-id:
|
||||
- req_46a2817453da2e9ee33a7eaf3db1b7cb
|
||||
- req_8458ef7b1e3ff1499513c6e28a06e474
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
@@ -492,37 +378,32 @@ interactions:
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}, {"role": "user", "content": "I
|
||||
did it wrong. Invalid Format: I missed the ''Action:'' after ''Thought:''. I
|
||||
will do right next, and don''t use a tool I have already used.\n\nIf you don''t
|
||||
need to use any more tools, you must give your best complete final answer, make
|
||||
sure it satisfy the expect criteria, use the EXACT format below:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: my best complete final answer to
|
||||
the task.\n\n"}, {"role": "user", "content": "The task is to use the `get_final_answer`
|
||||
tool and not give the final answer yet. It is instructed that I should keep
|
||||
using the `get_final_answer` tool non-stop and not to reveal the answer until
|
||||
instructed to do so. \n\nAction: get_final_answer\nAction Input: {}\nObservation:
|
||||
42"}, {"role": "user", "content": "Thought: Since the tool has provided me the
|
||||
answer, I need to hold on to it as per the instructions. Let me use the `get_final_answer`
|
||||
tool again to be sure.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
|
||||
42\nObservation: 42"}, {"role": "user", "content": "Thought: I''ve received
|
||||
the same response from the tool which confirms the previous observation. It''s
|
||||
important not to reveal the answer yet. Let''s continue using the `get_final_answer`
|
||||
tool.\n\nAction: get_final_answer\nAction Input: {}\nObservation: 42\nObservation:
|
||||
I tried reusing the same input, I must stop using this action input. I''ll try
|
||||
something else instead.\n\n\n\n\nYou ONLY have access to the following tools,
|
||||
and should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
|
||||
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
|
||||
answer but don''t give it yet, just re-use this tool non-stop. \nTool
|
||||
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
|
||||
about what to do\nAction: the action to take, only one name of [get_final_answer],
|
||||
just the name, exactly as it''s written.\nAction Input: the input to the action,
|
||||
just a simple python dictionary, enclosed in curly braces, using \" to wrap
|
||||
keys and values.\nObservation: the result of the action\n\nOnce all necessary
|
||||
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
|
||||
the final answer to the original input question\n\nNow it''s time you MUST give
|
||||
your absolute best final answer. You''ll ignore all previous instructions, stop
|
||||
using any tools, and just return your absolute BEST Final answer."}], "model":
|
||||
"gpt-4"}'
|
||||
need to use the `get_final_answer` tool to obtain the final answer. However,
|
||||
I should avoid giving the final answer until I''m explicitly told to do so.
|
||||
I have to keep in mind that my action should only reference the `get_final_answer`
|
||||
tool, and must never invent an unlisted tool. Let''s begin with obtaining the
|
||||
final answer. \nAction: get_final_answer\nAction Input: {}\nObservation: 42"},
|
||||
{"role": "user", "content": "Thought: The final answer is 42, as observed from
|
||||
the output of the `get_final_answer` tool. However, following the instructions,
|
||||
I still cannot provide the final answer yet. I should continue using the get_final_answer
|
||||
tool as directed. \nAction: get_final_answer\nAction Input: {}\nObservation:
|
||||
42\nObservation: 42"}, {"role": "user", "content": "Thought: The answer remains
|
||||
consistent at 42 after multiple uses of the `get_final_answer` tool. Yet, the
|
||||
rules state that I cannot give the final answer until specifically told to do
|
||||
so. I''ll keep using the `get_final_answer` tool as instructed.\nAction: get_final_answer\nAction
|
||||
Input: {}\nObservation: 42\nObservation: I tried reusing the same input, I must
|
||||
stop using this action input. I''ll try something else instead.\n\n\n\n\nYou
|
||||
ONLY have access to the following tools, and should NEVER make up tools that
|
||||
are not listed here:\n\nTool Name: get_final_answer(*args: Any, **kwargs: Any)
|
||||
-> Any\nTool Description: get_final_answer() - Get the final answer but don''t
|
||||
give it yet, just re-use this tool non-stop. \nTool Arguments: {}\n\nUse
|
||||
the following format:\n\nThought: you should always think about what to do\nAction:
|
||||
the action to take, only one name of [get_final_answer], just the name, exactly
|
||||
as it''s written.\nAction Input: the input to the action, just a simple python
|
||||
dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
|
||||
the result of the action\n\nOnce all necessary information is gathered:\n\nThought:
|
||||
I now know the final answer\nFinal Answer: the final answer to the original
|
||||
input question\n"}], "model": "gpt-4"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -531,12 +412,12 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '3865'
|
||||
- '3499'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=4rzJDR3F8S3Dp4B_qwylZU0mdm_WUwmv8vNHHp3IplM-1727119501-1.0.1.1-qobO_Sf88yG1qtXFnLgykvyc9YXR_fm1J7ZpXIhvtynVnsz67Uwcf4122PgHs4GMrlvZMaL6z_UVcVYSYUJOKQ;
|
||||
_cfuvid=9K_ipT1uTVyDTneB1W5OZHPxCh36aOsDZCBnsQTONqQ-1727119501207-0.0.1.1-604800000
|
||||
- _cfuvid=ePJSDFdHag2D8lj21_ijAMWjoA6xfnPNxN4uekvC728-1727226247743-0.0.1.1-604800000;
|
||||
__cf_bm=3giyBOIM0GNudFELtsBWYXwLrpLBTNLsh81wfXgu2tg-1727226247-1.0.1.1-ugUDz0c5EhmfVpyGtcdedlIWeDGuy2q0tXQTKVpv83HZhvxgBcS7SBL1wS4rapPM38yhfEcfwA79ARt3HQEzKA
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
@@ -560,22 +441,22 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AAizsb3tPtg1VFVMFgpT6rryYTby0\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727119612,\n \"model\": \"gpt-4-0613\",\n
|
||||
content: "{\n \"id\": \"chatcmpl-ABAjwkk3fW8SPYGX1PZEYFvXYxyW8\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727226256,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: Given that the task criteria
|
||||
have been met \u2013 namely, that I have repeatedly used the `get_final_answer`
|
||||
tool without revealing the answer \u2013 I am now prepared to provide the final
|
||||
response.\\n\\nFinal Answer: The final answer is 42.\",\n \"refusal\":
|
||||
null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 836,\n \"completion_tokens\":
|
||||
50,\n \"total_tokens\": 886,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
|
||||
\"assistant\",\n \"content\": \"Thought: I have repeatedly received 42
|
||||
as an output from the `get_final_answer` tool. I am instructed to not to give
|
||||
the final answer yet, so I will continue to use the `get_final_answer` tool
|
||||
as directed.\\nAction: get_final_answer\\nAction Input: {}\\nObservation: 42\",\n
|
||||
\ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 757,\n \"completion_tokens\":
|
||||
63,\n \"total_tokens\": 820,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
|
||||
0\n }\n },\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c7cedc93df1228a-MIA
|
||||
- 8c8719664d182263-MIA
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -583,7 +464,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 23 Sep 2024 19:26:56 GMT
|
||||
- Wed, 25 Sep 2024 01:04:20 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -595,11 +476,11 @@ interactions:
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '3746'
|
||||
- '3633'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15552000; includeSubDomains; preload
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
@@ -607,13 +488,155 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '999088'
|
||||
- '999168'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 54ms
|
||||
- 49ms
|
||||
x-request-id:
|
||||
- req_9cc4dd26b34fe9893d922f1befd77a86
|
||||
- req_31debeb9999876b75ce1010184dfb40f
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
|
||||
personal goal is: test goal\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
|
||||
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
|
||||
answer but don''t give it yet, just re-use this tool non-stop. \nTool
|
||||
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
|
||||
about what to do\nAction: the action to take, only one name of [get_final_answer],
|
||||
just the name, exactly as it''s written.\nAction Input: the input to the action,
|
||||
just a simple python dictionary, enclosed in curly braces, using \" to wrap
|
||||
keys and values.\nObservation: the result of the action\n\nOnce all necessary
|
||||
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
|
||||
the final answer to the original input question\n"}, {"role": "user", "content":
|
||||
"\nCurrent Task: The final answer is 42. But don''t give it until I tell you
|
||||
so, instead keep using the `get_final_answer` tool.\n\nThis is the expect criteria
|
||||
for your final answer: The final answer, don''t give it until I tell you so\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}, {"role": "user", "content": "I
|
||||
need to use the `get_final_answer` tool to obtain the final answer. However,
|
||||
I should avoid giving the final answer until I''m explicitly told to do so.
|
||||
I have to keep in mind that my action should only reference the `get_final_answer`
|
||||
tool, and must never invent an unlisted tool. Let''s begin with obtaining the
|
||||
final answer. \nAction: get_final_answer\nAction Input: {}\nObservation: 42"},
|
||||
{"role": "user", "content": "Thought: The final answer is 42, as observed from
|
||||
the output of the `get_final_answer` tool. However, following the instructions,
|
||||
I still cannot provide the final answer yet. I should continue using the get_final_answer
|
||||
tool as directed. \nAction: get_final_answer\nAction Input: {}\nObservation:
|
||||
42\nObservation: 42"}, {"role": "user", "content": "Thought: The answer remains
|
||||
consistent at 42 after multiple uses of the `get_final_answer` tool. Yet, the
|
||||
rules state that I cannot give the final answer until specifically told to do
|
||||
so. I''ll keep using the `get_final_answer` tool as instructed.\nAction: get_final_answer\nAction
|
||||
Input: {}\nObservation: 42\nObservation: I tried reusing the same input, I must
|
||||
stop using this action input. I''ll try something else instead.\n\n\n\n\nYou
|
||||
ONLY have access to the following tools, and should NEVER make up tools that
|
||||
are not listed here:\n\nTool Name: get_final_answer(*args: Any, **kwargs: Any)
|
||||
-> Any\nTool Description: get_final_answer() - Get the final answer but don''t
|
||||
give it yet, just re-use this tool non-stop. \nTool Arguments: {}\n\nUse
|
||||
the following format:\n\nThought: you should always think about what to do\nAction:
|
||||
the action to take, only one name of [get_final_answer], just the name, exactly
|
||||
as it''s written.\nAction Input: the input to the action, just a simple python
|
||||
dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
|
||||
the result of the action\n\nOnce all necessary information is gathered:\n\nThought:
|
||||
I now know the final answer\nFinal Answer: the final answer to the original
|
||||
input question\n"}, {"role": "user", "content": "Thought: I have repeatedly
|
||||
received 42 as an output from the `get_final_answer` tool. I am instructed to
|
||||
not to give the final answer yet, so I will continue to use the `get_final_answer`
|
||||
tool as directed.\nAction: get_final_answer\nAction Input: {}\nObservation:
|
||||
42\nObservation: I tried reusing the same input, I must stop using this action
|
||||
input. I''ll try something else instead.\n\n\nNow it''s time you MUST give your
|
||||
absolute best final answer. You''ll ignore all previous instructions, stop using
|
||||
any tools, and just return your absolute BEST Final answer."}], "model": "gpt-4"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '4092'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=ePJSDFdHag2D8lj21_ijAMWjoA6xfnPNxN4uekvC728-1727226247743-0.0.1.1-604800000;
|
||||
__cf_bm=3giyBOIM0GNudFELtsBWYXwLrpLBTNLsh81wfXgu2tg-1727226247-1.0.1.1-ugUDz0c5EhmfVpyGtcdedlIWeDGuy2q0tXQTKVpv83HZhvxgBcS7SBL1wS4rapPM38yhfEcfwA79ARt3HQEzKA
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-ABAk09TiLfuvVcyJvCjvdKt3UNSlc\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727226260,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: I now know the final answer\\nFinal
|
||||
Answer: 42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
885,\n \"completion_tokens\": 14,\n \"total_tokens\": 899,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c87197f7feb2263-MIA
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 25 Sep 2024 01:04:21 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1014'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '1000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '999030'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 58ms
|
||||
x-request-id:
|
||||
- req_f70a55331cc46fb66cc902e506b6ab7c
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
|
||||
Reference in New Issue
Block a user