interactions:
- request:
    body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour personal goal is: test goal\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '824'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AaqIIsTxhvf75xvuu7gQScIlRSKbW\",\n \"object\": \"chat.completion\",\n \"created\": 1733344190,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal Answer: Hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 158,\n \"completion_tokens\": 12,\n \"total_tokens\": 170,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_0705bf87c0\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8ece8cfc3b1f4532-ATL
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Wed, 04 Dec 2024 20:29:50 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=QJZZjZ6eqnVamqUkw.Bx0mj7oBi3a_vGEH1VODcUxlg-1733344190-1.0.1.1-xyN0ekA9xIrSwEhRBmTiWJ3Pt72UYLU5owKfkz5yihVmMTfsr_Qz.ssGPJ5cuft066v1xVjb4zOSTdFmesMSKg; path=/; expires=Wed, 04-Dec-24 20:59:50 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      - _cfuvid=eCIkP8GVPvpkg19eOhCquWFHm.RTQBQy4yHLGGEAH5c-1733344190334-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '313'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999816'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_9fd9a8ee688045dcf7ac5f6fdf689372
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: '{"messages": [{"role": "system", "content": "Determine if the following feedback indicates that the user is satisfied or if further changes are needed. Respond with ''True'' if further changes are needed, or ''False'' if the user is satisfied. **Important** Do not include any additional commentary outside of your ''True'' or ''False'' response.\n\nFeedback: \"Don''t say hi, say Hello instead!\""}], "model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '461'
      content-type:
      - application/json
      cookie:
      - __cf_bm=QJZZjZ6eqnVamqUkw.Bx0mj7oBi3a_vGEH1VODcUxlg-1733344190-1.0.1.1-xyN0ekA9xIrSwEhRBmTiWJ3Pt72UYLU5owKfkz5yihVmMTfsr_Qz.ssGPJ5cuft066v1xVjb4zOSTdFmesMSKg; _cfuvid=eCIkP8GVPvpkg19eOhCquWFHm.RTQBQy4yHLGGEAH5c-1733344190334-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AaqIIaQlLyoyPmk909PvAIfA2TmJL\",\n \"object\": \"chat.completion\",\n \"created\": 1733344190,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"True\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 78,\n \"completion_tokens\": 1,\n \"total_tokens\": 79,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_0705bf87c0\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8ece8d060b5e4532-ATL
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Wed, 04 Dec 2024 20:29:50 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '375'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999898'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_be7cb475e0859a82c37ee3f2871ea5ea
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour personal goal is: test goal\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I now can give a great answer \nFinal Answer: Hi"}, {"role": "user", "content": "Feedback: Don''t say hi, say Hello instead!"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '986'
      content-type:
      - application/json
      cookie:
      - __cf_bm=QJZZjZ6eqnVamqUkw.Bx0mj7oBi3a_vGEH1VODcUxlg-1733344190-1.0.1.1-xyN0ekA9xIrSwEhRBmTiWJ3Pt72UYLU5owKfkz5yihVmMTfsr_Qz.ssGPJ5cuft066v1xVjb4zOSTdFmesMSKg; _cfuvid=eCIkP8GVPvpkg19eOhCquWFHm.RTQBQy4yHLGGEAH5c-1733344190334-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AaqIJAAxpVfUOdrsgYKHwfRlHv4RS\",\n \"object\": \"chat.completion\",\n \"created\": 1733344191,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer  \\nFinal Answer: Hello\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 188,\n \"completion_tokens\": 14,\n \"total_tokens\": 202,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_0705bf87c0\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8ece8d090fc34532-ATL
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Wed, 04 Dec 2024 20:29:51 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '484'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999793'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_5bf4a565ad6c2567a1ed204ecac89134
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: '{"messages": [{"role": "system", "content": "Determine if the following feedback indicates that the user is satisfied or if further changes are needed. Respond with ''True'' if further changes are needed, or ''False'' if the user is satisfied. **Important** Do not include any additional commentary outside of your ''True'' or ''False'' response.\n\nFeedback: \"looks good\""}], "model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '439'
      content-type:
      - application/json
      cookie:
      - __cf_bm=QJZZjZ6eqnVamqUkw.Bx0mj7oBi3a_vGEH1VODcUxlg-1733344190-1.0.1.1-xyN0ekA9xIrSwEhRBmTiWJ3Pt72UYLU5owKfkz5yihVmMTfsr_Qz.ssGPJ5cuft066v1xVjb4zOSTdFmesMSKg; _cfuvid=eCIkP8GVPvpkg19eOhCquWFHm.RTQBQy4yHLGGEAH5c-1733344190334-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AaqIJqyG8vl9mxj2qDPZgaxyNLLIq\",\n \"object\": \"chat.completion\",\n \"created\": 1733344191,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"False\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 73,\n \"completion_tokens\": 1,\n \"total_tokens\": 74,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_0705bf87c0\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8ece8d0cfdeb4532-ATL
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Wed, 04 Dec 2024 20:29:51 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '341'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999902'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_5554bade8ceda00cf364b76a51b708ff
    http_version: HTTP/1.1
    status_code: 200
version: 1