interactions: - request: body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour personal goal is: test goal\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o"}' headers: accept: - application/json accept-encoding: - gzip, deflate connection: - keep-alive content-length: - '774' content-type: - application/json cookie: - __cf_bm=MSMbC3L9m33fbO5Mdj70NJMu6kJCjhuwRcYuWJdJkYQ-1727071769-1.0.1.1-Dok5J431gUNvLRyLNrUo0A.FXXIuZUUooIMFSj74XUDgQJt0mlhf6QEEo6s9wXLhByXrdKr6cxYGBEkQoDslXg; _cfuvid=5cK2hfLqTXw1WjB81uT.sGmmA67JIxSKSQeEHQ3zOQw-1727071769966-0.0.1.1-604800000 host: - api.openai.com user-agent: - OpenAI/Python 1.47.0 x-stainless-arch: - arm64 x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - MacOS x-stainless-package-version: - 1.47.0 x-stainless-raw-response: - 'true' x-stainless-runtime: - CPython x-stainless-runtime-version: - 3.11.7 method: POST uri: https://api.openai.com/v1/chat/completions response: content: "{\n \"id\": \"chatcmpl-AAWipyHyYHlxpbMUuDNwI1YKUXHYd\",\n \"object\": \"chat.completion\",\n \"created\": 1727072427,\n \"model\": \"gpt-4o-2024-05-13\",\n \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer.\\nFinal Answer: Hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 158,\n \"completion_tokens\": 12,\n \"total_tokens\": 170,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n" headers: CF-Cache-Status: - DYNAMIC CF-RAY: - 8c786dcd1c95a540-MIA Connection: - keep-alive Content-Encoding: - gzip Content-Type: - application/json Date: - Mon, 23 Sep 2024 06:20:27 GMT Server: - cloudflare Transfer-Encoding: - chunked X-Content-Type-Options: - nosniff access-control-expose-headers: - X-Request-ID openai-organization: - crewai-iuxna1 openai-processing-ms: - '252' openai-version: - '2020-10-01' strict-transport-security: - max-age=15552000; includeSubDomains; preload x-ratelimit-limit-requests: - '10000' x-ratelimit-limit-tokens: - '30000000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - '29999817' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - 0s x-request-id: - req_631576dd331554985369fcb072419b1e http_version: HTTP/1.1 status_code: 200 - request: body: '{"messages": [{"role": "system", "content": "You are test role. 
test backstory\nYour personal goal is: test goal\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}, {"role": "user", "content": "Feedback: Don''t say hi, say Hello instead!"}], "model": "gpt-4o"}' headers: accept: - application/json accept-encoding: - gzip, deflate connection: - keep-alive content-length: - '849' content-type: - application/json cookie: - __cf_bm=MSMbC3L9m33fbO5Mdj70NJMu6kJCjhuwRcYuWJdJkYQ-1727071769-1.0.1.1-Dok5J431gUNvLRyLNrUo0A.FXXIuZUUooIMFSj74XUDgQJt0mlhf6QEEo6s9wXLhByXrdKr6cxYGBEkQoDslXg; _cfuvid=5cK2hfLqTXw1WjB81uT.sGmmA67JIxSKSQeEHQ3zOQw-1727071769966-0.0.1.1-604800000 host: - api.openai.com user-agent: - OpenAI/Python 1.47.0 x-stainless-arch: - arm64 x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - MacOS x-stainless-package-version: - 1.47.0 x-stainless-raw-response: - 'true' x-stainless-runtime: - CPython x-stainless-runtime-version: - 3.11.7 method: POST uri: https://api.openai.com/v1/chat/completions response: content: "{\n \"id\": \"chatcmpl-AAWipafXhM3wWia1nmEBa2mmuJAjn\",\n \"object\": \"chat.completion\",\n \"created\": 1727072427,\n \"model\": \"gpt-4o-2024-05-13\",\n \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: Hello\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 172,\n \"completion_tokens\": 14,\n \"total_tokens\": 186,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n" headers: CF-Cache-Status: - DYNAMIC CF-RAY: - 8c786dd09eb2a540-MIA Connection: - keep-alive Content-Encoding: - gzip Content-Type: - application/json Date: - Mon, 23 Sep 2024 06:20:27 GMT Server: - cloudflare Transfer-Encoding: - chunked X-Content-Type-Options: - nosniff access-control-expose-headers: - X-Request-ID openai-organization: - crewai-iuxna1 openai-processing-ms: - '260' openai-version: - '2020-10-01' strict-transport-security: - max-age=15552000; includeSubDomains; preload x-ratelimit-limit-requests: - '10000' x-ratelimit-limit-tokens: - '30000000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - '29999806' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - 0s x-request-id: - req_8bbbefdb7bac0ca88b61c64598a61da5 http_version: HTTP/1.1 status_code: 200 version: 1