interactions:
- request:
    body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour personal goal is: test goal\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Write a haiku about AI\n\nThis is the expect criteria for your final answer: A haiku (3 lines, 5-7-5 syllable pattern) about AI\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "model": "gpt-3.5-turbo", "max_tokens": 50, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '863'
      content-type:
      - application/json
      cookie:
      - __cf_bm=MSMbC3L9m33fbO5Mdj70NJMu6kJCjhuwRcYuWJdJkYQ-1727071769-1.0.1.1-Dok5J431gUNvLRyLNrUo0A.FXXIuZUUooIMFSj74XUDgQJt0mlhf6QEEo6s9wXLhByXrdKr6cxYGBEkQoDslXg; _cfuvid=5cK2hfLqTXw1WjB81uT.sGmmA67JIxSKSQeEHQ3zOQw-1727071769966-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.47.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.47.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n  \"id\": \"chatcmpl-AAWjHGqK7mZezYlqr41rJe2kjljCU\",\n  \"object\": \"chat.completion\",\n  \"created\": 1727072455,\n  \"model\": \"gpt-3.5-turbo-0125\",\n  \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\": \"assistant\",\n        \"content\": \"I now can give a great answer\\n\\nFinal Answer: \\nArtificial minds,\\nSilent, yet they learn and grow,\\nFuture in their code.\",\n        \"refusal\": null\n      },\n      \"logprobs\": null,\n      \"finish_reason\": \"stop\"\n    }\n  ],\n  \"usage\": {\n    \"prompt_tokens\": 173,\n    \"completion_tokens\": 30,\n    \"total_tokens\": 203,\n    \"completion_tokens_details\": {\n      \"reasoning_tokens\": 0\n    }\n  },\n  \"system_fingerprint\": null\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8c786e7f4ea367e0-MIA
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Mon, 23 Sep 2024 06:20:56 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '551'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15552000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '50000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '49999771'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_74e1a77ed0bf7f1655aae4c1eb6bd536
    http_version: HTTP/1.1
    status_code: 200
version: 1