interactions:
- request:
    body: '{"messages": [{"role": "system", "content": "You are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expect criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '943'
      content-type:
      - application/json
      cookie:
      - __cf_bm=.8.5x0rqghNdtUCxfmwblfhuXiKyPm_cRq.NIa0heL0-1726642369-1.0.1.1-VbsAGqjOt45ZD50K2QW2oZrByLLIh6w8K4nAkLthTbXgU5qTgdf0mhvFId8Q2F3DcHUw9E72lVIZEN.YVlYINQ; _cfuvid=o5s1Ux898x0eFNtzUCnG996iqyX3LgXDXq99C5oqnVE-1726642369248-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.46.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.46.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-A8izccn4CEIrv17F1OeOFKTOnS60z\",\n \"object\": \"chat.completion\",\n \"created\": 1726642940,\n \"model\": \"gpt-4o-2024-05-13\",\n \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"The title \\\"The impact of AI in the future of work\\\" is clear and directly addresses a specific topic of interest. It is straightforward and suggests an informative discussion about how AI will influence the employment landscape in the future. The topic is current and highly relevant.\\n\\nFinal Answer: 5\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 186,\n \"completion_tokens\": 57,\n \"total_tokens\": 243,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_a5d11b2ef2\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8c4f7848bb85a67a-MIA
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Wed, 18 Sep 2024 07:02:21 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '617'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15552000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '30000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '29999781'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_c4eba39973c6231ecb755caea16e937b
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: '{"messages": [{"role": "user", "content": "5"}, {"role": "system", "content": "I''m gonna convert this raw text into valid JSON."}], "model": "gpt-4o", "tool_choice": {"type": "function", "function": {"name": "ScoreOutput"}}, "tools": [{"type": "function", "function": {"name": "ScoreOutput", "description": "Correctly extracted `ScoreOutput` with all the required parameters with correct types", "parameters": {"properties": {"score": {"title": "Score", "type": "integer"}}, "required": ["score"], "type": "object"}}}]}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '519'
      content-type:
      - application/json
      cookie:
      - __cf_bm=N60vWLgzAuza5DUheVkkgTZxX2TmUkB_..z1df9sAXo-1726642910-1.0.1.1-qFt4471cbHIKyBuVc9Nd2aMc7Jp8n_Q0Mc_vaWBAjkUXB5QKYge2hIcX_BGCBQMP3EIEZrDd4RV9WzG0n.2zhg; _cfuvid=fm9xNGmptML5inFBy6LStoxl1xxuIMLGa.EXkFbg0Dg-1726642910345-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.46.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.46.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-A8izd2WzzVxN4lX9SsJSsbDCb9gkw\",\n \"object\": \"chat.completion\",\n \"created\": 1726642941,\n \"model\": \"gpt-4o-2024-05-13\",\n \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \ \"id\": \"call_lf0zInGVU4QvkfCjmujLe0hl\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"ScoreOutput\",\n \ \"arguments\": \"{\\\"score\\\":5}\"\n }\n }\n \ ],\n \"refusal\": null\n },\n \"logprobs\": null,\n \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 80,\n \"completion_tokens\": 5,\n \"total_tokens\": 85,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_8dd226ca9c\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8c4f784f4f786daf-MIA
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Wed, 18 Sep 2024 07:02:21 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '272'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15552000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '30000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '29999968'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_1aa917a8ef2faa3e8dfc3f194a52a2a9
    http_version: HTTP/1.1
    status_code: 200
version: 1