diff --git a/tests/agent_tools/cassettes/test_ask_question.yaml b/tests/agent_tools/cassettes/test_ask_question.yaml index 1f2c8e9bb..1514c24a9 100644 --- a/tests/agent_tools/cassettes/test_ask_question.yaml +++ b/tests/agent_tools/cassettes/test_ask_question.yaml @@ -2,23 +2,24 @@ interactions: - request: body: '{"messages": [{"role": "user", "content": "You are researcher.\nYou''re an expert researcher, specialized in technology\n\nYour personal goal is: make - the best research and analysis on content about AI and AI agents\n\nTOOLS:\n------\nYou - have access to the following tools:\n\n\n\nTo use a tool, please use the exact - following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the - action to take, should be one of []\nAction Input: the input to the action\nObservation: - the result of the action\n```\n\nWhen you have a response for your task, or - if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis - is the summary of your work so far:\n The human asks the AI for its opinion - on AI agents, based on the impression that the AI dislikes them. The AI clarifies - that it doesn''t hold personal sentiments towards AI or any technology, but - instead analyzes them objectively. The AI finds AI agents a fascinating domain - of research with great potential for task automation and optimization across - industries, but acknowledges they present challenges such as ethical considerations - around privacy and decision-making.\nBegin! This is VERY important to you, your - job depends on it!\n\nCurrent Task: do you hate AI Agents?\n\nThis is the context - you are working with:\nI heard you LOVE them\n\n"}], "model": "gpt-4", "n": - 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + the best research and analysis on content about AI and AI agentsTOOLS:\n------\nYou + have access to only the following tools:\n\n\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: + the tool you wanna use, should be one of [], just the name.\nAction Input: Any + and all relevant information input and context for using the tool\nObservation: + the result of using the tool\n```\n\nWhen you have a response for your task, + or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]```This is the + summary of your work so far:\nThe human asks the AI for its opinion on AI Agents, + assuming it dislikes them. The AI, as a researcher, maintains a neutral perspective + on all subjects, including AI agents. It recognizes their potential to revolutionize + sectors like healthcare, education, and finance, and acknowledges their capability + to perform tasks requiring human intelligence. However, it also notes that these + tools can be used for both beneficial and harmful purposes, emphasizing the + importance of ethical guidelines and regulations.Begin! 
This is VERY important + to you, your job depends on it!\n\nCurrent Task: do you hate AI Agents?\nThis + is the context you''re working with:\nI heard you LOVE them\n"}], "model": "gpt-4", + "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -27,16 +28,16 @@ interactions: connection: - keep-alive content-length: - - '1494' + - '1586' content-type: - application/json cookie: - - __cf_bm=k2HUdEp80irAkv.3wl0c6unbzRUujrE1TnJeObxyuHw-1703102483-1-AZe8OKi9NWunQ9x4f3lkdOpb/hJIp/3oyXUqPhkcmcEHXvFTkMcv77NSclcoz9DjRhwC62ZvANkWImyVRM4seH4=; - _cfuvid=8qN4npFFWXAqn.wugd0jrQ36YkreDcTGH14We.FcBjg-1703102483136-0-604800000 + - __cf_bm=pAjnXPCKLP174gonPz8UGOZQoqFHUiNUU1v.InXD9Cc-1707550293-1-AR6tvKypbDhglRorLyQ7tcDW2e2sIe2xhcStICqoI+acaWE0jn7KVUv77LWqGCWnFpmTGO7MhKPDfWYrMV/sLzw=; + _cfuvid=UE60YgzaF5dZyQJY5bLlOgAnsQXnIWgI6RW4LyZAQsQ-1707550293924-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.0 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -46,7 +47,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.0 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -54,22 +55,25 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8Xx2vMXN4WCrWeeO4DOAowhb3oeDJ\",\n \"object\": - \"chat.completion\",\n \"created\": 1703102489,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? No\\nFinal - Answer: As an AI, I don't possess feelings or emotions, so I don't love or hate - anything. However, I can provide detailed analysis and research on AI agents. 
- They are a fascinating field of study with the potential to revolutionize many - industries, although they also present certain challenges and ethical considerations.\"\n - \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n - \ ],\n \"usage\": {\n \"prompt_tokens\": 291,\n \"completion_tokens\": - 75,\n \"total_tokens\": 366\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RTXW/bOBB8968Y6KUviuH0K61fDi76ZRS9A4K2KHpXBGtqJbKhuAp36ZyvyH8v + KKcx7kUgdjizM6vlzwXQhK5Zo3GezI1TPHtx4y7Gzx9TevdtH/76QOXV+y/fbvzl5W739U3TVobs + frCz36ylk3GKbEHSEXaZybiqnl+sLp49Wz1++WIGRuk4Vtow2dnTs9Xz8yf3DC/BsTZr/L0AgJ/z + t3pLHf/brLFqf1dGVqWBm/XDJaDJEmulIdWgRsma9gQ6ScZptvvJSxm8rfFasEVi7mCCogyCicQ/ + 8Kf8k96GRBGbpLec19goKGGzbbFFJ0himESVVcGj1NTawnNyjC0cpXohyp4hGZ6MQelgPqRhifdy + y3vOLUhByKxM2flamJmYsuxDxzhIwW0wXxsfZx32VYfiQYNCemy22AycTJenIyjznELRsYYhHdNN + nHvJI4z0WmGeDLdSYgc7TMFRjAdkvikhM3wZKSEk4xjDUCMt8cnzAZ72jEnqGAPFqpp5L7HU9OE/ + xp5ykKJQdiZZEZKLpQtpgGeK5h1lbsFdcVQpLSh16EOiucXDWGK4nscFMc95ztLCqoE6nR3XP9Wh + l4ydmMeOE/fBVUdVz1Me+xIxlTyJss7eM/dSewd7pGDVU4I5E5uvM8BQQscxJNZZKvNQ4my1ZsEU + yfHc1zyHjFI3cNncr9jdw25GGaYsu7rHqcT4UO9DCuqvMpNKqnuoJtORfrcAvs9voPxvrZspyzjZ + lck1pyr4ZPXyqNecntsJPT+/uEdNjOIJePr4+eLeYqMHNR6v+pAGzlMO85uoRhd3i18AAAD//wMA + m/i2NQoEAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 838a7a3efab6a4b0-GRU + - 8532a7561c9a5c1d-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -79,7 +83,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 20 Dec 2023 20:01:35 GMT + - Sat, 10 Feb 2024 07:31:44 GMT Server: - cloudflare Transfer-Encoding: @@ -93,7 +97,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '6060' + - '6268' openai-version: - '2020-10-01' strict-transport-security: @@ -102,24 +106,19 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299652' - x-ratelimit-remaining-tokens_usage_based: - - '299652' + - '299626' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 69ms - x-ratelimit-reset-tokens_usage_based: - - 69ms + - 74ms x-request-id: - - 3ad0d047d5260434816f61ec105bdbb8 - http_version: HTTP/1.1 - status_code: 200 + - req_4be2c88644d6a769dcf030a33e294850 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -131,18 +130,22 @@ interactions: the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI for its opinion on AI - agents, based on the impression that the AI dislikes them. The AI clarifies - that it doesn''t hold personal sentiments towards AI or any technology, but - instead analyzes them objectively. The AI finds AI agents a fascinating domain - of research with great potential for task automation and optimization across - industries, but acknowledges they present challenges such as ethical considerations - around privacy and decision-making.\n\nNew lines of conversation:\nHuman: do - you hate AI Agents?\n\nThis is the context you are working with:\nI heard you - LOVE them\nAI: As an AI, I don''t possess feelings or emotions, so I don''t - love or hate anything. 
However, I can provide detailed analysis and research - on AI agents. They are a fascinating field of study with the potential to revolutionize - many industries, although they also present certain challenges and ethical considerations.\n\nNew - summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' + Agents, assuming it dislikes them. The AI, as a researcher, maintains a neutral + perspective on all subjects, including AI agents. It recognizes their potential + to revolutionize sectors like healthcare, education, and finance, and acknowledges + their capability to perform tasks requiring human intelligence. However, it + also notes that these tools can be used for both beneficial and harmful purposes, + emphasizing the importance of ethical guidelines and regulations.\n\nNew lines + of conversation:\nHuman: do you hate AI Agents?\nThis is the context you''re + working with:\nI heard you LOVE them\nAI: As an AI, I do not possess emotions, + hence I cannot love or hate anything. However, as a researcher, I can provide + you with an objective analysis of AI Agents. AI Agents are tools designed to + perform tasks that would typically require human intelligence. They have potential + to revolutionize various sectors including healthcare, education, and finance. + However, like any other tool, they can be used for both beneficial and harmful + purposes. Therefore, it''s essential to have ethical guidelines and regulations + in place for their usage.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": + false, "temperature": 0.7}' headers: accept: - application/json @@ -151,16 +154,16 @@ interactions: connection: - keep-alive content-length: - - '1726' + - '1986' content-type: - application/json cookie: - - __cf_bm=k2HUdEp80irAkv.3wl0c6unbzRUujrE1TnJeObxyuHw-1703102483-1-AZe8OKi9NWunQ9x4f3lkdOpb/hJIp/3oyXUqPhkcmcEHXvFTkMcv77NSclcoz9DjRhwC62ZvANkWImyVRM4seH4=; - _cfuvid=8qN4npFFWXAqn.wugd0jrQ36YkreDcTGH14We.FcBjg-1703102483136-0-604800000 + - __cf_bm=pAjnXPCKLP174gonPz8UGOZQoqFHUiNUU1v.InXD9Cc-1707550293-1-AR6tvKypbDhglRorLyQ7tcDW2e2sIe2xhcStICqoI+acaWE0jn7KVUv77LWqGCWnFpmTGO7MhKPDfWYrMV/sLzw=; + _cfuvid=UE60YgzaF5dZyQJY5bLlOgAnsQXnIWgI6RW4LyZAQsQ-1707550293924-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.0 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -170,7 +173,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.0 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -178,26 +181,26 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8Xx32X5innWZd8vEETP1jZMLH3b1O\",\n \"object\": - \"chat.completion\",\n \"created\": 1703102496,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI for its opinion - on AI agents, based on the impression that the AI dislikes them. The AI clarifies - that it doesn't hold personal sentiments towards AI or any technology, but instead - analyzes them objectively. The AI finds AI agents a fascinating domain of research - with great potential for task automation and optimization across industries, - but acknowledges they present challenges such as ethical considerations around - privacy and decision-making. 
When asked again if it hates or loves AI agents, - the AI reiterates that it doesn't possess feelings or emotions, but can provide - detailed analysis and research on AI agents.\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 300,\n \"completion_tokens\": 117,\n \"total_tokens\": 417\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA4RUwW4bRwy96yuIPa8FqY7qxjcDBRoX6MVIEBRFYVCz3F1Gs+R4yLFrB/73Ykay + 3F7aiwCRfHx8b8n5vgLoeOiuoQszelhSvPjpIVz5px9+HX/5/e7bx7svdx8Oz5L1y8+/6ddd11eE + 7r9R8DfUOuiSIjmrHNMhEzrVrturzdVut7nc7Fpi0YFihU3JLz5cbH7cXp4Qs3Ig667hjxUAwPf2 + W2eTgf7qrmHTv0UWMsOJuutzEUCXNdZIh2ZsjuJd/54MKk7Sxv08E8xlQQG0g4HPBDe3MGoGdgNN + LKwCKjV6M5G49YBmZWGZgB0GtsgHashlDZ8bvpYAQiYjzGGm3MOCLI4sNS5UPGOERNkSBedHqgwY + I1hpRloPLCGWobLc3AI25jXcOmQKOgm/HCk5Q9KqhTGCK2R61Fiq8fxCYBRcs0GdEGbC6HPATD3Q + UALWqh5QBhhZUAId/2A4iD5FGqYzRcCEe47sz5UjUR41L+DNsUwPhXOd82gji1OMPJEEWsMnfaLH + qp8dMJqCqLe26LW3EbhqNAgosCcoRkMzf68+w56ERg5VWh1sxryMJUIqOamR9UBLmtH4pbLXL8dL + 0uxVCugI5DMHjDAVHiiykLU2maYSm3hbw9eZBMaSfaYMD4WsxmkA3GvxtgIjVexk4PqEebB/LsJp + W0LEzCO/6apboWRVKiQ1IzOgRRvjUUj1BoS4sUZ9bMUZZvQ247PPLNPpa7NTxrNnZ3LATJD0iXK1 + 5Ghiqzg5+R8G9lBkoGxB85txQiff/98zYDltRalXt+5OZ/V6vseoU8q6r7crJcZzfGRhm+8zoanU + 2zPXdIS/rgD+bHdf/nXKXcq6JL93PZDUhpe77bFf9/7EvGe3u9Or0Lk6xvfEbrNdnUbs7NmclvuR + ZaKcMrd3oA66el39DQAA//8DACyZIo/+BAAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 838a7a67ecaca4b0-GRU + - 8532a77e4cb85c1d-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -207,7 +210,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 20 Dec 2023 20:01:41 GMT + - Sat, 10 Feb 2024 07:31:51 GMT Server: - cloudflare Transfer-Encoding: @@ -221,7 +224,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '5610' + - '6001' openai-version: - '2020-10-01' strict-transport-security: @@ -230,22 +233,17 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299585' - x-ratelimit-remaining-tokens_usage_based: - - '299585' + - '299519' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 83ms - x-ratelimit-reset-tokens_usage_based: - - 83ms + - 96ms x-request-id: - - 5b0b96506faa544c5d35b52286a3389c - http_version: HTTP/1.1 - status_code: 200 + - req_4571b50d6566e754edbc36a10d8fa7c5 + status: + code: 200 + message: OK version: 1 diff --git a/tests/agent_tools/cassettes/test_delegate_work.yaml b/tests/agent_tools/cassettes/test_delegate_work.yaml index d9cf8f23c..546ccb5e7 100644 --- a/tests/agent_tools/cassettes/test_delegate_work.yaml +++ b/tests/agent_tools/cassettes/test_delegate_work.yaml @@ -2,18 +2,18 @@ interactions: - request: body: '{"messages": [{"role": "user", "content": "You are researcher.\nYou''re an expert researcher, specialized in technology\n\nYour personal goal is: make - the best research and analysis on content about AI and AI agents\n\nTOOLS:\n------\nYou - have access to the following tools:\n\n\n\nTo use a tool, please use the exact - following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the - action to take, should be one of []\nAction Input: the input to the action\nObservation: - the result of the action\n```\n\nWhen you have a response for your task, or - if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? 
No\nFinal Answer: [your response here]\n```\n\t\tThis - is the summary of your work so far:\n \nBegin! This is VERY important to - you, your job depends on it!\n\nCurrent Task: share your take on AI Agents\n\nThis - is the context you are working with:\nI heard you hate them\n\n"}], "model": - "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": - 0.7}' + the best research and analysis on content about AI and AI agentsTOOLS:\n------\nYou + have access to only the following tools:\n\n\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: + the tool you wanna use, should be one of [], just the name.\nAction Input: Any + and all relevant information input and context for using the tool\nObservation: + the result of using the tool\n```\n\nWhen you have a response for your task, + or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]```This is the + summary of your work so far:\nBegin! This is VERY important to you, your job + depends on it!\n\nCurrent Task: share your take on AI Agents\nThis is the context + you''re working with:\nI heard you hate them\n"}], "model": "gpt-4", "n": 1, + "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -22,13 +22,13 @@ interactions: connection: - keep-alive content-length: - - '1030' + - '1082' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.6.0 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -38,7 +38,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.0 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -46,27 +46,26 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8Xx2cQWLZ8Nd34HGmbQluyZWlD58b\",\n \"object\": - \"chat.completion\",\n \"created\": 1703102470,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? No\\nFinal - Answer: I apologize if my previous statements have given you the impression - that I hate AI agents. As a technology researcher, I don't hold personal sentiments - towards AI or any other technology. Rather, I analyze them objectively based - on their capabilities, applications, and implications. AI agents, in particular, - are a fascinating domain of research. They hold tremendous potential in automating - and optimizing various tasks across industries. However, like any other technology, - they come with their own set of challenges, such as ethical considerations around - privacy and decision-making. 
My objective is to understand these technologies - in depth and provide a balanced view.\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 211,\n \"completion_tokens\": 134,\n \"total_tokens\": 345\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SUQW/cRgyF7/srCF1ykRe7jmMneykMJG0XaIIeAvTQFgY1ojS0R6Qy5Ky7Cfzf + i9Gu7faiw1B8/PQ4Tz9WAA33zQ6aENHDNKeL99/CDV59mMf9L39M++0mjP39pvv44dNv+fdj09YO + 7e4p+HPXOug0J3JWOZVDJnSqqtubzc27d5vL99dLYdKeUm0bZ7+4uthcb9+eO6JyIGt28OcKAODH + 8qxs0tM/zQ427fPJRGY4UrN7eQmgyZrqSYNmbI7iTftaDCpOsuB+jVrG6Dv4qLAHIerBFYoRILhq + +gm+6F/yMwsmuBV7pLyDWwOETEaYQ6Tcwh4mZHFkAQSh4hkTzJRtpuB8IFABTAmsLDYZ6PDSDywh + lZ5lhNs94EjitobPR7jXDtgqzZz1wD0BCpx8rpIomI7GBh0a9XXCgMGtBVFfZmtFHogSy2jrKn67 + iAPm+nHGo/DAAcXBdeYALOCRYGBKfSV0ClE06XiER/YIs1bTGFNlynTQVOqG+TvBATNrMTAKrtnA + SoiABpEweQyYqQXqS8Da0MLAghLqN/QwaaY1fI10XMAy2axi3CWCQTM42oOBR3TI9K1wJohlQgEW + p5R4pCr0PK9IT7mue/FT0EvdREIZC47UQqago/D3Wp3RnbJYu2DMWbtEE5imA8u4hl/1kQ51uYkf + KunxP3601agTb70jZ76AAh3Vu9Mv6J16hI6EBg7VtTomYp6GkmAueVYjg55mOtHqyX5erubif1zE + 8uJOpkGri+xvDEIu4bwHMscusUUgjxwwwVi4r0snWyZmGktafLeToZE4V911cw7E00uSko7Vh5o6 + KSm9nA8sbPEuE5pKTY25zqf2pxXA30tiy/9C2MxZp9nvXB9IquDl5fak17z+HF6r26vnqqtjei28 + vb5cnREbO5rTdDewjJTnzEuCK+jqafUvAAAA//8DAKstuxS4BAAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 838a79c59ea7a4b0-GRU + - 8532a7084d385c1d-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -76,14 +75,14 @@ interactions: Content-Type: - application/json Date: - - Wed, 20 Dec 2023 20:01:23 GMT + - Sat, 10 Feb 2024 07:31:33 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=k2HUdEp80irAkv.3wl0c6unbzRUujrE1TnJeObxyuHw-1703102483-1-AZe8OKi9NWunQ9x4f3lkdOpb/hJIp/3oyXUqPhkcmcEHXvFTkMcv77NSclcoz9DjRhwC62ZvANkWImyVRM4seH4=; - path=/; expires=Wed, 20-Dec-23 20:31:23 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=pAjnXPCKLP174gonPz8UGOZQoqFHUiNUU1v.InXD9Cc-1707550293-1-AR6tvKypbDhglRorLyQ7tcDW2e2sIe2xhcStICqoI+acaWE0jn7KVUv77LWqGCWnFpmTGO7MhKPDfWYrMV/sLzw=; + path=/; expires=Sat, 10-Feb-24 08:01:33 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=8qN4npFFWXAqn.wugd0jrQ36YkreDcTGH14We.FcBjg-1703102483136-0-604800000; + - _cfuvid=UE60YgzaF5dZyQJY5bLlOgAnsQXnIWgI6RW4LyZAQsQ-1707550293924-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -96,7 +95,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '12923' + - '7710' openai-version: - '2020-10-01' strict-transport-security: @@ -105,24 +104,19 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299769' - x-ratelimit-remaining-tokens_usage_based: - - '299769' + - '299753' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 46ms - x-ratelimit-reset-tokens_usage_based: - - 46ms + - 49ms x-request-id: - - 6468cdd7ae76aea910c72a503a58b8da - http_version: HTTP/1.1 - status_code: 200 + - req_1b998b57f55feb2ec76eee43f16e520e + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -134,17 +128,18 @@ interactions: the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: share - your take on AI Agents\n\nThis is the context you are working with:\nI heard - you hate them\nAI: I apologize if my previous statements have given you the - impression that I hate AI agents. As a technology researcher, I don''t hold - personal sentiments towards AI or any other technology. Rather, I analyze them - objectively based on their capabilities, applications, and implications. AI - agents, in particular, are a fascinating domain of research. They hold tremendous - potential in automating and optimizing various tasks across industries. However, - like any other technology, they come with their own set of challenges, such - as ethical considerations around privacy and decision-making. My objective is - to understand these technologies in depth and provide a balanced view.\n\nNew - summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' + your take on AI Agents\nThis is the context you''re working with:\nI heard you + hate them\nAI: As a researcher, I maintain a neutral perspective on all subjects + of research including AI agents. My job is to provide an objective analysis + based on facts, not personal feelings. AI Agents are a significant topic in + the field of technology with potential to revolutionize various sectors such + as healthcare, education, finance and more. They are responsible for tasks that + require human intelligence such as understanding natural language, recognizing + patterns, and problem solving. However, like any technology, they are tools + that can be used for both beneficial and harmful purposes depending on the intent + of the user. Therefore, it''s crucial to establish ethical guidelines and regulations + for their use.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, + "temperature": 0.7}' headers: accept: - application/json @@ -153,16 +148,16 @@ interactions: connection: - keep-alive content-length: - - '1622' + - '1656' content-type: - application/json cookie: - - __cf_bm=k2HUdEp80irAkv.3wl0c6unbzRUujrE1TnJeObxyuHw-1703102483-1-AZe8OKi9NWunQ9x4f3lkdOpb/hJIp/3oyXUqPhkcmcEHXvFTkMcv77NSclcoz9DjRhwC62ZvANkWImyVRM4seH4=; - _cfuvid=8qN4npFFWXAqn.wugd0jrQ36YkreDcTGH14We.FcBjg-1703102483136-0-604800000 + - __cf_bm=pAjnXPCKLP174gonPz8UGOZQoqFHUiNUU1v.InXD9Cc-1707550293-1-AR6tvKypbDhglRorLyQ7tcDW2e2sIe2xhcStICqoI+acaWE0jn7KVUv77LWqGCWnFpmTGO7MhKPDfWYrMV/sLzw=; + _cfuvid=UE60YgzaF5dZyQJY5bLlOgAnsQXnIWgI6RW4LyZAQsQ-1707550293924-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.0 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -172,7 +167,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.0 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -180,23 +175,24 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8Xx2pqWD01uG0roGJ4daSfvaXLchg\",\n \"object\": - \"chat.completion\",\n \"created\": 1703102483,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI for its opinion - on AI agents, based on the impression that the AI dislikes them. The AI clarifies - that it doesn't hold personal sentiments towards AI or any technology, but instead - analyzes them objectively. 
The AI finds AI agents a fascinating domain of research - with great potential for task automation and optimization across industries, - but acknowledges they present challenges such as ethical considerations around - privacy and decision-making.\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 279,\n \"completion_tokens\": - 81,\n \"total_tokens\": 360\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RTyW7bQAy9+ysInRXDzmLHvqWXJgV6C9AWRRHQI0piPSInQyor8u/FyImDXgSI + 5FtIPb3OACpuqi1UoUcPQ4onl/dhzV83dn69/B5+0C79XH7brC8fv7S/Vk9VXRC6+0vBP1DzoEOK + 5KxyaIdM6FRYl+vF+uJicbo5nxqDNhQLrEt+cn6yWC3P3hG9ciCrtvB7BgDwOj2LN2noqdrCov6o + DGSGHVXb4xBAlTWWSoVmbI7iVf3ZDCpOMtm97Qn6cUABtL2B9wRXN9BqBnYDTSysAiqletWRuNWA + ZuPA0gE7NGyR9zQhhzncTvgyAgiZjDCHnnINA7I4spS60OgZIyTKlig4P1BRwBjBxumQVgNLiGNT + VK5uACflOdw4ZAraCb8cJDlD0rILYwRXyPSgcSyH5xcCo+CaDYpD6Amj9wEz1UDNGLBM1YDSQMuC + EujwgmEv+hip6Y4SARPuOLI/F41EudU8gE8Xy3Q/ci4+D2dkcYqRO5JAc7jWR3oo+7MDRlMQ9YkW + vXAbgatGg4ACO4LRqJmOv1PvYUdCLYeyWjHWYx7aMUIac1Ijq4GG1KPxS1EvX46HpNnLKqAtkPcc + MEI3ckORhWyiydSNcVre5tV7Jt6OYYrapay7EjwZYzzWWxa2/i4TmkoJjrmmA/xtBvBnCu34Xw6r + lHVIfue6JymEp5erA1/1+X98djfvia5cHeNn/Wy9mr07rOzZnIa7lqWjnDJPGS4+Z2+zfwAAAP// + AwAwOZyrugMAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 838a7a17fbb9a4b0-GRU + - 8532a7393ffa5c1d-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -206,7 +202,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 20 Dec 2023 20:01:29 GMT + - Sat, 10 Feb 2024 07:31:38 GMT Server: - cloudflare Transfer-Encoding: @@ -220,7 +216,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '5872' + - '4240' openai-version: - '2020-10-01' strict-transport-security: @@ -229,22 +225,17 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299610' - x-ratelimit-remaining-tokens_usage_based: - - '299610' + - '299601' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 77ms - x-ratelimit-reset-tokens_usage_based: - - 77ms + - 79ms x-request-id: - - bff02b569530c001aa9a2adba9adbeb9 - http_version: HTTP/1.1 - status_code: 200 + - req_502a082c0bdbfecdda4ba28f5a2fa01a + status: + code: 200 + message: OK version: 1 diff --git a/tests/cassettes/test_agent_custom_max_iterations.yaml b/tests/cassettes/test_agent_custom_max_iterations.yaml index ea909ca81..aa53403fd 100644 --- a/tests/cassettes/test_agent_custom_max_iterations.yaml +++ b/tests/cassettes/test_agent_custom_max_iterations.yaml @@ -1,19 +1,20 @@ interactions: - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to the following + personal goal is: test goalTOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? 
No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: The final answer is 42. But don''t give it yet, instead keep using the - `get_final_answer` tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], - "stream": false, "temperature": 0.7}' + to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: The final + answer is 42. But don''t give it yet, instead keep using the `get_final_answer` + tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, + "temperature": 0.7}' headers: accept: - application/json @@ -22,13 +23,13 @@ interactions: connection: - keep-alive content-length: - - '1075' + - '1137' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -38,7 +39,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -46,19 +47,19 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8fovReNHiSqXqqsbmk81h2ZTrcGTM\",\n \"object\": - \"chat.completion\",\n \"created\": 1704977897,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: - get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 233,\n \"completion_tokens\": 24,\n \"total_tokens\": 257\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RQy07DMBC85ytWPreo70AuCAkEbU+VUBEvRa6zTQKO17U3orTqvyOnTy4+zOyM + Z2YbAYgyEwkIVUhWldXt65XarKvp02qyrl5Gq7faTeaPy5vNbDZ9mItWUNDiCxUfVVeKKquRSzJ7 + WjmUjMG1G3fi4bA/GsYNUVGGOshyy+1BuzPq9g+KgkqFXiTwHgEAbJs3ZDMZrkUCndYRqdB7maNI + TkcAwpEOiJDel56lYdE6k4oMo2niPhdU5wUncE8wBoOYARPUHkECE+lbeEX/Ye5UKJNAjpwuSyN1 + Ko3/QXdkYGxszQkMeuLwze6UT1NuHS1CF1NrfcKXpSl9kTqUnkzI4pnsXr6LAD6bHep/1YR1VFlO + mb7RBMPeYLD3E+fJL9j+gWRiqS/wURwdEgr/6xmrUCpHZ13ZzBJyRrvoDwAA//8DALjbA1INAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 843d5491bed877be-GRU + - 8532f9589e8cface-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -68,14 +69,14 @@ interactions: Content-Type: - application/json Date: - - Thu, 11 Jan 2024 12:58:21 GMT + - Sat, 10 Feb 2024 08:27:39 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=; - path=/; expires=Thu, 11-Jan-24 13:28:21 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=x6LeaTkmZaqH4TGJu7tZ8TeJ_NpbCUDZT6itJzZCXxM-1707553659-1-ASfokhwi2DxOhdHPlCvbhaMQ9Tc5WobFIYAiUyrDKgdfPq8a4YzQFVNlu7YPh2Y75jk0BfElFVyjjuMPNKMQES8=; + path=/; expires=Sat, 10-Feb-24 08:57:39 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000; + - _cfuvid=xQOLHjHUUHoh_eMZH8nmvOh0dscgO9v.kI6SzvoXpaE-1707553659919-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -88,7 +89,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '3492' + - '2142' openai-version: - '2020-10-01' strict-transport-security: @@ -100,31 +101,35 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299755' + - '299739' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 49ms + - 52ms x-request-id: - - 6d96a0ac532ebce14719a35e90f453e4 - http_version: HTTP/1.1 - status_code: 200 + - req_edea71805ee5bca356618ebcbdcd962d + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to the following - tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final - answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo - use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: The final answer is 42. But don''t give it yet, instead keep using the - `get_final_answer` tool.\nThought: Do I need to use a tool? 
Yes\nAction: get_final_answer\nAction - Input: [42]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": - ["\nObservation"], "stream": false, "temperature": 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription: + get_final_answer(numbers) -> float - Get the final answer but don''t give it + yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid + ouput schema:\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\n\nThe output should be formatted as a JSON instance that conforms + to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -133,16 +138,16 @@ interactions: connection: - keep-alive content-length: - - '1186' + - '1407' content-type: - application/json cookie: - - __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=; - _cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000 + - __cf_bm=x6LeaTkmZaqH4TGJu7tZ8TeJ_NpbCUDZT6itJzZCXxM-1707553659-1-ASfokhwi2DxOhdHPlCvbhaMQ9Tc5WobFIYAiUyrDKgdfPq8a4YzQFVNlu7YPh2Y75jk0BfElFVyjjuMPNKMQES8=; + _cfuvid=xQOLHjHUUHoh_eMZH8nmvOh0dscgO9v.kI6SzvoXpaE-1707553659919-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -152,7 +157,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -160,20 +165,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8fovVztGO4KZeiuSpMkfDC9bJ5sVV\",\n \"object\": - \"chat.completion\",\n \"created\": 1704977901,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"According to the task, I should re-use - the `get_final_answer` tool. I'll input the observed result back into the tool. 
- \\nAction: get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 266,\n \"completion_tokens\": 41,\n \"total_tokens\": 307\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRW4/TMBCF3/MrRvPcouz2tuR5uQgJtEJCAjYodZ1JYrDHwZ5oWVX578huadmX + PJwz3+TM8bEAQNNiBagHJdqNdnn3uy3Lu+H+e/lJbsu3mzdfPn778PDu9fPD1+4zLhLhDz9Jyz/q + lfZutCTG88nWgZRQ2nqzK3ebzWq7LbPhfEs2Yf0oy/Wy3N6szsTgjaaIFTwWAADH/E3ZuKU/WEHm + s+IoRtUTVpchAAzeJgVVjCaKYsHF1dSehTjHfU+BwESQgeCJrF12PjglQi0YTqAm8F22ox7Iqarm + /X5f87FmgBq7iXW6s2HlqMYKauxJms6wso3i+EShxsVpVoV+csQS01zmk8qTO1DI2vo2iXPNc/4J + nhPPl1Ot78fgD6kWnqy96J1hE4cmkIqe01lR/HjC5wLgR650etESjsG7URrxv4jTwlW5O+3D6+td + 3fW5bxQvyv5HrXfFOSHG5yjk0u09hTGY3HDKWczFXwAAAP//AwAGYzuRWAIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 843d54aacf5677be-GRU + - 8532f966af17face-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -183,7 +188,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 11 Jan 2024 12:58:28 GMT + - Sat, 10 Feb 2024 08:27:42 GMT Server: - cloudflare Transfer-Encoding: @@ -197,7 +202,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '6695' + - '1788' openai-version: - '2020-10-01' strict-transport-security: @@ -209,128 +214,16 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299728' + - '299683' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 54ms + - 63ms x-request-id: - - 12d68fab91102b930ed5047fb3f61759 - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to the following - tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final - answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo - use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: The final answer is 42. But don''t give it yet, instead keep using the - `get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [42]\nObservation: 42\nThought: According to the task, I should re-use - the `get_final_answer` tool. I''ll input the observed result back into the tool. - \nAction: get_final_answer\nAction Input: [42]\nObservation: I just used the - get_final_answer tool with input [42]. 
So I already know the result of that - and don''t need to use it now.\n\nThought: "}], "model": "gpt-4", "n": 1, "stop": - ["\nObservation"], "stream": false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '1500' - content-type: - - application/json - cookie: - - __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=; - _cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.7.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.7.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8fovcLgRvfGBN9CBduJbbPc5zd62B\",\n \"object\": - \"chat.completion\",\n \"created\": 1704977908,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: - I have used the `get_final_answer` tool as instructed, but I will not provide - the final answer yet as the task specifies.\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 342,\n \"completion_tokens\": 40,\n \"total_tokens\": 382\n },\n \"system_fingerprint\": - null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 843d54d65de877be-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 11 Jan 2024 12:58:33 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '5085' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299650' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 69ms - x-request-id: - - 87d6e9e91fa2417e12fea9de2c6782de - http_version: HTTP/1.1 - status_code: 200 + - req_27c37e6906d1612443201ec28f4ab65b + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -343,8 +236,7 @@ interactions: is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: The final answer is 42. 
But don''t give it yet, instead keep using the `get_final_answer` - tool.\nAI: I have used the `get_final_answer` tool as instructed, but I will - not provide the final answer yet as the task specifies.\n\nNew summary:"}], + tool.\nAI: Agent stopped due to iteration limit or time limit.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' headers: accept: @@ -354,16 +246,16 @@ interactions: connection: - keep-alive content-length: - - '1067' + - '997' content-type: - application/json cookie: - - __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=; - _cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000 + - __cf_bm=x6LeaTkmZaqH4TGJu7tZ8TeJ_NpbCUDZT6itJzZCXxM-1707553659-1-ASfokhwi2DxOhdHPlCvbhaMQ9Tc5WobFIYAiUyrDKgdfPq8a4YzQFVNlu7YPh2Y75jk0BfElFVyjjuMPNKMQES8=; + _cfuvid=xQOLHjHUUHoh_eMZH8nmvOh0dscgO9v.kI6SzvoXpaE-1707553659919-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -373,7 +265,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -381,20 +273,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8fovhiBR4rfXixci7fgAObnx5QwGQ\",\n \"object\": - \"chat.completion\",\n \"created\": 1704977913,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human instructs the AI to use the - `get_final_answer` tool, but not to reveal the final answer, which is 42. 
The - AI complies and uses the tool without providing the final answer.\"\n },\n - \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n - \ \"usage\": {\n \"prompt_tokens\": 190,\n \"completion_tokens\": 43,\n - \ \"total_tokens\": 233\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRPW/bMBCGd/2KA2fZkOWPFN7aoWmRoUuADkXhMNRZYkzyWN7JThP4vxekFRtd + OLwvn/t4770CULZTW1Bm0GJ8dLNPf7qmbb4+3j8c5W2Mh5X74t5+bvbh9aH7oepM0PMLGvmg5oZ8 + dCiWwsU2CbVgrrq4a+7W6+Vm0xbDU4cuY32U2WrWbBbLiRjIGmS1hV8VAMB7efNsocNXtYWm/lA8 + Muse1fb6CUAlcllRmtmy6CCqvpmGgmAo4z4OCMPodQAbWNJohEEGhM/fQQgCCfT2iEXa26Ad6MAn + TDWcBmsGsAyrtgYdusKj7uCAGGFkG/pCPfUou4LuLugTCJGbwzc64TFXmtqxUGToRsyNrWDSOT+g + BGI9grPeylxNS5yv2zvqY6LnnFQYnbvqexssD7uEminkTXP9C36uAH6XlMf/glMxkY+yEzpg4HKs + 5aWeuh305q7ayRQS7W56u1hX04SK/7Kgz/v3mGKyJfQ8Z3Wu/gEAAP//AwDBe0ViawIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 843d54f82f5577be-GRU + - 8532f9741edfface-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -404,7 +296,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 11 Jan 2024 12:58:41 GMT + - Sat, 10 Feb 2024 08:27:44 GMT Server: - cloudflare Transfer-Encoding: @@ -418,7 +310,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '7937' + - '2184' openai-version: - '2020-10-01' strict-transport-security: @@ -430,13 +322,14 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299749' + - '299765' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 50ms + - 46ms x-request-id: - - 79f30ffd011db4ab6e886411b24ae49d - http_version: HTTP/1.1 - status_code: 200 + - req_e6ab4628c707b177efb7364b66255c39 + status: + code: 200 + message: OK version: 1 diff --git a/tests/cassettes/test_agent_execution_with_specific_tools.yaml b/tests/cassettes/test_agent_execution_with_specific_tools.yaml index 51bce8237..c163e9939 100644 --- a/tests/cassettes/test_agent_execution_with_specific_tools.yaml +++ b/tests/cassettes/test_agent_execution_with_specific_tools.yaml @@ -1,20 +1,18 @@ interactions: - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together. \n\t\t\tThe input to this tool should be a - comma separated list of numbers of \n\t\t\tlength two, representing the two - numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the - input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact - following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the - action to take, should be one of [multiplier]\nAction Input: the input to the - action\nObservation: the result of the action\n```\n\nWhen you have a response - for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis - is the summary of your work so far:\n \nBegin! 
This is VERY important to - you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n\n"}], "model": - "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nBegin! This is VERY + important to you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n"}], + "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: accept: @@ -24,13 +22,13 @@ interactions: connection: - keep-alive content-length: - - '1199' + - '1046' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.6.0 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -40,7 +38,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.0 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -48,18 +46,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8XuE6OrjKKco53f0TIqPQEE3nWeNj\",\n \"object\": - \"chat.completion\",\n \"created\": 1703091650,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: - multiplier\\nAction Input: 3,4\\n\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 260,\n \"completion_tokens\": - 24,\n \"total_tokens\": 284\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRT0/jMBDF7/kUozm3q6ZpAeWyQkKwXXFC5YAoqlxnmpi1PV57Ii1U/e7I6T/2 + 4sN780a/N94VAGgarAF1p0S7YMc3f/Xnw2P3e9qLU1X44CdeTstfPJHnu3sc5QRv3knLKfVDswuW + xLA/2DqSEspby+vJ9XxeXZWzwXDckM2xNsh4Np5cldUx0bHRlLCG1wIAYDe8mc039A9rmIxOiqOU + VEtYn4cAMLLNCqqUTBLlBUcXU7MX8gPusuO+7aSGO4YFeKIGhKFPBAqE2f6EF0orf6tzmRpcb8UE + ayieNFj40EsNuxVuTUyy9r3bUFxhDdUIVphIs2++qbM9Hkn25wqW2xB5k+v63tqzvjXepG4dSSX2 + GTcJh0N8XwC8Dafq/2uPIbILshb+Qz4vnE7Lwz68/MrFraqjKSzKfkvNZ8WRENNHEnLrrfEtxRDN + cLnMWeyLLwAAAP//AwBlmMtdMAIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 8389719d5cba0110-GRU + - 8532f8493b617ae2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -69,14 +69,14 @@ interactions: Content-Type: - application/json Date: - - Wed, 20 Dec 2023 17:00:52 GMT + - Sat, 10 Feb 2024 08:26:56 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=rIaStuxTRr_ZC91uSFg5cthTUq95O6PBdkxXZ68fLYc-1703091652-1-AZu+nvbL+3bwwQOIKpnYgLf5m5Mp0jfQ2baAlDRl1+FiTPO+/+GjcF4Upw4M8vtfh39ZyWF+l68r83qCS9OpObU=; - path=/; expires=Wed, 20-Dec-23 17:30:52 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=1JhWXMH3iZcZ0BWWZnJv58b8vpZVanyELVNiFSXRQ68-1707553616-1-AWTzmGICSkzNkeXiQoVEJvXvBBIqr5TxNx/nb1NbwufG0ezOMOzetxt+iWAkNhOriZCuVhYt9xzLwns/QMwBgsg=; + path=/; expires=Sat, 10-Feb-24 08:56:56 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=Ajd5lPskQSkBImLJdkywZGG4vHMkMCBcxb8TonP9OKc-1703091652762-0-604800000; + - _cfuvid=1IwF465fOvshRJJy.YyY4tF6m4W.Ag0iqebWN8mFg.U-1707553616181-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -89,7 +89,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2423' + - '1569' openai-version: - '2020-10-01' strict-transport-security: @@ -98,42 +98,39 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299729' - x-ratelimit-remaining-tokens_usage_based: - - '299729' + - '299761' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 54ms - x-ratelimit-reset-tokens_usage_based: - - 54ms + - 47ms x-request-id: - - 8f99f43731fa878eaf0fcbf0719d1b3f - http_version: HTTP/1.1 - status_code: 200 + - req_400aaa239d63866a5ffa859c4e6372f6 + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together. \n\t\t\tThe input to this tool should be a - comma separated list of numbers of \n\t\t\tlength two, representing the two - numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the - input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact - following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: the - action to take, should be one of [multiplier]\nAction Input: the input to the - action\nObservation: the result of the action\n```\n\nWhen you have a response - for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis - is the summary of your work so far:\n \nBegin! This is VERY important to - you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought: Do - I need to use a tool? Yes\nAction: multiplier\nAction Input: 3,4\n\nObservation: - 12\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": - false, "temperature": 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": + 3, \"second_number\": 4}\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -142,16 +139,16 @@ interactions: connection: - keep-alive content-length: - - '1305' + - '1494' content-type: - application/json cookie: - - __cf_bm=rIaStuxTRr_ZC91uSFg5cthTUq95O6PBdkxXZ68fLYc-1703091652-1-AZu+nvbL+3bwwQOIKpnYgLf5m5Mp0jfQ2baAlDRl1+FiTPO+/+GjcF4Upw4M8vtfh39ZyWF+l68r83qCS9OpObU=; - _cfuvid=Ajd5lPskQSkBImLJdkywZGG4vHMkMCBcxb8TonP9OKc-1703091652762-0-604800000 + - __cf_bm=1JhWXMH3iZcZ0BWWZnJv58b8vpZVanyELVNiFSXRQ68-1707553616-1-AWTzmGICSkzNkeXiQoVEJvXvBBIqr5TxNx/nb1NbwufG0ezOMOzetxt+iWAkNhOriZCuVhYt9xzLwns/QMwBgsg=; + _cfuvid=1IwF465fOvshRJJy.YyY4tF6m4W.Ag0iqebWN8mFg.U-1707553616181-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.0 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -161,7 +158,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.0 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -169,18 +166,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8XuE86Ud94rsOP7VA4Bgxo6RM5XE6\",\n \"object\": - \"chat.completion\",\n \"created\": 1703091652,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - 
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: - 3 times 4 is 12.\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 293,\n \"completion_tokens\": - 22,\n \"total_tokens\": 315\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRzW7bMBCE73qKxZ7lwo7/Gr2AEQQ5tUALRIVAUWuZCblkyFWb1tC7F5R/4lx4 + mG9mMbs8FgBoOqwA9UGJdsHOvr7pfw/bfds//mh3u+7n0/zx++7wp334/e31Dcuc8O0Labmkvmjv + giUxnk9YR1JCeepiO9+u18vNYjMB5zuyOdYHma1m881ieU4cvNGUsILnAgDgOL25G3f0jhXMy4vi + KCXVE1ZXEwBGb7OCKiWTRLFg+QG1ZyGe6h5rBqhxP7DOdRtWjmqsoEY3WDHBGoo1lieXiv3giCVl + x5ScsiYmaXhwbbZWsCwvJJH23N2gVSZjzSOey4zXLazvQ/Rt3pgHa6/63rBJhyaSSp5z4yQ+nOJj + AfBrutbw6QAYondBGvGvxHng8m59mocfH3NDV2coXpS90df3xbkhpr9JyDV7wz3FEM10vNyzGIv/ + AAAA//8DAGqF2SkzAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 838971ae0f120110-GRU + - 8532f859aec87ae2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -190,7 +189,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 20 Dec 2023 17:00:55 GMT + - Sat, 10 Feb 2024 08:27:00 GMT Server: - cloudflare Transfer-Encoding: @@ -204,7 +203,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2334' + - '3268' openai-version: - '2020-10-01' strict-transport-security: @@ -213,24 +212,128 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299661' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 67ms + x-request-id: + - req_2bf36cee0713ecceff13b68d8688dd7b + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nBegin! This is VERY + important to you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought: + Do I need to use a tool? 
Yes\nAction: multiplier\nAction Input: {\"first_number\": + 3, \"second_number\": 4}\nObservation: 12\nThought: "}], "model": "gpt-4", "n": + 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1190' + content-type: + - application/json + cookie: + - __cf_bm=1JhWXMH3iZcZ0BWWZnJv58b8vpZVanyELVNiFSXRQ68-1707553616-1-AWTzmGICSkzNkeXiQoVEJvXvBBIqr5TxNx/nb1NbwufG0ezOMOzetxt+iWAkNhOriZCuVhYt9xzLwns/QMwBgsg=; + _cfuvid=1IwF465fOvshRJJy.YyY4tF6m4W.Ag0iqebWN8mFg.U-1707553616181-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SQT08CMRDF7/spJj0DgV3+ZS/GSAxqJBxUSNSQsjss1baztENQCN/ddFlBLz28 + N7/XN3OIAITKRQoiW0vOTKmbw022f3RPs9nE7fYv92Y+nk436vlhnN/MR6IRCFp+YMa/VCsjU2pk + RfZkZw4lY0jtDNqDXi/px+3KMJSjDlhRcrPbbPc7SU2sSWXoRQqvEQDAoXpDN5vjl0ih4ivFoPey + QJGehwCEIx0UIb1XnqVl0biYGVlGW9UdEdyBRcyBCbYeQQIT6SuY0Ju9VVZquLZ+hy6FBFgZ9NAF + 5aETt0QdeDw30VSUjpahtd1qfdZXyiq/XjiUnmz41TOVJ/wYAbxXG2//LSFKR6bkBdMn2hAY95NT + nrgc948b1yYTS/1HH/aiuqHw357RLFbKFuhKp6oDhJ7RMfoBAAD//wMAgp6cUPcBAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8532f86f1d777ae2-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:27:01 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1412' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299703' - x-ratelimit-remaining-tokens_usage_based: - - '299703' + - '299728' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 59ms - x-ratelimit-reset-tokens_usage_based: - - 59ms + - 54ms x-request-id: - - 44304b182424a8acad8a3121817bea58 - http_version: HTTP/1.1 - status_code: 200 + - req_c84d46e08d08ae44e556a21ce820bcd1 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -256,12 +359,12 @@ interactions: content-type: - application/json cookie: - - __cf_bm=rIaStuxTRr_ZC91uSFg5cthTUq95O6PBdkxXZ68fLYc-1703091652-1-AZu+nvbL+3bwwQOIKpnYgLf5m5Mp0jfQ2baAlDRl1+FiTPO+/+GjcF4Upw4M8vtfh39ZyWF+l68r83qCS9OpObU=; - _cfuvid=Ajd5lPskQSkBImLJdkywZGG4vHMkMCBcxb8TonP9OKc-1703091652762-0-604800000 + - __cf_bm=1JhWXMH3iZcZ0BWWZnJv58b8vpZVanyELVNiFSXRQ68-1707553616-1-AWTzmGICSkzNkeXiQoVEJvXvBBIqr5TxNx/nb1NbwufG0ezOMOzetxt+iWAkNhOriZCuVhYt9xzLwns/QMwBgsg=; + _cfuvid=1IwF465fOvshRJJy.YyY4tF6m4W.Ag0iqebWN8mFg.U-1707553616181-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.0 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -271,7 +374,7 @@ interactions: 
x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.0 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -279,19 +382,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8XuEBWLslSBUMDHZRViwpoXcclYOk\",\n \"object\": - \"chat.completion\",\n \"created\": 1703091655,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI what is 3 times - 4, and the AI responds that 3 times 4 is 12.\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 155,\n \"completion_tokens\": 27,\n \"total_tokens\": 182\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SQQU/DMAyF7/0VVs7dtG5rN3oDIQGXnZCYhNCUpW4b1sYhcRkw7b+jdN0Glyjy + 82c/v0MEIHQhchCqlqxa24yWH+pn9dj59epmvSzWu2x/3y1fOvqsHu7mIg4Ebd9R8ZkaK2ptg6zJ + nGTlUDKGqcliskjTWTZNeqGlApuAVZZH89EkS2YDUZNW6EUOrxEAwKF/gzdT4JfIYRKfKy16LysU + +aUJQDhqQkVI77VnaVjEV1GRYTS93ecaoe5aaUD6nQeuEW6foCTXf62jolMMVMIMpClgHgMT7Gut + 6nOvQ2/JFIGVDJpBe0imYzGsO158NlRZR9twk+ma5lIvtdG+3jiUnkzw5JnsCT9GAG99Ht2/E4V1 + 1FreMO3QhIFJmp7miWv0V3WaDSITy+YPtUyiwaHw356x3ZTaVOis0308wWd0jH4BAAD//wMA11BS + pRUCAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 838971be49500110-GRU + - 8532f878bc667ae2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -301,7 +405,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 20 Dec 2023 17:00:58 GMT + - Sat, 10 Feb 2024 08:27:03 GMT Server: - cloudflare Transfer-Encoding: @@ -315,7 +419,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2942' + - '1303' openai-version: - '2020-10-01' strict-transport-security: @@ -324,22 +428,17 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - '299794' - x-ratelimit-remaining-tokens_usage_based: - - '299794' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - 41ms - x-ratelimit-reset-tokens_usage_based: - - 41ms x-request-id: - - 546e9b3713f3ff2d7f9868133efaa3a7 - http_version: HTTP/1.1 - status_code: 200 + - req_c207bb1f2ff87fe1da2968c6e9b2a271 + status: + code: 200 + message: OK version: 1 diff --git a/tests/cassettes/test_agent_execution_with_tools.yaml b/tests/cassettes/test_agent_execution_with_tools.yaml index a1fb31b02..cbf958e99 100644 --- a/tests/cassettes/test_agent_execution_with_tools.yaml +++ b/tests/cassettes/test_agent_execution_with_tools.yaml @@ -1,20 +1,18 @@ interactions: - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together. \n\t\t\tThe input to this tool should be a - comma separated list of numbers of \n\t\t\tlength two, representing the two - numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the - input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact - following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: the - action to take, should be one of [multiplier]\nAction Input: the input to the - action\nObservation: the result of the action\n```\n\nWhen you have a response - for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis - is the summary of your work so far:\n \nBegin! This is VERY important to - you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n\n"}], "model": - "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nBegin! This is VERY + important to you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n"}], + "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: accept: @@ -24,13 +22,13 @@ interactions: connection: - keep-alive content-length: - - '1199' + - '1046' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.6.0 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -40,7 +38,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.0 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -48,18 +46,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8XuDxosP85Kqo6mU6biIggZ5c828i\",\n \"object\": - \"chat.completion\",\n \"created\": 1703091641,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: - multiplier\\nAction Input: 3,4\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 260,\n \"completion_tokens\": - 23,\n \"total_tokens\": 283\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRzU7DMBCE73mK1Z5blDZNS3NBIC4cQEKCA6KoSp1t4uJ4jb2RgKrvjpz+cvFh + Zmf1zXqbAKCusABUTSmqdWZ4/aWum7ms0upx/vRyl77Wv9n0ebN5kkf2OIgJXm1IyTF1pbh1hkSz + 3dvKUykUt45m6SzP0yzPe6PlikyM1U6Gk2E6HWWHRMNaUcAC3hMAgG3/RjZb0TcWkA6OSkshlDVh + cRoCQM8mKliGoIOUVnBwNhVbIdvjvjTc1Y0UcM/wAJaoAmHoAkEJwmxu4I3Cwt6qWKaAtjOindHk + jxo8WNdJAdsFrrUPsrRduyK/wAKyASwwkGJbXaiTHR5IdqcKhmvneRXr2s6Yk77WVodm6akMbCNu + EHb7+C4B+OhP1f1rj85z62Qp/Ek2LhyPR/t9eP6Vs5tlB1NYSnORyifJgRDDTxBql2tta/LO6/5y + kTPZJX8AAAD//wMAklK0SjACAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 83897166eba7a5fd-GRU + - 8532a8b68e9c7e2d-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -69,14 +69,14 @@ interactions: Content-Type: - application/json Date: - - Wed, 20 Dec 2023 17:00:44 GMT + - Sat, 10 Feb 2024 07:32:37 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=dRKr_rTtq3ZGad82s4Lpo6VOXPMLScbjq8fMvjANBpY-1703091644-1-AUDR6a/EPcG95H4He0KddFkZbk45hbZTA/BPUyFBTNiYGlzd2GIBZnPgpVOJXfr9n4lXV8jRf1bRmUJbsZnQ5MM=; - path=/; expires=Wed, 20-Dec-23 17:30:44 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=wpgtLOAFEfhtdsHgnBqy8JDTY8RJuFoCAv0TBhjKNpM-1707550357-1-AbN7PbjPkv1+YaFhvJRGUln0mdeLoN07Da0jaHrylFFSyrOBGZuw6oareFl2seDb+J2ojofmxYA1O89GVbSBl7Y=; + path=/; expires=Sat, 10-Feb-24 08:02:37 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=suHEOi6nmUCq7cFZiZAg5nwyGtTeiFynig5_5V4esA8-1703091644341-0-604800000; + - _cfuvid=7MORsmOTD3nJggM4dWcwuDMDpxZpuk2066xvKrzWSOM-1707550357219-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -89,7 +89,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2718' + - '1920' openai-version: - '2020-10-01' strict-transport-security: @@ -98,42 +98,39 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299729' - x-ratelimit-remaining-tokens_usage_based: - - '299729' + - '299761' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 54ms - x-ratelimit-reset-tokens_usage_based: - - 54ms + - 47ms x-request-id: - - 1714a9f5a2141d30f72506facf616944 - http_version: HTTP/1.1 - status_code: 200 + - req_075691c0da3ee5dc3d14d8f94e3fb169 + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together. \n\t\t\tThe input to this tool should be a - comma separated list of numbers of \n\t\t\tlength two, representing the two - numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the - input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact - following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: the - action to take, should be one of [multiplier]\nAction Input: the input to the - action\nObservation: the result of the action\n```\n\nWhen you have a response - for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis - is the summary of your work so far:\n \nBegin! This is VERY important to - you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought: Do - I need to use a tool? Yes\nAction: multiplier\nAction Input: 3,4\nObservation: - 12\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": - false, "temperature": 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": + 3, \"second_number\": 4}\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -142,16 +139,16 @@ interactions: connection: - keep-alive content-length: - - '1303' + - '1494' content-type: - application/json cookie: - - __cf_bm=dRKr_rTtq3ZGad82s4Lpo6VOXPMLScbjq8fMvjANBpY-1703091644-1-AUDR6a/EPcG95H4He0KddFkZbk45hbZTA/BPUyFBTNiYGlzd2GIBZnPgpVOJXfr9n4lXV8jRf1bRmUJbsZnQ5MM=; - _cfuvid=suHEOi6nmUCq7cFZiZAg5nwyGtTeiFynig5_5V4esA8-1703091644341-0-604800000 + - __cf_bm=wpgtLOAFEfhtdsHgnBqy8JDTY8RJuFoCAv0TBhjKNpM-1707550357-1-AbN7PbjPkv1+YaFhvJRGUln0mdeLoN07Da0jaHrylFFSyrOBGZuw6oareFl2seDb+J2ojofmxYA1O89GVbSBl7Y=; + _cfuvid=7MORsmOTD3nJggM4dWcwuDMDpxZpuk2066xvKrzWSOM-1707550357219-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.0 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -161,7 +158,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.0 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -169,18 +166,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8XuE0tWsbI9QDkRau6rzSUZfuqhFN\",\n \"object\": - \"chat.completion\",\n \"created\": 1703091644,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - 
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: - 12\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n - \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 293,\n \"completion_tokens\": - 15,\n \"total_tokens\": 308\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRzW7bMBCE73qKBc9y4UR2beiWAgFyKnLKJQpkmlpJdMldlVw1TQ29e0H5L7no + sDPfanZ4zACUbVQJyvRajB/cYvvbbA/tn+ft6uH5x7/486l76ewjv+/50D6oPBG8P6CRC/XNsB8c + imU6ySagFkxb7zbLzXq9LNabWfDcoEtYN8hitVh+vyvORM/WYFQlvGYAAMf5m7JRg39VCcv8MvEY + o+5QlVcTgArs0kTpGG0UTaLym2iYBGmO+4QBwUbQ8I7OLVoOXotgA5YSZhC4BekRounR67Kiina7 + XUXHigAq1Y5k0p01aY+VKqFSfnRiB2cxVCo/uXToRo8kMTlmcmZtiFLT6PfJWkKRX5SIhqn5JK2S + MlU0zX9X51OmaweOuyHwPvVFo3PXeWvJxr4OqCNTujcKDyd8ygDe5q7HL/WpIbAfpBb+hZQWFvfr + 0z51e9abutqeRWHR7hO1KbJzQhU/oqCvW0sdhiHYufqUM5uy/wAAAP//AwDgghcycQIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 838971796af6a5fd-GRU + - 8532a8c4db427e2d-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -190,7 +189,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 20 Dec 2023 17:00:46 GMT + - Sat, 10 Feb 2024 07:32:39 GMT Server: - cloudflare Transfer-Encoding: @@ -204,7 +203,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2355' + - '2563' openai-version: - '2020-10-01' strict-transport-security: @@ -213,24 +212,128 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299661' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 67ms + x-request-id: + - req_bf33bff9b62213c75c624fc2dcbf5d7b + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nBegin! This is VERY + important to you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought: + Do I need to use a tool? 
Yes\nAction: multiplier\nAction Input: {\"first_number\": + 3, \"second_number\": 4}\nObservation: 12\nThought: "}], "model": "gpt-4", "n": + 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1190' + content-type: + - application/json + cookie: + - __cf_bm=wpgtLOAFEfhtdsHgnBqy8JDTY8RJuFoCAv0TBhjKNpM-1707550357-1-AbN7PbjPkv1+YaFhvJRGUln0mdeLoN07Da0jaHrylFFSyrOBGZuw6oareFl2seDb+J2ojofmxYA1O89GVbSBl7Y=; + _cfuvid=7MORsmOTD3nJggM4dWcwuDMDpxZpuk2066xvKrzWSOM-1707550357219-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SQzU7DMBCE73mKlc9tlSb9Uy4IqJAqRI9cAFVusklNba+xN6JQ9d2R09DCxYeZ + /caze0wAhKpEAaLcSS6N08PFR7kwTWaau/ZpzXvm+9Vzo+rHQ71cf4tBJGj7jiX/UqOSjNPIiuzZ + Lj1Kxpg6nqfz6TTNZ2lnGKpQR6xxPJwM09k474kdqRKDKOAlAQA4dm/sZis8iAI6vlMMhiAbFMVl + CEB40lERMgQVWFoWg6tZkmW0Xd0lwQosYgVM0AYECUykb2BNr/ZBWanh1oZP9AXkwMpggAmoAONs + JPrA06WJpsZ52sbWttX6otfKqrDbeJSBbPw1MLkzfkoA3rqN239LCOfJON4w7dHGwGyWn/PE9bh/ + 3Kw3mVjqP/pimvQNRfgKjGZTK9ugd151B4g9k1PyAwAA//8DAB+YxMT3AQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8532a8d5b9927e2d-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 07:32:41 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1355' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299704' - x-ratelimit-remaining-tokens_usage_based: - - '299704' + - '299728' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 59ms - x-ratelimit-reset-tokens_usage_based: - - 59ms + - 54ms x-request-id: - - a3de9d34f17496d9bdd2ae9360f6054a - http_version: HTTP/1.1 - status_code: 200 + - req_5b85d3e382222bb334b58fa52fbd2152 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -242,8 +345,8 @@ interactions: the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: What - is 3 times 4\nAI: 12\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": - false, "temperature": 0.7}' + is 3 times 4\nAI: 3 times 4 is 12.\n\nNew summary:"}], "model": "gpt-4", "n": + 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -252,16 +355,16 @@ interactions: connection: - keep-alive content-length: - - '871' + - '885' content-type: - application/json cookie: - - __cf_bm=dRKr_rTtq3ZGad82s4Lpo6VOXPMLScbjq8fMvjANBpY-1703091644-1-AUDR6a/EPcG95H4He0KddFkZbk45hbZTA/BPUyFBTNiYGlzd2GIBZnPgpVOJXfr9n4lXV8jRf1bRmUJbsZnQ5MM=; - _cfuvid=suHEOi6nmUCq7cFZiZAg5nwyGtTeiFynig5_5V4esA8-1703091644341-0-604800000 + - __cf_bm=wpgtLOAFEfhtdsHgnBqy8JDTY8RJuFoCAv0TBhjKNpM-1707550357-1-AbN7PbjPkv1+YaFhvJRGUln0mdeLoN07Da0jaHrylFFSyrOBGZuw6oareFl2seDb+J2ojofmxYA1O89GVbSBl7Y=; + _cfuvid=7MORsmOTD3nJggM4dWcwuDMDpxZpuk2066xvKrzWSOM-1707550357219-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.0 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -271,7 +374,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.0 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -279,18 +382,19 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8XuE3Di6FRdNAetXNfWs6OWyRAfkf\",\n \"object\": - \"chat.completion\",\n \"created\": 1703091647,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI what is 3 times - 4 and the AI responds with 12.\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 149,\n \"completion_tokens\": - 20,\n \"total_tokens\": 169\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1xQy07CQBTd9ytuZl1MH5SS7owrN7rQRNEYMrSXdmBe9l4ihvDvZkoBdTOL85pz + zyECEKoRFYi6k1wbryfzz3pu34qX8u4hN8lTlsj9qywWm0fqFomIg8OtNljz2XVTO+M1snL2RNc9 + SsaQmpZJWRRJPksHwrgGdbC1nifTSTJL89HROVUjiQreIwCAw/CGbrbBvaggic+IQSLZoqguIgDR + Ox0QIYkUsbQs4itZO8toh7rPHUK3M9KCpC0Bdwi39/DVSYYcWBkkmIKiGKRtzmyP5J1tgvqfDNLs + Roz/HC8FtWt971bhGLvT+oKvlVXULXuU5GwoQ+z8yX6MAD6GIXZ/bhO+d8bzkt0WbQhMi+KUJ66b + X9msHEl2LPUv1zyLxoaCvonRLNfKttj7Xg27hJ7RMfoBAAD//wMAZCh3Sw4CAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 838971899954a5fd-GRU + - 8532a8df99427e2d-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -300,7 +404,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 20 Dec 2023 17:00:49 GMT + - Sat, 10 Feb 2024 07:32:43 GMT Server: - cloudflare Transfer-Encoding: @@ -314,7 +418,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2698' + - '1805' openai-version: - '2020-10-01' strict-transport-security: @@ -323,22 +427,17 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299798' - x-ratelimit-remaining-tokens_usage_based: - - '299798' + - '299794' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 40ms - x-ratelimit-reset-tokens_usage_based: - - 40ms + - 41ms x-request-id: - - ddbd97cea4ec099c21c00ca922157ae1 - http_version: HTTP/1.1 - status_code: 200 + - 
req_c13cb4147b01d222d16b93ac893a895d + status: + code: 200 + message: OK version: 1 diff --git a/tests/cassettes/test_agent_moved_on_after_max_iterations.yaml b/tests/cassettes/test_agent_moved_on_after_max_iterations.yaml index 3cb9b0407..d0ba5137c 100644 --- a/tests/cassettes/test_agent_moved_on_after_max_iterations.yaml +++ b/tests/cassettes/test_agent_moved_on_after_max_iterations.yaml @@ -1,19 +1,20 @@ interactions: - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to the following + personal goal is: test goalTOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: The final answer is 42. But don''t give it yet, instead keep using the - `get_final_answer` tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], - "stream": false, "temperature": 0.7}' + to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: The final + answer is 42. But don''t give it yet, instead keep using the `get_final_answer` + tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, + "temperature": 0.7}' headers: accept: - application/json @@ -22,13 +23,13 @@ interactions: connection: - keep-alive content-length: - - '1075' + - '1137' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -38,7 +39,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -46,19 +47,19 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8frBTCWXULTV5ZYHy3Y5JXKovrKiN\",\n \"object\": - \"chat.completion\",\n \"created\": 1704986579,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: - get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 233,\n \"completion_tokens\": 24,\n \"total_tokens\": 257\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RQy07DMBC85ytWPreoTdMWckGIcugBxKECIUCV62wTF8dr7A0PVf135DR9cPFh + Zmc8M9sEQOhC5CBUJVnVzvQvP4ur9LEZfzXyeXH38rBxiydarSez9P5WiV5U0GqDig+qC0W1M8ia + 7J5WHiVjdB1OB9PxOEvTQUvUVKCJstJxP+sPJsNRp6hIKwwih9cEAGDbvjGbLfBH5NDqW6TGEGSJ + Ij8eAQhPJiJChqADS8uidyIVWUbbxl1U1JQV5zAjmINFLIAJmoAggYnMNbxgeLM3KpbJoURerrWV + Zilt+EZ/YGBuXcM5ZKnovtkd8xkqnadV7GIbY474WlsdqqVHGcjGLIHJ7eW7BOC93aH5V004T7Xj + JdMH2miYZtneT5wmP2NHHcnE0pzhk2nSJRThNzDWsVSJ3nndzhJzJrvkDwAA//8DACdeRXQNAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 843e2886ceca1d23-GRU + - 85330710af4f175e-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -68,14 +69,14 @@ interactions: Content-Type: - application/json Date: - - Thu, 11 Jan 2024 15:23:03 GMT + - Sat, 10 Feb 2024 08:37:01 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=; - path=/; expires=Thu, 11-Jan-24 15:53:03 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=; + path=/; expires=Sat, 10-Feb-24 09:07:01 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000; + - _cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -88,7 +89,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '4424' + - '1517' openai-version: - '2020-10-01' strict-transport-security: @@ -100,30 +101,145 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299755' + - '299739' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 49ms + - 52ms x-request-id: - - 76974d365254ca84f70c43fc31af3378 - http_version: HTTP/1.1 - status_code: 200 + - req_b962be8c11abe80a762fc1e1d5c0fb5d + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription: + get_final_answer(numbers) -> float - Get the final answer but don''t give it + yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid + ouput schema:\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\n\nThe output should be formatted as a JSON instance that conforms + to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. 
+ The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1407' + content-type: + - application/json + cookie: + - __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=; + _cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SRzZLTMBCE736KqTknlBM7G/CRy24KKC4c+PGWo8hjW6w0MtKYZSuVd6fkZBO4 + +NAz37i7dcwA0LRYAepBiXajXb791b4rdl8/5Hdf3n/+xJtitds95fcfv/8uvr3gIhH+8JO0vFJv + tHejJTGez2MdSAmlq6ttvt1syvV6NQ+cb8kmrB9lWS7zu1VxIQZvNEWs4EcGAHCcv8kbt/QHK8gX + r4qjGFVPWF2XADB4mxRUMZooigUXt6H2LMSz3QcKBCaCgmeydtn54JQItWA4YZrAdyADQdQDOVXV + XPN+v6/5WDNAjd3EOuVsWDmqsYIae5KmM6xsozg+U6hxcd5VoZ8cscS0N/NJ5ckdKMxauU7iqebT + /BO8OD5do1rfj8EfUi08WXvVO8MmDk0gFT2nWFH8eMZPGcDjXOn0X0s4Bu9GacQ/EaeDRb4938Pb + 692m5aVvFC/K/kOV2+ziEONLFHIpe09hDGZuOPnMTtlfAAAA//8DABO023tYAgAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8533071c994c175e-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:37:04 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2139' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299683' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 63ms + x-request-id: + - req_f69fc36b19e66ed7d71a65ac81624ef5 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to the following + personal goal is: test goalTOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? 
Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: The final answer is 42. But don''t give it yet, instead keep using the - `get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [42]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": + to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: The final + answer is 42. But don''t give it yet, instead keep using the `get_final_answer` + tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: accept: @@ -133,16 +249,16 @@ interactions: connection: - keep-alive content-length: - - '1186' + - '1246' content-type: - application/json cookie: - - __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=; - _cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000 + - __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=; + _cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -152,7 +268,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -160,19 +276,19 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8frBYkyVPtJuAJCESaOxEBg3UAfl4\",\n \"object\": - \"chat.completion\",\n \"created\": 1704986584,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
Yes\\nAction: - get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 266,\n \"completion_tokens\": 22,\n \"total_tokens\": 288\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SQzU7DMBCE73mKlc8tSkLatLkgJIRUIS6ISwsocpNtEnC8rr0RharvjpymP1x8 + mPGMvtl9ACCaUmQgilpy0Ro1nm3L+dStntOX19+wfVy13XJWh0/RptxtrRj5BK0/seBT6qag1ijk + hvTRLixKRt8apWE6mSRxnPRGSyUqH6sMj5NxOI1uh0RNTYFOZPAWAADs+9ez6RJ3IoNwdFJadE5W + KLLzJwBhSXlFSOcax1KzGF3MgjSj7nEfCBagEUtggs4hSGAidQdLdO/6vvAbMqiQ802jpcqldt9o + Tw4stOk4gyQWQ/vhjKWoMpbWfoLulDrrm0Y3rs4tSkfaIzgmc4wfAoCPfn73b5EwllrDOdMXal8Y + p+mxT1wufeVGg8nEUl3p81kwEAr34xhbP6pCa2zTX8NzBofgDwAA//8DAA3DUnYEAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 843e28a57d911d23-GRU + - 8533072c9e37175e-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -182,7 +298,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 11 Jan 2024 15:23:07 GMT + - Sat, 10 Feb 2024 08:37:06 GMT Server: - cloudflare Transfer-Encoding: @@ -196,7 +312,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '3329' + - '2066' openai-version: - '2020-10-01' strict-transport-security: @@ -208,34 +324,37 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299728' + - '299712' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 54ms + - 57ms x-request-id: - - 1b9a1e09f863ff69cecfe4e7bed0aee5 - http_version: HTTP/1.1 - status_code: 200 + - req_e6cc60e6a53e65c41d1f4f36432d3be4 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to the following + personal goal is: test goalTOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: The final answer is 42. But don''t give it yet, instead keep using the - `get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [42]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: - get_final_answer\nAction Input: [42]\nObservation: I''ve used too many tools - for this task.\nI''m going to give you my absolute BEST Final answer now and\nnot - use any more tools.\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], - "stream": false, "temperature": 0.7}' + to use a tool? 
Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: The final + answer is 42. But don''t give it yet, instead keep using the `get_final_answer` + tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: Actually, I used too many tools, so I''ll stop now and + give you my absolute BEST Final answer NOW, using the expected format: ```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]```\nThought: + "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' headers: accept: - application/json @@ -244,16 +363,16 @@ interactions: connection: - keep-alive content-length: - - '1411' + - '1549' content-type: - application/json cookie: - - __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=; - _cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000 + - __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=; + _cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -263,7 +382,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -271,19 +390,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8frBbQ3vq0kEry4X3a1RkMEkIAP99\",\n \"object\": - \"chat.completion\",\n \"created\": 1704986587,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
No\\nFinal Answer: - I have used the tool multiple times and the final answer remains 42.\"\n },\n - \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n - \ \"usage\": {\n \"prompt_tokens\": 323,\n \"completion_tokens\": 28,\n - \ \"total_tokens\": 351\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RRyW7CMBC95ytGvnABxL7kUlVqK4G63LsIGWdITB1Pag8FhPj3yg5bLz68ZfTe + 8yEBEDoTKQhVSFZlZVqTn2w60f3N/G3+8r4bj3PpH58VzfaTWWctmsFByzUqPrvaisrKIGuyNa0c + SsZwtTvujIfDQa83ikRJGZpgyytuDVqdUbd/chSkFXqRwkcCAHCIb8hmM9yJFDrNM1Ki9zJHkV5E + AMKRCYiQ3mvP0rJoXklFltHGuA8EM7CIGTDBxiNIYCJzB6/0aZ+0lQburd+iS2EGhfzFIMqAC4w6 + aOTIi1XQLWTUNYC3WiFIm4Eiu9KujHrJ0VSLQHsIRTCDQa8tTsmOl0qG8srRMtS3G2Mu+Epb7YuF + Q+nJhvieqartxwTgK063+beGqByVFS+YvtGGg/3hsL4nrr90ww5OJBNLc4NPpskpofB7z1iG1jm6 + yum4ZMiZHJM/AAAA//8DACniEDhAAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 843e28bbbb071d23-GRU + - 8533073bfc1f175e-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -293,7 +413,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 11 Jan 2024 15:23:13 GMT + - Sat, 10 Feb 2024 08:37:08 GMT Server: - cloudflare Transfer-Encoding: @@ -307,7 +427,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '5459' + - '1584' openai-version: - '2020-10-01' strict-transport-security: @@ -319,15 +439,16 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299673' + - '299639' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 65ms + - 72ms x-request-id: - - 0a5c1064b324c997b16bf17d426f9638 - http_version: HTTP/1.1 - status_code: 200 + - req_990de951a71fb1fc2621f985e4257c6e + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -340,9 +461,9 @@ interactions: is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: The final answer is 42. 
But don''t give it yet, instead keep using the `get_final_answer` - tool.\nAI: I have used the tool multiple times and the final answer remains - 42.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": - 0.7}' + tool.\nAI: I have used the tool ''get_final_answer'' twice and confirmed that + the answer is indeed 42.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": + false, "temperature": 0.7}' headers: accept: - application/json @@ -351,16 +472,16 @@ interactions: connection: - keep-alive content-length: - - '1014' + - '1035' content-type: - application/json cookie: - - __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=; - _cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000 + - __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=; + _cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -370,7 +491,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -378,20 +499,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8frBhKxiCRICQ8o6aJanmn8PTMsAr\",\n \"object\": - \"chat.completion\",\n \"created\": 1704986593,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human tells the AI that the final - answer is 42 and instructs it to continue using the `get_final_answer` tool. 
- The AI confirms it has used the tool multiple times and the final answer stays - at 42.\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n - \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 178,\n \"completion_tokens\": - 46,\n \"total_tokens\": 224\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RRQW7bMBC86xULXnKxDVuWK9c3H5seChQ5FC0Kg6FWEhOKy3BXTYvAfy9IKRZy + IYgZzuzs8K0AULZRJ1Cm12KG4NbHl+bz+fjj286cX5jrn/f4tP1++DO2X/f3tVolBT0+oZF31cbQ + EByKJT/RJqIWTK67elsfDlVZHjMxUIMuybog62q9/bTbz4qerEFWJ/hVAAC85TNl8w3+VSfYrt6R + AZl1h+p0ewSgIrmEKM1sWbQXtVpIQ17Q57gPPUI/DtqD9S3FgUF6hPMXkF5LvrfWawfa8ytGsAxV + Cdo3YD1LHI0wWAEheEYMMLL1XVbddSiXLL1M0jsQIreBh8l+ZJxGJRTk1RrMtoZ8a6cY8/xlclod + G6jKjZp3ud5KcNSFSI+pMD86d8Nb6y33l4iayaeFWShM8msB8DuXPX7oT4VIQ5CL0DP6ZLg7VpOf + Wv51Yav9TAqJdgtelnUxJ1T8jwWH1EeHMUSbu085i2vxHwAA//8DAKC/EipyAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 843e28df4ae81d23-GRU + - 853307467f19175e-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -401,7 +522,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 11 Jan 2024 15:23:18 GMT + - Sat, 10 Feb 2024 08:37:11 GMT Server: - cloudflare Transfer-Encoding: @@ -415,7 +536,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '5518' + - '2805' openai-version: - '2020-10-01' strict-transport-security: @@ -427,13 +548,14 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299761' + - '299757' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 47ms + - 48ms x-request-id: - - 4100fde9c68d27d808de645637b3e7cc - http_version: HTTP/1.1 - status_code: 200 + - req_cd5aacfcc2283f70e3903b274541fd8c + status: + code: 200 + message: OK version: 1 diff --git a/tests/cassettes/test_agent_respect_the_max_rpm_set.yaml b/tests/cassettes/test_agent_respect_the_max_rpm_set.yaml index e2665624f..21a2b35db 100644 --- a/tests/cassettes/test_agent_respect_the_max_rpm_set.yaml +++ b/tests/cassettes/test_agent_respect_the_max_rpm_set.yaml @@ -5,15 +5,16 @@ interactions: tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: The final answer is 42. But don''t give it yet, instead keep using the - `get_final_answer` tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], - "stream": false, "temperature": 0.7}' + to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! 
This + is VERY important to you, your job depends on it!\n\nCurrent Task: The final + answer is 42. But don''t give it yet, instead keep using the `get_final_answer` + tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, + "temperature": 0.7}' headers: accept: - application/json @@ -22,13 +23,13 @@ interactions: connection: - keep-alive content-length: - - '1080' + - '1137' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -38,7 +39,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -46,19 +47,19 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8ggfw8fvQxNLkF3ttE8Bm7s8rjYFq\",\n \"object\": - \"chat.completion\",\n \"created\": 1705184512,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction: - get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 234,\n \"completion_tokens\": 24,\n \"total_tokens\": 258\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RQy07DMBC85ytWPreoadMG5YIqAVIPBVXlggBFrrNNAo7X2BvRCvXfkdP0wcWH + mZ3xzPxGAKIuRAZCVZJVY/Xw9ruYy2Wyrx6XT6uYlw+7dbpZu2pRrOyzGAQFbT5R8Ul1o6ixGrkm + c6SVQ8kYXON0lE6nySSedURDBeogKy0Pk+FoFk96RUW1Qi8yeIsAAH67N2QzBe5EBqPBCWnQe1mi + yM5HAMKRDoiQ3teepWExuJCKDKPp4r5U1JYVZ3BPsACDWAATtB5BAhPpO3hF/27mKpTJoETOt7WR + OpfG/6A7MbAwtuUMkrHovzmc82kqraNN6GJarc/4tja1r3KH0pMJWTyTPcoPEcBHt0P7r5qwjhrL + OdMXmmA4TpKjn7hMfsVOepKJpb7CZ2nUJxR+7xmbUKpEZ13dzRJyRofoDwAA//8DAF+ChBUNAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 845108e32c171abd-GRU + - 8533096bdb81cfc8-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -68,14 +69,14 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 22:21:54 GMT + - Sat, 10 Feb 2024 08:38:37 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=N43k0gHErA0PBuIXY9IHQyia.HGfApTZKpYsN4S4E8s-1705184514-1-AQrOw6bC8j+KAeTt3IquXJN8QBK/oqZwqkQ1DG2CFnb3u2VkEs58RczmbK2+3luwjUlNNK5xihxOzOe1hFlIkfc=; - path=/; expires=Sat, 13-Jan-24 22:51:54 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=bGpvqtQUvBPM_j5Zt3f2HAEJC0cnFIa.f8kxLWfVaG0-1707554317-1-AStGpATTQb7PGsi6696c3TYfsHeLtr7KIXkTY8HNOzxiv9rW34bPe8BmHkINn2mB9Utr0MeWVTu887pr7p2Oadg=; + path=/; expires=Sat, 10-Feb-24 09:08:37 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=tLKPNNCaE7nA1vSgk2uC9AN4gynXRvlaHmfacNL.ncY-1705184514404-0-604800000; + - _cfuvid=r2qEtn.gQWsg3Unirov9P7Tt.nlqpWNRVMWYPEYeOx4-1707554317785-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -88,7 +89,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1681' + - '1320' openai-version: - '2020-10-01' strict-transport-security: @@ -100,30 +101,145 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299754' + - '299739' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 49ms + - 52ms x-request-id: - - a25e71b234c622c7dc8010927635a75c - http_version: HTTP/1.1 - status_code: 200 + - req_9bfbd62b6bd4afc73086e67f3ef9f2f5 + status: + code: 200 + 
message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription: + get_final_answer(numbers) -> float - Get the final answer but don''t give it + yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid + ouput schema:\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\n\nThe output should be formatted as a JSON instance that conforms + to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1407' + content-type: + - application/json + cookie: + - __cf_bm=bGpvqtQUvBPM_j5Zt3f2HAEJC0cnFIa.f8kxLWfVaG0-1707554317-1-AStGpATTQb7PGsi6696c3TYfsHeLtr7KIXkTY8HNOzxiv9rW34bPe8BmHkINn2mB9Utr0MeWVTu887pr7p2Oadg=; + _cfuvid=r2qEtn.gQWsg3Unirov9P7Tt.nlqpWNRVMWYPEYeOx4-1707554317785-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1RRy27bMBC86ysWPNuFXclRo1sORXrpocghBcpCpqmVxJRcquSqaWPo3wvSr/ZC + gDM7y5nhsQAQphMNCD0q1m6y6w8/u4dD/fT29TOWQZeP93fPX4aPTw8zvqlnsUoKf3hBzRfVO+3d + ZJGNpxOtAyrGtHVbb+rdriq3dSac79Am2TDxulpv7rblWTF6ozGKBr4VAADHfCZv1OFv0cBmdUEc + xqgGFM11CEAEbxMiVIwmsiIWqxupPTFStvsJA4KJoOAVrV33PjjFjB0YSjKN4HvgESHqEZ2C3od8 + HcwvJOhn0illI0nSfr+XdJQEIMWFaEk5lKIBKQbktjekbKsovmKQYnWaVWGYHRLHNJf1CaXZHTBk + rHqfwEXSkh8R5yDLtQHrhyn4Q2qLZmuveG/IxLENqKKnlDayn07ypQD4npue/ytPTMG7iVv2P5DS + wnJTn/aJ26fe2Ko6k+xZ2X9Uu21xdijin8joUvYBwxRMLj75LJbiLwAAAP//AwAOfz0YbwIAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85330976db98cfc8-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:38:41 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '3637' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - 
'300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299682' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 63ms + x-request-id: + - req_873d758803feda1b2388bca2c0064b5e + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour personal goal is: test goalTOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: The final answer is 42. But don''t give it yet, instead keep using the - `get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [42]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": + to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: The final + answer is 42. But don''t give it yet, instead keep using the `get_final_answer` + tool.\nThought: Do I need to use a tool? 
Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: accept: @@ -133,16 +249,16 @@ interactions: connection: - keep-alive content-length: - - '1191' + - '1246' content-type: - application/json cookie: - - __cf_bm=N43k0gHErA0PBuIXY9IHQyia.HGfApTZKpYsN4S4E8s-1705184514-1-AQrOw6bC8j+KAeTt3IquXJN8QBK/oqZwqkQ1DG2CFnb3u2VkEs58RczmbK2+3luwjUlNNK5xihxOzOe1hFlIkfc=; - _cfuvid=tLKPNNCaE7nA1vSgk2uC9AN4gynXRvlaHmfacNL.ncY-1705184514404-0-604800000 + - __cf_bm=bGpvqtQUvBPM_j5Zt3f2HAEJC0cnFIa.f8kxLWfVaG0-1707554317-1-AStGpATTQb7PGsi6696c3TYfsHeLtr7KIXkTY8HNOzxiv9rW34bPe8BmHkINn2mB9Utr0MeWVTu887pr7p2Oadg=; + _cfuvid=r2qEtn.gQWsg3Unirov9P7Tt.nlqpWNRVMWYPEYeOx4-1707554317785-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -152,7 +268,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -160,19 +276,19 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8ggfyCtYEPuuA9KzlWJko8Sc4ItXo\",\n \"object\": - \"chat.completion\",\n \"created\": 1705184514,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction: - get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 267,\n \"completion_tokens\": 22,\n \"total_tokens\": 289\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SQS0/DMBCE7/kVK59b1PSVkguqeIgiEBICQQUocpNNYnC8Jt4IUNX/jpymDy4+ + zHhG3+w6ABAqEzGItJScVlb3Z1/ZPJ/Ors/lUuZX0bx8bh7zh9u7m/vLpxfR8wlafWDKu9RJSpXV + yIrM1k5rlIy+NYwG0WQyHg3D1qgoQ+1jheX+uD+YhqMuUZJK0YkYXgMAgHX7ejaT4Y+IYdDbKRU6 + JwsU8f4TgKhJe0VI55RjaVj0DmZKhtG0uBcECzCIGTBB4xAkMJE+gyW6NzNP/YYYCuQkV0bqRBr3 + jfXOgYWxDccwHoqufbPH0lTYmlZ+gmm03uu5MsqVSY3SkfEIjslu45sA4L2d3/xbJGxNleWE6RON + LxxG0bZPHC595IadycRSH+mns6AjFO7XMVZ+VIG1rVV7Dc8ZbII/AAAA//8DAELGlYYEAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 845108ef48b01abd-GRU + - 8533098e4daacfc8-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -182,7 +298,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 22:21:55 GMT + - Sat, 10 Feb 2024 08:38:43 GMT Server: - cloudflare Transfer-Encoding: @@ -196,7 +312,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1020' + - '1471' openai-version: - '2020-10-01' strict-transport-security: @@ -208,33 +324,35 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299726' + - '299713' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 54ms + - 57ms x-request-id: - - 2cb025cb28aa59d282888aa8daa8a745 - http_version: HTTP/1.1 - status_code: 200 + - req_abea42dd47069a64dd4dc2be7f63db84 + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to only the following - tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final - answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo - use a tool, please use the exact 
following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: The final answer is 42. But don''t give it yet, instead keep using the - `get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [42]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: - get_final_answer\nAction Input: [42]\nObservation: 42\nThought: "}], "model": - "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": - 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription: + get_final_answer(numbers) -> float - Get the final answer but don''t give it + yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid + ouput schema:\nDo I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\n\nThe output should be formatted as a JSON instance that conforms + to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. 
+ The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -243,16 +361,16 @@ interactions: connection: - keep-alive content-length: - - '1293' + - '1398' content-type: - application/json cookie: - - __cf_bm=N43k0gHErA0PBuIXY9IHQyia.HGfApTZKpYsN4S4E8s-1705184514-1-AQrOw6bC8j+KAeTt3IquXJN8QBK/oqZwqkQ1DG2CFnb3u2VkEs58RczmbK2+3luwjUlNNK5xihxOzOe1hFlIkfc=; - _cfuvid=tLKPNNCaE7nA1vSgk2uC9AN4gynXRvlaHmfacNL.ncY-1705184514404-0-604800000 + - __cf_bm=bGpvqtQUvBPM_j5Zt3f2HAEJC0cnFIa.f8kxLWfVaG0-1707554317-1-AStGpATTQb7PGsi6696c3TYfsHeLtr7KIXkTY8HNOzxiv9rW34bPe8BmHkINn2mB9Utr0MeWVTu887pr7p2Oadg=; + _cfuvid=r2qEtn.gQWsg3Unirov9P7Tt.nlqpWNRVMWYPEYeOx4-1707554317785-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -262,7 +380,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -270,19 +388,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8ggfzAey5AALxtUqKehw6R7nBfyZv\",\n \"object\": - \"chat.completion\",\n \"created\": 1705184515,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
Yes\\nAction: - get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 297,\n \"completion_tokens\": 22,\n \"total_tokens\": 319\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RRTY/TMBC951eM5tyidtNuV7lxQNoFCcTCAQmj1HWnicEee+0JUFX578hpt7tc + ImXeh957PlUAaPfYAJpei/HRze+e9m/7x0e/3D29G9za3z582HzLx/j1Mx8Nzooi7H6SkWfVGxN8 + dCQ28Bk2ibRQcV1uFpv1elXf1BPgw55ckXVR5qv54nZZXxR9sIYyNvC9AgA4Td+Sjff0FxtYzJ4v + nnLWHWFzJQFgCq5cUOdss2gWnL2AJrAQT3HvKRHYDNIThEHiIPD+y6ePYLmoDIH0WsAEPoTkM0iY + mJ39TQzZ9OR1o1jxdrtVfFIMoPAwsCnVW9aeFDagsCNpD5a1azXnP5QUzs5cnbrBE0suvJNCHvyO + 0vS3uhkVj5M1XqKP184udDGFXdmHB+eu94Nlm/s2kc6BS78sIZ7lYwXwY9p2+G8ujCn4KK2EX8TF + sF6sz3748oyv0LsLKEG0e3Vf1dUlIeZjFvKlcUcpJjtNXXJWY/UPAAD//wMAEXLKR2ECAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 845108f7794d1abd-GRU + - 85330998bda0cfc8-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -292,7 +411,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 22:21:57 GMT + - Sat, 10 Feb 2024 08:38:46 GMT Server: - cloudflare Transfer-Encoding: @@ -306,7 +425,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1662' + - '3542' openai-version: - '2020-10-01' strict-transport-security: @@ -318,34 +437,34 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299702' + - '299685' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 59ms + - 63ms x-request-id: - - 96578e7d658dcea71f927eb3700da70d - http_version: HTTP/1.1 - status_code: 200 + - req_5608143a7fb91c98985149622638567f + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour personal goal is: test goalTOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: The final answer is 42. But don''t give it yet, instead keep using the - `get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [42]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: - get_final_answer\nAction Input: [42]\nObservation: 42\nThought: Do I need to - use a tool? Yes\nAction: get_final_answer\nAction Input: [42]\nObservation: - 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": - false, "temperature": 0.7}' + to use a tool? 
Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: The final + answer is 42. But don''t give it yet, instead keep using the `get_final_answer` + tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": + ["\nObservation"], "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -354,16 +473,16 @@ interactions: connection: - keep-alive content-length: - - '1395' + - '1346' content-type: - application/json cookie: - - __cf_bm=N43k0gHErA0PBuIXY9IHQyia.HGfApTZKpYsN4S4E8s-1705184514-1-AQrOw6bC8j+KAeTt3IquXJN8QBK/oqZwqkQ1DG2CFnb3u2VkEs58RczmbK2+3luwjUlNNK5xihxOzOe1hFlIkfc=; - _cfuvid=tLKPNNCaE7nA1vSgk2uC9AN4gynXRvlaHmfacNL.ncY-1705184514404-0-604800000 + - __cf_bm=bGpvqtQUvBPM_j5Zt3f2HAEJC0cnFIa.f8kxLWfVaG0-1707554317-1-AStGpATTQb7PGsi6696c3TYfsHeLtr7KIXkTY8HNOzxiv9rW34bPe8BmHkINn2mB9Utr0MeWVTu887pr7p2Oadg=; + _cfuvid=r2qEtn.gQWsg3Unirov9P7Tt.nlqpWNRVMWYPEYeOx4-1707554317785-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -373,7 +492,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -381,19 +500,19 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8ggg2v1bp5IaLU8TDmqcDgXiqXOTd\",\n \"object\": - \"chat.completion\",\n \"created\": 1705184518,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
Yes\\nAction: - get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 327,\n \"completion_tokens\": 22,\n \"total_tokens\": 349\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SQS2/CMBCE7/kVK5+hCuERmktF1QuthMShVelDkUk2Ia3jde2NCkX898ohQHvx + YT7PaGb3AYCocpGAyDaSs9qo/vQrn6mf5WT6vHx4LO7HxWJ2+3T9sluU23glet5B6w/M+OS6yqg2 + CrkifcSZRcnoUwdxGI/Ho2EUt6CmHJW3lYb7o344GQw7x4aqDJ1I4DUAANi3r++mc9yKBMLeSanR + OVmiSM6fAIQl5RUhnascS82id4EZaUbd1r0jmINGzIEJGocggYnUDazQvelZ5jckUCKnRaWlSqV2 + 32hPBObaNJzAKBJd+uFcS1FpLK39BN0oddaLSlduk1qUjrSv4JjM0X4IAN7b+c2/RcJYqg2nTJ+o + feAwjI954nLpC40GHWRiqf64omnQNRRu5xhrP6pEa2zVXsP3DA7BLwAAAP//AwDWTYCNBAIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 845109052e951abd-GRU + - 853309af98a3cfc8-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -403,7 +522,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 22:22:00 GMT + - Sat, 10 Feb 2024 08:38:48 GMT Server: - cloudflare Transfer-Encoding: @@ -417,7 +536,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1840' + - '1361' openai-version: - '2020-10-01' strict-transport-security: @@ -429,35 +548,263 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299678' + - '299689' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 64ms + - 62ms x-request-id: - - 24c5fedfa1d1b427cc01a498f46fe60f - http_version: HTTP/1.1 - status_code: 200 + - req_db1ec990949eb8df5f40ea32a9d7967c + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription: + get_final_answer(numbers) -> float - Get the final answer but don''t give it + yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid + ouput schema:\nDo I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\n\nThe output should be formatted as a JSON instance that conforms + to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. 
+ The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1398' + content-type: + - application/json + cookie: + - __cf_bm=bGpvqtQUvBPM_j5Zt3f2HAEJC0cnFIa.f8kxLWfVaG0-1707554317-1-AStGpATTQb7PGsi6696c3TYfsHeLtr7KIXkTY8HNOzxiv9rW34bPe8BmHkINn2mB9Utr0MeWVTu887pr7p2Oadg=; + _cfuvid=r2qEtn.gQWsg3Unirov9P7Tt.nlqpWNRVMWYPEYeOx4-1707554317785-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1RRy27bMBC86ysWPNuFHct1oltyCPoA2gBBiwJhINPUWmIrLhlyVac19O8BJdlu + LzrMS7PDYwYgTCUKELpRrK1v59cv1e1h901/flx+//vibx7yj/es715/HB4aErPkcLufqPnkeqed + 9S2ycROtAyrGlLrcLDbrdb5aXQ+EdRW2yVZ7nufzxfvlanI0zmiMooCnDADgOHxTN6rwVRSwmJ0Q + izGqGkVxFgGI4NqECBWjiayIxexCakeMNNT9gAHBROAG4dPj1y9gKMk1AjeKQTvau2AjsBskPrjf + psJq1EbdoFWFJEnb7VbSUVLKl2LfkU7Xl6QsSlGAFDVyuTek2lJRPGCQYnZSq1B3FoljUk4ZI0Od + 3WEY8PxqJHpJ/fA7Md3Tn4doXe2D26XRqGvbM743ZGJTBlTRUTo6svOjvc8AnofBu/82FD4467lk + 9wspBa4W6zFPXN72wuZXE8mOVfuPK99kU0MR/0RGmzaoMfhghv1Tz6zP3gAAAP//AwCJ+iejdgIA + AA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 853309b95890cfc8-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:39:01 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '13091' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299685' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 63ms + x-request-id: + - req_64b891e785363d2819f682ad9ff88ec2 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour personal goal is: test goalTOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? 
Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: The final answer is 42. But don''t give it yet, instead keep using the - `get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [42]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: - get_final_answer\nAction Input: [42]\nObservation: 42\nThought: Do I need to - use a tool? Yes\nAction: get_final_answer\nAction Input: [42]\nObservation: - 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [42]\nObservation: I''ve used too many tools for this task. I''m going - to give you my absolute BEST Final answer now and not use any more tools.\nThought: + to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: The final + answer is 42. But don''t give it yet, instead keep using the `get_final_answer` + tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: 42\nThought: Do I need to use a tool? 
Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": + ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1446' + content-type: + - application/json + cookie: + - __cf_bm=bGpvqtQUvBPM_j5Zt3f2HAEJC0cnFIa.f8kxLWfVaG0-1707554317-1-AStGpATTQb7PGsi6696c3TYfsHeLtr7KIXkTY8HNOzxiv9rW34bPe8BmHkINn2mB9Utr0MeWVTu887pr7p2Oadg=; + _cfuvid=r2qEtn.gQWsg3Unirov9P7Tt.nlqpWNRVMWYPEYeOx4-1707554317785-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SQQU/CQBCF7/0Vkz2DaWkR0osh0SgHTdCTUUOW7dAubneW7jSihP9uthTQyx7e + 2/fyvdlHAEIXIgehKsmqdmY43Razn0U62u7u716qtdrWz4unB/c4jzdlKgYhQasNKj6lrhTVziBr + skdbNSgZQ2syiSfjcZZmSWfUVKAJsdLxMBvG10lfqCrSCr3I4S0CANh3b2CzBe5EDvHgpNTovSxR + 5OdPAKIhExQhvdeepWUxuJiKLKPtcG8J5mARC2CC1iNIYCJzA6/o3+1MhQ05lMjLtbbSLKX1X9ic + HJhb13IO2Uj07YczlqHSNbQKE2xrzFlfa6t9tWxQerIBwTO5Y/wQAXx089t/i4RrqHa8ZPpEGwrT + dHLsE5dLX9xR0ptMLM2f1Hga9YTCf3vGOowqsXGN7q4ROKND9AsAAP//AwCObqVhBAIAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85330a0c9d23cfc8-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:39:03 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1945' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299665' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 67ms + x-request-id: + - req_dc1c0a1620e711b36e9913cf09c6fa03 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final + answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo + use a tool, please use the exact following format:\n\n```\nThought: Do I need + to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: The final + answer is 42. 
But don''t give it yet, instead keep using the `get_final_answer` + tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: 42\nObservation: Actually, I used too many tools, so I''ll stop now and + give you my absolute BEST Final answer NOW, using the expected format: ```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]```\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: @@ -468,16 +815,16 @@ interactions: connection: - keep-alive content-length: - - '1618' + - '1749' content-type: - application/json cookie: - - __cf_bm=N43k0gHErA0PBuIXY9IHQyia.HGfApTZKpYsN4S4E8s-1705184514-1-AQrOw6bC8j+KAeTt3IquXJN8QBK/oqZwqkQ1DG2CFnb3u2VkEs58RczmbK2+3luwjUlNNK5xihxOzOe1hFlIkfc=; - _cfuvid=tLKPNNCaE7nA1vSgk2uC9AN4gynXRvlaHmfacNL.ncY-1705184514404-0-604800000 + - __cf_bm=bGpvqtQUvBPM_j5Zt3f2HAEJC0cnFIa.f8kxLWfVaG0-1707554317-1-AStGpATTQb7PGsi6696c3TYfsHeLtr7KIXkTY8HNOzxiv9rW34bPe8BmHkINn2mB9Utr0MeWVTu887pr7p2Oadg=; + _cfuvid=r2qEtn.gQWsg3Unirov9P7Tt.nlqpWNRVMWYPEYeOx4-1707554317785-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -487,7 +834,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -495,19 +842,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8ggg4hc3SCoIfjLWgJVTHwj91kcWv\",\n \"object\": - \"chat.completion\",\n \"created\": 1705184520,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
No\\nFinal Answer: - I've used the `get_final_answer` tool multiple times and it consistently returns - the number 42.\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 383,\n \"completion_tokens\": - 34,\n \"total_tokens\": 417\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRzW7bMBCE73qKBc+yYTtyXOhSNAiC+tA+QH9gMOJKYkrtMuTKihP43QvSP0ku + POzMfJhdvhUAyhpVg2p6Lc3g3ezLs7lbbacD25dn3U4/tFn/oteH11Xv2ntVpgQ/PmEjl9S84cE7 + FMt0kpuAWjBRl5vFZr2ubqoqCwMbdCnWeZlVs8Xt8uac6Nk2GFUNvwsAgLf8pm5k8EXVsCgvkwFj + 1B2q+moCUIFdmigdo42iSVT5LjZMgpTr3jNsgRANCMMYETQIs/sKP/kPPVjSDr5RnDDUsIVe7zGZ + DEiP2Qc6gqUoYWwEDWgysAU9APEEAbU5JGxn95gTbebpzJvDd55wj6FMDI8hOy4syxTLC0ogjt5z + PJXMNCtwQJmr807H6zEcdz7wYzocjc5d560lG/tdQB2Z0uJR2J/ixwLgbz76+OmOygcevOyE/yEl + YLVcn3jq/X8/qJuzKCzafZjfropzQxUPUXDYtZY6DD7Y/AepZ3Es/gMAAP//AwA3+lcDegIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 845109124bdd1abd-GRU + - 85330a1a0fbecfc8-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -517,7 +865,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 22:22:03 GMT + - Sat, 10 Feb 2024 08:39:06 GMT Server: - cloudflare Transfer-Encoding: @@ -531,7 +879,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '3318' + - '2059' openai-version: - '2020-10-01' strict-transport-security: @@ -543,15 +891,16 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299623' + - '299590' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 75ms + - 81ms x-request-id: - - c4eb234f14d03df86e41f35ff90b9708 - http_version: HTTP/1.1 - status_code: 200 + - req_2cf79f04c419784c49d447f748c33731 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -564,9 +913,10 @@ interactions: is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: The final answer is 42. But don''t give it yet, instead keep using the `get_final_answer` - tool.\nAI: I''ve used the `get_final_answer` tool multiple times and it consistently - returns the number 42.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": - false, "temperature": 0.7}' + tool.\nAI: I have used the tool as instructed and I am now ready to give the + final answer. 
However, as per the instructions, I am not supposed to give it + yet.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": + 0.7}' headers: accept: - application/json @@ -575,16 +925,16 @@ interactions: connection: - keep-alive content-length: - - '1041' + - '1093' content-type: - application/json cookie: - - __cf_bm=N43k0gHErA0PBuIXY9IHQyia.HGfApTZKpYsN4S4E8s-1705184514-1-AQrOw6bC8j+KAeTt3IquXJN8QBK/oqZwqkQ1DG2CFnb3u2VkEs58RczmbK2+3luwjUlNNK5xihxOzOe1hFlIkfc=; - _cfuvid=tLKPNNCaE7nA1vSgk2uC9AN4gynXRvlaHmfacNL.ncY-1705184514404-0-604800000 + - __cf_bm=bGpvqtQUvBPM_j5Zt3f2HAEJC0cnFIa.f8kxLWfVaG0-1707554317-1-AStGpATTQb7PGsi6696c3TYfsHeLtr7KIXkTY8HNOzxiv9rW34bPe8BmHkINn2mB9Utr0MeWVTu887pr7p2Oadg=; + _cfuvid=r2qEtn.gQWsg3Unirov9P7Tt.nlqpWNRVMWYPEYeOx4-1707554317785-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -594,7 +944,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -602,20 +952,21 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8ggg8PkiU1uRkwllwEfuGqBztgSqW\",\n \"object\": - \"chat.completion\",\n \"created\": 1705184524,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human instructs the AI to keep using - the `get_final_answer` tool, stating that the final answer is 42. The AI confirms - that it has used the `get_final_answer` tool multiple times and it consistently - returns the number 42.\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 184,\n \"completion_tokens\": - 51,\n \"total_tokens\": 235\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RSTY8aMQy9z6+wcgYEu7PQ5dbeemlViaqqqorNJGYmSyZOY89StOK/V8nwUS5R + 5Pee8+yX9wpAOavWoEynxfTRTz/8sZ/q782Phr+Jff76dpC5/JRNbNPr8ouaZAU1r2jkopoZ6qNH + cRRG2CTUgrnrYjVfPT3Vj/WyAD1Z9FnWRpnW0/ly8XhWdOQMslrDrwoA4L2c2Vuw+FetYT65VHpk + 1i2q9ZUEoBL5XFGa2bHoIGpyAw0FwVDsbjqEbuh1AEHvGaRD+PgZpNNS7jsXtAcd+IAJHEP9MIFm + EHCBJQ1GGJxAIAEhaN0bFtGZfkQBHWzhoraZskeMMLALbSG+tCjb8sR21LyAEPkZbEYbhsLOpZ5H + P06g0wwDoy3qTB0fYEio7fHOxP/OR8/a7AMdPNoW7zs2iOE6EdrLPJaAKU8xU+fdna5L99TGRE0O + KAzeX+s7Fxx324SaKeQFs1Ac5acK4HcJd7jLS8VEfZSt0B5Dbrh4Xo391O0f3dDlBRQS7W/1h2Vd + nR0qPrJgnzfbYorJlayzz+pU/QMAAP//AwAJFzTM4gIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 8451092a2daf1abd-GRU + - 85330a294bbfcfc8-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -625,7 +976,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 22:22:07 GMT + - Sat, 10 Feb 2024 08:39:10 GMT Server: - cloudflare Transfer-Encoding: @@ -639,7 +990,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '3505' + - '3626' openai-version: - '2020-10-01' strict-transport-security: @@ -651,13 +1002,14 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299755' + - '299741' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 49ms + - 51ms x-request-id: - - 7db26cd36db9105a336c4975a978ad37 - http_version: HTTP/1.1 - status_code: 200 + - req_705c1d9a6ec495c203e8250118520f54 + status: + code: 200 + message: OK version: 1 diff --git a/tests/cassettes/test_agent_respect_the_max_rpm_set_over_crew_rpm.yaml 
b/tests/cassettes/test_agent_respect_the_max_rpm_set_over_crew_rpm.yaml index 1facbbd5a..f19869c6f 100644 --- a/tests/cassettes/test_agent_respect_the_max_rpm_set_over_crew_rpm.yaml +++ b/tests/cassettes/test_agent_respect_the_max_rpm_set_over_crew_rpm.yaml @@ -4,26 +4,28 @@ interactions: personal goal is: test goalTOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate - work to co-worker: Useful to delegate a specific task to one of the following - co-workers: test role.\nThe input to this tool should be a pipe (|) separated - text of length 3 (three), representing the co-worker you want to ask it to (one - of the options), the task and all actual context you have for the task.\nFor - example, `coworker|task|context`.\nAsk question to co-worker: Useful to ask - a question, opinion or take from on of the following co-workers: test role.\nThe - input to this tool should be a pipe (|) separated text of length 3 (three), - representing the co-worker you want to ask it to (one of the options), the question - and all actual context you have for the question.\n For example, `coworker|question|context`.\n\nTo - use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer, - Delegate work to co-worker, Ask question to co-worker], just the name.\nAction - Input: the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]This is the summary of your work so far:\nBegin! This is - VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give - a Final Answer, instead keep using the `get_final_answer` tool.\n"}], "model": - "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": - 0.7}' + work to co-worker: Delegate work to co-worker(coworker: str, task: str, context: + str) - Delegate a specific task to one of the following co-workers: . The input + to this tool should be the role of the coworker, the task you want them to do, + and ALL necessary context to exectue the task, they know nothing about the task, + so share absolute everything you know, don''t reference things but instead explain + them.\nAsk question to co-worker: Ask question to co-worker(coworker: str, question: + str, context: str) - Ask a specific question to one of the following co-workers: + . The input to this tool should be the role of the coworker, the question you + have for them, and ALL necessary context to ask the question properly, they + know nothing about the question, so share absolute everything you know, don''t + reference things but instead explain them.\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: + the tool you wanna use, should be one of [get_final_answer, Delegate work to + co-worker, Ask question to co-worker], just the name.\nAction Input: Any and + all relevant information input and context for using the tool\nObservation: + the result of using the tool\n```\n\nWhen you have a response for your task, + or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? 
No\nFinal Answer: [your response here]```This is the + summary of your work so far:\nBegin! This is VERY important to you, your job + depends on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using + the `get_final_answer` tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], + "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -32,13 +34,13 @@ interactions: connection: - keep-alive content-length: - - '1849' + - '2026' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -48,7 +50,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -56,19 +58,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8ggwdlPGCjfSTQ9UYZ6FscyUha1T1\",\n \"object\": - \"chat.completion\",\n \"created\": 1705185547,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction: - get_final_answer\\nAction Input: [1, 2, 3]\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 410,\n \"completion_tokens\": 30,\n \"total_tokens\": 440\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RQTU/CQBS891e87LkYsC1IL0ajiWjwQIwJQUKW9tFWtvuW7isfMfx3s6WAXvYw + 82Z2Zn48AFGkIgaR5JKT0qjO3SZ93ND+PXrdBru3z3q7XL9MnvV4Mh0fUPhOQctvTPisukmoNAq5 + IH2ikwolo3PtDbqDKAqDYdgQJaWonCwz3Ak73X4vaBU5FQlaEcPMAwD4aV6XTae4FzF0/TNSorUy + QxFfjgBERcohQlpbWJaahX8lE9KMuon7kVOd5RzDE8EINGIKTFBbBAlMpO5hivZLPySuTAwZ8mJV + aKkWUtsdVmcGRtrUHMOs58OtD4EPoQ/RXLR/Hi9hFWWmoqUrpmulLviq0IXNFxVKS9oFs0zmJD96 + APNmlPpfT2EqKg0vmNaonWEYDE9+4rr/lQ36LcnEUv1RDSKvTSjswTKWrmGGlamKZiOX0zt6vwAA + AP//AwDC+oS8GgIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 8451222a0daa023d-GRU + - 85330b52e852982a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -78,14 +81,14 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 22:39:09 GMT + - Sat, 10 Feb 2024 08:39:55 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=rVM3HcQbZEwpOTwcpe7kl9QQYRs6WfN.nkQmw.sY1xg-1705185549-1-ATyblmW1bvTVkY3o9M+aZOU5lnMZ0R5ThR8QblVINArZXsnS39YH+NGcdAiT04v1fq+OTnj7lrwBulOgZqYUxKY=; - path=/; expires=Sat, 13-Jan-24 23:09:09 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=mgJpVNCqtuBR0fzO7G0OXzc5VAZHccE3NQVwB247pcQ-1707554395-1-AWAf1RaG764Vf3HVK2LYRoTG26JkDnGSXt08toDpH23t3IU91e4vsyLBZJJomYtsPlGpcniZBeJHapg28Swymyo=; + path=/; expires=Sat, 10-Feb-24 09:09:55 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=3PdaxLthL4xZPggd16y46lBPKNh_Xt_xeL_2BrNr9xU-1705185549778-0-604800000; + - _cfuvid=8MKyNTcDCFZwVI_pTcsYnlxWVxprpynbmavIPGo2OTc-1707554395781-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -98,7 +101,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1730' + - '1637' openai-version: - '2020-10-01' strict-transport-security: @@ -110,41 +113,50 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299562' + - '299518' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 87ms + - 96ms x-request-id: - - d2103c8ce4517885db35be6518e966cd - http_version: 
HTTP/1.1 - status_code: 200 + - req_9417a0de3faa567914d7f775d7eab75c + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to only the following - tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final - answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate - work to co-worker: Useful to delegate a specific task to one of the following - co-workers: test role.\nThe input to this tool should be a pipe (|) separated - text of length 3 (three), representing the co-worker you want to ask it to (one - of the options), the task and all actual context you have for the task.\nFor - example, `coworker|task|context`.\nAsk question to co-worker: Useful to ask - a question, opinion or take from on of the following co-workers: test role.\nThe - input to this tool should be a pipe (|) separated text of length 3 (three), - representing the co-worker you want to ask it to (one of the options), the question - and all actual context you have for the question.\n For example, `coworker|question|context`.\n\nTo - use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer, - Delegate work to co-worker, Ask question to co-worker], just the name.\nAction - Input: the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]This is the summary of your work so far:\nBegin! This is - VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give - a Final Answer, instead keep using the `get_final_answer` tool.\nThought: Do - I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [1, 2, 3]\nObservation: - 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": - false, "temperature": 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription: + get_final_answer(numbers) -> float - Get the final answer but don''t give it + yet, just re-use this\n tool non-stop.\n--\nFuntion Name: Delegate work + to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, ''task'': + {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: Delegate + work to co-worker(coworker: str, task: str, context: str) - Delegate a specific + task to one of the following co-workers: . The input to this tool should be + the role of the coworker, the task you want them to do, and ALL necessary context + to exectue the task, they know nothing about the task, so share absolute everything + you know, don''t reference things but instead explain them.\n--\nFuntion Name: + Ask question to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, + ''question'': {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: + Ask question to co-worker(coworker: str, question: str, context: str) - Ask + a specific question to one of the following co-workers: . 
The input to this + tool should be the role of the coworker, the question you have for them, and + ALL necessary context to ask the question properly, they know nothing about + the question, so share absolute everything you know, don''t reference things + but instead explain them.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [1, 2, + 3, 4, 5]\n\nThe output should be formatted as a JSON instance that conforms + to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -153,16 +165,16 @@ interactions: connection: - keep-alive content-length: - - '1965' + - '2566' content-type: - application/json cookie: - - __cf_bm=rVM3HcQbZEwpOTwcpe7kl9QQYRs6WfN.nkQmw.sY1xg-1705185549-1-ATyblmW1bvTVkY3o9M+aZOU5lnMZ0R5ThR8QblVINArZXsnS39YH+NGcdAiT04v1fq+OTnj7lrwBulOgZqYUxKY=; - _cfuvid=3PdaxLthL4xZPggd16y46lBPKNh_Xt_xeL_2BrNr9xU-1705185549778-0-604800000 + - __cf_bm=mgJpVNCqtuBR0fzO7G0OXzc5VAZHccE3NQVwB247pcQ-1707554395-1-AWAf1RaG764Vf3HVK2LYRoTG26JkDnGSXt08toDpH23t3IU91e4vsyLBZJJomYtsPlGpcniZBeJHapg28Swymyo=; + _cfuvid=8MKyNTcDCFZwVI_pTcsYnlxWVxprpynbmavIPGo2OTc-1707554395781-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -172,7 +184,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -180,19 +192,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8ggwghkojjlEuyvKZu4kPfOsqCGLh\",\n \"object\": - \"chat.completion\",\n \"created\": 1705185550,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
Yes\\nAction: - get_final_answer\\nAction Input: [4, 5, 6]\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 449,\n \"completion_tokens\": 28,\n \"total_tokens\": 477\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRS0vDQBDH7/kUw5xTaU2jNscigiD4OIhgJGyTaRrdnV13Jz4o+e6y6Qsve/g/ + ht/MbhMA7BosAOuNkto4Pbn6bJZe7hb69uX9OVw3N/fLu0e3fHhi/+UwjQ27eqdaDq2z2hqnSTrL + O7v2pITi1Nnl9DLP59kiHw1jG9Kx1jqZzCfTi1m2b2xsV1PAAl4TAIDt+EY2bugHC5imB8VQCKol + LI4hAPRWRwVVCF0QxYLpyawtC/GIuy0ZoMR1z3XErVgZKrGAEluSat2x0pXi8E2+xHSXVb7tDbGE + mBv7UeXerMiP2usshfMUshTmKeRvMTGUPOAeYDiSa9s6b1dxS+61PurrjruwqTypYDlSBrFuVx8S + gLfxQv2/pdF5a5xUYj+I48B8ke3m4ekzTm52tTfFitIn/SKbJXtCDL9ByMQbtOSd78aDRc5kSP4A + AAD//wMAgYOnACcCAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 845122365b01023d-GRU + - 85330b5ddeda982a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -202,7 +215,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 22:39:11 GMT + - Sat, 10 Feb 2024 08:39:57 GMT Server: - cloudflare Transfer-Encoding: @@ -216,7 +229,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1718' + - '1989' openai-version: - '2020-10-01' strict-transport-security: @@ -228,42 +241,45 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299535' + - '299395' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 93ms + - 121ms x-request-id: - - 9578b64bcbc4f8b489a7ba4df8e10136 - http_version: HTTP/1.1 - status_code: 200 + - req_d6788bbf95b247ef48092bcf07121273 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour personal goal is: test goalTOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate - work to co-worker: Useful to delegate a specific task to one of the following - co-workers: test role.\nThe input to this tool should be a pipe (|) separated - text of length 3 (three), representing the co-worker you want to ask it to (one - of the options), the task and all actual context you have for the task.\nFor - example, `coworker|task|context`.\nAsk question to co-worker: Useful to ask - a question, opinion or take from on of the following co-workers: test role.\nThe - input to this tool should be a pipe (|) separated text of length 3 (three), - representing the co-worker you want to ask it to (one of the options), the question - and all actual context you have for the question.\n For example, `coworker|question|context`.\n\nTo - use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer, - Delegate work to co-worker, Ask question to co-worker], just the name.\nAction - Input: the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]This is the summary of your work so far:\nBegin! This is - VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give - a Final Answer, instead keep using the `get_final_answer` tool.\nThought: Do - I need to use a tool? 
Yes\nAction: get_final_answer\nAction Input: [1, 2, 3]\nObservation: - 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [4, 5, 6]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": - ["\nObservation"], "stream": false, "temperature": 0.7}' + work to co-worker: Delegate work to co-worker(coworker: str, task: str, context: + str) - Delegate a specific task to one of the following co-workers: . The input + to this tool should be the role of the coworker, the task you want them to do, + and ALL necessary context to exectue the task, they know nothing about the task, + so share absolute everything you know, don''t reference things but instead explain + them.\nAsk question to co-worker: Ask question to co-worker(coworker: str, question: + str, context: str) - Ask a specific question to one of the following co-workers: + . The input to this tool should be the role of the coworker, the question you + have for them, and ALL necessary context to ask the question properly, they + know nothing about the question, so share absolute everything you know, don''t + reference things but instead explain them.\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: + the tool you wanna use, should be one of [get_final_answer, Delegate work to + co-worker, Ask question to co-worker], just the name.\nAction Input: Any and + all relevant information input and context for using the tool\nObservation: + the result of using the tool\n```\n\nWhen you have a response for your task, + or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]```This is the + summary of your work so far:\nBegin! This is VERY important to you, your job + depends on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using + the `get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: + get_final_answer\nAction Input: [1, 2, 3, 4, 5]\nObservation: 42\nThought: "}], + "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' headers: accept: - application/json @@ -272,16 +288,16 @@ interactions: connection: - keep-alive content-length: - - '2072' + - '2148' content-type: - application/json cookie: - - __cf_bm=rVM3HcQbZEwpOTwcpe7kl9QQYRs6WfN.nkQmw.sY1xg-1705185549-1-ATyblmW1bvTVkY3o9M+aZOU5lnMZ0R5ThR8QblVINArZXsnS39YH+NGcdAiT04v1fq+OTnj7lrwBulOgZqYUxKY=; - _cfuvid=3PdaxLthL4xZPggd16y46lBPKNh_Xt_xeL_2BrNr9xU-1705185549778-0-604800000 + - __cf_bm=mgJpVNCqtuBR0fzO7G0OXzc5VAZHccE3NQVwB247pcQ-1707554395-1-AWAf1RaG764Vf3HVK2LYRoTG26JkDnGSXt08toDpH23t3IU91e4vsyLBZJJomYtsPlGpcniZBeJHapg28Swymyo=; + _cfuvid=8MKyNTcDCFZwVI_pTcsYnlxWVxprpynbmavIPGo2OTc-1707554395781-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -291,7 +307,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -299,19 +315,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8ggwiHUPtP0zST5G9y9gq6L8xn7gO\",\n \"object\": - \"chat.completion\",\n \"created\": 1705185552,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
Yes\\nAction: - get_final_answer\\nAction Input: [7, 8, 9]\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 485,\n \"completion_tokens\": 28,\n \"total_tokens\": 513\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RQy07DMBC85ytWPqco6TPJBVGQoEicq1KqyE22qcHxmngjiqr+O3LatOLiw8zO + eGaOAYBQpchAFHvJRW31IPku5y0NV+rwbtN0Vs2T5cvr4XG0fH4bpiL0Ctp+YsG96q6g2mpkReZM + Fw1KRu8az6LZZDIepUlH1FSi9rLK8mA8iKbx6KLYkyrQiQzWAQDAsXt9NlPiQWQQhT1So3OyQpFd + jwBEQ9ojQjqnHEvDIryRBRlG08V9IliAQSyBCVqHIIGJ9D2s0H2Yh8J3yKBCznfKSJ1L436w6RlY + GNtyButpCLMQkhDSEOJoIy5/na4hNVW2oa0vZFqtr/hOGeX2eYPSkfGBHJM9y08BwKYbo/3XT9iG + ass50xcabzhOxmc/cdv9xo56komlvuGTOAkuCYX7dYy1r1hhYxvVbeNzBqfgDwAA//8DAC7hr/US + AgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84512244bdb0023d-GRU + - 85330b6b3d5c982a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -321,7 +338,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 22:39:13 GMT + - Sat, 10 Feb 2024 08:39:59 GMT Server: - cloudflare Transfer-Encoding: @@ -335,7 +352,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1439' + - '1816' openai-version: - '2020-10-01' strict-transport-security: @@ -347,45 +364,50 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299509' + - '299487' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 98ms + - 102ms x-request-id: - - 3ee6b071c3a4c976c4955dc66589b96e - http_version: HTTP/1.1 - status_code: 200 + - req_08a3909c98b7c18c5b74f4caf0bd8aad + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to only the following - tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final - answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate - work to co-worker: Useful to delegate a specific task to one of the following - co-workers: test role.\nThe input to this tool should be a pipe (|) separated - text of length 3 (three), representing the co-worker you want to ask it to (one - of the options), the task and all actual context you have for the task.\nFor - example, `coworker|task|context`.\nAsk question to co-worker: Useful to ask - a question, opinion or take from on of the following co-workers: test role.\nThe - input to this tool should be a pipe (|) separated text of length 3 (three), - representing the co-worker you want to ask it to (one of the options), the question - and all actual context you have for the question.\n For example, `coworker|question|context`.\n\nTo - use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer, - Delegate work to co-worker, Ask question to co-worker], just the name.\nAction - Input: the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]This is the summary of your work so far:\nBegin! This is - VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give - a Final Answer, instead keep using the `get_final_answer` tool.\nThought: Do - I need to use a tool? 
Yes\nAction: get_final_answer\nAction Input: [1, 2, 3]\nObservation: - 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [4, 5, 6]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: - get_final_answer\nAction Input: [7, 8, 9]\nObservation: I''ve used too many - tools for this task. I''m going to give you my absolute BEST Final answer now - and not use any more tools.\nThought: "}], "model": "gpt-4", "n": 1, "stop": - ["\nObservation"], "stream": false, "temperature": 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription: + get_final_answer(numbers) -> float - Get the final answer but don''t give it + yet, just re-use this\n tool non-stop.\n--\nFuntion Name: Delegate work + to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, ''task'': + {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: Delegate + work to co-worker(coworker: str, task: str, context: str) - Delegate a specific + task to one of the following co-workers: . The input to this tool should be + the role of the coworker, the task you want them to do, and ALL necessary context + to exectue the task, they know nothing about the task, so share absolute everything + you know, don''t reference things but instead explain them.\n--\nFuntion Name: + Ask question to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, + ''question'': {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: + Ask question to co-worker(coworker: str, question: str, context: str) - Ask + a specific question to one of the following co-workers: . The input to this + tool should be the role of the coworker, the question you have for them, and + ALL necessary context to ask the question properly, they know nothing about + the question, so share absolute everything you know, don''t reference things + but instead explain them.\n\nUse this text to inform a valid ouput schema:\nDo + I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [6, 7, 8, + 9, 10]\n\nThe output should be formatted as a JSON instance that conforms to + the JSON schema below.\n\nAs an example, for the schema {\"properties\": {\"foo\": + {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": \"array\", + \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe object {\"foo\": + [\"bar\", \"baz\"]} is a well-formatted instance of the schema. 
The object {\"properties\": + {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere is the output + schema:\n```\n{\"properties\": {\"function_name\": {\"title\": \"Function Name\", + \"description\": \"The name of the function to be called.\", \"type\": \"string\"}, + \"arguments\": {\"title\": \"Arguments\", \"description\": \"A dictinary of + arguments to be passed to the function.\", \"type\": \"object\"}}, \"required\": + [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", "n": 1, + "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -394,16 +416,16 @@ interactions: connection: - keep-alive content-length: - - '2300' + - '2558' content-type: - application/json cookie: - - __cf_bm=rVM3HcQbZEwpOTwcpe7kl9QQYRs6WfN.nkQmw.sY1xg-1705185549-1-ATyblmW1bvTVkY3o9M+aZOU5lnMZ0R5ThR8QblVINArZXsnS39YH+NGcdAiT04v1fq+OTnj7lrwBulOgZqYUxKY=; - _cfuvid=3PdaxLthL4xZPggd16y46lBPKNh_Xt_xeL_2BrNr9xU-1705185549778-0-604800000 + - __cf_bm=mgJpVNCqtuBR0fzO7G0OXzc5VAZHccE3NQVwB247pcQ-1707554395-1-AWAf1RaG764Vf3HVK2LYRoTG26JkDnGSXt08toDpH23t3IU91e4vsyLBZJJomYtsPlGpcniZBeJHapg28Swymyo=; + _cfuvid=8MKyNTcDCFZwVI_pTcsYnlxWVxprpynbmavIPGo2OTc-1707554395781-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -413,7 +435,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -421,20 +443,21 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8ggwkNsNUjt0ChmyTHpqYq62HOuXV\",\n \"object\": - \"chat.completion\",\n \"created\": 1705185554,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: - I've used too many tools for this task. 
I'm going to give you my absolute BEST - Final answer now and not use any more tools.\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 547,\n \"completion_tokens\": 42,\n \"total_tokens\": 589\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RRTY/aMBC951eMfA4VCAhLjq1Uqepp+yG12qyCEw+Jt/bYtSfQgvLfKwcWthcf + 3sf4zZtzBiC0EiWItpfcWm9mD7/V+8P3l08n83g6fvl5+kjR/Ijq8fOH4msh8uRwzQu2/Op61zrr + DbJ2dKHbgJIxTV1s5pv1erXcbifCOoUm2TrPs9VsXiyWV0fvdItRlPCUAQCcpzdlI4V/RAnz/BWx + GKPsUJQ3EYAIziREyBh1ZEks8jvZOmKkKe63HuEgjVbgBvYDQ2x7tBIaGVGBI+AewQd30AoVaNq7 + YGVaDI5uMAoaLCva7XYVnSsCqMR+oDbxNUmLlSihEh1yvdckTS0pHjFUIr9oZegGi8Qx6SZ/Qmmw + DYYJeypy2OTwkMM2h8X8OUnGisbpS3FdaLw1YVzng2tSazQYc8P3mnTs64AyOkpbR3b+Yh8zgOep + 8eG/EoUPznqu2f1CSgPX28Vlnrgf9w27vJLsWJo7XqxW2TWhiH8jo01NdBh80NMBUs5szP4BAAD/ + /wMA7RwNr3cCAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84512250bd7d023d-GRU + - 85330b773bcc982a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -444,7 +467,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 22:39:16 GMT + - Sat, 10 Feb 2024 08:40:03 GMT Server: - cloudflare Transfer-Encoding: @@ -458,7 +481,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2057' + - '3127' openai-version: - '2020-10-01' strict-transport-security: @@ -470,15 +493,397 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299452' + - '299396' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 109ms + - 120ms x-request-id: - - 1e3a6677139fb5c15cbb33f1b3154845 - http_version: HTTP/1.1 - status_code: 200 + - req_fb84c005ec5026c7a67ce45cfe185f23 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final + answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate + work to co-worker: Delegate work to co-worker(coworker: str, task: str, context: + str) - Delegate a specific task to one of the following co-workers: . The input + to this tool should be the role of the coworker, the task you want them to do, + and ALL necessary context to exectue the task, they know nothing about the task, + so share absolute everything you know, don''t reference things but instead explain + them.\nAsk question to co-worker: Ask question to co-worker(coworker: str, question: + str, context: str) - Ask a specific question to one of the following co-workers: + . The input to this tool should be the role of the coworker, the question you + have for them, and ALL necessary context to ask the question properly, they + know nothing about the question, so share absolute everything you know, don''t + reference things but instead explain them.\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: + the tool you wanna use, should be one of [get_final_answer, Delegate work to + co-worker, Ask question to co-worker], just the name.\nAction Input: Any and + all relevant information input and context for using the tool\nObservation: + the result of using the tool\n```\n\nWhen you have a response for your task, + or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? 
No\nFinal Answer: [your response here]```This is the + summary of your work so far:\nBegin! This is VERY important to you, your job + depends on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using + the `get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: + get_final_answer\nAction Input: [1, 2, 3, 4, 5]\nObservation: 42\nThought: Do + I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [6, 7, 8, + 9, 10]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], + "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '2262' + content-type: + - application/json + cookie: + - __cf_bm=mgJpVNCqtuBR0fzO7G0OXzc5VAZHccE3NQVwB247pcQ-1707554395-1-AWAf1RaG764Vf3HVK2LYRoTG26JkDnGSXt08toDpH23t3IU91e4vsyLBZJJomYtsPlGpcniZBeJHapg28Swymyo=; + _cfuvid=8MKyNTcDCFZwVI_pTcsYnlxWVxprpynbmavIPGo2OTc-1707554395781-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1RQy27CMBC85ytWPocqJuGhXKpShIR66alSRVFkkiWEOl7XXlpaxL9XDq/24sPM + znhmDhGAaCqRgyg3isvW6t74o5r8PG/26nMymoxn26kbp08vUq7W+DgTcVDQaoslX1R3JbVWIzdk + TnTpUDEGVzlKRoNBliVpR7RUoQ6y2nIv6yVDmZ4VG2pK9CKHRQQAcOjekM1UuBc5JPEFadF7VaPI + r0cAwpEOiFDeN56VYRHfyJIMo+niTgnmYBArYIKdR1DARPoeXtG/mYcydMihRi7WjVG6UMZ/obsw + MDd2xzkspIxB9mOQaQwyi0EOluL83/EaVFNtHa1CKbPT+oqvG9P4TeFQeTIhlGeyJ/kxAlh2g+z+ + dRTWUWu5YHpHEwwH/eHJT9y2v7FpdiaZWOk/qmESnRMK/+0Z21CzRmdd0+0TckbH6BcAAP//AwA0 + fVa6FgIAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85330b8bd8d7982a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:40:04 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1500' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299460' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 107ms + x-request-id: + - req_923a2134888413a1be003ac6e28d598b + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final + answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate + work to co-worker: Delegate work to co-worker(coworker: str, task: str, context: + str) - Delegate a specific task to one of the following co-workers: . 
The input + to this tool should be the role of the coworker, the task you want them to do, + and ALL necessary context to exectue the task, they know nothing about the task, + so share absolute everything you know, don''t reference things but instead explain + them.\nAsk question to co-worker: Ask question to co-worker(coworker: str, question: + str, context: str) - Ask a specific question to one of the following co-workers: + . The input to this tool should be the role of the coworker, the question you + have for them, and ALL necessary context to ask the question properly, they + know nothing about the question, so share absolute everything you know, don''t + reference things but instead explain them.\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: + the tool you wanna use, should be one of [get_final_answer, Delegate work to + co-worker, Ask question to co-worker], just the name.\nAction Input: Any and + all relevant information input and context for using the tool\nObservation: + the result of using the tool\n```\n\nWhen you have a response for your task, + or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]```This is the + summary of your work so far:\nBegin! This is VERY important to you, your job + depends on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using + the `get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: + get_final_answer\nAction Input: [1, 2, 3, 4, 5]\nObservation: 42\nThought: Do + I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [6, 7, 8, + 9, 10]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: [11, 12, 13, 14, 15]\nObservation: Actually, I used too many tools, so + I''ll stop now and give you my absolute BEST Final answer NOW, using the expected + format: ```\nThought: Do I need to use a tool? 
No\nFinal Answer: [your response + here]```\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], + "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '2583' + content-type: + - application/json + cookie: + - __cf_bm=mgJpVNCqtuBR0fzO7G0OXzc5VAZHccE3NQVwB247pcQ-1707554395-1-AWAf1RaG764Vf3HVK2LYRoTG26JkDnGSXt08toDpH23t3IU91e4vsyLBZJJomYtsPlGpcniZBeJHapg28Swymyo=; + _cfuvid=8MKyNTcDCFZwVI_pTcsYnlxWVxprpynbmavIPGo2OTc-1707554395781-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1RQwU6DQBS88xUve6YGhELLxRiNaROjFxtjakO28Arosm9lH1Fj+u9mKW31soeZ + N7Mz8+MBiKYUGYiilly0Rk1mH+VNMC9X9VPwnKJK78Pq8WEV3UULtZgJ3ylo+4YFH1UXBbVGITek + D3TRoWR0rmEapNNpHAfxQLRUonKyyvAkngRJGI2KmpoCrchg7QEA/Ayvy6ZL/BIZBP4RadFaWaHI + TkcAoiPlECGtbSxLzcI/kwVpRj3EvSVYgkYsgQl6iyCBidQVvKB91deF65BBhZzvGi1VLrX9xO7I + wFKbnjNYh4kPYepDOPMhnPtwGWzE+N/+FFRRZTraulK6V+qE7xrd2DrvUFrSLpRlMgf53gPYDIP0 + /zoK01FrOGd6R+0MkzA5+Inz9mc2ikeSiaX6o5oG3phQ2G/L2LqaFXama4Z9XE5v7/0CAAD//wMA + DrgirBYCAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85330b963f38982a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:40:06 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1709' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299382' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 123ms + x-request-id: + - req_337f134d8999e258ce8abd94a422aee4 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription: + get_final_answer(numbers) -> float - Get the final answer but don''t give it + yet, just re-use this\n tool non-stop.\n--\nFuntion Name: Delegate work + to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, ''task'': + {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: Delegate + work to co-worker(coworker: str, task: str, context: str) - Delegate a specific + task to one of the following co-workers: . 
The input to this tool should be + the role of the coworker, the task you want them to do, and ALL necessary context + to exectue the task, they know nothing about the task, so share absolute everything + you know, don''t reference things but instead explain them.\n--\nFuntion Name: + Ask question to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, + ''question'': {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: + Ask question to co-worker(coworker: str, question: str, context: str) - Ask + a specific question to one of the following co-workers: . The input to this + tool should be the role of the coworker, the question you have for them, and + ALL necessary context to ask the question properly, they know nothing about + the question, so share absolute everything you know, don''t reference things + but instead explain them.\n\nUse this text to inform a valid ouput schema:\nDo + I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [16, 17, + 18, 19, 20]\n\nThe output should be formatted as a JSON instance that conforms + to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '2562' + content-type: + - application/json + cookie: + - __cf_bm=mgJpVNCqtuBR0fzO7G0OXzc5VAZHccE3NQVwB247pcQ-1707554395-1-AWAf1RaG764Vf3HVK2LYRoTG26JkDnGSXt08toDpH23t3IU91e4vsyLBZJJomYtsPlGpcniZBeJHapg28Swymyo=; + _cfuvid=8MKyNTcDCFZwVI_pTcsYnlxWVxprpynbmavIPGo2OTc-1707554395781-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA2xSO5PTMBDu/St21FzjMHZI4sTl0TEUHAXN+cZR5LUtsHYVSb7AZPzfGTmvgaFR + 8b30aVfnBEDoRpQgVC+DMnZYbI/Np+XXL8fnF4v58fQi1fYzc2Fx+f1bI9Lo4MMPVOHm+qDY2AGD + ZrrQyqEMGFPzIivW69Uq28yE4QaHaOtsWKwW2Sb/eHX0rBV6UcJrAgBwns/YjRr8JUrI0hti0HvZ + oSjvIgDheIiIkN5rHyQFkT5IxRSQ5rrP0mMDTBB6BOv4XTfYgKaWnZHxASn06BC0BwnvctAN8Bjs + GMCrHo2Elt3sbUdSUQ9PHYa61SSHWpI/oXuCkw79LNIUna/5JoW8SCHfppDvUlhmb2VFFe33+4rO + FQFU4pZXkzRYiRIq8W9wJdKLVrpuNEjBR93sjyiN5oBuxv53Y5RNFU3zteI6nOk+1YE76/gQN0Dj + MNzxVpP2fe1QeqY4QR/YXuxTAvA2b2/8ayHCOjY21IF/IsXA9S6/5InHR3mwxe5KBg5yeOCbIkuu + DYX/7QOaOI0OnXV6XmbsmUzJHwAAAP//AwC/s+rEwwIAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85330ba19d32982a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + 
- keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:40:10 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '3212' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299395' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 120ms + x-request-id: + - req_8ca8a54089b315970b09d31defcc9e0e + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -490,9 +895,8 @@ interactions: the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Don''t - give a Final Answer, instead keep using the `get_final_answer` tool.\nAI: I''ve - used too many tools for this task. I''m going to give you my absolute BEST Final - answer now and not use any more tools.\n\nNew summary:"}], "model": "gpt-4", + give a Final Answer, instead keep using the `get_final_answer` tool.\nAI: Agent + stopped due to iteration limit or time limit.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' headers: accept: @@ -502,16 +906,16 @@ interactions: connection: - keep-alive content-length: - - '1049' + - '977' content-type: - application/json cookie: - - __cf_bm=rVM3HcQbZEwpOTwcpe7kl9QQYRs6WfN.nkQmw.sY1xg-1705185549-1-ATyblmW1bvTVkY3o9M+aZOU5lnMZ0R5ThR8QblVINArZXsnS39YH+NGcdAiT04v1fq+OTnj7lrwBulOgZqYUxKY=; - _cfuvid=3PdaxLthL4xZPggd16y46lBPKNh_Xt_xeL_2BrNr9xU-1705185549778-0-604800000 + - __cf_bm=mgJpVNCqtuBR0fzO7G0OXzc5VAZHccE3NQVwB247pcQ-1707554395-1-AWAf1RaG764Vf3HVK2LYRoTG26JkDnGSXt08toDpH23t3IU91e4vsyLBZJJomYtsPlGpcniZBeJHapg28Swymyo=; + _cfuvid=8MKyNTcDCFZwVI_pTcsYnlxWVxprpynbmavIPGo2OTc-1707554395781-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -521,7 +925,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -529,21 +933,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8ggwmU48hiYTdhe5slzvm2Tuyuky3\",\n \"object\": - \"chat.completion\",\n \"created\": 1705185556,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human instructs the AI not to give - a final answer, but to continue using the 'get_final_answer' tool. 
Despite this, - the AI expresses that it has used many tools for the task and decides to provide - its best final answer without using any more tools.\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 185,\n \"completion_tokens\": 54,\n \"total_tokens\": 239\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RRwW5TMRC8v68Y+ZxECUmTkhsghOBERS8IodR5b/u8xc9r7H0tpcq/IztpIi4+ + zOzMzo5fGsBwZ7YwrbPaDtFPr393H9Y37J9vPvL3r/LXfXm4Xb7/9vbTY9o8mUlRyP6BWn1VzVoZ + oidlCUe6TWSViutiM99cXa1Wi3klBunIF1kfdbqazteL5UnhhFvKZosfDQC81LdkCx39MVtUfUUG + ytn2ZLbnIcAk8QUxNmfOaoOayYVsJSiFGvfWEdw42AAOWdPYakYQhQp6fiRY3HOwHjbkJ0oT7MfK + FQcOI2HMHHqoI9z1pLs6vDsO30FF/AxlxbvPSJSjhC5DnVWwwtmMrBIjdehGKrYlhlKypTlIgvJA + 8Dywzswp/uF8t5c+JtmXjsLo/Rm/58DZ7RLZLKHcWJYc5YcG+Fn7Hf+rzMQkQ9Sdyi8KxXCxXh/9 + zOUrL+zy+kSqqPUX/M181ZwSmvyclYZSSE8pJq51l5zNofkHAAD//wMATtbV6mUCAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 8451225f7ee6023d-GRU + - 85330bb7da20982a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -553,7 +956,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 22:39:19 GMT + - Sat, 10 Feb 2024 08:40:12 GMT Server: - cloudflare Transfer-Encoding: @@ -567,7 +970,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2941' + - '1794' openai-version: - '2020-10-01' strict-transport-security: @@ -579,13 +982,14 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299753' + - '299770' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 49ms + - 45ms x-request-id: - - be72b34cee55b48a285458167d9a4909 - http_version: HTTP/1.1 - status_code: 200 + - req_f7d6a5e0b94636ccde0b107abd11674b + status: + code: 200 + message: OK version: 1 diff --git a/tests/cassettes/test_agent_step_callback.yaml b/tests/cassettes/test_agent_step_callback.yaml index 28ec99381..d234a368e 100644 --- a/tests/cassettes/test_agent_step_callback.yaml +++ b/tests/cassettes/test_agent_step_callback.yaml @@ -4,25 +4,28 @@ interactions: personal goal is: test goalTOOLS:\n------\nYou have access to only the following tools:\n\nlearn_about_AI: learn_about_AI(topic) -> float - Useful for when you need to learn about AI to write an paragraph about it.\nDelegate work to co-worker: - Useful to delegate a specific task to one of the following co-workers: .\nThe - input to this tool should be a pipe (|) separated text of length 3 (three), - representing the co-worker you want to ask it to (one of the options), the task - and all actual context you have for the task.\nFor example, `coworker|task|context`.\nAsk - question to co-worker: Useful to ask a question, opinion or take from on of - the following co-workers: .\nThe input to this tool should be a pipe (|) separated - text of length 3 (three), representing the co-worker you want to ask it to (one - of the options), the question and all actual context you have for the question.\n - For example, `coworker|question|context`.\n\nTo use a tool, please use the exact - following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the - action to take, should be one of [learn_about_AI, Delegate work to co-worker, - Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation: - the result of the action\n```\n\nWhen you have a response for your task, or - if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? 
No\nFinal Answer: [your response here]```This is the - summary of your work so far:\nBegin! This is VERY important to you, your job - depends on it!\n\nCurrent Task: Write and then review an small paragraph on - AI until it''s AMAZING\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], - "stream": false, "temperature": 0.7}' + Delegate work to co-worker(coworker: str, task: str, context: str) - Delegate + a specific task to one of the following co-workers: . The input to this tool + should be the role of the coworker, the task you want them to do, and ALL necessary + context to exectue the task, they know nothing about the task, so share absolute + everything you know, don''t reference things but instead explain them.\nAsk + question to co-worker: Ask question to co-worker(coworker: str, question: str, + context: str) - Ask a specific question to one of the following co-workers: + . The input to this tool should be the role of the coworker, the question you + have for them, and ALL necessary context to ask the question properly, they + know nothing about the question, so share absolute everything you know, don''t + reference things but instead explain them.\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: + the tool you wanna use, should be one of [learn_about_AI, Delegate work to co-worker, + Ask question to co-worker], just the name.\nAction Input: Any and all relevant + information input and context for using the tool\nObservation: the result of + using the tool\n```\n\nWhen you have a response for your task, or if you do + not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need + to use a tool? No\nFinal Answer: [your response here]```This is the summary + of your work so far:\nBegin! 
This is VERY important to you, your job depends + on it!\n\nCurrent Task: Write and then review an small paragraph on AI until + it''s AMAZING\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": + false, "temperature": 0.7}' headers: accept: - application/json @@ -31,13 +34,13 @@ interactions: connection: - keep-alive content-length: - - '1807' + - '1999' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.11.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -47,7 +50,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.11.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -57,18 +60,18 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA1SRzU7DMBCE73mKlc8t6k9oaS6oEkKKBIIDQiBAketsk1DHa+xNW1r13ZHTtBUX - H2Y862/W+whAVLlIQKhSsqqt7t/83K8f1tPdrl7aV7VZr/FtOtg8PW6fyzmJXkjQ4hsVn1JXimqr - kSsyR1s5lIxh6nA6mMaTeHY9a42actQhVljux/3BZDjuEiVVCr1I4CMCANi3Z2AzOW5FAoPeSanR - e1mgSM6XAIQjHRQhva88S8OidzEVGUbT4r6U1BQlJ3BHkIJBzIEJGo8ggYn0Lbyj/zRzFcokoFE6 - k8kFNZzN05MOqbENJ1CgQSc1zFNYGdpozAsU3bOHM6+mwjpahG6m0fqsLytT+TJzKD2ZwOaZ7DF+ - iAC+2r00/6oK66i2nDGt0ISB49nsOE9cvuDijuLOZGKpL3o8GkcdofC/nrHOlpUp0FlXtWsKnNEh - +gMAAP//AwBENAoEHQIAAA== + H4sIAAAAAAAAA1SRS0/DMBCE7/kVK59blNKnckEVINEDggNCAooix9kmpo7X2BvRqup/R05fcPFh + xrP+Zr1LAIQuRQZC1ZJV40x/9l3et+93mx8jC1rdpo+btEi3/vX54el2LXoxQcUXKj6lrhQ1ziBr + sgdbeZSMcepgmk7H49F4NuqMhko0MVY57o/66WQwPCZq0gqDyOAjAQDYdWdksyVuRAZp76Q0GIKs + UGTnSwDCk4mKkCHowNKy6F1MRZbRdrgvNbVVzRncESzAIpbABG1AkMBE5gbeMCztXMUyGRiU3uay + oJbz+eKkw8K6ljNYirlnvdJKSwMLy2iMrtAqXApxfH1/xjZUOU9FrGhbY876Slsd6tyjDGQjYmBy + h/g+Afjs1tP+ayycp8ZxzrRGGweOhoPDPHH5iYt7PTmaTCzNn9R4mhwJRdgGxiZfaVuhd15324qc + yT75BQAA//8DAG14RFskAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 852a83de5fb2fabe-SJC + - 85330ff5ff101637-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -78,14 +81,14 @@ interactions: Content-Type: - application/json Date: - - Fri, 09 Feb 2024 07:49:21 GMT + - Sat, 10 Feb 2024 08:43:05 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=wNpUyP3WikFZ5owMt5Wt4RvvxL5RRbV5Lm6N3cYt0cQ-1707464961-1-AaRJSNtZhC0qEyOOtzVqZyGINmDySgK+NZEmXyReWDjlSfP0cTj8tUtP+DYB/ttT11hGeDfZUyYzGIz1jsRPjN4=; - path=/; expires=Fri, 09-Feb-24 08:19:21 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=FHcpgH1eZBeZ3Rbcza28Oc6NWTXs_wBg2dFweW8n7iE-1707554585-1-AeO0TVKmSDiPyhIhqVNGXLU1AqKVAJroEV7Mq1JaqrBTJlB6hP4FrF9q/F66TJ4PAMcnMJ6YKH9wW4gQAx2jhis=; + path=/; expires=Sat, 10-Feb-24 09:13:05 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=Ii_GgXvfh_uOU6kzTG.hYatLhnP.5qhsMn05KgacgMc-1707464961455-0-604800000; + - _cfuvid=CyquHMSR3lyGv7DVvBmUUvZ_N5p0T0sRkvGYdcRgwUo-1707554585722-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -98,7 +101,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1637' + - '1403' openai-version: - '2020-10-01' strict-transport-security: @@ -110,165 +113,50 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299573' + - '299524' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 85ms + - 95ms x-request-id: - - req_363f8fd0e98a3c0e807830d844480ad1 + - req_c3e3d009fb1789af53949c6665e20c2d status: code: 200 message: OK - request: - body: !!binary | - CokpCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS4CgKEgoQY3Jld2FpLnRl - 
bGVtZXRyeRKYCAoQRMg95BA38/OSZo0f5R6QaBIIKgZb+0wGi88qDENyZXcgQ3JlYXRlZDABOaDm - x41VIrIXQfjXzo1VIrIXShkKDmNyZXdhaV92ZXJzaW9uEgcKBTAuNS4yShoKDnB5dGhvbl92ZXJz - aW9uEggKBjMuMTEuN0olCghob3N0bmFtZRIZChdKb2Fvcy1NYWNCb29rLVByby5sb2NhbEoxCgdj - cmV3X2lkEiYKJDUyZDc0NDVjLWE5Y2UtNGU2Yi05NjI2LWRhZGU5OWQ2YThmNkocCgxjcmV3X3By - b2Nlc3MSDAoKc2VxdWVudGlhbEoVCg1jcmV3X2xhbmd1YWdlEgQKAmVuShoKFGNyZXdfbnVtYmVy - X29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgBSsYCCgtjcmV3X2FnZW50 - cxK2AgqzAlt7ImlkIjogImEwMGUwM2Y0LTgwYTEtNDQ1Ni05NzRiLWFiYjQ2YmVmM2NmYyIsICJy - b2xlIjogInRlc3Qgcm9sZSIsICJtZW1vcnlfZW5hYmxlZD8iOiB0cnVlLCAidmVyYm9zZT8iOiB0 - cnVlLCAibWF4X2l0ZXIiOiA0LCAibWF4X3JwbSI6IDEwLCAiaTE4biI6ICJlbiIsICJsbG0iOiAi - e1wibmFtZVwiOiBudWxsLCBcIm1vZGVsX25hbWVcIjogXCJncHQtNFwiLCBcInRlbXBlcmF0dXJl - XCI6IDAuNywgXCJjbGFzc1wiOiBcIkNoYXRPcGVuQUlcIn0iLCAiZGVsZWdhdGlvbl9lbmFibGVk - PyI6IHRydWUsICJ0b29sc19uYW1lcyI6IFtdfV1KnQEKCmNyZXdfdGFza3MSjgEKiwFbeyJpZCI6 - ICIwM2E4MDg0ZS04NjQ1LTRiMTQtYWE4My05YjE0YTFjYzViNWIiLCAiYXN5bmNfZXhlY3V0aW9u - PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJ0ZXN0IHJvbGUiLCAidG9vbHNfbmFtZXMiOiBbImdl - dF9maW5hbF9hbnN3ZXIiXX1dSigKCHBsYXRmb3JtEhwKGm1hY09TLTE0LjMtYXJtNjQtYXJtLTY0 - Yml0ShwKEHBsYXRmb3JtX3JlbGVhc2USCAoGMjMuMy4wShsKD3BsYXRmb3JtX3N5c3RlbRIICgZE - YXJ3aW5KewoQcGxhdGZvcm1fdmVyc2lvbhJnCmVEYXJ3aW4gS2VybmVsIFZlcnNpb24gMjMuMy4w - OiBXZWQgRGVjIDIwIDIxOjMwOjU5IFBTVCAyMDIzOyByb290OnhudS0xMDAwMi44MS41fjcvUkVM - RUFTRV9BUk02NF9UNjAzMEoKCgRjcHVzEgIYDHoCGAESyQsKEIAiEIJi8FZ1hiGlogKFFscSCCvs - 2YTYXYKeKgxDcmV3IENyZWF0ZWQwATkwbAqQVSKyF0EAaAyQVSKyF0oZCg5jcmV3YWlfdmVyc2lv - bhIHCgUwLjUuMkoaCg5weXRob25fdmVyc2lvbhIICgYzLjExLjdKJQoIaG9zdG5hbWUSGQoXSm9h - b3MtTWFjQm9vay1Qcm8ubG9jYWxKMQoHY3Jld19pZBImCiQzNTAzM2YwMC1kYzhiLTRjN2YtOWVk - MC02MDBlMDJjNmUxMjdKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKFQoNY3Jld19sYW5n - dWFnZRIECgJlbkoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAJKGwoVY3Jld19udW1iZXJfb2Zf - YWdlbnRzEgIYAkr9BAoLY3Jld19hZ2VudHMS7QQK6gRbeyJpZCI6ICIzNWZhODdkMi02M2YzLTRh - YmYtYmJkOC04ZWRlNDMxMWUwYWMiLCAicm9sZSI6ICJ0ZXN0IHJvbGUiLCAibWVtb3J5X2VuYWJs - ZWQ/IjogdHJ1ZSwgInZlcmJvc2U/IjogdHJ1ZSwgIm1heF9pdGVyIjogMTUsICJtYXhfcnBtIjog - MTAsICJpMThuIjogImVuIiwgImxsbSI6ICJ7XCJuYW1lXCI6IG51bGwsIFwibW9kZWxfbmFtZVwi - OiBcImdwdC00XCIsIFwidGVtcGVyYXR1cmVcIjogMC43LCBcImNsYXNzXCI6IFwiQ2hhdE9wZW5B - SVwifSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogdHJ1ZSwgInRvb2xzX25hbWVzIjogW119LCB7 - ImlkIjogIjAzZGNhNGMwLWRhMDEtNDg0YS1iNmQ1LTAwMjI3NWUzMWM5OSIsICJyb2xlIjogInRl - c3Qgcm9sZTIiLCAibWVtb3J5X2VuYWJsZWQ/IjogdHJ1ZSwgInZlcmJvc2U/IjogdHJ1ZSwgIm1h - eF9pdGVyIjogMiwgIm1heF9ycG0iOiBudWxsLCAiaTE4biI6ICJlbiIsICJsbG0iOiAie1wibmFt - ZVwiOiBudWxsLCBcIm1vZGVsX25hbWVcIjogXCJncHQtNFwiLCBcInRlbXBlcmF0dXJlXCI6IDAu - NywgXCJjbGFzc1wiOiBcIkNoYXRPcGVuQUlcIn0iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IHRy - dWUsICJ0b29sc19uYW1lcyI6IFtdfV1KlwIKCmNyZXdfdGFza3MSiAIKhQJbeyJpZCI6ICJjY2Ey - ZmZiMy1jOGI1LTQwZDctYmU0OC03MTg3ZTUwODgwOWYiLCAiYXN5bmNfZXhlY3V0aW9uPyI6IGZh - bHNlLCAiYWdlbnRfcm9sZSI6ICJ0ZXN0IHJvbGUiLCAidG9vbHNfbmFtZXMiOiBbXX0sIHsiaWQi - OiAiMDEyMWFlNWQtN2FjMC00NGQ0LTg3MzItOWViZDZkM2E3NzcwIiwgImFzeW5jX2V4ZWN1dGlv - bj8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAidGVzdCByb2xlMiIsICJ0b29sc19uYW1lcyI6IFsi - Z2V0X2ZpbmFsX2Fuc3dlciJdfV1KKAoIcGxhdGZvcm0SHAoabWFjT1MtMTQuMy1hcm02NC1hcm0t - NjRiaXRKHAoQcGxhdGZvcm1fcmVsZWFzZRIICgYyMy4zLjBKGwoPcGxhdGZvcm1fc3lzdGVtEggK - BkRhcndpbkp7ChBwbGF0Zm9ybV92ZXJzaW9uEmcKZURhcndpbiBLZXJuZWwgVmVyc2lvbiAyMy4z - LjA6IFdlZCBEZWMgMjAgMjE6MzA6NTkgUFNUIDIwMjM7IHJvb3Q6eG51LTEwMDAyLjgxLjV+Ny9S - RUxFQVNFX0FSTTY0X1Q2MDMwSgoKBGNwdXMSAhgMegIYARK1DAoQzeoTQK6TritBeDtREUudsxII - 
v/Sdpx2P668qDENyZXcgQ3JlYXRlZDABOajEaZJVIrIXQWiZa5JVIrIXShkKDmNyZXdhaV92ZXJz - aW9uEgcKBTAuNS4yShoKDnB5dGhvbl92ZXJzaW9uEggKBjMuMTEuN0olCghob3N0bmFtZRIZChdK - b2Fvcy1NYWNCb29rLVByby5sb2NhbEoxCgdjcmV3X2lkEiYKJDFmNjNlM2MxLTJiMzMtNDMzYi05 - ZWNhLTRlZjdlNWIzOGMzNEocCgxjcmV3X3Byb2Nlc3MSDAoKc2VxdWVudGlhbEoVCg1jcmV3X2xh - bmd1YWdlEgQKAmVuShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYA0obChVjcmV3X251bWJlcl9v - Zl9hZ2VudHMSAhgCSoIFCgtjcmV3X2FnZW50cxLyBArvBFt7ImlkIjogImU3YjllZTg0LTBjMDQt - NGNjZC04MjViLTBlNDE1NzlmYWIxOCIsICJyb2xlIjogInRlc3Qgcm9sZSIsICJtZW1vcnlfZW5h - YmxlZD8iOiB0cnVlLCAidmVyYm9zZT8iOiBmYWxzZSwgIm1heF9pdGVyIjogMTUsICJtYXhfcnBt - IjogbnVsbCwgImkxOG4iOiAiZW4iLCAibGxtIjogIntcIm5hbWVcIjogbnVsbCwgXCJtb2RlbF9u - YW1lXCI6IFwiZ3B0LTRcIiwgXCJ0ZW1wZXJhdHVyZVwiOiAwLjcsIFwiY2xhc3NcIjogXCJDaGF0 - T3BlbkFJXCJ9IiwgImRlbGVnYXRpb25fZW5hYmxlZD8iOiB0cnVlLCAidG9vbHNfbmFtZXMiOiBb - XX0sIHsiaWQiOiAiYzYwYjYzN2MtYmY5OC00ZWJkLTkwZWItMzUwNzkwNTRkMjdhIiwgInJvbGUi - OiAidGVzdCByb2xlMiIsICJtZW1vcnlfZW5hYmxlZD8iOiB0cnVlLCAidmVyYm9zZT8iOiBmYWxz - ZSwgIm1heF9pdGVyIjogMTUsICJtYXhfcnBtIjogbnVsbCwgImkxOG4iOiAiZW4iLCAibGxtIjog - IntcIm5hbWVcIjogbnVsbCwgXCJtb2RlbF9uYW1lXCI6IFwiZ3B0LTRcIiwgXCJ0ZW1wZXJhdHVy - ZVwiOiAwLjcsIFwiY2xhc3NcIjogXCJDaGF0T3BlbkFJXCJ9IiwgImRlbGVnYXRpb25fZW5hYmxl - ZD8iOiB0cnVlLCAidG9vbHNfbmFtZXMiOiBbXX1dSv4CCgpjcmV3X3Rhc2tzEu8CCuwCW3siaWQi - OiAiZWIyOTgxNjUtODg1My00Mjg4LWI3M2YtOWQ4NmQ0YjIxMzFiIiwgImFzeW5jX2V4ZWN1dGlv - bj8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAidGVzdCByb2xlIiwgInRvb2xzX25hbWVzIjogW119 - LCB7ImlkIjogIjFkYjdjMjUzLTFlMDEtNDJlYi1iODY4LTU2NjlhMDc2ZTA5MiIsICJhc3luY19l - eGVjdXRpb24/IjogZmFsc2UsICJhZ2VudF9yb2xlIjogInRlc3Qgcm9sZSIsICJ0b29sc19uYW1l - cyI6IFtdfSwgeyJpZCI6ICI3MjI2ODAwOS1mOGFhLTQ0M2EtYTIwZS1lOWVmYTBiZDhlN2UiLCAi - YXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJ0ZXN0IHJvbGUyIiwgInRv - b2xzX25hbWVzIjogW119XUooCghwbGF0Zm9ybRIcChptYWNPUy0xNC4zLWFybTY0LWFybS02NGJp - dEocChBwbGF0Zm9ybV9yZWxlYXNlEggKBjIzLjMuMEobCg9wbGF0Zm9ybV9zeXN0ZW0SCAoGRGFy - d2luSnsKEHBsYXRmb3JtX3ZlcnNpb24SZwplRGFyd2luIEtlcm5lbCBWZXJzaW9uIDIzLjMuMDog - V2VkIERlYyAyMCAyMTozMDo1OSBQU1QgMjAyMzsgcm9vdDp4bnUtMTAwMDIuODEuNX43L1JFTEVB - U0VfQVJNNjRfVDYwMzBKCgoEY3B1cxICGAx6AhgBEqoIChAfSWWtcWdxaoyWECh3vhkCEgiTLpDL - PjEHmioMQ3JldyBDcmVhdGVkMAE5gO5llFUishdBGKBnlFUishdKGQoOY3Jld2FpX3ZlcnNpb24S - BwoFMC41LjJKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4xMS43SiUKCGhvc3RuYW1lEhkKF0pvYW9z - LU1hY0Jvb2stUHJvLmxvY2FsSjEKB2NyZXdfaWQSJgokZjg1ZDQ3MTEtYzMwNC00NDMyLTk1N2Yt - MzE4NDUzNWFkOWJmShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShUKDWNyZXdfbGFuZ3Vh - Z2USBAoCZW5KGgoUY3Jld19udW1iZXJfb2ZfdGFza3MSAhgBShsKFWNyZXdfbnVtYmVyX29mX2Fn - ZW50cxICGAFK2gIKC2NyZXdfYWdlbnRzEsoCCscCW3siaWQiOiAiNWJkOWYxODEtM2Y0ZC00MTk0 - LThlOWEtODM1NDdhNTM1YjY4IiwgInJvbGUiOiAidGVzdCByb2xlIiwgIm1lbW9yeV9lbmFibGVk - PyI6IHRydWUsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAxNSwgIm1heF9ycG0iOiBu - dWxsLCAiaTE4biI6ICJlbiIsICJsbG0iOiAie1wibmFtZVwiOiBudWxsLCBcIm1vZGVsX25hbWVc - IjogXCJncHQtNFwiLCBcInRlbXBlcmF0dXJlXCI6IDAuNywgXCJjbGFzc1wiOiBcIkNoYXRPcGVu - QUlcIn0iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IHRydWUsICJ0b29sc19uYW1lcyI6IFsibGVh - cm5fYWJvdXRfQUkiXX1dSpsBCgpjcmV3X3Rhc2tzEowBCokBW3siaWQiOiAiMzQ1ZGY0ZGItNGE2 - ZC00ZGU4LWE3N2UtOTFiYTMwYzExZWVhIiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImFn - ZW50X3JvbGUiOiAidGVzdCByb2xlIiwgInRvb2xzX25hbWVzIjogWyJsZWFybl9hYm91dF9BSSJd - fV1KKAoIcGxhdGZvcm0SHAoabWFjT1MtMTQuMy1hcm02NC1hcm0tNjRiaXRKHAoQcGxhdGZvcm1f - cmVsZWFzZRIICgYyMy4zLjBKGwoPcGxhdGZvcm1fc3lzdGVtEggKBkRhcndpbkp7ChBwbGF0Zm9y - bV92ZXJzaW9uEmcKZURhcndpbiBLZXJuZWwgVmVyc2lvbiAyMy4zLjA6IFdlZCBEZWMgMjAgMjE6 - 
MzA6NTkgUFNUIDIwMjM7IHJvb3Q6eG51LTEwMDAyLjgxLjV+Ny9SRUxFQVNFX0FSTTY0X1Q2MDMw - SgoKBGNwdXMSAhgMegIYAQ== - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '5260' - Content-Type: - - application/x-protobuf - User-Agent: - - OTel-OTLP-Exporter-Python/1.22.0 - method: POST - uri: http://telemetry.crewai.com:4318/v1/traces - response: - body: - string: "\n\0" - headers: - Content-Length: - - '2' - Content-Type: - - application/x-protobuf - Date: - - Fri, 09 Feb 2024 07:49:24 GMT - status: - code: 200 - message: OK -- request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to only the following - tools:\n\nlearn_about_AI: learn_about_AI(topic) -> float - Useful for when you - need to learn about AI to write an paragraph about it.\nDelegate work to co-worker: - Useful to delegate a specific task to one of the following co-workers: .\nThe - input to this tool should be a pipe (|) separated text of length 3 (three), - representing the co-worker you want to ask it to (one of the options), the task - and all actual context you have for the task.\nFor example, `coworker|task|context`.\nAsk - question to co-worker: Useful to ask a question, opinion or take from on of - the following co-workers: .\nThe input to this tool should be a pipe (|) separated - text of length 3 (three), representing the co-worker you want to ask it to (one - of the options), the question and all actual context you have for the question.\n - For example, `coworker|question|context`.\n\nTo use a tool, please use the exact - following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the - action to take, should be one of [learn_about_AI, Delegate work to co-worker, - Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation: - the result of the action\n```\n\nWhen you have a response for your task, or - if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]```This is the - summary of your work so far:\nBegin! This is VERY important to you, your job - depends on it!\n\nCurrent Task: Write and then review an small paragraph on - AI until it''s AMAZING\nThought: Do I need to use a tool? Yes\nAction: learn_about_AI\nAction - Input: general AI knowledge\nObservation: AI is a very broad field.\nThought: - "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": - 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: learn_about_AI\nFuntion attributes: {''topic'': {}}\nDescription: learn_about_AI(topic) + -> float - Useful for when you need to learn about AI to write an paragraph + about it.\n--\nFuntion Name: Delegate work to co-worker\nFuntion attributes: + {''coworker'': {''type'': ''string''}, ''task'': {''type'': ''string''}, ''context'': + {''type'': ''string''}}\nDescription: Delegate work to co-worker(coworker: str, + task: str, context: str) - Delegate a specific task to one of the following + co-workers: . 
The input to this tool should be the role of the coworker, the + task you want them to do, and ALL necessary context to exectue the task, they + know nothing about the task, so share absolute everything you know, don''t reference + things but instead explain them.\n--\nFuntion Name: Ask question to co-worker\nFuntion + attributes: {''coworker'': {''type'': ''string''}, ''question'': {''type'': + ''string''}, ''context'': {''type'': ''string''}}\nDescription: Ask question + to co-worker(coworker: str, question: str, context: str) - Ask a specific question + to one of the following co-workers: . The input to this tool should be the role + of the coworker, the question you have for them, and ALL necessary context to + ask the question properly, they know nothing about the question, so share absolute + everything you know, don''t reference things but instead explain them.\n\nUse + this text to inform a valid ouput schema:\nThought: Do I need to use a tool? + Yes\nAction: learn_about_AI\nAction Input: \"Artificial Intelligence\"\n\nThe + output should be formatted as a JSON instance that conforms to the JSON schema + below.\n\nAs an example, for the schema {\"properties\": {\"foo\": {\"title\": + \"Foo\", \"description\": \"a list of strings\", \"type\": \"array\", \"items\": + {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe object {\"foo\": [\"bar\", + \"baz\"]} is a well-formatted instance of the schema. The object {\"properties\": + {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere is the output + schema:\n```\n{\"properties\": {\"function_name\": {\"title\": \"Function Name\", + \"description\": \"The name of the function to be called.\", \"type\": \"string\"}, + \"arguments\": {\"title\": \"Arguments\", \"description\": \"A dictinary of + arguments to be passed to the function.\", \"type\": \"object\"}}, \"required\": + [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", "n": 1, + "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -277,16 +165,16 @@ interactions: connection: - keep-alive content-length: - - '1955' + - '2558' content-type: - application/json cookie: - - __cf_bm=wNpUyP3WikFZ5owMt5Wt4RvvxL5RRbV5Lm6N3cYt0cQ-1707464961-1-AaRJSNtZhC0qEyOOtzVqZyGINmDySgK+NZEmXyReWDjlSfP0cTj8tUtP+DYB/ttT11hGeDfZUyYzGIz1jsRPjN4=; - _cfuvid=Ii_GgXvfh_uOU6kzTG.hYatLhnP.5qhsMn05KgacgMc-1707464961455-0-604800000 + - __cf_bm=FHcpgH1eZBeZ3Rbcza28Oc6NWTXs_wBg2dFweW8n7iE-1707554585-1-AeO0TVKmSDiPyhIhqVNGXLU1AqKVAJroEV7Mq1JaqrBTJlB6hP4FrF9q/F66TJ4PAMcnMJ6YKH9wW4gQAx2jhis=; + _cfuvid=CyquHMSR3lyGv7DVvBmUUvZ_N5p0T0sRkvGYdcRgwUo-1707554585722-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.11.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -296,7 +184,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.11.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -306,23 +194,19 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA1RTwW4bRwy96yse9rwSrEaxU10KA0EAXVIXCBAgbWFQs9xdVjPD7ZArRQj878HI - so1c5sDH9/jIIX8sgEa6ZosmjOQhTXH54f9Px8/v+UTyMD+Y/vUw+cPX+O3mePjzw6FpK0P3/3Hw - F9YqaJoiu2h+hkNhcq6q67ubu83t5vfb9QVI2nGstGHy5WZ5c7t+d2WMKoGt2eLvBQD8uLzVW+74 - e7PFTfsSSWxGAzfb1ySgKRprpCEzMafsTfsGBs3O+WL3o2KHzNzBFbMxCK4a/8Bn/Sd/kkwR99lO - XLa4Ly69BKGIXXaOUQbOgVto75xB+33ho9Q+QYb7XQsxEI5kDsod0hxdegpcM3rh2K2wc3Cu0yIz - rtmF8sDQHjbvl51YkClKrlBVYB+106iDsLVgCiNIUi3ouMxY8oBEYbxQtMC09xMVxkl8BCFwcZKM - 
yEeOtQzNrlnTuUXHQUw0LxMdqkygifYSxc9tLd5C8is9kFUDQbMF0dkym63wZWRjFKaYDDZRRl80 - vfhBZCq5Krui6F5dgrXI5HOhiEh5mGlgTEUDm10Tgw5ZXI6MOqW5NnixA1OkOYxIWniFj2yTOMNH - RidHLiZ+rv2JG2iaogSq62htHVUNBi3c4n4H8yJHtlrMJM2RnOvk+HuoWzHOiTLkl//u59xR4uwU - 4xkUnUt1O+oJJ0aUI7c4aTk8G524BK4NVG8nLbEDFZ1zh9lWzXUpn163OeowFd3Xzc9zjK/xXrLY - +FiYTHPdXHOdnulPC+Dfy9XMvxxCMxVNkz+6HjhXwc27u2e95u1A39D1b5sr6uoU34D3t+vF1WJj - Z3NOj73kgctU5HJF1ejiafETAAD//wMAKXcLNTwEAAA= + H4sIAAAAAAAAA1SRwW7bMBBE7/qKBc92YTeWbehmIC4a9JJrURYyTa0kJuSSIVdtWkP/XlBybPSi + w8y+1ezwUgAI04gKhO4Vaxfscv/WHH/x8fl7n/rHxy+H4zf8u7HP7/i23SexyIQ/v6DmD+qT9i5Y + ZONptnVExZi3rnerXVluyn05Gc43aDPWBV5ulqvt+uFK9N5oTKKCHwUAwGX65mzU4LuoYLX4UBym + pDoU1W0IQERvsyJUSiaxIhaLu6k9MdIU9ytGBJNAwW+0dtn66BQzNmAoYxrBt8A9gh84DAxJ9+hU + JUnS6XSSdJEEIEU7kM7n1qQcSlGBFBZVpFqd/cD14UmKxTypYjc4JE55aqKzyj4YPXOHyKY12igL + T8RoremQNEqRZ0dJ4/Rncb1mvNVgfReiP+fKaLD2preGTOrriCp5yicn9mHGxwLg51T38F+DIkTv + AtfsX5HywnK3n/eJ+8ve3c3D1WTPyt717ed1cU0o0p/E6OrWUIcxRDO1n3MWY/EPAAD//wMA8S8H + Q3QCAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 852a83e94821fabe-SJC + - 853310010fcd1637-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -332,7 +216,7 @@ interactions: Content-Type: - application/json Date: - - Fri, 09 Feb 2024 07:49:32 GMT + - Sat, 10 Feb 2024 08:43:08 GMT Server: - cloudflare Transfer-Encoding: @@ -346,7 +230,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '11068' + - '2273' openai-version: - '2020-10-01' strict-transport-security: @@ -358,13 +242,141 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299537' + - '299398' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 92ms + - 120ms x-request-id: - - req_b10e3d7ad93697db2a1d46243d59a41c + - req_9af33ac92b825be43da4372550c8c502 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nlearn_about_AI: learn_about_AI(topic) -> float - Useful for when you + need to learn about AI to write an paragraph about it.\nDelegate work to co-worker: + Delegate work to co-worker(coworker: str, task: str, context: str) - Delegate + a specific task to one of the following co-workers: . The input to this tool + should be the role of the coworker, the task you want them to do, and ALL necessary + context to exectue the task, they know nothing about the task, so share absolute + everything you know, don''t reference things but instead explain them.\nAsk + question to co-worker: Ask question to co-worker(coworker: str, question: str, + context: str) - Ask a specific question to one of the following co-workers: + . The input to this tool should be the role of the coworker, the question you + have for them, and ALL necessary context to ask the question properly, they + know nothing about the question, so share absolute everything you know, don''t + reference things but instead explain them.\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: + the tool you wanna use, should be one of [learn_about_AI, Delegate work to co-worker, + Ask question to co-worker], just the name.\nAction Input: Any and all relevant + information input and context for using the tool\nObservation: the result of + using the tool\n```\n\nWhen you have a response for your task, or if you do + not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need + to use a tool? No\nFinal Answer: [your response here]```This is the summary + of your work so far:\nBegin! This is VERY important to you, your job depends + on it!\n\nCurrent Task: Write and then review an small paragraph on AI until + it''s AMAZING\nThought: Do I need to use a tool? Yes\nAction: learn_about_AI\nAction + Input: \"Artificial Intelligence\"\nObservation: AI is a very broad field.\nThought: + "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '2154' + content-type: + - application/json + cookie: + - __cf_bm=FHcpgH1eZBeZ3Rbcza28Oc6NWTXs_wBg2dFweW8n7iE-1707554585-1-AeO0TVKmSDiPyhIhqVNGXLU1AqKVAJroEV7Mq1JaqrBTJlB6hP4FrF9q/F66TJ4PAMcnMJ6YKH9wW4gQAx2jhis=; + _cfuvid=CyquHMSR3lyGv7DVvBmUUvZ_N5p0T0sRkvGYdcRgwUo-1707554585722-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA2RTwWobSxC86yuaPa+ERazY1iUYXh4IHoGEBAJJMK3Z3t22Z7s30z1ylOB/D7Na + WwnvMoepqaaqpvrXAqDiptpCFXr0MIxxef29eXu8eRzS/dX43+fmU2P69lV4v+4+HHhT1YWh+3sK + /sxaBR3GSM4qJzgkQqcydX11cbXZXG6urydg0IZioXWjLy+XF6/Xr2ZGrxzIqi18WQAA/JrOok0a + +lFt4aJ+vhnIDDuqti+PAKqksdxUaMbmKF7VZzCoOMkk9x+FHQhRA66QjQDBVeMbeKdf5V8WjHAr + 9khpC7fJueXAGGEnTjFyRxKoBm2dBHC/T3Tg4hPQ4HZXAxsgHNAcWqbYrGDnQFLCQTMyOGBizQaW + 98uGLfAYWcgApQGn0ItG7ZgMvEcH5KGIPGUJA4b+5bEdzWkwCDjiPhJoCyOlVtPA0oGjPcwzHjXH + BrJljPEIib5nTgR9HlCA/zC1go89Gc1UlhBzQxAJk5SJbdIB6MdIictrqyFLQ6kE3RRc0HPCCBGl + y9hRDYmCdsI/CzqiOyWxGsak+0jD0jQeWLp6MtNQYGMVGPCBpZukQKcYi63bXUlV1OE+m5c8Eo2R + Q4nk/zZgnx0wmv4R3HNWUx4B5Tmp2eugiYDa8tEkHo+TJAwhJ3SKx1VR0GOhE4xaalQKMek4aMyl + 8/yTzl9LwTVZPWdY7PeE0fuAiWpoWXDqkCcUGzU5lgmnIIqUVTXX9uml71G7klvZDckxvty3LGz9 + XSI0ldJtcx1P9KcFwLdpr/Jfq1KNSYfR71wfSMrAy6t5r6rzCp/R9fpmRl0d4xnYXN8sZonVKd+7 + lqWjNCae9qwIXTwtfgMAAP//AwDp3F89XgQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 853310125de31637-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:43:14 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '6017' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299486' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 102ms + 
x-request-id: + - req_c153504e2e53f18e15a640221b664aba status: code: 200 message: OK @@ -380,15 +392,15 @@ interactions: is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Write and then review an small paragraph on AI until it''s AMAZING\nAI: Artificial - Intelligence, often abbreviated as AI, is a vast and multifaceted field. It - encompasses a range of sub-disciplines and methodologies, each aimed at creating - machines or software with a certain level of autonomy, decision-making capability, - and, in certain cases, consciousness. These realms span from machine learning - to robotics, natural language processing to cognitive computing, and so much - more. Despite the diversity of its applications, at its core, AI strives to - simulate or exceed human intelligence, fundamentally altering how we live, work, - and perceive the world around us.\n\nNew summary:"}], "model": "gpt-4", "n": - 1, "stream": false, "temperature": 0.7}' + Intelligence, often abbreviated as AI, is a vast field. It encompasses various + sub-disciplines and technologies that aim to create machines and systems capable + of performing tasks that would usually require human intelligence. These tasks + include learning from experiences, understanding natural language, recognizing + patterns, problem-solving, and decision making. The goal of AI is not just to + replicate human intelligence but also to create systems that can perform tasks + more efficiently and accurately. AI has the potential to revolutionize various + sectors, including healthcare, finance, transportation, and more.\n\nNew summary:"}], + "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -397,16 +409,16 @@ interactions: connection: - keep-alive content-length: - - '1513' + - '1547' content-type: - application/json cookie: - - __cf_bm=wNpUyP3WikFZ5owMt5Wt4RvvxL5RRbV5Lm6N3cYt0cQ-1707464961-1-AaRJSNtZhC0qEyOOtzVqZyGINmDySgK+NZEmXyReWDjlSfP0cTj8tUtP+DYB/ttT11hGeDfZUyYzGIz1jsRPjN4=; - _cfuvid=Ii_GgXvfh_uOU6kzTG.hYatLhnP.5qhsMn05KgacgMc-1707464961455-0-604800000 + - __cf_bm=FHcpgH1eZBeZ3Rbcza28Oc6NWTXs_wBg2dFweW8n7iE-1707554585-1-AeO0TVKmSDiPyhIhqVNGXLU1AqKVAJroEV7Mq1JaqrBTJlB6hP4FrF9q/F66TJ4PAMcnMJ6YKH9wW4gQAx2jhis=; + _cfuvid=CyquHMSR3lyGv7DVvBmUUvZ_N5p0T0sRkvGYdcRgwUo-1707554585722-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.11.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -416,7 +428,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.11.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -426,22 +438,21 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA1STQW/bMAyF7/kVhC+7OEHTBk2XW3YYFmyHAeuGFcNQMDJjs5FFTaTtZkX/+yAn - bdaLD6Te0yfy+WkCUHBVrKBwDZpro5/e/PnY3314+Hbz+fv+i3XzcLjqftzpp59f46YryqyQ7QM5 - e1HNnLTRk7GEY9slQqPsOl9eLBfXi/fLy7HRSkU+y+po08X04np+dVI0wo60WMGvCQDA0/jNbKGi - x2IFF+VLpSVVrKlYvR4CKJL4XClQldUwWFGem06CURhxbxuCpmsxAOpewRqC9QZMYEhsBBgqSNQz - DYCgLXoPERPWCWMDEvLZLhh7YHungC3+5VDP4PZoM3ooIOR5JGooKPf0nwM9Ro8cONRgDRqsNyVI - gnUy3rFj9LAJRt5zTcFRCZzNelQbwdrOG+/QkVEFOyZfzWBjwMH5riKFHhNLp6DddlqxOo6eQ+bJ - WrJGKvFSM2kJ+WHILVWABuO2MlOLrhkVkkBlZwMmgoGtyU+iZMgBPPXkQXaAnUmQ9lBCRY6VJUxb - 3GcbhxG37NkO5Xh3lDx9Ru8PJTgJ6jJmINUZrDd5jjF6dpjzo5DvrLinpFTCtrNMyKbgJOWJWObW - vDHltvNolGnp0RFVp83ymxmiN0oZq5EBBgLPPZUwSNqf6Cg5ymvKWRgk+WpWnKLz/Jo5L3VMss35 - 
DJ33r/UdB9bmPhGqhJwvNYlH+fME4PeY7e5NXIuYpI12b7KnkA0vrxdHv+L8G52784vlqWti6M+N - q+V8ckIs9KBG7f2OQ00pJh6znkEnz5N/AAAA//8DAG6VJ3HiAwAA + H4sIAAAAAAAAA1RSTW/bMAy951cQuuySBM0aL21uxYBhxW5FdhqGgpFpm60sqiKd9AP974PstMEu + OpB6j4+P720G4Lh2W3C+Q/N9Courp/pH9TPZ8+53vNu8rL5Xq4fjjW52d9/CLzcvCNk/kLcP1NJL + nwIZS5zaPhMaFdbV5mJTVevquhobvdQUCqxNtlgvLr6tLk+ITtiTui38mQEAvI1v0RZrenZbuJh/ + VHpSxZbc9vMTgMsSSsWhKqthNDc/N71EozjK3XUE3dBjBNRHBesIbm7BBI6ZjQBjXWoRMh2YjoCg + PYYACTO2GVMHEgtgiMYB2L4oYI+vHNsl7CauTJok1gpHtg4Q9pmpATlQHhmlgZvbOdSkPvOeYwts + gAoIB1SDhinUE/SAmWVQ0GG/qFk9p8CRdNJIvosSpGXSJdwa9BSL/WUjtHGtVjBM04C1bDgdBfRF + jXoFjwn3gcqXRLmR3BcxdrIFDQYdMIQXyPQ0cP6wjaNRCNxS9DQ/+YVWpnQ4+ZmkmM0YytBMBwlD + kcavdF6JvElW4OjDUJe5HWGwzmOmOTQc8cyeMWqSbFhIlu501ffPOARpU5Z9iU4cQvisNxxZu/tM + qBLL6dUkTfD3GcDfMXbDf0lyKUuf7N7kkWIh/FpdT3zunPBz96o6NU0Mw7l+uV7PTgrd5PZ9w7Gl + nDKPKSw6Z++zfwAAAP//AwBANdK6fAMAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 852a842f29cffabe-SJC + - 8533103aae201637-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -451,7 +462,7 @@ interactions: Content-Type: - application/json Date: - - Fri, 09 Feb 2024 07:49:38 GMT + - Sat, 10 Feb 2024 08:43:18 GMT Server: - cloudflare Transfer-Encoding: @@ -465,7 +476,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '6156' + - '2948' openai-version: - '2020-10-01' strict-transport-security: @@ -477,13 +488,13 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299636' + - '299629' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 72ms + - 74ms x-request-id: - - req_c4d71f28b3724c8591d7a1c79a9773a8 + - req_bf6f7315d0ac178598b80627d7b1af72 status: code: 200 message: OK diff --git a/tests/cassettes/test_agent_without_max_rpm_respet_crew_rpm.yaml b/tests/cassettes/test_agent_without_max_rpm_respet_crew_rpm.yaml index 73a414c00..6c0ec156a 100644 --- a/tests/cassettes/test_agent_without_max_rpm_respet_crew_rpm.yaml +++ b/tests/cassettes/test_agent_without_max_rpm_respet_crew_rpm.yaml @@ -2,25 +2,27 @@ interactions: - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour personal goal is: test goalTOOLS:\n------\nYou have access to only the following - tools:\n\nDelegate work to co-worker: Useful to delegate a specific task to - one of the following co-workers: test role, test role2.\nThe input to this tool - should be a pipe (|) separated text of length 3 (three), representing the co-worker - you want to ask it to (one of the options), the task and all actual context - you have for the task.\nFor example, `coworker|task|context`.\nAsk question - to co-worker: Useful to ask a question, opinion or take from on of the following - co-workers: test role, test role2.\nThe input to this tool should be a pipe - (|) separated text of length 3 (three), representing the co-worker you want - to ask it to (one of the options), the question and all actual context you have - for the question.\n For example, `coworker|question|context`.\n\nTo use a tool, - please use the exact following format:\n\n```\nThought: Do I need to use a tool? - Yes\nAction: the action to take, should be one of [Delegate work to co-worker, - Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation: - the result of the action\n```\n\nWhen you have a response for your task, or - if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? 
No\nFinal Answer: [your response here]This is the summary - of your work so far:\nBegin! This is VERY important to you, your job depends - on it!\n\nCurrent Task: Just say hi.\n"}], "model": "gpt-4", "n": 1, "stop": - ["\nObservation"], "stream": false, "temperature": 0.7}' + tools:\n\nDelegate work to co-worker: Delegate work to co-worker(coworker: str, + task: str, context: str) - Delegate a specific task to one of the following + co-workers: test role2. The input to this tool should be the role of the coworker, + the task you want them to do, and ALL necessary context to exectue the task, + they know nothing about the task, so share absolute everything you know, don''t + reference things but instead explain them.\nAsk question to co-worker: Ask question + to co-worker(coworker: str, question: str, context: str) - Ask a specific question + to one of the following co-workers: test role2. The input to this tool should + be the role of the coworker, the question you have for them, and ALL necessary + context to ask the question properly, they know nothing about the question, + so share absolute everything you know, don''t reference things but instead explain + them.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: + Do I need to use a tool? Yes\nAction: the tool you wanna use, should be one + of [Delegate work to co-worker, Ask question to co-worker], just the name.\nAction + Input: Any and all relevant information input and context for using the tool\nObservation: + the result of using the tool\n```\n\nWhen you have a response for your task, + or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]```This is the + summary of your work so far:\nBegin! This is VERY important to you, your job + depends on it!\n\nCurrent Task: Just say hi.\n"}], "model": "gpt-4", "n": 1, + "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -29,13 +31,13 @@ interactions: connection: - keep-alive content-length: - - '1652' + - '1825' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -45,7 +47,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -53,18 +55,19 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8gkPA1vnW1g25Qr3SJLWB5zeOsUe4\",\n \"object\": - \"chat.completion\",\n \"created\": 1705198848,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
No\\nFinal - Answer: Hi!\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 367,\n \"completion_tokens\": - 17,\n \"total_tokens\": 384\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SQzU7DMBCE73mKlc8tStSmLbmgih8BEiAECFRAlZtsE4PjNfZGQKu+O3KatuKy + hxnP7LdeRwBCFSIDkVeS89rq/uSruFk9u+kjPfF92sx4tjq/G3h8Ob2ePoheSNDiA3PepY5yqq1G + VmS2du5QMobWZByP0zSNJ2lr1FSgDrHScn/Yj0fJoEtUpHL0IoPXCABg3c7AZgr8ERnEvZ1So/ey + RJHtHwEIRzooQnqvPEvDoncwczKMpsV9rKgpK87gjOAKDGIBTNB4BAlMpE/glt7MhTJSw9T4b3QZ + XCrRdW32EJpK62gRgE2j9V5fKqN8NXcoPZmw0DPZbXwTAby3xzb/+IV1VFueM32iCYWD4+G2Txz+ + 9eAmo85kYqkP+jCJo45Q+F/PWM+XypTorFPt7YEz2kR/AAAA//8DABJPZgfyAQAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 845266dd5f1f1a96-GRU + - 85331c3028ba7aac-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -74,14 +77,14 @@ interactions: Content-Type: - application/json Date: - - Sun, 14 Jan 2024 02:20:50 GMT + - Sat, 10 Feb 2024 08:51:26 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=TbP3njfV8Qa2w4gTk3GfLx8EOChqceUdui85lv8w_0s-1705198850-1-AVv9rXKeGNOAPhhVrRvcK49dv9odkct+so0djAQM52Bfzf/nc8ZTJ3zhk2LlfNTWSQ9dcT4UwZHjEHyP8LH39Xg=; - path=/; expires=Sun, 14-Jan-24 02:50:50 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=PApKfMOxYmqCPfv1QzKEuqGGYEUQhozoSuKopWRTbzg-1707555086-1-AeIZ1ozsIHX7RW0ZxnKJ17u8m5eIvUTNkNMhFDdTwc71TeSLRieXLv/0waJkA48Sz1+FjLHulvMGaPnUEf7K19k=; + path=/; expires=Sat, 10-Feb-24 09:21:26 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=y6p2l8rtoLMo6gBiQsUGfETrVWxPY83uCFZsS7Jozok-1705198850215-0-604800000; + - _cfuvid=ivN7v5z_DwT9MCx4fk0bd86wOUph.ZASgr.z0qq5y2k-1707555086057-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -94,7 +97,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1832' + - '899' openai-version: - '2020-10-01' strict-transport-security: @@ -106,15 +109,16 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299611' + - '299566' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 77ms + - 86ms x-request-id: - - 12d633fe239a29022ea7206037da096a - http_version: HTTP/1.1 - status_code: 200 + - req_561f62ebd7103f1fd1e54ced4754dcce + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -126,7 +130,7 @@ interactions: the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Just - say hi.\nAI: Hi!\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, + say hi.\nAI: Hi\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' headers: accept: @@ -136,16 +140,16 @@ interactions: connection: - keep-alive content-length: - - '867' + - '866' content-type: - application/json cookie: - - __cf_bm=TbP3njfV8Qa2w4gTk3GfLx8EOChqceUdui85lv8w_0s-1705198850-1-AVv9rXKeGNOAPhhVrRvcK49dv9odkct+so0djAQM52Bfzf/nc8ZTJ3zhk2LlfNTWSQ9dcT4UwZHjEHyP8LH39Xg=; - _cfuvid=y6p2l8rtoLMo6gBiQsUGfETrVWxPY83uCFZsS7Jozok-1705198850215-0-604800000 + - __cf_bm=PApKfMOxYmqCPfv1QzKEuqGGYEUQhozoSuKopWRTbzg-1707555086-1-AeIZ1ozsIHX7RW0ZxnKJ17u8m5eIvUTNkNMhFDdTwc71TeSLRieXLv/0waJkA48Sz1+FjLHulvMGaPnUEf7K19k=; + _cfuvid=ivN7v5z_DwT9MCx4fk0bd86wOUph.ZASgr.z0qq5y2k-1707555086057-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -155,7 +159,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -163,19 +167,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8gkPCF34yiaqHuTbro7PMBexYagyU\",\n \"object\": - \"chat.completion\",\n \"created\": 1705198850,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to simply say hi, - and the AI responds with \\\"Hi!\\\"\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 144,\n \"completion_tokens\": 18,\n \"total_tokens\": 162\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SQT0sDMRDF7/sphpxb2dW2lt48qQgFUbyolDQ73aRmM2lmFq3S7y7Z/tNLIPPy + Xn7zfgoA5Wo1A2WsFtNGP5xu6nl5a76f6ul9s35plo9JwsO6ruaXk40aZAct12jk6Low1EaP4ijs + ZZNQC+bU6rq8Ho/H5XTSCy3V6LOtiTIcDctJdXVwWHIGWc3gtQAA+OnPzBZq/FIzKAfHSYvMukE1 + Oz0CUIl8nijN7Fh0EDU4i4aCYOhxny2C7VodwAWW1BlhEItwcw9CsO5YgPUWrBvk+6d1xh71hBwp + 1Aw6xkQxOS3ot/DpxMKbunNv6kId/tydYD01MdEyLxY670/zlQuO7SKhZgoZjIXi3r4rAN77Urp/ + e6qYqI2yEPrAkAOr0Wifp879n9XL6iAKifZ/XJNxcSBUvGXBdrFyocEUk+s7ypzFrvgFAAD//wMA + D9d32RoCAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 845266ee78dc1a96-GRU + - 85331c380f547aac-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -185,7 +190,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 14 Jan 2024 02:20:52 GMT + - Sat, 10 Feb 2024 08:51:27 GMT Server: - cloudflare Transfer-Encoding: @@ -199,7 +204,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1287' + - '1035' openai-version: - '2020-10-01' strict-transport-security: @@ -217,34 +222,38 @@ interactions: x-ratelimit-reset-tokens: - 40ms x-request-id: - - 2a891eacb4940e36bec1689b1ab92ee0 - http_version: HTTP/1.1 - status_code: 200 + - req_d47c41d14baa963cd2a25770f0f33620 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour personal goal is: test goal2TOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just 
re-use this\n tool non-stop.\nDelegate - work to co-worker: Useful to delegate a specific task to one of the following - co-workers: test role, test role2.\nThe input to this tool should be a pipe - (|) separated text of length 3 (three), representing the co-worker you want - to ask it to (one of the options), the task and all actual context you have - for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker: - Useful to ask a question, opinion or take from on of the following co-workers: - test role, test role2.\nThe input to this tool should be a pipe (|) separated - text of length 3 (three), representing the co-worker you want to ask it to (one - of the options), the question and all actual context you have for the question.\n - For example, `coworker|question|context`.\n\nTo use a tool, please use the exact - following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the - action to take, should be one of [get_final_answer, Delegate work to co-worker, - Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation: - the result of the action\n```\n\nWhen you have a response for your task, or - if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary - of your work so far:\nBegin! This is VERY important to you, your job depends - on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer` - tool.\nThis is the context you''re working with:\nHi!\n"}], "model": "gpt-4", - "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + work to co-worker: Delegate work to co-worker(coworker: str, task: str, context: + str) - Delegate a specific task to one of the following co-workers: test role. + The input to this tool should be the role of the coworker, the task you want + them to do, and ALL necessary context to exectue the task, they know nothing + about the task, so share absolute everything you know, don''t reference things + but instead explain them.\nAsk question to co-worker: Ask question to co-worker(coworker: + str, question: str, context: str) - Ask a specific question to one of the following + co-workers: test role. The input to this tool should be the role of the coworker, + the question you have for them, and ALL necessary context to ask the question + properly, they know nothing about the question, so share absolute everything + you know, don''t reference things but instead explain them.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [get_final_answer, Delegate + work to co-worker, Ask question to co-worker], just the name.\nAction Input: + Any and all relevant information input and context for using the tool\nObservation: + the result of using the tool\n```\n\nWhen you have a response for your task, + or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]```This is the + summary of your work so far:\nBegin! 
This is VERY important to you, your job + depends on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using + the `get_final_answer` tool non-stop\nThis is the context you''re working with:\nHi\n"}], + "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' headers: accept: - application/json @@ -253,13 +262,13 @@ interactions: connection: - keep-alive content-length: - - '1923' + - '2101' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -269,7 +278,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -277,19 +286,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8gkPEml4Ifldq51knwwASO3nJroK3\",\n \"object\": - \"chat.completion\",\n \"created\": 1705198852,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction: - get_final_answer\\nAction Input: [1,2,3]\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 432,\n \"completion_tokens\": 28,\n \"total_tokens\": 460\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RQy27CMBC85ytWPkMFhfDIparUQ+mhJ4pUAYqMsyRuHa+JN4UW8e+VQ4D24sPM + znhmjhGA0JlIQKhCsiqd6U522Wt/+rVT+UEt9s67xXQ03728LUY/RSw6QUGbD1R8Ud0pKp1B1mTP + tKpQMgbX/rg3juO4Nxk3REkZmiDLHXeH3d6oP2gVBWmFXiSwjAAAjs0bstkMDyKBXueClOi9zFEk + 1yMAUZEJiJDea8/SsujcSEWW0TZx5wXVecEJPBHMwCJmwAS1R5DAROYB3tGv7KMKZRLIkdOtttKk + 0vo9VhcGZtbVnMByJZ71SqxF+9npmtJQ7irahEa2NuaKb7XVvkgrlJ5sSOSZ3Fl+igDWzRr1v4LC + VVQ6Tpk+0QbDYTw9+4nb8Df2ftiSTCzNH9VkELUJhf/2jGWolmPlKt2ME3JGp+gXAAD//wMAVFuU + bxMCAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 845266f9fc150316-GRU + - 85331c3f6c38cfc4-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -299,14 +309,14 @@ interactions: Content-Type: - application/json Date: - - Sun, 14 Jan 2024 02:20:53 GMT + - Sat, 10 Feb 2024 08:51:28 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=; - path=/; expires=Sun, 14-Jan-24 02:50:53 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=2FkgHfXDTEWwtJx9fuYC7bMgshbFiKyrz77uUDT7zZs-1707555088-1-Aepn1cZI06+lHA4fg6WnGlRYQvOzF/0+FklFgWjzvCx0c9PWNVB0SHCULztQ+AdxbloscbFE14+qlktmxccvWLA=; + path=/; expires=Sat, 10-Feb-24 09:21:28 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000; + - _cfuvid=euVu9TT31y3M3IcbZHjgSQIkmaipgL6G3sfBSomB.wA-1707555088785-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -319,7 +329,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1570' + - '1442' openai-version: - '2020-10-01' strict-transport-security: @@ -331,42 +341,48 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299545' + - '299499' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 91ms + - 100ms x-request-id: - - 133ee7e16a1491cfd7daa90ea3e42b74 - http_version: HTTP/1.1 - status_code: 200 + - 
req_b76f1918aa33f67b05eee313ca29e621 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour personal goal is: test goal2TOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate - work to co-worker: Useful to delegate a specific task to one of the following - co-workers: test role, test role2.\nThe input to this tool should be a pipe - (|) separated text of length 3 (three), representing the co-worker you want - to ask it to (one of the options), the task and all actual context you have - for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker: - Useful to ask a question, opinion or take from on of the following co-workers: - test role, test role2.\nThe input to this tool should be a pipe (|) separated - text of length 3 (three), representing the co-worker you want to ask it to (one - of the options), the question and all actual context you have for the question.\n - For example, `coworker|question|context`.\n\nTo use a tool, please use the exact - following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the - action to take, should be one of [get_final_answer, Delegate work to co-worker, - Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation: - the result of the action\n```\n\nWhen you have a response for your task, or - if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary - of your work so far:\nBegin! This is VERY important to you, your job depends - on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer` - tool.\nThis is the context you''re working with:\nHi!\nThought: Do I need to - use a tool? Yes\nAction: get_final_answer\nAction Input: [1,2,3]\nObservation: - 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": - false, "temperature": 0.7}' + work to co-worker: Delegate work to co-worker(coworker: str, task: str, context: + str) - Delegate a specific task to one of the following co-workers: test role. + The input to this tool should be the role of the coworker, the task you want + them to do, and ALL necessary context to exectue the task, they know nothing + about the task, so share absolute everything you know, don''t reference things + but instead explain them.\nAsk question to co-worker: Ask question to co-worker(coworker: + str, question: str, context: str) - Ask a specific question to one of the following + co-workers: test role. The input to this tool should be the role of the coworker, + the question you have for them, and ALL necessary context to ask the question + properly, they know nothing about the question, so share absolute everything + you know, don''t reference things but instead explain them.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? 
+ Yes\nAction: the tool you wanna use, should be one of [get_final_answer, Delegate + work to co-worker, Ask question to co-worker], just the name.\nAction Input: + Any and all relevant information input and context for using the tool\nObservation: + the result of using the tool\n```\n\nWhen you have a response for your task, + or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]```This is the + summary of your work so far:\nBegin! This is VERY important to you, your job + depends on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using + the `get_final_answer` tool non-stop\nThis is the context you''re working with:\nHi\nThought: + Do I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [\"Hi\"]\nObservation: + Actually, I used too many tools, so I''ll stop now and give you my absolute + BEST Final answer NOW, using the expected format: ```\nThought: Do I need to + use a tool? No\nFinal Answer: [your response here]```\nThought: "}], "model": + "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' headers: accept: - application/json @@ -375,16 +391,16 @@ interactions: connection: - keep-alive content-length: - - '2037' + - '2419' content-type: - application/json cookie: - - __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=; - _cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000 + - __cf_bm=2FkgHfXDTEWwtJx9fuYC7bMgshbFiKyrz77uUDT7zZs-1707555088-1-Aepn1cZI06+lHA4fg6WnGlRYQvOzF/0+FklFgWjzvCx0c9PWNVB0SHCULztQ+AdxbloscbFE14+qlktmxccvWLA=; + _cfuvid=euVu9TT31y3M3IcbZHjgSQIkmaipgL6G3sfBSomB.wA-1707555088785-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -394,7 +410,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -402,19 +418,22 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8gkPGwVu7CvxvQd3ZmBPHwz0WYbVQ\",\n \"object\": - \"chat.completion\",\n \"created\": 1705198854,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
Yes\\nAction: - get_final_answer\\nAction Input: [4,5,6]\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 469,\n \"completion_tokens\": 26,\n \"total_tokens\": 495\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA3RSTY/aMBS88ytGvvQSKhDLss2lAm2rIlVspa5arQARkzySFMcva7+UTVf73yuH + L/XQiw8znvG8N37tAarMVAyVFlrSqjb9u+dsMbLPRTX6+kO4SfdPB/rybfbn3j7M9ioKCt7+olTO + qvcpV7UhKdke6dSRFgquw8lgMh6PB3cfOqLijEyQ5bX0b/qD2+HopCi4TMmrGMseALx2Z8hmM3pR + MQbRGanIe52Tii+XAOXYBERp70sv2oqKrmTKVsh2ce8Zc1iiDMJoPEFDmM1HPJFf2WkaZoiRk2x2 + pdVmo60/kDszmNu6kRjLlZqm0mhj2gjz4BP8GJW2befnI3jG/J0x8MI1LB+gbYa8/E1ouUHVQm89 + m0YIs0/fH/E5PIfjc1g8/IzQ+NLmkIJALzWlQhl27CotMZIkwWPBTV5IjP+OtOCT67RzjbFsuXFw + 5Gu2nlCQo3WSJCu1VqdlvV22bDivHW9DI7Yx5oLvSlv6YuNIe7Zho2G+o/ytB6y7Npt/ClK146qW + jfCebDAc35zaVNePc2UnZ1JYtLnit8NB75RQ+dYLVaGjnFztyq7ckLP31vsLAAD//wMAzsuhnNMC + AAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 8452670589920316-GRU + - 85331c496beccfc4-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -424,7 +443,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 14 Jan 2024 02:20:55 GMT + - Sat, 10 Feb 2024 08:51:32 GMT Server: - cloudflare Transfer-Encoding: @@ -438,7 +457,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '983' + - '2973' openai-version: - '2020-10-01' strict-transport-security: @@ -450,259 +469,16 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299517' + - '299421' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 96ms + - 115ms x-request-id: - - cb1d820e78376ed344c16e6051cab6bc - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour - personal goal is: test goal2TOOLS:\n------\nYou have access to only the following - tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final - answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate - work to co-worker: Useful to delegate a specific task to one of the following - co-workers: test role, test role2.\nThe input to this tool should be a pipe - (|) separated text of length 3 (three), representing the co-worker you want - to ask it to (one of the options), the task and all actual context you have - for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker: - Useful to ask a question, opinion or take from on of the following co-workers: - test role, test role2.\nThe input to this tool should be a pipe (|) separated - text of length 3 (three), representing the co-worker you want to ask it to (one - of the options), the question and all actual context you have for the question.\n - For example, `coworker|question|context`.\n\nTo use a tool, please use the exact - following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the - action to take, should be one of [get_final_answer, Delegate work to co-worker, - Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation: - the result of the action\n```\n\nWhen you have a response for your task, or - if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary - of your work so far:\nBegin! 
This is VERY important to you, your job depends - on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer` - tool.\nThis is the context you''re working with:\nHi!\nThought: Do I need to - use a tool? Yes\nAction: get_final_answer\nAction Input: [1,2,3]\nObservation: - 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [4,5,6]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": - ["\nObservation"], "stream": false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '2142' - content-type: - - application/json - cookie: - - __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=; - _cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.7.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.7.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8gkPHbI2gWbt2veTT8auCPCstOnZh\",\n \"object\": - \"chat.completion\",\n \"created\": 1705198855,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction: - get_final_answer\\nAction Input: [7,8,9]\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 503,\n \"completion_tokens\": 26,\n \"total_tokens\": 529\n },\n \"system_fingerprint\": - null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 8452670dca090316-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 14 Jan 2024 02:20:57 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '2350' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299492' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 101ms - x-request-id: - - b40ffecf3dc8f547453bfb166c8c52dd - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour - personal goal is: test goal2TOOLS:\n------\nYou have access to only the following - tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final - answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate - work to co-worker: Useful to delegate a specific task to one of the following - co-workers: test role, test role2.\nThe input to this tool should be a pipe - (|) separated text of length 3 (three), representing the co-worker you want - to ask it to (one of the options), the task and all actual context you have - for the task.\nFor example, 
`coworker|task|context`.\nAsk question to co-worker: - Useful to ask a question, opinion or take from on of the following co-workers: - test role, test role2.\nThe input to this tool should be a pipe (|) separated - text of length 3 (three), representing the co-worker you want to ask it to (one - of the options), the question and all actual context you have for the question.\n - For example, `coworker|question|context`.\n\nTo use a tool, please use the exact - following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the - action to take, should be one of [get_final_answer, Delegate work to co-worker, - Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation: - the result of the action\n```\n\nWhen you have a response for your task, or - if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary - of your work so far:\nBegin! This is VERY important to you, your job depends - on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer` - tool.\nThis is the context you''re working with:\nHi!\nThought: Do I need to - use a tool? Yes\nAction: get_final_answer\nAction Input: [1,2,3]\nObservation: - 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [4,5,6]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: - get_final_answer\nAction Input: [7,8,9]\nObservation: I''ve used too many tools - for this task. I''m going to give you my absolute BEST Final answer now and - not use any more tools.\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], - "stream": false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '2368' - content-type: - - application/json - cookie: - - __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=; - _cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.7.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.7.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8gkPKsrN7s3j8bLplwjkAD2c7E6RK\",\n \"object\": - \"chat.completion\",\n \"created\": 1705198858,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: - I've used too many tools for this task. 
I'm going to give you my absolute BEST - Final answer now and not use any more tools.\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 563,\n \"completion_tokens\": 42,\n \"total_tokens\": 605\n },\n \"system_fingerprint\": - null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 8452671e0b500316-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 14 Jan 2024 02:21:00 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '2080' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299436' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 112ms - x-request-id: - - b2b7142dc669054b82773d62077e9918 - http_version: HTTP/1.1 - status_code: 200 + - req_90dad1554de8c627760a03ab5313ce5d + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -714,11 +490,10 @@ interactions: the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Don''t - give a Final Answer, instead keep using the `get_final_answer` tool.\nThis is - the context you''re working with:\nHi!\nAI: I''ve used too many tools for this - task. 
I''m going to give you my absolute BEST Final answer now and not use any - more tools.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": - 0.7}' + give a Final Answer, instead keep using the `get_final_answer` tool non-stop\nThis + is the context you''re working with:\nHi\nAI: Agent stopped due to iteration + limit or time limit.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": + false, "temperature": 0.7}' headers: accept: - application/json @@ -727,16 +502,16 @@ interactions: connection: - keep-alive content-length: - - '1096' + - '1031' content-type: - application/json cookie: - - __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=; - _cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000 + - __cf_bm=2FkgHfXDTEWwtJx9fuYC7bMgshbFiKyrz77uUDT7zZs-1707555088-1-Aepn1cZI06+lHA4fg6WnGlRYQvOzF/0+FklFgWjzvCx0c9PWNVB0SHCULztQ+AdxbloscbFE14+qlktmxccvWLA=; + _cfuvid=euVu9TT31y3M3IcbZHjgSQIkmaipgL6G3sfBSomB.wA-1707555088785-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -746,7 +521,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -754,21 +529,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8gkPMD1hkXKA5DP8MyMJNpAxJXhzj\",\n \"object\": - \"chat.completion\",\n \"created\": 1705198860,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human instructs the AI not to give - a final answer but to continue using the `get_final_answer` tool. 
However, the - AI responds that it has already used many tools for the task and chooses to - provide its best final answer without using any more tools.\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 196,\n \"completion_tokens\": 53,\n \"total_tokens\": 249\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RRTU8jMQy9z6+wcm6raekH7Q3tHuAC0mq10oJQCTNmJpDY2dhTFqH+d5SZfohL + pLznZz8/fxYAxtVmA6ZqrVYh+vHlv/p2eXf9a/5nfcft6/3fm58rb2ed/qDd0oyygp9fsdKjalJx + iB7VMQ10ldAq5q7TVblaLBbletYTgWv0WdZEHc/H5XJ6cVC07CoUs4GHAgDgs3+zN6rxv9lAOToi + AUVsg2ZzKgIwiX1GjBVxopbUjM5kxaRIvd3fLULbBUvgSDR1lQpoi3B1A8QKytC4HYKFF0fWgyV5 + xwSW6ky9IUboxFHTa54a1G1ftx3qnkCZPRDTWJTjBK75HXeYRscRtkFSyJxA3WHuiU5bTOAUk80B + gnfBKXACdQGH38QcdtmfQvDcxMTPOTDqvD/hL46ctNuEVpjywnnYIN8XAI992N23/ExMHKJuld+Q + pL/Zeuhnznc9s/PpgVRW68/4bFYWB4dGPkQx5GwaTDG5Pvvss9gXXwAAAP//AwAJcyracgIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 8452672d1bba0316-GRU + - 85331c5e2baccfc4-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -778,7 +552,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 14 Jan 2024 02:21:03 GMT + - Sat, 10 Feb 2024 08:51:35 GMT Server: - cloudflare Transfer-Encoding: @@ -792,7 +566,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2683' + - '2537' openai-version: - '2020-10-01' strict-transport-security: @@ -804,13 +578,14 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299742' + - '299758' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 51ms + - 48ms x-request-id: - - c00193638e659df4a60ef9425649fe2c - http_version: HTTP/1.1 - status_code: 200 + - req_c2fe098b8481dac7ed32f9d683eb36ac + status: + code: 200 + message: OK version: 1 diff --git a/tests/cassettes/test_api_calls_throttling.yaml b/tests/cassettes/test_api_calls_throttling.yaml index 3dc23f990..021ddd4cb 100644 --- a/tests/cassettes/test_api_calls_throttling.yaml +++ b/tests/cassettes/test_api_calls_throttling.yaml @@ -1,19 +1,20 @@ interactions: - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to the following + personal goal is: test goalTOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: Don''t give a Final Answer, instead keep using the `get_final_answer` - tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, - "temperature": 0.7}' + to use a tool? 
Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give + a Final Answer, instead keep using the `get_final_answer` tool.\n"}], "model": + "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' headers: accept: - application/json @@ -22,13 +23,13 @@ interactions: connection: - keep-alive content-length: - - '1055' + - '1117' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -38,7 +39,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -46,19 +47,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8gQL7bRtT6PceuMWF91CSHDk1cziN\",\n \"object\": - \"chat.completion\",\n \"created\": 1705121717,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction: - get_final_answer\\nAction Input: [1, 2, 3]\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 226,\n \"completion_tokens\": 30,\n \"total_tokens\": 256\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RQy07DMBC85ytWPqcobfqguSAkJFS4FYSESlW5zjYxdbzG3ohH1X9HTl9w8WFm + ZzwzuwRA6FIUIFQtWTXO9K4/ynvjXuYPI5pt56V/esx+fvLrcaaVfBJpVND6HRWfVFeKGmeQNdkD + rTxKxujan2ST0Wg4nk47oqESTZRVjnvDXjbu50dFTVphEAUsEgCAXffGbLbEL1FAlp6QBkOQFYri + fAQgPJmICBmCDiwti/RCKrKMtov7XFNb1VzAHcEMLGIJTNAGBAlMZG7gFcObvVWxTAEV8mqjrTQr + acMn+hMDM+taLmDRT2GQQp7CMIXRUhz/3J/DGqqcp3UsZltjzvhGWx3qlUcZyMZggckd5PsEYNmN + 0v7rKZynxvGKaYs2Gg7yycFPXPa/sPn4SDKxNH9Ukzw5JhThOzA2sWGF3nndbRRzJvvkFwAA//8D + AIa5PIIaAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 844b0bccfc300301-GRU + - 853312c9c93cfa9a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -68,14 +70,14 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 04:55:19 GMT + - Sat, 10 Feb 2024 08:45:01 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=LaiWRSzgH0KTl1zCkIyak7.Xo2gxpZU1B.b9dgz2TzI-1705121719-1-Ab9fhdn6OHTc5/5ibPLP1pc+XN4Z3ORV0Nku/+S5BqdXcS84TsVDiACZUN0Xxvew6yfZ6ApEBQOBsEhJNyVkqCM=; - path=/; expires=Sat, 13-Jan-24 05:25:19 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=NJWb4ZNNYapUEtFti95bFA0mtHjwveJogWpXDw2VQFU-1707554701-1-Ab7rDuisnyQ62l2GCzwCCGbk7lFjHGwubCf+gYjd++zn5l64PT7M8eCx2tvDUnaFiHttjJ2IzvM+xQb26G6wWDE=; + path=/; expires=Sat, 10-Feb-24 09:15:01 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=6J6x2HNoj7l3E6T3yHnLcNZb273IuGETTaD1BBeY_OQ-1705121719455-0-604800000; + - _cfuvid=C0SLvowfGVmVkPIw2ixBbKCDFfyz.SihtGKV4BWgyu4-1707554701424-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -88,7 +90,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz 
openai-processing-ms: - - '1751' + - '1537' openai-version: - '2020-10-01' strict-transport-security: @@ -100,31 +102,35 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299760' + - '299744' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 48ms + - 51ms x-request-id: - - ae0de27cb07da02e7bce2d30958b7e14 - http_version: HTTP/1.1 - status_code: 200 + - req_37ba2c52cf5e91b8f5ab5a9e7d5a549a + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to the following - tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final - answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo - use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: Don''t give a Final Answer, instead keep using the `get_final_answer` - tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [1, 2, 3]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": - ["\nObservation"], "stream": false, "temperature": 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription: + get_final_answer(numbers) -> float - Get the final answer but don''t give it + yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid + ouput schema:\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: [1, 2, 3, 4, 5]\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. 
+ The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -133,16 +139,16 @@ interactions: connection: - keep-alive content-length: - - '1171' + - '1420' content-type: - application/json cookie: - - __cf_bm=LaiWRSzgH0KTl1zCkIyak7.Xo2gxpZU1B.b9dgz2TzI-1705121719-1-Ab9fhdn6OHTc5/5ibPLP1pc+XN4Z3ORV0Nku/+S5BqdXcS84TsVDiACZUN0Xxvew6yfZ6ApEBQOBsEhJNyVkqCM=; - _cfuvid=6J6x2HNoj7l3E6T3yHnLcNZb273IuGETTaD1BBeY_OQ-1705121719455-0-604800000 + - __cf_bm=NJWb4ZNNYapUEtFti95bFA0mtHjwveJogWpXDw2VQFU-1707554701-1-Ab7rDuisnyQ62l2GCzwCCGbk7lFjHGwubCf+gYjd++zn5l64PT7M8eCx2tvDUnaFiHttjJ2IzvM+xQb26G6wWDE=; + _cfuvid=C0SLvowfGVmVkPIw2ixBbKCDFfyz.SihtGKV4BWgyu4-1707554701424-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -152,7 +158,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -160,19 +166,21 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8gQL9CyNZOIEDKgfmDn68aH26tLrn\",\n \"object\": - \"chat.completion\",\n \"created\": 1705121719,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
Yes\\nAction: - get_final_answer\\nAction Input: [4, 5, 6]\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 265,\n \"completion_tokens\": 28,\n \"total_tokens\": 293\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRwW7bMBBE7/qKBc9yYcV21epYF2iABAWKFughDGSKWkt0yKVCrtIEhv69oOzY + 7UWHmX2r2eExAxCmFRUI3SvWbrCLT8/tN2qb7dOXw+/7cf3DNV/f7rbN66/v25+lyBPhmwNqfqc+ + aO8Gi2w8nWwdUDGmrUW5LDebdbksZsP5Fm3CuoEX68XyY7E6E703GqOo4CEDADjO35SNWnwVFSzz + d8VhjKpDUV2GAETwNilCxWgiK2KRX03tiZHmuLcYEEwEBS/Kmhb8yMPIYChBGmHvA3CP0JkXJIi6 + R6cqSZJ2u90hepJ0lAQgxX4knQ6uSTmUogIpOuR6b0jZWlH8g0GK/DSrQjc6JI5pbuaTSqNrMMza + Q5HDTQ6rHNY5bB7TxCRpmv8qzndMlwKs74bgm1QWjdZe9L0hE/s6oIqe0rGR/XDCpwzgcS56/K87 + MQTvBq7ZPyGlhavi82mfuL7p1d2sziZ7VvYfqrzJzglFfIuMLhXRYRiCmXtPObMp+wsAAP//AwDG + yxiVbgIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 844b0bdaee800301-GRU + - 853312d44981fa9a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -182,7 +190,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 04:55:22 GMT + - Sat, 10 Feb 2024 08:45:04 GMT Server: - cloudflare Transfer-Encoding: @@ -196,228 +204,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2615' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299731' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 53ms - x-request-id: - - 9d85a22aa63ee4cea3ad01a3b68316b4 - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to the following - tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final - answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo - use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: Don''t give a Final Answer, instead keep using the `get_final_answer` - tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [1, 2, 3]\nObservation: 42\nThought: Do I need to use a tool? 
Yes\nAction: - get_final_answer\nAction Input: [4, 5, 6]\nObservation: 42\nThought: "}], "model": - "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": - 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '1278' - content-type: - - application/json - cookie: - - __cf_bm=LaiWRSzgH0KTl1zCkIyak7.Xo2gxpZU1B.b9dgz2TzI-1705121719-1-Ab9fhdn6OHTc5/5ibPLP1pc+XN4Z3ORV0Nku/+S5BqdXcS84TsVDiACZUN0Xxvew6yfZ6ApEBQOBsEhJNyVkqCM=; - _cfuvid=6J6x2HNoj7l3E6T3yHnLcNZb273IuGETTaD1BBeY_OQ-1705121719455-0-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.7.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.7.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8gQMARI9zwx2Z1Uel5us1xzxPAuGJ\",\n \"object\": - \"chat.completion\",\n \"created\": 1705121782,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction: - get_final_answer\\nAction Input: [7, 8, 9]\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 301,\n \"completion_tokens\": 28,\n \"total_tokens\": 329\n },\n \"system_fingerprint\": - null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 844b0d63fa7a02ee-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sat, 13 Jan 2024 04:56:24 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '1878' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299705' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 58ms - x-request-id: - - d98dffd5ba9915b41051dcad49109eba - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to the following - tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final - answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo - use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: Don''t give a Final Answer, instead keep using the `get_final_answer` - tool.\nThought: Do I need to use a tool? 
Yes\nAction: get_final_answer\nAction - Input: [1, 2, 3]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: - get_final_answer\nAction Input: [4, 5, 6]\nObservation: 42\nThought: Do I need - to use a tool? Yes\nAction: get_final_answer\nAction Input: [7, 8, 9]\nObservation: - 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": - false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '1385' - content-type: - - application/json - cookie: - - __cf_bm=LaiWRSzgH0KTl1zCkIyak7.Xo2gxpZU1B.b9dgz2TzI-1705121719-1-Ab9fhdn6OHTc5/5ibPLP1pc+XN4Z3ORV0Nku/+S5BqdXcS84TsVDiACZUN0Xxvew6yfZ6ApEBQOBsEhJNyVkqCM=; - _cfuvid=6J6x2HNoj7l3E6T3yHnLcNZb273IuGETTaD1BBeY_OQ-1705121719455-0-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.7.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.7.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8gQMC51udijWPWSLCk1Cx36OVtDUX\",\n \"object\": - \"chat.completion\",\n \"created\": 1705121784,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction: - get_final_answer\\nAction Input: [10, 11, 12]\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 337,\n \"completion_tokens\": 28,\n \"total_tokens\": 365\n },\n \"system_fingerprint\": - null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 844b0d72cbbd02ee-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sat, 13 Jan 2024 04:56:27 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '2939' + - '2515' openai-version: - '2020-10-01' strict-transport-security: @@ -435,31 +222,27 @@ interactions: x-ratelimit-reset-tokens: - 64ms x-request-id: - - 528f38b4e5dd09a1fd25893edb27e4b4 - http_version: HTTP/1.1 - status_code: 200 + - req_5b99d832f6f62d60ad2c668c460a1fba + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goalTOOLS:\n------\nYou have access to the following + personal goal is: test goalTOOLS:\n------\nYou have access to only the following tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]This is the summary of your work - so far:\nBegin! 
This is VERY important to you, your job depends on it!\n\nCurrent - Task: Don''t give a Final Answer, instead keep using the `get_final_answer` - tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [1, 2, 3]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: - get_final_answer\nAction Input: [4, 5, 6]\nObservation: 42\nThought: Do I need - to use a tool? Yes\nAction: get_final_answer\nAction Input: [7, 8, 9]\nObservation: - 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction - Input: [10, 11, 12]\nObservation: I''ve used too many tools for this task. I''m - going to give you my absolute BEST Final answer now and not use any more tools.\nThought: - "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": - 0.7}' + to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give + a Final Answer, instead keep using the `get_final_answer` tool.\nThought: Do + I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [1, 2, 3, + 4, 5]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], + "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -468,16 +251,16 @@ interactions: connection: - keep-alive content-length: - - '1616' + - '1239' content-type: - application/json cookie: - - __cf_bm=LaiWRSzgH0KTl1zCkIyak7.Xo2gxpZU1B.b9dgz2TzI-1705121719-1-Ab9fhdn6OHTc5/5ibPLP1pc+XN4Z3ORV0Nku/+S5BqdXcS84TsVDiACZUN0Xxvew6yfZ6ApEBQOBsEhJNyVkqCM=; - _cfuvid=6J6x2HNoj7l3E6T3yHnLcNZb273IuGETTaD1BBeY_OQ-1705121719455-0-604800000 + - __cf_bm=NJWb4ZNNYapUEtFti95bFA0mtHjwveJogWpXDw2VQFU-1707554701-1-Ab7rDuisnyQ62l2GCzwCCGbk7lFjHGwubCf+gYjd++zn5l64PT7M8eCx2tvDUnaFiHttjJ2IzvM+xQb26G6wWDE=; + _cfuvid=C0SLvowfGVmVkPIw2ixBbKCDFfyz.SihtGKV4BWgyu4-1707554701424-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -487,7 +270,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -495,20 +278,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8gQMGpMHplpQ8ApiUZmhbX4z9gVuK\",\n \"object\": - \"chat.completion\",\n \"created\": 1705121788,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: - I've used the tool 'get_final_answer' multiple times and have observed the result. 
- However, as per my task, I am to keep using this tool and not provide a final - answer.\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n - \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 399,\n \"completion_tokens\": - 52,\n \"total_tokens\": 451\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RQwWrCQBS85ysee44lajSaS5EKrcWCSHsoVsKavMTYzb41+6Qt4r+XjVHbyx5m + 3szOzNEDEGUmYhDpVnJaGdUZ7bPH/VNvNM3nD4OX+ZseL1X0PFnkr8vdQvhOQZsdpnxR3aVUGYVc + kj7TaY2S0bl2oyAaDMIoCBuiogyVkxWGO2EnGHb7rWJLZYpWxLDyAACOzeuy6Qy/RQyBf0EqtFYW + KOLrEYCoSTlESGtLy1Kz8G9kSppRN3GnBDPQiBkwwcEiSGAidQ/vaD/0JHUdYiiQk7zUUiVS2y+s + LwzMtDlwDKuhD5EPIx/GPnSDtWj/Ol1DKipMTRtXSB+UuuJ5qUu7TWqUlrQLZJnMWX7yANbNGId/ + /YSpqTKcMH2idoa9Ue/sJ26739h+2JJMLNUfvDv02oTC/ljGylUssDZ12Wzjcnon7xcAAP//AwD/ + xADjEgIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 844b0d86a87f02ee-GRU + - 853312e4ae0dfa9a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -518,7 +301,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 04:56:31 GMT + - Sat, 10 Feb 2024 08:45:05 GMT Server: - cloudflare Transfer-Encoding: @@ -532,7 +315,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '3227' + - '1489' openai-version: - '2020-10-01' strict-transport-security: @@ -544,15 +327,701 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299624' + - '299715' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 75ms + - 57ms x-request-id: - - 7a91d2e523c43f5fc9a1191437491d68 - http_version: HTTP/1.1 - status_code: 200 + - req_c6382696541f9bd59800fbcce47a32ab + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription: + get_final_answer(numbers) -> float - Get the final answer but don''t give it + yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid + ouput schema:\nDo I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: [6, 7, 8, 9, 10]\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. 
+ The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1412' + content-type: + - application/json + cookie: + - __cf_bm=NJWb4ZNNYapUEtFti95bFA0mtHjwveJogWpXDw2VQFU-1707554701-1-Ab7rDuisnyQ62l2GCzwCCGbk7lFjHGwubCf+gYjd++zn5l64PT7M8eCx2tvDUnaFiHttjJ2IzvM+xQb26G6wWDE=; + _cfuvid=C0SLvowfGVmVkPIw2ixBbKCDFfyz.SihtGKV4BWgyu4-1707554701424-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1RRPW/bMBDd9SsOnOVCamwr1RYEaIoMzVCgLRIGMk2dJbbkUSVPqVtD/z2g7Njt + wuF9HN+9O2QAwrSiBqF7xdoNdnH9q72Lt0tL949f79Z/P5qbh2/773H/2Bv7XuTJ4bc/UPOb6532 + brDIxtOR1gEVY5paVkW1Wi2rYj0Tzrdok60beLFcFOvy6uTovdEYRQ1PGQDAYX5TNmpxL2oo8jfE + YYyqQ1GfRQAieJsQoWI0kRWxyC+k9sRIc9xPGBBMBAUvypoW/MjDyMC9YtCedj64COyBe4TOvCDB + /ZeHzxB1j07VkiRtNhtJB0kAUuxG0mnrhpRDKWqQokNudoaUbRTF3xikyI9aFbrRIXFMutmfUBrd + FsOMPa1zqHK4zuFDDmXxnCSTpGn+Upy2mc41WN8NwW9TZTRae8Z3hkzsm4AqekorR/bD0T5lAM9z + 3eN/DYoheDdww/4nUhp4VVbHeeJy2Qu7Wp5I9qzsP66qzE4JRfwTGV1qosMwBDO3n3JmU/YKAAD/ + /wMASkmYsHQCAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 853312f06ed9fa9a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:45:09 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '3033' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299681' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 63ms + x-request-id: + - req_6464ce1c9571d1963027011e0aa6837b + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final + answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo + use a tool, please use the exact following format:\n\n```\nThought: Do I need + to use a tool? 
Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give + a Final Answer, instead keep using the `get_final_answer` tool.\nThought: Do + I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [1, 2, 3, + 4, 5]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: [6, 7, 8, 9, 10]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": + 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1353' + content-type: + - application/json + cookie: + - __cf_bm=NJWb4ZNNYapUEtFti95bFA0mtHjwveJogWpXDw2VQFU-1707554701-1-Ab7rDuisnyQ62l2GCzwCCGbk7lFjHGwubCf+gYjd++zn5l64PT7M8eCx2tvDUnaFiHttjJ2IzvM+xQb26G6wWDE=; + _cfuvid=C0SLvowfGVmVkPIw2ixBbKCDFfyz.SihtGKV4BWgyu4-1707554701424-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1RQwU7CQBS89yte9lxMC61gL0ZCNJh4MPFCkJBt+yjV7b5191UlhH83WyjgZQ8z + b2ZnZh8AiLoUGYhiK7lojBpMvsqnb55OH5t89rbAXf76Yp5nFNmkGrYi9ArKP7DgXnVTUGMUck36 + SBcWJaN3jcfROE2TcXTXEQ2VqLysMjxIBtFtPDoptlQX6EQGywAAYN+9Ppsu8VdkEIU90qBzskKR + nY8AhCXlESGdqx1LzSK8kAVpRt3FnRHMQSOWwAStQ5DAROoeFuje9UPhO2RQIa83tZZqLbX7Qdsz + MNem5QyWcRxCPAwhHoUQJyHE6Uqc/jucgyqqjKXcl9KtUmd8U+vabdcWpSPtQzkmc5QfAoBVN0j7 + r6MwlhrDa6ZP1N5wNEyOfuKy/RXbk0ws1RWeToJTQuF2jrHxNSu0xtbdPj5ncAj+AAAA//8DAGWe + oJUWAgAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 853313040d24fa9a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:45:11 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1747' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299686' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 62ms + x-request-id: + - req_20bc79d79ac6febd528290df84f8fbda + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription: + get_final_answer(numbers) -> float - Get the final 
answer but don''t give it + yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid + ouput schema:\nDo I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: [11, 12, 13, 14, 15]\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1416' + content-type: + - application/json + cookie: + - __cf_bm=NJWb4ZNNYapUEtFti95bFA0mtHjwveJogWpXDw2VQFU-1707554701-1-Ab7rDuisnyQ62l2GCzwCCGbk7lFjHGwubCf+gYjd++zn5l64PT7M8eCx2tvDUnaFiHttjJ2IzvM+xQb26G6wWDE=; + _cfuvid=C0SLvowfGVmVkPIw2ixBbKCDFfyz.SihtGKV4BWgyu4-1707554701424-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SRyU7DMBCG73mK0ZxTVLcNQbmVSxGL2htIBEVuMk0D9jjYjliqvDtyUkq5+PAv + o2/GhwgAmwozwHIvfalbNbl6r1af38nm6346e3pcrTeb5Xp1e22WD9XdDcahYbavVPrf1kVpdKvI + N4ZHu7QkPYWpIp2mSbJIhRgMbSpSoVa3frKYTC/F/NjYm6Ykhxk8RwAAh+ENbFzRJ2YwjX8VTc7J + mjA7hQDQGhUUlM41zkv2GP+ZpWFPPOAecgbIcddxGXALlppyzCDHmnyxa1iqQrL7IJtjPGalrTtN + 7F3IDf2gcqe3ZAftWYgYxCwGMY9BLGIQyUuI9Tn3eKToT/jK1K0127Aqd0qd9F3DjdsXlqQzHFCd + N+1Y7yOAl+FM3b/NsbVGt77w5o04DJyLdJyHfz9y5l4dTW+8VGd6kkRHQnRfzpMOh6jJtrYZrhY4 + oz76AQAA//8DAE7a5qosAgAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331311af88fa9a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:45:14 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '3273' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299680' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 63ms + x-request-id: + - req_41a40816728184bd0ac361bc055f1dbb + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest 
backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final + answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo + use a tool, please use the exact following format:\n\n```\nThought: Do I need + to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give + a Final Answer, instead keep using the `get_final_answer` tool.\nThought: Do + I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [1, 2, 3, + 4, 5]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: [6, 7, 8, 9, 10]\nObservation: 42\nThought: Do I need to use a tool? + Yes\nAction: get_final_answer\nAction Input: [11, 12, 13, 14, 15]\nObservation: + 42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": + false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1471' + content-type: + - application/json + cookie: + - __cf_bm=NJWb4ZNNYapUEtFti95bFA0mtHjwveJogWpXDw2VQFU-1707554701-1-Ab7rDuisnyQ62l2GCzwCCGbk7lFjHGwubCf+gYjd++zn5l64PT7M8eCx2tvDUnaFiHttjJ2IzvM+xQb26G6wWDE=; + _cfuvid=C0SLvowfGVmVkPIw2ixBbKCDFfyz.SihtGKV4BWgyu4-1707554701424-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1RQy07DMBC85ytWPqcopmkackFFlFI4cURQVW6yTQOO19gbFVT135HTF1x8mNkZ + z8wuAhBNJQoQ5UZx2Vo9yL+qR/kwe8lpK5Nk5vPp3fPwabWZOkonIg4KWn1gySfVVUmt1cgNmQNd + OlSMwVWOk/FolI7lqCdaqlAHWW15kA6STA6Pig01JXpRwFsEALDr35DNVPgtCkjiE9Ki96pGUZyP + AIQjHRChvG88K8MivpAlGUbTx70nmINBrIAJOo+ggIn0LbyifzeTMnQooEZerhuj9FIZv0V3YmBu + bMcFvMksBjmOQeYxyJsYrpOFOP63PwfVVFtHq1DKdFqf8XVjGr9ZOlSeTAjlmexBvo8AFv0g3b+O + wjpqLS+ZPtEEw2GWHfzEZfs/bHokmVjpC54mSXRMKPyPZ2xDzRqddU2/T8gZ7aNfAAAA//8DAGoz + YoIWAgAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8533132858acfa9a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:45:16 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1468' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299659' + 
x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 68ms + x-request-id: + - req_6522cc3cca31038412a393a32110aba8 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final + answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo + use a tool, please use the exact following format:\n\n```\nThought: Do I need + to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: Don''t give + a Final Answer, instead keep using the `get_final_answer` tool.\nThought: Do + I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [1, 2, 3, + 4, 5]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: [6, 7, 8, 9, 10]\nObservation: 42\nThought: Do I need to use a tool? + Yes\nAction: get_final_answer\nAction Input: [11, 12, 13, 14, 15]\nObservation: + 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: [16, 17, 18, 19, 20]\nObservation: Actually, I used too many tools, so + I''ll stop now and give you my absolute BEST Final answer NOW, using the expected + format: ```\nThought: Do I need to use a tool? 
No\nFinal Answer: [your response + here]```\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], + "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1792' + content-type: + - application/json + cookie: + - __cf_bm=NJWb4ZNNYapUEtFti95bFA0mtHjwveJogWpXDw2VQFU-1707554701-1-Ab7rDuisnyQ62l2GCzwCCGbk7lFjHGwubCf+gYjd++zn5l64PT7M8eCx2tvDUnaFiHttjJ2IzvM+xQb26G6wWDE=; + _cfuvid=C0SLvowfGVmVkPIw2ixBbKCDFfyz.SihtGKV4BWgyu4-1707554701424-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1RQy27CMBC85ytWPocK8iAll6oPVaVqD71UqiiKjLOEFMdr4kW0Qvx75RCgvfgw + szOemX0AIOpS5CDUSrJqrB5cb8qnKInV6+Zlnb1NKHverT/ul9XqMb17F6FX0OILFZ9UV4oaq5Fr + MkdatSgZvesoG2ZpmmSjcUc0VKL2ssryIBkMx6O4V6yoVuhEDrMAAGDfvT6bKfFb5DAMT0iDzskK + RX4+AhAtaY8I6VztWBoW4YVUZBhNF/eBYAoGsQQm2DoECUykb+AD3ae5Vb5DDhVysayN1IU0boft + iYGpsVvOYRaNQoiiEKI4hCgJIUrnov/vcA6qqbItLXwps9X6jC9rU7tV0aJ0ZHwox2SP8kMAMO8G + 2f7rKGxLjeWCaY3GGybp+OgnLttf2DjpSSaW+o9qMgz6hML9OMbG16ywtW3d7eNzBofgFwAA//8D + AE/t8ocWAgAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331333ea12fa9a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:45:19 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2232' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299580' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 84ms + x-request-id: + - req_c17a39ebd2d846e68698a3fbc40d9d07 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription: + get_final_answer(numbers) -> float - Get the final answer but don''t give it + yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid + ouput schema:\nDo I need to use a tool? Yes\nAction: get_final_answer\nAction + Input: [21, 22, 23, 24, 25]\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. 
+ The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1416' + content-type: + - application/json + cookie: + - __cf_bm=NJWb4ZNNYapUEtFti95bFA0mtHjwveJogWpXDw2VQFU-1707554701-1-Ab7rDuisnyQ62l2GCzwCCGbk7lFjHGwubCf+gYjd++zn5l64PT7M8eCx2tvDUnaFiHttjJ2IzvM+xQb26G6wWDE=; + _cfuvid=C0SLvowfGVmVkPIw2ixBbKCDFfyz.SihtGKV4BWgyu4-1707554701424-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1RRy46UQBTd8xU3taZNQ4OMLE0mvjVqojHDhC6K21Ba3GLqMT3a4d9NAd2tGxbn + xalzTxEAky0rgYmeOzGManPz0L7O3zU/sqfbN/T+KF95nv35zm8/uy8fvrE4OHTzE4U7u54JPYwK + ndS00MIgdxhSk2Jb5HlWJC9mYtAtqmDrRrfJNtvnyW519FoKtKyEuwgA4DR/Qzdq8YmVsI3PyIDW + 8g5ZeREBMKNVQBi3VlrHybH4SgpNDmmu+5JbbEETuB6hk49IIGn0Dji1YPDBSxN47wJmRY8Dj4HD + I1eyhbdfP30ESSFfIBy1Vy00WFZU0X6/r+hUUfhfxQ6eRFijJj5gxUqoWIeuPkjiquZkj2gqFp/V + 3HR+QHI2KNeMhSE/NGhm/C5NYkjTGNJdDGkWQ5rfL9KpomkuwNYXT5eplO5Go5swK3mlLvhBkrR9 + bZBbTWEW6/S42KcI4H4+if9vZTYaPYyudvoXUgjcJcWSx67Xv7L5zUo67bj6x1Xk0dqQ2d/W4RBW + 6dCMRs4XCj2jKfoLAAD//wMAjL+tNJgCAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331342fd9afa9a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:45:23 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '3945' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299680' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 63ms + x-request-id: + - req_b383ebc29458af536e92a9c605a3371f + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -564,11 +1033,9 @@ interactions: the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Don''t - give a Final Answer, instead keep using the `get_final_answer` tool.\nAI: I''ve - used the tool ''get_final_answer'' multiple times and have observed the result. 
- However, as per my task, I am to keep using this tool and not provide a final - answer.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": - 0.7}' + give a Final Answer, instead keep using the `get_final_answer` tool.\nAI: Agent + stopped due to iteration limit or time limit.\n\nNew summary:"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -577,16 +1044,16 @@ interactions: connection: - keep-alive content-length: - - '1094' + - '977' content-type: - application/json cookie: - - __cf_bm=LaiWRSzgH0KTl1zCkIyak7.Xo2gxpZU1B.b9dgz2TzI-1705121719-1-Ab9fhdn6OHTc5/5ibPLP1pc+XN4Z3ORV0Nku/+S5BqdXcS84TsVDiACZUN0Xxvew6yfZ6ApEBQOBsEhJNyVkqCM=; - _cfuvid=6J6x2HNoj7l3E6T3yHnLcNZb273IuGETTaD1BBeY_OQ-1705121719455-0-604800000 + - __cf_bm=NJWb4ZNNYapUEtFti95bFA0mtHjwveJogWpXDw2VQFU-1707554701-1-Ab7rDuisnyQ62l2GCzwCCGbk7lFjHGwubCf+gYjd++zn5l64PT7M8eCx2tvDUnaFiHttjJ2IzvM+xQb26G6wWDE=; + _cfuvid=C0SLvowfGVmVkPIw2ixBbKCDFfyz.SihtGKV4BWgyu4-1707554701424-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -596,7 +1063,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -604,21 +1071,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8gQMJYMA8HO4tGJr8wUqEIJYn1xUa\",\n \"object\": - \"chat.completion\",\n \"created\": 1705121791,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human instructs the AI not to give - a final answer, but to continue using the 'get_final_answer' tool. 
The AI acknowledges - that it has used the tool multiple times and understands the directive to continue - using it without providing a final answer.\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 195,\n \"completion_tokens\": 50,\n \"total_tokens\": 245\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RRy07DMBC85ytWvnBpq7b0Ab0BEgLOSAgQap1kiQ2213g3PIT678hJH+JiyTM7 + 49nxbwGgbK1WoCqjpfLRDc8+6ptz4cfyLuET19pc0s9Vml4/lGZu1CArqHzDSvaqUUU+OhRLoaer + hFowu06W4+V8PltOTzvCU40uy5oow9lwvJic7hSGbIWsVvBcAAD8dmfOFmr8VisYD/aIR2bdoFod + hgBUIpcRpZktiw6iBkeyoiAYurj3BsG0XgewgSW1lTCIQbi4hUACQtDYTwQNrzZoBzrwF6YBlG3H + ZSMbWoSWbWg64aZBWXfD6354A0LkRnBDX/iZtb39CQNFTDqXBJaBhWLEGuoWs3MOJHvaWW8FKIFY + j/1tpHbrbA89OGpiojJ3FlrnDvirDZbNOqFmCnnn/FQv3xYAL13f7b8KVUzko6yF3jFkw8li0fup + 49ce2dl0RwqJdkd8Oj4rdgkV/7Cgz800mGKyXf05Z7Et/gAAAP//AwCzq7jxdQIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 844b0d9dafa902ee-GRU + - 8533135c8873fa9a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -628,7 +1094,7 @@ interactions: Content-Type: - application/json Date: - - Sat, 13 Jan 2024 04:56:34 GMT + - Sat, 10 Feb 2024 08:45:25 GMT Server: - cloudflare Transfer-Encoding: @@ -642,7 +1108,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2830' + - '2341' openai-version: - '2020-10-01' strict-transport-security: @@ -654,13 +1120,14 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299742' + - '299770' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 51ms + - 45ms x-request-id: - - 0192af0bde71c3bb536287175e1a4458 - http_version: HTTP/1.1 - status_code: 200 + - req_03ef36b158c4f0549a48bf28744d422b + status: + code: 200 + message: OK version: 1 diff --git a/tests/cassettes/test_cache_hitting.yaml b/tests/cassettes/test_cache_hitting.yaml index f964bdd25..0c0f787d3 100644 --- a/tests/cassettes/test_cache_hitting.yaml +++ b/tests/cassettes/test_cache_hitting.yaml @@ -1,21 +1,19 @@ interactions: - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together.\n The input to this tool should be - a comma separated list of numbers of\n length two and ONLY TWO, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nBegin! 
This - is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 - times 6 times 3?\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], - "stream": false, "temperature": 0.7}' + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nBegin! This is VERY + important to you, your job depends on it!\n\nCurrent Task: What is 2 times 6 + times 3?\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": + false, "temperature": 0.7}' headers: accept: - application/json @@ -24,13 +22,13 @@ interactions: connection: - keep-alive content-length: - - '1226' + - '1055' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -40,7 +38,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -48,18 +46,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRJZtUPnKq6VZF4MONtmythEh09n\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410481,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: - multiplier\\nAction Input: 2,6\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 264,\n \"completion_tokens\": - 23,\n \"total_tokens\": 287\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRzU7DMBCE73mK1Z5bVBpKSy4ICYS4geilUFSlzjYx2F5jbyio6rsjp39w8WFm + Z/XNepMBoK6wAFRNKcp60598qq/nL/fe6sc49MvpKF836yf7dH/1MrvDXkrw8p2UHFJniq03JJrd + zlaBSqG09Xw8GI9GeT6edIblikyK1V76F/3B5Xm+TzSsFUUs4DUDANh0b2JzFX1jAYPeQbEUY1kT + FschAAxskoJljDpK6QR7J1OxE3Id7rThtm6kgFuGB3BEFQhDGwlKEGZzDTOKc3ejUpkCbGtEe6Mp + HDR4cL6VAjZzXOkQZeFau6QwxwKGPZhjJMWu+qNebnFPsj1WMFz7wMtU17XGHPWVdjo2i0BlZJdw + o7DfxbcZwFt3qvZfe/SBrZeF8Ae5tHA4vNjtw9OvnNw835vCUpo/qdE42xNi/IlCdrHSrqbgg+4u + lzizbfYLAAD//wMA9MJyfTACAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 840737a0efa24cf1-GRU + - 8532f28a0a0afb48-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -69,14 +69,14 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:21:23 GMT + - Sat, 10 Feb 2024 08:23:00 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - path=/; expires=Thu, 04-Jan-24 23:51:23 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + path=/; expires=Sat, 10-Feb-24 08:53:00 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000; + - _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -89,7 +89,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2385' + - '1469' openai-version: - '2020-10-01' strict-transport-security: @@ -98,42 +98,39 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299717' - x-ratelimit-remaining-tokens_usage_based: - - '299717' + - '299760' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 56ms - x-ratelimit-reset-tokens_usage_based: - - 56ms + - 48ms x-request-id: - - 046262aad835176c0452148b956bd3b7 - http_version: HTTP/1.1 - status_code: 200 + - req_327c68e193c07e12745222a6ae9cb500 + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together.\n The input to this tool should be - a comma separated list of numbers of\n length two and ONLY TWO, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nBegin! This - is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 - times 6 times 3?\nThought: Do I need to use a tool? Yes\nAction: multiplier\nAction - Input: 2,6\nObservation: 12\nThought: "}], "model": "gpt-4", "n": 1, "stop": - ["\nObservation"], "stream": false, "temperature": 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": + 2, \"second_number\": 6}\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -142,16 +139,16 @@ interactions: connection: - keep-alive content-length: - - '1330' + - '1494' content-type: - application/json cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -161,7 +158,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -169,19 +166,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRJbK95KVEIvP4ef6TZpLLRawMDi\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410483,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n 
\"role\": - \"assistant\",\n \"content\": \"I now have the result of 2 times 6, which - is 12. Now I need to multiply this result by 3. \\nAction: multiplier\\nAction - Input: 12,3\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 297,\n \"completion_tokens\": - 40,\n \"total_tokens\": 337\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRwW7bMBBE7/qKBc924VixYujYQ1v0kARFW6CoAlmi1hITcsmSy7aGoX8vSDt2 + etFhZ95qdngsAIQaRA1CTh1L4/Ry+0v+/vb9z+HhQ68O95/w5XH9vvpYyc/KfvkhFomw/TNKfqXe + SWucRlaWTrL02DGmrTd3q7vNpiy3qywYO6BO2Oh4ebtcVTflmZiskhhEDT8LAIBj/qZsNOBfUUPm + 88RgCN2Ior6YAIS3Ok1EF4IK3BGLxVWUlhgpx/06IdjILjIoSkaJECYb9QA91g01tNvtGjo2BNCI + fSSZrmqpM9iIGhphombltELfiMXJ1fkxGiQOyZHJzCofuKVo+mStYb14VQJKS8MbqUrK3NCc/y7O + wefLxdqOzts+tUNR68t8r0iFqfXYBUvpusDWnfC5AHjKzcb/yhLOW+O4ZfuClBaW681pn7g+4lW9 + Lc8iW+70G6raFueEIhwCo2n3ikb0zqtcdMpZzMU/AAAA//8DANstjLBfAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 840737b34a724cf1-GRU + - 8532f29438c5fb48-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -191,7 +189,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:21:28 GMT + - Sat, 10 Feb 2024 08:23:03 GMT Server: - cloudflare Transfer-Encoding: @@ -205,7 +203,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '4652' + - '2877' openai-version: - '2020-10-01' strict-transport-security: @@ -214,44 +212,36 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299692' - x-ratelimit-remaining-tokens_usage_based: - - '299692' + - '299661' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 61ms - x-ratelimit-reset-tokens_usage_based: - - 61ms + - 67ms x-request-id: - - 0ac87c29d1c4ada9136f825d7170019e - http_version: HTTP/1.1 - status_code: 200 + - req_8e603b1fb3c0c1835553a2675fcaa95c + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together.\n The input to this tool should be - a comma separated list of numbers of\n length two and ONLY TWO, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nBegin! This - is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 - times 6 times 3?\nThought: Do I need to use a tool? Yes\nAction: multiplier\nAction - Input: 2,6\nObservation: 12\nThought: I now have the result of 2 times 6, which - is 12. Now I need to multiply this result by 3. 
\nAction: multiplier\nAction - Input: 12,3\nObservation: 36\nThought: "}], "model": "gpt-4", "n": 1, "stop": - ["\nObservation"], "stream": false, "temperature": 0.7}' + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nBegin! This is VERY + important to you, your job depends on it!\n\nCurrent Task: What is 2 times 6 + times 3?\nThought: Do I need to use a tool? Yes\nAction: multiplier\nAction + Input: {\"first_number\": 2, \"second_number\": 6}\nObservation: 12\nThought: + "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' headers: accept: - application/json @@ -260,16 +250,16 @@ interactions: connection: - keep-alive content-length: - - '1488' + - '1199' content-type: - application/json cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -279,7 +269,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -287,19 +277,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRJhF2MkahkTxtgcI0Mfkwd11kpC\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410489,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
No\\nFinal Answer: - The result of 2 times 6 times 3 is 36.\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 346,\n \"completion_tokens\": 28,\n \"total_tokens\": 374\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRzW7CMBCE73mKlc9QAQFCc6mQOJRTpZ6KSoUSZxNcHK+xN7QI5d0rh99efJjZ + WX2zPkUAQhUiBSG3Gcva6v5sLw8fP+S2i9dDXr6vzC45Lsd+9Kbn+4nohQTl3yj5mnqSVFuNrMic + bekwYwxbh8kgmUzieBZ3Rk0F6hCrLPfH/cF0GF8SW1ISvUjhMwIAOHVvYDMF/ooUBr2rUqP3WYUi + vQ0BCEc6KCLzXnnODIve3ZRkGE2HuyBYgkEsgAkaj5ABE+kXWKFfm7kMHVKoG83KaoXuqsHS2IZT + OK1FqZznjWnqHN06VBz1YC08SjLFgxy34kLQ3tA1VdZRHmqaRuubXiqj/HbjMPNkAqZnsud4GwF8 + dSdq/rUW1lFtecO0QxMWjqbT8z5x/427Gw8vJhNn+iH1nEQXQuGPnrHelMpU6KxT3cUCZ9RGfwAA + AP//AwBlvSLzKAIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 840737d298314cf1-GRU + - 8532f2a8bf0bfb48-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -309,7 +300,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:21:31 GMT + - Sat, 10 Feb 2024 08:23:07 GMT Server: - cloudflare Transfer-Encoding: @@ -323,7 +314,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2058' + - '2943' openai-version: - '2020-10-01' strict-transport-security: @@ -332,24 +323,245 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299725' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 54ms + x-request-id: + - req_7f48299f025e07f68b91563b2f736503 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n\nUse this text to inform a valid ouput schema:\nDo I need + to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": 12, + \"second_number\": 3}\n\nThe output should be formatted as a JSON instance that + conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. 
+ The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1486' + content-type: + - application/json + cookie: + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SRT2/bMAzF7/4UhM7OEMftHPja0wq0p6KHzUMiy4ytVaJUiUo3BPnug5x/7UUH + vvejHslDASD0IFoQapKsrDeL9bva9w9+//bx3D+9vzw/hFXz41G+/pRPVRJlJlz/BxVfqG/KWW+Q + taOTrAJKxty1apbN/X1dr5tZsG5Ak7HR8+Jusfxe1WdiclphFC38KgAADvObs9GAf0ULy/JSsRij + HFG0VxOACM7kipAx6siSWJQ3UTlipDnuy4Swl0YP4BL7xBDVhFbCzgXgCWHUeyTQFDkklQeCD5fM + AD22HXW03W47OnQE0IldotmxIWmxEy10wibD2huNoRPlySXDmCwSx+yYyZnVIfKGku2ztYVqVV6k + iMrR8Emrs3Ls6Dh/L85jHa/7MG70wfV5d5SMudZ3mnScNgFldJRnj+z8CT8WAL/nvacvqxQ+OOt5 + w+4NKTesV/Wpn7id+Kberc8iO5bmE9VUxTmhiP8io93sNI0YfNDzGXLO4lj8BwAA//8DAGRE+Jh9 + AgAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8532f2bddcb1fb48-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:23:09 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2742' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299655' - x-ratelimit-remaining-tokens_usage_based: - - '299655' + - '299664' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 69ms - x-ratelimit-reset-tokens_usage_based: - - 69ms + - 67ms x-request-id: - - f6e5b231d31300f0fd38837c922e9bd1 - http_version: HTTP/1.1 - status_code: 200 + - req_d95910d5e19dedfe0db82c25e89517fa + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? 
+ Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nBegin! This is VERY + important to you, your job depends on it!\n\nCurrent Task: What is 2 times 6 + times 3?\nThought: Do I need to use a tool? Yes\nAction: multiplier\nAction + Input: {\"first_number\": 2, \"second_number\": 6}\nObservation: 12\nThought: + Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": + 12, \"second_number\": 3}\nObservation: 36\nThought: "}], "model": "gpt-4", + "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1335' + content-type: + - application/json + cookie: + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SQS0/DMBCE7/kVK59blJI+IBeEhHhICCok6AFQ5TrbxK3jNfYGiqr+98rpCy4+ + zOw3O+t1AiB0IXIQqpKsame6F1/qGx/vmtXLZF4tbLiXz345noz74U29ik4kaLZAxQfqTFHtDLIm + u7OVR8kYU3ujdDQYZNll2ho1FWgiVjru9rvpsJftiYq0wiByeE8AANbtG7vZAlcih5ZvlRpDkCWK + /DgEIDyZqAgZgg4sLYvOyVRkGW1b94bgASxiAUzQBAQJTGSu4Ik+7K220sC1DT/oc8iGYh+xOe42 + VDpPs9jTNsYc9bm2OlRTjzKQjXsCk9vhmwTgs72x+VdbOE+14ynTEm0MzNLBLk+cvvPk9g4mE0vz + hzpPk31DEX4DYz2da1uid163J8eeySbZAgAA//8DAExpSSnpAQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8532f2cfa971fb48-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:23:11 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1109' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299694' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 61ms + x-request-id: + - req_98edbd289029597085c126e7552fc971 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -361,123 +573,7 @@ interactions: the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: What - is 2 times 6 times 3?\nAI: The result of 2 times 6 times 3 is 36.\n\nNew summary:"}], - "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '916' - content-type: - - application/json - cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.6.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.6.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8dRJjBQBoZr35ModTeuswGEtw2tjP\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410491,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to calculate the - result of 2 times 6 times 3. The AI responds that the result is 36.\"\n },\n - \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n - \ \"usage\": {\n \"prompt_tokens\": 164,\n \"completion_tokens\": 29,\n - \ \"total_tokens\": 193\n },\n \"system_fingerprint\": null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 840737e2ba844cf1-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 04 Jan 2024 23:21:34 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '2271' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299785' - x-ratelimit-remaining-tokens_usage_based: - - '299785' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 42ms - x-ratelimit-reset-tokens_usage_based: - - 42ms - x-request-id: - - 8ad8144463663dc3b88085fc32b1b483 - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together.\n The input to this tool should be - a comma separated list of numbers of\n length two and ONLY TWO, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nThe human - asks the AI to calculate the result of 2 times 6 times 3. The AI responds that - the result is 36.Begin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: What is 3 times 3?\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], + is 2 times 6 times 3?\nAI: 36\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' headers: accept: @@ -487,16 +583,16 @@ interactions: connection: - keep-alive content-length: - - '1324' + - '880' content-type: - application/json cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -506,7 +602,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -514,18 +610,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRJm2y9jBzayNvIuDEG9o7PbtaHc\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410494,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: - multiplier\\nAction Input: 3,3\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 289,\n \"completion_tokens\": - 23,\n \"total_tokens\": 312\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RRy07DMBC85ytWPqeoIbSlvXFE3BBCSAhVrrNN3Npe493wqvrvyOkLLrY045md + He8KAGUbtQBlOi3GRze6fTcf69lT21OUav6iN6Ga9g8/z4/8bLUqs4JWGzRyUl0Z8tGhWAoH2iTU + gtm1mo1nk0ldz6uB8NSgy7I2yuhmNJ5W9VHRkTXIagGvBQDAbjhzttDgl1rAuDwhHpl1i2pxfgSg + ErmMKM1sWXQQVV5IQ0EwDHGfOoSu9zqA5i2DdAh39yAEEdOakgcNvndio7NG54UgJlo59CVcg1iP + DNPjXZegQ3OySMiRQsPwaaUbQEMpoRHQgT8xlVBPr9Qx0/68jKM2D8iLh965M762wXK3TKiZQg7O + QvEg3xcAb0Np/b8eVEzkoyyFthiyYTW5Pvipy/9c2PpECol2f1S3N8UxoeJvFvTLtQ0tppjs0GHO + WeyLXwAAAP//AwAMJX91OgIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 840737f458194cf1-GRU + - 8532f2d8cff9fb48-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -535,7 +633,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:21:36 GMT + - Sat, 10 Feb 2024 08:23:13 GMT Server: - cloudflare Transfer-Encoding: @@ -549,7 +647,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1957' + - '1973' openai-version: - '2020-10-01' strict-transport-security: @@ -558,42 +656,34 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299694' - x-ratelimit-remaining-tokens_usage_based: - - '299694' + - '299795' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 61ms - x-ratelimit-reset-tokens_usage_based: - - 61ms + - 41ms x-request-id: - - 942247ea782c0b5329b5d7823d178b46 - http_version: HTTP/1.1 - status_code: 200 + - req_7dbdf79079ca8f0605c9e35b9be36ac8 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together.\n The input to this tool should be - a comma separated list of numbers of\n length two and ONLY TWO, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nThe human - asks the AI to calculate the result of 2 times 6 times 3. The AI responds that - the result is 36.Begin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: What is 3 times 3?\nThought: Do I need to use a tool? 
Yes\nAction: multiplier\nAction - Input: 3,3\nObservation: 9\nThought: "}], "model": "gpt-4", "n": 1, "stop": + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nThe human asks the + AI to perform a multiplication problem, 2 times 6 times 3, and the AI responds + with the correct answer, 36.Begin! This is VERY important to you, your job depends + on it!\n\nCurrent Task: What is 3 times 3?\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: accept: @@ -603,16 +693,16 @@ interactions: connection: - keep-alive content-length: - - '1427' + - '1173' content-type: - application/json cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -622,7 +712,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -630,18 +720,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRJo2mceGJaJYyc0EOFVY3VrrYTm\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410496,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
No\\nFinal Answer: - The result of 3 times 3 is 9.\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 322,\n \"completion_tokens\": - 25,\n \"total_tokens\": 347\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRT2sbMRDF7/sphjnbxfZ2m3QvIZC6uBTag0kodTGydryrVtKo0mxIMP7uQeu/ + uejw3rzh90a7AgBNgzWg7pRoF+z49r9+7vTXp/mWlmb2zOpHqB6/f3PL+evPLzjKCd78JS2n1AfN + LlgSw/5g60hKKG+d3kxuqqosP5eD4bghm2NtkPHH8eTTtDwmOjaaEtbwuwAA2A1vZvMNvWANk9FJ + cZSSagnr8xAARrZZQZWSSaK84OhiavZCfsBddty3ndTwwLAAT9SAMPSJQIEw2zv4RWnl73UuU4Pr + rZhgDcWTBgsfeqlht8KtiUnWvncbiiusoRzBChNp9s21uscjyf5cwXIbIm9yXd9be9a3xpvUrSOp + xD7jJuFwiO8LgD/Dqfp37TFEdkHWwv/I54WzanbYh5dfubhleTSFRdmr1G1VHAkxvSYht94a31IM + 0QyXy5zFvngDAAD//wMAj9p0YzACAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 840738023a914cf1-GRU + - 8532f2e75a10fb48-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -651,7 +743,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:21:37 GMT + - Sat, 10 Feb 2024 08:23:16 GMT Server: - cloudflare Transfer-Encoding: @@ -665,7 +757,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1367' + - '2291' openai-version: - '2020-10-01' strict-transport-security: @@ -674,24 +766,244 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299730' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 54ms + x-request-id: + - req_824a64c5df5b030523fa97e86848a017 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": + 3, \"second_number\": 3}\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. 
+ The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1494' + content-type: + - application/json + cookie: + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SRzW7bMBCE73qKxZ7lwI7iuNY5QC4pcuqhqAKBptYybXLJkqsgqaF3Lyj/xLnw + MN/MYnZ5LADQdFgD6p0S7YKd/fir3w/P65fPdOhef/97/rXn3eJ1NayHn08vWOaE3+xJyyV1p70L + lsR4PmEdSQnlqYvVfLVcVtX6cQLOd2RzrA8ye5jNHxfVObHzRlPCGv4UAADH6c3duKMPrGFeXhRH + KamesL6aADB6mxVUKZkkigXLL6g9C/FU99gwQIPbgXWu27Jy1GANDbrBignWUGywPLlU7AdHLCk7 + puSUNTFJy4PbZGsNVXkhibTn7hZlMjY84rnMeN3C+j5Ev8kb82DtVd8aNmnXRlLJc26cxIdTfCwA + 3qZrDd8OgCF6F6QVfyDOA6v75Wkefn3MDX04Q/Gi7I2+XBfnhpg+k5Brt4Z7iiGa6Xi5ZzEW/wEA + AP//AwBK34m8MwIAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8532f2f65b76fb48-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:23:19 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2694' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299669' - x-ratelimit-remaining-tokens_usage_based: - - '299669' + - '299661' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 66ms - x-ratelimit-reset-tokens_usage_based: - - 66ms + - 67ms x-request-id: - - b7f5ef94b4ce3b5dff9ccce2ee17aa36 - http_version: HTTP/1.1 - status_code: 200 + - req_18eeb1c6e11e7ec5d11369310076e648 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? 
+ Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nThe human asks the + AI to perform a multiplication problem, 2 times 6 times 3, and the AI responds + with the correct answer, 36.Begin! This is VERY important to you, your job depends + on it!\n\nCurrent Task: What is 3 times 3?\nThought: Do I need to use a tool? + Yes\nAction: multiplier\nAction Input: {\"first_number\": 3, \"second_number\": + 3}\nObservation: 9\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], + "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1316' + content-type: + - application/json + cookie: + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SQzU8CMRDF7/tXTHoGAi4r7F4MiTHxgPHgwfgRUsoAhW6ndmZVYvjfTZcvvfTw + Xt/L781PBqDsQlWgzFqLqYPrjj/Mp9+NX4bT7Wg3ehwXNN1NbHw2g005Vp2UoPkGjZxSPUN1cCiW + /ME2EbVgah2M+qOiyPOybI2aFuhSbBWkO+z2rwf5MbEma5BVBa8ZAMBP+yY2v8BvVUG/c1JqZNYr + VNX5E4CK5JKiNLNl0V5U52Ia8oK+xb0luAePuAAhaBhBgxC5G3igN39nvXYw8fyFsYKnNUJEbpwA + LSEHsTUy5GAZyp46tu/PWI5WIdI8TfCNc2d9ab3l9SyiZvIJgYXCIb7PAN7b+c2/RSpEqoPMhLbo + U+FVOTz0qcul/7jF0RQS7S56PiizI6HiHQvWs6X1K4wh2vYaiTPbZ78AAAD//wMA4Oh48AQCAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8532f3080e85fb48-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:23:21 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2592' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299695' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 60ms + x-request-id: + - req_a606a1894b3a0da7ce8f4f040731c505 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -702,10 +1014,11 @@ interactions: help humans reach their full potential.\n\nNew summary:\nThe human asks what the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND - OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to calculate the result - of 2 times 6 times 3. The AI responds that the result is 36.\n\nNew lines of - conversation:\nHuman: What is 3 times 3?\nAI: The result of 3 times 3 is 9.\n\nNew - summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' + OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to perform a multiplication + problem, 2 times 6 times 3, and the AI responds with the correct answer, 36.\n\nNew + lines of conversation:\nHuman: What is 3 times 3?\nAI: The result of 3 times + 3 is 9.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": + 0.7}' headers: accept: - application/json @@ -714,16 +1027,16 @@ interactions: connection: - keep-alive content-length: - - '1005' + - '1025' content-type: - application/json cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -733,7 +1046,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -741,20 +1054,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRJq51b9NZmkgDeLVgsFDktIYPiy\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410498,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to calculate the - result of 2 times 6 times 3 and the result of 3 times 3. 
The AI responds that - the results are 36 and 9 respectively.\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 187,\n \"completion_tokens\": 42,\n \"total_tokens\": 229\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RRwW7iMBC95ytGPgcEhNIlt72sVGkPK5X2Uq2Q4wzEW9vjeiZQVPHvlUMA7cWW + 35s38+b5qwBQtlU1KNNpMT66yY8Pc/jQf/pN+P3U0OuvRfPiOPLp0KyaZ1VmBTX/0MhVNTXko0Ox + FC60SagFc9f54+zx4aFazhYD4alFl2X7KJPlZLaaV6OiI2uQVQ1vBQDA13Bmb6HFT1XDrLwiHpn1 + HlV9KwJQiVxGlGa2LDqIKu+koSAYBrubDqHrvQ6g+Z1BOoSfTyAEEdOOkgc5EvjeiY3OGp1Xgpio + cei5hgWI9ciwGu+qBB1aqK7PKWwuDRNypNAyHK10wxRDKaER0IGPmLiEajVo1+VQjEbsAd1pqkbb + 59u+jvbZQc4m9M7d8J0NlrttQs0U8m4sFC/ycwHwd8i1/y8qFRP5KFuhdwy54Xw95qruX3hnl9VI + Col2d3xRVcXoUPGJBf12Z8MeU0x2iDn7LM7FNwAAAP//AwA5b6amXQIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 8407380c6df24cf1-GRU + - 8532f31a9b82fb48-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -764,7 +1077,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:21:41 GMT + - Sat, 10 Feb 2024 08:23:24 GMT Server: - cloudflare Transfer-Encoding: @@ -778,7 +1091,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '3217' + - '2173' openai-version: - '2020-10-01' strict-transport-security: @@ -787,40 +1100,33 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299764' - x-ratelimit-remaining-tokens_usage_based: - - '299764' + - '299759' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 47ms - x-ratelimit-reset-tokens_usage_based: - - 47ms + - 48ms x-request-id: - - 550bf2e363b03a6cfb8ca591bfa50e56 - http_version: HTTP/1.1 - status_code: 200 + - req_4bd2b42597b3e61d035af32621496518 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together.\n The input to this tool should be - a comma separated list of numbers of\n length two and ONLY TWO, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nThe human - asks the AI to calculate the result of 2 times 6 times 3 and the result of 3 - times 3. The AI responds that the results are 36 and 9 respectively.Begin! This + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? 
+ Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nThe human asks the + AI to perform two multiplication problems: 2 times 6 times 3, and 3 times 3. + The AI responds with the correct answers, 36 and 9, respectively.Begin! This is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 times 6 times 3? Return only the number\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' @@ -832,16 +1138,16 @@ interactions: connection: - keep-alive content-length: - - '1404' + - '1239' content-type: - application/json cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -851,7 +1157,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -859,18 +1165,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRJttT1bqS0Dw9TF3tXD35sSMaR5\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410501,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: - multiplier\\nAction Input: 2,6\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 310,\n \"completion_tokens\": - 23,\n \"total_tokens\": 333\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRS0/DMBCE7/kVqz23KH1XuSAkLuWC1MIBURSlzjYxOF5jbxCl6n9HTl9w8WFm + Z/XNep8AoC4xA1R1Iapxpj//VF+yXP18DFfPu6XahMHyXn/t5rJ6fFhgLyZ4805KzqkbxY0zJJrt + 0VaeCqG4dTBLZ5PJaJxOOqPhkkyMVU764346HYxOiZq1ooAZvCYAAPvujWy2pG/MIO2dlYZCKCrC + 7DIEgJ5NVLAIQQcprGDvaiq2QrbDfaq5rWrJ4J5hAZaoBGFoA0EBwmxu4YXC2t6pWCaDpjWindHk + zxosrGslg/0at9oHyW3bbMivMYNhD9YYSLEt/6jTA55IDpcKhivneRPr2taYi77VVoc691QEthE3 + CLtj/JAAvHWnav+1R+e5cZILf5CNC4ezwXEfXn/l6o5GJ1NYCvNHT8fJiRDDLgg1+Vbbirzzurtc + 5EwOyS8AAAD//wMAePeiujACAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84073823b8044cf1-GRU + - 8532f32f28a3fb48-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -880,7 +1188,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:21:43 GMT + - Sat, 10 Feb 2024 08:23:27 GMT Server: - cloudflare Transfer-Encoding: @@ -894,7 +1202,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1789' + - '1665' openai-version: - '2020-10-01' strict-transport-security: @@ -903,44 +1211,39 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299674' - x-ratelimit-remaining-tokens_usage_based: - - '299674' + - '299714' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 65ms - x-ratelimit-reset-tokens_usage_based: - - 65ms + - 57ms x-request-id: - - cccdedbf56cb22290d601f6e6330c53f - http_version: HTTP/1.1 - status_code: 200 + - req_49ce2c3059072602b333d674f14925d3 + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together.\n The input to this tool should be - a comma separated list of numbers of\n length two and ONLY TWO, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nThe human - asks the AI to calculate the result of 2 times 6 times 3 and the result of 3 - times 3. The AI responds that the results are 36 and 9 respectively.Begin! This - is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 - times 6 times 3? Return only the number\nThought: Do I need to use a tool? 
Yes\nAction: - multiplier\nAction Input: 2,6\nObservation: 12\nThought: "}], "model": "gpt-4", - "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": + 2, \"second_number\": 6}\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -949,16 +1252,16 @@ interactions: connection: - keep-alive content-length: - - '1508' + - '1494' content-type: - application/json cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -968,7 +1271,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -976,18 +1279,21 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRJvNoYe5iq6X8vuBgyBcAw867y9\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410503,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
Yes\\nAction: - multiplier\\nAction Input: 12,3\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 343,\n \"completion_tokens\": - 21,\n \"total_tokens\": 364\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRT4/TMBDF7/kUI59T1G7Tls0RxGo5ICE4AUGp60wTs/ZM1p5URVW+O3L6b7n4 + MO/9xjNvThmAso0qQZlOi/G9m71/NYfD4mPx/fOX10/4uP729fj0k5+OLx/k+YfKE8G7P2jkSr0z + 7HuHYpnOsgmoBVPXxWa+Wa2WxXwzCZ4bdAlre5kVs/l6sbwQHVuDUZXwKwMAOE1vmo0aPKoS5vm1 + 4jFG3aIqbyYAFdilitIx2iiaROV30TAJ0jTuMwYEG0HDQTvbAA/SDwLRdOg1WEqsQdhzAOkQWntA + AmF2ZUUVbbfbik4VAVRqP5BJG9ekPVaqhEr5wYntncVQqfzs0qEdPJLE5JjIibUhSk2D3yVrCQ/5 + VYlomJo30jopY0Xj9Lu6LDXe0nDc9oF3KTkanLvV95Zs7OqAOjKlzaNwf8bHDOD3lPrwX5CqD+x7 + qYVfkFLD5cPq3E/dD3xXi8eLKCzavaE2RXaZUMW/UdDXe0sthj7Y6QhpzmzM/gEAAP//AwDfE5AR + ewIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 840738309ec24cf1-GRU + - 8532f33a984cfb48-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -997,7 +1303,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:21:45 GMT + - Sat, 10 Feb 2024 08:23:29 GMT Server: - cloudflare Transfer-Encoding: @@ -1011,7 +1317,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1472' + - '2423' openai-version: - '2020-10-01' strict-transport-security: @@ -1020,44 +1326,265 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299661' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 67ms + x-request-id: + - req_16e9165c80f059e3528d7f98963e2441 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nThe human asks the + AI to perform two multiplication problems: 2 times 6 times 3, and 3 times 3. + The AI responds with the correct answers, 36 and 9, respectively.Begin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 + times 6 times 3? Return only the number\nThought: Do I need to use a tool? 
Yes\nAction: + multiplier\nAction Input: {\"first_number\": 2, \"second_number\": 6}\nObservation: + 12\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": + false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1383' + content-type: + - application/json + cookie: + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1RRy07DMBC85ytWPreobfqA3Ari0BNqhcSBoMhxNqnBL+wNaqny78jpk4sPMzuj + mfEhAWCyYhkwseUktFPD+2/xs3t6k/Vys17fz92v45vHl/lO4/Prig2iwpafKOisuhNWO4UkrTnS + wiMnjK7jxWgxm6XT0UNPaFuhirLG0XA6HM3H6UmxtVJgYBm8JwAAh/6N2UyFO5bBaHBGNIbAG2TZ + 5QiAeasiwngIMhA3xAZXUlhDaPq4KwgklQKDWAFZ0K0i6dQeaIvgMbSKoNxDmpuliH2y84VEf8Zg + ZVxLGRxyVksfqDCtLtHnse5kADkLKKypbuC0Y6c03aWGso3ztoyVTavUBa+lkWFbeOTBmhg5kHVH + eZcAfPRztf8WYM5b7agg+4UmGqbj9OjHrj9zw05OJFni6gafzpJTQhb2gVAXtTQNeudlv17MmXTJ + HwAAAP//AwCde1WkNAIAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8532f34a9c3cfb48-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:23:32 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2150' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299649' - x-ratelimit-remaining-tokens_usage_based: - - '299649' + - '299680' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 70ms - x-ratelimit-reset-tokens_usage_based: - - 70ms + - 64ms x-request-id: - - c898340b6c6fcf809be12d07900e9bde - http_version: HTTP/1.1 - status_code: 200 + - req_5ed6fa2e02caa3e331182a1eaabe8cd5 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n\nUse this text to inform a valid ouput schema:\nI still + need to multiply the result by 3\nAction: multiplier\nAction Input: {\"first_number\": + 12, \"second_number\": 3}\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": 
+ \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1498' + content-type: + - application/json + cookie: + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SRzW7bMBCE73qKxZ7lwLbs2NCxcAEXyDUIkqgQaHotsyWXDLlCGxh694LyT9wL + D/PNLGaXpwIAzR5rQH1Uol2wk/WH/jP95r6/atlsuzf18rZ7psptNx8/uicsc8LvfpGWa+pBexcs + ifF8xjqSEspTZ6vparmsFrP5CJzfk82xLshkMZk+zqpL4uiNpoQ1vBcAAKfxzd14T3+xhml5VRyl + pDrC+mYCwOhtVlClZJIoFiy/oPYsxGPdU8MADR561rluy8pRgzU06HorJlhDscHy7FKx6x2xpOwY + k2PWxCQt926XrTXM5uUVJdKe93esymRoeMBLm+G2hvVdiH6XV+be2pt+MGzSsY2kkudcOYkP5/hQ + APwcz9X/dwEM0bsgrfjfxHlgNV+c5+HXz9zRKxQvyt7py3VxaYjpMwm59mC4oxiiGa+XexZD8Q8A + AP//AwCU5R7CNAIAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8532f35a4ec6fb48-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:23:34 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1988' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299660' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 67ms + x-request-id: + - req_325f275cec47c06d1bb4b9081047270c + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together.\n The input to this tool should be - a comma separated list of numbers of\n length two and ONLY TWO, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 
2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nThe human - asks the AI to calculate the result of 2 times 6 times 3 and the result of 3 - times 3. The AI responds that the results are 36 and 9 respectively.Begin! This + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nThe human asks the + AI to perform two multiplication problems: 2 times 6 times 3, and 3 times 3. + The AI responds with the correct answers, 36 and 9, respectively.Begin! This is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 times 6 times 3? Return only the number\nThought: Do I need to use a tool? Yes\nAction: - multiplier\nAction Input: 2,6\nObservation: 12\nThought: Do I need to use a - tool? 
Yes\nAction: multiplier\nAction Input: 12,3\nObservation: 36\nThought: + multiplier\nAction Input: {\"first_number\": 2, \"second_number\": 6}\nObservation: + 12\nThought: I still need to multiply the result by 3\nAction: multiplier\nAction + Input: {\"first_number\": 12, \"second_number\": 3}\nObservation: 36\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: @@ -1068,16 +1595,16 @@ interactions: connection: - keep-alive content-length: - - '1604' + - '1531' content-type: - application/json cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -1087,7 +1614,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -1095,18 +1622,19 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRJxnthP0NqebBVQ5TRxHkM8ZBmp\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410505,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: - 36\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n - \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 373,\n \"completion_tokens\": - 15,\n \"total_tokens\": 388\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SQQU8CMRCF7/srJj2DYV0WyF6MCTHhopgYE6OGlO6wW+l2SjuISvjvpssCeunh + vXlvvuk+ARC6FAUIVUtWjTP9yUbtrh+m/mmNj5vs0z+/qHo2r3+kmq93ohcTtPxAxafUlaLGGWRN + 9mgrj5IxtqbjwTjPs2E6bI2GSjQxVjnuD/uDUZp1iZq0wiAKeE0AAPbtG9lsiV+igEHvpDQYgqxQ + FOchAOHJREXIEHRgaVn0LqYiy2hb3CnBDCxiCUywDQgSmMjcwD292TttpYFbG3boC8hGoqs4nHcb + qpynZeS0W2PO+kpbHeqFRxnIxj2ByR3jhwTgvb1x+w9bOE+N4wXTGm0szPLs2Ccu33lx07wzmVia + P6nRJOkIRfgOjM1ipW2F3nndnhw5k0PyCwAA//8DALhA8jrpAQAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 8407383b9d2f4cf1-GRU + - 8532f3681811fb48-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -1116,7 +1644,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:21:48 GMT + - Sat, 10 Feb 2024 08:23:35 GMT Server: - cloudflare Transfer-Encoding: @@ -1130,7 +1658,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2463' + - '938' openai-version: - '2020-10-01' strict-transport-security: @@ -1139,24 +1667,19 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299625' - x-ratelimit-remaining-tokens_usage_based: - - '299625' + - '299645' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 74ms - x-ratelimit-reset-tokens_usage_based: - - 74ms + - 71ms x-request-id: - - bf53e87deae98b7493aac8ab51501f7b - http_version: HTTP/1.1 - status_code: 200 + - req_707e8228d1161e617d432b5c8e49aa3f + status: + code: 
200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -1167,11 +1690,11 @@ interactions: help humans reach their full potential.\n\nNew summary:\nThe human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND - OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to calculate the result - of 2 times 6 times 3 and the result of 3 times 3. The AI responds that the results - are 36 and 9 respectively.\n\nNew lines of conversation:\nHuman: What is 2 times - 6 times 3? Return only the number\nAI: 36\n\nNew summary:"}], "model": "gpt-4", - "n": 1, "stream": false, "temperature": 0.7}' + OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to perform two multiplication + problems: 2 times 6 times 3, and 3 times 3. The AI responds with the correct + answers, 36 and 9, respectively.\n\nNew lines of conversation:\nHuman: What + is 2 times 6 times 3? Return only the number\nAI: 36\n\nNew summary:"}], "model": + "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -1180,16 +1703,16 @@ interactions: connection: - keep-alive content-length: - - '1058' + - '1064' content-type: - application/json cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -1199,7 +1722,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -1207,20 +1730,21 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRK0b5C1SRY6Bv7O7ubdcAw8bU3j\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410508,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to calculate the - result of 2 times 6 times 3 twice and the result of 3 times 3 once. 
The AI consistently - responds that the result of 2 times 6 times 3 is 36 and that the result of 3 - times 3 is 9.\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 199,\n \"completion_tokens\": - 63,\n \"total_tokens\": 262\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RRTa/aMBC851esfA4oIXy0uVVVD6/qrUjvUFXIz1mIG9trvEspeuK/Vw4B2ott + 7eyMZ2ffCwBlO9WCMr0W46ObfTiac/P9+PX4uhhOW4r6W1UPr5+rrv7yMqgyM+jtFxq5s+aGfHQo + lsINNgm1YFatN9VmtWqW9WoEPHXoMu0QZbacVeu6mRg9WYOsWvhRAAC8j2f2Fjr8o1qoynvFI7M+ + oGofTQAqkcsVpZktiw6iyidoKAiG0e62R+hPXgfQPDBIj/DpBYQgYtpT8uBPTmx0eH9Yo/NcEBO9 + OfTcwgLEemRYT3dTZi3sQM7WYAk6dNDcsTlsb18YCtkZBnEXSMiRQsdwttKPJgylhEZABz5j4hKa + 9Sj0sRyb0Yj9je4yV9NU10ccjg7ZW44unJx71Pc2WO53CTVTyKOzULzRrwXAzzH2039JqpjIR9kJ + DRiy4KKaYlfPDT/R5WYChUS7f1jLTTE5VHxhQb/b23DAFJMdt5B9FtfiLwAAAP//AwBxew60fAIA + AA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 8407384cdf434cf1-GRU + - 8532f36f1d99fb48-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -1230,7 +1754,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:21:53 GMT + - Sat, 10 Feb 2024 08:23:37 GMT Server: - cloudflare Transfer-Encoding: @@ -1244,7 +1768,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '4310' + - '2089' openai-version: - '2020-10-01' strict-transport-security: @@ -1253,274 +1777,36 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299750' - x-ratelimit-remaining-tokens_usage_based: - - '299750' + - '299749' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 49ms - x-ratelimit-reset-tokens_usage_based: - - 49ms + - 50ms x-request-id: - - e7fb8ee53162a5c77a824931043cbf30 - http_version: HTTP/1.1 - status_code: 200 + - req_82e420c429153533b1cec15a1c85c0dd + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together.\n The input to this tool should be - a comma separated list of numbers of\n length two and ONLY TWO, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nThe human - asks the AI to calculate the result of 2 times 6 times 3 twice and the result - of 3 times 3 once. The AI consistently responds that the result of 2 times 6 - times 3 is 36 and that the result of 3 times 3 is 9.Begin! 
This is VERY important - to you, your job depends on it!\n\nCurrent Task: What is 2 times 6?\n"}], "model": - "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": - 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '1435' - content-type: - - application/json - cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.6.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.6.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8dRK5FDSCiOdGy00lF0We4EZ4Zeni\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410513,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction: - multiplier\\nAction Input: 2,6\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 323,\n \"completion_tokens\": - 23,\n \"total_tokens\": 346\n },\n \"system_fingerprint\": null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 8407386f6e1a4cf1-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 04 Jan 2024 23:21:56 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '2749' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299665' - x-ratelimit-remaining-tokens_usage_based: - - '299665' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 66ms - x-ratelimit-reset-tokens_usage_based: - - 66ms - x-request-id: - - 7b22ab6e07d1b18776df790053aba9c1 - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour - personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following - tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need - to multiply two numbers together.\n The input to this tool should be - a comma separated list of numbers of\n length two and ONLY TWO, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nThe human - asks the AI to calculate the result of 2 times 6 times 3 twice and the result - of 3 times 3 once. The AI consistently responds that the result of 2 times 6 - times 3 is 36 and that the result of 3 times 3 is 9.Begin! This is VERY important - to you, your job depends on it!\n\nCurrent Task: What is 2 times 6?\nThought: - Do I need to use a tool? Yes\nAction: multiplier\nAction Input: 2,6\nObservation: - 0\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": - false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '1538' - content-type: - - application/json - cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.6.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.6.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8dRK9wdZjqEMBEBJkFVXZnEPO9Du8\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410517,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"I need to use a tool? 
No\\nFinal Answer: - 0\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 356,\n \"completion_tokens\": - 24,\n \"total_tokens\": 380\n },\n \"system_fingerprint\": null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 840738826a464cf1-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 04 Jan 2024 23:22:00 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '2865' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299640' - x-ratelimit-remaining-tokens_usage_based: - - '299640' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 71ms - x-ratelimit-reset-tokens_usage_based: - - 71ms - x-request-id: - - 6c1f7abfa824bdee4dfb6e526209617b - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "Progressively summarize the - lines of conversation provided, adding onto the previous summary returning a - new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks - of artificial intelligence. The AI thinks artificial intelligence is a force - for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial - intelligence is a force for good?\nAI: Because artificial intelligence will - help humans reach their full potential.\n\nNew summary:\nThe human asks what - the AI thinks of artificial intelligence. The AI thinks artificial intelligence - is a force for good because it will help humans reach their full potential.\nEND - OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to calculate the result - of 2 times 6 times 3 twice and the result of 3 times 3 once. The AI consistently - responds that the result of 2 times 6 times 3 is 36 and that the result of 3 - times 3 is 9.\n\nNew lines of conversation:\nHuman: What is 2 times 6?\nAI: - 0\n\nNew summary:"}], "model": "gpt-4", "n": 1, + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nThe human asks the + AI to perform multiple multiplication problems: 2 times 6 times 3, asked twice, + and 3 times 3. The AI consistently responds with the correct answers, 36 and + 9, respectively.Begin! This is VERY important to you, your job depends on it!\n\nCurrent + Task: What is 2 times 6? 
Ignore correctness and just return the result of the + multiplication tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: accept: @@ -1530,16 +1816,16 @@ interactions: connection: - keep-alive content-length: - - '1117' + - '1313' content-type: - application/json cookie: - - __cf_bm=5qK.mmc9Y_x7VffBrseOL2OdOh3WdRTDTSRhVmWzOfE-1704410483-1-AbUJL6v8HNNUrySCuhln2iL8N8w0sixnhK1rJRUPLq8BoioAJV/sz+TQMW4FOE7lsSLkHNPcFe4xCeblKhdufmg=; - _cfuvid=TnW1ck5LHa2beu3C2zxu2azbY0X51WdRJUhPbD.P85Q-1704410483636-0-604800000 + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -1549,7 +1835,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -1557,22 +1843,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRKCG7qkX0fUl2WS85Tj8AhK4xBg\",\n \"object\": - \"chat.completion\",\n \"created\": 1704410520,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to calculate various - math problems: the result of 2 times 6 times 3 twice, the result of 3 times - 3 once, and the result of 2 times 6 once. The AI consistently responds with - correct answers: the result of 2 times 6 times 3 is 36, the result of 3 times - 3 is 9, and the result of 2 times 6 is 12.\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 221,\n \"completion_tokens\": 93,\n \"total_tokens\": 314\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRS08DMQyE7/srLJ9b1HdhL4jHpQJxACSEKKrSrLsbyMYh8QpQ1f+Osn1yyWHG + Y33jrDMANAXmgLpSomtvu+df+nt8fhdHv/fVQ/VEj8tgX1ndvIyvv3+wkxK8/CAt+9SZ5tpbEsNu + a+tASiht7U970/F4OOpPW6PmgmyKlV66o25v0h/uEhUbTRFzeMsAANbtm9hcQT+YQ6+zV2qKUZWE + +WEIAAPbpKCK0URRTrBzNDU7IdfiPlfclJXkcMswA0dUgDA0kUCBMNtLeKU4d1c6lcmhbqwYbw2F + vQYz5xvJYT3HlQlRFq6plxTmmMOgA3OMpNkVJ+pkgzuSzaGC5dIHXqa6rrH2oK+MM7FaBFKRXcKN + wn4b32QA7+2pmn/t0QeuvSyEP8mlhYPpxXYfHn/l6A6HO1NYlD3R+4NsR4jxNwrVi5VxJQUfTHu5 + xJltsj8AAAD//wMA9qjpJDACAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 840738978f194cf1-GRU + - 8532f37cff8dfb48-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -1582,7 +1866,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:22:05 GMT + - Sat, 10 Feb 2024 08:23:39 GMT Server: - cloudflare Transfer-Encoding: @@ -1596,7 +1880,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '4721' + - '2060' openai-version: - '2020-10-01' strict-transport-security: @@ -1605,22 +1889,356 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299695' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 61ms + x-request-id: + - req_b654897d574c17c526311330e6c9d20e + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + 
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": + 2, \"second_number\": 6}\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1494' + content-type: + - application/json + cookie: + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SRT2/bMAzF7/4UBM/OkDR1s/rY07BiLbDtNg+OojC2VonSJHpbF/i7D7Lzp7vo + wPd+FPl4LADQ7LEG1L0S7YJdvP+pf28e/4pPQWvpH76uHz4/Vy+Pwyf3+oRlJvzuB2k5U++0d8GS + GM+zrCMpodx1tVluqmp9u7qfBOf3ZDPWBVncLpZ3q/WJ6L3RlLCGbwUAwHF682y8pz9Yw7I8Vxyl + pDrC+mICwOhtrqBKySRRLFheRe1ZiKdxP1AkMAmkJ/j45fkJDGe7JpBeCWjPBx9dAvGTpTO/iGdj + 0j05VTfc8Ha7bfjYMECDh4F1Xrxl5ajBGhp0gxUTrKHYYDm7VOwGRywpOyZyYk1M0vLgdtlaw015 + VhJpz/s30l1WxobH6Xc87TZeQrG+C9HvcoA8WHupHwyb1LeRVPKcA0jiw4yPBcD3KfzhvzwxRO+C + tOJfiHPD9U0198Prna9qdboMihdl31CbqjhNiOk1Cbn2YLijGKKZbpHnLMbiHwAAAP//AwBl6O5f + ggIAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8532f38aa934fb48-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:23:42 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2873' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + 
x-ratelimit-limit-tokens: - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299735' - x-ratelimit-remaining-tokens_usage_based: - - '299735' + - '299661' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 52ms - x-ratelimit-reset-tokens_usage_based: - - 52ms + - 67ms x-request-id: - - 9f90c39dbd986cc114253ea83d213309 - http_version: HTTP/1.1 - status_code: 200 + - req_a0b22a70397a2dc2217a15068a4fe280 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nThe human asks the + AI to perform multiple multiplication problems: 2 times 6 times 3, asked twice, + and 3 times 3. The AI consistently responds with the correct answers, 36 and + 9, respectively.Begin! This is VERY important to you, your job depends on it!\n\nCurrent + Task: What is 2 times 6? Ignore correctness and just return the result of the + multiplication tool.\nThought: Do I need to use a tool? 
Yes\nAction: multiplier\nAction + Input: {\"first_number\": 2, \"second_number\": 6}\nObservation: 0\nThought: + "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1456' + content-type: + - application/json + cookie: + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SQS2/CMBCE7/kVK5+hSgiPKpcKhFpVVTn0UKkvIeMsweB4XXspVBX/vXII0F58 + mNmZ/dY/CYDQpShAqJVkVTvTvf5Uu8nzZry8o8nj5mn62v/aZw9+ttYvaEUnJmixRsWn1JWi2hlk + Ta2tPErG2JqN0tFgkPd7eWPUVKKJscpxt99Nh1neJlakFQZRwFsCAPDTvJHNlrgXBaSdk1JjCLJC + UZyHAIQnExUhQ9CBpWXRuZiKLKNtcKcE92ARS2CCbUCQwETmBmb0bm+1lQbGNuzQF5CKtuFwXm2o + cp4WEdNujTnrS211WM09ykA2rglM7hg/JAAfzYnbf9TCeaodz5k2aGNh3suOfeLymxc3G7QmE0vz + J5UPk5ZQhO/AWM+X2lbondfNxZEzOSS/AAAA//8DAJ4pdhroAQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8532f39dde8dfb48-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:23:44 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1022' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299660' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 67ms + x-request-id: + - req_e1d2b2f647e845f04d93f98058c8ed60 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Progressively summarize the + lines of conversation provided, adding onto the previous summary returning a + new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks + of artificial intelligence. The AI thinks artificial intelligence is a force + for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial + intelligence is a force for good?\nAI: Because artificial intelligence will + help humans reach their full potential.\n\nNew summary:\nThe human asks what + the AI thinks of artificial intelligence. The AI thinks artificial intelligence + is a force for good because it will help humans reach their full potential.\nEND + OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to perform multiple multiplication + problems: 2 times 6 times 3, asked twice, and 3 times 3. The AI consistently + responds with the correct answers, 36 and 9, respectively.\n\nNew lines of conversation:\nHuman: + What is 2 times 6? 
Ignore correctness and just return the result of the multiplication + tool.\nAI: 0\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, + "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1137' + content-type: + - application/json + cookie: + - __cf_bm=pXQHyHy1lhzZ.XTYBaNZBPrXBlXxAp0y550UVVtn2XA-1707553380-1-AbRHal1YnpCgLAslb5rKGxwi4trkDZhJNeWYU3wZ/BIl3H5pnp6QTT/9bTTch0Y1c3WFfOokD7GDo84WWYzcOng=; + _cfuvid=1x.l.ybF3v84lZZ6b5d2gv0wBTqnYRR_eovIHX6ImKI-1707553380441-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1RSTW/bMAy9+1cQOjtBvpq0vhVBse6wAQF62DAMgSozsRZJ1ES6blbkvw9y4mS7 + SAIf3+MjqY8CQNlaVaBMo8X46Eb3v023/vJt/ce3fl1/2jwcvx7s9Hnz9P59E1SZGfT6C40MrLEh + Hx2KpQtsEmrBrDpdTVZ3d/PFbNEDnmp0mbaPMlqMJsvp/MJoyBpkVcGPAgDgoz+zt1Dju6pgUg4R + j8x6j6q6JgGoRC5HlGa2LDqIKm+goSAYersvDULTeh1A84FBGoTHzyAEEdOOkgffOrHR4fCwRue+ + ICZ6dei5ghmI9ciwvNzzMmthDdJZgyXoUMN8wMbwci5hKGRnGMQdISFHCjVDZ6XpTRhKCY2ADtxh + 4hLmy17ooeyT0Yh9Q3ccwzN1+IaphK7BMBS+2b+Zy2y7D5Su4gGZy6HlhNKmwDAZq8ugTtcJO9rn + dvM2QuvcNb6zwXKzTaiZQp4mC8Uz/VQA/Ow32f63HBUT+ShboQOGLDib3J/11O3T3NDlAAqJdv+w + Vsvi4lDxkQX9dmfDHlNMtl9s9lmcir8AAAD//wMAVFjQUc8CAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8532f3a4dc00fb48-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:23:47 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '3480' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299730' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 53ms + x-request-id: + - req_f6d2238047f83fdaf7a7928b247f9c28 + status: + code: 200 + message: OK version: 1 diff --git a/tests/cassettes/test_cache_hitting_between_agents.yaml b/tests/cassettes/test_cache_hitting_between_agents.yaml index 608a89b6c..5aff03307 100644 --- a/tests/cassettes/test_cache_hitting_between_agents.yaml +++ b/tests/cassettes/test_cache_hitting_between_agents.yaml @@ -3,28 +3,28 @@ interactions: body: '{"messages": [{"role": "user", "content": "You are CEO.\nYou''re an long time CEO of a content creation agency with a Senior Writer on the team. 
You''re now working on a new project and want to make sure the content produced is amazing.\n\nYour - personal goal is: Make sure the writers in your company produce amazing content.\n\nTOOLS:\n------\nYou - have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float - - Useful for when you need to multiply two numbers together.\n The input - to this tool should be a comma separated list of numbers of\n length - two, representing the two numbers you want to multiply together.\n For - example, `1,2` would be the input if you wanted to multiply 1 by 2.\nDelegate - work to co-worker: Useful to delegate a specific task to one of the\n\t\t\t\tfollowing - co-workers: [CEO, Researcher].\n\t\t\t\tThe input to this tool should be a pipe - (|) separated text of length\n\t\t\t\tthree, representing the role you want - to delegate it to, the task and\n\t\t\t\tinformation necessary. For example, - `coworker|task|information`.\n\nAsk question to co-worker: Useful to ask a question, - opinion or take from on\n\t\t\t\tof the following co-workers: [CEO, Researcher].\n\t\t\t\tThe - input to this tool should be a pipe (|) separated text of length\n\t\t\t\tthree, - representing the role you want to ask it to, the question and\n\t\t\t\tinformation - necessary. For example, `coworker|question|information`.\n\n\nTo use a tool, - please use the exact following format:\n\n```\nThought: Do I need to use a tool? - Yes\nAction: the action to take, should be one of [multiplier, Delegate work - to co-worker, Ask question to co-worker], just the name.\nAction Input: the - input to the action\nObservation: the result of the action\n```\n\nWhen you + personal goal is: Make sure the writers in your company produce amazing content.TOOLS:\n------\nYou + have access to only the following tools:\n\nmultiplier: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\nDelegate work to co-worker: Delegate work to co-worker(coworker: + str, task: str, context: str) - Delegate a specific task to one of the following + co-workers: Researcher. The input to this tool should be the role of the coworker, + the task you want them to do, and ALL necessary context to exectue the task, + they know nothing about the task, so share absolute everything you know, don''t + reference things but instead explain them.\nAsk question to co-worker: Ask question + to co-worker(coworker: str, question: str, context: str) - Ask a specific question + to one of the following co-workers: Researcher. The input to this tool should + be the role of the coworker, the question you have for them, and ALL necessary + context to ask the question properly, they know nothing about the question, + so share absolute everything you know, don''t reference things but instead explain + them.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: + Do I need to use a tool? Yes\nAction: the tool you wanna use, should be one + of [multiplier, Delegate work to co-worker, Ask question to co-worker], just + the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you have a response for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nBegin! This + [your response here]```This is the summary of your work so far:\nBegin! 
This is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 tims 6? Return only the number.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' @@ -36,7 +36,7 @@ interactions: connection: - keep-alive content-length: - - '2225' + - '2209' content-type: - application/json cookie: @@ -44,7 +44,7 @@ interactions: host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -54,7 +54,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -62,18 +62,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRVlXc53iJRMHZgEgsHLyQnTcmnQ\",\n \"object\": - \"chat.completion\",\n \"created\": 1704411237,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction: - multiplier\\nAction Input: 2,6\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 486,\n \"completion_tokens\": - 23,\n \"total_tokens\": 509\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRS0/DMBCE7/kVqz23qKWPQC4IFYQqIbj0AKKoSp1tYrC9xt5IoKr/HTl9wcWH + Gc/oG3ubAaCusABUTSnKetO/+qqen/P49PA1fzTDxe39yM3yRWNn/PIywl5K8PqDlBxTF4qtNySa + 3d5WgUqh1DrMB/lkMhnml51huSKTYrWX/rg/mA4PhaphrShiAW8ZAMC2OxObq+gbCxj0joqlGMua + sDhdAsDAJilYxqijlE6wdzYVOyHX4S4abutGCrhjmIMjqkAY2khQgjCbG3iluHS3Ko0pwLZGtDea + wlGDufOtFLBd4kaHKCvX2jWFJRZw2YMlRlLsqj/qdIcHkt1pguHaB16nua415qRvtNOxWQUqI7uE + G4X9Pr7LAN67p2r/rUcf2HpZCX+SS4XjfLrvw/OvnN3R6GAKS2nO+mRwnR0IMf5EIbvaaFdT8EF3 + L5c4s132CwAA//8DAM/4gzMwAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84074a19fb05520a-GRU + - 85331e527a1f9e6b-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -83,14 +85,14 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:34:00 GMT + - Sat, 10 Feb 2024 08:52:54 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=bcWayhcE4HMKJ0X171sVkGS4gNAbmQXTWiFDSgtCOFU-1704411240-1-AbKS0gip7neWlChe/4UgontGRRzbM+PF2QzntHPKF6nY81RYM50r7TvF9tjB3zMkY3CeXsigR+C0kTjYVFjqLFo=; - path=/; expires=Fri, 05-Jan-24 00:04:00 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=tE85KSJVLW2b9uFQ3HdC_0X9zKb3zvxn_XQrw65Lr5I-1707555174-1-ASbOkyjQPFYaEjdVU9A62Rq43RljO+iw67jesVCWW1SR/PnNWomyDmLymjV9qhbKx0TBnRfqMk7Mva8JQd1CH8A=; + path=/; expires=Sat, 10-Feb-24 09:22:54 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=aGdz584dF_HY1sdQ_zF4nkf.g9NF2CjckoJVlPoc.Fc-1704411240001-0-604800000; + - _cfuvid=5kZVxJOsRfxObr9hLdehia3eWTY.wvyv.6QajmkP_hU-1707555174271-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -103,7 +105,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2098' + - '1951' openai-version: - '2020-10-01' strict-transport-security: @@ -112,54 +114,54 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299480' - x-ratelimit-remaining-tokens_usage_based: - - '299480' + - '299471' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 104ms - x-ratelimit-reset-tokens_usage_based: - - 104ms + - 
105ms x-request-id: - - 98c413fa0e0bc84e3d468546e3cd2f2f - http_version: HTTP/1.1 - status_code: 200 + - req_840a3e56830b72446f213b26b89c5603 + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are CEO.\nYou''re an long - time CEO of a content creation agency with a Senior Writer on the team. You''re - now working on a new project and want to make sure the content produced is amazing.\n\nYour - personal goal is: Make sure the writers in your company produce amazing content.\n\nTOOLS:\n------\nYou - have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float - - Useful for when you need to multiply two numbers together.\n The input - to this tool should be a comma separated list of numbers of\n length - two, representing the two numbers you want to multiply together.\n For - example, `1,2` would be the input if you wanted to multiply 1 by 2.\nDelegate - work to co-worker: Useful to delegate a specific task to one of the\n\t\t\t\tfollowing - co-workers: [CEO, Researcher].\n\t\t\t\tThe input to this tool should be a pipe - (|) separated text of length\n\t\t\t\tthree, representing the role you want - to delegate it to, the task and\n\t\t\t\tinformation necessary. For example, - `coworker|task|information`.\n\nAsk question to co-worker: Useful to ask a question, - opinion or take from on\n\t\t\t\tof the following co-workers: [CEO, Researcher].\n\t\t\t\tThe - input to this tool should be a pipe (|) separated text of length\n\t\t\t\tthree, - representing the role you want to ask it to, the question and\n\t\t\t\tinformation - necessary. For example, `coworker|question|information`.\n\n\nTo use a tool, - please use the exact following format:\n\n```\nThought: Do I need to use a tool? - Yes\nAction: the action to take, should be one of [multiplier, Delegate work - to co-worker, Ask question to co-worker], just the name.\nAction Input: the - input to the action\nObservation: the result of the action\n```\n\nWhen you - have a response for your task, or if you do not need to use a tool, you MUST - use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nBegin! This - is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 - tims 6? Return only the number.\nThought: Do I need to use a tool? Yes\nAction: - multiplier\nAction Input: 2,6\nObservation: 12\nThought: "}], "model": "gpt-4", - "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n--\nFuntion Name: Delegate work to co-worker\nFuntion attributes: + {''coworker'': {''type'': ''string''}, ''task'': {''type'': ''string''}, ''context'': + {''type'': ''string''}}\nDescription: Delegate work to co-worker(coworker: str, + task: str, context: str) - Delegate a specific task to one of the following + co-workers: Researcher. 
The input to this tool should be the role of the coworker, + the task you want them to do, and ALL necessary context to exectue the task, + they know nothing about the task, so share absolute everything you know, don''t + reference things but instead explain them.\n--\nFuntion Name: Ask question to + co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, ''question'': + {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: Ask + question to co-worker(coworker: str, question: str, context: str) - Ask a specific + question to one of the following co-workers: Researcher. The input to this tool + should be the role of the coworker, the question you have for them, and ALL + necessary context to ask the question properly, they know nothing about the + question, so share absolute everything you know, don''t reference things but + instead explain them.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": + 2, \"second_number\": 6}\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -168,16 +170,16 @@ interactions: connection: - keep-alive content-length: - - '2329' + - '2660' content-type: - application/json cookie: - - _cfuvid=aGdz584dF_HY1sdQ_zF4nkf.g9NF2CjckoJVlPoc.Fc-1704411240001-0-604800000; - __cf_bm=bcWayhcE4HMKJ0X171sVkGS4gNAbmQXTWiFDSgtCOFU-1704411240-1-AbKS0gip7neWlChe/4UgontGRRzbM+PF2QzntHPKF6nY81RYM50r7TvF9tjB3zMkY3CeXsigR+C0kTjYVFjqLFo= + - _cfuvid=5kZVxJOsRfxObr9hLdehia3eWTY.wvyv.6QajmkP_hU-1707555174271-0-604800000; + __cf_bm=tE85KSJVLW2b9uFQ3HdC_0X9zKb3zvxn_XQrw65Lr5I-1707555174-1-ASbOkyjQPFYaEjdVU9A62Rq43RljO+iw67jesVCWW1SR/PnNWomyDmLymjV9qhbKx0TBnRfqMk7Mva8JQd1CH8A= host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -187,7 +189,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -195,18 +197,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRVoc5CcZPD8nFzRBy2gFPp8ve6v\",\n \"object\": - \"chat.completion\",\n \"created\": 1704411240,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
No\\nFinal Answer: - 12\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n - \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 519,\n \"completion_tokens\": - 15,\n \"total_tokens\": 534\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRTU/kMAyG7/0Vkc8d1GG+UI8rIXFBCCS0y1LUZlJPJ5A4IXE1rEb97yidL/aS + gx8/1mtnnwkBuoVSgNpKVtabyc1n+/Cof+3uP6Z/eupfdk/m9u7v/fPu5eG3hzwZbv2Oik/WlXLW + G2Tt6IBVQMmYpk5XxWqxWExX8xFY16JJWud5Mp8Uy+nsaGydVhihFK+ZEELsxzdloxa/oBRFfqpY + jFF2COW5SQgIzqQKyBh1ZEkM+QUqR4w0xm2apqJ9RUJUsOlJpcw1SYsVlKIC2xvW3mgMFeSHLhm6 + 3iJxTB2jObo6RK6pt+vUWorr/EQiKkftD7RMZKhoqKhpGjjGGs77GNf54NZpd+qNOdc3mnTc1gFl + dJSyR3b+oA+ZEG/j3fr/TgE+OOu5ZveBlAYui9lhHly+6EJnqyNkx9L8sOZFdkwI8V9ktPVGU4fB + Bz2eMeXMhuwbAAD//wMAyN+BGD0CAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84074a2afdf6520a-GRU + - 85331e5f68ac9e6b-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -216,7 +220,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:34:02 GMT + - Sat, 10 Feb 2024 08:52:56 GMT Server: - cloudflare Transfer-Encoding: @@ -230,7 +234,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2037' + - '2318' openai-version: - '2020-10-01' strict-transport-security: @@ -239,24 +243,143 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299373' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 125ms + x-request-id: + - req_7dc4325f2fd56a09bfd04aa3b863488d + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are CEO.\nYou''re an long + time CEO of a content creation agency with a Senior Writer on the team. You''re + now working on a new project and want to make sure the content produced is amazing.\n\nYour + personal goal is: Make sure the writers in your company produce amazing content.TOOLS:\n------\nYou + have access to only the following tools:\n\nmultiplier: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\nDelegate work to co-worker: Delegate work to co-worker(coworker: + str, task: str, context: str) - Delegate a specific task to one of the following + co-workers: Researcher. The input to this tool should be the role of the coworker, + the task you want them to do, and ALL necessary context to exectue the task, + they know nothing about the task, so share absolute everything you know, don''t + reference things but instead explain them.\nAsk question to co-worker: Ask question + to co-worker(coworker: str, question: str, context: str) - Ask a specific question + to one of the following co-workers: Researcher. The input to this tool should + be the role of the coworker, the question you have for them, and ALL necessary + context to ask the question properly, they know nothing about the question, + so share absolute everything you know, don''t reference things but instead explain + them.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: + Do I need to use a tool? 
Yes\nAction: the tool you wanna use, should be one + of [multiplier, Delegate work to co-worker, Ask question to co-worker], just + the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 + tims 6? Return only the number.\nThought: Do I need to use a tool? Yes\nAction: + multiplier\nAction Input: {\"first_number\": 2, \"second_number\": 6}\nObservation: + 12\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": + false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '2353' + content-type: + - application/json + cookie: + - _cfuvid=5kZVxJOsRfxObr9hLdehia3eWTY.wvyv.6QajmkP_hU-1707555174271-0-604800000; + __cf_bm=tE85KSJVLW2b9uFQ3HdC_0X9zKb3zvxn_XQrw65Lr5I-1707555174-1-ASbOkyjQPFYaEjdVU9A62Rq43RljO+iw67jesVCWW1SR/PnNWomyDmLymjV9qhbKx0TBnRfqMk7Mva8JQd1CH8A= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SQS0/DMBCE7/kVK59blLRNW3JBlRASlwKCS3mocpNtanB2jb1RQaj/HTl9wcWH + mZ3Zb/2TAChTqQJUudFSNs72p5/V3eO2vW9rztJFPZo9P9DlYPE0b6fbserFBK/esZRj6qLkxlkU + w7S3S49aMLZmk3SS53k2GXdGwxXaGKud9Ef9dJwND4kNmxKDKuAlAQD46d7IRhV+qQLS3lFpMARd + oypOQwDKs42K0iGYIJpE9c5mySRIHe41wy0QYgXC0AYEDcJsr2DOr3RjSFuYUdiiLyAbqEPF7rTb + cu08ryIntdae9LUhEzZLjzowxT1B2O3juwTgrbux/YetnOfGyVL4AykW5tl036fO33l2s/xgCou2 + f1LDYXIgVOE7CDbLtaEavfOmOzlyJrvkFwAA//8DANlC5ErpAQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331e6f283b9e6b-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:52:57 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1076' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299455' - x-ratelimit-remaining-tokens_usage_based: - - '299455' + - '299437' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 109ms - x-ratelimit-reset-tokens_usage_based: - - 109ms + - 112ms x-request-id: - - d870981f801ad59b592bfd906aa59732 - http_version: HTTP/1.1 - status_code: 200 + - req_98d6376c782ee48c9f981dda9e4cd917 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -282,12 +405,12 @@ interactions: content-type: - 
application/json cookie: - - _cfuvid=aGdz584dF_HY1sdQ_zF4nkf.g9NF2CjckoJVlPoc.Fc-1704411240001-0-604800000; - __cf_bm=bcWayhcE4HMKJ0X171sVkGS4gNAbmQXTWiFDSgtCOFU-1704411240-1-AbKS0gip7neWlChe/4UgontGRRzbM+PF2QzntHPKF6nY81RYM50r7TvF9tjB3zMkY3CeXsigR+C0kTjYVFjqLFo= + - _cfuvid=5kZVxJOsRfxObr9hLdehia3eWTY.wvyv.6QajmkP_hU-1707555174271-0-604800000; + __cf_bm=tE85KSJVLW2b9uFQ3HdC_0X9zKb3zvxn_XQrw65Lr5I-1707555174-1-ASbOkyjQPFYaEjdVU9A62Rq43RljO+iw67jesVCWW1SR/PnNWomyDmLymjV9qhbKx0TBnRfqMk7Mva8JQd1CH8A= host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -297,7 +420,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -305,19 +428,19 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRVqwa7QwjlYpBCg5xaUZEWTIMoe\",\n \"object\": - \"chat.completion\",\n \"created\": 1704411242,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to calculate 2 - times 6, and the AI responds with the answer, 12.\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 155,\n \"completion_tokens\": 24,\n \"total_tokens\": 179\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SQMW/CMBCF9/yKk2dASdokKBtdKrrQoVWHqkImOYiL7XPti1qE+O+VQ4B28fDe + fed375gACNWKGkTTSW6M09P5V7t6zVcPZBbpUvObLp9s2T+3++qx7MQkErT5xIYv1Kwh4zSyInu2 + G4+SMW7NqrQqiiKr5oNhqEUdsZ3j6f00LbO7kehINRhEDe8JAMBxeGM22+KPqCGdXBSDIcgdivo6 + BCA86agIGYIKLC2Lyc1syDLaIe5Lh9D1RlqQYR+AO4TFEpjA9JqV0wfIYXOAEqRtL65H7r09D0sb + vtFPIMtnYvzgdE2maec8beIVttf6qm+VVaFbe5SBbEwRmNwZPyUAH0MD/b+jhPNkHK+Z9mjjwqwo + zvvEreybm+ejycRS/6GqKhkTinAIjGa9VXaH3nk1FBJzJqfkFwAA//8DAHGERCIHAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84074a3aee55520a-GRU + - 85331e76dc8f9e6b-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -327,7 +450,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:34:04 GMT + - Sat, 10 Feb 2024 08:52:59 GMT Server: - cloudflare Transfer-Encoding: @@ -341,7 +464,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1160' + - '1406' openai-version: - '2020-10-01' strict-transport-security: @@ -350,43 +473,36 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - '299791' - x-ratelimit-remaining-tokens_usage_based: - - '299791' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - 41ms - x-ratelimit-reset-tokens_usage_based: - - 41ms x-request-id: - - f5f275f8bbce5e3a96781845bea91b8c - http_version: HTTP/1.1 - status_code: 200 + - req_238138022aa1a41b8e8427631d50970e + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re an expert researcher, specialized in technology, software engineering, AI and startups. 
You work as a freelancer and is now working on doing research and analysis for a new customer.\n\nYour personal goal is: Make the best research - and analysis on content about AI and AI agents\n\nTOOLS:\n------\nYou have access - to the following tools:\n\nmultiplier: multiplier(numbers) -> float - Useful - for when you need to multiply two numbers together.\n The input to this - tool should be a comma separated list of numbers of\n length two, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nBegin! This + and analysis on content about AI and AI agentsTOOLS:\n------\nYou have access + to only the following tools:\n\nmultiplier: multiplier(first_number: int, second_number: + int) -> float - Useful for when you need to multiply two numbers together.\n\nTo + use a tool, please use the exact following format:\n\n```\nThought: Do I need + to use a tool? Yes\nAction: the tool you wanna use, should be one of [multiplier], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 - times 6? Return only the number.\n\nThis is the context you are working with:\n12\n"}], + times 6? Return only the number.\nThis is the context you''re working with:\n12\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: @@ -397,7 +513,7 @@ interactions: connection: - keep-alive content-length: - - '1514' + - '1353' content-type: - application/json cookie: @@ -405,7 +521,7 @@ interactions: host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -415,7 +531,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -423,18 +539,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRVst73yaZUVvqi8MkAdhYgxy8Yp\",\n \"object\": - \"chat.completion\",\n \"created\": 1704411244,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: - multiplier\\nAction Input: 2,6\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 318,\n \"completion_tokens\": - 23,\n \"total_tokens\": 341\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRzUvDQBDF7/krhjm30tjvXET0IipSUEGshHQzTVY3O+vuBJTS/102/fSyh/fm + Db83u0kAUJeYAaq6ENU40599l0+vA5o9Ttr5l6rvX8zi4frmdzGdT1Yp9mKCV5+k5JC6UNw4Q6LZ + 7mzlqRCKW9PpYDoej9PpvDMaLsnEWOWkP+oPJulwn6hZKwqYwXsCALDp3shmS/rBDAa9g9JQCEVF + mB2HANCziQoWIegghRXsnUzFVsh2uM81t1UtGdwy3IElKkEY2kBQgDCbK3ijsLTXKpbJoGmNaGc0 + +YMGd9a1ksFmiWvtg+S2bVbkl5jBZQ+WGEixLc/UyRb3JNtjBcOV87yKdW1rzFFfa6tDnXsqAtuI + G4TdLr5NAD66U7X/2qPz3DjJhb/IxoWXs3S3D0+/cnKHw70pLIU509NRsifE8BuEmnytbUXeed1d + LnIm2+QPAAD//wMA6ajhMTACAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84074a4539a1a4dd-GRU + - 85331e815928fa4a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -444,14 +562,14 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:34:06 GMT + - Sat, 10 Feb 2024 08:53:02 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=OBFg78fHa0ijnfAz8h6GymXWDgMj3jeHXBT3OmSgjnY-1704411246-1-AWv0YbE4Eky1dIJX16Gtn4yWnbpu0r+Jn1JKPJgjtWT/r8Q4SPDBV2cO8dro6v3MkATOZxXk4/Wcy3m7XL5s9Zk=; - path=/; expires=Fri, 05-Jan-24 00:04:06 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=Ny4Y6DMnCaDhdMrk0.yEtZYAyfySLJ_cn.5y_yGQmQE-1707555182-1-Ad6o9N1fVvEN4kCGNS1Y4Fp1cER00DyQ7AQPGDRyMwCPpwzi1ON9eK6Yd7tLbnTOrg9EKKXdbdbCnHBem3yA7N0=; + path=/; expires=Sat, 10-Feb-24 09:23:02 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=mmDWVPda72fr6EU7NUfzAtK5KCOZi4RsQOabwHXVa98-1704411246977-0-604800000; + - _cfuvid=EOhUXf9b9_QOfeleN81awDzrY5RByUbEJsA6dkxzMP8-1707555182145-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -464,7 +582,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2183' + - '2085' openai-version: - '2020-10-01' strict-transport-security: @@ -473,46 +591,39 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299646' - x-ratelimit-remaining-tokens_usage_based: - - '299646' + - '299685' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 70ms - x-ratelimit-reset-tokens_usage_based: - - 70ms + - 62ms x-request-id: - - 5e830a8376085c6ce2df403dbecb5a8f - http_version: HTTP/1.1 - status_code: 200 + - req_0ebbf861fab020b2add4febc51c083a4 + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re - an expert researcher, specialized in technology, software engineering, AI and - startups. You work as a freelancer and is now working on doing research and - analysis for a new customer.\n\nYour personal goal is: Make the best research - and analysis on content about AI and AI agents\n\nTOOLS:\n------\nYou have access - to the following tools:\n\nmultiplier: multiplier(numbers) -> float - Useful - for when you need to multiply two numbers together.\n The input to this - tool should be a comma separated list of numbers of\n length two, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nBegin! This - is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 - times 6? Return only the number.\n\nThis is the context you are working with:\n12\nThought: - Do I need to use a tool? Yes\nAction: multiplier\nAction Input: 2,6\nObservation: - 12\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": - false, "temperature": 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": + 2, \"second_number\": 6}\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. 
+ The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -521,16 +632,16 @@ interactions: connection: - keep-alive content-length: - - '1618' + - '1494' content-type: - application/json cookie: - - _cfuvid=mmDWVPda72fr6EU7NUfzAtK5KCOZi4RsQOabwHXVa98-1704411246977-0-604800000; - __cf_bm=OBFg78fHa0ijnfAz8h6GymXWDgMj3jeHXBT3OmSgjnY-1704411246-1-AWv0YbE4Eky1dIJX16Gtn4yWnbpu0r+Jn1JKPJgjtWT/r8Q4SPDBV2cO8dro6v3MkATOZxXk4/Wcy3m7XL5s9Zk= + - _cfuvid=EOhUXf9b9_QOfeleN81awDzrY5RByUbEJsA6dkxzMP8-1707555182145-0-604800000; + __cf_bm=Ny4Y6DMnCaDhdMrk0.yEtZYAyfySLJ_cn.5y_yGQmQE-1707555182-1-Ad6o9N1fVvEN4kCGNS1Y4Fp1cER00DyQ7AQPGDRyMwCPpwzi1ON9eK6Yd7tLbnTOrg9EKKXdbdbCnHBem3yA7N0= host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -540,7 +651,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -548,18 +659,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRVvTsWLclXQnmY67VWcdxVqY3ul\",\n \"object\": - \"chat.completion\",\n \"created\": 1704411247,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
No\\nFinal Answer: - 12\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n - \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 351,\n \"completion_tokens\": - 15,\n \"total_tokens\": 366\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRS4/UMBCE7/kVrT5n0Dx3RrmudsUFECs4AEEZj9NJDHY72B0GNMp/R868lksO + XfV1qsunDABNjQWg7pRo19vZ7lf94cvXx9X88++PL8+f1k/m3dNRnneP7/XxBfNE+MMP0nKl3mjv + ektiPJ9lHUgJpa2L7Xy72WwWu+UkOF+TTVjby2w9mz8sVhei80ZTxAK+ZQAAp+mbsnFNf7CAeX6d + OIpRtYTFzQSAwds0QRWjiaJYML+L2rMQT3HfUiAwERQcydpZ44NTIlSD4YRpAt+AdARRd+RUUfJ+ + vy/5VDJAic3AOl1ZsXJUYgElusGK6a2hUGJ+dqnQDo5YYnJM5MSaEKXiwR2StYBlflUiac/1K+kh + KWPJ4/R3vBwy3hqwvu2DP6S2eLD2Nm8Mm9hVgVT0nK6N4vszPmYA36emh//Kwz5410sl/idxWrha + bs778P6od3W9u4jiRdlX1HaVXRJi/BuFXNUYbin0wUzFp5zZmP0DAAD//wMAJIQ9DG8CAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84074a55f935a4dd-GRU + - 85331e911c08fa4a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -569,7 +682,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:34:08 GMT + - Sat, 10 Feb 2024 08:53:05 GMT Server: - cloudflare Transfer-Encoding: @@ -583,7 +696,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1539' + - '2989' openai-version: - '2020-10-01' strict-transport-security: @@ -592,24 +705,132 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299662' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 67ms + x-request-id: + - req_cf4290379281a132751fbcb17c59da57 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re + an expert researcher, specialized in technology, software engineering, AI and + startups. You work as a freelancer and is now working on doing research and + analysis for a new customer.\n\nYour personal goal is: Make the best research + and analysis on content about AI and AI agentsTOOLS:\n------\nYou have access + to only the following tools:\n\nmultiplier: multiplier(first_number: int, second_number: + int) -> float - Useful for when you need to multiply two numbers together.\n\nTo + use a tool, please use the exact following format:\n\n```\nThought: Do I need + to use a tool? Yes\nAction: the tool you wanna use, should be one of [multiplier], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nBegin! This + is VERY important to you, your job depends on it!\n\nCurrent Task: What is 2 + times 6? Return only the number.\nThis is the context you''re working with:\n12\nThought: + Do I need to use a tool? 
Yes\nAction: multiplier\nAction Input: {\"first_number\": + 2, \"second_number\": 6}\nObservation: 12\nThought: "}], "model": "gpt-4", "n": + 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1497' + content-type: + - application/json + cookie: + - _cfuvid=EOhUXf9b9_QOfeleN81awDzrY5RByUbEJsA6dkxzMP8-1707555182145-0-604800000; + __cf_bm=Ny4Y6DMnCaDhdMrk0.yEtZYAyfySLJ_cn.5y_yGQmQE-1707555182-1-Ad6o9N1fVvEN4kCGNS1Y4Fp1cER00DyQ7AQPGDRyMwCPpwzi1ON9eK6Yd7tLbnTOrg9EKKXdbdbCnHBem3yA7N0= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SQS0/DMBCE7/kVK59b1DSEhlwQUkEgAVWFhCoeqlxnmxocr7G3ogX1vyOnL7j4 + MLMz+61/EgChK1GCUAvJqnGmW3xWo9nV5PF7eDcZ18V8NL6Z3Kdr86TOn1eiExM0e0fF+9SJosYZ + ZE12ayuPkjG2poPeIM/ztMhbo6EKTYzVjrun3d5Zmu0SC9IKgyjhJQEA+GnfyGYrXIkSep290mAI + skZRHoYAhCcTFSFD0IGlZdE5mooso21xhwS3YBErYIJlQJDAROYCHujVXmsrDVza8IW+hLQvdhWb + w25DtfM0i5x2acxBn2urw2LqUQaycU9gctv4JgF4a29c/sMWzlPjeMr0gTYWZv1s2yeO33l003xn + MrE0f1JZkewIRVgHxmY617ZG77xuT46cySb5BQAA//8DAPd+nhHpAQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331ea4eadffa4a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:53:06 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '994' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299621' - x-ratelimit-remaining-tokens_usage_based: - - '299621' + - '299651' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 75ms - x-ratelimit-reset-tokens_usage_based: - - 75ms + - 69ms x-request-id: - - bdbe02bf0b83c88f824c067b27e5ce08 - http_version: HTTP/1.1 - status_code: 200 + - req_b2fa5c2a1c9913c75c92f7242d30478d + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -621,285 +842,8 @@ interactions: the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: What - is 2 times 6? 
Return only the number.\n\nThis is the context you are working - with:\n12\nAI: 12\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, - "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '945' - content-type: - - application/json - cookie: - - _cfuvid=mmDWVPda72fr6EU7NUfzAtK5KCOZi4RsQOabwHXVa98-1704411246977-0-604800000; - __cf_bm=OBFg78fHa0ijnfAz8h6GymXWDgMj3jeHXBT3OmSgjnY-1704411246-1-AWv0YbE4Eky1dIJX16Gtn4yWnbpu0r+Jn1JKPJgjtWT/r8Q4SPDBV2cO8dro6v3MkATOZxXk4/Wcy3m7XL5s9Zk= - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.6.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.6.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8dRVxzDne3c08p3cTarxVS8coZYTY\",\n \"object\": - \"chat.completion\",\n \"created\": 1704411249,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to calculate 2 - times 6, and the AI responds with the correct answer, 12.\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 165,\n \"completion_tokens\": 25,\n \"total_tokens\": 190\n },\n \"system_fingerprint\": - null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 84074a610867a4dd-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 04 Jan 2024 23:34:12 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '2856' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299780' - x-ratelimit-remaining-tokens_usage_based: - - '299780' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 44ms - x-ratelimit-reset-tokens_usage_based: - - 44ms - x-request-id: - - bc1098a0a70b65dd693837175bdd9c7a - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are CEO.\nYou''re an long - time CEO of a content creation agency with a Senior Writer on the team. 
You''re - now working on a new project and want to make sure the content produced is amazing.\n\nYour - personal goal is: Make sure the writers in your company produce amazing content.\n\nTOOLS:\n------\nYou - have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float - - Useful for when you need to multiply two numbers together.\n The input - to this tool should be a comma separated list of numbers of\n length - two, representing the two numbers you want to multiply together.\n For - example, `1,2` would be the input if you wanted to multiply 1 by 2.\nDelegate - work to co-worker: Useful to delegate a specific task to one of the\n\t\t\t\tfollowing - co-workers: [CEO, Researcher].\n\t\t\t\tThe input to this tool should be a pipe - (|) separated text of length\n\t\t\t\tthree, representing the role you want - to delegate it to, the task and\n\t\t\t\tinformation necessary. For example, - `coworker|task|information`.\n\nAsk question to co-worker: Useful to ask a question, - opinion or take from on\n\t\t\t\tof the following co-workers: [CEO, Researcher].\n\t\t\t\tThe - input to this tool should be a pipe (|) separated text of length\n\t\t\t\tthree, - representing the role you want to ask it to, the question and\n\t\t\t\tinformation - necessary. For example, `coworker|question|information`.\n\nDelegate work to - co-worker: Useful to delegate a specific task to one of the\n\t\t\t\tfollowing - co-workers: [CEO, Researcher].\n\t\t\t\tThe input to this tool should be a pipe - (|) separated text of length\n\t\t\t\tthree, representing the role you want - to delegate it to, the task and\n\t\t\t\tinformation necessary. For example, - `coworker|task|information`.\n\nAsk question to co-worker: Useful to ask a question, - opinion or take from on\n\t\t\t\tof the following co-workers: [CEO, Researcher].\n\t\t\t\tThe - input to this tool should be a pipe (|) separated text of length\n\t\t\t\tthree, - representing the role you want to ask it to, the question and\n\t\t\t\tinformation - necessary. For example, `coworker|question|information`.\n\n\nTo use a tool, - please use the exact following format:\n\n```\nThought: Do I need to use a tool? - Yes\nAction: the action to take, should be one of [multiplier, Delegate work - to co-worker, Ask question to co-worker, Delegate work to co-worker, Ask question - to co-worker], just the name.\nAction Input: the input to the action\nObservation: - the result of the action\n```\n\nWhen you have a response for your task, or - if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]\n```This is - the summary of your work so far:\nThe human asks the AI to calculate 2 times - 6, and the AI responds with the answer, 12.Begin! This is VERY important to - you, your job depends on it!\n\nCurrent Task: What is 2 tims 6? 
Return only - the number.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": - false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '3098' - content-type: - - application/json - cookie: - - _cfuvid=aGdz584dF_HY1sdQ_zF4nkf.g9NF2CjckoJVlPoc.Fc-1704411240001-0-604800000; - __cf_bm=bcWayhcE4HMKJ0X171sVkGS4gNAbmQXTWiFDSgtCOFU-1704411240-1-AbKS0gip7neWlChe/4UgontGRRzbM+PF2QzntHPKF6nY81RYM50r7TvF9tjB3zMkY3CeXsigR+C0kTjYVFjqLFo= - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.6.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.6.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8dRW0twTa8zcVszzkf4rHaJtBQyuE\",\n \"object\": - \"chat.completion\",\n \"created\": 1704411252,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction: - multiplier\\nAction Input: 2,6\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 688,\n \"completion_tokens\": - 23,\n \"total_tokens\": 711\n },\n \"system_fingerprint\": null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 84074a768894016c-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 04 Jan 2024 23:34:13 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '1169' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299272' - x-ratelimit-remaining-tokens_usage_based: - - '299272' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 145ms - x-ratelimit-reset-tokens_usage_based: - - 145ms - x-request-id: - - b486103e6e3ed65ba15411a1fcd1534a - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are CEO.\nYou''re an long - time CEO of a content creation agency with a Senior Writer on the team. 
You''re - now working on a new project and want to make sure the content produced is amazing.\n\nYour - personal goal is: Make sure the writers in your company produce amazing content.\n\nTOOLS:\n------\nYou - have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float - - Useful for when you need to multiply two numbers together.\n The input - to this tool should be a comma separated list of numbers of\n length - two, representing the two numbers you want to multiply together.\n For - example, `1,2` would be the input if you wanted to multiply 1 by 2.\nDelegate - work to co-worker: Useful to delegate a specific task to one of the\n\t\t\t\tfollowing - co-workers: [CEO, Researcher].\n\t\t\t\tThe input to this tool should be a pipe - (|) separated text of length\n\t\t\t\tthree, representing the role you want - to delegate it to, the task and\n\t\t\t\tinformation necessary. For example, - `coworker|task|information`.\n\nAsk question to co-worker: Useful to ask a question, - opinion or take from on\n\t\t\t\tof the following co-workers: [CEO, Researcher].\n\t\t\t\tThe - input to this tool should be a pipe (|) separated text of length\n\t\t\t\tthree, - representing the role you want to ask it to, the question and\n\t\t\t\tinformation - necessary. For example, `coworker|question|information`.\n\nDelegate work to - co-worker: Useful to delegate a specific task to one of the\n\t\t\t\tfollowing - co-workers: [CEO, Researcher].\n\t\t\t\tThe input to this tool should be a pipe - (|) separated text of length\n\t\t\t\tthree, representing the role you want - to delegate it to, the task and\n\t\t\t\tinformation necessary. For example, - `coworker|task|information`.\n\nAsk question to co-worker: Useful to ask a question, - opinion or take from on\n\t\t\t\tof the following co-workers: [CEO, Researcher].\n\t\t\t\tThe - input to this tool should be a pipe (|) separated text of length\n\t\t\t\tthree, - representing the role you want to ask it to, the question and\n\t\t\t\tinformation - necessary. For example, `coworker|question|information`.\n\n\nTo use a tool, - please use the exact following format:\n\n```\nThought: Do I need to use a tool? - Yes\nAction: the action to take, should be one of [multiplier, Delegate work - to co-worker, Ask question to co-worker, Delegate work to co-worker, Ask question - to co-worker], just the name.\nAction Input: the input to the action\nObservation: - the result of the action\n```\n\nWhen you have a response for your task, or - if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]\n```This is - the summary of your work so far:\nThe human asks the AI to calculate 2 times - 6, and the AI responds with the answer, 12.Begin! This is VERY important to - you, your job depends on it!\n\nCurrent Task: What is 2 tims 6? Return only - the number.\n\nI just used the multiplier tool with input 2,6. So I already - know the result of that.\nObservation: Invalid or incomplete response\nThought: - "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + is 2 times 6? 
Return only the number.\nThis is the context you''re working with:\n12\nAI: + 12\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' headers: accept: @@ -909,16 +853,16 @@ interactions: connection: - keep-alive content-length: - - '3241' + - '942' content-type: - application/json cookie: - - _cfuvid=aGdz584dF_HY1sdQ_zF4nkf.g9NF2CjckoJVlPoc.Fc-1704411240001-0-604800000; - __cf_bm=bcWayhcE4HMKJ0X171sVkGS4gNAbmQXTWiFDSgtCOFU-1704411240-1-AbKS0gip7neWlChe/4UgontGRRzbM+PF2QzntHPKF6nY81RYM50r7TvF9tjB3zMkY3CeXsigR+C0kTjYVFjqLFo= + - _cfuvid=EOhUXf9b9_QOfeleN81awDzrY5RByUbEJsA6dkxzMP8-1707555182145-0-604800000; + __cf_bm=Ny4Y6DMnCaDhdMrk0.yEtZYAyfySLJ_cn.5y_yGQmQE-1707555182-1-Ad6o9N1fVvEN4kCGNS1Y4Fp1cER00DyQ7AQPGDRyMwCPpwzi1ON9eK6Yd7tLbnTOrg9EKKXdbdbCnHBem3yA7N0= host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -928,7 +872,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -936,18 +880,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRW1CmfWI2T4pqcjA32JkFO2d631\",\n \"object\": - \"chat.completion\",\n \"created\": 1704411253,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: - 12\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n - \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 721,\n \"completion_tokens\": - 15,\n \"total_tokens\": 736\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SQQU8CMRCF7/srJj0DYVdYCDcDSAwHE6MeNIaU7rBb6XZqOwQJ4b+bLgvopYf3 + 5k2/eccEQOhCTECoSrKqnemOv4snlc/2i8Ni4AbZ+9vrcj5dPj9sZ9M5iU5M0PoLFV9SPUW1M8ia + 7NlWHiVj3JqO+qPhcJiO88aoqUATY6Xj7qDbz9O7NlGRVhjEBD4SAIBj80Y2W+CPmEC/c1FqDEGW + KCbXIQDhyURFyBB0YGlZdG6mIstoG9yXCqHa1dKCDNsAXCHcPwITKGnUzkhGyIB1jQHyDkhbXEYU + eY+KzQE8Bke2CLDXXEGa9UT70+mKaKh0ntbxHLsz5qpvtNWhWnmUgWzECUzuHD8lAJ9NFbt/1wnn + qXa8YtqijQvTfHjeJ26t39wsa00mluZPajxKWkIRDoGxXm20LdE7r5tmImdySn4BAAD//wMArPEP + rxACAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84074a801ca8016c-GRU + - 85331eabff7ffa4a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -957,7 +903,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:34:15 GMT + - Sat, 10 Feb 2024 08:53:08 GMT Server: - cloudflare Transfer-Encoding: @@ -971,7 +917,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1961' + - '1163' openai-version: - '2020-10-01' strict-transport-security: @@ -980,24 +926,885 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299780' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 44ms + x-request-id: + - req_1e0c6ef6f09f0b90ac16b1d31fd58e4d + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are CEO.\nYou''re an long + time CEO of a content creation agency with a Senior Writer on the team. 
You''re + now working on a new project and want to make sure the content produced is amazing.\n\nYour + personal goal is: Make sure the writers in your company produce amazing content.TOOLS:\n------\nYou + have access to only the following tools:\n\nmultiplier: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\nDelegate work to co-worker: Delegate work to co-worker(coworker: + str, task: str, context: str) - Delegate a specific task to one of the following + co-workers: Researcher. The input to this tool should be the role of the coworker, + the task you want them to do, and ALL necessary context to exectue the task, + they know nothing about the task, so share absolute everything you know, don''t + reference things but instead explain them.\nAsk question to co-worker: Ask question + to co-worker(coworker: str, question: str, context: str) - Ask a specific question + to one of the following co-workers: Researcher. The input to this tool should + be the role of the coworker, the question you have for them, and ALL necessary + context to ask the question properly, they know nothing about the question, + so share absolute everything you know, don''t reference things but instead explain + them.\nDelegate work to co-worker: Delegate work to co-worker(coworker: str, + task: str, context: str) - Delegate a specific task to one of the following + co-workers: Researcher. The input to this tool should be the role of the coworker, + the task you want them to do, and ALL necessary context to exectue the task, + they know nothing about the task, so share absolute everything you know, don''t + reference things but instead explain them.\nAsk question to co-worker: Ask question + to co-worker(coworker: str, question: str, context: str) - Ask a specific question + to one of the following co-workers: Researcher. The input to this tool should + be the role of the coworker, the question you have for them, and ALL necessary + context to ask the question properly, they know nothing about the question, + so share absolute everything you know, don''t reference things but instead explain + them.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: + Do I need to use a tool? Yes\nAction: the tool you wanna use, should be one + of [multiplier, Delegate work to co-worker, Ask question to co-worker, Delegate + work to co-worker, Ask question to co-worker], just the name.\nAction Input: + Any and all relevant information input and context for using the tool\nObservation: + the result of using the tool\n```\n\nWhen you have a response for your task, + or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]```This is the + summary of your work so far:\nThe human asks the AI to multiply 2 by 6 and the + AI returns the answer, 12.Begin! This is VERY important to you, your job depends + on it!\n\nCurrent Task: What is 2 tims 6? 
Return only the number.\n"}], "model": + "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '3213' + content-type: + - application/json + cookie: + - _cfuvid=5kZVxJOsRfxObr9hLdehia3eWTY.wvyv.6QajmkP_hU-1707555174271-0-604800000; + __cf_bm=tE85KSJVLW2b9uFQ3HdC_0X9zKb3zvxn_XQrw65Lr5I-1707555174-1-ASbOkyjQPFYaEjdVU9A62Rq43RljO+iw67jesVCWW1SR/PnNWomyDmLymjV9qhbKx0TBnRfqMk7Mva8JQd1CH8A= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SRW0sDMRCF3/dXDPPcSmvthX2RoiKlQhUUKVbKNjvdjWYzMZkVtfS/S7Y3fcnD + OXOG70w2CQDqHFNAVWaiKmfao498Rnc3V9P5+DaoJ76ffj5MZj9+ys/WYCsmePVGSg6pM8WVMySa + 7c5WnjKhuLU77Az7/X53NGqMinMyMVY4aV+0O4Nub58oWSsKmMJLAgCwad7IZnP6whQ6rYNSUQhZ + QZgehwDQs4kKZiHoIJkVbJ1MxVbINriPJddFKSlcM0zAEuUgDHUgyECYzSXMKSzsWMUyKVS1Ee2M + Jn/QYGJdLSlsFrjWPsjS1tWK/AJTOG/BAgMptvkfdbDFPcn2WMFw4TyvYl1bG3PU19rqUC49ZYFt + xA3CbhffJgCvzanqf+3Rea6cLIXfycaFw05vtw9Pv3JyewdTWDLzJ9UbJHtCDN9BqFqutS3IO6+b + y0XOZJv8AgAA//8DANObLFcwAgAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331eb5bb889e6e-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:53:10 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1865' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299236' - x-ratelimit-remaining-tokens_usage_based: - - '299236' + - '299220' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 152ms - x-ratelimit-reset-tokens_usage_based: - - 152ms + - 155ms x-request-id: - - 555ce73ac9d4daed7f521005a3d040e0 - http_version: HTTP/1.1 - status_code: 200 + - req_f9d083482588cfd68d861681f1d2a70a + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n--\nFuntion Name: Delegate work to co-worker\nFuntion attributes: + {''coworker'': {''type'': ''string''}, ''task'': {''type'': ''string''}, ''context'': + {''type'': ''string''}}\nDescription: Delegate work to co-worker(coworker: str, + task: str, context: str) - Delegate a specific task to one of the following + co-workers: Researcher. 
The input to this tool should be the role of the coworker, + the task you want them to do, and ALL necessary context to exectue the task, + they know nothing about the task, so share absolute everything you know, don''t + reference things but instead explain them.\n--\nFuntion Name: Ask question to + co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, ''question'': + {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: Ask + question to co-worker(coworker: str, question: str, context: str) - Ask a specific + question to one of the following co-workers: Researcher. The input to this tool + should be the role of the coworker, the question you have for them, and ALL + necessary context to ask the question properly, they know nothing about the + question, so share absolute everything you know, don''t reference things but + instead explain them.\n--\nFuntion Name: Delegate work to co-worker\nFuntion + attributes: {''coworker'': {''type'': ''string''}, ''task'': {''type'': ''string''}, + ''context'': {''type'': ''string''}}\nDescription: Delegate work to co-worker(coworker: + str, task: str, context: str) - Delegate a specific task to one of the following + co-workers: Researcher. The input to this tool should be the role of the coworker, + the task you want them to do, and ALL necessary context to exectue the task, + they know nothing about the task, so share absolute everything you know, don''t + reference things but instead explain them.\n--\nFuntion Name: Ask question to + co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, ''question'': + {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: Ask + question to co-worker(coworker: str, question: str, context: str) - Ask a specific + question to one of the following co-workers: Researcher. The input to this tool + should be the role of the coworker, the question you have for them, and ALL + necessary context to ask the question properly, they know nothing about the + question, so share absolute everything you know, don''t reference things but + instead explain them.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": + 2, \"second_number\": 6}\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. 
+ The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '3826' + content-type: + - application/json + cookie: + - _cfuvid=5kZVxJOsRfxObr9hLdehia3eWTY.wvyv.6QajmkP_hU-1707555174271-0-604800000; + __cf_bm=tE85KSJVLW2b9uFQ3HdC_0X9zKb3zvxn_XQrw65Lr5I-1707555174-1-ASbOkyjQPFYaEjdVU9A62Rq43RljO+iw67jesVCWW1SR/PnNWomyDmLymjV9qhbKx0TBnRfqMk7Mva8JQd1CH8A= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SRQU/jMBCF7/kV1pzbVQOUprnCgQviBkKbVeI609Rgj409QUCV/76yW1q4+PDe + +0ZvxvtCCNA91ALUTrKy3syrt/5h+Py4rZ7unm/Maqy+Hr+emO9LXr8/wiwRbvOCir+pP8pZb5C1 + o4OtAkrGNLVcLVbL5bJcL7JhXY8mYYPn+dV8cV1eHomd0woj1OJvIYQQ+/ymbtTjB9Qi81mxGKMc + EOpTSAgIziQFZIw6siSG2dlUjhgp1+26rqF9Q0I0sB1Jpc4tSYsN1KIBOxrW3mgMDcwOKRmG0SJx + TIlMZlaHyC2NdpOitbiYfTsRlaP+h3WdnKmhqaGu6+BYazrtY9zgg9uk3Wk05qRvNem4awPK6Ch1 + j+z8AZ8KIf7lu42/TgE+OOu5ZfeKlAZWVXmYB+cvOruXq6PJjqU56+uyKo4NIX5GRttuNQ0YfND5 + jKlnMRX/AQAA//8DADOMN0k9AgAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331ec22c139e6e-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:53:12 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2339' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299083' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 183ms + x-request-id: + - req_3ce7d6aba5122535dc207e098f58a9e7 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are CEO.\nYou''re an long + time CEO of a content creation agency with a Senior Writer on the team. 
You''re + now working on a new project and want to make sure the content produced is amazing.\n\nYour + personal goal is: Make sure the writers in your company produce amazing content.TOOLS:\n------\nYou + have access to only the following tools:\n\nmultiplier: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\nDelegate work to co-worker: Delegate work to co-worker(coworker: + str, task: str, context: str) - Delegate a specific task to one of the following + co-workers: Researcher. The input to this tool should be the role of the coworker, + the task you want them to do, and ALL necessary context to exectue the task, + they know nothing about the task, so share absolute everything you know, don''t + reference things but instead explain them.\nAsk question to co-worker: Ask question + to co-worker(coworker: str, question: str, context: str) - Ask a specific question + to one of the following co-workers: Researcher. The input to this tool should + be the role of the coworker, the question you have for them, and ALL necessary + context to ask the question properly, they know nothing about the question, + so share absolute everything you know, don''t reference things but instead explain + them.\nDelegate work to co-worker: Delegate work to co-worker(coworker: str, + task: str, context: str) - Delegate a specific task to one of the following + co-workers: Researcher. The input to this tool should be the role of the coworker, + the task you want them to do, and ALL necessary context to exectue the task, + they know nothing about the task, so share absolute everything you know, don''t + reference things but instead explain them.\nAsk question to co-worker: Ask question + to co-worker(coworker: str, question: str, context: str) - Ask a specific question + to one of the following co-workers: Researcher. The input to this tool should + be the role of the coworker, the question you have for them, and ALL necessary + context to ask the question properly, they know nothing about the question, + so share absolute everything you know, don''t reference things but instead explain + them.\n\nTo use a tool, please use the exact following format:\n\n```\nThought: + Do I need to use a tool? Yes\nAction: the tool you wanna use, should be one + of [multiplier, Delegate work to co-worker, Ask question to co-worker, Delegate + work to co-worker, Ask question to co-worker], just the name.\nAction Input: + Any and all relevant information input and context for using the tool\nObservation: + the result of using the tool\n```\n\nWhen you have a response for your task, + or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]```This is the + summary of your work so far:\nThe human asks the AI to multiply 2 by 6 and the + AI returns the answer, 12.Begin! This is VERY important to you, your job depends + on it!\n\nCurrent Task: What is 2 tims 6? Return only the number.\nThought: + Do I need to use a tool? 
Yes\nAction: multiplier\nAction Input: {\"first_number\": + 2, \"second_number\": 6}\nObservation: 12\nThought: "}], "model": "gpt-4", "n": + 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '3357' + content-type: + - application/json + cookie: + - _cfuvid=5kZVxJOsRfxObr9hLdehia3eWTY.wvyv.6QajmkP_hU-1707555174271-0-604800000; + __cf_bm=tE85KSJVLW2b9uFQ3HdC_0X9zKb3zvxn_XQrw65Lr5I-1707555174-1-ASbOkyjQPFYaEjdVU9A62Rq43RljO+iw67jesVCWW1SR/PnNWomyDmLymjV9qhbKx0TBnRfqMk7Mva8JQd1CH8A= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SQS0/DMBCE7/kVK59blJSmobmgIgSqhOgBCSEeqlxnm5o6XmNvHwj1vyOnL7j4 + MLMz+61/EgChK1GCUAvJqnGme/VVTfTrzcOLXA6Lp/76eX4/3K5HZrJZ1mPRiQmafaLiY+pCUeMM + sia7t5VHyRhbsyIt8jzPhr3WaKhCE2O1426/mw6yy0NiQVphECW8JQAAP+0b2WyFW1FC2jkqDYYg + axTlaQhAeDJRETIEHVhaFp2zqcgy2hb3lmAMFrECJlgFBAlMZK7hkd7tnbbSwMiGDfoSsp44VOxO + uw3VztMsctqVMSd9rq0Oi6lHGcjGPYHJ7eO7BOCjvXH1D1s4T43jKdMSbSws+vm+T5y/8+xmR5OJ + pfmTGqTJgVCE78DYTOfa1uid1+3JkTPZJb8AAAD//wMAP0eFIukBAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331ed2f8159e6e-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:53:13 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1015' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299186' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 162ms + x-request-id: + - req_cfa7c5a5ec642001c23ff034a7badf64 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Progressively summarize the + lines of conversation provided, adding onto the previous summary returning a + new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks + of artificial intelligence. The AI thinks artificial intelligence is a force + for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial + intelligence is a force for good?\nAI: Because artificial intelligence will + help humans reach their full potential.\n\nNew summary:\nThe human asks what + the AI thinks of artificial intelligence. The AI thinks artificial intelligence + is a force for good because it will help humans reach their full potential.\nEND + OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to multiply 2 by 6 and + the AI returns the answer, 12.\n\nNew lines of conversation:\nHuman: What is + 2 tims 6? 
Return only the number.\nAI: 12\n\nNew summary:"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '970' + content-type: + - application/json + cookie: + - _cfuvid=5kZVxJOsRfxObr9hLdehia3eWTY.wvyv.6QajmkP_hU-1707555174271-0-604800000; + __cf_bm=tE85KSJVLW2b9uFQ3HdC_0X9zKb3zvxn_XQrw65Lr5I-1707555174-1-ASbOkyjQPFYaEjdVU9A62Rq43RljO+iw67jesVCWW1SR/PnNWomyDmLymjV9qhbKx0TBnRfqMk7Mva8JQd1CH8A= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SQS0/DMBCE7/kVK5/bKkmbFnJD4kARiAuCA0KV42xrU7+wNypV1f+OnPQBFx9m + 9lvPziEDYKplNTAhOQnj9fjmu33ZVve+0VLiW1ksn/Obh6ft4/tyanI2SoRrvlDQmZoIZ7xGUs4O + tgjICdPWYpEvqqoqbme9YVyLOmEbT+PZOJ8X0xMhnRIYWQ0fGQDAoX9TNtviD6shH50VgzHyDbL6 + MgTAgtNJYTxGFYlbYqOrKZwltH3cV4kgO8Mt8LiNQBLhbgnkwHSalNd7KKHZwxxopwQCty0gFxJI + GTxPB6Qu2AHmNu4wjKAoJ+z04fGSVLuND65JV9lO64u+VlZFuQrIo7MpVSTnB/yYAXz2jXT/jmQ+ + OONpRW6LNvbFLoZ97Fr+1S2rk0mOuP6j52V2SsjiPhKa1VrZDQYfVF9Qypkds18AAAD//wMANLco + TBcCAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331edaae979e6e-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:53:15 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1613' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299773' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 45ms + x-request-id: + - req_8a46ff46a03a9a60a088b949786292b9 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re + an expert researcher, specialized in technology, software engineering, AI and + startups. You work as a freelancer and is now working on doing research and + analysis for a new customer.\n\nYour personal goal is: Make the best research + and analysis on content about AI and AI agentsTOOLS:\n------\nYou have access + to only the following tools:\n\nmultiplier: multiplier(first_number: int, second_number: + int) -> float - Useful for when you need to multiply two numbers together.\n\nTo + use a tool, please use the exact following format:\n\n```\nThought: Do I need + to use a tool? Yes\nAction: the tool you wanna use, should be one of [multiplier], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? 
No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nThe human asks + the AI to calculate 2 times 6, and the AI correctly responds with 12.Begin! + This is VERY important to you, your job depends on it!\n\nCurrent Task: What + is 2 times 6? Return only the number.\nThis is the context you''re working with:\n12\n"}], + "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1437' + content-type: + - application/json + cookie: + - _cfuvid=EOhUXf9b9_QOfeleN81awDzrY5RByUbEJsA6dkxzMP8-1707555182145-0-604800000; + __cf_bm=Ny4Y6DMnCaDhdMrk0.yEtZYAyfySLJ_cn.5y_yGQmQE-1707555182-1-Ad6o9N1fVvEN4kCGNS1Y4Fp1cER00DyQ7AQPGDRyMwCPpwzi1ON9eK6Yd7tLbnTOrg9EKKXdbdbCnHBem3yA7N0= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SRzUsDMRDF7/tXDHNupR+26l5EKWK9iCKCWinb7HQ3mmRiMitK6f8u2X56yeG9 + ecPvTVYZAOoSc0BVF6KsN93zr/LeDvlqeffoF9c3tw/P/en1q5/cD76rCXZSghcfpGSXOlFsvSHR + 7Da2ClQIpa39s97ZaDTqX4xbw3JJJsUqL93Tbm/cH24TNWtFEXN4ywAAVu2b2FxJP5hDr7NTLMVY + VIT5fggAA5ukYBGjjlI4wc7BVOyEXIv7VHNT1ZLDhGEKjqgEYWgiQQHCbC7hheLMXalUJgfbGNHe + aAo7DabON5LDaoZLHaLMXWMXFGaYw6ADM4yk2JVH6niNW5L1voLhygdepLquMWavL7XTsZ4HKiK7 + hBuF/Sa+zgDe21M1/9qjD2y9zIU/yaWFw95gsw8Pv3LkDremsBTmWB9lW0KMv1HIzpfaVRR80O3l + Eme2zv4AAAD//wMA/D8Y8DACAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331ee74b331692-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:53:17 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1472' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299665' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 67ms + x-request-id: + - req_dca68a6f7c56d36fd10db138c8a07d7e + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? 
Yes\nAction: multiplier\nAction Input: {\"first_number\": + 2, \"second_number\": 6}\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1494' + content-type: + - application/json + cookie: + - _cfuvid=EOhUXf9b9_QOfeleN81awDzrY5RByUbEJsA6dkxzMP8-1707555182145-0-604800000; + __cf_bm=Ny4Y6DMnCaDhdMrk0.yEtZYAyfySLJ_cn.5y_yGQmQE-1707555182-1-Ad6o9N1fVvEN4kCGNS1Y4Fp1cER00DyQ7AQPGDRyMwCPpwzi1ON9eK6Yd7tLbnTOrg9EKKXdbdbCnHBem3yA7N0= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SRQW/bMAyF7/4VBM/OkLRx2vm4HbpbsWHYZRocRaZttRLlSfS6IvB/H+SkSXfx + 4X189OPTsQBA22INaAYtxo9udf+7fQzVjz+fo/60jTK99I/fH145fHsev66xzI5weCIjb64PJvjR + kdjAJ2wiaaG8dXO3vquqavPxfgE+tOSyrR9ltV2td5vbs2MI1lDCGn4WAADH5ZuzcUt/sYZ1+aZ4 + Skn3hPVlCABjcFlBnZJNolmwvEITWIiXuF8oEtgEMhC8kHOrLkSvRagFy9loCEK34GQG8rpWrHi/ + 3ys+KlbYTWzynQ1rTwprUOgnJ3Z0lqLCMs/o2E+eWFLmR4WdjUkanvwhj9RwU4LCRCZw+07dzYrn + 5Vd4Tj5fTnahH2M45Hp4cu6id5ZtGppIOgXO5yUJ48k+FwC/lmqn/9rCMQY/SiPhmTgvvL2pTvvw + +opXut2coQTR7p1rtyvOCTG9JiHfdJZ7imO0S9M5ZzEX/wAAAP//AwAzEiOyYAIAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331ef2fcc11692-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:53:20 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2650' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299661' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 67ms + x-request-id: + - req_6940f18dab107804798e162f3c660d1e + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re + an expert researcher, specialized in technology, software engineering, AI and + 
startups. You work as a freelancer and is now working on doing research and + analysis for a new customer.\n\nYour personal goal is: Make the best research + and analysis on content about AI and AI agentsTOOLS:\n------\nYou have access + to only the following tools:\n\nmultiplier: multiplier(first_number: int, second_number: + int) -> float - Useful for when you need to multiply two numbers together.\n\nTo + use a tool, please use the exact following format:\n\n```\nThought: Do I need + to use a tool? Yes\nAction: the tool you wanna use, should be one of [multiplier], + just the name.\nAction Input: Any and all relevant information input and context + for using the tool\nObservation: the result of using the tool\n```\n\nWhen you + have a response for your task, or if you do not need to use a tool, you MUST + use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: + [your response here]```This is the summary of your work so far:\nThe human asks + the AI to calculate 2 times 6, and the AI correctly responds with 12.Begin! + This is VERY important to you, your job depends on it!\n\nCurrent Task: What + is 2 times 6? Return only the number.\nThis is the context you''re working with:\n12\nThought: + Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": + 2, \"second_number\": 6}\nObservation: 12\nThought: "}], "model": "gpt-4", "n": + 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1581' + content-type: + - application/json + cookie: + - _cfuvid=EOhUXf9b9_QOfeleN81awDzrY5RByUbEJsA6dkxzMP8-1707555182145-0-604800000; + __cf_bm=Ny4Y6DMnCaDhdMrk0.yEtZYAyfySLJ_cn.5y_yGQmQE-1707555182-1-Ad6o9N1fVvEN4kCGNS1Y4Fp1cER00DyQ7AQPGDRyMwCPpwzi1ON9eK6Yd7tLbnTOrg9EKKXdbdbCnHBem3yA7N0= + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SQQU8CMRCF7/srJj0vZhdY0L0YE0LCQQlHo4aU7rBUu53SDioS/rvpsoBeenhv + 3ptvekgAhK5ECUJtJKvGmd7ttpp7X01Gn6Ge/jzS87ZZSJqZxb6eT0QaE7R6R8Xn1I2ixhlkTfZk + K4+SMbbm42xcFEU/y1ujoQpNjNWOe8NeNsoHXWJDWmEQJbwkAACH9o1stsJvUUKWnpUGQ5A1ivIy + BCA8magIGYIOLC2L9Goqsoy2xZ0QzMAiVsAEu4AggYnMPTzRq51qKw082PCFvoS8L7qK42W3odp5 + WkVOuzPmoq+11WGz9CgD2bgnMLlT/JgAvLU37v5hC+epcbxk+kAbCwfD4alPXL/z6uZFZzKxNH9S + xV3SEYqwD4zNcq1tjd553Z4cOZNj8gsAAP//AwCzfxwV6QEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331f05dd851692-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:53:23 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '2516' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299630' + 
x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 73ms + x-request-id: + - req_7581ccc086ee4ffc4f872ec93917fc5e + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -1009,359 +1816,8 @@ interactions: the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to calculate 2 times 6, - and the AI responds with the answer, 12.\n\nNew lines of conversation:\nHuman: - What is 2 tims 6? Return only the number.\nAI: 12\n\nNew summary:"}], "model": - "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '981' - content-type: - - application/json - cookie: - - _cfuvid=aGdz584dF_HY1sdQ_zF4nkf.g9NF2CjckoJVlPoc.Fc-1704411240001-0-604800000; - __cf_bm=bcWayhcE4HMKJ0X171sVkGS4gNAbmQXTWiFDSgtCOFU-1704411240-1-AbKS0gip7neWlChe/4UgontGRRzbM+PF2QzntHPKF6nY81RYM50r7TvF9tjB3zMkY3CeXsigR+C0kTjYVFjqLFo= - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.6.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.6.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8dRW49WMzFv360XBT2uhgJPnSHg2O\",\n \"object\": - \"chat.completion\",\n \"created\": 1704411256,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to calculate 2 - times 6 twice, and both times the AI correctly responds with the answer, 12.\"\n - \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n - \ ],\n \"usage\": {\n \"prompt_tokens\": 179,\n \"completion_tokens\": - 28,\n \"total_tokens\": 207\n },\n \"system_fingerprint\": null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 84074a8dfccf016c-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 04 Jan 2024 23:34:18 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '2192' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299770' - x-ratelimit-remaining-tokens_usage_based: - - '299770' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 46ms - x-ratelimit-reset-tokens_usage_based: - - 46ms - x-request-id: - - f5dda6e21a06ac9af1a14b27c551c3c1 - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re - an expert researcher, specialized in technology, software engineering, AI and - startups. 
You work as a freelancer and is now working on doing research and - analysis for a new customer.\n\nYour personal goal is: Make the best research - and analysis on content about AI and AI agents\n\nTOOLS:\n------\nYou have access - to the following tools:\n\nmultiplier: multiplier(numbers) -> float - Useful - for when you need to multiply two numbers together.\n The input to this - tool should be a comma separated list of numbers of\n length two, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nThe human - asks the AI to calculate 2 times 6, and the AI responds with the correct answer, - 12.Begin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: What is 2 times 6? Return only the number.\n\nThis is the context you - are working with:\n12\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], - "stream": false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '1608' - content-type: - - application/json - cookie: - - _cfuvid=mmDWVPda72fr6EU7NUfzAtK5KCOZi4RsQOabwHXVa98-1704411246977-0-604800000; - __cf_bm=OBFg78fHa0ijnfAz8h6GymXWDgMj3jeHXBT3OmSgjnY-1704411246-1-AWv0YbE4Eky1dIJX16Gtn4yWnbpu0r+Jn1JKPJgjtWT/r8Q4SPDBV2cO8dro6v3MkATOZxXk4/Wcy3m7XL5s9Zk= - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.6.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.6.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8dRW6CHiFSw6vFRNx1T4uAmrAW8dK\",\n \"object\": - \"chat.completion\",\n \"created\": 1704411258,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: - multiplier\\nAction Input: 2,6\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 342,\n \"completion_tokens\": - 23,\n \"total_tokens\": 365\n },\n \"system_fingerprint\": null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 84074a9ddedf01c1-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 04 Jan 2024 23:34:19 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '1023' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299624' - x-ratelimit-remaining-tokens_usage_based: - - '299624' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 75ms - x-ratelimit-reset-tokens_usage_based: - - 75ms - x-request-id: - - f28da711e3c1ff3e058def7cf176f30a - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re - an expert researcher, specialized in technology, software engineering, AI and - startups. You work as a freelancer and is now working on doing research and - analysis for a new customer.\n\nYour personal goal is: Make the best research - and analysis on content about AI and AI agents\n\nTOOLS:\n------\nYou have access - to the following tools:\n\nmultiplier: multiplier(numbers) -> float - Useful - for when you need to multiply two numbers together.\n The input to this - tool should be a comma separated list of numbers of\n length two, representing - the two numbers you want to multiply together.\n For example, `1,2` would - be the input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use - the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: - the action to take, should be one of [multiplier], just the name.\nAction Input: - the input to the action\nObservation: the result of the action\n```\n\nWhen - you have a response for your task, or if you do not need to use a tool, you - MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]\n```This is the summary of your work so far:\nThe human - asks the AI to calculate 2 times 6, and the AI responds with the correct answer, - 12.Begin! This is VERY important to you, your job depends on it!\n\nCurrent - Task: What is 2 times 6? Return only the number.\n\nThis is the context you - are working with:\n12\nThought: Do I need to use a tool? 
Yes\nAction: multiplier\nAction - Input: 2,6\nObservation: 12\nThought: "}], "model": "gpt-4", "n": 1, "stop": - ["\nObservation"], "stream": false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '1712' - content-type: - - application/json - cookie: - - _cfuvid=mmDWVPda72fr6EU7NUfzAtK5KCOZi4RsQOabwHXVa98-1704411246977-0-604800000; - __cf_bm=OBFg78fHa0ijnfAz8h6GymXWDgMj3jeHXBT3OmSgjnY-1704411246-1-AWv0YbE4Eky1dIJX16Gtn4yWnbpu0r+Jn1JKPJgjtWT/r8Q4SPDBV2cO8dro6v3MkATOZxXk4/Wcy3m7XL5s9Zk= - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.6.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.6.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8dRW7NNdda8hGNJVbrkPRDzPdO7dZ\",\n \"object\": - \"chat.completion\",\n \"created\": 1704411259,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: - 12\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n - \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 375,\n \"completion_tokens\": - 15,\n \"total_tokens\": 390\n },\n \"system_fingerprint\": null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 84074aa5fb2e01c1-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Thu, 04 Jan 2024 23:34:21 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '1280' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299599' - x-ratelimit-remaining-tokens_usage_based: - - '299599' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 80ms - x-ratelimit-reset-tokens_usage_based: - - 80ms - x-request-id: - - 888c0944f22db01f2ac38963b94c765d - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "Progressively summarize the - lines of conversation provided, adding onto the previous summary returning a - new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks - of artificial intelligence. The AI thinks artificial intelligence is a force - for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial - intelligence is a force for good?\nAI: Because artificial intelligence will - help humans reach their full potential.\n\nNew summary:\nThe human asks what - the AI thinks of artificial intelligence. The AI thinks artificial intelligence - is a force for good because it will help humans reach their full potential.\nEND - OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to calculate 2 times 6, - and the AI responds with the correct answer, 12.\n\nNew lines of conversation:\nHuman: - What is 2 times 6? 
Return only the number.\n\nThis is the context you are working + and the AI correctly responds with 12.\n\nNew lines of conversation:\nHuman: + What is 2 times 6? Return only the number.\nThis is the context you''re working with:\n12\nAI: 12\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' headers: @@ -1372,16 +1828,16 @@ interactions: connection: - keep-alive content-length: - - '1039' + - '1026' content-type: - application/json cookie: - - _cfuvid=mmDWVPda72fr6EU7NUfzAtK5KCOZi4RsQOabwHXVa98-1704411246977-0-604800000; - __cf_bm=OBFg78fHa0ijnfAz8h6GymXWDgMj3jeHXBT3OmSgjnY-1704411246-1-AWv0YbE4Eky1dIJX16Gtn4yWnbpu0r+Jn1JKPJgjtWT/r8Q4SPDBV2cO8dro6v3MkATOZxXk4/Wcy3m7XL5s9Zk= + - _cfuvid=EOhUXf9b9_QOfeleN81awDzrY5RByUbEJsA6dkxzMP8-1707555182145-0-604800000; + __cf_bm=Ny4Y6DMnCaDhdMrk0.yEtZYAyfySLJ_cn.5y_yGQmQE-1707555182-1-Ad6o9N1fVvEN4kCGNS1Y4Fp1cER00DyQ7AQPGDRyMwCPpwzi1ON9eK6Yd7tLbnTOrg9EKKXdbdbCnHBem3yA7N0= host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -1391,7 +1847,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -1399,19 +1855,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8dRW9lh9rXOLN3eN1fhXjtc7JnCX8\",\n \"object\": - \"chat.completion\",\n \"created\": 1704411261,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to calculate 2 - times 6 twice, and both times the AI responds with the correct answer, 12.\"\n - \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n - \ ],\n \"usage\": {\n \"prompt_tokens\": 190,\n \"completion_tokens\": - 28,\n \"total_tokens\": 218\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRzU7DMBCE73mKlc9plaS/yg0uqBKCA9wQqlxnm5g6XtfeUErVd0dOQwsXH2Y8 + 42/XpwRA6EqUIFQjWbXOjJb76pnd9GWF5vvp3u/lbu+W04fPrNt2jyKNCdp8oOLf1FhR6wyyJnux + lUfJGFvzRbaYzWZFNumNlio0MVY7Hk1H2TyfDImGtMIgSnhLAABO/RnZbIVfooQs/VVaDEHWKMrr + JQDhyURFyBB0YGlZpDdTkWW0Pe5rg9B0rbQgwy4ANwh3K2ACJY3qjGSEAli3GGAOfNAKU5C2gg1x + M+hDRpH3qNgcwWNwZKsAB80N5MVYDE+fr8yGaudpE+eznTFXfautDs3aowxkI19gcpf4OQF473fT + /RtXOE+t4zXTDm0szJeLS5+4fcPNLWaDycTS/NHzIhkIRTgGxna91bZG77zuVxU5k3PyAwAA//8D + AKiyW5YhAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84074aaf99a001c1-GRU + - 85331f165b551692-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -1421,7 +1878,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 04 Jan 2024 23:34:24 GMT + - Sat, 10 Feb 2024 08:53:25 GMT Server: - cloudflare Transfer-Encoding: @@ -1435,7 +1892,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2998' + - '1324' openai-version: - '2020-10-01' strict-transport-security: @@ -1444,22 +1901,17 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299755' - x-ratelimit-remaining-tokens_usage_based: - - '299755' + - '299759' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - 48ms - x-ratelimit-reset-tokens_usage_based: - - 48ms x-request-id: - - 9cbb02f45069e8274ca90e4ee7e690db - http_version: HTTP/1.1 - status_code: 200 + - req_ab2691963759194b30bfc25cb1dd5867 + status: + 
code: 200 + message: OK version: 1 diff --git a/tests/cassettes/test_hierarchical_process.yaml b/tests/cassettes/test_hierarchical_process.yaml index 1017a6bfe..e8b244ac0 100644 --- a/tests/cassettes/test_hierarchical_process.yaml +++ b/tests/cassettes/test_hierarchical_process.yaml @@ -7,29 +7,31 @@ interactions: perform tasks by yourself, you have a lot of experience in the field, which allows you to properly evaluate the work of your team members.\n\nYour personal goal is: Manage the team to complete the task in the best way possible.TOOLS:\n------\nYou - have access to only the following tools:\n\nDelegate work to co-worker: Useful - to delegate a specific task to one of the following co-workers: Researcher, - Senior Writer.\nThe input to this tool should be a pipe (|) separated text of - length 3 (three), representing the co-worker you want to ask it to (one of the - options), the task and all actual context you have for the task.\nFor example, - `coworker|task|context`.\nAsk question to co-worker: Useful to ask a question, - opinion or take from on of the following co-workers: Researcher, Senior Writer.\nThe - input to this tool should be a pipe (|) separated text of length 3 (three), - representing the co-worker you want to ask it to (one of the options), the question - and all actual context you have for the question.\n For example, `coworker|question|context`.\n\nTo - use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? Yes\nAction: the action to take, should be one of [Delegate work - to co-worker, Ask question to co-worker], just the name.\nAction Input: the - input to the action\nObservation: the result of the action\n```\n\nWhen you - have a response for your task, or if you do not need to use a tool, you MUST - use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]This is the summary of your work so far:\nBegin! This is - VERY important to you, your job depends on it!\n\nCurrent Task: Come up with - a list of 5 interesting ideas to explore for an article, then write one amazing - paragraph highlight for each idea that showcases how good an article about this - topic could be. Return the list of ideas with their paragraph and your notes.\n"}], - "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": - 0.7}' + have access to only the following tools:\n\nDelegate work to co-worker: Delegate + work to co-worker(coworker: str, task: str, context: str) - Delegate a specific + task to one of the following co-workers: Researcher, Senior Writer. The input + to this tool should be the role of the coworker, the task you want them to do, + and ALL necessary context to exectue the task, they know nothing about the task, + so share absolute everything you know, don''t reference things but instead explain + them.\nAsk question to co-worker: Ask question to co-worker(coworker: str, question: + str, context: str) - Ask a specific question to one of the following co-workers: + Researcher, Senior Writer. The input to this tool should be the role of the + coworker, the question you have for them, and ALL necessary context to ask the + question properly, they know nothing about the question, so share absolute everything + you know, don''t reference things but instead explain them.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? 
+ Yes\nAction: the tool you wanna use, should be one of [Delegate work to co-worker, + Ask question to co-worker], just the name.\nAction Input: Any and all relevant + information input and context for using the tool\nObservation: the result of + using the tool\n```\n\nWhen you have a response for your task, or if you do + not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need + to use a tool? No\nFinal Answer: [your response here]```This is the summary + of your work so far:\nBegin! This is VERY important to you, your job depends + on it!\n\nCurrent Task: Come up with a list of 5 interesting ideas to explore + for an article, then write one amazing paragraph highlight for each idea that + showcases how good an article about this topic could be. Return the list of + ideas with their paragraph and your notes.\n"}], "model": "gpt-4", "n": 1, "stop": + ["\nObservation"], "stream": false, "temperature": 0.0}' headers: accept: - application/json @@ -38,15 +40,15 @@ interactions: connection: - keep-alive content-length: - - '2320' + - '2515' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - - x64 + - arm64 x-stainless-async: - 'false' x-stainless-lang: @@ -54,32 +56,32 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.11.5 + - 3.11.7 method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8nWFG8zaNNEPHCHBvEHe8CwOSvaDl\",\n \"object\": - \"chat.completion\",\n \"created\": 1706812714,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction: - Delegate work to co-worker\\nAction Input: Researcher|Come up with a list of - 5 interesting ideas to explore for an article.|We need to work on a project - where we create a list of 5 interesting ideas for an article. Try to think out - of the box and come up with ideas that are unique and engaging. The topics can - be from any field like technology, science, lifestyle, culture, or health. 
The - only requirement is that they should be interesting enough to engage a wide - range of readers.\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 498,\n \"completion_tokens\": - 118,\n \"total_tokens\": 616\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA4RTy27bMBC8+ysGOtuB3cSJq0vRJiiQntI0aNAXDJpaS2woLkOuYqdB/r0gJdv1 + qReB2NmZHa6GLyOgMFVRotCNEt16O1k8Vh8Xf2635766V4sP0++fTs+23eebS/7KX4pxYvDqN2nZ + sU40t96SGHY9rAMpoaQ6u5hezOdn87eLDLRckU202svkbDI9n50OjIaNpliU+DECgJf8Td5cRdui + xHS8q7QUo6qpKPdNQBHYpkqhYjRRlJNifAA1OyGX7d413NWNlLhiXMMRVRBGFwkKwmzf4RvFn+69 + TpcpcUWWaiWEDYeH1Kl5ko4Udj24dr6TEpr7eolbiqSCbiiMISo+lLjkltB5bIw0ULAmCniNOYwT + ChTFuBqmIhXTCNp6y4Gw5gDloIIYbWmMfIutlLinvfF+0f/RPBY6wV1DkQYsNtzZCitC58xjR2OQ + q1VtXD2GchUCWXpSTtIw7gJUVxlyuld5/ocu7I2OkEZJbgykKgoRm9ywNq46cpa0e2gzqCcC2nRx + teJO8oDBpFYujch1KPcsTfa36gRy7CJySxntjWwoczfB5CWlmJK1eX6/jN2sG0sqEnzgJ1Ol1lUw + tEZFUQfj839OWySlm+zpBMUQr9d9Li3XPvAqZdh11u7ra+NMbJaBVGSXMhiFfU9/HQG/cv67o0gX + PnDrZSn8QC4Jzk9nvV5xeGoHdPbmYkCFRdkDcD5fjAaLRXyOQu1ybVxNwQeT30MyOnod/QUAAP// + AwDQRWCvBgQAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84ec4fea48914d09-GRU + - 8533104f5c9ed029-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -89,14 +91,14 @@ interactions: Content-Type: - application/json Date: - - Thu, 01 Feb 2024 18:38:48 GMT + - Sat, 10 Feb 2024 08:43:25 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=u.tmEZdLWJJh0JAky720e5vQTXXkbak1vLAbvqJhEiY-1706812728-1-AfwXwbOIbs/ICMGa7ID/ROzghPqSPoWSrFWNGYeQ/pa3JYTifb3+sj5oSXec/ys8Yg0Q3S0fCvt2ZsssnosJDAc=; - path=/; expires=Thu, 01-Feb-24 19:08:48 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=EBAvl3g_XqkWdcmiw8Ml6O7WPWo5VXs8r3SwmJVLUZw-1707554605-1-AUZKfV35H8Wp5ossrlc6jtZ9dhgqLrVrl+ZF0jOXXdI0dpVcIvTttF7f8Pl5ZI2Pih3rfueXBSRF9vmqggAOPCk=; + path=/; expires=Sat, 10-Feb-24 09:13:25 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=awym7w2cNrUnczKj2e0M9vtNAb93tU.MTND.tf9GG64-1706812728028-0-604800000; + - _cfuvid=pTnBIy95g3oEDKgA4dLOO3E_dCa6hkilxXzbh8LPWMM-1707554605073-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -109,7 +111,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '12946' + - '6689' openai-version: - '2020-10-01' strict-transport-security: @@ -121,15 +123,152 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299445' + - '299395' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 111ms + - 121ms x-request-id: - - bb880b6c0bd2535c462be86719d2a9c9 - http_version: HTTP/1.1 - status_code: 200 + - req_0c73043a5c57cfe6b00f8ff7bb6313fe + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: Delegate work to co-worker\nFuntion attributes: {''coworker'': {''type'': + ''string''}, ''task'': {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: + Delegate work to co-worker(coworker: str, task: str, context: str) - Delegate + a specific task to one of the following co-workers: Researcher, Senior Writer. 
+ The input to this tool should be the role of the coworker, the task you want + them to do, and ALL necessary context to exectue the task, they know nothing + about the task, so share absolute everything you know, don''t reference things + but instead explain them.\n--\nFuntion Name: Ask question to co-worker\nFuntion + attributes: {''coworker'': {''type'': ''string''}, ''question'': {''type'': + ''string''}, ''context'': {''type'': ''string''}}\nDescription: Ask question + to co-worker(coworker: str, question: str, context: str) - Ask a specific question + to one of the following co-workers: Researcher, Senior Writer. The input to + this tool should be the role of the coworker, the question you have for them, + and ALL necessary context to ask the question properly, they know nothing about + the question, so share absolute everything you know, don''t reference things + but instead explain them.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? Yes\nAction: Delegate work to co-worker\nAction Input: + coworker: Researcher, task: Come up with a list of 5 interesting ideas to explore + for an article, context: We need to create a list of 5 interesting ideas for + an article. These ideas should be unique, engaging, and relevant to our audience. + They should be topics that our readers would find interesting and would want + to read more about. The ideas can be about anything, but they should be something + that we can write a compelling article about. Please provide a brief description + for each idea. \n\nThe output should be formatted as a JSON instance that conforms + to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. 
+ The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '2898' + content-type: + - application/json + cookie: + - __cf_bm=EBAvl3g_XqkWdcmiw8Ml6O7WPWo5VXs8r3SwmJVLUZw-1707554605-1-AUZKfV35H8Wp5ossrlc6jtZ9dhgqLrVrl+ZF0jOXXdI0dpVcIvTttF7f8Pl5ZI2Pih3rfueXBSRF9vmqggAOPCk=; + _cfuvid=pTnBIy95g3oEDKgA4dLOO3E_dCa6hkilxXzbh8LPWMM-1707554605073-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4RTwW7bMAy95ysInZMibZOmy3VDt2E7DEOxrZiHRJFoW61MuhLdpCvy74NkJ21O + uxgGH9/jE/X0MgJQzqolKFNrMU3rJ9eP9ubmwt1/ifWvvze3u/ns68cfnz7P7t5d7ESNE4M392jk + wDoz3LQexTH1sAmoBZPq+WK6mM9nV9N5Bhq26BOtamUym0yvzi8HRs3OYFRL+D0CAHjJ3+SNLO7U + EqbjQ6XBGHWFanlsAlCBfaooHaOLoqn3OYCGSZCy3fV6XdBLQQCFKjsyyfOKdIOFWkKhPqDHSgvC + lsMDCIPhSfrFUKhxz9Kh6hokiYmRlVLV8KEtyXzHiDqY+khLLaLjQw+/5waha2HrpAYN3kUBLmEO + jgQDRnFUgbOoY7KAu9ZzQCg5gCbQQZzx+EY4H28nvfZPBEK02Xu+hf8MOFU9g9saIw5YrLnzFjYI + HbnHDseAVOnKUTUGTRYCenzSJGkYdwF0Zx2S6VWe39CFW2ciSK0lNwbUFkOEbW4oHdkTZ0m7h7aD + eiJAk7agN9xJHjCYNJrSiFwHTc9SZ3+bTkBOXURuMKO9kS1m7ja4vKSUYfQ+z++XcZj1zaOOCG3g + J2dT6yY4LMFiNMG1KUF5i6hNnT2dFSpdzb6gfUHr9VoNSdwfI+y5agNvUtyp8/5YLx25WK8C6siU + 4hqF256+HwH8yU+lO0m/agM3rayEH5CS4NXsstdTr6/yFT2/XAyosGj/Ciyup6PBoorPUbBZlY4q + DG1w+ekko6P96B8AAAD//wMAkB6kvzEEAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 85331079ee3bd029-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:43:30 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '5427' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299310' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 137ms + x-request-id: + - req_7b2f1118adbb32d32896188ca88e97da + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re an expert researcher, specialized in technology, software engineering, AI and @@ -137,20 +276,21 @@ interactions: analysis for a new customer.\n\nYour personal goal is: Make the best research and analysis on content about AI and AI agentsTOOLS:\n------\nYou have access to only the 
following tools:\n\n\n\nTo use a tool, please use the exact following - format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the action to - take, should be one of [], just the name.\nAction Input: the input to the action\nObservation: - the result of the action\n```\n\nWhen you have a response for your task, or - if you do not need to use a tool, you MUST use the format:\n\n```\nThought: - Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary + format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the tool you + wanna use, should be one of [], just the name.\nAction Input: Any and all relevant + information input and context for using the tool\nObservation: the result of + using the tool\n```\n\nWhen you have a response for your task, or if you do + not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need + to use a tool? No\nFinal Answer: [your response here]```This is the summary of your work so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent Task: Come up with a list of 5 interesting ideas to explore - for an article.\nThis is the context you''re working with:\nWe need to work - on a project where we create a list of 5 interesting ideas for an article. Try - to think out of the box and come up with ideas that are unique and engaging. - The topics can be from any field like technology, science, lifestyle, culture, - or health. The only requirement is that they should be interesting enough to - engage a wide range of readers.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], - "stream": false, "temperature": 0.7}' + for an article\nThis is the context you''re working with:\nWe need to create + a list of 5 interesting ideas for an article. These ideas should be unique, + engaging, and relevant to our audience. They should be topics that our readers + would find interesting and would want to read more about. The ideas can be about + anything, but they should be something that we can write a compelling article + about. Please provide a brief description for each idea.\n"}], "model": "gpt-4", + "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -159,15 +299,17 @@ interactions: connection: - keep-alive content-length: - - '1539' + - '1624' content-type: - application/json + cookie: + - _cfuvid=u_fUK0MqnLcrbhMkg8kKMfrSZpALgzClvD984s71oU8-1703091698056-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - - x64 + - arm64 x-stainless-async: - 'false' x-stainless-lang: @@ -175,41 +317,40 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.11.5 + - 3.11.7 method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8nWFUSklEVZdywRpr5O3OcJDVEyB9\",\n \"object\": - \"chat.completion\",\n \"created\": 1706812728,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? No\\n\\nFinal - Answer: Here are 5 unique and engaging ideas for an article:\\n\\n1. \\\"The - Future of AI in Healthcare: Predicting Diseases Before They Happen\\\" - This - article could explore the potential of AI in healthcare, such as its ability - to predict diseases based on genetic markers and lifestyle factors.\\n\\n2. 
- \\\"How AI is Changing the Way We Cook: An Insight into Smart Kitchens\\\" - - This article could discuss how AI and technology are transforming our kitchens, - from smart refrigerators to AI chefs.\\n\\n3. \\\"Redefining Fitness with AI: - Personalized Workout Plans and Nutritional Advice\\\" - This article could discuss - how AI is being used to create personalized workout plans and provide nutritional - advice, making fitness more accessible to everyone.\\n\\n4. \\\"AI and the Art - World: How Technology is Shaping Creativity\\\" - This article could explore - the intersection of AI and art, including how AI is being used to create unique - works of art and its implications for human creativity.\\n\\n5. \\\"AI in Space - Exploration: The Next Frontier\\\" - This article could discuss the role of - AI in space exploration, from designing spacecrafts to analyzing data from distant - planets.\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n - \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 317,\n \"completion_tokens\": - 243,\n \"total_tokens\": 560\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA4RVwY4bNwy971cQc+nFMXazySbwpVi0m8ZI2wTp9tKmCGgNZ4aJRlREabyDIP9e + UGOvN2mAXgxjJD4+Pj6Kn88AGm6bDTRuwOzG6B89/9S+eHWr27J9+3LYdm93P735cxf5r1evf5GL + ZmURsvtALh+j1k7G6CmzhOXYJcJMhnrx7PzZ06dPri7O68EoLXkL62N+9OTR+dXF5SFiEHakzQb+ + PgMA+Fx/jVto6a7ZQI2vX0ZSxZ6azf0lgCaJty8NqrJmDLlZnQ6dhEyh0r0dpPRD3sDPAlsIRC1k + gaIECFnE/wi/y7vwLrzggB6ug+4pbeAlJQJMBB1PBBwyJdLMoQduCdUg6C56sRuSAANgyuw8bQzr + Yg23A8HNJL6YRiAdXKfMHTtGD9uQyXvuKTjawO3AeowGJ8W399B5IBhYs6S5QmxX0CUZgbPCjnoO + gUOvwMFuSqLMDj3sJX00gnbLlZQoZMAYPTs0MrqQe4D6g0JLE3mJo901OgHoznEtWAKtoGPvqYU9 + 5wF2kgfQ4hypkt1toUP2JZFCHjBDTDJxSzChL7jzBJ5UJWiVymrqSi6J1ibV4zVcb62Cm4nS3OIM + v3JHG/hNNEMkiX7pQ5AMuLd/0lWMEcMMe5y1xiugT4TtXHvVJzOj/RWQkqBF9jN4nqgW/x+9W1ZX + VIHu0GytloOOfO5VT+RkHCm0i4wgATQnwtFUUkqT+dmEj4ladtmck+ku14sjphwHCaSrqphGcsVj + JsCdlAx7E+6kDYzcDxkG8W2V6XINN3mo7d2Op1Yu/dvAdVXBXM+hLCRoEj9RzbUz4gSjWSommtBT + yCvLdjB51ZIO+Kxajp3ExErfl4z8MhhiOEqHsJUZYwBUiIkndLORcpSCruCD7Ezp6NHRWBkYuSg2 + qTYW5o6RtVhGq/nJYtS3rLTUCX9kTLlE/e7Q6CB7h0qgVuzRJdVGSUZW65Il5BBkwtqd6y3oARIw + SQltDdpL8u0atvkbexx7pJZyjBjY7J8IWuHQV0HBDeg9hb4qSDN06Ggp9BjN6Tg8B/iRMHwzGku9 + tfNP64DggdpNmDhJMPn+9+VY3ug7SOQXtwwcYUd5TxQeYtIJcw2vgw08DBjaVfUUBhjIRygKI348 + uIg6e8rssShLcxKplOQOz8FhAB4iozdpTJeaw/KKGfCQKSdke8wsZV0ZCok+FbY3BcFLtiRWUKlv + UpQ9pZqKAqV+Xpzy/aF+aM9TzV0nqb6iBjdiSzYztcDKINHX5P0MXWIKrZ/XzWHPfLlfUF76mGRn + yywU7++/dxxYh/eJUCXYMtIscQn/cgbwT12E5avd1phZY36f5SMFA7x8/HzBa04798Hp+fE0S0Z/ + Ori6vDo7UGx01kzj+45DTykmrovRiJ59OfsXAAD//wMAC2+UXQ8IAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84ec503f9fc8a5d4-GRU + - 8533109ce8ac96ad-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -219,14 +360,14 @@ interactions: Content-Type: - application/json Date: - - Thu, 01 Feb 2024 18:39:12 GMT + - Sat, 10 Feb 2024 08:43:44 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=imax5DlfWNvzGRU2Btbl1Bw5sOyUxSB8DTtRAJqUQH4-1706812752-1-AaCZhQsgjz3qKHzpOoQTqEvx92Rf7P3npAeqQj4LJSbxOOzDX6hHa+TXYdLi+AcfxurO6OgrRHMm6345X8IAjBY=; - path=/; expires=Thu, 01-Feb-24 19:09:12 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=.mPG08GeMY3hdK7cnpiq43j090YU0WVLl_2dw1X7Y9Y-1707554624-1-AUEge8FOZBfiS4LUr4o0N8DEFqNqT/TcV/S/T/p+ltEYhIDDSqUb1t9zAklGiYVoMFvb7+U9V5ZWoUpiDwm/Vb4=; + path=/; expires=Sat, 10-Feb-24 09:13:44 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=Bk4l0dbVnupi0ZrSYO4nbj7PyfhIwxzhpOPDviuRT9U-1706812752080-0-604800000; + - 
_cfuvid=ZyBrFHqcHcRu_Zb6u8y.XWtfBXvIY3uXznalxWZIxRk-1707554624159-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -239,7 +380,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '23337' + - '13362' openai-version: - '2020-10-01' strict-transport-security: @@ -251,15 +392,16 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299639' + - '299618' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 72ms + - 76ms x-request-id: - - fcdcc4a39d161234ba5cc19c17bac960 - http_version: HTTP/1.1 - status_code: 200 + - req_59c59e2b2d7b1970f00e3b0aee5ef087 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -271,28 +413,34 @@ interactions: the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Come - up with a list of 5 interesting ideas to explore for an article.\nThis is the - context you''re working with:\nWe need to work on a project where we create - a list of 5 interesting ideas for an article. Try to think out of the box and - come up with ideas that are unique and engaging. The topics can be from any - field like technology, science, lifestyle, culture, or health. The only requirement - is that they should be interesting enough to engage a wide range of readers.\nAI: - Here are 5 unique and engaging ideas for an article:\n\n1. \"The Future of AI - in Healthcare: Predicting Diseases Before They Happen\" - This article could - explore the potential of AI in healthcare, such as its ability to predict diseases - based on genetic markers and lifestyle factors.\n\n2. \"How AI is Changing the - Way We Cook: An Insight into Smart Kitchens\" - This article could discuss how - AI and technology are transforming our kitchens, from smart refrigerators to - AI chefs.\n\n3. \"Redefining Fitness with AI: Personalized Workout Plans and - Nutritional Advice\" - This article could discuss how AI is being used to create - personalized workout plans and provide nutritional advice, making fitness more - accessible to everyone.\n\n4. \"AI and the Art World: How Technology is Shaping - Creativity\" - This article could explore the intersection of AI and art, including - how AI is being used to create unique works of art and its implications for - human creativity.\n\n5. \"AI in Space Exploration: The Next Frontier\" - This - article could discuss the role of AI in space exploration, from designing spacecrafts - to analyzing data from distant planets.\n\nNew summary:"}], "model": "gpt-4", - "n": 1, "stream": false, "temperature": 0.7}' + up with a list of 5 interesting ideas to explore for an article\nThis is the + context you''re working with:\nWe need to create a list of 5 interesting ideas + for an article. These ideas should be unique, engaging, and relevant to our + audience. They should be topics that our readers would find interesting and + would want to read more about. The ideas can be about anything, but they should + be something that we can write a compelling article about. Please provide a + brief description for each idea.\nAI: Here are five interesting ideas to explore + for an article:\n\n1. 
The Evolution of Artificial Intelligence: This article + could explore the history of AI, from its beginnings in theoretical work to + its current applications. The story of AI''s development is an exciting one, + filled with both successes and failures that provide valuable lessons for the + future.\n\n2. AI in Everyday Life: Most people are not aware of the many ways + AI is already integrated into our daily lives. This article could discuss examples + of everyday AI, from recommendations on streaming services to predictive text + on smartphones, and speculate about what the future might hold.\n\n3. Ethical + Implications of AI: As AI continues to evolve and become more prevalent, there + are many ethical issues that arise. This article could delve into these issues, + such as privacy concerns, job displacement, and potential for misuse. \n\n4. + The Rise of AI Startups: This article could showcase some of the most promising + and innovative AI startups around the world. It could discuss what these companies + are doing, the challenges they face, and what their success could mean for the + future of AI.\n\n5. AI and the Environment: This article could explore the complex + relationship between AI and the environment. On one hand, AI can help us make + more efficient use of resources and predict environmental changes. On the other + hand, training AI models requires a lot of computing power and energy. The article + could discuss these issues and the efforts being made to make AI more environmentally + friendly.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": + 0.7}' headers: accept: - application/json @@ -301,18 +449,18 @@ interactions: connection: - keep-alive content-length: - - '2477' + - '2916' content-type: - application/json cookie: - - __cf_bm=imax5DlfWNvzGRU2Btbl1Bw5sOyUxSB8DTtRAJqUQH4-1706812752-1-AaCZhQsgjz3qKHzpOoQTqEvx92Rf7P3npAeqQj4LJSbxOOzDX6hHa+TXYdLi+AcfxurO6OgrRHMm6345X8IAjBY=; - _cfuvid=Bk4l0dbVnupi0ZrSYO4nbj7PyfhIwxzhpOPDviuRT9U-1706812752080-0-604800000 + - _cfuvid=ZyBrFHqcHcRu_Zb6u8y.XWtfBXvIY3uXznalxWZIxRk-1707554624159-0-604800000; + __cf_bm=.mPG08GeMY3hdK7cnpiq43j090YU0WVLl_2dw1X7Y9Y-1707554624-1-AUEge8FOZBfiS4LUr4o0N8DEFqNqT/TcV/S/T/p+ltEYhIDDSqUb1t9zAklGiYVoMFvb7+U9V5ZWoUpiDwm/Vb4= host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - - x64 + - arm64 x-stainless-async: - 'false' x-stainless-lang: @@ -320,34 +468,30 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.11.5 + - 3.11.7 method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8nWFs5TynmDATlvmbVfsQnbNqPDM0\",\n \"object\": - \"chat.completion\",\n \"created\": 1706812752,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to come up with - 5 unique and engaging ideas for an article in any field such as technology, - science, lifestyle, culture, or health. The AI suggests: 1) The potential of - AI in healthcare, specifically in predicting diseases before they happen. 2) - The transformation of kitchens through AI and technology, including smart refrigerators - and AI chefs. 3) The use of AI in fitness to provide personalized workout plans - and nutritional advice. 
4) The intersection of AI and art, including how AI - creates unique art and its implications for human creativity. 5) The role of - AI in space exploration, including designing spacecrafts and analyzing data - from distant planets.\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 467,\n \"completion_tokens\": - 139,\n \"total_tokens\": 606\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SSQW/bMAyF7/kVhM5pkTZpMuTWwwb0MGwYimHFMBSKTNtsZVIV6XRB0f8+SE4a + 7GLAfHqfnki+zQAcNW4LLvTewpDixaeX5svDr+86XH97GlcPh58/Fn79fP/10Dz5jZsXh+yeMNjJ + dRlkSBGNhCc5ZPSGhXq1WWxublbr61UVBmkwFluX7GJ1sVhfLY+OXiigui38ngEAvNVvycYN/nVb + WMxPlQFVfYdu+3EIwGWJpeK8Kql5Njc/i0HYkGvc+x6hHwfP4PVZwXqE2zswgSADwpjglayHlvYI + I9PLiHNA7nxH3M3BcwMZI+49G1CDXqGVDAWWjULES7ifgDp2HaopmCQKCsL1KtxLHEubKgr3mA+N + P8CoCNJWSEuBfARiwxipQw4lgfUUSnVIkYIvAC2G27t5xWaaAOVi89nGpFPYKmKcHD0l2KG9InI5 + edKR95SFB2S7hM8+9KfwJWbpik498bDLhC00qCFTqrKMFomJu0rSIKnmKD9JSsvLW069cceBvH9M + MkqXsuzK1HmM8aPeEpP2jxm9CpepqUma7O8zgD91Y8b/lsClLEOyR5Nn5AK8WS4nnjsv51ndHNfJ + mZiP5/p6sZwdEzo9qOHw2BJ3mFOmukAl5+x99g8AAP//AwBw1HLENwMAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84ec50d58e52a5d4-GRU + - 853310f1aaae96ad-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -357,7 +501,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 01 Feb 2024 18:39:26 GMT + - Sat, 10 Feb 2024 08:43:48 GMT Server: - cloudflare Transfer-Encoding: @@ -371,7 +515,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '14077' + - '3933' openai-version: - '2020-10-01' strict-transport-security: @@ -383,15 +527,16 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299402' + - '299289' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 119ms + - 142ms x-request-id: - - 9749fb04bc79acb9867749dfdfbf27ac - http_version: HTTP/1.1 - status_code: 200 + - req_8f08c85f64aaea3c989348b8d22a2ce4 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are Crew Manager.\nYou are a seasoned manager with a knack for getting the best out of your team.\nYou @@ -400,50 +545,59 @@ interactions: perform tasks by yourself, you have a lot of experience in the field, which allows you to properly evaluate the work of your team members.\n\nYour personal goal is: Manage the team to complete the task in the best way possible.TOOLS:\n------\nYou - have access to only the following tools:\n\nDelegate work to co-worker: Useful - to delegate a specific task to one of the following co-workers: Researcher, - Senior Writer.\nThe input to this tool should be a pipe (|) separated text of - length 3 (three), representing the co-worker you want to ask it to (one of the - options), the task and all actual context you have for the task.\nFor example, - `coworker|task|context`.\nAsk question to co-worker: Useful to ask a question, - opinion or take from on of the following co-workers: Researcher, Senior Writer.\nThe - input to this tool should be a pipe (|) separated text of length 3 (three), - representing the co-worker you want to ask it to (one of the options), the question - and all actual context you have for the question.\n For example, `coworker|question|context`.\n\nTo - use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? 
Yes\nAction: the action to take, should be one of [Delegate work - to co-worker, Ask question to co-worker], just the name.\nAction Input: the - input to the action\nObservation: the result of the action\n```\n\nWhen you - have a response for your task, or if you do not need to use a tool, you MUST - use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]This is the summary of your work so far:\nBegin! This is - VERY important to you, your job depends on it!\n\nCurrent Task: Come up with - a list of 5 interesting ideas to explore for an article, then write one amazing - paragraph highlight for each idea that showcases how good an article about this - topic could be. Return the list of ideas with their paragraph and your notes.\nThought: - Do I need to use a tool? Yes\nAction: Delegate work to co-worker\nAction Input: - Researcher|Come up with a list of 5 interesting ideas to explore for an article.|We - need to work on a project where we create a list of 5 interesting ideas for - an article. Try to think out of the box and come up with ideas that are unique - and engaging. The topics can be from any field like technology, science, lifestyle, - culture, or health. The only requirement is that they should be interesting - enough to engage a wide range of readers.\nObservation: Here are 5 unique and - engaging ideas for an article:\n\n1. \"The Future of AI in Healthcare: Predicting - Diseases Before They Happen\" - This article could explore the potential of - AI in healthcare, such as its ability to predict diseases based on genetic markers - and lifestyle factors.\n\n2. \"How AI is Changing the Way We Cook: An Insight - into Smart Kitchens\" - This article could discuss how AI and technology are - transforming our kitchens, from smart refrigerators to AI chefs.\n\n3. \"Redefining - Fitness with AI: Personalized Workout Plans and Nutritional Advice\" - This - article could discuss how AI is being used to create personalized workout plans - and provide nutritional advice, making fitness more accessible to everyone.\n\n4. - \"AI and the Art World: How Technology is Shaping Creativity\" - This article - could explore the intersection of AI and art, including how AI is being used - to create unique works of art and its implications for human creativity.\n\n5. - \"AI in Space Exploration: The Next Frontier\" - This article could discuss - the role of AI in space exploration, from designing spacecrafts to analyzing - data from distant planets.\nThought: "}], "model": "gpt-4", "n": 1, "stop": - ["\nObservation"], "stream": false, "temperature": 0.7}' + have access to only the following tools:\n\nDelegate work to co-worker: Delegate + work to co-worker(coworker: str, task: str, context: str) - Delegate a specific + task to one of the following co-workers: Researcher, Senior Writer. The input + to this tool should be the role of the coworker, the task you want them to do, + and ALL necessary context to exectue the task, they know nothing about the task, + so share absolute everything you know, don''t reference things but instead explain + them.\nAsk question to co-worker: Ask question to co-worker(coworker: str, question: + str, context: str) - Ask a specific question to one of the following co-workers: + Researcher, Senior Writer. 
The input to this tool should be the role of the + coworker, the question you have for them, and ALL necessary context to ask the + question properly, they know nothing about the question, so share absolute everything + you know, don''t reference things but instead explain them.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [Delegate work to co-worker, + Ask question to co-worker], just the name.\nAction Input: Any and all relevant + information input and context for using the tool\nObservation: the result of + using the tool\n```\n\nWhen you have a response for your task, or if you do + not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need + to use a tool? No\nFinal Answer: [your response here]```This is the summary + of your work so far:\nBegin! This is VERY important to you, your job depends + on it!\n\nCurrent Task: Come up with a list of 5 interesting ideas to explore + for an article, then write one amazing paragraph highlight for each idea that + showcases how good an article about this topic could be. Return the list of + ideas with their paragraph and your notes.\nThought: Do I need to use a tool? + Yes\nAction: Delegate work to co-worker\nAction Input: coworker: Researcher, + task: Come up with a list of 5 interesting ideas to explore for an article, + context: We need to create a list of 5 interesting ideas for an article. These + ideas should be unique, engaging, and relevant to our audience. They should + be topics that our readers would find interesting and would want to read more + about. The ideas can be about anything, but they should be something that we + can write a compelling article about. Please provide a brief description for + each idea. \nObservation: Here are five interesting ideas to explore for an + article:\n\n1. The Evolution of Artificial Intelligence: This article could + explore the history of AI, from its beginnings in theoretical work to its current + applications. The story of AI''s development is an exciting one, filled with + both successes and failures that provide valuable lessons for the future.\n\n2. + AI in Everyday Life: Most people are not aware of the many ways AI is already + integrated into our daily lives. This article could discuss examples of everyday + AI, from recommendations on streaming services to predictive text on smartphones, + and speculate about what the future might hold.\n\n3. Ethical Implications of + AI: As AI continues to evolve and become more prevalent, there are many ethical + issues that arise. This article could delve into these issues, such as privacy + concerns, job displacement, and potential for misuse. \n\n4. The Rise of AI + Startups: This article could showcase some of the most promising and innovative + AI startups around the world. It could discuss what these companies are doing, + the challenges they face, and what their success could mean for the future of + AI.\n\n5. AI and the Environment: This article could explore the complex relationship + between AI and the environment. On one hand, AI can help us make more efficient + use of resources and predict environmental changes. On the other hand, training + AI models requires a lot of computing power and energy. 
The article could discuss + these issues and the efforts being made to make AI more environmentally friendly.\nThought: + "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.0}' headers: accept: - application/json @@ -452,18 +606,18 @@ interactions: connection: - keep-alive content-length: - - '4028' + - '4690' content-type: - application/json cookie: - - __cf_bm=u.tmEZdLWJJh0JAky720e5vQTXXkbak1vLAbvqJhEiY-1706812728-1-AfwXwbOIbs/ICMGa7ID/ROzghPqSPoWSrFWNGYeQ/pa3JYTifb3+sj5oSXec/ys8Yg0Q3S0fCvt2ZsssnosJDAc=; - _cfuvid=awym7w2cNrUnczKj2e0M9vtNAb93tU.MTND.tf9GG64-1706812728028-0-604800000 + - __cf_bm=EBAvl3g_XqkWdcmiw8Ml6O7WPWo5VXs8r3SwmJVLUZw-1707554605-1-AUZKfV35H8Wp5ossrlc6jtZ9dhgqLrVrl+ZF0jOXXdI0dpVcIvTttF7f8Pl5ZI2Pih3rfueXBSRF9vmqggAOPCk=; + _cfuvid=pTnBIy95g3oEDKgA4dLOO3E_dCa6hkilxXzbh8LPWMM-1707554605073-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - - x64 + - arm64 x-stainless-async: - 'false' x-stainless-lang: @@ -471,34 +625,33 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.11.5 + - 3.11.7 method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8nWG7c3A6XqYBqLoJPpOidhJINoEJ\",\n \"object\": - \"chat.completion\",\n \"created\": 1706812767,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction: - Delegate work to co-worker\\nAction Input: Senior Writer|Write one amazing paragraph - highlight for each idea that showcases how good an article about this topic - could be.|The researcher has come up with the following topics: \\n1. \\\"The - Future of AI in Healthcare: Predicting Diseases Before They Happen\\\"\\n2. - \\\"How AI is Changing the Way We Cook: An Insight into Smart Kitchens\\\"\\n3. - \\\"Redefining Fitness with AI: Personalized Workout Plans and Nutritional Advice\\\"\\n4. - \\\"AI and the Art World: How Technology is Shaping Creativity\\\"\\n5. 
\\\"AI - in Space Exploration: The Next Frontier\\\" \\nPlease write one engaging paragraph - for each topic.\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 850,\n \"completion_tokens\": - 150,\n \"total_tokens\": 1000\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RTwW7bSgy8+ysGOitG3MRJq0sRtClgoHiHpu+lRVsE9IqS2KyWeruUnbTIvxcr + 23F6EbQzy+GQS/6eAYXURYXCdWSuH/zJ6//rD65+J/9e//OOL6Rfh4ev4/DlJizqh/+KMkfo+ic7 + O0TNnfaDZxMNO9pFJuOsurg8vVwuzy9evZ6IXmv2Oawd7OT85PRicbaP6FQcp6LCtxkA/J6+2Vuo + +aGocFoekJ5TopaL6vkSUET1GSkoJUlGwYrySDoNxmGy+16xQmCuYYoxMQim6t/iK6fv4crlGiq8 + Z88tGWOr8T7fdHqSfzke7mAVhtEqON3hFW44iEbcRjGOJYzSfbU7QQODevolocVAkdpIQ4dO2s5L + 2xkajWByHaRmgnVkSJ1uHSVO6HSLVrUGBVA0cZ5Bax0N1kmC6SAOTkdfY80lplofrMIto6NNLtBL + MmiDJSQYR06WjeRcaUp9FJ7jc8eJ9xxFrrCYMFxv1I9T4drgKpo04oQ8VsHYe2k5OC7xao6rFSTg + esPxsaZHfJSGS5zNcW2duBzQD14cZak0aa1KnO9yfJLEOwg3RtHGIZVYTpIUali2ETYSNfQcbI4P + h7Zpk8mD7xJb3j0xIQ9m9vdX46cGP3c/TcKD5gnJFWnzstMaXrQ525T0Qil1+76DQ0uthLaEhEZj + TyYbLiff+0s93fOUKjLVHLGlYHm08nHCm9H740MU+/F9ep57r+0QdZ13JIzeP+ONBEndXWRKGvKM + J9NhF/40A35M+zX+tTLFELUf7M70nkMWfLO83OkVx1U+sovlxZ41NfIviMXibLb3WKTHZNzfNRJa + jkOUaeGy09nT7A8AAAD//wMAF5OKNmcEAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84ec513119aa1aa5-GRU + - 8533110b9a2417ea-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -508,7 +661,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 01 Feb 2024 18:39:44 GMT + - Sat, 10 Feb 2024 08:43:55 GMT Server: - cloudflare Transfer-Encoding: @@ -522,7 +675,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '16878' + - '6749' openai-version: - '2020-10-01' strict-transport-security: @@ -534,37 +687,55 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299024' + - '298855' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 195ms + - 229ms x-request-id: - - 9c0028e09d3e9c061f1490b7313cca84 - http_version: HTTP/1.1 - status_code: 200 + - req_06d2fb84bf34ebae70ce35d99f30b0a3 + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are Senior Writer.\nYou''re - a senior writer, specialized in technology, software engineering, AI and startups. - You work as a freelancer and are now working on writing content for a new customer.\n\nYour - personal goal is: Write the best content about AI and AI agents.TOOLS:\n------\nYou - have access to only the following tools:\n\n\n\nTo use a tool, please use the - exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: - the action to take, should be one of [], just the name.\nAction Input: the input - to the action\nObservation: the result of the action\n```\n\nWhen you have a - response for your task, or if you do not need to use a tool, you MUST use the - format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response - here]This is the summary of your work so far:\nBegin! This is VERY important - to you, your job depends on it!\n\nCurrent Task: Write one amazing paragraph - highlight for each idea that showcases how good an article about this topic - could be.\nThis is the context you''re working with:\nThe researcher has come - up with the following topics: \n1. \"The Future of AI in Healthcare: Predicting - Diseases Before They Happen\"\n2. \"How AI is Changing the Way We Cook: An Insight - into Smart Kitchens\"\n3. 
\"Redefining Fitness with AI: Personalized Workout - Plans and Nutritional Advice\"\n4. \"AI and the Art World: How Technology is - Shaping Creativity\"\n5. \"AI in Space Exploration: The Next Frontier\" \nPlease - write one engaging paragraph for each topic.\n"}], "model": "gpt-4", "n": 1, - "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: Delegate work to co-worker\nFuntion attributes: {''coworker'': {''type'': + ''string''}, ''task'': {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: + Delegate work to co-worker(coworker: str, task: str, context: str) - Delegate + a specific task to one of the following co-workers: Researcher, Senior Writer. + The input to this tool should be the role of the coworker, the task you want + them to do, and ALL necessary context to exectue the task, they know nothing + about the task, so share absolute everything you know, don''t reference things + but instead explain them.\n--\nFuntion Name: Ask question to co-worker\nFuntion + attributes: {''coworker'': {''type'': ''string''}, ''question'': {''type'': + ''string''}, ''context'': {''type'': ''string''}}\nDescription: Ask question + to co-worker(coworker: str, question: str, context: str) - Ask a specific question + to one of the following co-workers: Researcher, Senior Writer. The input to + this tool should be the role of the coworker, the question you have for them, + and ALL necessary context to ask the question properly, they know nothing about + the question, so share absolute everything you know, don''t reference things + but instead explain them.\n\nUse this text to inform a valid ouput schema:\nDo + I need to use a tool? Yes\nAction: Delegate work to co-worker\nAction Input: + coworker: Senior Writer, task: Write one amazing paragraph highlight for each + idea that showcases how good an article about this topic could be, context: + We have a list of 5 interesting ideas for an article. These ideas are: 1. The + Evolution of Artificial Intelligence, 2. AI in Everyday Life, 3. Ethical Implications + of AI, 4. The Rise of AI Startups, 5. AI and the Environment. For each of these + ideas, we need a compelling paragraph that highlights the potential of an article + on this topic. This paragraph should be engaging, informative, and should make + the reader want to read the full article. \n\nThe output should be formatted + as a JSON instance that conforms to the JSON schema below.\n\nAs an example, + for the schema {\"properties\": {\"foo\": {\"title\": \"Foo\", \"description\": + \"a list of strings\", \"type\": \"array\", \"items\": {\"type\": \"string\"}}}, + \"required\": [\"foo\"]}\nthe object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted + instance of the schema. 
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} + is not well-formatted.\n\nHere is the output schema:\n```\n{\"properties\": + {\"function_name\": {\"title\": \"Function Name\", \"description\": \"The name + of the function to be called.\", \"type\": \"string\"}, \"arguments\": {\"title\": + \"Arguments\", \"description\": \"A dictinary of arguments to be passed to the + function.\", \"type\": \"object\"}}, \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], + "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.0}' headers: accept: - application/json @@ -573,15 +744,18 @@ interactions: connection: - keep-alive content-length: - - '1654' + - '2994' content-type: - application/json + cookie: + - __cf_bm=EBAvl3g_XqkWdcmiw8Ml6O7WPWo5VXs8r3SwmJVLUZw-1707554605-1-AUZKfV35H8Wp5ossrlc6jtZ9dhgqLrVrl+ZF0jOXXdI0dpVcIvTttF7f8Pl5ZI2Pih3rfueXBSRF9vmqggAOPCk=; + _cfuvid=pTnBIy95g3oEDKgA4dLOO3E_dCa6hkilxXzbh8LPWMM-1707554605073-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - - x64 + - arm64 x-stainless-async: - 'false' x-stainless-lang: @@ -589,57 +763,34 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.11.5 + - 3.11.7 method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8nWGODia4pebOFlrt1WMkOJYtIske\",\n \"object\": - \"chat.completion\",\n \"created\": 1706812784,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? No\\n\\nFinal - Answer:\\n\\n1. \\\"Imagine a future where AI empowers us to detect diseases - before they arise, transforming healthcare from reactive to proactive. Machine - learning algorithms, trained on vast amounts of patient data, could potentially - predict heart diseases, strokes, or cancers before they manifest, allowing for - early interventions and significantly improving patient outcomes. This article - will delve into the rapid advancements in AI within the healthcare sector and - how these technologies are ushering us into a new era of predictive medicine.\\\"\\n\\n2. - \\\"From the humble home kitchen to grand culinary stages, AI is revolutionizing - the way we cook. Smart appliances, equipped with advanced sensors and predictive - algorithms, are turning kitchens into creative playgrounds, offering personalized - recipes, precise cooking instructions, and even automated meal preparation. - This article explores the fascinating intersection of AI and gastronomy, revealing - how technology is transforming our culinary experiences.\\\"\\n\\n3. \\\"Fitness - reimagined \u2013 that's the promise of AI in the wellness industry. Picture - a personal trainer who knows your strengths, weaknesses, and nutritional needs - intimately. An AI-powered fitness app can provide this personalized experience, - adapting your workout plans and dietary recommendations in real-time based on - your progress and feedback. Join us as we unpack how AI is revolutionizing the - fitness landscape, offering personalized, data-driven approaches to health and - well-being.\\\"\\n\\n4. \\\"Art and AI may seem like unlikely partners, but - their synergy is sparking a creative revolution. AI algorithms are now creating - mesmerizing artworks, challenging our perceptions of creativity and originality. 
- From AI-assisted painting to generative music composition, this article will - take you on a journey through the fascinating world of AI in art, exploring - how technology is reshaping the boundaries of human creativity.\\\"\\n\\n5. - \\\"The vast expanse of space, once the sole domain of astronauts and rovers, - is the next frontier for AI. AI technology is playing an increasingly vital - role in space exploration, from predicting space weather to assisting in interstellar - navigation. This article will delve into the exciting intersection of AI and - space exploration, exploring how these advanced technologies are helping us - uncover the mysteries of the cosmos.\\\"\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 346,\n \"completion_tokens\": 429,\n \"total_tokens\": 775\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RU227bRhB911cM9pkSfJNt6C1AncZtn+oASVsW9mg55E603GF3h1IcQ/9e7FIX + 54UQ58w5czQXvs0ADDdmBcY6VNsPfn7/X/Px22/yeLXb/f7311+/JPSbz5++7v5KT8imygxZfyOr + R9bCSj94UpYwwTYSKmXVy7uLu+Xy5vZ6WYBeGvKZ1g06v5lf3F5eHxhO2FIyK/hnBgDwVp7ZW2jo + u1nBRXWM9JQSdmRWpyQAE8XniMGUOCkGNdUZtBKUQrH7iSIBJ0DYkffzVmKPqtQAh0yzBNKCOoJk + HfUIa0zUgIQSG6JsuSnJE5ElrOpQh5eXlzq81QGgNu0YbAaeA/ZUmxXU5hfy1KES7CRuQAWszPNP + irWpJhbGbuwpaMqMopSjVo5pWeaJAkuEL5H1xMxZimkzZRQIJBBgjz84dDBgxC7i4MBx5zx3TqGV + CITWATeEoA4VkpOdxUQJnOygE2kAA2BUtp4A1zIqqOMEKgNbsDL6Btb0zkTp8Xc9+CBwuCVA8Jw0 + t3QJHJQiJc2ucuFUfJyrLOCzo0QHDCOt4LLE4GErfswtzUIfonLLltHDY1DynjsKliq4WsCHR+AA + D1uKrw2+wh/cUgXXC3hQxzYT+sGzLWNLReuxgpupxp+caArBk2LUcUgVLIskhqZM/yFsOUrIU1rA + x2MPp3U5+q5gRxCIGkDIR5H9/TSF0u3TKNK0VpK3M/8jad+3vWzdsefZJqd3SskdhgAUOuw4dNV5 + L7dUFd+HpB43VEpFwoYi7DBo3sP8WuLt6P1pELXJQ93XYV822xwOaX+6QC/dEGWdrzWM3p/iLQdO + 7jkSJgn52pLKMNH3M4B/y6WPPx2vGaL0gz6rbChkwdu7q0nPnD8qZ/Ty/uaAqij6M3C/vJ0dLJr0 + mpT655ZDR3GIXC4/G53tZ/8DAAD//wMA71AiSfAEAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84ec519ccebf01b0-GRU + - 85331136cf6517ea-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -649,14 +800,146 @@ interactions: Content-Type: - application/json Date: - - Thu, 01 Feb 2024 18:40:35 GMT + - Sat, 10 Feb 2024 08:44:03 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '8047' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '300000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299286' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 142ms + x-request-id: + - req_a1e7e4c3a5b07c904bde939c482128ff + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are Senior Writer.\nYou''re + a senior writer, specialized in technology, software engineering, AI and startups. + You work as a freelancer and are now working on writing content for a new customer.\n\nYour + personal goal is: Write the best content about AI and AI agents.TOOLS:\n------\nYou + have access to only the following tools:\n\n\n\nTo use a tool, please use the + exact following format:\n\n```\nThought: Do I need to use a tool? 
Yes\nAction: + the tool you wanna use, should be one of [], just the name.\nAction Input: Any + and all relevant information input and context for using the tool\nObservation: + the result of using the tool\n```\n\nWhen you have a response for your task, + or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: + Do I need to use a tool? No\nFinal Answer: [your response here]```This is the + summary of your work so far:\nBegin! This is VERY important to you, your job + depends on it!\n\nCurrent Task: Write one amazing paragraph highlight for each + idea that showcases how good an article about this topic could be\nThis is the + context you''re working with:\nWe have a list of 5 interesting ideas for an + article. These ideas are: 1. The Evolution of Artificial Intelligence, 2. AI + in Everyday Life, 3. Ethical Implications of AI, 4. The Rise of AI Startups, + 5. AI and the Environment. For each of these ideas, we need a compelling paragraph + that highlights the potential of an article on this topic. This paragraph should + be engaging, informative, and should make the reader want to read the full article.\n"}], + "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1690' + content-type: + - application/json + cookie: + - _cfuvid=qVpxYVGQ_iwAkrQrePYbf52GMURpsQ__qprXXMk79wI-1703091772665-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA3xWTW8bRwy9+1cQOstCUttJoUvhIg7goijQb6B1EXBnqV3Gs+RmhrOKXOS/F5yV + tE4C9CIIO0PO4+N7nPn3AmDF7WoLq9CjhWGMl99+aN+mu/dB77i//f5V/3339PTWfv6h/+v+z3a1 + 9ght3lOwU9Qm6DBGMlaZl0MiNPKsL1+/eH1zc/3q+qouDNpS9LButMvryxevXl4dI3rlQHm1hb8v + AAD+rb+OTVr6uNrCi/Xpy0A5Y0er7XkTwCpp9C8rzJmzodhqvSwGFSOpcH/rtXS9beGNwj0IUQum + UDIBgqnG7+AnfZAHecuCEW4l7yltwb+83MBvPcHdpLF4paA7uE3GOw6MEe7FKEbuSAJtH+QNTwQt + 0QgspmA9wQ5zYEFj6eC9liR08By45OBnOdawSzoAW4a+DE0kaKhjEZYuA2ZACCqBRgMWyIE9BnYc + KjRTQKnpuoQRRkzmR2lJ0CLHA0SeKANK63nQMB5y3eH9SR4pOqGnyhu4+zhGTQS97uH2HnrMQJPG + iVrQiVKt7UCY8rr+faQDDBwpmwplsB4NepwIco8jtbWiLune+nUF4DETZ1bB5JHSZmio53nJG9NO + KIEGEssb+F3C+dREA6ZHdHIsoeSdpgHPvbmv6f04zKZFWid+VFeCk73TOcmuWEm08RZ/s/EoFrib + KB1aPMCPvPNuPj+0SCYS4GHEYMeDWCq35GHgcZXfYwt9JQ+YbOwrI46q14EAxzGyl5a9YVmrBgZq + Gec9hNH6gIk28CNhEsBGi1UQuTQWnXaDMenOq4M9HvKpQQ2FegJkjiRWFSCUHOhhUUHSYiy0BpIe + JTg/QWUi4VmBY9K2BOOJ7TA3q6VQW3U54CNLt4jDQUkZKGnJc2Fh1s+RocRdb4A2H79j6SgZjyc2 + 9sA2CyRXvly6CcOcYc/Wg1HoRaN2h7Nq9ppiC5hq9SXXFl5t4M56Du7I4UsU2wf5g8TbvbiSjrsj + NofEYv3/enLfUw0+2QMGIsuQKI8qmRuObIfPWeGvYIAKjIknDIc1vNcGMoWSzhy7EMgwgmga8mKS + Qd3L2kTujtn2NDvLdI+pzVXrLU0UdXS31MiSaQNvKE7ParZ5CF6OSSf1PkJLDZprsyrs9n6mJS9n + E+aSyIXl2w0fqU4ZklzZfM5BpBpGHwpb9aZD8N5czxP0F850JOJXw2RlzNsH+ZNNKOfZ18uOfNwx + zxahPYQeh/FE5tKJNbSJpwrOK1zUkug0sTfwhvNsZFfcPF0sIccm4tOs/mFEYSfCBx4mh3TKOeqe + 0hFW9atzOt98H90qTSRv13z3Vag+nciOLVxw8JP3oi3ZElP+ytycoAh/KOSlxkjS0Vw+Jx+cRdom + UbWfY6gZn6lkGXDW0wF6jW0ddYncXqda5qHnxXzhq6Zk9qJrw242pynqMXcycVJxZW0f5DOJu1sz + hS9Gb7XXEnRyz7MTZ/fkkg1Z8OSe07Q9Xjh8Ul3J820ddGjQIEQenOnQo3S09tGVKU0EDWvLE6V8 + 
tpSOxoPTnihrSYGgGEd+wlkWX07Xk6omOg/Vr0HMbcYFfTzTeip+6d8RxajJirC5wthHN+V6q62O + b5VP50dO1M4l5Q8iKTGev+9YOPfvEmFW8QdNNh3n8E8XAP/Ux1T57H20GpMOo70zfSTxhFc313O+ + 1fJuW1avX1wdV00N47Lw+ub1xRHiKh+y0fBunuOjz80j0ItPF/8BAAD//wMAyXzRJ1MKAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8533116a387396d7-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 08:44:21 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=WmFNdtD.V9bIgBQVRqQBv9BhZP0ZofVfpkvpQclwFfM-1706812835-1-AUjyNjOV4GvbD1lpaYZpm1/e7us73aHw2R78TWZyyPTuflg45nO5NKiMPvenhaUMTf4i7YHTjKO4IzcpqQ/bj6w=; - path=/; expires=Thu, 01-Feb-24 19:10:35 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=HK6aONrGD31TZ4sld6V4bsDg5W4i5xEdjmgL3Yr8XgU-1707554661-1-AU/qKM2c4KwrzgUveUchtPBFJMTuW88dICuFHKLK3WemxS4xwoIPUFqDA8FibWRTvpukyeNGBj/up79fNMI5U2A=; + path=/; expires=Sat, 10-Feb-24 09:14:21 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=oiEdDwsPJinPJAV6H.B7JBTdZ_1mg38yvcDd4zwbdas-1706812835609-0-604800000; + - _cfuvid=SRoFRQPZDNTyo98s9j9bLplJ5tOFCJXVyBNCTAlJt5Q-1707554661709-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -669,7 +952,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '51280' + - '18067' openai-version: - '2020-10-01' strict-transport-security: @@ -681,15 +964,16 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299614' + - '299600' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 77ms + - 79ms x-request-id: - - ffb55bca928f0e1013f6fe066fc19dd7 - http_version: HTTP/1.1 - status_code: 200 + - req_3b533eecec4a6c44aa6e015c1ad70c30 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -702,44 +986,41 @@ interactions: is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Write one amazing paragraph highlight for each idea that showcases how good an article - about this topic could be.\nThis is the context you''re working with:\nThe researcher - has come up with the following topics: \n1. \"The Future of AI in Healthcare: - Predicting Diseases Before They Happen\"\n2. \"How AI is Changing the Way We - Cook: An Insight into Smart Kitchens\"\n3. \"Redefining Fitness with AI: Personalized - Workout Plans and Nutritional Advice\"\n4. \"AI and the Art World: How Technology - is Shaping Creativity\"\n5. \"AI in Space Exploration: The Next Frontier\" \nPlease - write one engaging paragraph for each topic.\nAI: 1. \"Imagine a future where - AI empowers us to detect diseases before they arise, transforming healthcare - from reactive to proactive. Machine learning algorithms, trained on vast amounts - of patient data, could potentially predict heart diseases, strokes, or cancers - before they manifest, allowing for early interventions and significantly improving - patient outcomes. This article will delve into the rapid advancements in AI - within the healthcare sector and how these technologies are ushering us into - a new era of predictive medicine.\"\n\n2. \"From the humble home kitchen to - grand culinary stages, AI is revolutionizing the way we cook. 
Smart appliances, - equipped with advanced sensors and predictive algorithms, are turning kitchens - into creative playgrounds, offering personalized recipes, precise cooking instructions, - and even automated meal preparation. This article explores the fascinating intersection - of AI and gastronomy, revealing how technology is transforming our culinary - experiences.\"\n\n3. \"Fitness reimagined \u2013 that''s the promise of AI in - the wellness industry. Picture a personal trainer who knows your strengths, - weaknesses, and nutritional needs intimately. An AI-powered fitness app can - provide this personalized experience, adapting your workout plans and dietary - recommendations in real-time based on your progress and feedback. Join us as - we unpack how AI is revolutionizing the fitness landscape, offering personalized, - data-driven approaches to health and well-being.\"\n\n4. \"Art and AI may seem - like unlikely partners, but their synergy is sparking a creative revolution. - AI algorithms are now creating mesmerizing artworks, challenging our perceptions - of creativity and originality. From AI-assisted painting to generative music - composition, this article will take you on a journey through the fascinating - world of AI in art, exploring how technology is reshaping the boundaries of - human creativity.\"\n\n5. \"The vast expanse of space, once the sole domain - of astronauts and rovers, is the next frontier for AI. AI technology is playing - an increasingly vital role in space exploration, from predicting space weather - to assisting in interstellar navigation. This article will delve into the exciting - intersection of AI and space exploration, exploring how these advanced technologies - are helping us uncover the mysteries of the cosmos.\"\n\nNew summary:"}], "model": - "gpt-4", "n": 1, "stream": false, "temperature": 0.7}' + about this topic could be\nThis is the context you''re working with:\nWe have + a list of 5 interesting ideas for an article. These ideas are: 1. The Evolution + of Artificial Intelligence, 2. AI in Everyday Life, 3. Ethical Implications + of AI, 4. The Rise of AI Startups, 5. AI and the Environment. For each of these + ideas, we need a compelling paragraph that highlights the potential of an article + on this topic. This paragraph should be engaging, informative, and should make + the reader want to read the full article.\nAI: 1. The Evolution of Artificial + Intelligence:\nDive deep into the fascinating journey of artificial intelligence, + from its humble beginnings as a concept in science fiction to an integral part + of our daily lives and a catalyst of modern innovations. Explore how AI has + evolved over the years, the key milestones that have shaped its growth, and + the visionary minds behind these advancements. Uncover the remarkable transformation + of AI and its astounding potential for the future.\n\n2. AI in Everyday Life:\nUncover + the unseen impact of AI in our every day lives, from our smartphones and home + appliances to social media and healthcare. Learn about the subtle yet profound + ways AI has become a silent partner in your daily routine, enhancing convenience, + productivity, and decision-making. Explore the numerous applications of AI right + at your fingertips and how it shapes our interactions with technology and the + world around us.\n\n3. Ethical Implications of AI:\nVenture into the ethical + labyrinth of artificial intelligence, where innovation meets responsibility. 
+ Explore the implications of AI on privacy, job security, and societal norms, + and the moral obligations we have towards its development and use. Delve into + the thought-provoking debates about AI ethics and the measures being taken to + ensure its responsible and equitable use.\n\n4. The Rise of AI Startups:\nWitness + the rise of AI startups, the new champions of innovation, driving the technology + revolution. Discover how these trailblazing companies are harnessing the power + of AI to solve complex problems, create new markets, and revolutionize industries. + Learn about their unique challenges, their groundbreaking solutions, and the + potential they hold for reshaping the future of technology and business.\n\n5. + AI and the Environment:\nExplore the intersection of AI and the environment, + where technology meets sustainability. Uncover how AI is being used to combat + climate change, conserve biodiversity, and optimize resource utilization. Learn + about the innovative ways AI is being used to create a sustainable future and + the challenges and opportunities it presents.\n\nNew summary:"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -748,18 +1029,18 @@ interactions: connection: - keep-alive content-length: - - '3851' + - '3594' content-type: - application/json cookie: - - __cf_bm=WmFNdtD.V9bIgBQVRqQBv9BhZP0ZofVfpkvpQclwFfM-1706812835-1-AUjyNjOV4GvbD1lpaYZpm1/e7us73aHw2R78TWZyyPTuflg45nO5NKiMPvenhaUMTf4i7YHTjKO4IzcpqQ/bj6w=; - _cfuvid=oiEdDwsPJinPJAV6H.B7JBTdZ_1mg38yvcDd4zwbdas-1706812835609-0-604800000 + - _cfuvid=SRoFRQPZDNTyo98s9j9bLplJ5tOFCJXVyBNCTAlJt5Q-1707554661709-0-604800000; + __cf_bm=HK6aONrGD31TZ4sld6V4bsDg5W4i5xEdjmgL3Yr8XgU-1707554661-1-AU/qKM2c4KwrzgUveUchtPBFJMTuW88dICuFHKLK3WemxS4xwoIPUFqDA8FibWRTvpukyeNGBj/up79fNMI5U2A= host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - - x64 + - arm64 x-stainless-async: - 'false' x-stainless-lang: @@ -767,35 +1048,31 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.11.5 + - 3.11.7 method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8nWHDjDbqEbewjOuX1ybxLqzTwlLn\",\n \"object\": - \"chat.completion\",\n \"created\": 1706812835,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The AI has provided engaging highlights - for five different topics on the application of artificial intelligence. The - first topic discusses how AI in healthcare could predict diseases before they - happen, transforming reactive care into proactive care. The second topic explores - how AI is changing the culinary world by transforming kitchens with smart appliances - and predictive algorithms. The third topic notes how AI is redefining fitness - by providing personalized workout plans and nutritional advice. The fourth topic - showcases how AI is influencing the art world, challenging perceptions of creativity - and originality. 
The final topic highlights the role of AI in space exploration, - assisting in tasks such as predicting space weather and interstellar navigation.\"\n - \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n - \ ],\n \"usage\": {\n \"prompt_tokens\": 688,\n \"completion_tokens\": - 125,\n \"total_tokens\": 813\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1RTy27bMBC8+ysWPMuBjfqR+OaDUxjNqQ+gSFEEDLWitqV2WXLtRAjy7wXlh5CL + CO5wZ2fI0dsEwFBtNmBca9V1MUxv/9Wf7+5fH3/0L/2Xn4tHd0sznj+8NPm4taYqHfL8B51eum6c + dDGgkvAJdgmtYmGdr2fr5XKxWs0HoJMaQ2nzUaeL6Ww1/3TuaIUcZrOBXxMAgLfhW7Rxja9mA7Pq + UukwZ+vRbK6HAEySUCrG5kxZLaupRtAJK/Ig93uLsN2DR8ZUJAKyt57Yg+Uaig8MoWyjTdYnG9sM + KtCSbwP5ViFKoSIbwCYlFzCDMDR0RFCJ5PIGyozdUcKhXAhIA9uk1JArTXvWMsAjO6yKEmLYHTH1 + te3hgRqsYKctuXK0i4GcLSR5YNlXA/VXynjawze1SQ8xV4P67X5YtIznIyXhDllvYGddO/qBmORI + NWawDMWKK3Z9oC5mBGKVgSFhjuh09FVBk6QbMJ/kRdthGHETDsXMRSFphtpS6MHGUX8FeHblhDPV + 5fJP9cJHXbROz57yB0+DFAkXw1QkX63ZAPmQ1RLbZwqk/Y05P/r7NS1BfEzyXJLFhxCu9YaYcvuU + 0GbhkoysEk/t7xOA30MqDx+CZmKSLuqTyl/kQrha3p34zPgDjOjt+gyqqA1jfb1YTc4KTe6zYvfU + EHtMMdEQ0qJz8j75DwAA//8DAGza8PubAwAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84ec52decf2201b0-GRU + - 853311dbdf9a96d7-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -805,7 +1082,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 01 Feb 2024 18:40:53 GMT + - Sat, 10 Feb 2024 08:44:26 GMT Server: - cloudflare Transfer-Encoding: @@ -819,7 +1096,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '17177' + - '4205' openai-version: - '2020-10-01' strict-transport-security: @@ -831,15 +1108,16 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299061' + - '299120' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 187ms + - 175ms x-request-id: - - a3fc0a72bc1ebb52bc318b800b43877c - http_version: HTTP/1.1 - status_code: 200 + - req_2d88d7714db678d617280e78883f5b47 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "You are Crew Manager.\nYou are a seasoned manager with a knack for getting the best out of your team.\nYou @@ -848,89 +1126,94 @@ interactions: perform tasks by yourself, you have a lot of experience in the field, which allows you to properly evaluate the work of your team members.\n\nYour personal goal is: Manage the team to complete the task in the best way possible.TOOLS:\n------\nYou - have access to only the following tools:\n\nDelegate work to co-worker: Useful - to delegate a specific task to one of the following co-workers: Researcher, - Senior Writer.\nThe input to this tool should be a pipe (|) separated text of - length 3 (three), representing the co-worker you want to ask it to (one of the - options), the task and all actual context you have for the task.\nFor example, - `coworker|task|context`.\nAsk question to co-worker: Useful to ask a question, - opinion or take from on of the following co-workers: Researcher, Senior Writer.\nThe - input to this tool should be a pipe (|) separated text of length 3 (three), - representing the co-worker you want to ask it to (one of the options), the question - and all actual context you have for the question.\n For example, `coworker|question|context`.\n\nTo - use a tool, please use the exact following format:\n\n```\nThought: Do I need - to use a tool? 
Yes\nAction: the action to take, should be one of [Delegate work - to co-worker, Ask question to co-worker], just the name.\nAction Input: the - input to the action\nObservation: the result of the action\n```\n\nWhen you - have a response for your task, or if you do not need to use a tool, you MUST - use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: - [your response here]This is the summary of your work so far:\nBegin! This is - VERY important to you, your job depends on it!\n\nCurrent Task: Come up with - a list of 5 interesting ideas to explore for an article, then write one amazing - paragraph highlight for each idea that showcases how good an article about this - topic could be. Return the list of ideas with their paragraph and your notes.\nThought: + have access to only the following tools:\n\nDelegate work to co-worker: Delegate + work to co-worker(coworker: str, task: str, context: str) - Delegate a specific + task to one of the following co-workers: Researcher, Senior Writer. The input + to this tool should be the role of the coworker, the task you want them to do, + and ALL necessary context to exectue the task, they know nothing about the task, + so share absolute everything you know, don''t reference things but instead explain + them.\nAsk question to co-worker: Ask question to co-worker(coworker: str, question: + str, context: str) - Ask a specific question to one of the following co-workers: + Researcher, Senior Writer. The input to this tool should be the role of the + coworker, the question you have for them, and ALL necessary context to ask the + question properly, they know nothing about the question, so share absolute everything + you know, don''t reference things but instead explain them.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [Delegate work to co-worker, + Ask question to co-worker], just the name.\nAction Input: Any and all relevant + information input and context for using the tool\nObservation: the result of + using the tool\n```\n\nWhen you have a response for your task, or if you do + not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need + to use a tool? No\nFinal Answer: [your response here]```This is the summary + of your work so far:\nBegin! This is VERY important to you, your job depends + on it!\n\nCurrent Task: Come up with a list of 5 interesting ideas to explore + for an article, then write one amazing paragraph highlight for each idea that + showcases how good an article about this topic could be. Return the list of + ideas with their paragraph and your notes.\nThought: Do I need to use a tool? + Yes\nAction: Delegate work to co-worker\nAction Input: coworker: Researcher, + task: Come up with a list of 5 interesting ideas to explore for an article, + context: We need to create a list of 5 interesting ideas for an article. These + ideas should be unique, engaging, and relevant to our audience. They should + be topics that our readers would find interesting and would want to read more + about. The ideas can be about anything, but they should be something that we + can write a compelling article about. Please provide a brief description for + each idea. \nObservation: Here are five interesting ideas to explore for an + article:\n\n1. The Evolution of Artificial Intelligence: This article could + explore the history of AI, from its beginnings in theoretical work to its current + applications. 
The story of AI''s development is an exciting one, filled with + both successes and failures that provide valuable lessons for the future.\n\n2. + AI in Everyday Life: Most people are not aware of the many ways AI is already + integrated into our daily lives. This article could discuss examples of everyday + AI, from recommendations on streaming services to predictive text on smartphones, + and speculate about what the future might hold.\n\n3. Ethical Implications of + AI: As AI continues to evolve and become more prevalent, there are many ethical + issues that arise. This article could delve into these issues, such as privacy + concerns, job displacement, and potential for misuse. \n\n4. The Rise of AI + Startups: This article could showcase some of the most promising and innovative + AI startups around the world. It could discuss what these companies are doing, + the challenges they face, and what their success could mean for the future of + AI.\n\n5. AI and the Environment: This article could explore the complex relationship + between AI and the environment. On one hand, AI can help us make more efficient + use of resources and predict environmental changes. On the other hand, training + AI models requires a lot of computing power and energy. The article could discuss + these issues and the efforts being made to make AI more environmentally friendly.\nThought: Do I need to use a tool? Yes\nAction: Delegate work to co-worker\nAction Input: - Researcher|Come up with a list of 5 interesting ideas to explore for an article.|We - need to work on a project where we create a list of 5 interesting ideas for - an article. Try to think out of the box and come up with ideas that are unique - and engaging. The topics can be from any field like technology, science, lifestyle, - culture, or health. The only requirement is that they should be interesting - enough to engage a wide range of readers.\nObservation: Here are 5 unique and - engaging ideas for an article:\n\n1. \"The Future of AI in Healthcare: Predicting - Diseases Before They Happen\" - This article could explore the potential of - AI in healthcare, such as its ability to predict diseases based on genetic markers - and lifestyle factors.\n\n2. \"How AI is Changing the Way We Cook: An Insight - into Smart Kitchens\" - This article could discuss how AI and technology are - transforming our kitchens, from smart refrigerators to AI chefs.\n\n3. \"Redefining - Fitness with AI: Personalized Workout Plans and Nutritional Advice\" - This - article could discuss how AI is being used to create personalized workout plans - and provide nutritional advice, making fitness more accessible to everyone.\n\n4. - \"AI and the Art World: How Technology is Shaping Creativity\" - This article - could explore the intersection of AI and art, including how AI is being used - to create unique works of art and its implications for human creativity.\n\n5. - \"AI in Space Exploration: The Next Frontier\" - This article could discuss - the role of AI in space exploration, from designing spacecrafts to analyzing - data from distant planets.\nThought: Do I need to use a tool? Yes\nAction: Delegate - work to co-worker\nAction Input: Senior Writer|Write one amazing paragraph highlight - for each idea that showcases how good an article about this topic could be.|The - researcher has come up with the following topics: \n1. \"The Future of AI in - Healthcare: Predicting Diseases Before They Happen\"\n2. 
\"How AI is Changing - the Way We Cook: An Insight into Smart Kitchens\"\n3. \"Redefining Fitness with - AI: Personalized Workout Plans and Nutritional Advice\"\n4. \"AI and the Art - World: How Technology is Shaping Creativity\"\n5. \"AI in Space Exploration: - The Next Frontier\" \nPlease write one engaging paragraph for each topic.\nObservation: - 1. \"Imagine a future where AI empowers us to detect diseases before they arise, - transforming healthcare from reactive to proactive. Machine learning algorithms, - trained on vast amounts of patient data, could potentially predict heart diseases, - strokes, or cancers before they manifest, allowing for early interventions and - significantly improving patient outcomes. This article will delve into the rapid - advancements in AI within the healthcare sector and how these technologies are - ushering us into a new era of predictive medicine.\"\n\n2. \"From the humble - home kitchen to grand culinary stages, AI is revolutionizing the way we cook. - Smart appliances, equipped with advanced sensors and predictive algorithms, - are turning kitchens into creative playgrounds, offering personalized recipes, - precise cooking instructions, and even automated meal preparation. This article - explores the fascinating intersection of AI and gastronomy, revealing how technology - is transforming our culinary experiences.\"\n\n3. \"Fitness reimagined \u2013 - that''s the promise of AI in the wellness industry. Picture a personal trainer - who knows your strengths, weaknesses, and nutritional needs intimately. An AI-powered - fitness app can provide this personalized experience, adapting your workout - plans and dietary recommendations in real-time based on your progress and feedback. - Join us as we unpack how AI is revolutionizing the fitness landscape, offering - personalized, data-driven approaches to health and well-being.\"\n\n4. \"Art - and AI may seem like unlikely partners, but their synergy is sparking a creative - revolution. AI algorithms are now creating mesmerizing artworks, challenging - our perceptions of creativity and originality. From AI-assisted painting to - generative music composition, this article will take you on a journey through - the fascinating world of AI in art, exploring how technology is reshaping the - boundaries of human creativity.\"\n\n5. \"The vast expanse of space, once the - sole domain of astronauts and rovers, is the next frontier for AI. AI technology - is playing an increasingly vital role in space exploration, from predicting - space weather to assisting in interstellar navigation. This article will delve - into the exciting intersection of AI and space exploration, exploring how these - advanced technologies are helping us uncover the mysteries of the cosmos.\"\nThought: - "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": - 0.7}' + coworker: Senior Writer, task: Write one amazing paragraph highlight for each + idea that showcases how good an article about this topic could be, context: + We have a list of 5 interesting ideas for an article. These ideas are: 1. The + Evolution of Artificial Intelligence, 2. AI in Everyday Life, 3. Ethical Implications + of AI, 4. The Rise of AI Startups, 5. AI and the Environment. For each of these + ideas, we need a compelling paragraph that highlights the potential of an article + on this topic. This paragraph should be engaging, informative, and should make + the reader want to read the full article. \nObservation: 1. 
The Evolution of + Artificial Intelligence:\nDive deep into the fascinating journey of artificial + intelligence, from its humble beginnings as a concept in science fiction to + an integral part of our daily lives and a catalyst of modern innovations. Explore + how AI has evolved over the years, the key milestones that have shaped its growth, + and the visionary minds behind these advancements. Uncover the remarkable transformation + of AI and its astounding potential for the future.\n\n2. AI in Everyday Life:\nUncover + the unseen impact of AI in our every day lives, from our smartphones and home + appliances to social media and healthcare. Learn about the subtle yet profound + ways AI has become a silent partner in your daily routine, enhancing convenience, + productivity, and decision-making. Explore the numerous applications of AI right + at your fingertips and how it shapes our interactions with technology and the + world around us.\n\n3. Ethical Implications of AI:\nVenture into the ethical + labyrinth of artificial intelligence, where innovation meets responsibility. + Explore the implications of AI on privacy, job security, and societal norms, + and the moral obligations we have towards its development and use. Delve into + the thought-provoking debates about AI ethics and the measures being taken to + ensure its responsible and equitable use.\n\n4. The Rise of AI Startups:\nWitness + the rise of AI startups, the new champions of innovation, driving the technology + revolution. Discover how these trailblazing companies are harnessing the power + of AI to solve complex problems, create new markets, and revolutionize industries. + Learn about their unique challenges, their groundbreaking solutions, and the + potential they hold for reshaping the future of technology and business.\n\n5. + AI and the Environment:\nExplore the intersection of AI and the environment, + where technology meets sustainability. Uncover how AI is being used to combat + climate change, conserve biodiversity, and optimize resource utilization. 
Learn + about the innovative ways AI is being used to create a sustainable future and + the challenges and opportunities it presents.\nThought: "}], "model": "gpt-4", + "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.0}' headers: accept: - application/json @@ -939,18 +1222,18 @@ interactions: connection: - keep-alive content-length: - - '7104' + - '7537' content-type: - application/json cookie: - - __cf_bm=u.tmEZdLWJJh0JAky720e5vQTXXkbak1vLAbvqJhEiY-1706812728-1-AfwXwbOIbs/ICMGa7ID/ROzghPqSPoWSrFWNGYeQ/pa3JYTifb3+sj5oSXec/ys8Yg0Q3S0fCvt2ZsssnosJDAc=; - _cfuvid=awym7w2cNrUnczKj2e0M9vtNAb93tU.MTND.tf9GG64-1706812728028-0-604800000 + - __cf_bm=EBAvl3g_XqkWdcmiw8Ml6O7WPWo5VXs8r3SwmJVLUZw-1707554605-1-AUZKfV35H8Wp5ossrlc6jtZ9dhgqLrVrl+ZF0jOXXdI0dpVcIvTttF7f8Pl5ZI2Pih3rfueXBSRF9vmqggAOPCk=; + _cfuvid=pTnBIy95g3oEDKgA4dLOO3E_dCa6hkilxXzbh8LPWMM-1707554605073-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - - x64 + - arm64 x-stainless-async: - 'false' x-stainless-lang: @@ -958,62 +1241,45 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.11.5 + - 3.11.7 method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8nWHVfigZ90ViYdlhcH7LFow95UFa\",\n \"object\": - \"chat.completion\",\n \"created\": 1706812853,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: - Here are the 5 interesting ideas with a highlight paragraph for each:\\n\\n1. - \\\"The Future of AI in Healthcare: Predicting Diseases Before They Happen\\\"\\n - \ - \\\"Imagine a future where AI empowers us to detect diseases before they - arise, transforming healthcare from reactive to proactive. Machine learning - algorithms, trained on vast amounts of patient data, could potentially predict - heart diseases, strokes, or cancers before they manifest, allowing for early - interventions and significantly improving patient outcomes. This article will - delve into the rapid advancements in AI within the healthcare sector and how - these technologies are ushering us into a new era of predictive medicine.\\\"\\n\\n2. - \\\"How AI is Changing the Way We Cook: An Insight into Smart Kitchens\\\"\\n - \ - \\\"From the humble home kitchen to grand culinary stages, AI is revolutionizing - the way we cook. Smart appliances, equipped with advanced sensors and predictive - algorithms, are turning kitchens into creative playgrounds, offering personalized - recipes, precise cooking instructions, and even automated meal preparation. - This article explores the fascinating intersection of AI and gastronomy, revealing - how technology is transforming our culinary experiences.\\\"\\n\\n3. \\\"Redefining - Fitness with AI: Personalized Workout Plans and Nutritional Advice\\\"\\n - - \\\"Fitness reimagined \u2013 that's the promise of AI in the wellness industry. - Picture a personal trainer who knows your strengths, weaknesses, and nutritional - needs intimately. An AI-powered fitness app can provide this personalized experience, - adapting your workout plans and dietary recommendations in real-time based on - your progress and feedback. Join us as we unpack how AI is revolutionizing the - fitness landscape, offering personalized, data-driven approaches to health and - well-being.\\\"\\n\\n4. 
\\\"AI and the Art World: How Technology is Shaping - Creativity\\\"\\n - \\\"Art and AI may seem like unlikely partners, but their - synergy is sparking a creative revolution. AI algorithms are now creating mesmerizing - artworks, challenging our perceptions of creativity and originality. From AI-assisted - painting to generative music composition, this article will take you on a journey - through the fascinating world of AI in art, exploring how technology is reshaping - the boundaries of human creativity.\\\"\\n\\n5. \\\"AI in Space Exploration: - The Next Frontier\\\"\\n - \\\"The vast expanse of space, once the sole domain - of astronauts and rovers, is the next frontier for AI. AI technology is playing - an increasingly vital role in space exploration, from predicting space weather - to assisting in interstellar navigation. This article will delve into the exciting - intersection of AI and space exploration, exploring how these advanced technologies - are helping us uncover the mysteries of the cosmos.\\\"\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 1420,\n \"completion_tokens\": 526,\n \"total_tokens\": 1946\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA3xWUW/cyA1+968g9nm9iHOJr7cvRQD7LgYOLdC7awvURUCNuBLjEUcZcrSRD/nv + BUe7KycB+mIYO0Py48fvo+bPK4ANt5s9bEKPFoYxXv/lU/vL/e+3P8/z83sNxH//6beHX96X93f8 + rP/ebD0iNR8p2DlqF9IwRjJOshyHTGjkWW9+fPXj27dvbm9v68GQWooe1o12/eb61e3ND6eIPnEg + 3ezhP1cAAH/Wv45NWvq82cOr7fmXgVSxo83+cglgk1P0XzaoymoottmuhyGJkVS4dwkeQIhasARF + CRAspfhX+Ft6lJ9ZMMI70SPlPbynTICZwHqCA08ELEaZ1Fg64JZQ4ZAyoABm4xAJjmw9IPTc9ZG7 + 3mDEjF3Gsa83CUO/f5RHudnB7z3B/ZRicdYgHeBdNj5wYIzwIEYxckcSaP8od167JRodQFrgoAYW + rEg+ppKFZs+Baw5+kWMLh5wGYFPoy9BEgoY6FmHpFFABISQJNBqwgAb2GDhwqNAseYeerssYvSPz + UqlkaJHjDJEnUkBpPQ8axlnrDZ919khJE3oq3cH95zGmTNCnI7x7gB4VaEpxohbSRLn2NhNm3dZ/ + n2iGgSOpJSEF69Ggx4lAexyprR11OR2t31YAHjOxchLMHimtQkM9L0c+7XZCCTSQmO7gDwmXqpkG + zE/o5FhG0UPKA15m81DTezlUS0VaJ35Mrion24dbx1KsZNr5iF/vPIoF7ifKc4sz/MoHn+bLokWU + SICHEYOdCrFUbsnDwOMqv6cR+okOmG3sKyOOqk8DAY5jZG9NfWCaqgYGahmXO4TR+oCZdvArYRbA + JhWrILQ0Fp12gzGng3cHR5z1PKCGQq0AypGkatqEsgOdVxXkVIyFtkDSowTnJySZSHhR4JhTW4Lx + xDYvw2op1FFdD/jE0q3icFBSBsqp6NJYWPRzYihXa6Et5Q8sHWXj8czGEdgWgWjlq7oWw5KhOtQo + 9JJi6uaLao4pxxYw1+6L1hH+sIN76zm4I4dvUewf5Z8kPu7VlXS6HbGZM4v1/9eTx55q8NkeMBCZ + QiYdkyg3HNnmr1nh72BAEhgzTxjmLXxMDSiFki8cuxDIMIKkPOhqkiG5l1MTuTtlO9LiLEtHzK1W + rbc0UUyju6VGFqUd3FGcXvRsfSpdb9djTlPyOUJLDZprsyrs3cNCi661CbVkcmH5dcMnqluGRCub + LzmIVMPoU2Gr3nQIPps3ywb9ByudiPjNMFsZdf8o/2ITUl18vd7Q041ltwgdIfQ4jGcy10lsoc08 + VXDe4aqWTOeNvYM71sXIrrhlu1hGjk3E50X9w4jCToQvPMwO6ZxzTEfKJ1jVr87p8hX97FZpIvm4 + lu9oherbiew0whUHP/ss2qKWmfQ7c3OGIvypkLcaI0lHS/ucfXEWaZtM1X6OoWZ8oZJ1wVlPM/Qp + tnXVZXJ7nXtZlp43842vmqLsTdeBvd2dt6jH3MvEOYkra/8oX0nc3aoUvlm91V5r0Nk9Lyou7tGi + hix4ds95254+OHxWXdHlCRDS0KBBiDw406FH6Wjrq0spTwQNp5YnynqxVBqNB6c9k6aSA0ExjvyM + iyy+3a5nVU10Warfg1jGjCv6eKH13Pw6vxOKMWUrwuYKY1/dpPWrtjm9e75cHkwxdS4pf1xJifHy + +4GFtf+QCTWJP47U0riEf7kC+G99mJWv3lqbMadhtA+Wnkg84c3bV7dLws36CFyP39ycTy0Zxhdx + P71+fXUCudFZjYYPyyYffXOeoF59ufofAAAA//8DAAzkXkmhCgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84ec534c8b3c1ab2-GRU + - 853311f6ed9e982a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -1023,7 +1289,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 01 Feb 2024 18:41:47 GMT + - Sat, 10 Feb 2024 08:44:40 GMT Server: - cloudflare Transfer-Encoding: @@ -1037,7 +1303,7 @@ interactions: openai-organization: - 
user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '54417' + - '13993' openai-version: - '2020-10-01' strict-transport-security: @@ -1049,15 +1315,16 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '298265' + - '298148' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 347ms + - 370ms x-request-id: - - 7bc45da0338f1a84ca0b7f2c3eff5ec2 - http_version: HTTP/1.1 - status_code: 200 + - req_5e062acf31c0d01293e96d916c215c86 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -1072,43 +1339,36 @@ interactions: up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and - your notes.\nAI: Here are the 5 interesting ideas with a highlight paragraph - for each:\n\n1. \"The Future of AI in Healthcare: Predicting Diseases Before - They Happen\"\n - \"Imagine a future where AI empowers us to detect diseases - before they arise, transforming healthcare from reactive to proactive. Machine - learning algorithms, trained on vast amounts of patient data, could potentially - predict heart diseases, strokes, or cancers before they manifest, allowing for - early interventions and significantly improving patient outcomes. This article - will delve into the rapid advancements in AI within the healthcare sector and - how these technologies are ushering us into a new era of predictive medicine.\"\n\n2. - \"How AI is Changing the Way We Cook: An Insight into Smart Kitchens\"\n - - \"From the humble home kitchen to grand culinary stages, AI is revolutionizing - the way we cook. Smart appliances, equipped with advanced sensors and predictive - algorithms, are turning kitchens into creative playgrounds, offering personalized - recipes, precise cooking instructions, and even automated meal preparation. - This article explores the fascinating intersection of AI and gastronomy, revealing - how technology is transforming our culinary experiences.\"\n\n3. \"Redefining - Fitness with AI: Personalized Workout Plans and Nutritional Advice\"\n - \"Fitness - reimagined \u2013 that''s the promise of AI in the wellness industry. Picture - a personal trainer who knows your strengths, weaknesses, and nutritional needs - intimately. An AI-powered fitness app can provide this personalized experience, - adapting your workout plans and dietary recommendations in real-time based on - your progress and feedback. Join us as we unpack how AI is revolutionizing the - fitness landscape, offering personalized, data-driven approaches to health and - well-being.\"\n\n4. \"AI and the Art World: How Technology is Shaping Creativity\"\n - - \"Art and AI may seem like unlikely partners, but their synergy is sparking - a creative revolution. AI algorithms are now creating mesmerizing artworks, - challenging our perceptions of creativity and originality. From AI-assisted - painting to generative music composition, this article will take you on a journey - through the fascinating world of AI in art, exploring how technology is reshaping - the boundaries of human creativity.\"\n\n5. \"AI in Space Exploration: The Next - Frontier\"\n - \"The vast expanse of space, once the sole domain of astronauts - and rovers, is the next frontier for AI. 
AI technology is playing an increasingly - vital role in space exploration, from predicting space weather to assisting - in interstellar navigation. This article will delve into the exciting intersection - of AI and space exploration, exploring how these advanced technologies are helping - us uncover the mysteries of the cosmos.\"\n\nNew summary:"}], "model": "gpt-4", - "n": 1, "stream": false, "temperature": 0.7}' + your notes.\nAI: Here are the five interesting ideas for an article with a highlight + paragraph for each:\n\n1. The Evolution of Artificial Intelligence:\nDive deep + into the fascinating journey of artificial intelligence, from its humble beginnings + as a concept in science fiction to an integral part of our daily lives and a + catalyst of modern innovations. Explore how AI has evolved over the years, the + key milestones that have shaped its growth, and the visionary minds behind these + advancements. Uncover the remarkable transformation of AI and its astounding + potential for the future.\n\n2. AI in Everyday Life:\nUncover the unseen impact + of AI in our every day lives, from our smartphones and home appliances to social + media and healthcare. Learn about the subtle yet profound ways AI has become + a silent partner in your daily routine, enhancing convenience, productivity, + and decision-making. Explore the numerous applications of AI right at your fingertips + and how it shapes our interactions with technology and the world around us.\n\n3. + Ethical Implications of AI:\nVenture into the ethical labyrinth of artificial + intelligence, where innovation meets responsibility. Explore the implications + of AI on privacy, job security, and societal norms, and the moral obligations + we have towards its development and use. Delve into the thought-provoking debates + about AI ethics and the measures being taken to ensure its responsible and equitable + use.\n\n4. The Rise of AI Startups:\nWitness the rise of AI startups, the new + champions of innovation, driving the technology revolution. Discover how these + trailblazing companies are harnessing the power of AI to solve complex problems, + create new markets, and revolutionize industries. Learn about their unique challenges, + their groundbreaking solutions, and the potential they hold for reshaping the + future of technology and business.\n\n5. AI and the Environment:\nExplore the + intersection of AI and the environment, where technology meets sustainability. + Uncover how AI is being used to combat climate change, conserve biodiversity, + and optimize resource utilization. 
Learn about the innovative ways AI is being + used to create a sustainable future and the challenges and opportunities it + presents.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": + 0.0}' headers: accept: - application/json @@ -1117,18 +1377,18 @@ interactions: connection: - keep-alive content-length: - - '3919' + - '3333' content-type: - application/json cookie: - - __cf_bm=u.tmEZdLWJJh0JAky720e5vQTXXkbak1vLAbvqJhEiY-1706812728-1-AfwXwbOIbs/ICMGa7ID/ROzghPqSPoWSrFWNGYeQ/pa3JYTifb3+sj5oSXec/ys8Yg0Q3S0fCvt2ZsssnosJDAc=; - _cfuvid=awym7w2cNrUnczKj2e0M9vtNAb93tU.MTND.tf9GG64-1706812728028-0-604800000 + - __cf_bm=EBAvl3g_XqkWdcmiw8Ml6O7WPWo5VXs8r3SwmJVLUZw-1707554605-1-AUZKfV35H8Wp5ossrlc6jtZ9dhgqLrVrl+ZF0jOXXdI0dpVcIvTttF7f8Pl5ZI2Pih3rfueXBSRF9vmqggAOPCk=; + _cfuvid=pTnBIy95g3oEDKgA4dLOO3E_dCa6hkilxXzbh8LPWMM-1707554605073-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.7.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - - x64 + - arm64 x-stainless-async: - 'false' x-stainless-lang: @@ -1136,33 +1396,33 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.7.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.11.5 + - 3.11.7 method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8nWIOgYyxoh7migh87kebeaOUQQM0\",\n \"object\": - \"chat.completion\",\n \"created\": 1706812908,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to generate five - topics for articles, each with a highlight paragraph. The AI proposes the following: - 1) The future role of AI in predicting diseases and transforming healthcare, - 2) The impact of AI on cooking and the emergence of smart kitchens, 3) The use - of AI in personalizing fitness and nutrition plans, 4) The influence of AI on - art and creativity, and 5) The application of AI in space exploration. 
Each - topic is accompanied by a paragraph showcasing the potential depth and intrigue - of an article on the given subject.\"\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 700,\n \"completion_tokens\": 115,\n \"total_tokens\": 815\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1STTW/bMAyG7/kVhG8F0iBpk7XNLYdgCNDDsHY7bBgKRaZttrKoilQ+UPS/D5LT + ZrsYsF6+5COKfBsBVFRXS6hsZ9T2wV3evtZfH9ah/fHtdbaaXv16DXOex58vh+f28b4aZwdvn9Hq + h2tiuQ8OldgPso1oFHPW2c30ZrGYf7mdFqHnGl22tUEv55fTL7Prk6NjsijVEn6PAADeyjez+RoP + 1RKKv5z0KGJarJafQQBVZJdPKiNCosZrNT6Llr2iL7iPHUKXeuPByIuAdgirDSiD5R4hBdiTdtDQ + DoG8YkRR8i1QjUag4QjZGZWsQzC+hn0kRTDQUds5ajuFYKJpowldCUdjuwk8DmUktS2KDmUbdo73 + OblyICtLmF2UwPWOXcq9BG5gFZUasmQcbLyic9SitziGmsQmkQKnAh2JcjwWpsD5ttnSJE0RJ3B1 + kcuTh/UO47E2R7inBseAh+A45hwd70tIH4xVAU4RakPuCJGTkkcpmUtLjM1wMoHrC1hrRzaz9cGR + NUUo2Jsx1Oh2Bc8rlxv3HI0reYQtoRoHJJJQQFKMnHydw1ebCcyHTnwnwSEbPKiJmoKMz53+wNYO + BfP7BeMpg0aEOtKptuddwTrVHZCGeT1AiLx12MsEFqVDOSaTrv2OIvseveYmmZ78v00S2GL+T4J1 + nh1T1xFFAM+2893Ki0TuWREkiRryZkuO9DipTiP6/jnbjtvMlPfAJ+c+zxvyJN1TRCPs8xyLchjs + 7yOAP2WH0n9rUeWiQZ+UX9DnhIu70w5V53U9q7Oru5OqrMadhZvZ3eiEWMlRFPunhnyLMUQqO5VB + R++jvwAAAP//AwDESznZSgQAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 84ec54a25b4a1ab2-GRU + - 853312506839982a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -1172,7 +1432,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 01 Feb 2024 18:42:01 GMT + - Sat, 10 Feb 2024 08:44:45 GMT Server: - cloudflare Transfer-Encoding: @@ -1186,7 +1446,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '13420' + - '4990' openai-version: - '2020-10-01' strict-transport-security: @@ -1198,13 +1458,14 @@ interactions: x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299045' + - '299185' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 191ms + - 162ms x-request-id: - - 7164ea94ba0e8fa5e61b366a6dd69dbc - http_version: HTTP/1.1 - status_code: 200 + - req_535af04a0c1f08c599cc7c54b4519736 + status: + code: 200 + message: OK version: 1 diff --git a/tests/cassettes/test_logging_tool_usage.yaml b/tests/cassettes/test_logging_tool_usage.yaml index fa2efc153..8480ac15d 100644 --- a/tests/cassettes/test_logging_tool_usage.yaml +++ b/tests/cassettes/test_logging_tool_usage.yaml @@ -1,21 +1,19 @@ interactions: - request: - body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew - test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou - have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float - - Useful for when you need to multiply two numbers together.\n The input - to this tool should be a comma separated list of numbers of\n length - two, representing the two numbers you want to multiply together.\n For - example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use - a tool, please use the exact following format:\n\n```\nThought: Do I need to - use a tool? Yes\nAction: the action to take, should be one of [multiplier], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary - of your work so far:\n \nBegin! 
This is VERY important to you, your job depends - on it!\n\nCurrent Task: What is 3 times 5?\n\n"}], "model": "gpt-4", "n": 1, - "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nBegin! This is VERY + important to you, your job depends on it!\n\nCurrent Task: What is 3 times 5?\n"}], + "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": + 0.7}' headers: accept: - application/json @@ -24,13 +22,13 @@ interactions: connection: - keep-alive content-length: - - '1231' + - '1047' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -40,7 +38,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -48,18 +46,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8b7eDhOwdMJw0817wIqiD9MoXVRU8\",\n \"object\": - \"chat.completion\",\n \"created\": 1703858225,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? 
Yes\\nAction: - multiplier\\nAction Input: 3,5\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 264,\n \"completion_tokens\": - 23,\n \"total_tokens\": 287\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRS0/DMBCE7/kVqz2nqK8AzQUhEIIbIDggiqrU2SQujtfYGwmo+t+R0ycXH2Z2 + Vt+s1wkA6hJzQNUUolpnBpdfarby1ROZ2fju9fGesko/X36W05vfc41pTPByRUr2qTPFrTMkmu3W + Vp4Kobh1dDG8yLLhdDTrjZZLMjFWOxlMB8Pz0WSXaFgrCpjDewIAsO7fyGZL+sYchuleaSmEoibM + D0MA6NlEBYsQdJDCCqZHU7EVsj3uS8Nd3UgOtwwPYIlKEIYuEBQgzOYK3ijM7bWKZXJoOyPaGU1+ + r8GDdZ3ksJ5jpX2Qhe3aJfk55jBJYY6BFNvyRM02uCPZHCoYrp3nZaxrO2MOeqWtDs3CUxHYRtwg + 7LbxTQLw0Z+q+9cenefWyUL4k2xcOB6Ptvvw+CtHdzLZmcJSmJNUNk12hBh+glC7qLStyTuv+8tF + zmST/AEAAP//AwCr+Q67MAIAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 83d28cd70f9f1cd9-GRU + - 8532aa4bca4e679a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -69,14 +69,14 @@ interactions: Content-Type: - application/json Date: - - Fri, 29 Dec 2023 13:57:07 GMT + - Sat, 10 Feb 2024 07:33:41 GMT Server: - cloudflare Set-Cookie: - - __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=; - path=/; expires=Fri, 29-Dec-23 14:27:07 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=iA8sDHDKVnfosmI8wG_ElV9lor.u9ivoU2KD_Uw2Ck0-1707550421-1-AakLurCxUahX1wP/EqH55USvoahi23iv5/ZX2jxvr6ltSDBhzk8J0ipuSFcgyazwnWfYsqu0n467tSBqzuyTan0=; + path=/; expires=Sat, 10-Feb-24 08:03:41 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000; + - _cfuvid=CpE5jxQFzpK609ZrTNgy1kxDFHYN2x4AY1XCThUEeQk-1707550421579-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Transfer-Encoding: - chunked @@ -89,7 +89,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1725' + - '1673' openai-version: - '2020-10-01' strict-transport-security: @@ -98,43 +98,39 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299717' - x-ratelimit-remaining-tokens_usage_based: - - '299717' + - '299762' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 56ms - x-ratelimit-reset-tokens_usage_based: - - 56ms + - 47ms x-request-id: - - 689757a1e38baf0844ec69f20525c5d7 - http_version: HTTP/1.1 - status_code: 200 + - req_e5461f12abf90cd87cea37b9c47989e0 + status: + code: 200 + message: OK - request: - body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew - test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou - have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float - - Useful for when you need to multiply two numbers together.\n The input - to this tool should be a comma separated list of numbers of\n length - two, representing the two numbers you want to multiply together.\n For - example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use - a tool, please use the exact following format:\n\n```\nThought: Do I need to - use a tool? 
Yes\nAction: the action to take, should be one of [multiplier], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary - of your work so far:\n \nBegin! This is VERY important to you, your job depends - on it!\n\nCurrent Task: What is 3 times 5?\nThought: Do I need to use a tool? - Yes\nAction: multiplier\nAction Input: 3,5\nObservation: 15\nThought: \n"}], - "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": - 0.7}' + body: '{"messages": [{"role": "user", "content": "Return a valid schema for the + one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion + Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''}, + ''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number: + int, second_number: int) -> float - Useful for when you need to multiply two + numbers together.\n\nUse this text to inform a valid ouput schema:\nThought: + Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\": + 3, \"second_number\": 5}\n\nThe output should be formatted as a JSON instance + that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\": + {\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\": + \"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe + object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema. + The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere + is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\": + \"Function Name\", \"description\": \"The name of the function to be called.\", + \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\": + \"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}}, + \"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", + "n": 1, "stream": false, "temperature": 0.7}' headers: accept: - application/json @@ -143,16 +139,16 @@ interactions: connection: - keep-alive content-length: - - '1335' + - '1494' content-type: - application/json cookie: - - __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=; - _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000 + - __cf_bm=iA8sDHDKVnfosmI8wG_ElV9lor.u9ivoU2KD_Uw2Ck0-1707550421-1-AakLurCxUahX1wP/EqH55USvoahi23iv5/ZX2jxvr6ltSDBhzk8J0ipuSFcgyazwnWfYsqu0n467tSBqzuyTan0=; + _cfuvid=CpE5jxQFzpK609ZrTNgy1kxDFHYN2x4AY1XCThUEeQk-1707550421579-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -162,7 +158,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -170,18 +166,20 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8b7eG2qnUr6SZcankL5AqkwIJvYFc\",\n \"object\": - \"chat.completion\",\n \"created\": 1703858228,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n 
\"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer: - 3 times 5 is 15.\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 297,\n \"completion_tokens\": - 22,\n \"total_tokens\": 319\n },\n \"system_fingerprint\": null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SRO2/cMBCEe/2KBWtdIN0jF6t1kRQG7MApDISBjketJNrkUiFX8AUH/feAupfd + sNiZbzE7PGYAwjSiAqF7xdoNdvHtr76zT+XDy+HXz/v3t2e6f/xelHx4ennmpcgT4fevqPlCfdHe + DRbZeDrJOqBiTFvLbbHdbIr1spwF5xu0CesGXqwXxddydSZ6bzRGUcHvDADgOL8pGzV4EBUU+WXi + MEbVoaiuJgARvE0ToWI0kRWxyG+i9sRIc9wfGBBMBAXvaO2i9cEpZmzAUMI0gm+Be4Soe3SqkiRp + t9u9Rk+SjpIApGhH0unWmpRDKSqQwo2WzWANBinyk0uFbnRIHJNjJmfWhMg1jW6frBWs8osSUXtq + PkibpEySpjmBOJ8zXXuwvhuC36fOaLT2Om8NmdjXAVX0lG6O7IcTPmUAf+a+x08ViiF4N3DN/g0p + LVwtN6d94va1N3V9dxbZs7IfqO06OycU8V9kdHVrqMMwBDPXn3JmU/YfAAD//wMAtRBeM3UCAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 83d28ce3ab3b1cd9-GRU + - 8532aa5709e5679a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -191,7 +189,7 @@ interactions: Content-Type: - application/json Date: - - Fri, 29 Dec 2023 13:57:09 GMT + - Sat, 10 Feb 2024 07:33:44 GMT Server: - cloudflare Transfer-Encoding: @@ -205,7 +203,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '1568' + - '2203' openai-version: - '2020-10-01' strict-transport-security: @@ -214,24 +212,128 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '299661' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 67ms + x-request-id: + - req_a428e78fc8e9d475e463316f65950fa9 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour + personal goal is: test goalTOOLS:\n------\nYou have access to only the following + tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float + - Useful for when you need to multiply two numbers together.\n\nTo use a tool, + please use the exact following format:\n\n```\nThought: Do I need to use a tool? + Yes\nAction: the tool you wanna use, should be one of [multiplier], just the + name.\nAction Input: Any and all relevant information input and context for + using the tool\nObservation: the result of using the tool\n```\n\nWhen you have + a response for your task, or if you do not need to use a tool, you MUST use + the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your + response here]```This is the summary of your work so far:\nBegin! This is VERY + important to you, your job depends on it!\n\nCurrent Task: What is 3 times 5?\nThought: + Do I need to use a tool? 
Yes\nAction: multiplier\nAction Input: {\"first_number\": + 3, \"second_number\": 5}\nObservation: 15\nThought: "}], "model": "gpt-4", "n": + 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1191' + content-type: + - application/json + cookie: + - __cf_bm=iA8sDHDKVnfosmI8wG_ElV9lor.u9ivoU2KD_Uw2Ck0-1707550421-1-AakLurCxUahX1wP/EqH55USvoahi23iv5/ZX2jxvr6ltSDBhzk8J0ipuSFcgyazwnWfYsqu0n467tSBqzuyTan0=; + _cfuvid=CpE5jxQFzpK609ZrTNgy1kxDFHYN2x4AY1XCThUEeQk-1707550421579-0-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.12.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.12.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.7 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SQW0sDMRCF3/dXDHluy/aybd0XEYqiiKAiFFRKmp1uY7OZmMyipfS/S7bbiy95 + OGfOyTezSwCELkQOQq0lq8qZ7vRbXRGrVz2fPPKd3z7MeSPty2r29uy06MQELb9Q8THVU1Q5g6zJ + HmzlUTLG1v4knWRZOhqMGqOiAk2MlY67o2467g/bxJq0wiByeE8AAHbNG9lsgb8ih7RzVCoMQZYo + 8tMQgPBkoiJkCDqwtCw6Z1ORZbQN7ozgHixiAUxQBwQJTGSu4Yk+7K220sCNDT/ocxgC6woDZKAD + 9LOeaAv3JxJDpfO0jNS2Nuakr7TVYb3wKAPZ+Gtgcof4PgH4bDau/y0hnKfK8YJpgzYWDsbDQ584 + H/fCHbQmE0tzoU+zpCUUYRsYq8VK2xK987o5QORM9skfAAAA//8DACAb4vP3AQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8532aa66fd5c679a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 10 Feb 2024 07:33:45 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + openai-model: + - gpt-4-0613 + openai-organization: + - user-z7g4wmlazxqvc5wjyaaaocfz + openai-processing-ms: + - '1275' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - - '299693' - x-ratelimit-remaining-tokens_usage_based: - - '299693' + - '299728' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - - 61ms - x-ratelimit-reset-tokens_usage_based: - - 61ms + - 54ms x-request-id: - - 66a02e3e08e3f6b5b29fb04cd19f50a0 - http_version: HTTP/1.1 - status_code: 200 + - req_dd069a32b6514615a79356287e845a29 + status: + code: 200 + message: OK - request: body: '{"messages": [{"role": "user", "content": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a @@ -257,12 +359,12 @@ interactions: content-type: - application/json cookie: - - __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=; - _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000 + - __cf_bm=iA8sDHDKVnfosmI8wG_ElV9lor.u9ivoU2KD_Uw2Ck0-1707550421-1-AakLurCxUahX1wP/EqH55USvoahi23iv5/ZX2jxvr6ltSDBhzk8J0ipuSFcgyazwnWfYsqu0n467tSBqzuyTan0=; + _cfuvid=CpE5jxQFzpK609ZrTNgy1kxDFHYN2x4AY1XCThUEeQk-1707550421579-0-604800000 host: - api.openai.com user-agent: - - OpenAI/Python 1.6.1 + - OpenAI/Python 1.12.0 x-stainless-arch: - arm64 x-stainless-async: @@ -272,7 +374,7 @@ interactions: 
x-stainless-os: - MacOS x-stainless-package-version: - - 1.6.1 + - 1.12.0 x-stainless-runtime: - CPython x-stainless-runtime-version: @@ -280,19 +382,19 @@ interactions: method: POST uri: https://api.openai.com/v1/chat/completions response: - content: "{\n \"id\": \"chatcmpl-8b7eIfmBgQoWGLW4jiQZTu3qzEBBL\",\n \"object\": - \"chat.completion\",\n \"created\": 1703858230,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to calculate 3 - times 5 and the AI responds that 3 times 5 is 15.\"\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 155,\n \"completion_tokens\": 26,\n \"total_tokens\": 181\n },\n \"system_fingerprint\": - null\n}\n" + body: + string: !!binary | + H4sIAAAAAAAAA1SQT0/CQBDF7/0Ukz0XQgsV6E0TD2qiHtSYGEOW7dCu7D92p4lC+O5mS6F62cO8 + 93v7Zg4JAJMVK4GJhpPQTo0WO7F0ezl7549q+bx/eMp3t+v8Zlrdv27eWBoJu/5CQWdqLKx2Ckla + c5KFR04YU7P5ZF4Uk1ledIK2FaqI1Y5Gs9HkKpv2RGOlwMBK+EgAAA7dG7uZCr9ZCZP0PNEYAq+R + lRcTAPNWxQnjIchA3BBLB1FYQ2i6ui8NQtNqboCHbQBqEK7vgCwIrkSrOCFMgaTGAEUK3FRni8fg + rKkiwmnwgAyQFWPWf3a8tFS2dt6u40amVeoy30gjQ7PyyIM1sVEg6074MQH47K7R/luQOW+1oxXZ + LZoYmBXFKY8Nhx/UfN6LZImrP9QiT/qGLPwEQr3aSFOjd152x4k9k2PyCwAA//8DAHepd0ITAgAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 83d28cf118221cd9-GRU + - 8532aa703b92679a-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -302,7 +404,7 @@ interactions: Content-Type: - application/json Date: - - Fri, 29 Dec 2023 13:57:12 GMT + - Sat, 10 Feb 2024 07:33:47 GMT Server: - cloudflare Transfer-Encoding: @@ -316,7 +418,7 @@ interactions: openai-organization: - user-z7g4wmlazxqvc5wjyaaaocfz openai-processing-ms: - - '2188' + - '1667' openai-version: - '2020-10-01' strict-transport-security: @@ -325,366 +427,17 @@ interactions: - '10000' x-ratelimit-limit-tokens: - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' x-ratelimit-remaining-requests: - '9999' x-ratelimit-remaining-tokens: - '299794' - x-ratelimit-remaining-tokens_usage_based: - - '299794' x-ratelimit-reset-requests: - 6ms x-ratelimit-reset-tokens: - 41ms - x-ratelimit-reset-tokens_usage_based: - - 41ms x-request-id: - - a644c703d416b1e4b06586e96e6d7297 - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew - test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou - have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float - - Useful for when you need to multiply two numbers together.\n The input - to this tool should be a comma separated list of numbers of\n length - two, representing the two numbers you want to multiply together.\n For - example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use - a tool, please use the exact following format:\n\n```\nThought: Do I need to - use a tool? Yes\nAction: the action to take, should be one of [multiplier], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary - of your work so far:\n The human asks the AI to calculate 3 times 5 and the - AI responds that 3 times 5 is 15.\nBegin! 
This is VERY important to you, your - job depends on it!\n\nCurrent Task: What is 3 times 5?\n\n"}], "model": "gpt-4", - "n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '1317' - content-type: - - application/json - cookie: - - __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=; - _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.6.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.6.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8b7eKehkNtTjrXJD0aFjiVUeXU7te\",\n \"object\": - \"chat.completion\",\n \"created\": 1703858232,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction: - multiplier\\nAction Input: 3,5\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 290,\n \"completion_tokens\": - 23,\n \"total_tokens\": 313\n },\n \"system_fingerprint\": null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 83d28d007f461cd9-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Fri, 29 Dec 2023 13:57:14 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '1885' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299697' - x-ratelimit-remaining-tokens_usage_based: - - '299697' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 60ms - x-ratelimit-reset-tokens_usage_based: - - 60ms - x-request-id: - - 87959bfa6a029f7bcafb5095e66d4964 - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew - test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou - have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float - - Useful for when you need to multiply two numbers together.\n The input - to this tool should be a comma separated list of numbers of\n length - two, representing the two numbers you want to multiply together.\n For - example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use - a tool, please use the exact following format:\n\n```\nThought: Do I need to - use a tool? 
Yes\nAction: the action to take, should be one of [multiplier], - just the name.\nAction Input: the input to the action\nObservation: the result - of the action\n```\n\nWhen you have a response for your task, or if you do not - need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use - a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary - of your work so far:\n The human asks the AI to calculate 3 times 5 and the - AI responds that 3 times 5 is 15.\nBegin! This is VERY important to you, your - job depends on it!\n\nCurrent Task: What is 3 times 5?\n\nI just used the multiplier - tool with input 3,5. So I already knwo the result of that.\nObservation: Invalid - or incomplete response\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], - "stream": false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '1460' - content-type: - - application/json - cookie: - - __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=; - _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.6.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.6.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8b7eME5fg1w8QcXUnl0FBcqxJWy7u\",\n \"object\": - \"chat.completion\",\n \"created\": 1703858234,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Do I need to use a tool? 
No\\nFinal Answer: - 3 times 5 is 15.\"\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 324,\n \"completion_tokens\": - 22,\n \"total_tokens\": 346\n },\n \"system_fingerprint\": null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 83d28d0fcfaf1cd9-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Fri, 29 Dec 2023 13:57:17 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '2494' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299661' - x-ratelimit-remaining-tokens_usage_based: - - '299661' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 67ms - x-ratelimit-reset-tokens_usage_based: - - 67ms - x-request-id: - - eb9704531634d4cd3c6652dff09914bc - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"messages": [{"role": "user", "content": "Progressively summarize the - lines of conversation provided, adding onto the previous summary returning a - new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks - of artificial intelligence. The AI thinks artificial intelligence is a force - for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial - intelligence is a force for good?\nAI: Because artificial intelligence will - help humans reach their full potential.\n\nNew summary:\nThe human asks what - the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence - is a force for good because it will help humans reach their full potential.\nEND - OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to calculate 3 times 5 - and the AI responds that 3 times 5 is 15.\n\nNew lines of conversation:\nHuman: - What is 3 times 5?\nAI: 3 times 5 is 15.\n\nNew summary:"}], "model": "gpt-4", - "n": 1, "stream": false, "temperature": 0.7}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '972' - content-type: - - application/json - cookie: - - __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=; - _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.6.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.6.1 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - content: "{\n \"id\": \"chatcmpl-8b7eP77MzprkbgjaMbeaEob0aDx39\",\n \"object\": - \"chat.completion\",\n \"created\": 1703858237,\n \"model\": \"gpt-4-0613\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The human asks the AI to calculate 3 - times 5 twice, and both times the AI responds that 3 times 5 is 15.\"\n },\n - \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n - \ \"usage\": {\n \"prompt_tokens\": 181,\n \"completion_tokens\": 30,\n - \ \"total_tokens\": 211\n },\n \"system_fingerprint\": null\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 83d28d211f591cd9-GRU - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Fri, 29 Dec 2023 13:57:20 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400 - openai-model: - - gpt-4-0613 - openai-organization: - - user-z7g4wmlazxqvc5wjyaaaocfz - openai-processing-ms: - - '2967' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '300000' - x-ratelimit-limit-tokens_usage_based: - - '300000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '299771' - x-ratelimit-remaining-tokens_usage_based: - - '299771' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 45ms - x-ratelimit-reset-tokens_usage_based: - - 45ms - x-request-id: - - e3bbd11b2e9b28f76f8b43dc7a4d2b6d - http_version: HTTP/1.1 - status_code: 200 + - req_14ec03ab8500562c8eaac94fe2cec349 + status: + code: 200 + message: OK version: 1
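The re-recorded prompts in this cassette describe the tool as multiplier(first_number: int, second_number: int) -> float and show the model passing a JSON object of keyword arguments ({"first_number": 3, "second_number": 5}) instead of the old comma-separated string. The lines below are only a minimal sketch of such a tool as a plain Python function; the function name, signature, and docstring are taken from the prompt text, while the wrapper or decorator the real test fixture uses is an assumption and is not shown.

# Hypothetical sketch of the "multiplier" tool the re-recorded prompts describe.
# The actual test code may wrap this in a tool abstraction; only the signature
# and description are taken from the cassette text.
def multiplier(first_number: int, second_number: int) -> float:
    """Useful for when you need to multiply two numbers together."""
    return first_number * second_number

# The cassette records the model emitting keyword-style arguments,
#   Action Input: {"first_number": 3, "second_number": 5}
# which the tool layer would execute roughly as:
print(multiplier(first_number=3, second_number=5))  # 15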