feat: restructure project as UV workspace with crewai in lib/

Author: Greyson LaLonde
Date: 2025-09-26 14:29:28 -04:00
Parent: 74b5c88834
Commit: daf6f679ff
763 changed files with 1181 additions and 398 deletions
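The hunks below delete VCR-style cassette fixtures: YAML recordings of HTTP interactions with the OpenAI, OpenRouter, and crewai telemetry endpoints, presumably relocated under lib/ as part of the workspace split. For orientation, here is a minimal sketch of how such a cassette is typically exercised in a test, assuming the project uses vcrpy with pytest; the paths and test body are illustrative, not taken from this commit:

import vcr
from openai import OpenAI

my_vcr = vcr.VCR(
    cassette_library_dir="tests/cassettes",  # illustrative path
    record_mode="once",                      # replay if the cassette exists, record otherwise
    filter_headers=["authorization"],        # keep API keys out of the YAML
)

def test_agent_says_hi():
    # With the cassette present, no real network traffic occurs: vcrpy
    # intercepts the request and replays the recorded response.
    with my_vcr.use_cassette("agent_says_hi.yaml"):
        client = OpenAI()
        resp = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "Just say hi"}],
            stop=["\nObservation:"],
        )
        assert "hi" in resp.choices[0].message.content.lower()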


@@ -1 +0,0 @@
"""Tests for utilities."""


@@ -1,243 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
final answer: hi\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '836'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AzTXAk4GatJOmLO9sEOCCITIjf1Dx\",\n \"object\":
\"chat.completion\",\n \"created\": 1739214900,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-RAY:
- 90fe6ce92eba67b3-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 10 Feb 2025 19:15:01 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=pjX1I6y8RlqCjS.gvOqvXk4vM69UNwFwmslh1BhALNg-1739214901-1.0.1.1-nJcNlSdNcug82eDl7KSvteLbsg0xCiEh2yI1TZX2jMAblL7AMQ8LFhvXkJLlAMfk49RMzRzWy2aiQgeM7WRHPg;
path=/; expires=Mon, 10-Feb-25 19:45:01 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=efIHP1NUsh1dFewGJBu4YoBu6hhGa8vjOOKQglYQGno-1739214901306-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '571'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999810'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_a95183a7a85e6bdfe381b2510bf70f34
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Assess the quality of the task
completed based on the description, expected output, and actual results.\n\nTask
Description:\nJust say hi\n\nExpected Output:\nhi\n\nActual Output:\nhi\n\nPlease
provide:\n- Bullet points suggestions to improve future similar tasks\n- A score
from 0 to 10 evaluating on completion, quality, and overall performance- Entities
extracted from the task output, if any, their type, description, and relationships"}],
"model": "gpt-4o-mini", "tool_choice": {"type": "function", "function": {"name":
"TaskEvaluation"}}, "tools": [{"type": "function", "function": {"name": "TaskEvaluation",
"description": "Correctly extracted `TaskEvaluation` with all the required parameters
with correct types", "parameters": {"$defs": {"Entity": {"properties": {"name":
{"description": "The name of the entity.", "title": "Name", "type": "string"},
"type": {"description": "The type of the entity.", "title": "Type", "type":
"string"}, "description": {"description": "Description of the entity.", "title":
"Description", "type": "string"}, "relationships": {"description": "Relationships
of the entity.", "items": {"type": "string"}, "title": "Relationships", "type":
"array"}}, "required": ["name", "type", "description", "relationships"], "title":
"Entity", "type": "object"}}, "properties": {"suggestions": {"description":
"Suggestions to improve future similar tasks.", "items": {"type": "string"},
"title": "Suggestions", "type": "array"}, "quality": {"description": "A score
from 0 to 10 evaluating on completion, quality, and overall performance, all
taking into account the task description, expected output, and the result of
the task.", "title": "Quality", "type": "number"}, "entities": {"description":
"Entities extracted from the task output.", "items": {"$ref": "#/$defs/Entity"},
"title": "Entities", "type": "array"}}, "required": ["entities", "quality",
"suggestions"], "type": "object"}}}]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1962'
content-type:
- application/json
cookie:
- __cf_bm=pjX1I6y8RlqCjS.gvOqvXk4vM69UNwFwmslh1BhALNg-1739214901-1.0.1.1-nJcNlSdNcug82eDl7KSvteLbsg0xCiEh2yI1TZX2jMAblL7AMQ8LFhvXkJLlAMfk49RMzRzWy2aiQgeM7WRHPg;
_cfuvid=efIHP1NUsh1dFewGJBu4YoBu6hhGa8vjOOKQglYQGno-1739214901306-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AzTXDcgKWq3yosIyBal8LcY8dDrn1\",\n \"object\":
\"chat.completion\",\n \"created\": 1739214903,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_c41SAnqyEKNXEAZd5XV3jKF3\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"TaskEvaluation\",\n
\ \"arguments\": \"{\\\"suggestions\\\":[\\\"Consider specifying
the tone or context of the greeting for more engaging interactions.\\\",\\\"Clarify
if additional greetings or responses are acceptable to enhance the task's scope.\\\"],\\\"quality\\\":10,\\\"entities\\\":[]
}\"\n }\n }\n ],\n \"refusal\": null\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 273,\n \"completion_tokens\": 43,\n
\ \"total_tokens\": 316,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 90fe6cf8c96e67b3-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 10 Feb 2025 19:15:04 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1181'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999876'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_b2286c8ae6f9b2a42f46a3e2c52b4211
http_version: HTTP/1.1
status_code: 200
version: 1
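The cassette above captures crewai's ReAct-style prompt contract: the system prompt demands a "Thought: ... Final Answer: ..." reply, and the request sets "stop": ["\nObservation:"] so generation halts before a tool observation would begin. As a rough illustration (not crewai's actual parser), the final answer can be recovered from the recorded completion like this:

import re

def extract_final_answer(text: str) -> str | None:
    # Everything after the first "Final Answer:" marker, whitespace-trimmed.
    match = re.search(r"Final Answer:\s*(.*)", text, re.DOTALL)
    return match.group(1).strip() if match else None

print(extract_final_answer("I now can give a great answer \nFinal Answer: hi"))  # -> hi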


@@ -1,114 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "Name: Alice, Age: 30"}], "model":
"gpt-4o-mini", "tool_choice": {"type": "function", "function": {"name": "SimpleModel"}},
"tools": [{"type": "function", "function": {"name": "SimpleModel", "description":
"Correctly extracted `SimpleModel` with all the required parameters with correct
types", "parameters": {"properties": {"name": {"title": "Name", "type": "string"},
"age": {"title": "Age", "type": "integer"}}, "required": ["age", "name"], "type":
"object"}}}]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '507'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.59.6
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.59.6
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-Aq4a4xDv8G0i4fbTtPJEI2B8UNBup\",\n \"object\":
\"chat.completion\",\n \"created\": 1736974028,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_uO5nec8hTk1fpYINM8TUafhe\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"SimpleModel\",\n
\ \"arguments\": \"{\\\"name\\\":\\\"Alice\\\",\\\"age\\\":30}\"\n
\ }\n }\n ],\n \"refusal\": null\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 79,\n \"completion_tokens\": 10,\n
\ \"total_tokens\": 89,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 9028b81aeb1cb05f-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 15 Jan 2025 20:47:08 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=PzayZLF04c14veGc.0ocVg3VHBbpzKRW8Hqox8L9U7c-1736974028-1.0.1.1-mZpK8.SH9l7K2z8Tvt6z.dURiVPjFqEz7zYEITfRwdr5z0razsSebZGN9IRPmI5XC_w5rbZW2Kg6hh5cenXinQ;
path=/; expires=Wed, 15-Jan-25 21:17:08 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=ciwC3n2Srn20xx4JhEUeN6Ap0tNBaE44S95nIilboQ0-1736974028496-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '439'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999978'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_a468000458b9d2848b7497b2e3d485a3
http_version: HTTP/1.1
status_code: 200
version: 1
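The SimpleModel cassette above shows the forced tool-call pattern: tool_choice pins the model to a single function, so the reply arrives as structured JSON arguments rather than prose. A hedged sketch of issuing and parsing such a request with the OpenAI Python client, mirroring the schema recorded above (running it live would need an API key):

import json
from openai import OpenAI

client = OpenAI()
tools = [{
    "type": "function",
    "function": {
        "name": "SimpleModel",
        "parameters": {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "age": {"type": "integer"},
            },
            "required": ["age", "name"],
        },
    },
}]
resp = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Name: Alice, Age: 30"}],
    tools=tools,
    # Forcing this one function guarantees a tool call in the response.
    tool_choice={"type": "function", "function": {"name": "SimpleModel"}},
)
args = json.loads(resp.choices[0].message.tool_calls[0].function.arguments)
print(args)  # e.g. {"name": "Alice", "age": 30}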

File diff suppressed because one or more lines are too long


@@ -1,67 +0,0 @@
interactions:
- request:
body: '{"model": "meta-llama/llama-3.2-3b-instruct", "messages": [{"role": "system",
"content": "Please convert the following text into valid JSON.\n\nOutput ONLY
the valid JSON and nothing else.\n\nThe JSON must follow this format exactly:\n{\n \"name\":
str,\n \"age\": int\n}"}, {"role": "user", "content": "Name: Alice Llama, Age:
30"}], "stream": false, "stop": []}'
headers:
accept:
- '*/*'
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '365'
content-type:
- application/json
host:
- openrouter.ai
http-referer:
- https://litellm.ai
user-agent:
- litellm/1.68.0
x-title:
- liteLLM
method: POST
uri: https://openrouter.ai/api/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//dJDBTsMwEER/pZqzU1ICKfKNMwgQEieCKsfZJgbHG9mbghTl31FatZy4
7GHm8N7OBNdAo6WQbbY327zMi81t9vwizVv5sB9f22/DdOg730FhiHxwDUVoPFHtxgSFnhvy0OhJ
TOa96c3V8WbF+jor6syFJHG0AgWuP8kKNGxnZG25HzyJ4wAFG8kINdB/Egq2Y2cpQb9P8NwOkesE
HUbvFfYuuNTtIpnEARpJeIBCMOIOtPundaGhH+hcoaeUTEvQEyJ7goZJySUxYRG1HITCYjpVCKan
CnpV4d47S6vH5bsKalXBtMemyGcoRNqPyfiz4IntQnsK5vlDYTwzh8j9IDvhLwoJuiwX5nmOS7xs
ICzGX5K7zTz/AgAA//8DAIXfumyyAQAA
headers:
Access-Control-Allow-Origin:
- '*'
CF-RAY:
- 93ea9f59596559fa-DEL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 12 May 2025 14:31:55 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
Vary:
- Accept-Encoding
x-clerk-auth-message:
- Invalid JWT form. A JWT consists of three parts separated by dots. (reason=token-invalid,
token-carrier=header)
x-clerk-auth-reason:
- token-invalid
x-clerk-auth-status:
- signed-out
status:
code: 200
message: OK
version: 1
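Unlike the OpenAI cassettes, the OpenRouter recording above stores its response body as a YAML !!binary scalar: base64-encoded bytes that, per the Content-Encoding header, are gzip-compressed JSON. A hedged sketch for inspecting such a body offline (the file path is illustrative):

import gzip
import json
import yaml

with open("cassette.yaml") as f:
    cassette = yaml.safe_load(f)  # !!binary scalars load as bytes

raw = cassette["interactions"][0]["response"]["body"]["string"]
payload = json.loads(gzip.decompress(raw))
# Assuming an OpenAI-style chat completion payload, as the request suggests:
print(payload["choices"][0]["message"]["content"])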

File diff suppressed because one or more lines are too long


@@ -1,315 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
final answer: hi\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '836'
content-type:
- application/json
cookie:
- __cf_bm=4s6sWmJ49B9F_wNc1STtdZF1nikfl6uN9_ov3Xzfa8U-1738698987-1.0.1.1-lmbRRS1MHrDbnU93Gh16CP3qNczxxIrQnyBU7vpHSwNf6PdmuWOHKd1mkl5SBx6rg7p1NLaNUMyqDDcE0Mvjzw;
_cfuvid=Cl48aI8.jSRja0Pqr6Jrh3mAnigd4rDn6lhGicyjMPY-1738698987673-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AxJK2OCJSkUj1plgbj59b4dC39QV2\",\n \"object\":
\"chat.completion\",\n \"created\": 1738698990,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-RAY:
- 90cd396c0ab71698-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Feb 2025 19:56:30 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '951'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999810'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_2c3cb5caed61ccd1e058ef3e6301c691
http_version: HTTP/1.1
status_code: 200
- request:
body: !!binary |
Cq0TCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkShBMKEgoQY3Jld2FpLnRl
bGVtZXRyeRKkBwoQzBQBWCz+GLuI1awj3OPWrRIIGpT16t5bk6MqDENyZXcgQ3JlYXRlZDABOUBz
OyuEGSEYQYDBSCuEGSEYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTAwLjBKGgoOcHl0aG9uX3Zl
cnNpb24SCAoGMy4xMi44Si4KCGNyZXdfa2V5EiIKIGU1ODA3MDFkNTJlYjY1YWZmMjRlZWZlNzhj
NzQ2MjhjSjEKB2NyZXdfaWQSJgokMDE3NjQ5ZWMtYTBlMS00MzYxLWFlNjgtYzA1N2E3ZGM5YzI5
ShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3
X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrRAgoLY3Jl
d19hZ2VudHMSwQIKvgJbeyJrZXkiOiAiYWQxNTMxNjFjNWM1YTg1NmFhMGQwNmIyNDljNGM2NGEi
LCAiaWQiOiAiOGU3NzgyN2QtN2Y2OC00ZDA2LWI2YTctOWI4YjRkMGE0YzMzIiwgInJvbGUiOiAi
YmFzZV9hZ2VudCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyMCwgIm1heF9ycG0i
OiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8tbWluaSIs
ICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlvbj8iOiBm
YWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfV1K/wEKCmNyZXdf
dGFza3MS8AEK7QFbeyJrZXkiOiAiMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2YTEzZDIiLCAi
aWQiOiAiOTJiZDIzMWYtYzAxMC00ZDI3LWIxNGYtZjE5NjEyZTBmZTkzIiwgImFzeW5jX2V4ZWN1
dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJiYXNl
X2FnZW50IiwgImFnZW50X2tleSI6ICJhZDE1MzE2MWM1YzVhODU2YWEwZDA2YjI0OWM0YzY0YSIs
ICJ0b29sc19uYW1lcyI6IFtdfV16AhgBhQEAAQAAEo4CChC22Au0eMkAAjV6cfU1NrNIEggxb1Bq
Xnll/ioMVGFzayBDcmVhdGVkMAE5IOJaK4QZIRhBwG5bK4QZIRhKLgoIY3Jld19rZXkSIgogZTU4
MDcwMWQ1MmViNjVhZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jld19pZBImCiQwMTc2NDllYy1hMGUx
LTQzNjEtYWU2OC1jMDU3YTdkYzljMjlKLgoIdGFza19rZXkSIgogMWIxNWVmMjM5MTViMjc1NWU4
OWEwZWMzYjI2YTEzZDJKMQoHdGFza19pZBImCiQ5MmJkMjMxZi1jMDEwLTRkMjctYjE0Zi1mMTk2
MTJlMGZlOTN6AhgBhQEAAQAAEqQHChC63jCLGR8RP8RmYiHrdNVeEggZ39ffmGm5xyoMQ3JldyBD
cmVhdGVkMAE5GFEe04QZIRhBELEq04QZIRhKGwoOY3Jld2FpX3ZlcnNpb24SCQoHMC4xMDAuMEoa
Cg5weXRob25fdmVyc2lvbhIICgYzLjEyLjhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmViNjVh
ZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jld19pZBImCiQ5MTY4YmQxNC0yN2Q2LTQ3NWMtODljOC01
NjJjOTAyMGIxOTBKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkS
AhAAShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMS
AhgBStECCgtjcmV3X2FnZW50cxLBAgq+Alt7ImtleSI6ICJhZDE1MzE2MWM1YzVhODU2YWEwZDA2
YjI0OWM0YzY0YSIsICJpZCI6ICI4ZTc3ODI3ZC03ZjY4LTRkMDYtYjZhNy05YjhiNGQwYTRjMzMi
LCAicm9sZSI6ICJiYXNlX2FnZW50IiwgInZlcmJvc2U/IjogZmFsc2UsICJtYXhfaXRlciI6IDIw
LCAibWF4X3JwbSI6IG51bGwsICJmdW5jdGlvbl9jYWxsaW5nX2xsbSI6ICIiLCAibGxtIjogImdw
dC00by1taW5pIiwgImRlbGVnYXRpb25fZW5hYmxlZD8iOiBmYWxzZSwgImFsbG93X2NvZGVfZXhl
Y3V0aW9uPyI6IGZhbHNlLCAibWF4X3JldHJ5X2xpbWl0IjogMiwgInRvb2xzX25hbWVzIjogW119
XUr/AQoKY3Jld190YXNrcxLwAQrtAVt7ImtleSI6ICIxYjE1ZWYyMzkxNWIyNzU1ZTg5YTBlYzNi
MjZhMTNkMiIsICJpZCI6ICI5MmJkMjMxZi1jMDEwLTRkMjctYjE0Zi1mMTk2MTJlMGZlOTMiLCAi
YXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiaHVtYW5faW5wdXQ/IjogZmFsc2UsICJhZ2VudF9y
b2xlIjogImJhc2VfYWdlbnQiLCAiYWdlbnRfa2V5IjogImFkMTUzMTYxYzVjNWE4NTZhYTBkMDZi
MjQ5YzRjNjRhIiwgInRvb2xzX25hbWVzIjogW119XXoCGAGFAQABAAASjgIKEOo6FGs7r9hHrN+f
qhMTUysSCJgbYV+vQMbCKgxUYXNrIENyZWF0ZWQwATlAxjrThBkhGEEYIDvThBkhGEouCghjcmV3
X2tleRIiCiBlNTgwNzAxZDUyZWI2NWFmZjI0ZWVmZTc4Yzc0NjI4Y0oxCgdjcmV3X2lkEiYKJDkx
NjhiZDE0LTI3ZDYtNDc1Yy04OWM4LTU2MmM5MDIwYjE5MEouCgh0YXNrX2tleRIiCiAxYjE1ZWYy
MzkxNWIyNzU1ZTg5YTBlYzNiMjZhMTNkMkoxCgd0YXNrX2lkEiYKJDkyYmQyMzFmLWMwMTAtNGQy
Ny1iMTRmLWYxOTYxMmUwZmU5M3oCGAGFAQABAAA=
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '2480'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.27.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Tue, 04 Feb 2025 19:56:31 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Assess the quality of the task
completed based on the description, expected output, and actual results.\n\nTask
Description:\nJust say hi\n\nExpected Output:\nhi\n\nActual Output:\nhi\n\nPlease
provide:\n- Bullet points suggestions to improve future similar tasks\n- A score
from 0 to 10 evaluating on completion, quality, and overall performance- Entities
extracted from the task output, if any, their type, description, and relationships"}],
"model": "gpt-4o-mini", "tool_choice": {"type": "function", "function": {"name":
"TaskEvaluation"}}, "tools": [{"type": "function", "function": {"name": "TaskEvaluation",
"description": "Correctly extracted `TaskEvaluation` with all the required parameters
with correct types", "parameters": {"$defs": {"Entity": {"properties": {"name":
{"description": "The name of the entity.", "title": "Name", "type": "string"},
"type": {"description": "The type of the entity.", "title": "Type", "type":
"string"}, "description": {"description": "Description of the entity.", "title":
"Description", "type": "string"}, "relationships": {"description": "Relationships
of the entity.", "items": {"type": "string"}, "title": "Relationships", "type":
"array"}}, "required": ["name", "type", "description", "relationships"], "title":
"Entity", "type": "object"}}, "properties": {"suggestions": {"description":
"Suggestions to improve future similar tasks.", "items": {"type": "string"},
"title": "Suggestions", "type": "array"}, "quality": {"description": "A score
from 0 to 10 evaluating on completion, quality, and overall performance, all
taking into account the task description, expected output, and the result of
the task.", "title": "Quality", "type": "number"}, "entities": {"description":
"Entities extracted from the task output.", "items": {"$ref": "#/$defs/Entity"},
"title": "Entities", "type": "array"}}, "required": ["entities", "quality",
"suggestions"], "type": "object"}}}]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1962'
content-type:
- application/json
cookie:
- __cf_bm=4s6sWmJ49B9F_wNc1STtdZF1nikfl6uN9_ov3Xzfa8U-1738698987-1.0.1.1-lmbRRS1MHrDbnU93Gh16CP3qNczxxIrQnyBU7vpHSwNf6PdmuWOHKd1mkl5SBx6rg7p1NLaNUMyqDDcE0Mvjzw;
_cfuvid=Cl48aI8.jSRja0Pqr6Jrh3mAnigd4rDn6lhGicyjMPY-1738698987673-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AxJK3bJiyqGhPeqdCcCjoeNavGHrR\",\n \"object\":
\"chat.completion\",\n \"created\": 1738698991,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_uAFkclWHIRqgrXFrQFcEoUIS\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"TaskEvaluation\",\n
\ \"arguments\": \"{\\\"suggestions\\\":[\\\"Include additional
context for the greeting to make it more meaningful.\\\",\\\"Specify if you
want a casual or formal tone for greetings.\\\",\\\"Provide examples of variations
of the greeting if necessary.\\\"],\\\"quality\\\":10,\\\"entities\\\":[],\\\"relationships\\\":[]}\"\n
\ }\n }\n ],\n \"refusal\": null\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 273,\n \"completion_tokens\": 50,\n
\ \"total_tokens\": 323,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_bd83329f63\"\n}\n"
headers:
CF-RAY:
- 90cd3973589f1698-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Feb 2025 19:56:32 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1408'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999876'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_519fd27ca3d5da4d541c4331654e0520
http_version: HTTP/1.1
status_code: 200
version: 1
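The cassette above also records a POST to telemetry.crewai.com:4319/v1/traces with an application/x-protobuf body sent by OTel-OTLP-Exporter-Python, i.e. an OTLP trace export; the base64 blob visibly contains span names such as "Crew Created" and "Task Created". A hedged sketch for decoding it, assuming the opentelemetry-proto package is installed:

import base64
from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import (
    ExportTraceServiceRequest,
)

b64_blob = "..."  # paste the full !!binary scalar from the request body above
req = ExportTraceServiceRequest.FromString(base64.b64decode(b64_blob))
for resource_spans in req.resource_spans:
    for scope_spans in resource_spans.scope_spans:
        for span in scope_spans.spans:
            print(span.name)  # e.g. "Crew Created", "Task Created"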

File diff suppressed because one or more lines are too long


@@ -1,245 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
final answer: hi\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '836'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AxJIrSWAFqDEsNtLRhcM8vMHO9Ejw\",\n \"object\":
\"chat.completion\",\n \"created\": 1738698917,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-RAY:
- 90cd37a83f5f176a-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Feb 2025 19:55:18 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=rKQWp4fbAvcCp4rasEN6DqiTjQfiWYpLfjcLpWcmzi0-1738698918-1.0.1.1-qlcCSdBY3KWbzVms0eLtz5ub5SSLGs_sRLxTdNhDk_purQuz9k6EFp8PHJfN3aP_sLnuyKnFlppM3.2k_dCtPQ;
path=/; expires=Tue, 04-Feb-25 20:25:18 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=Oi91zDXvjWohBYXSVqK4hFsq3_GZePEIIbi7b7wrjcI-1738698918130-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '894'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999810'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_864253996bbc0f797f9a2c1b9247a0d5
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Assess the quality of the task
completed based on the description, expected output, and actual results.\n\nTask
Description:\nJust say hi\n\nExpected Output:\nhi\n\nActual Output:\nhi\n\nPlease
provide:\n- Bullet points suggestions to improve future similar tasks\n- A score
from 0 to 10 evaluating on completion, quality, and overall performance- Entities
extracted from the task output, if any, their type, description, and relationships"}],
"model": "gpt-4o-mini", "tool_choice": {"type": "function", "function": {"name":
"TaskEvaluation"}}, "tools": [{"type": "function", "function": {"name": "TaskEvaluation",
"description": "Correctly extracted `TaskEvaluation` with all the required parameters
with correct types", "parameters": {"$defs": {"Entity": {"properties": {"name":
{"description": "The name of the entity.", "title": "Name", "type": "string"},
"type": {"description": "The type of the entity.", "title": "Type", "type":
"string"}, "description": {"description": "Description of the entity.", "title":
"Description", "type": "string"}, "relationships": {"description": "Relationships
of the entity.", "items": {"type": "string"}, "title": "Relationships", "type":
"array"}}, "required": ["name", "type", "description", "relationships"], "title":
"Entity", "type": "object"}}, "properties": {"suggestions": {"description":
"Suggestions to improve future similar tasks.", "items": {"type": "string"},
"title": "Suggestions", "type": "array"}, "quality": {"description": "A score
from 0 to 10 evaluating on completion, quality, and overall performance, all
taking into account the task description, expected output, and the result of
the task.", "title": "Quality", "type": "number"}, "entities": {"description":
"Entities extracted from the task output.", "items": {"$ref": "#/$defs/Entity"},
"title": "Entities", "type": "array"}}, "required": ["entities", "quality",
"suggestions"], "type": "object"}}}]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1962'
content-type:
- application/json
cookie:
- __cf_bm=rKQWp4fbAvcCp4rasEN6DqiTjQfiWYpLfjcLpWcmzi0-1738698918-1.0.1.1-qlcCSdBY3KWbzVms0eLtz5ub5SSLGs_sRLxTdNhDk_purQuz9k6EFp8PHJfN3aP_sLnuyKnFlppM3.2k_dCtPQ;
_cfuvid=Oi91zDXvjWohBYXSVqK4hFsq3_GZePEIIbi7b7wrjcI-1738698918130-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AxJIsVEppA04iGQh0k6sanKnVObrO\",\n \"object\":
\"chat.completion\",\n \"created\": 1738698918,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_AQ3iizjGWjEvk1SmhGCzjbf1\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"TaskEvaluation\",\n
\ \"arguments\": \"{\\\"suggestions\\\":[\\\"Provide context for
the greeting, like a specific scenario or recipient.\\\",\\\"Encourage responses
or follow-ups to promote engagement.\\\",\\\"Specify the tone or formality of
the greeting, if relevant.\\\"],\\\"quality\\\":10,\\\"entities\\\":[{\\\"name\\\":\\\"hi\\\",\\\"type\\\":\\\"greeting\\\",\\\"description\\\":\\\"A
common informal expression used to initiate conversation or acknowledge someone.\\\",\\\"relationships\\\":[\\\"used
in conversation\\\",\\\"expresses friendliness\\\"]}]}\"\n }\n }\n
\ ],\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
273,\n \"completion_tokens\": 84,\n \"total_tokens\": 357,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_bd83329f63\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 90cd37aec8c8176a-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Feb 2025 19:55:21 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '3269'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999876'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_e6e67a3f5c6f2d48e0351cdce95edd97
http_version: HTTP/1.1
status_code: 200
version: 1


@@ -1,243 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
final answer: hi\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '836'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AxJJzafmayYpGTsTAWbOyZkmQJNa5\",\n \"object\":
\"chat.completion\",\n \"created\": 1738698987,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 90cd395b0e641698-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Feb 2025 19:56:27 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=4s6sWmJ49B9F_wNc1STtdZF1nikfl6uN9_ov3Xzfa8U-1738698987-1.0.1.1-lmbRRS1MHrDbnU93Gh16CP3qNczxxIrQnyBU7vpHSwNf6PdmuWOHKd1mkl5SBx6rg7p1NLaNUMyqDDcE0Mvjzw;
path=/; expires=Tue, 04-Feb-25 20:26:27 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=Cl48aI8.jSRja0Pqr6Jrh3mAnigd4rDn6lhGicyjMPY-1738698987673-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '839'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999810'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_577b484a927b455c40ed80f9fd4d9106
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Assess the quality of the task
completed based on the description, expected output, and actual results.\n\nTask
Description:\nJust say hi\n\nExpected Output:\nhi\n\nActual Output:\nhi\n\nPlease
provide:\n- Bullet points suggestions to improve future similar tasks\n- A score
from 0 to 10 evaluating on completion, quality, and overall performance- Entities
extracted from the task output, if any, their type, description, and relationships"}],
"model": "gpt-4o-mini", "tool_choice": {"type": "function", "function": {"name":
"TaskEvaluation"}}, "tools": [{"type": "function", "function": {"name": "TaskEvaluation",
"description": "Correctly extracted `TaskEvaluation` with all the required parameters
with correct types", "parameters": {"$defs": {"Entity": {"properties": {"name":
{"description": "The name of the entity.", "title": "Name", "type": "string"},
"type": {"description": "The type of the entity.", "title": "Type", "type":
"string"}, "description": {"description": "Description of the entity.", "title":
"Description", "type": "string"}, "relationships": {"description": "Relationships
of the entity.", "items": {"type": "string"}, "title": "Relationships", "type":
"array"}}, "required": ["name", "type", "description", "relationships"], "title":
"Entity", "type": "object"}}, "properties": {"suggestions": {"description":
"Suggestions to improve future similar tasks.", "items": {"type": "string"},
"title": "Suggestions", "type": "array"}, "quality": {"description": "A score
from 0 to 10 evaluating on completion, quality, and overall performance, all
taking into account the task description, expected output, and the result of
the task.", "title": "Quality", "type": "number"}, "entities": {"description":
"Entities extracted from the task output.", "items": {"$ref": "#/$defs/Entity"},
"title": "Entities", "type": "array"}}, "required": ["entities", "quality",
"suggestions"], "type": "object"}}}]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1962'
content-type:
- application/json
cookie:
- __cf_bm=4s6sWmJ49B9F_wNc1STtdZF1nikfl6uN9_ov3Xzfa8U-1738698987-1.0.1.1-lmbRRS1MHrDbnU93Gh16CP3qNczxxIrQnyBU7vpHSwNf6PdmuWOHKd1mkl5SBx6rg7p1NLaNUMyqDDcE0Mvjzw;
_cfuvid=Cl48aI8.jSRja0Pqr6Jrh3mAnigd4rDn6lhGicyjMPY-1738698987673-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AxJJz10KP7iadNPdKsbcsvHBa7cic\",\n \"object\":
\"chat.completion\",\n \"created\": 1738698987,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_czeHQgy5eiOVa0zlrtcfwepe\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"TaskEvaluation\",\n
\ \"arguments\": \"{\\\"suggestions\\\":[\\\"Provide more context
or details for similar tasks to enhance output expectations.\\\",\\\"Encourage
creativity in responses for simple tasks to engage users more effectively.\\\"],\\\"quality\\\":10,\\\"entities\\\":[]
}\"\n }\n }\n ],\n \"refusal\": null\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 273,\n \"completion_tokens\": 40,\n
\ \"total_tokens\": 313,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_bd83329f63\"\n}\n"
headers:
CF-RAY:
- 90cd39615b281698-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Feb 2025 19:56:29 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1411'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999876'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_3e717a80c7d9c5ea19893dd990aaae26
http_version: HTTP/1.1
status_code: 200
version: 1


@@ -1,245 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
final answer: hi\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '836'
content-type:
- application/json
cookie:
- __cf_bm=4s6sWmJ49B9F_wNc1STtdZF1nikfl6uN9_ov3Xzfa8U-1738698987-1.0.1.1-lmbRRS1MHrDbnU93Gh16CP3qNczxxIrQnyBU7vpHSwNf6PdmuWOHKd1mkl5SBx6rg7p1NLaNUMyqDDcE0Mvjzw;
_cfuvid=Cl48aI8.jSRja0Pqr6Jrh3mAnigd4rDn6lhGicyjMPY-1738698987673-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AxJiiHEQwIXsiG0Sd5wofcuhxVbo9\",\n \"object\":
\"chat.completion\",\n \"created\": 1738700520,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-RAY:
- 90cd5ecd0f7667ee-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Feb 2025 20:22:01 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=nedOdWE1YnKQYt1kSbrcA.zhwa3bZDzmZqTOjZYER0c-1738700521-1.0.1.1-xQk9iXOvqvyXNhkIOgc8Ws2WYcT1mJFkDCvCC8xA5joFD8QfNrBIAr_Qs6sIxt2EzXyeFwBA6gA8ZgWApCHx0Q;
path=/; expires=Tue, 04-Feb-25 20:52:01 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '450'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999810'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_10eaafc81640a98a0a4789d270dd94d9
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Assess the quality of the task
completed based on the description, expected output, and actual results.\n\nTask
Description:\nJust say hi\n\nExpected Output:\nhi\n\nActual Output:\nhi\n\nPlease
provide:\n- Bullet points suggestions to improve future similar tasks\n- A score
from 0 to 10 evaluating on completion, quality, and overall performance- Entities
extracted from the task output, if any, their type, description, and relationships"}],
"model": "gpt-4o-mini", "tool_choice": {"type": "function", "function": {"name":
"TaskEvaluation"}}, "tools": [{"type": "function", "function": {"name": "TaskEvaluation",
"description": "Correctly extracted `TaskEvaluation` with all the required parameters
with correct types", "parameters": {"$defs": {"Entity": {"properties": {"name":
{"description": "The name of the entity.", "title": "Name", "type": "string"},
"type": {"description": "The type of the entity.", "title": "Type", "type":
"string"}, "description": {"description": "Description of the entity.", "title":
"Description", "type": "string"}, "relationships": {"description": "Relationships
of the entity.", "items": {"type": "string"}, "title": "Relationships", "type":
"array"}}, "required": ["name", "type", "description", "relationships"], "title":
"Entity", "type": "object"}}, "properties": {"suggestions": {"description":
"Suggestions to improve future similar tasks.", "items": {"type": "string"},
"title": "Suggestions", "type": "array"}, "quality": {"description": "A score
from 0 to 10 evaluating on completion, quality, and overall performance, all
taking into account the task description, expected output, and the result of
the task.", "title": "Quality", "type": "number"}, "entities": {"description":
"Entities extracted from the task output.", "items": {"$ref": "#/$defs/Entity"},
"title": "Entities", "type": "array"}}, "required": ["entities", "quality",
"suggestions"], "type": "object"}}}]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1962'
content-type:
- application/json
cookie:
- __cf_bm=nedOdWE1YnKQYt1kSbrcA.zhwa3bZDzmZqTOjZYER0c-1738700521-1.0.1.1-xQk9iXOvqvyXNhkIOgc8Ws2WYcT1mJFkDCvCC8xA5joFD8QfNrBIAr_Qs6sIxt2EzXyeFwBA6gA8ZgWApCHx0Q;
_cfuvid=Cl48aI8.jSRja0Pqr6Jrh3mAnigd4rDn6lhGicyjMPY-1738698987673-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AxJijOhk12Ua6lS23IwtZTachfjq9\",\n \"object\":
\"chat.completion\",\n \"created\": 1738700521,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_DSteeMHHPf5RanJb8qjCo4qx\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"TaskEvaluation\",\n
\ \"arguments\": \"{\\\"suggestions\\\":[\\\"Consider adding context
for the greeting to make it more engaging.\\\",\\\"Specify if any additional
information or tone is desired in the greeting.\\\"],\\\"quality\\\":10,\\\"entities\\\":[{\\\"name\\\":\\\"greeting\\\",\\\"type\\\":\\\"text\\\",\\\"description\\\":\\\"A
simple greeting phrase\\\",\\\"relationships\\\":[\\\"is a\\\",\\\"is part of
a conversation\\\"]}]}\"\n }\n }\n ],\n \"refusal\":
null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 273,\n \"completion_tokens\":
67,\n \"total_tokens\": 340,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_bd83329f63\"\n}\n"
headers:
CF-RAY:
- 90cd5ed20cb267ee-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Feb 2025 20:22:02 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1624'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999876'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_4ee944acdd3928afbf6c5562403b064a
http_version: HTTP/1.1
status_code: 200
version: 1


@@ -1,114 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
final answer: hi\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '836'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AzpkZLpCyjKT5d6Udfx4zAme2sOMy\",\n \"object\":
\"chat.completion\",\n \"created\": 1739300299,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-RAY:
- 910691d3ab90ebef-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 11 Feb 2025 18:58:20 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=MOH5EY6n3p8JKY53.yz7qzLuLYsEB8QdQXH09loUMBM-1739300300-1.0.1.1-hjb4mk04sMygPFhoFyiySKZSqB_fN5PbhbOyn.kipa3.eLvk7EtriDyjvGkBFIAV13DYnc08BfF_l2kxdx9hfQ;
path=/; expires=Tue, 11-Feb-25 19:28:20 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=uu.cEiV.FfgvSvCdKOooDYJWrwjVEuFeGdQodijGUUI-1739300300232-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1357'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999810'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_2277503f851195e7d7a43b66eb044454
http_version: HTTP/1.1
status_code: 200
version: 1

File diff suppressed because one or more lines are too long


@@ -1,103 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "Hello, how are you?"}], "model":
"gpt-4o-mini", "stop": []}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '102'
content-type:
- application/json
cookie:
- _cfuvid=IY8ppO70AMHr2skDSUsGh71zqHHdCQCZ3OvkPi26NBc-1740424913267-0.0.1.1-604800000;
__cf_bm=fU6K5KZoDmgcEuF8_yWAYKUO5fKHh6q5.wDPnna393g-1740424913-1.0.1.1-2iOaq3JVGWs439V0HxJee0IC9HdJm7dPkeJorD.AGw0YwkngRPM8rrTzn_7ht1BkbOauEezj.wPKcBz18gIYUg
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-B4YLA2SrC2rwdVQ3U87G5a0P5lsLw\",\n \"object\":
\"chat.completion\",\n \"created\": 1740425016,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Hello! I'm just a computer program, so
I don't have feelings, but I'm here and ready to help you. How can I assist
you today?\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
13,\n \"completion_tokens\": 30,\n \"total_tokens\": 43,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_709714d124\"\n}\n"
headers:
CF-RAY:
- 9171d4c0ed44236e-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 24 Feb 2025 19:23:38 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1954'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999978'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_ea2703502b8827e4297cd2a7bae9d9c8
http_version: HTTP/1.1
status_code: 200
version: 1

@@ -1,108 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "Hello, how are you?"}], "model":
"gpt-4o-mini", "stop": []}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '102'
content-type:
- application/json
cookie:
- _cfuvid=GefCcEtb_Gem93E4a9Hvt3Xyof1YQZVJAXBb9I6pEUs-1739398417375-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-B4YJU8IWKGyBQtAyPDRd3SFI2flYR\",\n \"object\":
\"chat.completion\",\n \"created\": 1740424912,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Hello! I'm just a computer program, so
I don't have feelings, but I'm here and ready to help you. How can I assist
you today?\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
13,\n \"completion_tokens\": 30,\n \"total_tokens\": 43,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_7fcd609668\"\n}\n"
headers:
CF-RAY:
- 9171d230d8ed7ae0-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 24 Feb 2025 19:21:53 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=fU6K5KZoDmgcEuF8_yWAYKUO5fKHh6q5.wDPnna393g-1740424913-1.0.1.1-2iOaq3JVGWs439V0HxJee0IC9HdJm7dPkeJorD.AGw0YwkngRPM8rrTzn_7ht1BkbOauEezj.wPKcBz18gIYUg;
path=/; expires=Mon, 24-Feb-25 19:51:53 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=IY8ppO70AMHr2skDSUsGh71zqHHdCQCZ3OvkPi26NBc-1740424913267-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '993'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999978'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_d9c4d49185e97b1797061efc1e55d811
http_version: HTTP/1.1
status_code: 200
version: 1

@@ -1,171 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Speaker. You are a
helpful assistant that just says hi\nYour personal goal is: Just say hi\n\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "say hi!"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"],
"stream": true, "stream_options": {"include_usage": true}}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '602'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.78.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.78.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
I"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
now"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
can"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
give"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
a"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
great"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" \n"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
Hi"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null}
data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[],"usage":{"prompt_tokens":99,"completion_tokens":15,"total_tokens":114,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}}}
data: [DONE]
'
headers:
CF-RAY:
- 9580b92adce5e838-GRU
Connection:
- keep-alive
Content-Type:
- text/event-stream; charset=utf-8
Date:
- Mon, 30 Jun 2025 21:23:12 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=nhFmL5HNobQWdbf2Sd9Z8X9ad5zXKG7Ln7MlzuiuwP8-1751318592-1.0.1.1-5qDyF6nVC5d8PDerEmHSOgyWEYdzMdgyFRXqgiJB3FSyWWnvzL4PyVp6LGx9z0P5iTX8PNbxfUOEOYX.7bFaK6p.CyxLaXK7WpnQ3zeasG8;
path=/; expires=Mon, 30-Jun-25 21:53:12 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=APKo781sOKEk.HlN5nFBT1Mkid8Lj04kw6JPleI78bU-1751318592001-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '321'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '326'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999896'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_0b0f668953604810c182b1e83e9709fe
status:
code: 200
message: OK
version: 1
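
The body above is a server-sent-events recording: each `data:` line carries one `chat.completion.chunk`, the final chunk holds only `usage` because the request set `stream_options: {"include_usage": true}`, and `data: [DONE]` terminates the stream. A minimal sketch of consuming such a stream with the OpenAI Python client (the model and prompt mirror the recording; error handling is omitted):

    from openai import OpenAI

    client = OpenAI()
    stream = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "say hi!"}],
        stream=True,
        stream_options={"include_usage": True},
    )
    for chunk in stream:
        # The final usage-only chunk has an empty `choices` list.
        if chunk.choices and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")
        if chunk.usage:
            print(f"\n[{chunk.usage.total_tokens} tokens]")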

@@ -1,118 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Just say hi\n\nThis is the expected criteria for
your final answer: hi\nyou MUST return the actual complete content as the final
answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '838'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.78.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.78.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xSTW/bMAy9+1cQOsdDnI9m861ZMGAbBuyyHbYWBiMxtjaZEiS5aVHkvw+y09jd
OqAXA+bje3qP5GMGILQSJQjZYJStM/nW7j6z+vb9R739+mm707vj7sv2gffq/v2VEbPEsPtfJOMT
6420rTMUteUBlp4wUlItNutiOd8s15seaK0ik2i1i/nK5q1mnS/mi1U+3+TF2zO7sVpSECX8zAAA
Hvtv8smK7kUJ89lTpaUQsCZRXpoAhLcmVQSGoENEjmI2gtJyJO6tfwS2R5DIUOs7AoQ62QbkcCQP
cMMfNKOB6/6/hEZPdTwduoApC3fGTABkthHTLPoEt2fkdPFsbO283Ye/qOKgWYem8oTBcvIXonWi
R08ZwG0/m+5ZXOG8bV2sov1N/XPFVTHoiXElE3RxBqONaCb1zXL2gl6lKKI2YTJdIVE2pEbquArs
lLYTIJuk/tfNS9pDcs31a+RHQEpykVTlPCktnyce2zyli/1f22XKvWERyN9pSVXU5NMmFB2wM8Md
ifAQIrXVQXNN3nk9HNPBVcsVrldI75ZSZKfsDwAAAP//AwBulbOoWgMAAA==
headers:
CF-RAY:
- 957fa6e91a22023d-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 30 Jun 2025 18:15:58 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=9WXNY0u6p0Nlyb1G36cXHDgtwb1538JzaUNoS4tgrpo-1751307358-1.0.1.1-BAvg6Fgqsv3ITFxrC3z3E42AqgSZcGq4Gr1Wrjx56TOsljYynqCePNzQ79oAncT9KXehFnUMxA6JSf2lAfQOeSLVREY3_P6GjPkbcwIsVXw;
path=/; expires=Mon, 30-Jun-25 18:45:58 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=N5kr6p8e26f8scPW5s2uVOatzh2RYjlQb13eQUBsrts-1751307358295-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '308'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '310'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999823'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_78bb0375ac6e0939c8e05f66869e0137
status:
code: 200
message: OK
version: 1
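
Unlike the plain-text cassettes, this response body is stored as a YAML `!!binary` scalar: base64 text whose decoded bytes are the gzip-compressed JSON, matching the recorded `Content-Encoding: gzip` header. A minimal sketch for inspecting such a body, assuming the full base64 block is pasted in verbatim:

    import base64
    import gzip
    import json

    def decode_cassette_body(b64_text: str) -> dict:
        """Decode a vcrpy `!!binary` body: base64 -> gunzip -> JSON."""
        raw = base64.b64decode(b64_text)  # stray newlines/indentation are ignored
        return json.loads(gzip.decompress(raw))

    # Usage: paste the base64 block from the cassette above into b64_block first.
    # body = decode_cassette_body(b64_block)
    # print(body["choices"][0]["message"]["content"])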

@@ -1,170 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "Tell me a short joke"}], "model":
"gpt-3.5-turbo", "stop": [], "stream": true}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '121'
content-type:
- application/json
cookie:
- _cfuvid=IY8ppO70AMHr2skDSUsGh71zqHHdCQCZ3OvkPi26NBc-1740424913267-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.65.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.65.1
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Why"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
couldn"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"''t"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
bicycle"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
stand"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
up"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
by"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
itself"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Because"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
it"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
was"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
two"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"-t"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"ired"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
'
headers:
CF-RAY:
- 91ab1bcbad95bcda-ATL
Connection:
- keep-alive
Content-Type:
- text/event-stream; charset=utf-8
Date:
- Mon, 03 Mar 2025 18:13:34 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=Jydtg8l0yjWRI2vKmejdq.C1W.sasIwEbTrV2rUt6V0-1741025614-1.0.1.1-Af3gmq.j2ecn9QEa3aCVY09QU4VqoW2GTk9AjvzPA.jyAZlwhJd4paniSt3kSusH0tryW03iC8uaX826hb2xzapgcfSm6Jdh_eWh_BMCh_8;
path=/; expires=Mon, 03-Mar-25 18:43:34 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=5wzaJSCvT1p1Eazad55wDvp1JsgxrlghhmmU9tx0fMs-1741025614868-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '127'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '50000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '49999978'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_2a2a04977ace88fdd64cf570f80c0202
status:
code: 200
message: OK
version: 1

@@ -1,110 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "Tell me a short joke"}], "model":
"gpt-4o", "stop": []}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '98'
content-type:
- application/json
cookie:
- _cfuvid=IY8ppO70AMHr2skDSUsGh71zqHHdCQCZ3OvkPi26NBc-1740424913267-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-BHJ51XXwVMlREjnoe4n4fiA0Ynkab\",\n \"object\":
\"chat.completion\",\n \"created\": 1743464619,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Why don't skeletons fight each other?\\n\\nThey
don't have the guts.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 12,\n \"completion_tokens\":
15,\n \"total_tokens\": 27,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_de57b65c90\"\n}\n"
headers:
CF-RAY:
- 9293b5d18d3f9450-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 31 Mar 2025 23:43:40 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=.esGqWXxYzwXyi6048Ocr_NZH1IMsgTTuNN0drcWtSI-1743464620-1.0.1.1-YroBLb5o02zaPiXdGGE3YNO3x56olTA3JQos540j.l2aoeOzHIMVubkp2uSSTBHefPb7OPDKFzjpRXoAVof9jgVUDL6C89g4Zu1_SXtWxEE;
path=/; expires=Tue, 01-Apr-25 00:13:40 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=jrsyZSqr3xLO_beX7x7VEel62eQFToYHZgRqR0eqVNs-1743464620187-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '275'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '50000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '49999'
x-ratelimit-remaining-tokens:
- '149999993'
x-ratelimit-reset-requests:
- 1ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_09cc97e978a7a4b57a1c9ebc9c688fb8
http_version: HTTP/1.1
status_code: 200
version: 1

@@ -1,111 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
final answer: hi\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '836'
content-type:
- application/json
cookie:
- _cfuvid=gsNyCo_jrDOolzf8SXHDaxQQrEgdR3jgv4OAH8MziDE-1739291824699-0.0.1.1-604800000;
__cf_bm=cRijYuylMGzRGxv3udQL5PhHOR5mRN_9_eLLwevlM_o-1739299455-1.0.1.1-Fszr_Msw0B1.IBMkiunP.VF2ilul1YGZZV8TqMcO3Q2SHvSlqfgm9NHgns1bJrm0wWRvHiCE7wdZfUAOx7T3Lg
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AzpWx6pctOvzu6xsbyg0XfSAc0q9V\",\n \"object\":
\"chat.completion\",\n \"created\": 1739299455,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 91067d3ddc68fa16-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 11 Feb 2025 18:44:16 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '703'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999810'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_89222c00e4608e8557a135e91b223556
http_version: HTTP/1.1
status_code: 200
version: 1

@@ -1,114 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
final answer: hi\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '836'
content-type:
- application/json
cookie:
- _cfuvid=gsNyCo_jrDOolzf8SXHDaxQQrEgdR3jgv4OAH8MziDE-1739291824699-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AzpWxLzAcRzigZuIGmjP3ckQgxAom\",\n \"object\":
\"chat.completion\",\n \"created\": 1739299455,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-RAY:
- 91067d389e90fa16-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 11 Feb 2025 18:44:15 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=cRijYuylMGzRGxv3udQL5PhHOR5mRN_9_eLLwevlM_o-1739299455-1.0.1.1-Fszr_Msw0B1.IBMkiunP.VF2ilul1YGZZV8TqMcO3Q2SHvSlqfgm9NHgns1bJrm0wWRvHiCE7wdZfUAOx7T3Lg;
path=/; expires=Tue, 11-Feb-25 19:14:15 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '716'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999810'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_ef807dc3223d40332aae8a313e96ef3a
http_version: HTTP/1.1
status_code: 200
version: 1

@@ -1,165 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are TestAgent. You are
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Just say hi\n\nThis is the expected criteria for
your final answer: hi\nyou MUST return the actual complete content as the final
answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream": true, "stream_options":
{"include_usage": true}}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '896'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.78.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.78.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
now"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
can"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
give"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
a"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
great"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" \n"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
hi"},"logprobs":null,"finish_reason":null}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null}
data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[],"usage":{"prompt_tokens":161,"completion_tokens":12,"total_tokens":173,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}}}
data: [DONE]
'
headers:
CF-RAY:
- 95806f8e3fc2f213-GRU
Connection:
- keep-alive
Content-Type:
- text/event-stream; charset=utf-8
Date:
- Mon, 30 Jun 2025 20:32:56 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=dxb.Rn1CsTQLjW9eU0KWonEuID9KkkVRJ1FaBBsW5gQ-1751315576-1.0.1.1-R7bLnCfrjJwtHYzbKEE9in7ilYfymelWgYg1OcPqSEllAFA9_R2ctsY0f7Mrv7i0dXaynAooDpLs9hGIzfgyBR9EgkRjqoaHbByPXjxy_5s;
path=/; expires=Mon, 30-Jun-25 21:02:56 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=MwdjLsfFXJDWzbJseVfA4MIpVAqLa7envAu7EAkdK4o-1751315576696-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '238'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '241'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999824'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_394b055696dfcdc221b5ecd0fba49e97
status:
code: 200
message: OK
version: 1

File diff suppressed because it is too large

@@ -1,512 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nYou
ONLY have access to the following tools, and should NEVER make up tools that
are not listed here:\n\nTool Name: say_hi\nTool Arguments: {}\nTool Description:
Say hi\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [say_hi], just the name, exactly as it''s written.\nAction Input: the
input to the action, just a simple JSON object, enclosed in curly braces, using
\" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
all necessary information is gathered, return the following format:\n\n```\nThought:
I now know the final answer\nFinal Answer: the final answer to the original
input question\n```"}, {"role": "user", "content": "\nCurrent Task: Just say
hi\n\nThis is the expect criteria for your final answer: hi\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1275'
content-type:
- application/json
cookie:
- _cfuvid=efIHP1NUsh1dFewGJBu4YoBu6hhGa8vjOOKQglYQGno-1739214901306-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AzUA6kJQfpUvB4CGot4gSfAIR0foh\",\n \"object\":
\"chat.completion\",\n \"created\": 1739217314,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"you should always think about what to
do \\nAction: say_hi \\nAction Input: {} \",\n \"refusal\": null\n
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
\ ],\n \"usage\": {\n \"prompt_tokens\": 257,\n \"completion_tokens\":
19,\n \"total_tokens\": 276,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-RAY:
- 90fea7d78e1fceb9-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 10 Feb 2025 19:55:15 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=fmlg1wjOwuOwZhUUOEtL1tQYluAPumn7AHLF8s0EU2Y-1739217315-1.0.1.1-PQDvxn8TOhzaznlHjwVsqPZUzbAyJWFkvzCubfNJydTu2_AyA1cJ8hkM0khsEE4UY_xp8iPe2gSGmH1ydrDa0Q;
path=/; expires=Mon, 10-Feb-25 20:25:15 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '526'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999703'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_f6358ff0cc7a2b8d2e167ab00a40f2a4
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nYou
ONLY have access to the following tools, and should NEVER make up tools that
are not listed here:\n\nTool Name: say_hi\nTool Arguments: {}\nTool Description:
Say hi\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [say_hi], just the name, exactly as it''s written.\nAction Input: the
input to the action, just a simple JSON object, enclosed in curly braces, using
\" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
all necessary information is gathered, return the following format:\n\n```\nThought:
I now know the final answer\nFinal Answer: the final answer to the original
input question\n```"}, {"role": "user", "content": "\nCurrent Task: Just say
hi\n\nThis is the expect criteria for your final answer: hi\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}, {"role": "assistant", "content": "you
should always think about what to do \nAction: say_hi \nAction Input: {} \nObservation:
hi"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1410'
content-type:
- application/json
cookie:
- _cfuvid=efIHP1NUsh1dFewGJBu4YoBu6hhGa8vjOOKQglYQGno-1739214901306-0.0.1.1-604800000;
__cf_bm=fmlg1wjOwuOwZhUUOEtL1tQYluAPumn7AHLF8s0EU2Y-1739217315-1.0.1.1-PQDvxn8TOhzaznlHjwVsqPZUzbAyJWFkvzCubfNJydTu2_AyA1cJ8hkM0khsEE4UY_xp8iPe2gSGmH1ydrDa0Q
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AzUA7QdlQy1WZZijxNWUv25sZycg0\",\n \"object\":
\"chat.completion\",\n \"created\": 1739217315,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
Answer: hi\\n```\",\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
283,\n \"completion_tokens\": 17,\n \"total_tokens\": 300,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-RAY:
- 90fea7dc5ba6ceb9-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 10 Feb 2025 19:55:15 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '388'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999680'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_7d7c68b90b3a9c3ac6092fe17ac1185a
http_version: HTTP/1.1
status_code: 200
- request:
body: !!binary |
CoMzCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS2jIKEgoQY3Jld2FpLnRl
bGVtZXRyeRKOAgoQ2EINIGZRoXD589od63oHmBIIMfUgEWudUbIqDFRhc2sgQ3JlYXRlZDABOcjI
7lbu8CIYQZB471bu8CIYSi4KCGNyZXdfa2V5EiIKIGU1ODA3MDFkNTJlYjY1YWZmMjRlZWZlNzhj
NzQ2MjhjSjEKB2NyZXdfaWQSJgokNTE4ODdiOTktY2FlMy00Yjc4LWJjMGEtMDY4MmVmNWEzNGQ0
Si4KCHRhc2tfa2V5EiIKIDFiMTVlZjIzOTE1YjI3NTVlODlhMGVjM2IyNmExM2QySjEKB3Rhc2tf
aWQSJgokMzlmMDlmMWUtOTJmOC00ZGJiLTgzNDAtNjU2ZmVkMDk3ZjM0egIYAYUBAAEAABKkBwoQ
RzhWoF6ewSTS/qUc9yeFRhIIM3SNZCwjz5AqDENyZXcgQ3JlYXRlZDABOQjrGlru8CIYQdgbKVru
8CIYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTAwLjBKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4x
Mi44Si4KCGNyZXdfa2V5EiIKIGU1ODA3MDFkNTJlYjY1YWZmMjRlZWZlNzhjNzQ2MjhjSjEKB2Ny
ZXdfaWQSJgokYzk4ODFkY2YtMmM0MS00ZjRlLTgzMjctNjJjYjFhYjJkOTg4ShwKDGNyZXdfcHJv
Y2VzcxIMCgpzZXF1ZW50aWFsShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90
YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrRAgoLY3Jld19hZ2VudHMSwQIK
vgJbeyJrZXkiOiAiYWQxNTMxNjFjNWM1YTg1NmFhMGQwNmIyNDljNGM2NGEiLCAiaWQiOiAiNTU2
NzJiMDgtOTU4ZC00MjljLWE3ZTctY2ZlN2U4Y2MwOGZkIiwgInJvbGUiOiAiYmFzZV9hZ2VudCIs
ICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyMCwgIm1heF9ycG0iOiBudWxsLCAiZnVu
Y3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8tbWluaSIsICJkZWxlZ2F0aW9u
X2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9y
ZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfV1K/wEKCmNyZXdfdGFza3MS8AEK7QFb
eyJrZXkiOiAiMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2YTEzZDIiLCAiaWQiOiAiMzlmMDlm
MWUtOTJmOC00ZGJiLTgzNDAtNjU2ZmVkMDk3ZjM0IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxz
ZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJiYXNlX2FnZW50IiwgImFn
ZW50X2tleSI6ICJhZDE1MzE2MWM1YzVhODU2YWEwZDA2YjI0OWM0YzY0YSIsICJ0b29sc19uYW1l
cyI6IFtdfV16AhgBhQEAAQAAEo4CChB8AxWkb2Uwpdc8RpyCRqw5EggJAxbgNu81XyoMVGFzayBD
cmVhdGVkMAE5+HQ8Wu7wIhhB+PE8Wu7wIhhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmViNjVh
ZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jld19pZBImCiRjOTg4MWRjZi0yYzQxLTRmNGUtODMyNy02
MmNiMWFiMmQ5ODhKLgoIdGFza19rZXkSIgogMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2YTEz
ZDJKMQoHdGFza19pZBImCiQzOWYwOWYxZS05MmY4LTRkYmItODM0MC02NTZmZWQwOTdmMzR6AhgB
hQEAAQAAEqQHChCcXvdbsgYC+gzCMrXs3LN/EgijKwJLCRIiHioMQ3JldyBDcmVhdGVkMAE5iJqz
vu7wIhhBqKC/vu7wIhhKGwoOY3Jld2FpX3ZlcnNpb24SCQoHMC4xMDAuMEoaCg5weXRob25fdmVy
c2lvbhIICgYzLjEyLjhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmViNjVhZmYyNGVlZmU3OGM3
NDYyOGNKMQoHY3Jld19pZBImCiQ2Zjk1ZWI3Yy0wOWM5LTQxOTYtYWFiYi1kOWIxNmMxMzZjODdK
HAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNyZXdf
bnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgBStECCgtjcmV3
X2FnZW50cxLBAgq+Alt7ImtleSI6ICJhZDE1MzE2MWM1YzVhODU2YWEwZDA2YjI0OWM0YzY0YSIs
ICJpZCI6ICI1NTY3MmIwOC05NThkLTQyOWMtYTdlNy1jZmU3ZThjYzA4ZmQiLCAicm9sZSI6ICJi
YXNlX2FnZW50IiwgInZlcmJvc2U/IjogZmFsc2UsICJtYXhfaXRlciI6IDIwLCAibWF4X3JwbSI6
IG51bGwsICJmdW5jdGlvbl9jYWxsaW5nX2xsbSI6ICIiLCAibGxtIjogImdwdC00by1taW5pIiwg
ImRlbGVnYXRpb25fZW5hYmxlZD8iOiBmYWxzZSwgImFsbG93X2NvZGVfZXhlY3V0aW9uPyI6IGZh
bHNlLCAibWF4X3JldHJ5X2xpbWl0IjogMiwgInRvb2xzX25hbWVzIjogW119XUr/AQoKY3Jld190
YXNrcxLwAQrtAVt7ImtleSI6ICIxYjE1ZWYyMzkxNWIyNzU1ZTg5YTBlYzNiMjZhMTNkMiIsICJp
ZCI6ICIzOWYwOWYxZS05MmY4LTRkYmItODM0MC02NTZmZWQwOTdmMzQiLCAiYXN5bmNfZXhlY3V0
aW9uPyI6IGZhbHNlLCAiaHVtYW5faW5wdXQ/IjogZmFsc2UsICJhZ2VudF9yb2xlIjogImJhc2Vf
YWdlbnQiLCAiYWdlbnRfa2V5IjogImFkMTUzMTYxYzVjNWE4NTZhYTBkMDZiMjQ5YzRjNjRhIiwg
InRvb2xzX25hbWVzIjogW119XXoCGAGFAQABAAASjgIKEExDo5nPLyHb2H8DfYjPoX4SCLEYs+24
8EenKgxUYXNrIENyZWF0ZWQwATmI4NG+7vAiGEFYZdK+7vAiGEouCghjcmV3X2tleRIiCiBlNTgw
NzAxZDUyZWI2NWFmZjI0ZWVmZTc4Yzc0NjI4Y0oxCgdjcmV3X2lkEiYKJDZmOTVlYjdjLTA5Yzkt
NDE5Ni1hYWJiLWQ5YjE2YzEzNmM4N0ouCgh0YXNrX2tleRIiCiAxYjE1ZWYyMzkxNWIyNzU1ZTg5
YTBlYzNiMjZhMTNkMkoxCgd0YXNrX2lkEiYKJDM5ZjA5ZjFlLTkyZjgtNGRiYi04MzQwLTY1NmZl
ZDA5N2YzNHoCGAGFAQABAAASpAcKEBBQzR2bcR/7woQ+VkaJ4kQSCD1LFx3SNPPPKgxDcmV3IENy
ZWF0ZWQwATlotsW/7vAiGEEgA9C/7vAiGEobCg5jcmV3YWlfdmVyc2lvbhIJCgcwLjEwMC4wShoK
DnB5dGhvbl92ZXJzaW9uEggKBjMuMTIuOEouCghjcmV3X2tleRIiCiBlNTgwNzAxZDUyZWI2NWFm
ZjI0ZWVmZTc4Yzc0NjI4Y0oxCgdjcmV3X2lkEiYKJDJiMWI2MGYzLTNlZTMtNGNjYi05MDM2LTdk
MzE4OTJiYjVkZkocCgxjcmV3X3Byb2Nlc3MSDAoKc2VxdWVudGlhbEoRCgtjcmV3X21lbW9yeRIC
EABKGgoUY3Jld19udW1iZXJfb2ZfdGFza3MSAhgBShsKFWNyZXdfbnVtYmVyX29mX2FnZW50cxIC
GAFK0QIKC2NyZXdfYWdlbnRzEsECCr4CW3sia2V5IjogImFkMTUzMTYxYzVjNWE4NTZhYTBkMDZi
MjQ5YzRjNjRhIiwgImlkIjogIjU1NjcyYjA4LTk1OGQtNDI5Yy1hN2U3LWNmZTdlOGNjMDhmZCIs
ICJyb2xlIjogImJhc2VfYWdlbnQiLCAidmVyYm9zZT8iOiBmYWxzZSwgIm1heF9pdGVyIjogMjAs
ICJtYXhfcnBtIjogbnVsbCwgImZ1bmN0aW9uX2NhbGxpbmdfbGxtIjogIiIsICJsbG0iOiAiZ3B0
LTRvLW1pbmkiLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAiYWxsb3dfY29kZV9leGVj
dXRpb24/IjogZmFsc2UsICJtYXhfcmV0cnlfbGltaXQiOiAyLCAidG9vbHNfbmFtZXMiOiBbXX1d
Sv8BCgpjcmV3X3Rhc2tzEvABCu0BW3sia2V5IjogIjFiMTVlZjIzOTE1YjI3NTVlODlhMGVjM2Iy
NmExM2QyIiwgImlkIjogIjM5ZjA5ZjFlLTkyZjgtNGRiYi04MzQwLTY1NmZlZDA5N2YzNCIsICJh
c3luY19leGVjdXRpb24/IjogZmFsc2UsICJodW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3Jv
bGUiOiAiYmFzZV9hZ2VudCIsICJhZ2VudF9rZXkiOiAiYWQxNTMxNjFjNWM1YTg1NmFhMGQwNmIy
NDljNGM2NGEiLCAidG9vbHNfbmFtZXMiOiBbXX1degIYAYUBAAEAABKOAgoQmT07KMiFRgzOOPQf
I4bJPhIIqzN+pCYM6IUqDFRhc2sgQ3JlYXRlZDABOYjr3r/u8CIYQehY37/u8CIYSi4KCGNyZXdf
a2V5EiIKIGU1ODA3MDFkNTJlYjY1YWZmMjRlZWZlNzhjNzQ2MjhjSjEKB2NyZXdfaWQSJgokMmIx
YjYwZjMtM2VlMy00Y2NiLTkwMzYtN2QzMTg5MmJiNWRmSi4KCHRhc2tfa2V5EiIKIDFiMTVlZjIz
OTE1YjI3NTVlODlhMGVjM2IyNmExM2QySjEKB3Rhc2tfaWQSJgokMzlmMDlmMWUtOTJmOC00ZGJi
LTgzNDAtNjU2ZmVkMDk3ZjM0egIYAYUBAAEAABKkBwoQE53vZNAWshkoNK1bqTvovRII83djkBUL
EbcqDENyZXcgQ3JlYXRlZDABORBBzsDu8CIYQbAU2MDu8CIYShsKDmNyZXdhaV92ZXJzaW9uEgkK
BzAuMTAwLjBKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4xMi44Si4KCGNyZXdfa2V5EiIKIGU1ODA3
MDFkNTJlYjY1YWZmMjRlZWZlNzhjNzQ2MjhjSjEKB2NyZXdfaWQSJgokNTQ0MWY0MWYtOTVjMC00
YzdkLTkxM2QtNDUxODcwY2YyZjYzShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEKC2Ny
ZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJf
b2ZfYWdlbnRzEgIYAUrRAgoLY3Jld19hZ2VudHMSwQIKvgJbeyJrZXkiOiAiYWQxNTMxNjFjNWM1
YTg1NmFhMGQwNmIyNDljNGM2NGEiLCAiaWQiOiAiNTU2NzJiMDgtOTU4ZC00MjljLWE3ZTctY2Zl
N2U4Y2MwOGZkIiwgInJvbGUiOiAiYmFzZV9hZ2VudCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4
X2l0ZXIiOiAyMCwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwg
ImxsbSI6ICJncHQtNG8tbWluaSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxv
d19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19u
YW1lcyI6IFtdfV1K/wEKCmNyZXdfdGFza3MS8AEK7QFbeyJrZXkiOiAiMWIxNWVmMjM5MTViMjc1
NWU4OWEwZWMzYjI2YTEzZDIiLCAiaWQiOiAiMzlmMDlmMWUtOTJmOC00ZGJiLTgzNDAtNjU2ZmVk
MDk3ZjM0IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNl
LCAiYWdlbnRfcm9sZSI6ICJiYXNlX2FnZW50IiwgImFnZW50X2tleSI6ICJhZDE1MzE2MWM1YzVh
ODU2YWEwZDA2YjI0OWM0YzY0YSIsICJ0b29sc19uYW1lcyI6IFtdfV16AhgBhQEAAQAAEo4CChBV
JNEz3VIdOlQM9VT3bctVEgisogN707a2AioMVGFzayBDcmVhdGVkMAE5kGbnwO7wIhhBaMDnwO7w
IhhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmViNjVhZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jl
d19pZBImCiQ1NDQxZjQxZi05NWMwLTRjN2QtOTEzZC00NTE4NzBjZjJmNjNKLgoIdGFza19rZXkS
IgogMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2YTEzZDJKMQoHdGFza19pZBImCiQzOWYwOWYx
ZS05MmY4LTRkYmItODM0MC02NTZmZWQwOTdmMzR6AhgBhQEAAQAAErQHChDA7zaLCfy56rd5t3oS
rDPZEgjYoSW3mq6WJyoMQ3JldyBDcmVhdGVkMAE5cP/5we7wIhhBIH0Dwu7wIhhKGwoOY3Jld2Fp
X3ZlcnNpb24SCQoHMC4xMDAuMEoaCg5weXRob25fdmVyc2lvbhIICgYzLjEyLjhKLgoIY3Jld19r
ZXkSIgogZTU4MDcwMWQ1MmViNjVhZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jld19pZBImCiRmNjcz
MTc1ZS04Y2Q1LTQ1ZWUtYTZiOS0xYWFjMTliODQxZWJKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVl
bnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVj
cmV3X251bWJlcl9vZl9hZ2VudHMSAhgBStkCCgtjcmV3X2FnZW50cxLJAgrGAlt7ImtleSI6ICJh
ZDE1MzE2MWM1YzVhODU2YWEwZDA2YjI0OWM0YzY0YSIsICJpZCI6ICJmMGUwMGIzZi0wZWNmLTQ2
OGQtYjdjMC0yZmJhN2I5OTc5YjMiLCAicm9sZSI6ICJiYXNlX2FnZW50IiwgInZlcmJvc2U/Ijog
ZmFsc2UsICJtYXhfaXRlciI6IDIwLCAibWF4X3JwbSI6IG51bGwsICJmdW5jdGlvbl9jYWxsaW5n
X2xsbSI6ICIiLCAibGxtIjogImdwdC00by1taW5pIiwgImRlbGVnYXRpb25fZW5hYmxlZD8iOiBm
YWxzZSwgImFsbG93X2NvZGVfZXhlY3V0aW9uPyI6IGZhbHNlLCAibWF4X3JldHJ5X2xpbWl0Ijog
MiwgInRvb2xzX25hbWVzIjogWyJzYXlfaGkiXX1dSocCCgpjcmV3X3Rhc2tzEvgBCvUBW3sia2V5
IjogIjFiMTVlZjIzOTE1YjI3NTVlODlhMGVjM2IyNmExM2QyIiwgImlkIjogImFhMGFmMmE2LTdm
MTktNDZmNi1iMjMxLTg1M2JjYzYxYzhiZiIsICJhc3luY19leGVjdXRpb24/IjogZmFsc2UsICJo
dW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAiYmFzZV9hZ2VudCIsICJhZ2VudF9r
ZXkiOiAiYWQxNTMxNjFjNWM1YTg1NmFhMGQwNmIyNDljNGM2NGEiLCAidG9vbHNfbmFtZXMiOiBb
InNheV9oaSJdfV16AhgBhQEAAQAAEo4CChBH8NUZY1Cv8sM2lfQLaEogEgiFlW7Wp7QpdyoMVGFz
ayBDcmVhdGVkMAE5MNkPwu7wIhhBUCcQwu7wIhhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmVi
NjVhZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jld19pZBImCiRmNjczMTc1ZS04Y2Q1LTQ1ZWUtYTZi
OS0xYWFjMTliODQxZWJKLgoIdGFza19rZXkSIgogMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2
YTEzZDJKMQoHdGFza19pZBImCiRhYTBhZjJhNi03ZjE5LTQ2ZjYtYjIzMS04NTNiY2M2MWM4YmZ6
AhgBhQEAAQAAEooBChCJg/wSACw+HIDy4vvYISP/EgjoC/oI/1V0cCoKVG9vbCBVc2FnZTABOWA0
ifTu8CIYQTD0lPTu8CIYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTAwLjBKFQoJdG9vbF9uYW1l
EggKBnNheV9oaUoOCghhdHRlbXB0cxICGAF6AhgBhQEAAQAA
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '6534'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.27.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Mon, 10 Feb 2025 19:55:17 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Assess the quality of the task
completed based on the description, expected output, and actual results.\n\nTask
Description:\nJust say hi\n\nExpected Output:\nhi\n\nActual Output:\nhi\n```\n\nPlease
provide:\n- Bullet points suggestions to improve future similar tasks\n- A score
from 0 to 10 evaluating on completion, quality, and overall performance- Entities
extracted from the task output, if any, their type, description, and relationships"}],
"model": "gpt-4o-mini", "tool_choice": {"type": "function", "function": {"name":
"TaskEvaluation"}}, "tools": [{"type": "function", "function": {"name": "TaskEvaluation",
"description": "Correctly extracted `TaskEvaluation` with all the required parameters
with correct types", "parameters": {"$defs": {"Entity": {"properties": {"name":
{"description": "The name of the entity.", "title": "Name", "type": "string"},
"type": {"description": "The type of the entity.", "title": "Type", "type":
"string"}, "description": {"description": "Description of the entity.", "title":
"Description", "type": "string"}, "relationships": {"description": "Relationships
of the entity.", "items": {"type": "string"}, "title": "Relationships", "type":
"array"}}, "required": ["name", "type", "description", "relationships"], "title":
"Entity", "type": "object"}}, "properties": {"suggestions": {"description":
"Suggestions to improve future similar tasks.", "items": {"type": "string"},
"title": "Suggestions", "type": "array"}, "quality": {"description": "A score
from 0 to 10 evaluating on completion, quality, and overall performance, all
taking into account the task description, expected output, and the result of
the task.", "title": "Quality", "type": "number"}, "entities": {"description":
"Entities extracted from the task output.", "items": {"$ref": "#/$defs/Entity"},
"title": "Entities", "type": "array"}}, "required": ["entities", "quality",
"suggestions"], "type": "object"}}}]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1967'
content-type:
- application/json
cookie:
- _cfuvid=efIHP1NUsh1dFewGJBu4YoBu6hhGa8vjOOKQglYQGno-1739214901306-0.0.1.1-604800000;
__cf_bm=fmlg1wjOwuOwZhUUOEtL1tQYluAPumn7AHLF8s0EU2Y-1739217315-1.0.1.1-PQDvxn8TOhzaznlHjwVsqPZUzbAyJWFkvzCubfNJydTu2_AyA1cJ8hkM0khsEE4UY_xp8iPe2gSGmH1ydrDa0Q
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AzUA8oE0A2d99i1Khpu0CI7fSgRtZ\",\n \"object\":
\"chat.completion\",\n \"created\": 1739217316,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_bk3duHRErK1qCyvWJ1uVmmGl\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"TaskEvaluation\",\n
\ \"arguments\": \"{\\\"suggestions\\\":[\\\"Provide more context
or details for similar tasks to enhance clarity.\\\",\\\"Specify desired tone
or style for the output.\\\",\\\"Consider adding more variety in tasks to keep
engagement high.\\\"],\\\"quality\\\":10,\\\"entities\\\":[{\\\"name\\\":\\\"hi\\\",\\\"type\\\":\\\"greeting\\\",\\\"description\\\":\\\"A
casual way to say hello or acknowledge someone's presence.\\\",\\\"relationships\\\":[\\\"used
as a greeting\\\",\\\"expresses friendliness\\\"]}]}\"\n }\n }\n
\ ],\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
275,\n \"completion_tokens\": 80,\n \"total_tokens\": 355,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
headers:
CF-RAY:
- 90fea7dfef41ceb9-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 10 Feb 2025 19:55:17 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1535'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999874'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_55d8eb91b4318245556b73d3f4c1e7c4
http_version: HTTP/1.1
status_code: 200
version: 1
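Cassettes like the one above are replayed, rather than re-recorded, by the deleted test files further down in this diff via pytest's VCR integration. A minimal sketch of that wiring, assuming pytest-recording and the cassette directory those tests configure; the test name and body are illustrative:

import pytest


@pytest.fixture(scope="module")
def vcr_config() -> dict:
    # Point VCR at the recorded cassettes and keep real API keys out of them.
    return {
        "cassette_library_dir": "tests/utilities/cassettes",
        "filter_headers": ["authorization"],
    }


@pytest.mark.vcr(filter_headers=["authorization"])
def test_replays_recorded_call():
    # Any HTTP request issued here is answered from the matching cassette
    # instead of hitting api.openai.com or telemetry.crewai.com.
    ...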

View File

@@ -1,88 +0,0 @@
import uuid
import pytest
from opentelemetry import baggage
from opentelemetry.context import attach, detach
from crewai.utilities.crew.crew_context import get_crew_context
from crewai.utilities.crew.models import CrewContext
def test_crew_context_creation():
crew_id = str(uuid.uuid4())
context = CrewContext(id=crew_id, key="test-crew")
assert context.id == crew_id
assert context.key == "test-crew"
def test_get_crew_context_with_baggage():
crew_id = str(uuid.uuid4())
assert get_crew_context() is None
crew_ctx = CrewContext(id=crew_id, key="test-key")
ctx = baggage.set_baggage("crew_context", crew_ctx)
token = attach(ctx)
try:
context = get_crew_context()
assert context is not None
assert context.id == crew_id
assert context.key == "test-key"
finally:
detach(token)
assert get_crew_context() is None
def test_get_crew_context_empty():
assert get_crew_context() is None
def test_baggage_nested_contexts():
crew_id1 = str(uuid.uuid4())
crew_id2 = str(uuid.uuid4())
crew_ctx1 = CrewContext(id=crew_id1, key="outer")
ctx1 = baggage.set_baggage("crew_context", crew_ctx1)
token1 = attach(ctx1)
try:
outer_context = get_crew_context()
assert outer_context.id == crew_id1
assert outer_context.key == "outer"
crew_ctx2 = CrewContext(id=crew_id2, key="inner")
ctx2 = baggage.set_baggage("crew_context", crew_ctx2)
token2 = attach(ctx2)
try:
inner_context = get_crew_context()
assert inner_context.id == crew_id2
assert inner_context.key == "inner"
finally:
detach(token2)
restored_context = get_crew_context()
assert restored_context.id == crew_id1
assert restored_context.key == "outer"
finally:
detach(token1)
assert get_crew_context() is None
def test_baggage_exception_handling():
crew_id = str(uuid.uuid4())
crew_ctx = CrewContext(id=crew_id, key="test")
ctx = baggage.set_baggage("crew_context", crew_ctx)
token = attach(ctx)
with pytest.raises(ValueError):
try:
assert get_crew_context() is not None
raise ValueError("Test exception")
finally:
detach(token)
assert get_crew_context() is None
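The attach/detach-in-finally discipline these tests exercise can be distilled into a context manager. A minimal sketch; crew_context_scope is a hypothetical helper, not part of crewai:

from contextlib import contextmanager

from opentelemetry import baggage
from opentelemetry.context import attach, detach

from crewai.utilities.crew.models import CrewContext


@contextmanager
def crew_context_scope(crew_id: str, key: str):
    # Attach a CrewContext to OpenTelemetry baggage for the duration of the
    # block, detaching even if the body raises (cf. the exception test above).
    token = attach(baggage.set_baggage("crew_context", CrewContext(id=crew_id, key=key)))
    try:
        yield
    finally:
        detach(token)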

View File

@@ -1 +0,0 @@
"""Tests for evaluators."""

View File

@@ -1,142 +0,0 @@
from unittest import mock
import pytest
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.utilities.evaluators.crew_evaluator_handler import (
CrewEvaluator,
TaskEvaluationPydanticOutput,
)
class TestCrewEvaluator:
@pytest.fixture
def crew_planner(self):
agent = Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1")
task = Task(
description="Task 1",
expected_output="Output 1",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
return CrewEvaluator(crew, openai_model_name="gpt-4o-mini")
def test_setup_for_evaluating(self, crew_planner):
crew_planner._setup_for_evaluating()
assert crew_planner.crew.tasks[0].callback == crew_planner.evaluate
def test_set_iteration(self, crew_planner):
crew_planner.set_iteration(1)
assert crew_planner.iteration == 1
def test_evaluator_agent(self, crew_planner):
agent = crew_planner._evaluator_agent()
assert agent.role == "Task Execution Evaluator"
assert (
agent.goal
== "Your goal is to evaluate the performance of the agents in the crew based on the tasks they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance."
)
assert (
agent.backstory
== "Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed"
)
assert agent.verbose is False
assert agent.llm.model == "gpt-4o-mini"
def test_evaluation_task(self, crew_planner):
evaluator_agent = Agent(
role="Evaluator Agent",
goal="Evaluate the performance of the agents in the crew",
backstory="Master in Evaluation",
)
task_to_evaluate = Task(
description="Task 1",
expected_output="Output 1",
agent=Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1"),
)
task_output = "Task Output 1"
task = crew_planner._evaluation_task(
evaluator_agent, task_to_evaluate, task_output
)
assert task.description.startswith(
"Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance."
)
assert task.agent == evaluator_agent
assert (
task.description
== "Based on the task description and the expected output, compare and evaluate "
"the performance of the agents in the crew based on the Task Output they have "
"performed using score from 1 to 10 evaluating on completion, quality, and overall "
"performance.task_description: Task 1 task_expected_output: Output 1 "
"agent: Agent 1 agent_goal: Goal 1 Task Output: Task Output 1"
)
@mock.patch("crewai.utilities.evaluators.crew_evaluator_handler.Console")
@mock.patch("crewai.utilities.evaluators.crew_evaluator_handler.Table")
def test_print_crew_evaluation_result(self, table, console, crew_planner):
# Set up task scores and execution times
crew_planner.tasks_scores = {
1: [10, 9, 8],
2: [9, 8, 7],
}
crew_planner.run_execution_times = {
1: [24, 45, 66],
2: [55, 33, 67],
}
# Mock agents and assign them to tasks
crew_planner.crew.agents = [
mock.Mock(role="Agent 1"),
mock.Mock(role="Agent 2"),
]
crew_planner.crew.tasks = [
mock.Mock(
agent=crew_planner.crew.agents[0], processed_by_agents=["Agent 1"]
),
mock.Mock(
agent=crew_planner.crew.agents[1], processed_by_agents=["Agent 2"]
),
]
# Run the method
crew_planner.print_crew_evaluation_result()
# Verify that the table is created with the appropriate structure and rows
table.assert_has_calls(
[
mock.call(
title="Tasks Scores \n (1-10 Higher is better)", box=mock.ANY
), # Title and styling
mock.call().add_column("Tasks/Crew/Agents", style="cyan"), # Columns
mock.call().add_column("Run 1", justify="center"),
mock.call().add_column("Run 2", justify="center"),
mock.call().add_column("Avg. Total", justify="center"),
mock.call().add_column("Agents", style="green"),
# Verify rows for tasks with agents
mock.call().add_row("Task 1", "10.0", "9.0", "9.5", "- Agent 1"),
mock.call().add_row("", "", "", "", "", ""), # Blank row between tasks
mock.call().add_row("Task 2", "9.0", "8.0", "8.5", "- Agent 2"),
# Add crew averages and execution times
mock.call().add_row("Crew", "9.00", "8.00", "8.5", ""),
mock.call().add_row("Execution Time (s)", "135", "155", "145", ""),
]
)
# Ensure the console prints the table
console.assert_has_calls([mock.call(), mock.call().print(table())])
def test_evaluate(self, crew_planner):
task_output = TaskOutput(
description="Task 1", agent=str(crew_planner.crew.agents[0])
)
with mock.patch.object(Task, "execute_sync") as execute:
execute().pydantic = TaskEvaluationPydanticOutput(quality=9.5)
crew_planner.evaluate(task_output)
assert crew_planner.tasks_scores[0] == [9.5]

View File

@@ -1,103 +0,0 @@
from unittest import mock
from unittest.mock import MagicMock, patch
from crewai.utilities.evaluators.task_evaluator import (
TaskEvaluator,
TrainingTaskEvaluation,
)
from crewai.utilities.converter import ConverterError
@patch("crewai.utilities.evaluators.task_evaluator.TrainingConverter")
def test_evaluate_training_data(converter_mock):
training_data = {
"agent_id": {
"data1": {
"initial_output": "Initial output 1",
"human_feedback": "Human feedback 1",
"improved_output": "Improved output 1",
},
"data2": {
"initial_output": "Initial output 2",
"human_feedback": "Human feedback 2",
"improved_output": "Improved output 2",
},
}
}
agent_id = "agent_id"
original_agent = MagicMock()
original_agent.llm.supports_function_calling.return_value = False
function_return_value = TrainingTaskEvaluation(
suggestions=[
"The initial output was already good, having a detailed explanation. However, the improved output "
"gave similar information but in a more professional manner using better vocabulary. For future tasks, "
"try to implement more elaborate language and precise terminology from the beginning."
],
quality=8.0,
final_summary="The agent responded well initially. However, the improved output showed that there is room "
"for enhancement in terms of language usage, precision, and professionalism. For future tasks, the agent "
"should focus more on these points from the start to increase performance.",
)
converter_mock.return_value.to_pydantic.return_value = function_return_value
result = TaskEvaluator(original_agent=original_agent).evaluate_training_data(
training_data, agent_id
)
assert result == function_return_value
converter_mock.assert_has_calls(
[
mock.call(
llm=original_agent.llm,
text="Assess the quality of the training data based on the llm output, human feedback , and llm "
"output improved result.\n\nIteration: data1\nInitial Output:\nInitial output 1\n\nHuman Feedback:\nHuman feedback "
"1\n\nImproved Output:\nImproved output 1\n\n------------------------------------------------\n\nIteration: data2\nInitial Output:\nInitial output 2\n\nHuman "
"Feedback:\nHuman feedback 2\n\nImproved Output:\nImproved output 2\n\n------------------------------------------------\n\nPlease provide:\n- Provide "
"a list of clear, actionable instructions derived from the Human Feedbacks to enhance the Agent's "
"performance. Analyze the differences between Initial Outputs and Improved Outputs to generate specific "
"action items for future tasks. Ensure all key and specificpoints from the human feedback are "
"incorporated into these instructions.\n- A score from 0 to 10 evaluating on completion, quality, and "
"overall performance from the improved output to the initial output based on the human feedback\n",
model=TrainingTaskEvaluation,
instructions="I'm gonna convert this raw text into valid JSON.\n\nThe json should have the "
"following structure, with the following keys:\n{\n suggestions: List[str],\n quality: float,\n final_summary: str\n}",
),
mock.call().to_pydantic(),
]
)
@patch("crewai.utilities.converter.Converter.to_pydantic")
@patch("crewai.utilities.training_converter.TrainingConverter._convert_field_by_field")
def test_training_converter_fallback_mechanism(convert_field_by_field_mock, to_pydantic_mock):
training_data = {
"agent_id": {
"data1": {
"initial_output": "Initial output 1",
"human_feedback": "Human feedback 1",
"improved_output": "Improved output 1",
},
"data2": {
"initial_output": "Initial output 2",
"human_feedback": "Human feedback 2",
"improved_output": "Improved output 2",
},
}
}
agent_id = "agent_id"
to_pydantic_mock.side_effect = ConverterError("Failed to convert directly")
expected_result = TrainingTaskEvaluation(
suggestions=["Fallback suggestion"],
quality=6.5,
final_summary="Fallback summary"
)
convert_field_by_field_mock.return_value = expected_result
original_agent = MagicMock()
result = TaskEvaluator(original_agent=original_agent).evaluate_training_data(
training_data, agent_id
)
assert result == expected_result
to_pydantic_mock.assert_called_once()
convert_field_by_field_mock.assert_called_once()
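The fallback path the second test exercises amounts to: attempt a direct Pydantic conversion, and only on ConverterError recover field by field. A rough sketch of that control flow; the method names mirror the patched targets above, and the no-argument call is an assumption:

from crewai.utilities.converter import ConverterError


def convert_with_fallback(converter):
    # Sketch only, not the actual TrainingConverter implementation: try the
    # direct conversion first, then fall back to per-field recovery.
    try:
        return converter.to_pydantic()
    except ConverterError:
        return converter._convert_field_by_field()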

View File

@@ -1 +0,0 @@
"""Tests for events."""

View File

@@ -1,47 +0,0 @@
from unittest.mock import Mock
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import crewai_event_bus
class TestEvent(BaseEvent):
pass
def test_specific_event_handler():
mock_handler = Mock()
@crewai_event_bus.on(TestEvent)
def handler(source, event):
mock_handler(source, event)
event = TestEvent(type="test_event")
crewai_event_bus.emit("source_object", event)
mock_handler.assert_called_once_with("source_object", event)
def test_wildcard_event_handler():
mock_handler = Mock()
@crewai_event_bus.on(BaseEvent)
def handler(source, event):
mock_handler(source, event)
event = TestEvent(type="test_event")
crewai_event_bus.emit("source_object", event)
mock_handler.assert_called_once_with("source_object", event)
def test_event_bus_error_handling(capfd):
@crewai_event_bus.on(BaseEvent)
def broken_handler(source, event):
raise ValueError("Simulated handler failure")
event = TestEvent(type="test_event")
crewai_event_bus.emit("source_object", event)
out, err = capfd.readouterr()
assert "Simulated handler failure" in out
assert "Handler 'broken_handler' failed" in out

View File

@@ -1,40 +0,0 @@
{
"hierarchical_manager_agent": {
"role": "Lorem ipsum dolor sit amet",
"goal": "Lorem ipsum dolor sit amet",
"backstory": "Lorem ipsum dolor sit amet."
},
"planning_manager_agent": {
"role": "Lorem ipsum dolor sit amet",
"goal": "Lorem ipsum dolor sit amet",
"backstory": "Lorem ipsum dolor sit amet."
},
"slices": {
"observation": "Lorem ipsum dolor sit amet",
"task": "Lorem ipsum dolor sit amet",
"memory": "Lorem ipsum dolor sit amet",
"role_playing": "Lorem ipsum dolor sit amet",
"tools": "Lorem ipsum dolor sit amet",
"no_tools": "Lorem ipsum dolor sit amet",
"format": "Lorem ipsum dolor sit amet",
"final_answer_format": "Lorem ipsum dolor sit amet",
"format_without_tools": "Lorem ipsum dolor sit amet",
"task_with_context": "Lorem ipsum dolor sit amet",
"expected_output": "Lorem ipsum dolor sit amet",
"human_feedback": "Lorem ipsum dolor sit amet",
"getting_input": "Lorem ipsum dolor sit amet "
},
"errors": {
"force_final_answer": "Lorem ipsum dolor sit amet",
"agent_tool_unexisting_coworker": "Lorem ipsum dolor sit amet",
"task_repeated_usage": "Lorem ipsum dolor sit amet",
"tool_usage_error": "Lorem ipsum dolor sit amet",
"tool_arguments_error": "Lorem ipsum dolor sit amet",
"wrong_tool_name": "Lorem ipsum dolor sit amet",
"tool_usage_exception": "Lorem ipsum dolor sit amet"
},
"tools": {
"delegate_work": "Lorem ipsum dolor sit amet",
"ask_question": "Lorem ipsum dolor sit amet"
}
}

View File

@@ -1,116 +0,0 @@
from unittest.mock import MagicMock, patch
from rich.tree import Tree
from rich.live import Live
from crewai.events.utils.console_formatter import ConsoleFormatter
class TestConsoleFormatterPauseResume:
"""Test ConsoleFormatter pause/resume functionality."""
def test_pause_live_updates_with_active_session(self):
"""Test pausing when Live session is active."""
formatter = ConsoleFormatter()
mock_live = MagicMock(spec=Live)
formatter._live = mock_live
formatter._live_paused = False
formatter.pause_live_updates()
mock_live.stop.assert_called_once()
assert formatter._live_paused
def test_pause_live_updates_when_already_paused(self):
"""Test pausing when already paused does nothing."""
formatter = ConsoleFormatter()
mock_live = MagicMock(spec=Live)
formatter._live = mock_live
formatter._live_paused = True
formatter.pause_live_updates()
mock_live.stop.assert_not_called()
assert formatter._live_paused
def test_pause_live_updates_with_no_session(self):
"""Test pausing when no Live session exists."""
formatter = ConsoleFormatter()
formatter._live = None
formatter._live_paused = False
formatter.pause_live_updates()
assert formatter._live_paused
def test_resume_live_updates_when_paused(self):
"""Test resuming when paused."""
formatter = ConsoleFormatter()
formatter._live_paused = True
formatter.resume_live_updates()
assert not formatter._live_paused
def test_resume_live_updates_when_not_paused(self):
"""Test resuming when not paused does nothing."""
formatter = ConsoleFormatter()
formatter._live_paused = False
formatter.resume_live_updates()
assert not formatter._live_paused
def test_print_after_resume_restarts_live_session(self):
"""Test that printing a Tree after resume creates new Live session."""
formatter = ConsoleFormatter()
formatter._live_paused = True
formatter._live = None
formatter.resume_live_updates()
assert not formatter._live_paused
tree = Tree("Test")
with patch("crewai.events.utils.console_formatter.Live") as mock_live_class:
mock_live_instance = MagicMock()
mock_live_class.return_value = mock_live_instance
formatter.print(tree)
mock_live_class.assert_called_once()
mock_live_instance.start.assert_called_once()
assert formatter._live == mock_live_instance
def test_multiple_pause_resume_cycles(self):
"""Test multiple pause/resume cycles work correctly."""
formatter = ConsoleFormatter()
mock_live = MagicMock(spec=Live)
formatter._live = mock_live
formatter._live_paused = False
formatter.pause_live_updates()
assert formatter._live_paused
mock_live.stop.assert_called_once()
assert formatter._live is None # Live session should be cleared
formatter.resume_live_updates()
assert not formatter._live_paused
formatter.pause_live_updates()
assert formatter._live_paused
formatter.resume_live_updates()
assert not formatter._live_paused
def test_pause_resume_state_initialization(self):
"""Test that _live_paused is properly initialized."""
formatter = ConsoleFormatter()
assert hasattr(formatter, "_live_paused")
assert not formatter._live_paused

View File

@@ -1,600 +0,0 @@
import json
from typing import Dict, List, Optional
from unittest.mock import MagicMock, Mock, patch
import pytest
from pydantic import BaseModel
from crewai.llm import LLM
from crewai.utilities.converter import (
Converter,
ConverterError,
convert_to_model,
convert_with_instructions,
create_converter,
generate_model_description,
get_conversion_instructions,
handle_partial_json,
validate_model,
)
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
# Tests for enums
from enum import Enum
@pytest.fixture(scope="module")
def vcr_config(request) -> dict:
return {
"cassette_library_dir": "tests/utilities/cassettes",
}
# Sample Pydantic models for testing
class EmailResponse(BaseModel):
previous_message_content: str
class EmailResponses(BaseModel):
responses: list[EmailResponse]
class SimpleModel(BaseModel):
name: str
age: int
class NestedModel(BaseModel):
id: int
data: SimpleModel
class Address(BaseModel):
street: str
city: str
zip_code: str
class Person(BaseModel):
name: str
age: int
address: Address
class CustomConverter(Converter):
pass
# Fixtures
@pytest.fixture
def mock_agent():
agent = Mock()
agent.function_calling_llm = None
agent.llm = Mock()
return agent
# Tests for convert_to_model
def test_convert_to_model_with_valid_json():
result = '{"name": "John", "age": 30}'
output = convert_to_model(result, SimpleModel, None, None)
assert isinstance(output, SimpleModel)
assert output.name == "John"
assert output.age == 30
def test_convert_to_model_with_invalid_json():
result = '{"name": "John", "age": "thirty"}'
with patch("crewai.utilities.converter.handle_partial_json") as mock_handle:
mock_handle.return_value = "Fallback result"
output = convert_to_model(result, SimpleModel, None, None)
assert output == "Fallback result"
def test_convert_to_model_with_no_model():
result = "Plain text"
output = convert_to_model(result, None, None, None)
assert output == "Plain text"
def test_convert_to_model_with_special_characters():
json_string_test = """
{
"responses": [
{
"previous_message_content": "Hi Tom,\r\n\r\nNiamh has chosen the Mika phonics on"
}
]
}
"""
output = convert_to_model(json_string_test, EmailResponses, None, None)
assert isinstance(output, EmailResponses)
assert len(output.responses) == 1
assert (
output.responses[0].previous_message_content
== "Hi Tom,\r\n\r\nNiamh has chosen the Mika phonics on"
)
def test_convert_to_model_with_escaped_special_characters():
json_string_test = json.dumps(
{
"responses": [
{
"previous_message_content": "Hi Tom,\r\n\r\nNiamh has chosen the Mika phonics on"
}
]
}
)
output = convert_to_model(json_string_test, EmailResponses, None, None)
assert isinstance(output, EmailResponses)
assert len(output.responses) == 1
assert (
output.responses[0].previous_message_content
== "Hi Tom,\r\n\r\nNiamh has chosen the Mika phonics on"
)
def test_convert_to_model_with_multiple_special_characters():
json_string_test = """
{
"responses": [
{
"previous_message_content": "Line 1\r\nLine 2\tTabbed\nLine 3\r\n\rEscaped newline"
}
]
}
"""
output = convert_to_model(json_string_test, EmailResponses, None, None)
assert isinstance(output, EmailResponses)
assert len(output.responses) == 1
assert (
output.responses[0].previous_message_content
== "Line 1\r\nLine 2\tTabbed\nLine 3\r\n\rEscaped newline"
)
# Tests for validate_model
def test_validate_model_pydantic_output():
result = '{"name": "Alice", "age": 25}'
output = validate_model(result, SimpleModel, False)
assert isinstance(output, SimpleModel)
assert output.name == "Alice"
assert output.age == 25
def test_validate_model_json_output():
result = '{"name": "Bob", "age": 40}'
output = validate_model(result, SimpleModel, True)
assert isinstance(output, dict)
assert output == {"name": "Bob", "age": 40}
# Tests for handle_partial_json
def test_handle_partial_json_with_valid_partial():
result = 'Some text {"name": "Charlie", "age": 35} more text'
output = handle_partial_json(result, SimpleModel, False, None)
assert isinstance(output, SimpleModel)
assert output.name == "Charlie"
assert output.age == 35
def test_handle_partial_json_with_invalid_partial(mock_agent):
result = "No valid JSON here"
with patch("crewai.utilities.converter.convert_with_instructions") as mock_convert:
mock_convert.return_value = "Converted result"
output = handle_partial_json(result, SimpleModel, False, mock_agent)
assert output == "Converted result"
# Tests for convert_with_instructions
@patch("crewai.utilities.converter.create_converter")
@patch("crewai.utilities.converter.get_conversion_instructions")
def test_convert_with_instructions_success(
mock_get_instructions, mock_create_converter, mock_agent
):
mock_get_instructions.return_value = "Instructions"
mock_converter = Mock()
mock_converter.to_pydantic.return_value = SimpleModel(name="David", age=50)
mock_create_converter.return_value = mock_converter
result = "Some text to convert"
output = convert_with_instructions(result, SimpleModel, False, mock_agent)
assert isinstance(output, SimpleModel)
assert output.name == "David"
assert output.age == 50
@patch("crewai.utilities.converter.create_converter")
@patch("crewai.utilities.converter.get_conversion_instructions")
def test_convert_with_instructions_failure(
mock_get_instructions, mock_create_converter, mock_agent
):
mock_get_instructions.return_value = "Instructions"
mock_converter = Mock()
mock_converter.to_pydantic.return_value = ConverterError("Conversion failed")
mock_create_converter.return_value = mock_converter
result = "Some text to convert"
with patch("crewai.utilities.converter.Printer") as mock_printer:
output = convert_with_instructions(result, SimpleModel, False, mock_agent)
assert output == result
mock_printer.return_value.print.assert_called_once()
# Tests for get_conversion_instructions
def test_get_conversion_instructions_gpt():
llm = LLM(model="gpt-4o-mini")
with patch.object(LLM, "supports_function_calling") as supports_function_calling:
supports_function_calling.return_value = True
instructions = get_conversion_instructions(SimpleModel, llm)
model_schema = PydanticSchemaParser(model=SimpleModel).get_schema()
expected_instructions = (
"Please convert the following text into valid JSON.\n\n"
"Output ONLY the valid JSON and nothing else.\n\n"
"The JSON must follow this schema exactly:\n```json\n"
f"{model_schema}\n```"
)
assert instructions == expected_instructions
def test_get_conversion_instructions_non_gpt():
llm = LLM(model="ollama/llama3.1", base_url="http://localhost:11434")
with patch.object(LLM, "supports_function_calling", return_value=False):
instructions = get_conversion_instructions(SimpleModel, llm)
assert '"name": str' in instructions
assert '"age": int' in instructions
# Tests for supports_function_calling
def test_supports_function_calling_true():
llm = LLM(model="gpt-4o")
assert llm.supports_function_calling() is True
def test_supports_function_calling_false():
llm = LLM(model="non-existent-model")
assert llm.supports_function_calling() is False
def test_create_converter_with_mock_agent():
mock_agent = MagicMock()
mock_agent.get_output_converter.return_value = MagicMock(spec=Converter)
converter = create_converter(
agent=mock_agent,
llm=Mock(),
text="Sample",
model=SimpleModel,
instructions="Convert",
)
assert isinstance(converter, Converter)
mock_agent.get_output_converter.assert_called_once()
def test_create_converter_with_custom_converter():
converter = create_converter(
converter_cls=CustomConverter,
llm=LLM(model="gpt-4o-mini"),
text="Sample",
model=SimpleModel,
instructions="Convert",
)
assert isinstance(converter, CustomConverter)
def test_create_converter_fails_without_agent_or_converter_cls():
with pytest.raises(
ValueError, match="Either agent or converter_cls must be provided"
):
create_converter(
llm=Mock(), text="Sample", model=SimpleModel, instructions="Convert"
)
def test_generate_model_description_simple_model():
description = generate_model_description(SimpleModel)
expected_description = '{\n "name": str,\n "age": int\n}'
assert description == expected_description
def test_generate_model_description_nested_model():
description = generate_model_description(NestedModel)
expected_description = (
'{\n "id": int,\n "data": {\n "name": str,\n "age": int\n}\n}'
)
assert description == expected_description
def test_generate_model_description_optional_field():
class ModelWithOptionalField(BaseModel):
name: Optional[str]
age: int
description = generate_model_description(ModelWithOptionalField)
expected_description = '{\n "name": Optional[str],\n "age": int\n}'
assert description == expected_description
def test_generate_model_description_list_field():
class ModelWithListField(BaseModel):
items: List[int]
description = generate_model_description(ModelWithListField)
expected_description = '{\n "items": List[int]\n}'
assert description == expected_description
def test_generate_model_description_dict_field():
class ModelWithDictField(BaseModel):
attributes: Dict[str, int]
description = generate_model_description(ModelWithDictField)
expected_description = '{\n "attributes": Dict[str, int]\n}'
assert description == expected_description
@pytest.mark.vcr(filter_headers=["authorization"])
def test_convert_with_instructions():
llm = LLM(model="gpt-4o-mini")
sample_text = "Name: Alice, Age: 30"
instructions = get_conversion_instructions(SimpleModel, llm)
converter = Converter(
llm=llm,
text=sample_text,
model=SimpleModel,
instructions=instructions,
)
# Act
output = converter.to_pydantic()
# Assert
assert isinstance(output, SimpleModel)
assert output.name == "Alice"
assert output.age == 30
@pytest.mark.vcr(filter_headers=["authorization"])
def test_converter_with_llama3_2_model():
llm = LLM(model="openrouter/meta-llama/llama-3.2-3b-instruct")
sample_text = "Name: Alice Llama, Age: 30"
instructions = get_conversion_instructions(SimpleModel, llm)
converter = Converter(
llm=llm,
text=sample_text,
model=SimpleModel,
instructions=instructions,
)
output = converter.to_pydantic()
assert isinstance(output, SimpleModel)
assert output.name == "Alice Llama"
assert output.age == 30
@pytest.mark.vcr(filter_headers=["authorization"])
def test_converter_with_llama3_1_model():
llm = LLM(model="ollama/llama3.1", base_url="http://localhost:11434")
sample_text = "Name: Alice Llama, Age: 30"
instructions = get_conversion_instructions(SimpleModel, llm)
converter = Converter(
llm=llm,
text=sample_text,
model=SimpleModel,
instructions=instructions,
)
output = converter.to_pydantic()
assert isinstance(output, SimpleModel)
assert output.name == "Alice Llama"
assert output.age == 30
@pytest.mark.vcr(filter_headers=["authorization"])
def test_converter_with_nested_model():
llm = LLM(model="gpt-4o-mini")
sample_text = "Name: John Doe\nAge: 30\nAddress: 123 Main St, Anytown, 12345"
instructions = get_conversion_instructions(Person, llm)
converter = Converter(
llm=llm,
text=sample_text,
model=Person,
instructions=instructions,
)
output = converter.to_pydantic()
assert isinstance(output, Person)
assert output.name == "John Doe"
assert output.age == 30
assert isinstance(output.address, Address)
assert output.address.street == "123 Main St"
assert output.address.city == "Anytown"
assert output.address.zip_code == "12345"
# Tests for error handling
def test_converter_error_handling():
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = False
llm.call.return_value = "Invalid JSON"
sample_text = "Name: Alice, Age: 30"
instructions = get_conversion_instructions(SimpleModel, llm)
converter = Converter(
llm=llm,
text=sample_text,
model=SimpleModel,
instructions=instructions,
)
with pytest.raises(ConverterError) as exc_info:
converter.to_pydantic()
assert "Failed to convert text into a Pydantic model" in str(exc_info.value)
# Tests for retry logic
def test_converter_retry_logic():
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = False
llm.call.side_effect = [
"Invalid JSON",
"Still invalid",
'{"name": "Retry Alice", "age": 30}',
]
sample_text = "Name: Retry Alice, Age: 30"
instructions = get_conversion_instructions(SimpleModel, llm)
converter = Converter(
llm=llm,
text=sample_text,
model=SimpleModel,
instructions=instructions,
max_attempts=3,
)
output = converter.to_pydantic()
assert isinstance(output, SimpleModel)
assert output.name == "Retry Alice"
assert output.age == 30
assert llm.call.call_count == 3
# Tests for optional fields
def test_converter_with_optional_fields():
class OptionalModel(BaseModel):
name: str
age: Optional[int]
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = False
# Simulate the LLM's response with 'age' explicitly set to null
llm.call.return_value = '{"name": "Bob", "age": null}'
sample_text = "Name: Bob, age: None"
instructions = get_conversion_instructions(OptionalModel, llm)
converter = Converter(
llm=llm,
text=sample_text,
model=OptionalModel,
instructions=instructions,
)
output = converter.to_pydantic()
assert isinstance(output, OptionalModel)
assert output.name == "Bob"
assert output.age is None
# Tests for list fields
def test_converter_with_list_field():
class ListModel(BaseModel):
items: List[int]
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = False
llm.call.return_value = '{"items": [1, 2, 3]}'
sample_text = "Items: 1, 2, 3"
instructions = get_conversion_instructions(ListModel, llm)
converter = Converter(
llm=llm,
text=sample_text,
model=ListModel,
instructions=instructions,
)
output = converter.to_pydantic()
assert isinstance(output, ListModel)
assert output.items == [1, 2, 3]
def test_converter_with_enum():
class Color(Enum):
RED = "red"
GREEN = "green"
BLUE = "blue"
class EnumModel(BaseModel):
name: str
color: Color
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = False
llm.call.return_value = '{"name": "Alice", "color": "red"}'
sample_text = "Name: Alice, Color: Red"
instructions = get_conversion_instructions(EnumModel, llm)
converter = Converter(
llm=llm,
text=sample_text,
model=EnumModel,
instructions=instructions,
)
output = converter.to_pydantic()
assert isinstance(output, EnumModel)
assert output.name == "Alice"
assert output.color == Color.RED
# Tests for ambiguous input
def test_converter_with_ambiguous_input():
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = False
llm.call.return_value = '{"name": "Charlie", "age": "Not an age"}'
sample_text = "Charlie is thirty years old"
instructions = get_conversion_instructions(SimpleModel, llm)
converter = Converter(
llm=llm,
text=sample_text,
model=SimpleModel,
instructions=instructions,
)
with pytest.raises(ConverterError) as exc_info:
converter.to_pydantic()
assert "failed to convert text into a pydantic model" in str(exc_info.value).lower()
# Tests for function calling support
def test_converter_with_function_calling():
llm = Mock(spec=LLM)
llm.supports_function_calling.return_value = True
instructor = Mock()
instructor.to_pydantic.return_value = SimpleModel(name="Eve", age=35)
converter = Converter(
llm=llm,
text="Name: Eve, Age: 35",
model=SimpleModel,
instructions="Convert this text.",
)
converter._create_instructor = Mock(return_value=instructor)
output = converter.to_pydantic()
assert isinstance(output, SimpleModel)
assert output.name == "Eve"
assert output.age == 35
instructor.to_pydantic.assert_called_once()
def test_generate_model_description_union_field():
class UnionModel(BaseModel):
field: int | str | None
description = generate_model_description(UnionModel)
expected_description = '{\n "field": int | str | None\n}'
assert description == expected_description

View File

@@ -1,993 +0,0 @@
from datetime import datetime
from unittest.mock import Mock, patch
import pytest
from pydantic import Field
from crewai.agent import Agent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.crew import Crew
from crewai.flow.flow import Flow, listen, start
from crewai.llm import LLM
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentExecutionStartedEvent,
)
from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewKickoffStartedEvent,
CrewTestCompletedEvent,
CrewTestResultEvent,
CrewTestStartedEvent,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.event_listener import EventListener
from crewai.events.types.tool_usage_events import ToolUsageFinishedEvent
from crewai.events.types.flow_events import (
FlowCreatedEvent,
FlowFinishedEvent,
FlowStartedEvent,
MethodExecutionFailedEvent,
MethodExecutionStartedEvent,
)
from crewai.events.types.llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMStreamChunkEvent,
)
from crewai.events.types.task_events import (
TaskCompletedEvent,
TaskFailedEvent,
TaskStartedEvent,
)
from crewai.events.types.tool_usage_events import (
ToolUsageErrorEvent,
)
@pytest.fixture(scope="module")
def vcr_config(request) -> dict:
return {
"cassette_library_dir": "tests/utilities/cassettes",
}
@pytest.fixture(scope="module")
def base_agent():
return Agent(
role="base_agent",
llm="gpt-4o-mini",
goal="Just say hi",
backstory="You are a helpful assistant that just says hi",
)
@pytest.fixture(scope="module")
def base_task(base_agent):
return Task(
description="Just say hi",
expected_output="hi",
agent=base_agent,
)
@pytest.fixture
def reset_event_listener_singleton():
"""Reset EventListener singleton for clean test state."""
original_instance = EventListener._instance
original_initialized = (
getattr(EventListener._instance, "_initialized", False)
if EventListener._instance
else False
)
EventListener._instance = None
yield
EventListener._instance = original_instance
if original_instance and original_initialized:
EventListener._instance._initialized = original_initialized
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_start_kickoff_event(
base_agent, base_task, reset_event_listener_singleton
):
received_events = []
mock_span = Mock()
@crewai_event_bus.on(CrewKickoffStartedEvent)
def handle_crew_start(source, event):
received_events.append(event)
mock_telemetry = Mock()
mock_telemetry.crew_execution_span = Mock(return_value=mock_span)
mock_telemetry.end_crew = Mock(return_value=mock_span)
mock_telemetry.set_tracer = Mock()
mock_telemetry.task_started = Mock(return_value=mock_span)
mock_telemetry.task_ended = Mock(return_value=mock_span)
# Patch the Telemetry class to return our mock
with patch("crewai.events.event_listener.Telemetry", return_value=mock_telemetry):
# Now when Crew creates EventListener, it will use our mocked telemetry
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff()
mock_telemetry.crew_execution_span.assert_called_once_with(crew, None)
mock_telemetry.end_crew.assert_called_once_with(crew, "hi")
assert len(received_events) == 1
assert received_events[0].crew_name == "TestCrew"
assert isinstance(received_events[0].timestamp, datetime)
assert received_events[0].type == "crew_kickoff_started"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_end_kickoff_event(base_agent, base_task):
received_events = []
@crewai_event_bus.on(CrewKickoffCompletedEvent)
def handle_crew_end(source, event):
received_events.append(event)
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff()
assert len(received_events) == 1
assert received_events[0].crew_name == "TestCrew"
assert isinstance(received_events[0].timestamp, datetime)
assert received_events[0].type == "crew_kickoff_completed"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_test_kickoff_type_event(base_agent, base_task):
received_events = []
@crewai_event_bus.on(CrewTestStartedEvent)
def handle_crew_end(source, event):
received_events.append(event)
@crewai_event_bus.on(CrewTestCompletedEvent)
def handle_crew_test_end(source, event):
received_events.append(event)
@crewai_event_bus.on(CrewTestResultEvent)
def handle_crew_test_result(source, event):
received_events.append(event)
eval_llm = LLM(model="gpt-4o-mini")
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.test(n_iterations=1, eval_llm=eval_llm)
assert len(received_events) == 3
assert received_events[0].crew_name == "TestCrew"
assert isinstance(received_events[0].timestamp, datetime)
assert received_events[0].type == "crew_test_started"
assert received_events[1].crew_name == "TestCrew"
assert isinstance(received_events[1].timestamp, datetime)
assert received_events[1].type == "crew_test_result"
assert received_events[2].crew_name == "TestCrew"
assert isinstance(received_events[2].timestamp, datetime)
assert received_events[2].type == "crew_test_completed"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_kickoff_failed_event(base_agent, base_task):
received_events = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffFailedEvent)
def handle_crew_failed(source, event):
received_events.append(event)
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
with patch.object(Crew, "_execute_tasks") as mock_execute:
error_message = "Simulated crew kickoff failure"
mock_execute.side_effect = Exception(error_message)
with pytest.raises(Exception):
crew.kickoff()
assert len(received_events) == 1
assert received_events[0].error == error_message
assert isinstance(received_events[0].timestamp, datetime)
assert received_events[0].type == "crew_kickoff_failed"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_start_task_event(base_agent, base_task):
received_events = []
@crewai_event_bus.on(TaskStartedEvent)
def handle_task_start(source, event):
received_events.append(event)
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff()
assert len(received_events) == 1
assert isinstance(received_events[0].timestamp, datetime)
assert received_events[0].type == "task_started"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_end_task_event(
base_agent, base_task, reset_event_listener_singleton
):
received_events = []
@crewai_event_bus.on(TaskCompletedEvent)
def handle_task_end(source, event):
received_events.append(event)
mock_span = Mock()
mock_telemetry = Mock()
mock_telemetry.task_started = Mock(return_value=mock_span)
mock_telemetry.task_ended = Mock(return_value=mock_span)
mock_telemetry.set_tracer = Mock()
mock_telemetry.crew_execution_span = Mock()
mock_telemetry.end_crew = Mock()
with patch("crewai.events.event_listener.Telemetry", return_value=mock_telemetry):
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff()
mock_telemetry.task_started.assert_called_once_with(crew=crew, task=base_task)
mock_telemetry.task_ended.assert_called_once_with(mock_span, base_task, crew)
assert len(received_events) == 1
assert isinstance(received_events[0].timestamp, datetime)
assert received_events[0].type == "task_completed"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_emits_failed_event_on_execution_error(base_agent, base_task):
received_events = []
received_sources = []
@crewai_event_bus.on(TaskFailedEvent)
def handle_task_failed(source, event):
received_events.append(event)
received_sources.append(source)
with patch.object(
Task,
"_execute_core",
) as mock_execute:
error_message = "Simulated task failure"
mock_execute.side_effect = Exception(error_message)
agent = Agent(
role="base_agent",
goal="Just say hi",
backstory="You are a helpful assistant that just says hi",
)
task = Task(
description="Just say hi",
expected_output="hi",
agent=agent,
)
with pytest.raises(Exception):
agent.execute_task(task=task)
assert len(received_events) == 1
assert received_sources[0] == task
assert received_events[0].error == error_message
assert isinstance(received_events[0].timestamp, datetime)
assert received_events[0].type == "task_failed"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_emits_execution_started_and_completed_events(base_agent, base_task):
received_events = []
@crewai_event_bus.on(AgentExecutionStartedEvent)
def handle_agent_start(source, event):
received_events.append(event)
@crewai_event_bus.on(AgentExecutionCompletedEvent)
def handle_agent_completed(source, event):
received_events.append(event)
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff()
assert len(received_events) == 2
assert received_events[0].agent == base_agent
assert received_events[0].task == base_task
assert received_events[0].tools == []
assert isinstance(received_events[0].task_prompt, str)
assert (
received_events[0].task_prompt
== "Just say hi\n\nThis is the expected criteria for your final answer: hi\nyou MUST return the actual complete content as the final answer, not a summary."
)
assert isinstance(received_events[0].timestamp, datetime)
assert received_events[0].type == "agent_execution_started"
assert isinstance(received_events[1].timestamp, datetime)
assert received_events[1].type == "agent_execution_completed"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_emits_execution_error_event(base_agent, base_task):
received_events = []
@crewai_event_bus.on(AgentExecutionErrorEvent)
def handle_agent_start(source, event):
received_events.append(event)
error_message = "Error happening while sending prompt to model."
base_agent.max_retry_limit = 0
with patch.object(
CrewAgentExecutor, "invoke", wraps=base_agent.agent_executor.invoke
) as invoke_mock:
invoke_mock.side_effect = Exception(error_message)
with pytest.raises(Exception):
base_agent.execute_task(
task=base_task,
)
assert len(received_events) == 1
assert received_events[0].agent == base_agent
assert received_events[0].task == base_task
assert received_events[0].error == error_message
assert isinstance(received_events[0].timestamp, datetime)
assert received_events[0].type == "agent_execution_error"
class SayHiTool(BaseTool):
name: str = Field(default="say_hi", description="The name of the tool")
description: str = Field(
default="Say hi", description="The description of the tool"
)
def _run(self) -> str:
return "hi"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_tools_emits_finished_events():
received_events = []
@crewai_event_bus.on(ToolUsageFinishedEvent)
def handle_tool_end(source, event):
received_events.append(event)
agent = Agent(
role="base_agent",
goal="Just say hi",
backstory="You are a helpful assistant that just says hi",
tools=[SayHiTool()],
)
task = Task(
description="Just say hi",
expected_output="hi",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task], name="TestCrew")
crew.kickoff()
assert len(received_events) == 1
assert received_events[0].agent_key == agent.key
assert received_events[0].agent_role == agent.role
assert received_events[0].tool_name == SayHiTool().name
assert received_events[0].tool_args == "{}" or received_events[0].tool_args == {}
assert received_events[0].type == "tool_usage_finished"
assert isinstance(received_events[0].timestamp, datetime)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_tools_emits_error_events():
received_events = []
@crewai_event_bus.on(ToolUsageErrorEvent)
def handle_tool_end(source, event):
received_events.append(event)
class ErrorTool(BaseTool):
name: str = Field(
default="error_tool", description="A tool that raises an error"
)
description: str = Field(
default="This tool always raises an error",
description="The description of the tool",
)
def _run(self) -> str:
raise Exception("Simulated tool error")
agent = Agent(
role="base_agent",
goal="Try to use the error tool",
backstory="You are an assistant that tests error handling",
tools=[ErrorTool()],
llm=LLM(model="gpt-4o-mini"),
)
task = Task(
description="Use the error tool",
expected_output="This should error",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task], name="TestCrew")
crew.kickoff()
    assert len(received_events) == 48  # the recorded run retries the failing tool repeatedly, emitting 48 error events
assert received_events[0].agent_key == agent.key
assert received_events[0].agent_role == agent.role
assert received_events[0].tool_name == "error_tool"
assert received_events[0].tool_args == "{}" or received_events[0].tool_args == {}
assert str(received_events[0].error) == "Simulated tool error"
assert received_events[0].type == "tool_usage_error"
assert isinstance(received_events[0].timestamp, datetime)
def test_flow_emits_start_event(reset_event_listener_singleton):
received_events = []
mock_span = Mock()
@crewai_event_bus.on(FlowStartedEvent)
def handle_flow_start(source, event):
received_events.append(event)
class TestFlow(Flow[dict]):
@start()
def begin(self):
return "started"
mock_telemetry = Mock()
mock_telemetry.flow_execution_span = Mock(return_value=mock_span)
mock_telemetry.flow_creation_span = Mock()
mock_telemetry.set_tracer = Mock()
with patch("crewai.events.event_listener.Telemetry", return_value=mock_telemetry):
# Force creation of EventListener singleton with mocked telemetry
_ = EventListener()
flow = TestFlow()
flow.kickoff()
mock_telemetry.flow_execution_span.assert_called_once_with("TestFlow", ["begin"])
assert len(received_events) == 1
assert received_events[0].flow_name == "TestFlow"
assert received_events[0].type == "flow_started"
def test_flow_name_emitted_to_event_bus():
received_events = []
class MyFlowClass(Flow):
name = "PRODUCTION_FLOW"
@start()
def start(self):
return "Hello, world!"
@crewai_event_bus.on(FlowStartedEvent)
def handle_flow_start(source, event):
received_events.append(event)
flow = MyFlowClass()
flow.kickoff()
assert len(received_events) == 1
assert received_events[0].flow_name == "PRODUCTION_FLOW"
def test_flow_emits_finish_event():
received_events = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(FlowFinishedEvent)
def handle_flow_finish(source, event):
received_events.append(event)
class TestFlow(Flow[dict]):
@start()
def begin(self):
return "completed"
flow = TestFlow()
result = flow.kickoff()
assert len(received_events) == 1
assert received_events[0].flow_name == "TestFlow"
assert received_events[0].type == "flow_finished"
assert received_events[0].result == "completed"
assert result == "completed"
def test_flow_emits_method_execution_started_event():
received_events = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MethodExecutionStartedEvent)
def handle_method_start(source, event):
print("event in method name", event.method_name)
received_events.append(event)
class TestFlow(Flow[dict]):
@start()
def begin(self):
return "started"
@listen("begin")
def second_method(self):
return "executed"
flow = TestFlow()
flow.kickoff()
assert len(received_events) == 2
assert received_events[0].method_name == "begin"
assert received_events[0].flow_name == "TestFlow"
assert received_events[0].type == "method_execution_started"
assert received_events[1].method_name == "second_method"
assert received_events[1].flow_name == "TestFlow"
assert received_events[1].type == "method_execution_started"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_register_handler_adds_new_handler(base_agent, base_task):
received_events = []
def custom_handler(source, event):
received_events.append(event)
with crewai_event_bus.scoped_handlers():
crewai_event_bus.register_handler(CrewKickoffStartedEvent, custom_handler)
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff()
assert len(received_events) == 1
assert isinstance(received_events[0].timestamp, datetime)
assert received_events[0].type == "crew_kickoff_started"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_multiple_handlers_for_same_event(base_agent, base_task):
received_events_1 = []
received_events_2 = []
def handler_1(source, event):
received_events_1.append(event)
def handler_2(source, event):
received_events_2.append(event)
with crewai_event_bus.scoped_handlers():
crewai_event_bus.register_handler(CrewKickoffStartedEvent, handler_1)
crewai_event_bus.register_handler(CrewKickoffStartedEvent, handler_2)
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff()
assert len(received_events_1) == 1
assert len(received_events_2) == 1
assert received_events_1[0].type == "crew_kickoff_started"
assert received_events_2[0].type == "crew_kickoff_started"
def test_flow_emits_created_event():
received_events = []
@crewai_event_bus.on(FlowCreatedEvent)
def handle_flow_created(source, event):
received_events.append(event)
class TestFlow(Flow[dict]):
@start()
def begin(self):
return "started"
flow = TestFlow()
flow.kickoff()
assert len(received_events) == 1
assert received_events[0].flow_name == "TestFlow"
assert received_events[0].type == "flow_created"
def test_flow_emits_method_execution_failed_event():
received_events = []
error = Exception("Simulated method failure")
@crewai_event_bus.on(MethodExecutionFailedEvent)
def handle_method_failed(source, event):
received_events.append(event)
class TestFlow(Flow[dict]):
@start()
def begin(self):
raise error
flow = TestFlow()
with pytest.raises(Exception):
flow.kickoff()
assert len(received_events) == 1
assert received_events[0].method_name == "begin"
assert received_events[0].flow_name == "TestFlow"
assert received_events[0].type == "method_execution_failed"
assert received_events[0].error == error
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_call_started_event():
received_events = []
@crewai_event_bus.on(LLMCallStartedEvent)
def handle_llm_call_started(source, event):
received_events.append(event)
@crewai_event_bus.on(LLMCallCompletedEvent)
def handle_llm_call_completed(source, event):
received_events.append(event)
llm = LLM(model="gpt-4o-mini")
llm.call("Hello, how are you?")
assert len(received_events) == 2
assert received_events[0].type == "llm_call_started"
assert received_events[1].type == "llm_call_completed"
assert received_events[0].task_name is None
assert received_events[0].agent_role is None
assert received_events[0].agent_id is None
assert received_events[0].task_id is None
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_call_failed_event():
received_events = []
@crewai_event_bus.on(LLMCallFailedEvent)
def handle_llm_call_failed(source, event):
received_events.append(event)
error_message = "Simulated LLM call failure"
with patch("crewai.llm.litellm.completion", side_effect=Exception(error_message)):
llm = LLM(model="gpt-4o-mini")
with pytest.raises(Exception) as exc_info:
llm.call("Hello, how are you?")
assert str(exc_info.value) == error_message
assert len(received_events) == 1
assert received_events[0].type == "llm_call_failed"
assert received_events[0].error == error_message
assert received_events[0].task_name is None
assert received_events[0].agent_role is None
assert received_events[0].agent_id is None
assert received_events[0].task_id is None
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_stream_chunk_events():
"""Test that LLM emits stream chunk events when streaming is enabled."""
received_chunks = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMStreamChunkEvent)
def handle_stream_chunk(source, event):
received_chunks.append(event.chunk)
# Create an LLM with streaming enabled
llm = LLM(model="gpt-4o", stream=True)
# Call the LLM with a simple message
response = llm.call("Tell me a short joke")
# Verify that we received chunks
assert len(received_chunks) > 0
# Verify that concatenating all chunks equals the final response
assert "".join(received_chunks) == response
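# The join-equals-response assertion encodes the streaming contract: every
# delta is both emitted as a chunk event and accumulated into the final
# return value. A minimal sketch of that accumulation loop (illustrative;
# the real logic lives inside LLM.call):
def _sketch_stream_and_collect(deltas, emit_chunk):
    """Emit each streamed delta, then return the accumulated response."""
    parts = []
    for delta in deltas:
        emit_chunk(delta)      # one LLMStreamChunkEvent per delta
        parts.append(delta)
    return "".join(parts)      # equals the concatenation of emitted chunks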
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_no_stream_chunks_when_streaming_disabled():
"""Test that LLM doesn't emit stream chunk events when streaming is disabled."""
received_chunks = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMStreamChunkEvent)
def handle_stream_chunk(source, event):
received_chunks.append(event.chunk)
# Create an LLM with streaming disabled
llm = LLM(model="gpt-4o", stream=False)
# Call the LLM with a simple message
response = llm.call("Tell me a short joke")
# Verify that we didn't receive any chunks
assert len(received_chunks) == 0
# Verify we got a response
assert response and isinstance(response, str)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_streaming_fallback_to_non_streaming():
"""Test that streaming falls back to non-streaming when there's an error."""
received_chunks = []
fallback_called = False
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMStreamChunkEvent)
def handle_stream_chunk(source, event):
received_chunks.append(event.chunk)
# Create an LLM with streaming enabled
llm = LLM(model="gpt-4o", stream=True)
# Store original methods
original_call = llm.call
# Create a mock call method that handles the streaming error
def mock_call(messages, tools=None, callbacks=None, available_functions=None):
nonlocal fallback_called
# Emit a couple of chunks to simulate partial streaming
crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="Test chunk 1"))
crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="Test chunk 2"))
# Mark that fallback would be called
fallback_called = True
# Return a response as if fallback succeeded
return "Fallback response after streaming error"
# Replace the call method with our mock
llm.call = mock_call
try:
# Call the LLM
response = llm.call("Tell me a short joke")
# Verify that we received some chunks
assert len(received_chunks) == 2
assert received_chunks[0] == "Test chunk 1"
assert received_chunks[1] == "Test chunk 2"
# Verify fallback was triggered
assert fallback_called
# Verify we got the fallback response
assert response == "Fallback response after streaming error"
finally:
# Restore the original method
llm.call = original_call
@pytest.mark.vcr(filter_headers=["authorization"])
def test_streaming_empty_response_handling():
"""Test that streaming handles empty responses correctly."""
received_chunks = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMStreamChunkEvent)
def handle_stream_chunk(source, event):
received_chunks.append(event.chunk)
# Create an LLM with streaming enabled
llm = LLM(model="gpt-3.5-turbo", stream=True)
# Store original methods
original_call = llm.call
# Create a mock call method that simulates empty chunks
def mock_call(messages, tools=None, callbacks=None, available_functions=None):
# Emit a few empty chunks
for _ in range(3):
crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk=""))
# Return the default message for empty responses
return "I apologize, but I couldn't generate a proper response. Please try again or rephrase your request."
# Replace the call method with our mock
llm.call = mock_call
try:
# Call the LLM - this should handle empty response
response = llm.call("Tell me a short joke")
# Verify that we received empty chunks
assert len(received_chunks) == 3
assert all(chunk == "" for chunk in received_chunks)
# Verify the response is the default message for empty responses
assert "I apologize" in response and "couldn't generate" in response
finally:
# Restore the original method
llm.call = original_call
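# The mock above mirrors an assumed guard in the streaming path: if the
# accumulated text is empty once the stream ends, a fixed fallback message is
# returned instead of an empty string. A sketch of that guard (illustrative
# only; the exact wording is taken from the test expectation above):
def _sketch_finalize_stream(accumulated: str) -> str:
    if not accumulated.strip():
        return (
            "I apologize, but I couldn't generate a proper response. "
            "Please try again or rephrase your request."
        )
    return accumulated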
@pytest.mark.vcr(filter_headers=["authorization"])
def test_stream_llm_emits_event_with_task_and_agent_info():
completed_event = []
failed_event = []
started_event = []
stream_event = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMCallFailedEvent)
def handle_llm_failed(source, event):
failed_event.append(event)
@crewai_event_bus.on(LLMCallStartedEvent)
def handle_llm_started(source, event):
started_event.append(event)
@crewai_event_bus.on(LLMCallCompletedEvent)
def handle_llm_completed(source, event):
completed_event.append(event)
@crewai_event_bus.on(LLMStreamChunkEvent)
def handle_llm_stream_chunk(source, event):
stream_event.append(event)
agent = Agent(
role="TestAgent",
llm=LLM(model="gpt-4o-mini", stream=True),
goal="Just say hi",
backstory="You are a helpful assistant that just says hi",
)
task = Task(
description="Just say hi",
expected_output="hi",
llm=LLM(model="gpt-4o-mini", stream=True),
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
crew.kickoff()
assert len(completed_event) == 1
assert len(failed_event) == 0
assert len(started_event) == 1
assert len(stream_event) == 12
all_events = completed_event + failed_event + started_event + stream_event
all_agent_roles = [event.agent_role for event in all_events]
all_agent_id = [event.agent_id for event in all_events]
all_task_id = [event.task_id for event in all_events]
all_task_name = [event.task_name for event in all_events]
# ensure all events have the agent + task props set
assert len(all_agent_roles) == 14
assert len(all_agent_id) == 14
assert len(all_task_id) == 14
assert len(all_task_name) == 14
assert set(all_agent_roles) == {agent.role}
assert set(all_agent_id) == {agent.id}
assert set(all_task_id) == {task.id}
assert set(all_task_name) == {task.name or task.description}
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_event_with_task_and_agent_info(base_agent, base_task):
completed_event = []
failed_event = []
started_event = []
stream_event = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMCallFailedEvent)
def handle_llm_failed(source, event):
failed_event.append(event)
@crewai_event_bus.on(LLMCallStartedEvent)
def handle_llm_started(source, event):
started_event.append(event)
@crewai_event_bus.on(LLMCallCompletedEvent)
def handle_llm_completed(source, event):
completed_event.append(event)
@crewai_event_bus.on(LLMStreamChunkEvent)
def handle_llm_stream_chunk(source, event):
stream_event.append(event)
crew = Crew(agents=[base_agent], tasks=[base_task])
crew.kickoff()
assert len(completed_event) == 1
assert len(failed_event) == 0
assert len(started_event) == 1
assert len(stream_event) == 0
all_events = completed_event + failed_event + started_event + stream_event
all_agent_roles = [event.agent_role for event in all_events]
all_agent_id = [event.agent_id for event in all_events]
all_task_id = [event.task_id for event in all_events]
all_task_name = [event.task_name for event in all_events]
# ensure all events have the agent + task props set
assert len(all_agent_roles) == 2
assert len(all_agent_id) == 2
assert len(all_task_id) == 2
assert len(all_task_name) == 2
assert set(all_agent_roles) == {base_agent.role}
assert set(all_agent_id) == {base_agent.id}
assert set(all_task_id) == {base_task.id}
assert set(all_task_name) == {base_task.name or base_task.description}
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_event_with_lite_agent():
completed_event = []
failed_event = []
started_event = []
stream_event = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMCallFailedEvent)
def handle_llm_failed(source, event):
failed_event.append(event)
@crewai_event_bus.on(LLMCallStartedEvent)
def handle_llm_started(source, event):
started_event.append(event)
@crewai_event_bus.on(LLMCallCompletedEvent)
def handle_llm_completed(source, event):
completed_event.append(event)
@crewai_event_bus.on(LLMStreamChunkEvent)
def handle_llm_stream_chunk(source, event):
stream_event.append(event)
agent = Agent(
role="Speaker",
llm=LLM(model="gpt-4o-mini", stream=True),
goal="Just say hi",
backstory="You are a helpful assistant that just says hi",
)
agent.kickoff(messages=[{"role": "user", "content": "say hi!"}])
assert len(completed_event) == 1
assert len(failed_event) == 0
assert len(started_event) == 1
assert len(stream_event) == 15
all_events = completed_event + failed_event + started_event + stream_event
all_agent_roles = [event.agent_role for event in all_events]
all_agent_id = [event.agent_id for event in all_events]
all_task_id = [event.task_id for event in all_events if event.task_id]
all_task_name = [event.task_name for event in all_events if event.task_name]
# ensure all events have the agent + task props set
assert len(all_agent_roles) == 17
assert len(all_agent_id) == 17
assert len(all_task_id) == 0
assert len(all_task_name) == 0
assert set(all_agent_roles) == {agent.role}
assert set(all_agent_id) == {agent.id}

View File

@@ -1,50 +0,0 @@
import os
import unittest
import uuid
import pytest
from crewai.utilities.file_handler import PickleHandler
class TestPickleHandler(unittest.TestCase):
def setUp(self):
# Use a unique file name for each test to avoid race conditions in parallel test execution
unique_id = str(uuid.uuid4())
self.file_name = f"test_data_{unique_id}.pkl"
self.file_path = os.path.join(os.getcwd(), self.file_name)
self.handler = PickleHandler(self.file_name)
def tearDown(self):
if os.path.exists(self.file_path):
os.remove(self.file_path)
def test_initialize_file(self):
assert os.path.exists(self.file_path) is False
self.handler.initialize_file()
assert os.path.exists(self.file_path) is True
assert os.path.getsize(self.file_path) >= 0
def test_save_and_load(self):
data = {"key": "value"}
self.handler.save(data)
loaded_data = self.handler.load()
assert loaded_data == data
def test_load_empty_file(self):
loaded_data = self.handler.load()
assert loaded_data == {}
def test_load_corrupted_file(self):
with open(self.file_path, "wb") as file:
file.write(b"corrupted data")
file.flush()
os.fsync(file.fileno()) # Ensure data is written to disk
with pytest.raises(Exception) as exc:
self.handler.load()
assert str(exc.value) == "pickle data was truncated"
        assert str(exc.type) == "<class '_pickle.UnpicklingError'>"
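# A minimal sketch of the PickleHandler behavior these tests pin down
# (illustrative; the real class is crewai.utilities.file_handler.PickleHandler):
# load() returns {} for a missing or empty file, save()/load() round-trip via
# pickle, and corrupted bytes surface pickle's own UnpicklingError.
import pickle


class _SketchPickleHandler:
    def __init__(self, file_path):
        self.file_path = file_path

    def initialize_file(self):
        self.save({})  # creates the store on disk

    def save(self, data):
        with open(self.file_path, "wb") as f:
            pickle.dump(data, f)

    def load(self):
        if not os.path.exists(self.file_path) or os.path.getsize(self.file_path) == 0:
            return {}  # missing or empty file reads as an empty store
        with open(self.file_path, "rb") as f:
            return pickle.load(f)  # raises UnpicklingError on corrupt data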

View File

@@ -1,44 +0,0 @@
import pytest
from crewai.utilities.i18n import I18N
def test_load_prompts():
i18n = I18N()
i18n.load_prompts()
assert i18n._prompts is not None
def test_slice():
i18n = I18N()
i18n.load_prompts()
assert isinstance(i18n.slice("role_playing"), str)
def test_tools():
i18n = I18N()
i18n.load_prompts()
assert isinstance(i18n.tools("ask_question"), str)
def test_retrieve():
i18n = I18N()
i18n.load_prompts()
assert isinstance(i18n.retrieve("slices", "role_playing"), str)
def test_retrieve_not_found():
i18n = I18N()
i18n.load_prompts()
with pytest.raises(Exception):
i18n.retrieve("nonexistent_kind", "nonexistent_key")
def test_prompt_file():
import os
path = os.path.join(os.path.dirname(__file__), "prompts.json")
i18n = I18N(prompt_file=path)
i18n.load_prompts()
assert isinstance(i18n.retrieve("slices", "role_playing"), str)
assert i18n.retrieve("slices", "role_playing") == "Lorem ipsum dolor sit amet"
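# The assertions above assume a prompt file shaped like the built-in
# translations: top-level kinds ("slices", "tools", ...) mapping keys to
# prompt strings. A minimal prompts.json consistent with the expected value
# (illustrative):
#
#   {
#       "slices": {
#           "role_playing": "Lorem ipsum dolor sit amet"
#       }
#   }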

View File

@@ -1,189 +0,0 @@
"""Tests for import utilities."""
import sys
from unittest.mock import MagicMock, patch
import pytest
from crewai.utilities.import_utils import (
OptionalDependencyError,
import_and_validate_definition,
require,
validate_import_path,
)
class TestRequire:
"""Test the require function."""
def test_require_existing_module(self):
"""Test requiring a module that exists."""
module = require("json", purpose="testing")
assert module.__name__ == "json"
def test_require_missing_module(self):
"""Test requiring a module that doesn't exist."""
with pytest.raises(OptionalDependencyError) as exc_info:
require("nonexistent_module_xyz", purpose="testing missing module")
error_msg = str(exc_info.value)
assert (
"testing missing module requires the optional dependency 'nonexistent_module_xyz'"
in error_msg
)
assert "uv add nonexistent_module_xyz" in error_msg
def test_require_with_import_error(self):
"""Test that ImportError is properly chained."""
with patch("importlib.import_module") as mock_import:
mock_import.side_effect = ImportError("Module import failed")
with pytest.raises(OptionalDependencyError) as exc_info:
require("some_module", purpose="testing error handling")
assert isinstance(exc_info.value.__cause__, ImportError)
assert str(exc_info.value.__cause__) == "Module import failed"
def test_optional_dependency_error_is_import_error(self):
"""Test that OptionalDependencyError is a subclass of ImportError."""
assert issubclass(OptionalDependencyError, ImportError)
def test_require_with_attr(self):
"""Test requiring a specific attribute from a module."""
loads = require("json", purpose="testing", attr="loads")
import json
assert loads == json.loads
def test_require_with_nonexistent_attr(self):
"""Test requiring a nonexistent attribute raises AttributeError."""
with pytest.raises(AttributeError) as exc_info:
require("json", purpose="testing", attr="nonexistent_attr")
assert "Module 'json' has no attribute 'nonexistent_attr'" in str(
exc_info.value
)
def test_require_extracts_package_name(self):
"""Test that require correctly extracts package name from module path."""
with pytest.raises(OptionalDependencyError) as exc_info:
require("some.nested.module.path", purpose="testing")
error_msg = str(exc_info.value)
assert "uv add some" in error_msg
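# A minimal sketch of the `require` contract the class above pins down
# (illustrative; the real helper is crewai.utilities.import_utils.require):
# import the module, chain any ImportError into OptionalDependencyError with
# a "uv add <top-level package>" hint, and optionally resolve an attribute.
import importlib


def _sketch_require(name, *, purpose, attr=None):
    try:
        module = importlib.import_module(name)
    except ImportError as e:
        package = name.split(".")[0]  # "some.nested.module.path" -> "some"
        raise OptionalDependencyError(
            f"{purpose} requires the optional dependency '{name}'. "
            f"Install it with: uv add {package}"
        ) from e
    if attr is None:
        return module
    if not hasattr(module, attr):
        raise AttributeError(f"Module '{name}' has no attribute '{attr}'")
    return getattr(module, attr)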
class TestValidateImportPath:
"""Test the validate_import_path function."""
def test_validate_import_path_success(self):
"""Test successful import of a class."""
result = validate_import_path("json.JSONDecoder")
import json
assert result == json.JSONDecoder
def test_validate_import_path_malformed_no_module(self):
"""Test validation with no module path."""
with pytest.raises(ValueError) as exc_info:
validate_import_path("ClassName")
assert "import_path 'ClassName' must be of the form 'module.ClassName'" in str(
exc_info.value
)
def test_validate_import_path_empty_string(self):
"""Test validation with empty string."""
with pytest.raises(ValueError) as exc_info:
validate_import_path("")
assert "import_path '' must be of the form 'module.ClassName'" in str(
exc_info.value
)
def test_validate_import_path_module_not_found(self):
"""Test validation with non-existent module."""
with pytest.raises(ValueError) as exc_info:
validate_import_path("nonexistent_module.ClassName")
error_msg = str(exc_info.value)
assert "Package 'nonexistent_module' could not be imported" in error_msg
assert "uv add nonexistent_module" in error_msg
def test_validate_import_path_attribute_not_found(self):
"""Test validation when attribute doesn't exist in module."""
with pytest.raises(ValueError) as exc_info:
validate_import_path("json.NonExistentClass")
assert "Attribute 'NonExistentClass' not found in module 'json'" in str(
exc_info.value
)
def test_validate_import_path_nested_module(self):
"""Test validation with nested module path."""
result = validate_import_path("unittest.mock.MagicMock")
from unittest.mock import MagicMock
assert result == MagicMock
def test_validate_import_path_extracts_package_name(self):
"""Test that package name is correctly extracted for error message."""
with pytest.raises(ValueError) as exc_info:
validate_import_path("some.nested.module.path.ClassName")
error_msg = str(exc_info.value)
assert "Package 'some' could not be imported" in error_msg
assert "uv add some" in error_msg
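# A minimal sketch of validate_import_path consistent with the assertions
# above (illustrative; the real function lives in crewai.utilities.import_utils):
# split on the last dot, import the module, then resolve the attribute,
# converting failures into ValueError with a "uv add" hint. Reuses the
# importlib import from the sketch above.
def _sketch_validate_import_path(import_path: str):
    if "." not in import_path:
        raise ValueError(
            f"import_path '{import_path}' must be of the form 'module.ClassName'"
        )
    module_path, attr_name = import_path.rsplit(".", 1)
    package = module_path.split(".")[0]
    try:
        module = importlib.import_module(module_path)
    except ImportError as e:
        raise ValueError(
            f"Package '{package}' could not be imported. "
            f"Install it with: uv add {package}"
        ) from e
    if not hasattr(module, attr_name):
        raise ValueError(
            f"Attribute '{attr_name}' not found in module '{module_path}'"
        )
    return getattr(module, attr_name)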
class TestImportAndValidateDefinition:
"""Test the import_and_validate_definition function."""
def test_import_and_validate_definition_success(self):
"""Test successful import through Pydantic adapter."""
result = import_and_validate_definition("json.JSONEncoder")
import json
assert result == json.JSONEncoder
def test_import_and_validate_definition_with_function(self):
"""Test importing a function instead of a class."""
result = import_and_validate_definition("json.loads")
import json
assert result == json.loads
def test_import_and_validate_definition_invalid(self):
"""Test that invalid paths raise ValueError."""
with pytest.raises(ValueError) as exc_info:
import_and_validate_definition("InvalidPath")
assert "must be of the form 'module.ClassName'" in str(exc_info.value)
def test_import_and_validate_definition_module_error(self):
"""Test error handling for missing modules."""
with pytest.raises(ValueError) as exc_info:
import_and_validate_definition("missing_package.SomeClass")
error_msg = str(exc_info.value)
assert "Package 'missing_package' could not be imported" in error_msg
assert "uv add missing_package" in error_msg
def test_import_and_validate_definition_attribute_error(self):
"""Test error handling for missing attributes."""
with pytest.raises(ValueError) as exc_info:
import_and_validate_definition("json.MissingClass")
assert "Attribute 'MissingClass' not found in module 'json'" in str(
exc_info.value
)
def test_import_and_validate_definition_with_mock(self):
"""Test that mocked modules work correctly."""
mock_module = MagicMock()
mock_class = MagicMock()
mock_module.MockClass = mock_class
with patch.dict(sys.modules, {"mocked_module": mock_module}):
result = import_and_validate_definition("mocked_module.MockClass")
assert result == mock_class

View File

@@ -1,92 +0,0 @@
"""
Tests for verifying the integration of knowledge sources in the planning process.
This module ensures that agent knowledge is properly included during task planning.
"""
from unittest.mock import patch
import pytest
from crewai.agent import Agent
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.task import Task
from crewai.utilities.planning_handler import CrewPlanner
@pytest.fixture
def mock_knowledge_source():
"""
Create a mock knowledge source with test content.
Returns:
StringKnowledgeSource:
A knowledge source containing AI-related test content
"""
content = """
Important context about AI:
1. AI systems use machine learning algorithms
2. Neural networks are a key component
3. Training data is essential for good performance
"""
return StringKnowledgeSource(content=content)
@patch("crewai.rag.config.utils.get_rag_client")
def test_knowledge_included_in_planning(mock_get_client):
"""Test that verifies knowledge sources are properly included in planning."""
# Mock RAG client
mock_client = mock_get_client.return_value
mock_client.get_or_create_collection.return_value = None
mock_client.add_documents.return_value = None
# Create an agent with knowledge
agent = Agent(
role="AI Researcher",
goal="Research and explain AI concepts",
backstory="Expert in artificial intelligence",
knowledge_sources=[
StringKnowledgeSource(
content="AI systems require careful training and validation."
)
],
)
# Create a task for the agent
task = Task(
description="Explain the basics of AI systems",
expected_output="A clear explanation of AI fundamentals",
agent=agent,
)
# Create a crew planner
planner = CrewPlanner([task], None)
# Get the task summary
task_summary = planner._create_tasks_summary()
# Verify that knowledge is included in planning when present
assert "AI systems require careful training" in task_summary, (
"Knowledge content should be present in task summary when knowledge exists"
)
assert '"agent_knowledge"' in task_summary, (
"agent_knowledge field should be present in task summary when knowledge exists"
)
# Verify that knowledge is properly formatted
assert isinstance(task.agent.knowledge_sources, list), (
"Knowledge sources should be stored in a list"
)
assert len(task.agent.knowledge_sources) > 0, (
"At least one knowledge source should be present"
)
assert task.agent.knowledge_sources[0].content in task_summary, (
"Knowledge source content should be included in task summary"
)
# Verify that other expected components are still present
assert task.description in task_summary, (
"Task description should be present in task summary"
)
assert task.expected_output in task_summary, (
"Expected output should be present in task summary"
)
assert agent.role in task_summary, "Agent role should be present in task summary"

View File

@@ -1,96 +0,0 @@
import os
from unittest.mock import patch
import pytest
from litellm.exceptions import BadRequestError
from crewai.llm import LLM
from crewai.utilities.llm_utils import create_llm
def test_create_llm_with_llm_instance():
existing_llm = LLM(model="gpt-4o")
llm = create_llm(llm_value=existing_llm)
assert llm is existing_llm
def test_create_llm_with_valid_model_string():
llm = create_llm(llm_value="gpt-4o")
assert isinstance(llm, LLM)
assert llm.model == "gpt-4o"
def test_create_llm_with_invalid_model_string():
with pytest.raises(BadRequestError, match="LLM Provider NOT provided"):
llm = create_llm(llm_value="invalid-model")
llm.call(messages=[{"role": "user", "content": "Hello, world!"}])
def test_create_llm_with_unknown_object_missing_attributes():
class UnknownObject:
pass
unknown_obj = UnknownObject()
llm = create_llm(llm_value=unknown_obj)
# Attempt to call the LLM and expect it to raise an error due to missing attributes
with pytest.raises(BadRequestError, match="LLM Provider NOT provided"):
llm.call(messages=[{"role": "user", "content": "Hello, world!"}])
def test_create_llm_with_none_uses_default_model():
    with patch.dict(os.environ, {}, clear=True):
        with patch("crewai.cli.constants.DEFAULT_LLM_MODEL", "gpt-4o-mini"):
            llm = create_llm(llm_value=None)
            assert isinstance(llm, LLM)
            assert llm.model == "gpt-4o-mini"
def test_create_llm_with_unknown_object():
class UnknownObject:
model_name = "gpt-4o"
temperature = 0.7
max_tokens = 1500
unknown_obj = UnknownObject()
llm = create_llm(llm_value=unknown_obj)
assert isinstance(llm, LLM)
assert llm.model == "gpt-4o"
assert llm.temperature == 0.7
assert llm.max_tokens == 1500
def test_create_llm_from_env_with_unaccepted_attributes():
with patch.dict(
os.environ,
{
"OPENAI_MODEL_NAME": "gpt-3.5-turbo",
"AWS_ACCESS_KEY_ID": "fake-access-key",
"AWS_SECRET_ACCESS_KEY": "fake-secret-key",
"AWS_REGION_NAME": "us-west-2",
},
):
llm = create_llm(llm_value=None)
assert isinstance(llm, LLM)
assert llm.model == "gpt-3.5-turbo"
assert not hasattr(llm, "AWS_ACCESS_KEY_ID")
assert not hasattr(llm, "AWS_SECRET_ACCESS_KEY")
assert not hasattr(llm, "AWS_REGION_NAME")
def test_create_llm_with_partial_attributes():
class PartialAttributes:
model_name = "gpt-4o"
# temperature is missing
obj = PartialAttributes()
llm = create_llm(llm_value=obj)
assert isinstance(llm, LLM)
assert llm.model == "gpt-4o"
assert llm.temperature is None # Should handle missing attributes gracefully
def test_create_llm_with_invalid_type():
with pytest.raises(BadRequestError, match="LLM Provider NOT provided"):
llm = create_llm(llm_value=42)
llm.call(messages=[{"role": "user", "content": "Hello, world!"}])
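# A minimal sketch of the coercion order the tests above pin down
# (illustrative; the real helper is crewai.utilities.llm_utils.create_llm).
# The "gpt-4o-mini" fallback is an assumption standing in for the library's
# DEFAULT_LLM_MODEL constant.
def _sketch_create_llm(llm_value):
    if isinstance(llm_value, LLM):
        return llm_value                     # pass an LLM instance through
    if isinstance(llm_value, str):
        return LLM(model=llm_value)          # treat strings as model names
    if llm_value is None:
        # the environment wins, otherwise fall back to the assumed default
        return LLM(model=os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini"))
    # anything else: duck-type the common attributes, tolerating gaps; an
    # object without model_name ends up with an invalid model string, which
    # is why the calls above fail with "LLM Provider NOT provided"
    return LLM(
        model=getattr(llm_value, "model_name", str(llm_value)),
        temperature=getattr(llm_value, "temperature", None),
        max_tokens=getattr(llm_value, "max_tokens", None),
    )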

View File

@@ -1,182 +0,0 @@
from typing import Optional
from unittest.mock import MagicMock, patch
import pytest
from pydantic import BaseModel
from crewai.agent import Agent
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.tools.base_tool import BaseTool
from crewai.utilities.planning_handler import (
CrewPlanner,
PlannerTaskPydanticOutput,
PlanPerTask,
)
class TestCrewPlanner:
@pytest.fixture
def crew_planner(self):
tasks = [
Task(
description="Task 1",
expected_output="Output 1",
agent=Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1"),
),
Task(
description="Task 2",
expected_output="Output 2",
agent=Agent(role="Agent 2", goal="Goal 2", backstory="Backstory 2"),
),
Task(
description="Task 3",
expected_output="Output 3",
agent=Agent(role="Agent 3", goal="Goal 3", backstory="Backstory 3"),
),
]
return CrewPlanner(tasks, None)
@pytest.fixture
def crew_planner_different_llm(self):
tasks = [
Task(
description="Task 1",
expected_output="Output 1",
agent=Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1"),
)
]
planning_agent_llm = "gpt-3.5-turbo"
return CrewPlanner(tasks, planning_agent_llm)
def test_handle_crew_planning(self, crew_planner):
list_of_plans_per_task = [
PlanPerTask(task="Task1", plan="Plan 1"),
PlanPerTask(task="Task2", plan="Plan 2"),
PlanPerTask(task="Task3", plan="Plan 3"),
]
with patch.object(Task, "execute_sync") as execute:
execute.return_value = TaskOutput(
description="Description",
agent="agent",
pydantic=PlannerTaskPydanticOutput(
list_of_plans_per_task=list_of_plans_per_task
),
)
result = crew_planner._handle_crew_planning()
assert crew_planner.planning_agent_llm == "gpt-4o-mini"
assert isinstance(result, PlannerTaskPydanticOutput)
assert len(result.list_of_plans_per_task) == len(crew_planner.tasks)
execute.assert_called_once()
def test_create_planning_agent(self, crew_planner):
agent = crew_planner._create_planning_agent()
assert isinstance(agent, Agent)
assert agent.role == "Task Execution Planner"
def test_create_planner_task(self, crew_planner):
planning_agent = Agent(
role="Planning Agent",
goal="Plan Step by Step Plan",
backstory="Master in Planning",
)
tasks_summary = "Summary of tasks"
task = crew_planner._create_planner_task(planning_agent, tasks_summary)
assert isinstance(task, Task)
assert task.description.startswith("Based on these tasks summary")
assert task.agent == planning_agent
assert (
task.expected_output
== "Step by step plan on how the agents can execute their tasks using the available tools with mastery"
)
def test_create_tasks_summary(self, crew_planner):
tasks_summary = crew_planner._create_tasks_summary()
assert isinstance(tasks_summary, str)
assert tasks_summary.startswith("\n Task Number 1 - Task 1")
assert '"agent_tools": "agent has no tools"' in tasks_summary
# Knowledge field should not be present when empty
assert '"agent_knowledge"' not in tasks_summary
@patch('crewai.knowledge.storage.knowledge_storage.chromadb')
def test_create_tasks_summary_with_knowledge_and_tools(self, mock_chroma):
"""Test task summary generation with both knowledge and tools present."""
# Mock ChromaDB collection
mock_collection = mock_chroma.return_value.get_or_create_collection.return_value
mock_collection.add.return_value = None
# Create mock tools with proper string descriptions and structured tool support
class MockTool(BaseTool):
name: str
description: str
def __init__(self, name: str, description: str):
tool_data = {"name": name, "description": description}
super().__init__(**tool_data)
def __str__(self):
return self.name
def __repr__(self):
return self.name
def to_structured_tool(self):
return self
def _run(self, *args, **kwargs):
pass
def _generate_description(self) -> str:
"""Override _generate_description to avoid args_schema handling."""
return self.description
tool1 = MockTool("tool1", "Tool 1 description")
tool2 = MockTool("tool2", "Tool 2 description")
# Create a task with knowledge and tools
task = Task(
description="Task with knowledge and tools",
expected_output="Expected output",
agent=Agent(
role="Test Agent",
goal="Test Goal",
backstory="Test Backstory",
tools=[tool1, tool2],
knowledge_sources=[
StringKnowledgeSource(content="Test knowledge content")
]
)
)
# Create planner with the new task
planner = CrewPlanner([task], None)
tasks_summary = planner._create_tasks_summary()
# Verify task summary content
assert isinstance(tasks_summary, str)
assert task.description in tasks_summary
assert task.expected_output in tasks_summary
assert '"agent_tools": [tool1, tool2]' in tasks_summary
assert '"agent_knowledge": "[\\"Test knowledge content\\"]"' in tasks_summary
assert task.agent.role in tasks_summary
assert task.agent.goal in tasks_summary
def test_handle_crew_planning_different_llm(self, crew_planner_different_llm):
with patch.object(Task, "execute_sync") as execute:
execute.return_value = TaskOutput(
description="Description",
agent="agent",
pydantic=PlannerTaskPydanticOutput(
list_of_plans_per_task=[PlanPerTask(task="Task1", plan="Plan 1")]
),
)
result = crew_planner_different_llm._handle_crew_planning()
assert crew_planner_different_llm.planning_agent_llm == "gpt-3.5-turbo"
assert isinstance(result, PlannerTaskPydanticOutput)
assert len(result.list_of_plans_per_task) == len(
crew_planner_different_llm.tasks
)
execute.assert_called_once()

View File

@@ -1,94 +0,0 @@
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import pytest
from pydantic import BaseModel, Field
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
def test_simple_model():
class SimpleModel(BaseModel):
field1: int
field2: str
parser = PydanticSchemaParser(model=SimpleModel)
schema = parser.get_schema()
expected_schema = """{
field1: int,
field2: str
}"""
assert schema.strip() == expected_schema.strip()
def test_nested_model():
class NestedModel(BaseModel):
nested_field: int
class ParentModel(BaseModel):
parent_field: str
nested: NestedModel
parser = PydanticSchemaParser(model=ParentModel)
schema = parser.get_schema()
expected_schema = """{
parent_field: str,
nested: NestedModel
{
nested_field: int
}
}"""
assert schema.strip() == expected_schema.strip()
def test_model_with_list():
class ListModel(BaseModel):
list_field: List[int]
parser = PydanticSchemaParser(model=ListModel)
schema = parser.get_schema()
expected_schema = """{
list_field: List[int]
}"""
assert schema.strip() == expected_schema.strip()
def test_model_with_optional_field():
class OptionalModel(BaseModel):
optional_field: Optional[str]
parser = PydanticSchemaParser(model=OptionalModel)
schema = parser.get_schema()
expected_schema = """{
optional_field: Optional[str]
}"""
assert schema.strip() == expected_schema.strip()
def test_model_with_union():
class UnionModel(BaseModel):
union_field: Union[int, str]
parser = PydanticSchemaParser(model=UnionModel)
schema = parser.get_schema()
expected_schema = """{
union_field: Union[int, str]
}"""
assert schema.strip() == expected_schema.strip()
def test_model_with_dict():
class DictModel(BaseModel):
dict_field: Dict[str, int]
parser = PydanticSchemaParser(model=DictModel)
schema = parser.get_schema()
expected_schema = """{
dict_field: Dict[str, int]
}"""
assert schema.strip() == expected_schema.strip()

View File

@@ -1,152 +0,0 @@
from datetime import date, datetime
from typing import List
from unittest.mock import Mock
import pytest
from pydantic import BaseModel
from crewai.utilities.serialization import to_serializable, to_string
class Address(BaseModel):
street: str
city: str
country: str
class Person(BaseModel):
name: str
age: int
address: Address
birthday: date
skills: List[str]
@pytest.mark.parametrize(
"test_input,expected",
[
({"text": "hello world"}, {"text": "hello world"}),
({"number": 42}, {"number": 42}),
({"decimal": 3.14}, {"decimal": 3.14}),
({"flag": True}, {"flag": True}),
({"empty": None}, {"empty": None}),
({"list": [1, 2, 3]}, {"list": [1, 2, 3]}),
({"tuple": (1, 2, 3)}, {"tuple": [1, 2, 3]}),
({"set": {1, 2, 3}}, {"set": [1, 2, 3]}),
({"nested": [1, [2, 3], {4, 5}]}, {"nested": [1, [2, 3], [4, 5]]}),
],
)
def test_basic_serialization(test_input, expected):
result = to_serializable(test_input)
assert result == expected
@pytest.mark.parametrize(
"input_date,expected",
[
(date(2024, 1, 1), "2024-01-01"),
(datetime(2024, 1, 1, 12, 30), "2024-01-01T12:30:00"),
],
)
def test_temporal_serialization(input_date, expected):
result = to_serializable({"date": input_date})
assert result["date"] == expected
@pytest.mark.parametrize(
"key,value,expected_key_type",
[
(("tuple", "key"), "value", str),
(None, "value", str),
(123, "value", str),
("normal", "value", str),
],
)
def test_dictionary_key_serialization(key, value, expected_key_type):
result = to_serializable({key: value})
assert len(result) == 1
result_key = next(iter(result.keys()))
assert isinstance(result_key, expected_key_type)
assert result[result_key] == value
@pytest.mark.parametrize(
"callable_obj,expected_in_result",
[
(lambda x: x * 2, "lambda"),
(str.upper, "upper"),
],
)
def test_callable_serialization(callable_obj, expected_in_result):
result = to_serializable({"func": callable_obj})
assert isinstance(result["func"], str)
assert expected_in_result in result["func"].lower()
def test_pydantic_model_serialization():
address = Address(street="123 Main St", city="Tech City", country="Pythonia")
person = Person(
name="John Doe",
age=30,
address=address,
birthday=date(1994, 1, 1),
skills=["Python", "Testing"],
)
data = {
"single_model": address,
"nested_model": person,
"model_list": [address, address],
"model_dict": {"home": address},
}
result = to_serializable(data)
assert (
to_string(result)
== '{"single_model": {"street": "123 Main St", "city": "Tech City", "country": "Pythonia"}, "nested_model": {"name": "John Doe", "age": 30, "address": {"street": "123 Main St", "city": "Tech City", "country": "Pythonia"}, "birthday": "1994-01-01", "skills": ["Python", "Testing"]}, "model_list": [{"street": "123 Main St", "city": "Tech City", "country": "Pythonia"}, {"street": "123 Main St", "city": "Tech City", "country": "Pythonia"}], "model_dict": {"home": {"street": "123 Main St", "city": "Tech City", "country": "Pythonia"}}}'
)
def test_depth_limit():
"""Test max depth handling with a deeply nested structure"""
def create_nested(depth):
if depth == 0:
return "value"
return {"next": create_nested(depth - 1)}
deep_structure = create_nested(10)
result = to_serializable(deep_structure)
assert result == {
"next": {
"next": {
"next": {
"next": {
"next": "{'next': {'next': {'next': {'next': {'next': 'value'}}}}}"
}
}
}
}
}
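# The expected structure above shows the depth guard the serializer is
# assumed to use: past a maximum recursion depth (five levels here, inferred
# from the assertion), the remaining subtree is collapsed into its string
# representation instead of being walked further. A minimal sketch of that
# guard (illustrative; the real logic lives in crewai.utilities.serialization):
def _sketch_serialize(obj, depth=0, max_depth=5):
    if depth >= max_depth:
        return str(obj)  # collapse anything deeper into its repr
    if isinstance(obj, dict):
        return {str(k): _sketch_serialize(v, depth + 1) for k, v in obj.items()}
    if isinstance(obj, (list, tuple, set)):
        return [_sketch_serialize(v, depth + 1) for v in obj]
    return obj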
def test_exclude_keys():
result = to_serializable({"key1": "value1", "key2": "value2"}, exclude={"key1"})
assert result == {"key2": "value2"}
model = Person(
name="John Doe",
age=30,
address=Address(street="123 Main St", city="Tech City", country="Pythonia"),
birthday=date(1994, 1, 1),
skills=["Python", "Testing"],
)
result = to_serializable(model, exclude={"address"})
assert result == {
"name": "John Doe",
"age": 30,
"birthday": "1994-01-01",
"skills": ["Python", "Testing"],
}

View File

@@ -1,187 +0,0 @@
from typing import Any, Dict, List, Union
import pytest
from crewai.utilities.string_utils import interpolate_only
class TestInterpolateOnly:
"""Tests for the interpolate_only function in string_utils.py."""
def test_basic_variable_interpolation(self):
"""Test basic variable interpolation works correctly."""
template = "Hello, {name}! Welcome to {company}."
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"name": "Alice",
"company": "CrewAI",
}
result = interpolate_only(template, inputs)
assert result == "Hello, Alice! Welcome to CrewAI."
def test_multiple_occurrences_of_same_variable(self):
"""Test that multiple occurrences of the same variable are replaced."""
template = "{name} is using {name}'s account."
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"name": "Bob"
}
result = interpolate_only(template, inputs)
assert result == "Bob is using Bob's account."
def test_json_structure_preservation(self):
"""Test that JSON structures are preserved and not interpolated incorrectly."""
template = """
Instructions for {agent}:
Please return the following object:
{"name": "person's name", "age": 25, "skills": ["coding", "testing"]}
"""
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"agent": "DevAgent"
}
result = interpolate_only(template, inputs)
assert "Instructions for DevAgent:" in result
assert (
'{"name": "person\'s name", "age": 25, "skills": ["coding", "testing"]}'
in result
)
def test_complex_nested_json(self):
"""Test with complex JSON structures containing curly braces."""
template = """
{agent} needs to process:
{
"config": {
"nested": {
"value": 42
},
"arrays": [1, 2, {"inner": "value"}]
}
}
"""
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"agent": "DataProcessor"
}
result = interpolate_only(template, inputs)
assert "DataProcessor needs to process:" in result
assert '"nested": {' in result
assert '"value": 42' in result
assert '[1, 2, {"inner": "value"}]' in result
def test_missing_variable(self):
"""Test that an error is raised when a required variable is missing."""
template = "Hello, {name}!"
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"not_name": "Alice"
}
with pytest.raises(KeyError) as excinfo:
interpolate_only(template, inputs)
assert "template variable" in str(excinfo.value).lower()
assert "name" in str(excinfo.value)
def test_invalid_input_types(self):
"""Test that an error is raised with invalid input types."""
template = "Hello, {name}!"
# Using Any for this test since we're intentionally testing an invalid type
inputs: Dict[str, Any] = {"name": object()} # Object is not a valid input type
with pytest.raises(ValueError) as excinfo:
interpolate_only(template, inputs)
assert "unsupported type" in str(excinfo.value).lower()
def test_empty_input_string(self):
"""Test handling of empty or None input string."""
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"name": "Alice"
}
assert interpolate_only("", inputs) == ""
assert interpolate_only(None, inputs) == ""
def test_no_variables_in_template(self):
"""Test a template with no variables to replace."""
template = "This is a static string with no variables."
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"name": "Alice"
}
result = interpolate_only(template, inputs)
assert result == template
def test_variable_name_starting_with_underscore(self):
"""Test variables starting with underscore are replaced correctly."""
template = "Variable: {_special_var}"
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"_special_var": "Special Value"
}
result = interpolate_only(template, inputs)
assert result == "Variable: Special Value"
def test_preserves_non_matching_braces(self):
"""Test that non-matching braces patterns are preserved."""
template = (
"This {123} and {!var} should not be replaced but {valid_var} should."
)
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"valid_var": "works"
}
result = interpolate_only(template, inputs)
assert (
result == "This {123} and {!var} should not be replaced but works should."
)
def test_complex_mixed_scenario(self):
"""Test a complex scenario with both valid variables and JSON structures."""
template = """
{agent_name} is working on task {task_id}.
Instructions:
1. Process the data
2. Return results as:
{
"taskId": "{task_id}",
"results": {
"processed_by": "agent_name",
"status": "complete",
"values": [1, 2, 3]
}
}
"""
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
"agent_name": "AnalyticsAgent",
"task_id": "T-12345",
}
result = interpolate_only(template, inputs)
assert "AnalyticsAgent is working on task T-12345" in result
assert '"taskId": "T-12345"' in result
assert '"processed_by": "agent_name"' in result # This shouldn't be replaced
assert '"values": [1, 2, 3]' in result
def test_empty_inputs_dictionary(self):
"""Test that an error is raised with empty inputs dictionary."""
template = "Hello, {name}!"
inputs: Dict[str, Any] = {}
with pytest.raises(ValueError) as excinfo:
interpolate_only(template, inputs)
assert "inputs dictionary cannot be empty" in str(excinfo.value).lower()
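# A minimal regex sketch of the interpolation contract the class above pins
# down (illustrative; the real function is crewai.utilities.string_utils
# .interpolate_only): only {identifier} placeholders are touched, JSON-style
# braces pass through untouched, and missing variables or bad inputs raise.
import re


def _sketch_interpolate_only(input_string, inputs):
    if not input_string:
        return ""  # empty or None templates interpolate to ""
    if not inputs:
        raise ValueError("Inputs dictionary cannot be empty")
    for value in inputs.values():
        if not isinstance(value, (str, int, float, dict, list)):
            raise ValueError(f"Unsupported type {type(value).__name__} in inputs")

    def replace(match):
        key = match.group(1)
        if key not in inputs:
            raise KeyError(f"Template variable '{key}' not found in inputs")
        return str(inputs[key])

    # Match {name} only when `name` is a valid Python identifier, so {123},
    # {!var}, and JSON braces like {"key": ...} are left alone.
    return re.sub(r"\{([A-Za-z_][A-Za-z0-9_]*)\}", replace, input_string)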

View File

@@ -1,97 +0,0 @@
from unittest.mock import MagicMock, patch
from pydantic import BaseModel, Field
from typing import List
from crewai.utilities.converter import ConverterError
from crewai.utilities.training_converter import TrainingConverter
class SampleModel(BaseModel):
string_field: str = Field(description="A simple string field")
list_field: List[str] = Field(description="A list of strings")
number_field: float = Field(description="A number field")
class TestTrainingConverter:
def setup_method(self):
self.llm_mock = MagicMock()
self.test_text = "Sample text for evaluation"
self.test_instructions = "Convert to JSON format"
self.converter = TrainingConverter(
llm=self.llm_mock,
text=self.test_text,
            model=SampleModel,
instructions=self.test_instructions
)
@patch("crewai.utilities.converter.Converter.to_pydantic")
def test_fallback_to_field_by_field(self, parent_to_pydantic_mock):
parent_to_pydantic_mock.side_effect = ConverterError("Failed to convert directly")
llm_responses = {
"string_field": "test string value",
"list_field": "- item1\n- item2\n- item3",
"number_field": "8.5"
}
def llm_side_effect(messages):
prompt = messages[1]["content"]
if "string_field" in prompt:
return llm_responses["string_field"]
elif "list_field" in prompt:
return llm_responses["list_field"]
elif "number_field" in prompt:
return llm_responses["number_field"]
return "unknown field"
self.llm_mock.call.side_effect = llm_side_effect
result = self.converter.to_pydantic()
assert result.string_field == "test string value"
assert result.list_field == ["item1", "item2", "item3"]
assert result.number_field == 8.5
parent_to_pydantic_mock.assert_called_once()
assert self.llm_mock.call.call_count == 3
def test_ask_llm_for_field(self):
field_name = "test_field"
field_description = "This is a test field description"
expected_response = "Test response"
self.llm_mock.call.return_value = expected_response
response = self.converter._ask_llm_for_field(field_name, field_description)
assert response == expected_response
self.llm_mock.call.assert_called_once()
call_args = self.llm_mock.call.call_args[0][0]
assert call_args[0]["role"] == "system"
assert f"Extract the {field_name}" in call_args[0]["content"]
assert call_args[1]["role"] == "user"
assert field_name in call_args[1]["content"]
assert field_description in call_args[1]["content"]
def test_process_field_value_string(self):
response = " This is a string with extra whitespace "
result = self.converter._process_field_value(response, str)
assert result == "This is a string with extra whitespace"
def test_process_field_value_list_with_bullet_points(self):
response = "- Item 1\n- Item 2\n- Item 3"
result = self.converter._process_field_value(response, List[str])
assert result == ["Item 1", "Item 2", "Item 3"]
def test_process_field_value_list_with_json(self):
response = '["Item 1", "Item 2", "Item 3"]'
with patch("crewai.utilities.training_converter.json.loads") as json_mock:
json_mock.return_value = ["Item 1", "Item 2", "Item 3"]
result = self.converter._process_field_value(response, List[str])
assert result == ["Item 1", "Item 2", "Item 3"]
def test_process_field_value_float(self):
response = "The quality score is 8.5 out of 10"
result = self.converter._process_field_value(response, float)
assert result == 8.5
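# A minimal sketch of the field post-processing these tests describe
# (illustrative; the real method is TrainingConverter._process_field_value):
# strings are stripped, List[str] accepts either bullet lines or a JSON
# array, and floats are pulled out of surrounding prose with a regex.
import json
import re


def _sketch_process_field_value(response, field_type):
    text = response.strip()
    if field_type is float:
        match = re.search(r"-?\d+(?:\.\d+)?", text)  # first number in the text
        return float(match.group()) if match else None
    if field_type == List[str]:
        if text.startswith("["):
            return json.loads(text)  # JSON array form
        # bullet form: one "- item" per line
        return [line.lstrip("- ").strip() for line in text.splitlines() if line.strip()]
    return text  # default: treated as a plain string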

View File

@@ -1,55 +0,0 @@
import os
import tempfile
import unittest
from crewai.utilities.training_handler import CrewTrainingHandler
class TestCrewTrainingHandler(unittest.TestCase):
def setUp(self):
self.temp_file = tempfile.NamedTemporaryFile(suffix=".pkl", delete=False)
self.temp_file.close()
self.handler = CrewTrainingHandler(self.temp_file.name)
def tearDown(self):
if os.path.exists(self.temp_file.name):
os.remove(self.temp_file.name)
del self.handler
def test_save_trained_data(self):
agent_id = "agent1"
trained_data = {"param1": 1, "param2": 2}
self.handler.save_trained_data(agent_id, trained_data)
# Assert that the trained data is saved correctly
data = self.handler.load()
assert data[agent_id] == trained_data
def test_append_existing_agent(self):
agent_id = "agent1"
initial_iteration = 0
initial_data = {"param1": 1, "param2": 2}
self.handler.append(initial_iteration, agent_id, initial_data)
train_iteration = 1
new_data = {"param3": 3, "param4": 4}
self.handler.append(train_iteration, agent_id, new_data)
# Assert that the new data is appended correctly to the existing agent
data = self.handler.load()
assert agent_id in data
assert initial_iteration in data[agent_id]
assert train_iteration in data[agent_id]
assert data[agent_id][initial_iteration] == initial_data
assert data[agent_id][train_iteration] == new_data
def test_append_new_agent(self):
train_iteration = 1
agent_id = "agent2"
new_data = {"param5": 5, "param6": 6}
self.handler.append(train_iteration, agent_id, new_data)
# Assert that the new agent and data are appended correctly
data = self.handler.load()
assert data[agent_id][train_iteration] == new_data