diff --git a/lib/crewai/src/crewai/agent/core.py b/lib/crewai/src/crewai/agent/core.py
index d815b15ed..86072210d 100644
--- a/lib/crewai/src/crewai/agent/core.py
+++ b/lib/crewai/src/crewai/agent/core.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import asyncio
-from collections.abc import Callable, Coroutine, Sequence
+from collections.abc import Callable, Sequence
 import shutil
 import subprocess
 import time
@@ -70,7+70,6 @@ from crewai.security.fingerprint import Fingerprint
 from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.utilities.agent_utils import (
     get_tool_names,
-    is_inside_event_loop,
     load_agent_from_repository,
     parse_tools,
     render_text_description_and_args,
@@ -1684,16 +1683,16 @@ class Agent(BaseAgent):
         self,
         messages: str | list[LLMMessage],
         response_format: type[Any] | None = None,
-    ) -> LiteAgentOutput | Coroutine[Any, Any, LiteAgentOutput]:
+    ) -> LiteAgentOutput:
         """
         Execute the agent with the given messages using the AgentExecutor.
 
         This method provides standalone agent execution without requiring a Crew.
         It supports tools, response formatting, and guardrails.
 
-        When called from within a Flow (inside an event loop), this method
-        automatically returns a coroutine that the Flow framework will await,
-        making it work seamlessly in both sync and async contexts.
+        When called from within a sync Flow method, the Flow framework automatically
+        runs the method in a thread pool, so this works seamlessly. For async Flow
+        methods, use kickoff_async() instead.
 
         Args:
             messages: Either a string query or a list of message dictionaries.
@@ -1703,11 +1702,10 @@
 
         Returns:
             LiteAgentOutput: The result of the agent execution.
-                Or a coroutine if called from within an event loop.
+
+        Note:
+            If called from an async context (not through Flow), use kickoff_async().
""" - # Magic auto-async: return coroutine for Flow to await - if is_inside_event_loop(): - return self.kickoff_async(messages, response_format) executor, inputs, agent_info, parsed_tools = self._prepare_kickoff( messages, response_format diff --git a/lib/crewai/src/crewai/experimental/agent_executor.py b/lib/crewai/src/crewai/experimental/agent_executor.py index e0a6b068b..a05e29b63 100644 --- a/lib/crewai/src/crewai/experimental/agent_executor.py +++ b/lib/crewai/src/crewai/experimental/agent_executor.py @@ -268,7 +268,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): printer=self._printer, from_task=self.task, from_agent=self.agent, - response_model=self.response_model, + response_model=None, executor_context=self, ) diff --git a/lib/crewai/src/crewai/flow/flow.py b/lib/crewai/src/crewai/flow/flow.py index 312af097e..8a06e2da6 100644 --- a/lib/crewai/src/crewai/flow/flow.py +++ b/lib/crewai/src/crewai/flow/flow.py @@ -1575,13 +1575,15 @@ class Flow(Generic[T], metaclass=FlowMeta): if future: self._event_futures.append(future) - result = ( - await method(*args, **kwargs) - if asyncio.iscoroutinefunction(method) - else method(*args, **kwargs) - ) + if asyncio.iscoroutinefunction(method): + result = await method(*args, **kwargs) + else: + import contextvars - # Auto-await coroutines from sync methods (enables agent.kickoff() inside flows) + ctx = contextvars.copy_context() + result = await asyncio.to_thread(ctx.run, method, *args, **kwargs) + + # Auto-await coroutines from sync methods (still useful for explicit coroutine returns) if asyncio.iscoroutine(result): result = await result diff --git a/lib/crewai/tests/cassettes/agents/test_lite_agent_inside_flow_sync.yaml b/lib/crewai/tests/cassettes/agents/test_lite_agent_inside_flow_sync.yaml new file mode 100644 index 000000000..89749c490 --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/test_lite_agent_inside_flow_sync.yaml @@ -0,0 +1,119 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Test Agent. A helpful + test assistant\nYour personal goal is: Answer questions\nTo give my best complete + final answer to the task respond using the exact following format:\n\nThought: + I now can give a great answer\nFinal Answer: Your final answer must be the great + and the most complete as possible, it must be outcome described.\n\nI MUST use + these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: + What is 2+2? Reply with just the number.\n\nBegin! 
This is VERY important to + you, use the tools available and give your best Final Answer, your job depends + on it!\n\nThought:"}],"model":"gpt-4o-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '673' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-Cy7b0HjL79y39EkUcMLrRhPFe3XGj\",\n \"object\": + \"chat.completion\",\n \"created\": 1768444914,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal + Answer: 4\",\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 136,\n \"completion_tokens\": 13,\n + \ \"total_tokens\": 149,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_8bbc38b4db\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Thu, 15 Jan 2026 02:41:55 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '857' + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '341' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '358' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/test_lite_agent_inside_flow_with_tools.yaml b/lib/crewai/tests/cassettes/agents/test_lite_agent_inside_flow_with_tools.yaml new file mode 100644 index 000000000..e508e1afc --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/test_lite_agent_inside_flow_with_tools.yaml @@ -0,0 +1,255 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Calculator Agent. 
A math + expert\nYour personal goal is: Perform calculations\nYou ONLY have access to + the following tools, and should NEVER make up tools that are not listed here:\n\nTool + Name: calculate\nTool Arguments: {\n \"properties\": {\n \"expression\": + {\n \"title\": \"Expression\",\n \"type\": \"string\"\n }\n },\n \"required\": + [\n \"expression\"\n ],\n \"title\": \"CalculatorToolSchema\",\n \"type\": + \"object\",\n \"additionalProperties\": false\n}\nTool Description: Calculate + the result of a mathematical expression.\n\nIMPORTANT: Use the following format + in your response:\n\n```\nThought: you should always think about what to do\nAction: + the action to take, only one name of [calculate], just the name, exactly as + it''s written.\nAction Input: the input to the action, just a simple JSON object, + enclosed in curly braces, using \" to wrap keys and values.\nObservation: the + result of the action\n```\n\nOnce all necessary information is gathered, return + the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: + the final answer to the original input question\n```"},{"role":"user","content":"\nCurrent + Task: Calculate 10 * 5\n\nBegin! This is VERY important to you, use the tools + available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1403' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-Cy7avghVPSpszLmlbHpwDQlWDoD6O\",\n \"object\": + \"chat.completion\",\n \"created\": 1768444909,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I need to calculate the expression + 10 * 5.\\nAction: calculate\\nAction Input: {\\\"expression\\\":\\\"10 * 5\\\"}\\nObservation: + 50\",\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 291,\n \"completion_tokens\": 33,\n + \ \"total_tokens\": 324,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Thu, 15 Jan 2026 02:41:49 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '939' + openai-organization: 
+ - OPENAI-ORG-XXX + openai-processing-ms: + - '579' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '598' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Calculator Agent. A math + expert\nYour personal goal is: Perform calculations\nYou ONLY have access to + the following tools, and should NEVER make up tools that are not listed here:\n\nTool + Name: calculate\nTool Arguments: {\n \"properties\": {\n \"expression\": + {\n \"title\": \"Expression\",\n \"type\": \"string\"\n }\n },\n \"required\": + [\n \"expression\"\n ],\n \"title\": \"CalculatorToolSchema\",\n \"type\": + \"object\",\n \"additionalProperties\": false\n}\nTool Description: Calculate + the result of a mathematical expression.\n\nIMPORTANT: Use the following format + in your response:\n\n```\nThought: you should always think about what to do\nAction: + the action to take, only one name of [calculate], just the name, exactly as + it''s written.\nAction Input: the input to the action, just a simple JSON object, + enclosed in curly braces, using \" to wrap keys and values.\nObservation: the + result of the action\n```\n\nOnce all necessary information is gathered, return + the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: + the final answer to the original input question\n```"},{"role":"user","content":"\nCurrent + Task: Calculate 10 * 5\n\nBegin! 
This is VERY important to you, use the tools + available and give your best Final Answer, your job depends on it!\n\nThought:"},{"role":"assistant","content":"Thought: + I need to calculate the expression 10 * 5.\nAction: calculate\nAction Input: + {\"expression\":\"10 * 5\"}\nObservation: The result of 10 * 5 is 50"}],"model":"gpt-4o-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1591' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-Cy7avDhDZCLvv8v2dh8ZQRrLdci6A\",\n \"object\": + \"chat.completion\",\n \"created\": 1768444909,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now know the final answer.\\nFinal + Answer: 50\",\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 337,\n \"completion_tokens\": 14,\n + \ \"total_tokens\": 351,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Thu, 15 Jan 2026 02:41:50 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '864' + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '429' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '457' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/test_lite_agent_kickoff_async_inside_flow.yaml b/lib/crewai/tests/cassettes/agents/test_lite_agent_kickoff_async_inside_flow.yaml new file mode 100644 index 000000000..1a17a39fe --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/test_lite_agent_kickoff_async_inside_flow.yaml @@ -0,0 +1,119 @@ +interactions: +- request: + body: 
'{"messages":[{"role":"system","content":"You are Async Test Agent. An async + helper\nYour personal goal is: Answer questions asynchronously\nTo give my best + complete final answer to the task respond using the exact following format:\n\nThought: + I now can give a great answer\nFinal Answer: Your final answer must be the great + and the most complete as possible, it must be outcome described.\n\nI MUST use + these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: + What is 3+3?\n\nBegin! This is VERY important to you, use the tools available + and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '657' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-Cy7atOGxtc4y3oYNI62WiQ0Vogsdv\",\n \"object\": + \"chat.completion\",\n \"created\": 1768444907,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal + Answer: The sum of 3 + 3 is 6. Therefore, the outcome is that if you add three + and three together, you will arrive at the total of six.\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 131,\n \"completion_tokens\": 46,\n \"total_tokens\": 177,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Thu, 15 Jan 2026 02:41:48 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '983' + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '944' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '1192' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + 
status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/test_lite_agent_standalone_still_works.yaml b/lib/crewai/tests/cassettes/agents/test_lite_agent_standalone_still_works.yaml new file mode 100644 index 000000000..83bec39ce --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/test_lite_agent_standalone_still_works.yaml @@ -0,0 +1,119 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Standalone Agent. A helpful + assistant\nYour personal goal is: Answer questions\nTo give my best complete + final answer to the task respond using the exact following format:\n\nThought: + I now can give a great answer\nFinal Answer: Your final answer must be the great + and the most complete as possible, it must be outcome described.\n\nI MUST use + these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: + What is 5+5? Reply with just the number.\n\nBegin! This is VERY important to + you, use the tools available and give your best Final Answer, your job depends + on it!\n\nThought:"}],"model":"gpt-4o-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '674' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-Cy7azhPwUHQ0p5tdhxSAmLPoE8UgC\",\n \"object\": + \"chat.completion\",\n \"created\": 1768444913,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal + Answer: 10\",\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 136,\n \"completion_tokens\": 13,\n + \ \"total_tokens\": 149,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Thu, 15 Jan 2026 02:41:54 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '858' + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '455' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '583' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX 
+ x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/test_multiple_agents_in_same_flow.yaml b/lib/crewai/tests/cassettes/agents/test_multiple_agents_in_same_flow.yaml new file mode 100644 index 000000000..3a9fdda96 --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/test_multiple_agents_in_same_flow.yaml @@ -0,0 +1,240 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are First Agent. A friendly + greeter\nYour personal goal is: Greet users\nTo give my best complete final + answer to the task respond using the exact following format:\n\nThought: I now + can give a great answer\nFinal Answer: Your final answer must be the great and + the most complete as possible, it must be outcome described.\n\nI MUST use these + formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say + hello\n\nBegin! This is VERY important to you, use the tools available and give + your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '632' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-Cy7awLGYnYfpKGEeRhKlU90FltH7L\",\n \"object\": + \"chat.completion\",\n \"created\": 1768444910,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal + Answer: Hello and welcome! It's wonderful to see you here. I hope you're having + a fantastic day. If there's anything you need or if you have any questions, + feel free to ask. 
I'm here to help and make your experience enjoyable!\",\n + \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": + null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 127,\n \"completion_tokens\": 57,\n \"total_tokens\": 184,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Thu, 15 Jan 2026 02:41:51 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '1074' + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '1019' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '1242' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Second Agent. A polite + farewell agent\nYour personal goal is: Say goodbye\nTo give my best complete + final answer to the task respond using the exact following format:\n\nThought: + I now can give a great answer\nFinal Answer: Your final answer must be the great + and the most complete as possible, it must be outcome described.\n\nI MUST use + these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: + Say goodbye\n\nBegin! 
This is VERY important to you, use the tools available + and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '640' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-Cy7ayZre6crr19UyujJE9YbNxDndk\",\n \"object\": + \"chat.completion\",\n \"created\": 1768444912,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal + Answer: As we conclude our conversation, I just want to take a moment to express + my heartfelt gratitude for your time and engagement. It has been a pleasure + interacting with you. I wish you all the best in your future endeavors. May + your path ahead be filled with success and happiness. Farewell, and until + we meet again!\",\n \"refusal\": null,\n \"annotations\": []\n + \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n + \ ],\n \"usage\": {\n \"prompt_tokens\": 126,\n \"completion_tokens\": + 75,\n \"total_tokens\": 201,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Thu, 15 Jan 2026 02:41:53 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '1169' + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '1298' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '1550' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1
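Illustrative usage (not part of the diff): a minimal sketch of the behavior this change enables, based on the updated kickoff() docstring and the scenarios recorded in the new cassettes. The Flow/start/listen decorators, the Agent constructor fields, and the LiteAgentOutput.raw attribute are assumed from the surrounding codebase rather than shown in this patch, so treat this as a sketch, not the canonical API.

# Hypothetical example (not from this PR): an agent called from both a sync and
# an async Flow step after the "magic auto-async" behavior was removed.
from crewai import Agent
from crewai.flow.flow import Flow, listen, start


class MathFlow(Flow):
    @start()
    def ask_sync(self) -> str:
        # Sync step: the Flow framework now runs this method in a thread pool
        # (asyncio.to_thread), so the blocking kickoff() call just works and
        # always returns a LiteAgentOutput, never a coroutine.
        agent = Agent(
            role="Test Agent",
            goal="Answer questions",
            backstory="A helpful test assistant",
        )
        result = agent.kickoff("What is 2+2? Reply with just the number.")
        return result.raw

    @listen(ask_sync)
    async def ask_async(self, _previous: str) -> str:
        # Async step: call kickoff_async() explicitly instead of relying on
        # kickoff() returning a coroutine when an event loop is running.
        agent = Agent(
            role="Async Test Agent",
            goal="Answer questions asynchronously",
            backstory="An async helper",
        )
        result = await agent.kickoff_async("What is 3+3?")
        return result.raw


# MathFlow().kickoff() would run ask_sync on a worker thread, then await ask_async.

The contextvars.copy_context().run wrapper added in flow.py is what lets the sync step still see the caller's context variables while it executes on the worker thread.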