Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-19 12:58:14 +00:00

Compare commits: lorenze/en ... devin/1768 (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 9b47afbee0 | |
| | 1dd566311e | |
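Taken together, the hunks below revert the experimental executor unification: `Agent.kickoff()` goes back to delegating to a `LiteAgent`, the Flow-based executor is renamed from `AgentExecutor` back to `CrewAgentExecutorFlow` (with required `task`/`crew` arguments again), the "magic auto-async" event-loop detection is dropped, and `Flow` listener scheduling returns to concurrent `asyncio.gather`. A minimal sketch of the call surface after the change, assuming default LLM configuration via environment variables (the role/goal strings are illustrative):

```python
from crewai import Agent

agent = Agent(
    role="Researcher",
    goal="Answer questions concisely",
    backstory="An expert analyst.",
)

# kickoff() now builds a LiteAgent internally and returns a LiteAgentOutput.
result = agent.kickoff("What is CrewAI?")
print(result.raw)            # raw final answer
print(result.agent_role)     # "Researcher"
print(result.usage_metrics)  # token usage summary, if available
```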
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import asyncio
-from collections.abc import Callable, Coroutine, Sequence
+from collections.abc import Callable, Sequence
 import shutil
 import subprocess
 import time
@@ -34,11 +34,6 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.cache.cache_handler import CacheHandler
 from crewai.agents.crew_agent_executor import CrewAgentExecutor
 from crewai.events.event_bus import crewai_event_bus
-from crewai.events.types.agent_events import (
-    LiteAgentExecutionCompletedEvent,
-    LiteAgentExecutionErrorEvent,
-    LiteAgentExecutionStartedEvent,
-)
 from crewai.events.types.knowledge_events import (
     KnowledgeQueryCompletedEvent,
     KnowledgeQueryFailedEvent,
@@ -48,10 +43,10 @@ from crewai.events.types.memory_events import (
     MemoryRetrievalCompletedEvent,
     MemoryRetrievalStartedEvent,
 )
-from crewai.experimental.agent_executor import AgentExecutor
+from crewai.experimental.crew_agent_executor_flow import CrewAgentExecutorFlow
 from crewai.knowledge.knowledge import Knowledge
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
-from crewai.lite_agent_output import LiteAgentOutput
+from crewai.lite_agent import LiteAgent
 from crewai.llms.base_llm import BaseLLM
 from crewai.mcp import (
     MCPClient,
@@ -69,18 +64,15 @@ from crewai.security.fingerprint import Fingerprint
 from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.utilities.agent_utils import (
     get_tool_names,
-    is_inside_event_loop,
     load_agent_from_repository,
     parse_tools,
     render_text_description_and_args,
 )
 from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
-from crewai.utilities.converter import Converter, ConverterError
-from crewai.utilities.guardrail import process_guardrail
+from crewai.utilities.converter import Converter
 from crewai.utilities.guardrail_types import GuardrailType
 from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.prompts import Prompts, StandardPromptResult, SystemPromptResult
-from crewai.utilities.pydantic_schema_utils import generate_model_description
 from crewai.utilities.token_counter_callback import TokenCalcHandler
 from crewai.utilities.training_handler import CrewTrainingHandler
@@ -97,9 +89,9 @@ if TYPE_CHECKING:
     from crewai_tools import CodeInterpreterTool
 
     from crewai.agents.agent_builder.base_agent import PlatformAppOrAction
+    from crewai.lite_agent_output import LiteAgentOutput
     from crewai.task import Task
     from crewai.tools.base_tool import BaseTool
     from crewai.tools.structured_tool import CrewStructuredTool
     from crewai.utilities.types import LLMMessage
@@ -121,7 +113,7 @@ class Agent(BaseAgent):
     The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents.
 
     Attributes:
-        agent_executor: An instance of the CrewAgentExecutor or AgentExecutor class.
+        agent_executor: An instance of the CrewAgentExecutor or CrewAgentExecutorFlow class.
         role: The role of the agent.
         goal: The objective of the agent.
         backstory: The backstory of the agent.
@@ -246,9 +238,9 @@ class Agent(BaseAgent):
         Can be a single A2AConfig/A2AClientConfig/A2AServerConfig, or a list of any number of A2AConfig/A2AClientConfig with a single A2AServerConfig.
         """,
     )
-    executor_class: type[CrewAgentExecutor] | type[AgentExecutor] = Field(
+    executor_class: type[CrewAgentExecutor] | type[CrewAgentExecutorFlow] = Field(
        default=CrewAgentExecutor,
-        description="Class to use for the agent executor. Defaults to CrewAgentExecutor, can optionally use AgentExecutor.",
+        description="Class to use for the agent executor. Defaults to CrewAgentExecutor, can optionally use CrewAgentExecutorFlow.",
     )
 
     @model_validator(mode="before")
@@ -1591,25 +1583,26 @@ class Agent(BaseAgent):
             )
         return None
 
-    def _prepare_kickoff(
+    def kickoff(
         self,
         messages: str | list[LLMMessage],
         response_format: type[Any] | None = None,
-    ) -> tuple[AgentExecutor, dict[str, str], dict[str, Any], list[CrewStructuredTool]]:
-        """Prepare common setup for kickoff execution.
+    ) -> LiteAgentOutput:
+        """
+        Execute the agent with the given messages using a LiteAgent instance.
 
-        This method handles all the common preparation logic shared between
-        kickoff() and kickoff_async(), including tool processing, prompt building,
-        executor creation, and input formatting.
+        This method is useful when you want to use the Agent configuration but
+        with the simpler and more direct execution flow of LiteAgent.
 
         Args:
             messages: Either a string query or a list of message dictionaries.
                 If a string is provided, it will be converted to a user message.
                 If a list is provided, each dict should have 'role' and 'content' keys.
             response_format: Optional Pydantic model for structured output.
 
         Returns:
-            Tuple of (executor, inputs, agent_info, parsed_tools) ready for execution.
+            LiteAgentOutput: The result of the agent execution.
         """
         # Process platform apps and MCP tools
         if self.apps:
             platform_tools = self.get_platform_tools(self.apps)
             if platform_tools and self.tools is not None:
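Per the docstring above, `messages` accepts either a plain string or a list of role/content dicts. A sketch of both call forms (the role strings are illustrative):

```python
from crewai import Agent

agent = Agent(role="Helper", goal="Assist", backstory="A generalist.")

# String form: converted to a single user message.
agent.kickoff("Ping?")

# List form: each dict needs 'role' and 'content' keys.
agent.kickoff(
    [
        {"role": "system", "content": "Reply in one word."},
        {"role": "user", "content": "Ping?"},
    ]
)
```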
@@ -1619,359 +1612,25 @@ class Agent(BaseAgent):
         if mcps and self.tools is not None:
             self.tools.extend(mcps)
 
-        # Prepare tools
-        raw_tools: list[BaseTool] = self.tools or []
-        parsed_tools = parse_tools(raw_tools)
-
-        # Build agent_info for backward-compatible event emission
-        agent_info = {
-            "id": self.id,
-            "role": self.role,
-            "goal": self.goal,
-            "backstory": self.backstory,
-            "tools": raw_tools,
-            "verbose": self.verbose,
-        }
-
-        # Build prompt for standalone execution
-        prompt = Prompts(
-            agent=self,
-            has_tools=len(raw_tools) > 0,
-            i18n=self.i18n,
-            use_system_prompt=self.use_system_prompt,
-            system_template=self.system_template,
-            prompt_template=self.prompt_template,
-            response_template=self.response_template,
-        ).task_execution()
-
-        # Prepare stop words
-        stop_words = [self.i18n.slice("observation")]
-        if self.response_template:
-            stop_words.append(
-                self.response_template.split("{{ .Response }}")[1].strip()
-            )
-
-        # Get RPM limit function
-        rpm_limit_fn = (
-            self._rpm_controller.check_or_wait if self._rpm_controller else None
-        )
-
-        # Create the executor for standalone mode (no crew, no task)
-        executor = AgentExecutor(
-            task=None,
-            crew=None,
-            llm=cast(BaseLLM, self.llm),
-            agent=self,
-            prompt=prompt,
-            max_iter=self.max_iter,
-            tools=parsed_tools,
-            tools_names=get_tool_names(parsed_tools),
-            stop_words=stop_words,
-            tools_description=render_text_description_and_args(parsed_tools),
-            tools_handler=self.tools_handler,
-            original_tools=raw_tools,
-            step_callback=self.step_callback,
-            function_calling_llm=self.function_calling_llm,
-            request_within_rpm_limit=rpm_limit_fn,
-            callbacks=[TokenCalcHandler(self._token_process)],
-            response_model=response_format,
-        )
+        lite_agent = LiteAgent(
+            id=self.id,
+            role=self.role,
+            goal=self.goal,
+            backstory=self.backstory,
+            llm=self.llm,
+            tools=self.tools or [],
+            max_iterations=self.max_iter,
+            max_execution_time=self.max_execution_time,
+            respect_context_window=self.respect_context_window,
+            verbose=self.verbose,
+            response_format=response_format,
+            i18n=self.i18n,
+            original_agent=self,
+            guardrail=self.guardrail,
+            guardrail_max_retries=self.guardrail_max_retries,
+        )
 
-        # Format messages
-        if isinstance(messages, str):
-            formatted_messages = messages
-        else:
-            formatted_messages = "\n".join(
-                str(msg.get("content", "")) for msg in messages if msg.get("content")
-            )
-
-        # Build the input dict for the executor
-        inputs = {
-            "input": formatted_messages,
-            "tool_names": get_tool_names(parsed_tools),
-            "tools": render_text_description_and_args(parsed_tools),
-        }
-
-        return executor, inputs, agent_info, parsed_tools
-
-    def kickoff(
-        self,
-        messages: str | list[LLMMessage],
-        response_format: type[Any] | None = None,
-    ) -> LiteAgentOutput | Coroutine[Any, Any, LiteAgentOutput]:
-        """
-        Execute the agent with the given messages using the AgentExecutor.
-
-        This method provides standalone agent execution without requiring a Crew.
-        It supports tools, response formatting, and guardrails.
-
-        When called from within a Flow (sync or async method), this automatically
-        detects the event loop and returns a coroutine that the Flow framework
-        awaits. Users don't need to handle async explicitly.
-
-        Args:
-            messages: Either a string query or a list of message dictionaries.
-                If a string is provided, it will be converted to a user message.
-                If a list is provided, each dict should have 'role' and 'content' keys.
-            response_format: Optional Pydantic model for structured output.
-
-        Returns:
-            LiteAgentOutput: The result of the agent execution.
-            When inside a Flow, returns a coroutine that resolves to LiteAgentOutput.
-
-        Note:
-            For explicit async usage outside of Flow, use kickoff_async() directly.
-        """
-        # Magic auto-async: if inside event loop (e.g., inside a Flow),
-        # return coroutine for Flow to await
-        if is_inside_event_loop():
-            return self.kickoff_async(messages, response_format)
-
-        executor, inputs, agent_info, parsed_tools = self._prepare_kickoff(
-            messages, response_format
-        )
-
-        try:
-            crewai_event_bus.emit(
-                self,
-                event=LiteAgentExecutionStartedEvent(
-                    agent_info=agent_info,
-                    tools=parsed_tools,
-                    messages=messages,
-                ),
-            )
-
-            output = self._execute_and_build_output(executor, inputs, response_format)
-
-            if self.guardrail is not None:
-                output = self._process_kickoff_guardrail(
-                    output=output,
-                    executor=executor,
-                    inputs=inputs,
-                    response_format=response_format,
-                )
-
-            crewai_event_bus.emit(
-                self,
-                event=LiteAgentExecutionCompletedEvent(
-                    agent_info=agent_info,
-                    output=output.raw,
-                ),
-            )
-
-            return output
-
-        except Exception as e:
-            crewai_event_bus.emit(
-                self,
-                event=LiteAgentExecutionErrorEvent(
-                    agent_info=agent_info,
-                    error=str(e),
-                ),
-            )
-            raise
-
-    def _execute_and_build_output(
-        self,
-        executor: AgentExecutor,
-        inputs: dict[str, str],
-        response_format: type[Any] | None = None,
-    ) -> LiteAgentOutput:
-        """Execute the agent and build the output object.
-
-        Args:
-            executor: The executor instance.
-            inputs: Input dictionary for execution.
-            response_format: Optional response format.
-
-        Returns:
-            LiteAgentOutput with raw output, formatted result, and metrics.
-        """
-        import json
-
-        # Execute the agent (this is called from sync path, so invoke returns dict)
-        result = cast(dict[str, Any], executor.invoke(inputs))
-        raw_output = result.get("output", "")
-
-        # Handle response format conversion
-        formatted_result: BaseModel | None = None
-        if response_format:
-            try:
-                model_schema = generate_model_description(response_format)
-                schema = json.dumps(model_schema, indent=2)
-                instructions = self.i18n.slice("formatted_task_instructions").format(
-                    output_format=schema
-                )
-
-                converter = Converter(
-                    llm=self.llm,
-                    text=raw_output,
-                    model=response_format,
-                    instructions=instructions,
-                )
-
-                conversion_result = converter.to_pydantic()
-                if isinstance(conversion_result, BaseModel):
-                    formatted_result = conversion_result
-            except ConverterError:
-                pass  # Keep raw output if conversion fails
-
-        # Get token usage metrics
-        if isinstance(self.llm, BaseLLM):
-            usage_metrics = self.llm.get_token_usage_summary()
-        else:
-            usage_metrics = self._token_process.get_summary()
-
-        return LiteAgentOutput(
-            raw=raw_output,
-            pydantic=formatted_result,
-            agent_role=self.role,
-            usage_metrics=usage_metrics.model_dump() if usage_metrics else None,
-            messages=executor.messages,
-        )
-
-    async def _execute_and_build_output_async(
-        self,
-        executor: AgentExecutor,
-        inputs: dict[str, str],
-        response_format: type[Any] | None = None,
-    ) -> LiteAgentOutput:
-        """Execute the agent asynchronously and build the output object.
-
-        This is the async version of _execute_and_build_output that uses
-        invoke_async() for native async execution within event loops.
-
-        Args:
-            executor: The executor instance.
-            inputs: Input dictionary for execution.
-            response_format: Optional response format.
-
-        Returns:
-            LiteAgentOutput with raw output, formatted result, and metrics.
-        """
-        import json
-
-        # Execute the agent asynchronously
-        result = await executor.invoke_async(inputs)
-        raw_output = result.get("output", "")
-
-        # Handle response format conversion
-        formatted_result: BaseModel | None = None
-        if response_format:
-            try:
-                model_schema = generate_model_description(response_format)
-                schema = json.dumps(model_schema, indent=2)
-                instructions = self.i18n.slice("formatted_task_instructions").format(
-                    output_format=schema
-                )
-
-                converter = Converter(
-                    llm=self.llm,
-                    text=raw_output,
-                    model=response_format,
-                    instructions=instructions,
-                )
-
-                conversion_result = converter.to_pydantic()
-                if isinstance(conversion_result, BaseModel):
-                    formatted_result = conversion_result
-            except ConverterError:
-                pass  # Keep raw output if conversion fails
-
-        # Get token usage metrics
-        if isinstance(self.llm, BaseLLM):
-            usage_metrics = self.llm.get_token_usage_summary()
-        else:
-            usage_metrics = self._token_process.get_summary()
-
-        return LiteAgentOutput(
-            raw=raw_output,
-            pydantic=formatted_result,
-            agent_role=self.role,
-            usage_metrics=usage_metrics.model_dump() if usage_metrics else None,
-            messages=executor.messages,
-        )
-
-    def _process_kickoff_guardrail(
-        self,
-        output: LiteAgentOutput,
-        executor: AgentExecutor,
-        inputs: dict[str, str],
-        response_format: type[Any] | None = None,
-        retry_count: int = 0,
-    ) -> LiteAgentOutput:
-        """Process guardrail for kickoff execution with retry logic.
-
-        Args:
-            output: Current agent output.
-            executor: The executor instance.
-            inputs: Input dictionary for re-execution.
-            response_format: Optional response format.
-            retry_count: Current retry count.
-
-        Returns:
-            Validated/updated output.
-        """
-        from crewai.utilities.guardrail_types import GuardrailCallable
-
-        # Ensure guardrail is callable
-        guardrail_callable: GuardrailCallable
-        if isinstance(self.guardrail, str):
-            from crewai.tasks.llm_guardrail import LLMGuardrail
-
-            guardrail_callable = cast(
-                GuardrailCallable,
-                LLMGuardrail(description=self.guardrail, llm=cast(BaseLLM, self.llm)),
-            )
-        elif callable(self.guardrail):
-            guardrail_callable = self.guardrail
-        else:
-            # Should not happen if called from kickoff with guardrail check
-            return output
-
-        guardrail_result = process_guardrail(
-            output=output,
-            guardrail=guardrail_callable,
-            retry_count=retry_count,
-            event_source=self,
-            from_agent=self,
-        )
-
-        if not guardrail_result.success:
-            if retry_count >= self.guardrail_max_retries:
-                raise ValueError(
-                    f"Agent's guardrail failed validation after {self.guardrail_max_retries} retries. "
-                    f"Last error: {guardrail_result.error}"
-                )
-
-            # Add feedback and re-execute
-            executor._append_message_to_state(
-                guardrail_result.error or "Guardrail validation failed",
-                role="user",
-            )
-
-            # Re-execute and build new output
-            output = self._execute_and_build_output(executor, inputs, response_format)
-
-            # Recursively retry guardrail
-            return self._process_kickoff_guardrail(
-                output=output,
-                executor=executor,
-                inputs=inputs,
-                response_format=response_format,
-                retry_count=retry_count + 1,
-            )
-
-        # Apply guardrail result if available
-        if guardrail_result.result is not None:
-            if isinstance(guardrail_result.result, str):
-                output.raw = guardrail_result.result
-            elif isinstance(guardrail_result.result, BaseModel):
-                output.pydantic = guardrail_result.result
-
-        return output
+        return lite_agent.kickoff(messages)
 
     async def kickoff_async(
         self,
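Note that the new body forwards `guardrail` and `guardrail_max_retries` straight into `LiteAgent`, so validation-and-retry now happens inside `LiteAgent` rather than in the removed `_process_kickoff_guardrail`. A sketch of a callable guardrail; the `(success, result_or_error)` tuple convention is assumed from crewAI's task guardrails:

```python
from crewai import Agent


def must_cite_source(output):
    # Assumed crewAI guardrail convention: return (success, result_or_error).
    if "source:" in output.raw.lower():
        return True, output.raw
    return False, "Add a 'Source:' line to the answer."


agent = Agent(
    role="Researcher",
    goal="Answer with citations",
    backstory="A careful analyst.",
    guardrail=must_cite_source,
    guardrail_max_retries=2,
)

result = agent.kickoff("Who maintains CrewAI?")  # retried up to 2 times on failure
```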
@@ -1979,11 +1638,9 @@ class Agent(BaseAgent):
         response_format: type[Any] | None = None,
     ) -> LiteAgentOutput:
         """
-        Execute the agent asynchronously with the given messages.
+        Execute the agent asynchronously with the given messages using a LiteAgent instance.
 
-        This is the async version of the kickoff method that uses native async
-        execution. It is designed for use within async contexts, such as when
-        called from within an async Flow method.
+        This is the async version of the kickoff method.
 
         Args:
             messages: Either a string query or a list of message dictionaries.
@@ -1994,48 +1651,21 @@
         Returns:
             LiteAgentOutput: The result of the agent execution.
         """
-        executor, inputs, agent_info, parsed_tools = self._prepare_kickoff(
-            messages, response_format
-        )
-
-        try:
-            crewai_event_bus.emit(
-                self,
-                event=LiteAgentExecutionStartedEvent(
-                    agent_info=agent_info,
-                    tools=parsed_tools,
-                    messages=messages,
-                ),
-            )
-
-            output = await self._execute_and_build_output_async(
-                executor, inputs, response_format
-            )
-
-            if self.guardrail is not None:
-                output = self._process_kickoff_guardrail(
-                    output=output,
-                    executor=executor,
-                    inputs=inputs,
-                    response_format=response_format,
-                )
-
-            crewai_event_bus.emit(
-                self,
-                event=LiteAgentExecutionCompletedEvent(
-                    agent_info=agent_info,
-                    output=output.raw,
-                ),
-            )
-
-            return output
-
-        except Exception as e:
-            crewai_event_bus.emit(
-                self,
-                event=LiteAgentExecutionErrorEvent(
-                    agent_info=agent_info,
-                    error=str(e),
-                ),
-            )
-            raise
+        lite_agent = LiteAgent(
+            role=self.role,
+            goal=self.goal,
+            backstory=self.backstory,
+            llm=self.llm,
+            tools=self.tools or [],
+            max_iterations=self.max_iter,
+            max_execution_time=self.max_execution_time,
+            respect_context_window=self.respect_context_window,
+            verbose=self.verbose,
+            response_format=response_format,
+            i18n=self.i18n,
+            original_agent=self,
+            guardrail=self.guardrail,
+            guardrail_max_retries=self.guardrail_max_retries,
+        )
+
+        return await lite_agent.kickoff_async(messages)
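A sketch of driving `kickoff_async()` from synchronous code; unlike the removed implementation, there is no auto-detection here — callers opt into async explicitly:

```python
import asyncio

from crewai import Agent


async def main() -> None:
    agent = Agent(
        role="Researcher",
        goal="Answer questions concisely",
        backstory="An expert analyst.",
    )
    # Awaits LiteAgent.kickoff_async() under the hood.
    result = await agent.kickoff_async("What changed in this release?")
    print(result.raw)


asyncio.run(main())
```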
@@ -21,9 +21,9 @@ if TYPE_CHECKING:
 
 
 class CrewAgentExecutorMixin:
-    crew: Crew | None
+    crew: Crew
     agent: Agent
-    task: Task | None
+    task: Task
     iterations: int
     max_iter: int
     messages: list[LLMMessage]
@@ -219,6 +219,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             Final answer from the agent.
         """
         formatted_answer = None
+        last_raw_output: str | None = None
         while not isinstance(formatted_answer, AgentFinish):
             try:
                 if has_reached_max_iterations(self.iterations, self.max_iter):
@@ -244,6 +245,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     response_model=self.response_model,
                     executor_context=self,
                 )
+                last_raw_output = answer
                 if self.response_model is not None:
                     try:
                         self.response_model.model_validate_json(answer)
@@ -300,6 +302,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     iterations=self.iterations,
                     log_error_after=self.log_error_after,
                     printer=self._printer,
+                    raw_output=last_raw_output,
+                    agent_role=self.agent.role if self.agent else None,
                 )
 
             except Exception as e:
@@ -386,6 +390,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             Final answer from the agent.
         """
         formatted_answer = None
+        last_raw_output: str | None = None
         while not isinstance(formatted_answer, AgentFinish):
             try:
                 if has_reached_max_iterations(self.iterations, self.max_iter):
@@ -411,6 +416,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     response_model=self.response_model,
                     executor_context=self,
                 )
+                last_raw_output = answer
 
                 if self.response_model is not None:
                     try:
@@ -467,6 +473,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     iterations=self.iterations,
                     log_error_after=self.log_error_after,
                     printer=self._printer,
+                    raw_output=last_raw_output,
+                    agent_role=self.agent.role if self.agent else None,
                 )
 
             except Exception as e:
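The `last_raw_output` additions above follow one pattern in both loops: remember the most recent raw LLM answer so the parser-error handler can attach it for diagnostics instead of losing it. A standalone sketch with stubbed LLM and parser:

```python
def call_llm(i: int) -> str:
    return f"raw answer {i}"  # stand-in for the real LLM call


def parse(answer: str) -> str:
    if answer.endswith("0"):  # stub: fail on the first attempt
        raise ValueError("unparseable")
    return answer.upper()


result = None
last_raw_output: str | None = None
for i in range(3):
    answer = call_llm(i)
    last_raw_output = answer  # kept for error context, as in the hunks above
    try:
        result = parse(answer)
        break
    except ValueError:
        # The handler can now log the offending raw output.
        print(f"parse failed; raw output was: {last_raw_output!r}")

print(result)
```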
@@ -1,4 +1,4 @@
-from crewai.experimental.agent_executor import AgentExecutor, CrewAgentExecutorFlow
+from crewai.experimental.crew_agent_executor_flow import CrewAgentExecutorFlow
 from crewai.experimental.evaluation import (
     AgentEvaluationResult,
     AgentEvaluator,
@@ -23,9 +23,8 @@ from crewai.experimental.evaluation import (
 __all__ = [
     "AgentEvaluationResult",
     "AgentEvaluator",
-    "AgentExecutor",
     "BaseEvaluator",
-    "CrewAgentExecutorFlow",  # Deprecated alias for AgentExecutor
+    "CrewAgentExecutorFlow",
     "EvaluationScore",
     "EvaluationTraceCallback",
     "ExperimentResult",
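After this hunk, `AgentExecutor` is no longer exported from `crewai.experimental`; only the original name remains:

```python
# Before (removed by this change):
# from crewai.experimental import AgentExecutor

# After: the Flow-based executor keeps its original name.
from crewai.experimental import CrewAgentExecutorFlow
```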
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from collections.abc import Callable, Coroutine
+from collections.abc import Callable
 import threading
 from typing import TYPE_CHECKING, Any, Literal, cast
 from uuid import uuid4
@@ -37,7 +37,6 @@ from crewai.utilities.agent_utils import (
     handle_unknown_error,
     has_reached_max_iterations,
     is_context_length_exceeded,
-    is_inside_event_loop,
     process_llm_response,
 )
 from crewai.utilities.constants import TRAINING_DATA_FILE
@@ -74,17 +73,13 @@ class AgentReActState(BaseModel):
     ask_for_human_input: bool = Field(default=False)
 
 
-class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
-    """Flow-based agent executor for both standalone and crew-bound execution.
+class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
+    """Flow-based executor matching CrewAgentExecutor interface.
 
     Inherits from:
         - Flow[AgentReActState]: Provides flow orchestration capabilities
         - CrewAgentExecutorMixin: Provides memory methods (short/long/external term)
 
-    This executor can operate in two modes:
-    - Standalone mode: When crew and task are None (used by Agent.kickoff())
-    - Crew mode: When crew and task are provided (used by Agent.execute_task())
-
     Note: Multiple instances may be created during agent initialization
     (cache setup, RPM controller setup, etc.) but only the final instance
     should execute tasks via invoke().
@@ -93,6 +88,8 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
     def __init__(
         self,
         llm: BaseLLM,
+        task: Task,
+        crew: Crew,
         agent: Agent,
         prompt: SystemPromptResult | StandardPromptResult,
         max_iter: int,
@@ -101,8 +98,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
         stop_words: list[str],
         tools_description: str,
         tools_handler: ToolsHandler,
-        task: Task | None = None,
-        crew: Crew | None = None,
         step_callback: Any = None,
         original_tools: list[BaseTool] | None = None,
         function_calling_llm: BaseLLM | Any | None = None,
@@ -116,6 +111,8 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 
         Args:
             llm: Language model instance.
+            task: Task to execute.
+            crew: Crew instance.
             agent: Agent to execute.
             prompt: Prompt templates.
             max_iter: Maximum iterations.
@@ -124,8 +121,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
             stop_words: Stop word list.
             tools_description: Tool descriptions.
             tools_handler: Tool handler instance.
-            task: Optional task to execute (None for standalone agent execution).
-            crew: Optional crew instance (None for standalone agent execution).
             step_callback: Optional step callback.
             original_tools: Original tool list.
             function_calling_llm: Optional function calling LLM.
@@ -136,9 +131,9 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
         """
         self._i18n: I18N = i18n or get_i18n()
         self.llm = llm
-        self.task: Task | None = task
+        self.task = task
         self.agent = agent
-        self.crew: Crew | None = crew
+        self.crew = crew
         self.prompt = prompt
         self.tools = tools
         self.tools_names = tools_names
@@ -183,6 +178,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
                 else self.stop
             )
         )
 
+        self._state = AgentReActState()
 
     def _ensure_flow_initialized(self) -> None:
@@ -268,7 +264,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
             printer=self._printer,
             from_task=self.task,
             from_agent=self.agent,
-            response_model=None,
+            response_model=self.response_model,
             executor_context=self,
         )
@@ -453,99 +449,9 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 
         return "initialized"
 
-    def invoke(
-        self, inputs: dict[str, Any]
-    ) -> dict[str, Any] | Coroutine[Any, Any, dict[str, Any]]:
+    def invoke(self, inputs: dict[str, Any]) -> dict[str, Any]:
         """Execute agent with given inputs.
 
-        When called from within an existing event loop (e.g., inside a Flow),
-        this method returns a coroutine that should be awaited. The Flow
-        framework handles this automatically.
-
         Args:
             inputs: Input dictionary containing prompt variables.
 
         Returns:
-            Dictionary with agent output, or a coroutine if inside an event loop.
+            Dictionary with agent output.
         """
-        # Magic auto-async: if inside event loop, return coroutine for Flow to await
-        if is_inside_event_loop():
-            return self.invoke_async(inputs)
-
-        self._ensure_flow_initialized()
-
-        with self._execution_lock:
-            if self._is_executing:
-                raise RuntimeError(
-                    "Executor is already running. "
-                    "Cannot invoke the same executor instance concurrently."
-                )
-            self._is_executing = True
-            self._has_been_invoked = True
-
-        try:
-            # Reset state for fresh execution
-            self.state.messages.clear()
-            self.state.iterations = 0
-            self.state.current_answer = None
-            self.state.is_finished = False
-
-            if "system" in self.prompt:
-                prompt = cast("SystemPromptResult", self.prompt)
-                system_prompt = self._format_prompt(prompt["system"], inputs)
-                user_prompt = self._format_prompt(prompt["user"], inputs)
-                self.state.messages.append(
-                    format_message_for_llm(system_prompt, role="system")
-                )
-                self.state.messages.append(format_message_for_llm(user_prompt))
-            else:
-                user_prompt = self._format_prompt(self.prompt["prompt"], inputs)
-                self.state.messages.append(format_message_for_llm(user_prompt))
-
-            self.state.ask_for_human_input = bool(
-                inputs.get("ask_for_human_input", False)
-            )
-
-            self.kickoff()
-
-            formatted_answer = self.state.current_answer
-
-            if not isinstance(formatted_answer, AgentFinish):
-                raise RuntimeError(
-                    "Agent execution ended without reaching a final answer."
-                )
-
-            if self.state.ask_for_human_input:
-                formatted_answer = self._handle_human_feedback(formatted_answer)
-
-            self._create_short_term_memory(formatted_answer)
-            self._create_long_term_memory(formatted_answer)
-            self._create_external_memory(formatted_answer)
-
-            return {"output": formatted_answer.output}
-
-        except AssertionError:
-            fail_text = Text()
-            fail_text.append("❌ ", style="red bold")
-            fail_text.append(
-                "Agent failed to reach a final answer. This is likely a bug - please report it.",
-                style="red",
-            )
-            self._console.print(fail_text)
-            raise
-        except Exception as e:
-            handle_unknown_error(self._printer, e)
-            raise
-        finally:
-            self._is_executing = False
-
-    async def invoke_async(self, inputs: dict[str, Any]) -> dict[str, Any]:
-        """Execute agent asynchronously with given inputs.
-
-        This method is designed for use within async contexts, such as when
-        the agent is called from within an async Flow method. It uses
-        kickoff_async() directly instead of running in a separate thread.
-
-        Args:
-            inputs: Input dictionary containing prompt variables.
-
@@ -586,8 +492,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
             inputs.get("ask_for_human_input", False)
         )
 
-        # Use async kickoff directly since we're already in an async context
-        await self.kickoff_async()
+        self.kickoff()
 
         formatted_answer = self.state.current_answer
@@ -678,14 +583,11 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
         if self.agent is None:
             raise ValueError("Agent cannot be None")
 
-        if self.task is None:
-            return
-
         crewai_event_bus.emit(
             self.agent,
             AgentLogsStartedEvent(
                 agent_role=self.agent.role,
-                task_description=self.task.description,
+                task_description=(self.task.description if self.task else "Not Found"),
                 verbose=self.agent.verbose
                 or (hasattr(self, "crew") and getattr(self.crew, "verbose", False)),
             ),
@@ -719,12 +621,10 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
             result: Agent's final output.
             human_feedback: Optional feedback from human.
         """
-        # Early return if no crew (standalone mode)
-        if self.crew is None:
-            return
-
         agent_id = str(self.agent.id)
-        train_iteration = getattr(self.crew, "_train_iteration", None)
+        train_iteration = (
+            getattr(self.crew, "_train_iteration", None) if self.crew else None
+        )
 
         if train_iteration is None or not isinstance(train_iteration, int):
             train_error = Text()
@@ -906,7 +806,3 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
         requiring arbitrary_types_allowed=True.
         """
         return core_schema.any_schema()
-
-
-# Backward compatibility alias (deprecated)
-CrewAgentExecutorFlow = AgentExecutor
@@ -73,7 +73,6 @@ from crewai.flow.utils import (
     is_simple_flow_condition,
 )
 
-
 if TYPE_CHECKING:
     from crewai.flow.async_feedback.types import PendingFeedbackContext
     from crewai.flow.human_feedback import HumanFeedbackResult
@@ -520,9 +519,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
         self._methods: dict[FlowMethodName, FlowMethod[Any, Any]] = {}
         self._method_execution_counts: dict[FlowMethodName, int] = {}
         self._pending_and_listeners: dict[PendingListenerKey, set[FlowMethodName]] = {}
-        self._fired_or_listeners: set[FlowMethodName] = (
-            set()
-        )  # Track OR listeners that already fired
         self._method_outputs: list[Any] = []  # list to store all method outputs
         self._completed_methods: set[FlowMethodName] = (
             set()
@@ -574,7 +570,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
         flow_id: str,
         persistence: FlowPersistence | None = None,
         **kwargs: Any,
-    ) -> Flow[Any]:
+    ) -> "Flow[Any]":
         """Create a Flow instance from a pending feedback state.
 
         This classmethod is used to restore a flow that was paused waiting
@@ -635,7 +631,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
         return instance
 
     @property
-    def pending_feedback(self) -> PendingFeedbackContext | None:
+    def pending_feedback(self) -> "PendingFeedbackContext | None":
         """Get the pending feedback context if this flow is waiting for feedback.
 
         Returns:
@@ -720,9 +716,8 @@ class Flow(Generic[T], metaclass=FlowMeta):
         Raises:
             ValueError: If no pending feedback context exists
         """
-        from datetime import datetime
-
         from crewai.flow.human_feedback import HumanFeedbackResult
+        from datetime import datetime
 
         if self._pending_feedback_context is None:
             raise ValueError(
@@ -1300,7 +1295,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
             self._completed_methods.clear()
             self._method_outputs.clear()
             self._pending_and_listeners.clear()
-            self._fired_or_listeners.clear()
         else:
             # We're restoring from persistence, set the flag
             self._is_execution_resuming = True
@@ -1352,26 +1346,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
         self._initialize_state(inputs)
 
         try:
-            # Determine which start methods to execute at kickoff
-            # Conditional start methods (with __trigger_methods__) are only triggered by their conditions
-            # UNLESS there are no unconditional starts (then all starts run as entry points)
-            unconditional_starts = [
-                start_method
-                for start_method in self._start_methods
-                if not getattr(
-                    self._methods.get(start_method), "__trigger_methods__", None
-                )
-            ]
-            # If there are unconditional starts, only run those at kickoff
-            # If there are NO unconditional starts, run all starts (including conditional ones)
-            starts_to_execute = (
-                unconditional_starts
-                if unconditional_starts
-                else self._start_methods
-            )
             tasks = [
                 self._execute_start_method(start_method)
-                for start_method in starts_to_execute
+                for start_method in self._start_methods
             ]
             await asyncio.gather(*tasks)
         except Exception as e:
@@ -1504,8 +1481,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
             return
         # For cyclic flows, clear from completed to allow re-execution
         self._completed_methods.discard(start_method_name)
-        # Also clear fired OR listeners to allow them to fire again in new cycle
-        self._fired_or_listeners.clear()
 
         method = self._methods[start_method_name]
         enhanced_method = self._inject_trigger_payload_for_start_method(method)
@@ -1528,9 +1503,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
                     if self.last_human_feedback is not None
                     else result
                 )
-                # Execute listeners sequentially to prevent race conditions on shared state
-                for listener_name in listeners_for_result:
-                    await self._execute_single_listener(listener_name, listener_result)
+                tasks = [
+                    self._execute_single_listener(listener_name, listener_result)
+                    for listener_name in listeners_for_result
+                ]
+                await asyncio.gather(*tasks)
             else:
                 await self._execute_listeners(start_method_name, result)
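This hunk (and the two matching ones further down) swaps a sequential await loop back to concurrent `asyncio.gather` scheduling; the sequential form had been introduced to avoid races on shared flow state. A standalone sketch contrasting the two styles (listener names are illustrative):

```python
import asyncio


async def listener(name: str) -> None:
    await asyncio.sleep(0.01)  # stand-in for _execute_single_listener
    print(f"ran {name}")


async def sequential(names: list[str]) -> None:
    # One at a time: each listener sees state mutations from the previous one.
    for name in names:
        await listener(name)


async def concurrent(names: list[str]) -> None:
    # All at once: faster, but listeners may interleave on shared state.
    await asyncio.gather(*(listener(name) for name in names))


asyncio.run(sequential(["a", "b"]))
asyncio.run(concurrent(["a", "b"]))
```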
@@ -1596,19 +1573,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
         if future:
             self._event_futures.append(future)
 
-        if asyncio.iscoroutinefunction(method):
-            result = await method(*args, **kwargs)
-        else:
-            # Run sync methods in thread pool for isolation
-            # This allows Agent.kickoff() to work synchronously inside Flow methods
-            import contextvars
-
-            ctx = contextvars.copy_context()
-            result = await asyncio.to_thread(ctx.run, method, *args, **kwargs)
-
-        # Auto-await coroutines returned from sync methods (enables AgentExecutor pattern)
-        if asyncio.iscoroutine(result):
-            result = await result
+        result = (
+            await method(*args, **kwargs)
+            if asyncio.iscoroutinefunction(method)
+            else method(*args, **kwargs)
+        )
 
         self._method_outputs.append(result)
         self._method_execution_counts[method_name] = (
@@ -1755,11 +1724,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
                     listener_result = router_result_to_feedback.get(
                         str(current_trigger), result
                     )
-                    # Execute listeners sequentially to prevent race conditions on shared state
-                    for listener_name in listeners_triggered:
-                        await self._execute_single_listener(
-                            listener_name, listener_result
-                        )
+                    tasks = [
+                        self._execute_single_listener(listener_name, listener_result)
+                        for listener_name in listeners_triggered
+                    ]
+                    await asyncio.gather(*tasks)
 
                 if current_trigger in router_results:
                     # Find start methods triggered by this router result
@@ -1776,16 +1745,14 @@ class Flow(Generic[T], metaclass=FlowMeta):
                         should_trigger = current_trigger in all_methods
 
                     if should_trigger:
-                        # Execute conditional start method triggered by router result
-                        # Only execute if this is a cycle (method was already completed)
-                        if method_name in self._completed_methods:
-                            # For cyclic re-execution, temporarily clear resumption flag
-                            was_resuming = self._is_execution_resuming
-                            self._is_execution_resuming = False
-                            await self._execute_start_method(method_name)
-                            self._is_execution_resuming = was_resuming
-                        else:
-                            # First-time execution of conditional start
-                            await self._execute_start_method(method_name)
+                        # For router-triggered start methods in cycles, temporarily clear resumption flag
+                        # to allow cyclic execution
+                        was_resuming = self._is_execution_resuming
+                        self._is_execution_resuming = False
+                        await self._execute_start_method(method_name)
+                        self._is_execution_resuming = was_resuming
 
     def _evaluate_condition(
         self,
@@ -1883,21 +1850,8 @@ class Flow(Generic[T], metaclass=FlowMeta):
                 condition_type, methods = condition_data
 
                 if condition_type == OR_CONDITION:
-                    # Only trigger multi-source OR listeners (or_(A, B, C)) once - skip if already fired
-                    # Simple single-method listeners fire every time their trigger occurs
-                    # Routers also fire every time - they're decision points
-                    has_multiple_triggers = len(methods) > 1
-                    should_check_fired = has_multiple_triggers and not is_router
-
-                    if (
-                        not should_check_fired
-                        or listener_name not in self._fired_or_listeners
-                    ):
-                        if trigger_method in methods:
-                            triggered.append(listener_name)
-                            # Only track multi-source OR listeners (not single-method or routers)
-                            if should_check_fired:
-                                self._fired_or_listeners.add(listener_name)
+                    if trigger_method in methods:
+                        triggered.append(listener_name)
                 elif condition_type == AND_CONDITION:
                     pending_key = PendingListenerKey(listener_name)
                     if pending_key not in self._pending_and_listeners:
@@ -1910,26 +1864,10 @@ class Flow(Generic[T], metaclass=FlowMeta):
                         self._pending_and_listeners.pop(pending_key, None)
 
             elif is_flow_condition_dict(condition_data):
-                # For complex conditions, check if top-level is OR and track accordingly
-                top_level_type = condition_data.get("type", OR_CONDITION)
-                is_or_based = top_level_type == OR_CONDITION
-
-                # Only track multi-source OR conditions (multiple sub-conditions), not routers
-                sub_conditions = condition_data.get("conditions", [])
-                has_multiple_triggers = is_or_based and len(sub_conditions) > 1
-                should_check_fired = has_multiple_triggers and not is_router
-
-                # Skip compound OR-based listeners that have already fired
-                if should_check_fired and listener_name in self._fired_or_listeners:
-                    continue
-
                 if self._evaluate_condition(
                     condition_data, trigger_method, listener_name
                 ):
                     triggered.append(listener_name)
-                    # Track compound OR-based listeners so they only fire once
-                    if should_check_fired:
-                        self._fired_or_listeners.add(listener_name)
 
         return triggered
@@ -1958,22 +1896,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
         if self._is_execution_resuming:
             # During resumption, skip execution but continue listeners
             await self._execute_listeners(listener_name, None)
-
-            # For routers, also check if any conditional starts they triggered are completed
-            # If so, continue their chains
-            if listener_name in self._routers:
-                for start_method_name in self._start_methods:
-                    if (
-                        start_method_name in self._listeners
-                        and start_method_name in self._completed_methods
-                    ):
-                        # This conditional start was executed, continue its chain
-                        await self._execute_start_method(start_method_name)
             return
         # For cyclic flows, clear from completed to allow re-execution
         self._completed_methods.discard(listener_name)
-        # Also clear from fired OR listeners for cyclic flows
-        self._fired_or_listeners.discard(listener_name)
 
         try:
             method = self._methods[listener_name]
@@ -2006,9 +1931,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
                     if self.last_human_feedback is not None
                     else listener_result
                 )
-                # Execute listeners sequentially to prevent race conditions on shared state
-                for name in listeners_for_result:
-                    await self._execute_single_listener(name, feedback_result)
+                tasks = [
+                    self._execute_single_listener(name, feedback_result)
+                    for name in listeners_for_result
+                ]
+                await asyncio.gather(*tasks)
 
         except Exception as e:
             # Don't log HumanFeedbackPending as an error - it's expected control flow
@@ -10,7 +10,6 @@ from typing import (
     get_origin,
 )
 import uuid
-import warnings
 
 from pydantic import (
     UUID4,
@@ -81,11 +80,6 @@ class LiteAgent(FlowTrackable, BaseModel):
     """
     A lightweight agent that can process messages and use tools.
 
-    .. deprecated::
-        LiteAgent is deprecated and will be removed in a future version.
-        Use ``Agent().kickoff(messages)`` instead, which provides the same
-        functionality with additional features like memory and knowledge support.
-
     This agent is simpler than the full Agent class, focusing on direct execution
     rather than task delegation. It's designed to be used for simple interactions
     where a full crew is not needed.
@@ -170,18 +164,6 @@ class LiteAgent(FlowTrackable, BaseModel):
         default_factory=get_after_llm_call_hooks
     )
 
-    @model_validator(mode="after")
-    def emit_deprecation_warning(self) -> Self:
-        """Emit deprecation warning for LiteAgent usage."""
-        warnings.warn(
-            "LiteAgent is deprecated and will be removed in a future version. "
-            "Use Agent().kickoff(messages) instead, which provides the same "
-            "functionality with additional features like memory and knowledge support.",
-            DeprecationWarning,
-            stacklevel=2,
-        )
-        return self
-
     @model_validator(mode="after")
     def setup_llm(self) -> Self:
         """Set up the LLM and other components after initialization."""
@@ -551,6 +533,7 @@ class LiteAgent(FlowTrackable, BaseModel):
         """
         # Execute the agent loop
         formatted_answer: AgentAction | AgentFinish | None = None
+        last_raw_output: str | None = None
         while not isinstance(formatted_answer, AgentFinish):
             try:
                 if has_reached_max_iterations(self._iterations, self.max_iterations):
@@ -574,6 +557,7 @@ class LiteAgent(FlowTrackable, BaseModel):
                     from_agent=self,
                     executor_context=self,
                 )
+                last_raw_output = answer
 
             except Exception as e:
                 raise e
@@ -612,6 +596,8 @@ class LiteAgent(FlowTrackable, BaseModel):
                     iterations=self._iterations,
                     log_error_after=3,
                     printer=self._printer,
+                    raw_output=last_raw_output,
+                    agent_role=self.role,
                 )
 
             except Exception as e:
@@ -1,8 +1,8 @@
 from __future__ import annotations
 
-import asyncio
 from collections.abc import Callable, Sequence
 import json
+import logging
 import re
 from typing import TYPE_CHECKING, Any, Final, Literal, TypedDict
 
@@ -52,26 +52,11 @@ class SummaryContent(TypedDict):
 
 console = Console()
 
+logger = logging.getLogger(__name__)
+
+_MULTIPLE_NEWLINES: Final[re.Pattern[str]] = re.compile(r"\n+")
+
 
-def is_inside_event_loop() -> bool:
-    """Check if code is currently running inside an asyncio event loop.
-
-    This is used to detect when code is being called from within an async context
-    (e.g., inside a Flow). In such cases, callers should return a coroutine
-    instead of executing synchronously to avoid nested event loop errors.
-
-    Returns:
-        True if inside a running event loop, False otherwise.
-    """
-    try:
-        asyncio.get_running_loop()
-        return True
-    except RuntimeError:
-        return False
-
-
 def parse_tools(tools: list[BaseTool]) -> list[CrewStructuredTool]:
     """Parse tools to be used for the task.
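The removed helper used the standard detection idiom: `asyncio.get_running_loop()` raises `RuntimeError` when no loop is running. For reference, a self-contained sketch of the same technique:

```python
import asyncio


def is_inside_event_loop() -> bool:
    try:
        asyncio.get_running_loop()
        return True
    except RuntimeError:
        return False


print(is_inside_event_loop())  # False: plain sync code


async def main() -> None:
    print(is_inside_event_loop())  # True: running under asyncio.run()


asyncio.run(main())
```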
@@ -448,6 +433,8 @@ def handle_output_parser_exception(
     iterations: int,
     log_error_after: int = 3,
     printer: Printer | None = None,
+    raw_output: str | None = None,
+    agent_role: str | None = None,
 ) -> AgentAction:
     """Handle OutputParserError by updating messages and formatted_answer.
 
@@ -457,6 +444,8 @@ def handle_output_parser_exception(
         iterations: Current iteration count
         log_error_after: Number of iterations after which to log errors
         printer: Optional printer instance for logging
+        raw_output: The raw LLM output that failed to parse
+        agent_role: The role of the agent for logging context
 
     Returns:
         AgentAction: A formatted answer with the error
@@ -470,6 +459,27 @@ def handle_output_parser_exception(
         thought="",
     )
 
+    retry_count = iterations + 1
+    agent_context = f" for agent '{agent_role}'" if agent_role else ""
+
+    logger.debug(
+        "Parse failed%s: %s",
+        agent_context,
+        e.error.split("\n")[0],
+    )
+
+    if raw_output is not None:
+        truncated_output = (
+            raw_output[:500] + "..." if len(raw_output) > 500 else raw_output
+        )
+        logger.debug(
+            "Raw output (truncated)%s: %s",
+            agent_context,
+            truncated_output.replace("\n", "\\n"),
+        )
+
+    logger.debug("Retry %d initiated%s", retry_count, agent_context)
+
     if iterations > log_error_after and printer:
         printer.print(
             content=f"Error parsing LLM output, agent will retry: {e.error}",
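The added logging keeps a bounded, single-line record of the unparseable answer: truncate to 500 characters and escape newlines. A runnable sketch of the same rule:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("parser")


def log_raw_output(raw_output: str | None, agent_role: str | None = None) -> None:
    agent_context = f" for agent '{agent_role}'" if agent_role else ""
    if raw_output is not None:
        # Keep the first 500 chars, mark the cut, and escape newlines.
        truncated = raw_output[:500] + "..." if len(raw_output) > 500 else raw_output
        logger.debug(
            "Raw output (truncated)%s: %s",
            agent_context,
            truncated.replace("\n", "\\n"),
        )


log_raw_output("Thought: hello\n" * 100, agent_role="Researcher")
```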
@@ -1,4 +1,4 @@
-"""Unit tests for AgentExecutor.
+"""Unit tests for CrewAgentExecutorFlow.
 
 Tests the Flow-based agent executor implementation including state management,
 flow methods, routing logic, and error handling.
@@ -8,9 +8,9 @@ from unittest.mock import Mock, patch
 
 import pytest
 
-from crewai.experimental.agent_executor import (
+from crewai.experimental.crew_agent_executor_flow import (
     AgentReActState,
-    AgentExecutor,
+    CrewAgentExecutorFlow,
 )
 from crewai.agents.parser import AgentAction, AgentFinish
 
@@ -43,8 +43,8 @@ class TestAgentReActState:
         assert state.ask_for_human_input is True
 
 
-class TestAgentExecutor:
-    """Test AgentExecutor class."""
+class TestCrewAgentExecutorFlow:
+    """Test CrewAgentExecutorFlow class."""
 
     @pytest.fixture
     def mock_dependencies(self):
@@ -87,8 +87,8 @@ class TestAgentExecutor:
         }
 
     def test_executor_initialization(self, mock_dependencies):
-        """Test AgentExecutor initialization."""
-        executor = AgentExecutor(**mock_dependencies)
+        """Test CrewAgentExecutorFlow initialization."""
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
 
         assert executor.llm == mock_dependencies["llm"]
         assert executor.task == mock_dependencies["task"]
@@ -100,9 +100,9 @@ class TestAgentExecutor:
     def test_initialize_reasoning(self, mock_dependencies):
         """Test flow entry point."""
         with patch.object(
-            AgentExecutor, "_show_start_logs"
+            CrewAgentExecutorFlow, "_show_start_logs"
         ) as mock_show_start:
-            executor = AgentExecutor(**mock_dependencies)
+            executor = CrewAgentExecutorFlow(**mock_dependencies)
             result = executor.initialize_reasoning()
 
             assert result == "initialized"
@@ -110,7 +110,7 @@ class TestAgentExecutor:
 
     def test_check_max_iterations_not_reached(self, mock_dependencies):
         """Test routing when iterations < max."""
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         executor.state.iterations = 5
 
         result = executor.check_max_iterations()
@@ -118,7 +118,7 @@ class TestAgentExecutor:
 
     def test_check_max_iterations_reached(self, mock_dependencies):
         """Test routing when iterations >= max."""
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         executor.state.iterations = 10
 
         result = executor.check_max_iterations()
@@ -126,7 +126,7 @@ class TestAgentExecutor:
 
     def test_route_by_answer_type_action(self, mock_dependencies):
         """Test routing for AgentAction."""
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         executor.state.current_answer = AgentAction(
             thought="thinking", tool="search", tool_input="query", text="action text"
         )
@@ -136,7 +136,7 @@ class TestAgentExecutor:
 
     def test_route_by_answer_type_finish(self, mock_dependencies):
         """Test routing for AgentFinish."""
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         executor.state.current_answer = AgentFinish(
             thought="final thoughts", output="Final answer", text="complete"
         )
@@ -146,7 +146,7 @@ class TestAgentExecutor:
 
     def test_continue_iteration(self, mock_dependencies):
         """Test iteration continuation."""
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
 
         result = executor.continue_iteration()
 
@@ -154,8 +154,8 @@ class TestAgentExecutor:
 
     def test_finalize_success(self, mock_dependencies):
         """Test finalize with valid AgentFinish."""
-        with patch.object(AgentExecutor, "_show_logs") as mock_show_logs:
-            executor = AgentExecutor(**mock_dependencies)
+        with patch.object(CrewAgentExecutorFlow, "_show_logs") as mock_show_logs:
+            executor = CrewAgentExecutorFlow(**mock_dependencies)
             executor.state.current_answer = AgentFinish(
                 thought="final thinking", output="Done", text="complete"
             )
@@ -168,7 +168,7 @@ class TestAgentExecutor:
 
     def test_finalize_failure(self, mock_dependencies):
         """Test finalize skips when given AgentAction instead of AgentFinish."""
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         executor.state.current_answer = AgentAction(
             thought="thinking", tool="search", tool_input="query", text="action text"
        )
@@ -181,7 +181,7 @@ class TestAgentExecutor:
 
     def test_format_prompt(self, mock_dependencies):
         """Test prompt formatting."""
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         inputs = {"input": "test input", "tool_names": "tool1, tool2", "tools": "desc"}
 
         result = executor._format_prompt("Prompt {input} {tool_names} {tools}", inputs)
@@ -192,18 +192,18 @@ class TestAgentExecutor:
 
     def test_is_training_mode_false(self, mock_dependencies):
         """Test training mode detection when not in training."""
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         assert executor._is_training_mode() is False
 
     def test_is_training_mode_true(self, mock_dependencies):
         """Test training mode detection when in training."""
         mock_dependencies["crew"]._train = True
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         assert executor._is_training_mode() is True
 
     def test_append_message_to_state(self, mock_dependencies):
         """Test message appending to state."""
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         initial_count = len(executor.state.messages)
 
         executor._append_message_to_state("test message")
@@ -216,7 +216,7 @@ class TestAgentExecutor:
         callback = Mock()
         mock_dependencies["step_callback"] = callback
 
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         answer = AgentFinish(thought="thinking", output="test", text="final")
 
         executor._invoke_step_callback(answer)
@@ -226,14 +226,14 @@ class TestAgentExecutor:
     def test_invoke_step_callback_none(self, mock_dependencies):
         """Test step callback when none provided."""
         mock_dependencies["step_callback"] = None
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
 
         # Should not raise error
         executor._invoke_step_callback(
             AgentFinish(thought="thinking", output="test", text="final")
         )
 
-    @patch("crewai.experimental.agent_executor.handle_output_parser_exception")
+    @patch("crewai.experimental.crew_agent_executor_flow.handle_output_parser_exception")
     def test_recover_from_parser_error(
         self, mock_handle_exception, mock_dependencies
     ):
@@ -242,7 +242,7 @@ class TestAgentExecutor:
 
         mock_handle_exception.return_value = None
 
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         executor._last_parser_error = OutputParserError("test error")
         initial_iterations = executor.state.iterations
 
@@ -252,12 +252,12 @@ class TestAgentExecutor:
         assert executor.state.iterations == initial_iterations + 1
         mock_handle_exception.assert_called_once()
 
-    @patch("crewai.experimental.agent_executor.handle_context_length")
+    @patch("crewai.experimental.crew_agent_executor_flow.handle_context_length")
     def test_recover_from_context_length(
         self, mock_handle_context, mock_dependencies
     ):
         """Test recovery from context length error."""
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         executor._last_context_error = Exception("context too long")
         initial_iterations = executor.state.iterations
 
@@ -270,16 +270,16 @@ class TestAgentExecutor:
     def test_use_stop_words_property(self, mock_dependencies):
         """Test use_stop_words property."""
         mock_dependencies["llm"].supports_stop_words.return_value = True
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         assert executor.use_stop_words is True
 
         mock_dependencies["llm"].supports_stop_words.return_value = False
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         assert executor.use_stop_words is False
 
     def test_compatibility_properties(self, mock_dependencies):
         """Test compatibility properties for mixin."""
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         executor.state.messages = [{"role": "user", "content": "test"}]
         executor.state.iterations = 5
 
@@ -321,8 +321,8 @@ class TestFlowErrorHandling:
         "tools_handler": Mock(),
     }
 
-    @patch("crewai.experimental.agent_executor.get_llm_response")
-    @patch("crewai.experimental.agent_executor.enforce_rpm_limit")
+    @patch("crewai.experimental.crew_agent_executor_flow.get_llm_response")
+    @patch("crewai.experimental.crew_agent_executor_flow.enforce_rpm_limit")
     def test_call_llm_parser_error(
         self, mock_enforce_rpm, mock_get_llm, mock_dependencies
     ):
@@ -332,15 +332,15 @@ class TestFlowErrorHandling:
         mock_enforce_rpm.return_value = None
         mock_get_llm.side_effect = OutputParserError("parse failed")
 
-        executor = AgentExecutor(**mock_dependencies)
+        executor = CrewAgentExecutorFlow(**mock_dependencies)
         result = executor.call_llm_and_parse()
 
         assert result == "parser_error"
         assert executor._last_parser_error is not None
 
-    @patch("crewai.experimental.agent_executor.get_llm_response")
-    @patch("crewai.experimental.agent_executor.enforce_rpm_limit")
-    @patch("crewai.experimental.agent_executor.is_context_length_exceeded")
+    @patch("crewai.experimental.crew_agent_executor_flow.get_llm_response")
+    @patch("crewai.experimental.crew_agent_executor_flow.enforce_rpm_limit")
+    @patch("crewai.experimental.crew_agent_executor_flow.is_context_length_exceeded")
|
||||
@patch("crewai.experimental.crew_agent_executor_flow.is_context_length_exceeded")
|
||||
def test_call_llm_context_error(
|
||||
self,
|
||||
mock_is_context_exceeded,
|
||||
@@ -353,7 +353,7 @@ class TestFlowErrorHandling:
|
||||
mock_get_llm.side_effect = Exception("context length")
|
||||
mock_is_context_exceeded.return_value = True
|
||||
|
||||
executor = AgentExecutor(**mock_dependencies)
|
||||
executor = CrewAgentExecutorFlow(**mock_dependencies)
|
||||
result = executor.call_llm_and_parse()
|
||||
|
||||
assert result == "context_error"
|
||||
@@ -397,10 +397,10 @@ class TestFlowInvoke:
|
||||
"tools_handler": Mock(),
|
||||
}
|
||||
|
||||
@patch.object(AgentExecutor, "kickoff")
|
||||
@patch.object(AgentExecutor, "_create_short_term_memory")
|
||||
@patch.object(AgentExecutor, "_create_long_term_memory")
|
||||
@patch.object(AgentExecutor, "_create_external_memory")
|
||||
@patch.object(CrewAgentExecutorFlow, "kickoff")
|
||||
@patch.object(CrewAgentExecutorFlow, "_create_short_term_memory")
|
||||
@patch.object(CrewAgentExecutorFlow, "_create_long_term_memory")
|
||||
@patch.object(CrewAgentExecutorFlow, "_create_external_memory")
|
||||
def test_invoke_success(
|
||||
self,
|
||||
mock_external_memory,
|
||||
@@ -410,7 +410,7 @@ class TestFlowInvoke:
|
||||
mock_dependencies,
|
||||
):
|
||||
"""Test successful invoke without human feedback."""
|
||||
executor = AgentExecutor(**mock_dependencies)
|
||||
executor = CrewAgentExecutorFlow(**mock_dependencies)
|
||||
|
||||
# Mock kickoff to set the final answer in state
|
||||
def mock_kickoff_side_effect():
|
||||
@@ -429,10 +429,10 @@ class TestFlowInvoke:
|
||||
mock_long_term_memory.assert_called_once()
|
||||
mock_external_memory.assert_called_once()
|
||||
|
||||
@patch.object(AgentExecutor, "kickoff")
|
||||
@patch.object(CrewAgentExecutorFlow, "kickoff")
|
||||
def test_invoke_failure_no_agent_finish(self, mock_kickoff, mock_dependencies):
|
||||
"""Test invoke fails without AgentFinish."""
|
||||
executor = AgentExecutor(**mock_dependencies)
|
||||
executor = CrewAgentExecutorFlow(**mock_dependencies)
|
||||
executor.state.current_answer = AgentAction(
|
||||
thought="thinking", tool="test", tool_input="test", text="action text"
|
||||
)
|
||||
@@ -442,10 +442,10 @@ class TestFlowInvoke:
|
||||
with pytest.raises(RuntimeError, match="without reaching a final answer"):
|
||||
executor.invoke(inputs)
|
||||
|
||||
@patch.object(AgentExecutor, "kickoff")
|
||||
@patch.object(AgentExecutor, "_create_short_term_memory")
|
||||
@patch.object(AgentExecutor, "_create_long_term_memory")
|
||||
@patch.object(AgentExecutor, "_create_external_memory")
|
||||
@patch.object(CrewAgentExecutorFlow, "kickoff")
|
||||
@patch.object(CrewAgentExecutorFlow, "_create_short_term_memory")
|
||||
@patch.object(CrewAgentExecutorFlow, "_create_long_term_memory")
|
||||
@patch.object(CrewAgentExecutorFlow, "_create_external_memory")
|
||||
def test_invoke_with_system_prompt(
|
||||
self,
|
||||
mock_external_memory,
|
||||
@@ -459,7 +459,7 @@ class TestFlowInvoke:
|
||||
"system": "System: {input}",
|
||||
"user": "User: {input} {tool_names} {tools}",
|
||||
}
|
||||
executor = AgentExecutor(**mock_dependencies)
|
||||
executor = CrewAgentExecutorFlow(**mock_dependencies)
|
||||
|
||||
def mock_kickoff_side_effect():
|
||||
executor.state.current_answer = AgentFinish(
|
||||
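A note on the @patch retargeting in the hunks above: unittest.mock patches a name in the module where it is looked up, so when the executor moved from crewai.experimental.agent_executor to crewai.experimental.crew_agent_executor_flow, every patch target had to follow the importing module. A minimal sketch of that rule, using hypothetical module names that are not part of this diff:

    # mypkg/executor.py contains: from mypkg.llm import get_llm_response
    from unittest.mock import patch

    # Patch the name where it is used (mypkg.executor), not where it is
    # defined (mypkg.llm); otherwise the code under test still sees the
    # original function.
    with patch("mypkg.executor.get_llm_response", return_value="ok"):
        ...  # exercise code that calls get_llm_response via mypkg.executor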
@@ -72,53 +72,62 @@ class ResearchResult(BaseModel):

@pytest.mark.vcr()
@pytest.mark.parametrize("verbose", [True, False])
def test_agent_kickoff_preserves_parameters(verbose):
"""Test that Agent.kickoff() uses the correct parameters from the Agent."""
def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose):
"""Test that LiteAgent is created with the correct parameters when Agent.kickoff() is called."""
# Create a test agent with specific parameters
mock_llm = Mock(spec=LLM)
mock_llm.call.return_value = "Final Answer: Test response"
mock_llm.stop = []

from crewai.types.usage_metrics import UsageMetrics

mock_usage_metrics = UsageMetrics(
total_tokens=100,
prompt_tokens=50,
completion_tokens=50,
cached_prompt_tokens=0,
successful_requests=1,
)
mock_llm.get_token_usage_summary.return_value = mock_usage_metrics

llm = LLM(model="gpt-4o-mini")
custom_tools = [WebSearchTool(), CalculatorTool()]
max_iter = 10
max_execution_time = 300

agent = Agent(
role="Test Agent",
goal="Test Goal",
backstory="Test Backstory",
llm=mock_llm,
llm=llm,
tools=custom_tools,
max_iter=max_iter,
max_execution_time=max_execution_time,
verbose=verbose,
)

# Call kickoff and verify it works
result = agent.kickoff("Test query")
# Create a mock to capture the created LiteAgent
created_lite_agent = None
original_lite_agent = LiteAgent

# Verify the agent was configured correctly
assert agent.role == "Test Agent"
assert agent.goal == "Test Goal"
assert agent.backstory == "Test Backstory"
assert len(agent.tools) == 2
assert isinstance(agent.tools[0], WebSearchTool)
assert isinstance(agent.tools[1], CalculatorTool)
assert agent.max_iter == max_iter
assert agent.verbose == verbose
# Define a mock LiteAgent class that captures its arguments
class MockLiteAgent(original_lite_agent):
def __init__(self, **kwargs):
nonlocal created_lite_agent
created_lite_agent = kwargs
super().__init__(**kwargs)

# Verify kickoff returned a result
assert result is not None
assert result.raw is not None
# Patch the LiteAgent class
monkeypatch.setattr("crewai.agent.core.LiteAgent", MockLiteAgent)

# Call kickoff to create the LiteAgent
agent.kickoff("Test query")

# Verify all parameters were passed correctly
assert created_lite_agent is not None
assert created_lite_agent["role"] == "Test Agent"
assert created_lite_agent["goal"] == "Test Goal"
assert created_lite_agent["backstory"] == "Test Backstory"
assert created_lite_agent["llm"] == llm
assert len(created_lite_agent["tools"]) == 2
assert isinstance(created_lite_agent["tools"][0], WebSearchTool)
assert isinstance(created_lite_agent["tools"][1], CalculatorTool)
assert created_lite_agent["max_iterations"] == max_iter
assert created_lite_agent["max_execution_time"] == max_execution_time
assert created_lite_agent["verbose"] == verbose
assert created_lite_agent["response_format"] is None

# Test with a response_format
class TestResponse(BaseModel):
test_field: str

agent.kickoff("Test query", response_format=TestResponse)
assert created_lite_agent["response_format"] == TestResponse


@pytest.mark.vcr()
@@ -301,8 +310,7 @@ def verify_agent_parent_flow(result, agent, flow):


def test_sets_parent_flow_when_inside_flow():
"""Test that an Agent can be created and executed inside a Flow context."""
captured_event = None
captured_agent = None

mock_llm = Mock(spec=LLM)
mock_llm.call.return_value = "Test response"
@@ -335,17 +343,15 @@ def test_sets_parent_flow_when_inside_flow():
event_received = threading.Event()

@crewai_event_bus.on(LiteAgentExecutionStartedEvent)
def capture_event(source, event):
nonlocal captured_event
captured_event = event
def capture_agent(source, event):
nonlocal captured_agent
captured_agent = source
event_received.set()

result = flow.kickoff()
flow.kickoff()

assert event_received.wait(timeout=5), "Timeout waiting for agent execution event"
assert captured_event is not None
assert captured_event.agent_info["role"] == "Test Agent"
assert result is not None
assert captured_agent.parent_flow is flow


@pytest.mark.vcr()
@@ -367,14 +373,16 @@ def test_guardrail_is_called_using_string():

@crewai_event_bus.on(LLMGuardrailStartedEvent)
def capture_guardrail_started(source, event):
assert isinstance(source, Agent)
assert isinstance(source, LiteAgent)
assert source.original_agent == agent
with condition:
guardrail_events["started"].append(event)
condition.notify()

@crewai_event_bus.on(LLMGuardrailCompletedEvent)
def capture_guardrail_completed(source, event):
assert isinstance(source, Agent)
assert isinstance(source, LiteAgent)
assert source.original_agent == agent
with condition:
guardrail_events["completed"].append(event)
condition.notify()
@@ -675,151 +683,3 @@ def test_agent_kickoff_with_mcp_tools(mock_get_mcp_tools):

# Verify MCP tools were retrieved
mock_get_mcp_tools.assert_called_once_with("https://mcp.exa.ai/mcp?api_key=test_exa_key&profile=research")


# ============================================================================
# Tests for LiteAgent inside Flow (magic auto-async pattern)
# ============================================================================

from crewai.flow.flow import listen


@pytest.mark.vcr()
def test_lite_agent_inside_flow_sync():
"""Test that LiteAgent.kickoff() works magically inside a Flow.

This tests the "magic auto-async" pattern where calling agent.kickoff()
from within a Flow automatically detects the event loop and returns a
coroutine that the Flow framework awaits. Users don't need to use async/await.
"""
# Track execution
execution_log = []

class TestFlow(Flow):
@start()
def run_agent(self):
execution_log.append("flow_started")
agent = Agent(
role="Test Agent",
goal="Answer questions",
backstory="A helpful test assistant",
llm=LLM(model="gpt-4o-mini"),
verbose=False,
)
# Magic: just call kickoff() normally - it auto-detects Flow context
result = agent.kickoff(messages="What is 2+2? Reply with just the number.")
execution_log.append("agent_completed")
return result

flow = TestFlow()
result = flow.kickoff()

# Verify the flow executed successfully
assert "flow_started" in execution_log
assert "agent_completed" in execution_log
assert result is not None
assert isinstance(result, LiteAgentOutput)


@pytest.mark.vcr()
def test_lite_agent_inside_flow_with_tools():
"""Test that LiteAgent with tools works correctly inside a Flow."""
class TestFlow(Flow):
@start()
def run_agent_with_tools(self):
agent = Agent(
role="Calculator Agent",
goal="Perform calculations",
backstory="A math expert",
llm=LLM(model="gpt-4o-mini"),
tools=[CalculatorTool()],
verbose=False,
)
result = agent.kickoff(messages="Calculate 10 * 5")
return result

flow = TestFlow()
result = flow.kickoff()

assert result is not None
assert isinstance(result, LiteAgentOutput)
assert result.raw is not None


@pytest.mark.vcr()
def test_multiple_agents_in_same_flow():
"""Test that multiple LiteAgents can run sequentially in the same Flow."""
class MultiAgentFlow(Flow):
@start()
def first_step(self):
agent1 = Agent(
role="First Agent",
goal="Greet users",
backstory="A friendly greeter",
llm=LLM(model="gpt-4o-mini"),
verbose=False,
)
return agent1.kickoff(messages="Say hello")

@listen(first_step)
def second_step(self, first_result):
agent2 = Agent(
role="Second Agent",
goal="Say goodbye",
backstory="A polite farewell agent",
llm=LLM(model="gpt-4o-mini"),
verbose=False,
)
return agent2.kickoff(messages="Say goodbye")

flow = MultiAgentFlow()
result = flow.kickoff()

assert result is not None
assert isinstance(result, LiteAgentOutput)


@pytest.mark.vcr()
def test_lite_agent_kickoff_async_inside_flow():
"""Test that Agent.kickoff_async() works correctly from async Flow methods."""
class AsyncAgentFlow(Flow):
@start()
async def async_agent_step(self):
agent = Agent(
role="Async Test Agent",
goal="Answer questions asynchronously",
backstory="An async helper",
llm=LLM(model="gpt-4o-mini"),
verbose=False,
)
result = await agent.kickoff_async(messages="What is 3+3?")
return result

flow = AsyncAgentFlow()
result = flow.kickoff()

assert result is not None
assert isinstance(result, LiteAgentOutput)


@pytest.mark.vcr()
def test_lite_agent_standalone_still_works():
"""Test that LiteAgent.kickoff() still works normally outside of a Flow.

This verifies that the magic auto-async pattern doesn't break standalone usage
where there's no event loop running.
"""
agent = Agent(
role="Standalone Agent",
goal="Answer questions",
backstory="A helpful assistant",
llm=LLM(model="gpt-4o-mini"),
verbose=False,
)

# This should work normally - no Flow, no event loop
result = agent.kickoff(messages="What is 5+5? Reply with just the number.")

assert result is not None
assert isinstance(result, LiteAgentOutput)
assert result.raw is not None
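The "magic auto-async" behavior the docstrings above describe reduces to a dispatch on whether an event loop is already running when kickoff() is called. The sketch below is illustrative only; the helper name _kickoff_async is hypothetical and the actual crewAI implementation may differ:

    import asyncio

    def kickoff(self, messages):
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No running loop (standalone use): execute synchronously.
            return asyncio.run(self._kickoff_async(messages))
        # A loop is already running (e.g. inside a Flow step): return a
        # coroutine for the framework to await, so user code stays sync.
        return self._kickoff_async(messages)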
@@ -1,119 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Test Agent. A helpful
test assistant\nYour personal goal is: Answer questions\nTo give my best complete
final answer to the task respond using the exact following format:\n\nThought:
I now can give a great answer\nFinal Answer: Your final answer must be the great
and the most complete as possible, it must be outcome described.\n\nI MUST use
these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
What is 2+2? Reply with just the number.\n\nBegin! This is VERY important to
you, use the tools available and give your best Final Answer, your job depends
on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '673'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-Cy7b0HjL79y39EkUcMLrRhPFe3XGj\",\n \"object\":
\"chat.completion\",\n \"created\": 1768444914,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: 4\",\n \"refusal\": null,\n \"annotations\": []\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 136,\n \"completion_tokens\": 13,\n
\ \"total_tokens\": 149,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_8bbc38b4db\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 15 Jan 2026 02:41:55 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
content-length:
- '857'
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '341'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '358'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1
@@ -1,255 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Calculator Agent. A math
expert\nYour personal goal is: Perform calculations\nYou ONLY have access to
the following tools, and should NEVER make up tools that are not listed here:\n\nTool
Name: calculate\nTool Arguments: {\n \"properties\": {\n \"expression\":
{\n \"title\": \"Expression\",\n \"type\": \"string\"\n }\n },\n \"required\":
[\n \"expression\"\n ],\n \"title\": \"CalculatorToolSchema\",\n \"type\":
\"object\",\n \"additionalProperties\": false\n}\nTool Description: Calculate
the result of a mathematical expression.\n\nIMPORTANT: Use the following format
in your response:\n\n```\nThought: you should always think about what to do\nAction:
the action to take, only one name of [calculate], just the name, exactly as
it''s written.\nAction Input: the input to the action, just a simple JSON object,
enclosed in curly braces, using \" to wrap keys and values.\nObservation: the
result of the action\n```\n\nOnce all necessary information is gathered, return
the following format:\n\n```\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n```"},{"role":"user","content":"\nCurrent
Task: Calculate 10 * 5\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1403'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-Cy7avghVPSpszLmlbHpwDQlWDoD6O\",\n \"object\":
\"chat.completion\",\n \"created\": 1768444909,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I need to calculate the expression
10 * 5.\\nAction: calculate\\nAction Input: {\\\"expression\\\":\\\"10 * 5\\\"}\\nObservation:
50\",\n \"refusal\": null,\n \"annotations\": []\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 291,\n \"completion_tokens\": 33,\n
\ \"total_tokens\": 324,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 15 Jan 2026 02:41:49 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
content-length:
- '939'
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '579'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '598'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Calculator Agent. A math
expert\nYour personal goal is: Perform calculations\nYou ONLY have access to
the following tools, and should NEVER make up tools that are not listed here:\n\nTool
Name: calculate\nTool Arguments: {\n \"properties\": {\n \"expression\":
{\n \"title\": \"Expression\",\n \"type\": \"string\"\n }\n },\n \"required\":
[\n \"expression\"\n ],\n \"title\": \"CalculatorToolSchema\",\n \"type\":
\"object\",\n \"additionalProperties\": false\n}\nTool Description: Calculate
the result of a mathematical expression.\n\nIMPORTANT: Use the following format
in your response:\n\n```\nThought: you should always think about what to do\nAction:
the action to take, only one name of [calculate], just the name, exactly as
it''s written.\nAction Input: the input to the action, just a simple JSON object,
enclosed in curly braces, using \" to wrap keys and values.\nObservation: the
result of the action\n```\n\nOnce all necessary information is gathered, return
the following format:\n\n```\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n```"},{"role":"user","content":"\nCurrent
Task: Calculate 10 * 5\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"},{"role":"assistant","content":"Thought:
I need to calculate the expression 10 * 5.\nAction: calculate\nAction Input:
{\"expression\":\"10 * 5\"}\nObservation: The result of 10 * 5 is 50"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1591'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-Cy7avDhDZCLvv8v2dh8ZQRrLdci6A\",\n \"object\":
\"chat.completion\",\n \"created\": 1768444909,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now know the final answer.\\nFinal
Answer: 50\",\n \"refusal\": null,\n \"annotations\": []\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 337,\n \"completion_tokens\": 14,\n
\ \"total_tokens\": 351,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 15 Jan 2026 02:41:50 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
content-length:
- '864'
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '429'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '457'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1
@@ -1,119 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Async Test Agent. An async
helper\nYour personal goal is: Answer questions asynchronously\nTo give my best
complete final answer to the task respond using the exact following format:\n\nThought:
I now can give a great answer\nFinal Answer: Your final answer must be the great
and the most complete as possible, it must be outcome described.\n\nI MUST use
these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
What is 3+3?\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '657'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-Cy7atOGxtc4y3oYNI62WiQ0Vogsdv\",\n \"object\":
\"chat.completion\",\n \"created\": 1768444907,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: The sum of 3 + 3 is 6. Therefore, the outcome is that if you add three
and three together, you will arrive at the total of six.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
131,\n \"completion_tokens\": 46,\n \"total_tokens\": 177,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 15 Jan 2026 02:41:48 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
content-length:
- '983'
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '944'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1192'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1
@@ -1,119 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Standalone Agent. A helpful
assistant\nYour personal goal is: Answer questions\nTo give my best complete
final answer to the task respond using the exact following format:\n\nThought:
I now can give a great answer\nFinal Answer: Your final answer must be the great
and the most complete as possible, it must be outcome described.\n\nI MUST use
these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
What is 5+5? Reply with just the number.\n\nBegin! This is VERY important to
you, use the tools available and give your best Final Answer, your job depends
on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '674'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-Cy7azhPwUHQ0p5tdhxSAmLPoE8UgC\",\n \"object\":
\"chat.completion\",\n \"created\": 1768444913,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: 10\",\n \"refusal\": null,\n \"annotations\": []\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 136,\n \"completion_tokens\": 13,\n
\ \"total_tokens\": 149,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 15 Jan 2026 02:41:54 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
content-length:
- '858'
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '455'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '583'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1
@@ -1,239 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are First Agent. A friendly
greeter\nYour personal goal is: Greet users\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\nFinal Answer: Your final answer must be the great and
the most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
hello\n\nBegin! This is VERY important to you, use the tools available and give
your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '632'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CyRKzgODZ9yn3F9OkaXsscLk2Ln3N\",\n \"object\":
\"chat.completion\",\n \"created\": 1768520801,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: Hello! Welcome! I'm so glad to see you here. If you need any assistance
or have any questions, feel free to ask. Have a wonderful day!\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
127,\n \"completion_tokens\": 43,\n \"total_tokens\": 170,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 15 Jan 2026 23:46:42 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
content-length:
- '990'
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '880'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1160'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Second Agent. A polite
farewell agent\nYour personal goal is: Say goodbye\nTo give my best complete
final answer to the task respond using the exact following format:\n\nThought:
I now can give a great answer\nFinal Answer: Your final answer must be the great
and the most complete as possible, it must be outcome described.\n\nI MUST use
these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
Say goodbye\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '640'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CyRL1Ua2PkK5xXPp3KeF0AnGAk3JP\",\n \"object\":
\"chat.completion\",\n \"created\": 1768520803,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: As we reach the end of our conversation, I want to express my gratitude
for the time we've shared. It's been a pleasure assisting you, and I hope
you found our interaction helpful and enjoyable. Remember, whenever you need
assistance, I'm just a message away. Wishing you all the best in your future
endeavors. Goodbye and take care!\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 126,\n \"completion_tokens\":
79,\n \"total_tokens\": 205,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 15 Jan 2026 23:46:44 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
content-length:
- '1189'
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1363'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1605'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1
File diff suppressed because one or more lines are too long
@@ -1,528 +1,456 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Guardrail Agent.
|
||||
You are a expert at validating the output of a task. By providing effective
|
||||
feedback if the output is not valid.\\nYour personal goal is: Validate the output
|
||||
of the task\\nTo give my best complete final answer to the task respond using
|
||||
the exact following format:\\n\\nThought: I now can give a great answer\\nFinal
|
||||
Answer: Your final answer must be the great and the most complete as possible,
|
||||
it must be outcome described.\\n\\nI MUST use these formats, my job depends
|
||||
on it!\"},{\"role\":\"user\",\"content\":\"\\nCurrent Task: \\n Ensure
|
||||
the following task result complies with the given guardrail.\\n\\n Task
|
||||
result:\\n \\n Lorem Ipsum is simply dummy text of the printing
|
||||
and typesetting industry. Lorem Ipsum has been the industry's standard dummy
|
||||
text ever\\n \\n\\n Guardrail:\\n Ensure the result has
|
||||
less than 10 words\\n\\n Your task:\\n - Confirm if the Task result
|
||||
complies with the guardrail.\\n - If not, provide clear feedback explaining
|
||||
what is wrong (e.g., by how much it violates the rule, or what specific part
|
||||
fails).\\n - Focus only on identifying issues \u2014 do not propose corrections.\\n
|
||||
\ - If the Task result complies with the guardrail, saying that is valid\\n
|
||||
\ \\n\\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\\n\\nThought:\"}],\"model\":\"gpt-4o\"}"
|
||||
body: '{"trace_id": "00000000-0000-0000-0000-000000000000", "execution_type": "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, "crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-05T22:19:56.074812+00:00"}}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '434'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
- CrewAI-CLI/1.3.0
|
||||
X-Crewai-Version:
|
||||
- 1.3.0
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"error":"bad_credentials","message":"Bad credentials"}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '55'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 22:19:56 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/ https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net https://js.hscollectedforms.net
|
||||
https://js.usemessages.com https://snap.licdn.com https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data: *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com; connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com https://api.hubspot.com
|
||||
https://forms.hscollectedforms.net https://api.hubapi.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509 https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self'' *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com https://drive.google.com https://slides.google.com https://accounts.google.com https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/ https://www.youtube.com https://share.descript.com'
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 230c6cb5-92c7-448d-8c94-e5548a9f4259
|
||||
x-runtime:
|
||||
- '0.073220'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 401
|
||||
message: Unauthorized
|
||||
- request:
    body: '{"messages":[{"role":"system","content":"You are Guardrail Agent. You are a expert at validating the output of a task. By providing effective feedback if the output is not valid.\nYour personal goal is: Validate the output of the task\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!Ensure your final answer strictly adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": {\n \"properties\": {\n \"valid\": {\n \"description\": \"Whether the task output complies with the guardrail\",\n \"title\": \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": {\n \"anyOf\":
      [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"null\"\n }\n ],\n \"default\": null,\n \"description\": \"A feedback about the task output if it is not valid\",\n \"title\": \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\": false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python."},{"role":"user","content":"\n Ensure the following task result complies with the given guardrail.\n\n Task result:\n \n Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry''s standard dummy text ever\n \n\n Guardrail:\n Ensure
      the result has less than 10 words\n\n Your task:\n - Confirm if the Task result complies with the guardrail.\n - If not, provide clear feedback explaining what is wrong (e.g., by how much it violates the rule, or what specific part fails).\n - Focus only on identifying issues — do not propose corrections.\n - If the Task result complies with the guardrail, saying that is valid\n "}],"model":"gpt-4o"}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '1467'
      - '2452'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.109.1
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      - MacOS
      x-stainless-package-version:
      - 1.83.0
      - 1.109.1
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      - '600'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
      - 3.12.9
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-Cy7yHRYTZi8yzRbcODnKr92keLKCb\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768446357,\n \"model\": \"gpt-4o-2024-08-06\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"The task result provided has more than
        10 words. I will count the words to verify this.\\n\\nThe task result is the
        following text:\\n\\\"Lorem Ipsum is simply dummy text of the printing and
        typesetting industry. Lorem Ipsum has been the industry's standard dummy text
        ever\\\"\\n\\nCounting the words:\\n\\n1. Lorem \\n2. Ipsum \\n3. is \\n4.
        simply \\n5. dummy \\n6. text \\n7. of \\n8. the \\n9. printing \\n10. and
        \\n11. typesetting \\n12. industry. \\n13. Lorem \\n14. Ipsum \\n15. has \\n16.
        been \\n17. the \\n18. industry's \\n19. standard \\n20. dummy \\n21. text
        \\n22. ever\\n\\nThe total word count is 22.\\n\\nThought: I now can give
        a great answer\\nFinal Answer: The task result does not comply with the guardrail.
        It contains 22 words, which exceeds the limit of 10 words.\",\n \"refusal\":
        null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
        \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        285,\n \"completion_tokens\": 195,\n \"total_tokens\": 480,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_deacdd5f6f\"\n}\n"
      string: "{\n \"id\": \"chatcmpl-CYg96Riy2RJRxnBHvoROukymP9wvs\",\n \"object\": \"chat.completion\",\n \"created\": 1762381196,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I need to check if the task result meets the requirement of having less than 10 words.\\n\\nFinal Answer: {\\n \\\"valid\\\": false,\\n \\\"feedback\\\": \\\"The task result contains more than 10 words, violating the guardrail. The text provided contains about 21 words.\\\"\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 489,\n \"completion_tokens\": 61,\n \"total_tokens\": 550,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\"\
        : 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_cbf1785567\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      - REDACTED-RAY
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 15 Jan 2026 03:05:59 GMT
      - Wed, 05 Nov 2025 22:19:58 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:49:58 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Strict-Transport-Security:
      - STS-XXX
      - max-age=31536000; includeSubDomains; preload
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      - nosniff
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '1557'
      openai-organization:
      - OPENAI-ORG-XXX
      - user-hortuttj2f3qtmxyik2zxf4q
      openai-processing-ms:
      - '2130'
      - '2201'
      openai-project:
      - OPENAI-PROJECT-XXX
      - proj_fL4UBWR1CMpAAdgzaSKqsVvA
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '2147'
      - '2401'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      - '500'
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      - '30000'
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      - '499'
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      - '29439'
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      - 120ms
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      - 1.122s
      x-request-id:
      - X-REQUEST-ID-XXX
      - req_REDACTED
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly
      adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\":
      {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\":
      {\n \"properties\": {\n \"valid\": {\n \"description\":
      \"Whether the task output complies with the guardrail\",\n \"title\":
      \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\":
      {\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\":
      \"null\"\n }\n ],\n \"default\": null,\n \"description\":
      \"A feedback about the task output if it is not valid\",\n \"title\":
      \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\":
      \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\":
      false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output.
      Ensure the final output does not include any code block markers like ```json
      or ```python."},{"role":"user","content":"The task result does not comply with
      the guardrail. It contains 22 words, which exceeds the limit of 10 words."}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether
      the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A
      feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}'
    body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": {\n \"properties\": {\n \"valid\": {\n \"description\": \"Whether the task output complies with the guardrail\",\n \"title\": \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": {\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"null\"\n }\n ],\n \"default\": null,\n \"description\": \"A feedback about the task output if it is not valid\",\n \"title\": \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\":
      false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python."},{"role":"user","content":"{\n \"valid\": false,\n \"feedback\": \"The task result contains more than 10 words, violating the guardrail. The text provided contains about 21 words.\"\n}"}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '1835'
      - '1884'
      content-type:
      - application/json
      cookie:
      - COOKIE-XXX
      - __cf_bm=REDACTED; _cfuvid=REDACTED
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.109.1
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-helper-method:
      - beta.chat.completions.parse
      - chat.completions.parse
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      - MacOS
      x-stainless-package-version:
      - 1.83.0
      - 1.109.1
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      - '600'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
      - 3.12.9
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-Cy7yJiPCk4fXuogyT5e8XeGRLCSf8\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768446359,\n \"model\": \"gpt-4o-2024-08-06\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"{\\\"valid\\\":false,\\\"feedback\\\":\\\"The
        task output exceeds the word limit of 10 words by containing 22 words.\\\"}\",\n
        \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
        null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        363,\n \"completion_tokens\": 25,\n \"total_tokens\": 388,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_a0e9480a2f\"\n}\n"
      string: "{\n \"id\": \"chatcmpl-CYg98QlZ8NTrQ69676MpXXyCoZJT8\",\n \"object\": \"chat.completion\",\n \"created\": 1762381198,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"{\\\"valid\\\":false,\\\"feedback\\\":\\\"The task result contains more than 10 words, violating the guardrail. The text provided contains about 21 words.\\\"}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 374,\n \"completion_tokens\": 32,\n \"total_tokens\": 406,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n\
        \ \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_cbf1785567\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      - REDACTED-RAY
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 15 Jan 2026 03:05:59 GMT
      - Wed, 05 Nov 2025 22:19:59 GMT
      Server:
      - cloudflare
      Strict-Transport-Security:
      - STS-XXX
      - max-age=31536000; includeSubDomains; preload
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      - nosniff
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '913'
      openai-organization:
      - OPENAI-ORG-XXX
      - user-hortuttj2f3qtmxyik2zxf4q
      openai-processing-ms:
      - '488'
      - '419'
      openai-project:
      - OPENAI-PROJECT-XXX
      - proj_fL4UBWR1CMpAAdgzaSKqsVvA
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '507'
      - '432'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      - '500'
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      - '30000'
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      - '499'
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      - '29702'
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      - 120ms
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      - 596ms
      x-request-id:
      - X-REQUEST-ID-XXX
      - req_REDACTED
    status:
      code: 200
      message: OK
- request:
    body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Guardrail Agent.
      You are a expert at validating the output of a task. By providing effective
      feedback if the output is not valid.\\nYour personal goal is: Validate the output
      of the task\\nTo give my best complete final answer to the task respond using
      the exact following format:\\n\\nThought: I now can give a great answer\\nFinal
      Answer: Your final answer must be the great and the most complete as possible,
      it must be outcome described.\\n\\nI MUST use these formats, my job depends
      on it!\"},{\"role\":\"user\",\"content\":\"\\nCurrent Task: \\n Ensure
      the following task result complies with the given guardrail.\\n\\n Task
      result:\\n \\n Lorem Ipsum is simply dummy text of the printing
      and typesetting industry. Lorem Ipsum has been the industry's standard dummy
      text ever\\n \\n\\n Guardrail:\\n Ensure the result has
      less than 500 words\\n\\n Your task:\\n - Confirm if the Task
      result complies with the guardrail.\\n - If not, provide clear feedback
      explaining what is wrong (e.g., by how much it violates the rule, or what specific
      part fails).\\n - Focus only on identifying issues \u2014 do not propose
      corrections.\\n - If the Task result complies with the guardrail, saying
      that is valid\\n \\n\\nBegin! This is VERY important to you, use the
      tools available and give your best Final Answer, your job depends on it!\\n\\nThought:\"}],\"model\":\"gpt-4o\"}"
    body: '{"messages":[{"role":"system","content":"You are Guardrail Agent. You are a expert at validating the output of a task. By providing effective feedback if the output is not valid.\nYour personal goal is: Validate the output of the task\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!Ensure your final answer strictly adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": {\n \"properties\": {\n \"valid\": {\n \"description\": \"Whether the task output complies with the guardrail\",\n \"title\": \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": {\n \"anyOf\":
      [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"null\"\n }\n ],\n \"default\": null,\n \"description\": \"A feedback about the task output if it is not valid\",\n \"title\": \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\": false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python."},{"role":"user","content":"\n Ensure the following task result complies with the given guardrail.\n\n Task result:\n \n Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry''s standard dummy text ever\n \n\n Guardrail:\n Ensure
      the result has less than 500 words\n\n Your task:\n - Confirm if the Task result complies with the guardrail.\n - If not, provide clear feedback explaining what is wrong (e.g., by how much it violates the rule, or what specific part fails).\n - Focus only on identifying issues — do not propose corrections.\n - If the Task result complies with the guardrail, saying that is valid\n "}],"model":"gpt-4o"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '1468'
      - '2453'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.109.1
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      - MacOS
      x-stainless-package-version:
      - 1.83.0
      - 1.109.1
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      - '600'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
      - 3.12.9
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-Cy7yKa0rmi2YoTLpyXt9hjeLt2rTI\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768446360,\n \"model\": \"gpt-4o-2024-08-06\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"First, I'll count the number of words
        in the Task result to ensure it complies with the guardrail. \\n\\nThe Task
        result is: \\\"Lorem Ipsum is simply dummy text of the printing and typesetting
        industry. Lorem Ipsum has been the industry's standard dummy text ever.\\\"\\n\\nBy
        counting the words: \\n1. Lorem\\n2. Ipsum\\n3. is\\n4. simply\\n5. dummy\\n6.
        text\\n7. of\\n8. the\\n9. printing\\n10. and\\n11. typesetting\\n12. industry\\n13.
        Lorem\\n14. Ipsum\\n15. has\\n16. been\\n17. the\\n18. industry's\\n19. standard\\n20.
        dummy\\n21. text\\n22. ever\\n\\nThere are 22 words total in the Task result.\\n\\nI
        need to verify if the count of 22 words is less than the guardrail limit of
        500 words.\\n\\nThought: I now can give a great answer\\nFinal Answer: The
        Task result complies with the guardrail as it contains 22 words, which is
        less than the 500-word limit. Therefore, the output is valid.\",\n \"refusal\":
        null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
        \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        285,\n \"completion_tokens\": 227,\n \"total_tokens\": 512,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_deacdd5f6f\"\n}\n"
      string: "{\n \"id\": \"chatcmpl-CYgBMV6fu7EvV2BqzMdJaKyLAg1WW\",\n \"object\": \"chat.completion\",\n \"created\": 1762381336,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\\"valid\\\": true, \\\"feedback\\\": null}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 489,\n \"completion_tokens\": 23,\n \"total_tokens\": 512,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\"\
        : \"fp_cbf1785567\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      - REDACTED-RAY
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 15 Jan 2026 03:06:02 GMT
      - Wed, 05 Nov 2025 22:22:16 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:52:16 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Strict-Transport-Security:
      - STS-XXX
      - max-age=31536000; includeSubDomains; preload
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      - nosniff
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '1668'
      openai-organization:
      - OPENAI-ORG-XXX
      - user-hortuttj2f3qtmxyik2zxf4q
      openai-processing-ms:
      - '2502'
      - '327'
      openai-project:
      - OPENAI-PROJECT-XXX
      - proj_fL4UBWR1CMpAAdgzaSKqsVvA
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '2522'
      - '372'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      - '500'
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      - '30000'
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      - '499'
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      - '29438'
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      - 120ms
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      - 1.124s
      x-request-id:
      - X-REQUEST-ID-XXX
      - req_REDACTED
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly
      adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\":
      {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\":
      {\n \"properties\": {\n \"valid\": {\n \"description\":
      \"Whether the task output complies with the guardrail\",\n \"title\":
      \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\":
      {\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\":
      \"null\"\n }\n ],\n \"default\": null,\n \"description\":
      \"A feedback about the task output if it is not valid\",\n \"title\":
      \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\":
      \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\":
      false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output.
      Ensure the final output does not include any code block markers like ```json
      or ```python."},{"role":"user","content":"The Task result complies with the
      guardrail as it contains 22 words, which is less than the 500-word limit. Therefore,
      the output is valid."}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether
      the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A
      feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}'
    body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": {\n \"properties\": {\n \"valid\": {\n \"description\": \"Whether the task output complies with the guardrail\",\n \"title\": \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": {\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"null\"\n }\n ],\n \"default\": null,\n \"description\": \"A feedback about the task output if it is not valid\",\n \"title\": \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\":
      false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python."},{"role":"user","content":"{\"valid\": true, \"feedback\": null}"}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '1864'
      - '1762'
      content-type:
      - application/json
      cookie:
      - COOKIE-XXX
      - __cf_bm=REDACTED; _cfuvid=REDACTED
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.109.1
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-helper-method:
      - beta.chat.completions.parse
      - chat.completions.parse
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      - MacOS
      x-stainless-package-version:
      - 1.83.0
      - 1.109.1
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      - '600'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
      - 3.12.9
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-Cy7yMAjNYSCz2foZPEcSVCuapzF8y\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768446362,\n \"model\": \"gpt-4o-2024-08-06\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"{\\\"valid\\\":true,\\\"feedback\\\":null}\",\n
        \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
        null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        369,\n \"completion_tokens\": 9,\n \"total_tokens\": 378,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_a0e9480a2f\"\n}\n"
      string: "{\n \"id\": \"chatcmpl-CYgBMU20R45qGGaLN6vNAmW1NR4R6\",\n \"object\": \"chat.completion\",\n \"created\": 1762381336,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"{\\\"valid\\\":true,\\\"feedback\\\":null}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 347,\n \"completion_tokens\": 9,\n \"total_tokens\": 356,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_cbf1785567\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      - REDACTED-RAY
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 15 Jan 2026 03:06:03 GMT
      - Wed, 05 Nov 2025 22:22:17 GMT
      Server:
      - cloudflare
      Strict-Transport-Security:
      - STS-XXX
      - max-age=31536000; includeSubDomains; preload
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      - nosniff
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '837'
      openai-organization:
      - OPENAI-ORG-XXX
      - user-hortuttj2f3qtmxyik2zxf4q
      openai-processing-ms:
      - '413'
      - '1081'
      openai-project:
      - OPENAI-PROJECT-XXX
      - proj_fL4UBWR1CMpAAdgzaSKqsVvA
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '650'
      - '1241'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      - '500'
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      - '30000'
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      - '499'
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      - '29478'
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      - 120ms
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      - 1.042s
      x-request-id:
      - X-REQUEST-ID-XXX
      - req_REDACTED
    status:
      code: 200
      message: OK
@@ -1202,9 +1202,8 @@ def test_complex_and_or_branching():
    )
    assert execution_order.index("branch_2b") > min_branch_1_index

    # Final should be after both 2a and 2b
    # Note: final may not be absolutely last due to independent branches (like branch_1c)
    # that don't contribute to the final result path with sequential listener execution
    # Final should be last and after both 2a and 2b
    assert execution_order[-1] == "final"
    assert execution_order.index("final") > execution_order.index("branch_2a")
    assert execution_order.index("final") > execution_order.index("branch_2b")
@@ -185,8 +185,8 @@ def test_task_guardrail_process_output(task_output):

    result = guardrail(task_output)
    assert result[0] is False
    # Check that feedback is provided (wording varies by LLM)
    assert result[1] and len(result[1]) > 0
    assert result[1] == "The task result contains more than 10 words, violating the guardrail. The text provided contains about 21 words."

    guardrail = LLMGuardrail(
        description="Ensure the result has less than 500 words", llm=LLM(model="gpt-4o")
240  lib/crewai/tests/utilities/test_agent_utils.py  Normal file
@@ -0,0 +1,240 @@
"""Tests for agent_utils module, specifically debug logging for OutputParserError."""

import logging
from unittest.mock import MagicMock

import pytest

from crewai.agents.parser import AgentAction, OutputParserError
from crewai.utilities.agent_utils import handle_output_parser_exception


class TestHandleOutputParserExceptionDebugLogging:
    """Tests for debug logging in handle_output_parser_exception."""

    def test_debug_logging_with_raw_output_and_agent_role(self, caplog: pytest.LogCaptureFixture) -> None:
        """Test that debug logging includes raw output and agent role when provided."""
        error = OutputParserError("Invalid Format: I missed the 'Action:' after 'Thought:'.")
        messages: list[dict[str, str]] = []
        raw_output = "Let me think about this... The answer is..."
        agent_role = "Researcher"

        with caplog.at_level(logging.DEBUG):
            result = handle_output_parser_exception(
                e=error,
                messages=messages,
                iterations=0,
                raw_output=raw_output,
                agent_role=agent_role,
            )

        assert isinstance(result, AgentAction)
        assert "Parse failed for agent 'Researcher'" in caplog.text
        assert "Raw output (truncated) for agent 'Researcher'" in caplog.text
        assert "Let me think about this... The answer is..." in caplog.text
        assert "Retry 1 initiated for agent 'Researcher'" in caplog.text

    def test_debug_logging_without_agent_role(self, caplog: pytest.LogCaptureFixture) -> None:
        """Test that debug logging works without agent role."""
        error = OutputParserError("Invalid Format: I missed the 'Action:' after 'Thought:'.")
        messages: list[dict[str, str]] = []
        raw_output = "Some raw output"

        with caplog.at_level(logging.DEBUG):
            result = handle_output_parser_exception(
                e=error,
                messages=messages,
                iterations=0,
                raw_output=raw_output,
            )

        assert isinstance(result, AgentAction)
        assert "Parse failed:" in caplog.text
        assert "for agent" not in caplog.text.split("Parse failed:")[1].split("\n")[0]
        assert "Raw output (truncated):" in caplog.text
        assert "Retry 1 initiated" in caplog.text

    def test_debug_logging_without_raw_output(self, caplog: pytest.LogCaptureFixture) -> None:
        """Test that debug logging works without raw output."""
        error = OutputParserError("Invalid Format: I missed the 'Action:' after 'Thought:'.")
        messages: list[dict[str, str]] = []

        with caplog.at_level(logging.DEBUG):
            result = handle_output_parser_exception(
                e=error,
                messages=messages,
                iterations=0,
                agent_role="Researcher",
            )

        assert isinstance(result, AgentAction)
        assert "Parse failed for agent 'Researcher'" in caplog.text
        assert "Raw output (truncated)" not in caplog.text
        assert "Retry 1 initiated for agent 'Researcher'" in caplog.text

    def test_debug_logging_truncates_long_raw_output(self, caplog: pytest.LogCaptureFixture) -> None:
        """Test that raw output is truncated when longer than 500 characters."""
        error = OutputParserError("Invalid Format")
        messages: list[dict[str, str]] = []
        long_output = "A" * 600

        with caplog.at_level(logging.DEBUG):
            handle_output_parser_exception(
                e=error,
                messages=messages,
                iterations=0,
                raw_output=long_output,
                agent_role="Researcher",
            )

        assert "A" * 500 + "..." in caplog.text
        assert "A" * 600 not in caplog.text

    def test_debug_logging_does_not_truncate_short_raw_output(self, caplog: pytest.LogCaptureFixture) -> None:
        """Test that short raw output is not truncated."""
        error = OutputParserError("Invalid Format")
        messages: list[dict[str, str]] = []
        short_output = "Short output"

        with caplog.at_level(logging.DEBUG):
            handle_output_parser_exception(
                e=error,
                messages=messages,
                iterations=0,
                raw_output=short_output,
                agent_role="Researcher",
            )

        assert "Short output" in caplog.text
        assert "..." not in caplog.text.split("Short output")[1].split("\n")[0]

    def test_debug_logging_retry_count_increments(self, caplog: pytest.LogCaptureFixture) -> None:
        """Test that retry count is correctly calculated from iterations."""
        error = OutputParserError("Invalid Format")
        messages: list[dict[str, str]] = []

        with caplog.at_level(logging.DEBUG):
            handle_output_parser_exception(
                e=error,
                messages=messages,
                iterations=4,
                raw_output="test",
                agent_role="Researcher",
            )

        assert "Retry 5 initiated" in caplog.text

    def test_debug_logging_escapes_newlines_in_raw_output(self, caplog: pytest.LogCaptureFixture) -> None:
        """Test that newlines in raw output are escaped for readability."""
        error = OutputParserError("Invalid Format")
        messages: list[dict[str, str]] = []
        output_with_newlines = "Line 1\nLine 2\nLine 3"

        with caplog.at_level(logging.DEBUG):
            handle_output_parser_exception(
                e=error,
                messages=messages,
                iterations=0,
                raw_output=output_with_newlines,
                agent_role="Researcher",
            )

        assert "Line 1\\nLine 2\\nLine 3" in caplog.text

    def test_debug_logging_extracts_first_line_of_error(self, caplog: pytest.LogCaptureFixture) -> None:
        """Test that only the first line of the error message is logged."""
        error = OutputParserError("First line of error\nSecond line\nThird line")
        messages: list[dict[str, str]] = []

        with caplog.at_level(logging.DEBUG):
            handle_output_parser_exception(
                e=error,
                messages=messages,
                iterations=0,
                agent_role="Researcher",
            )

        assert "First line of error" in caplog.text
        parse_failed_line = [line for line in caplog.text.split("\n") if "Parse failed" in line][0]
        assert "Second line" not in parse_failed_line

    def test_messages_updated_with_error(self) -> None:
        """Test that messages list is updated with the error."""
        error = OutputParserError("Test error message")
        messages: list[dict[str, str]] = []

        handle_output_parser_exception(
            e=error,
            messages=messages,
            iterations=0,
        )

        assert len(messages) == 1
        assert messages[0]["role"] == "user"
        assert messages[0]["content"] == "Test error message"

    def test_returns_agent_action_with_error_text(self) -> None:
        """Test that the function returns an AgentAction with the error text."""
        error = OutputParserError("Test error message")
        messages: list[dict[str, str]] = []

        result = handle_output_parser_exception(
            e=error,
            messages=messages,
            iterations=0,
        )

        assert isinstance(result, AgentAction)
        assert result.text == "Test error message"
        assert result.tool == ""
        assert result.tool_input == ""
        assert result.thought == ""

    def test_printer_logs_after_log_error_after_iterations(self) -> None:
        """Test that printer logs error after log_error_after iterations."""
        error = OutputParserError("Test error")
        messages: list[dict[str, str]] = []
        printer = MagicMock()

        handle_output_parser_exception(
            e=error,
            messages=messages,
            iterations=4,
            log_error_after=3,
            printer=printer,
        )

        printer.print.assert_called_once()
        call_args = printer.print.call_args
        assert "Error parsing LLM output" in call_args.kwargs["content"]
        assert call_args.kwargs["color"] == "red"

    def test_printer_does_not_log_before_log_error_after_iterations(self) -> None:
        """Test that printer does not log before log_error_after iterations."""
        error = OutputParserError("Test error")
        messages: list[dict[str, str]] = []
        printer = MagicMock()

        handle_output_parser_exception(
            e=error,
            messages=messages,
            iterations=2,
            log_error_after=3,
            printer=printer,
        )

        printer.print.assert_not_called()

    def test_backward_compatibility_without_new_parameters(self) -> None:
        """Test that the function works without the new optional parameters."""
        error = OutputParserError("Test error")
        messages: list[dict[str, str]] = []

        result = handle_output_parser_exception(
            e=error,
            messages=messages,
            iterations=0,
        )

        assert isinstance(result, AgentAction)
        assert len(messages) == 1
@@ -348,11 +348,11 @@ def test_agent_emits_execution_error_event(base_agent, base_task):

    error_message = "Error happening while sending prompt to model."
    base_agent.max_retry_limit = 0

    # Patch at the class level since agent_executor is created lazily
    with patch.object(
        CrewAgentExecutor, "invoke", side_effect=Exception(error_message)
    ):
        CrewAgentExecutor, "invoke", wraps=base_agent.agent_executor.invoke
    ) as invoke_mock:
        invoke_mock.side_effect = Exception(error_message)

        with pytest.raises(Exception):  # noqa: B017
            base_agent.execute_task(
                task=base_task,