Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-26 16:48:13 +00:00
Refactor event handling and introduce new event types
- Migrate from the global `emit` function to `event_bus.emit`
- Add new event types for task failures, tool usage, and agent execution
- Update event listeners and event bus to support more granular event tracking
- Remove deprecated event emission methods
- Improve event type consistency and add more detailed event information
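For orientation, the shape this commit migrates toward is a single process-wide bus that fans typed, pydantic-based event payloads out to registered handlers. The sketch below is illustrative only; it is not the crewAI implementation, and only the `emit`/`on` surface mirrors the diff:

```python
from collections import defaultdict
from typing import Any, Callable, Dict, List, Type

from pydantic import BaseModel


class CrewEvent(BaseModel):
    """Base payload; concrete events add their own fields."""

    type: str


class EventBus:
    """Keeps a handler list per event class and fans emissions out to it."""

    def __init__(self) -> None:
        self._handlers: Dict[Type[CrewEvent], List[Callable]] = defaultdict(list)

    def on(self, event_type: Type[CrewEvent]) -> Callable:
        def decorator(handler: Callable) -> Callable:
            self._handlers[event_type].append(handler)
            return handler

        return decorator

    def emit(self, source: Any, event: CrewEvent) -> None:
        for handler in self._handlers[type(event)]:
            handler(source, event)


event_bus = EventBus()
```

Handlers subscribe with `@event_bus.on(SomeEvent)` and receive `(source, event)`, which is the calling convention every listener touched in this diff uses.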
@@ -22,9 +22,8 @@ from crewai.utilities.converter import generate_model_description
 from crewai.utilities.events.agent_events import (
     AgentExecutionCompleted,
     AgentExecutionError,
-    AgentExecutionStarted,
 )
-from crewai.utilities.events.events import emit
+from crewai.utilities.events.event_bus import event_bus
 from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.token_counter_callback import TokenCalcHandler
 from crewai.utilities.training_handler import CrewTrainingHandler
@@ -188,7 +187,6 @@ class Agent(BaseAgent):
         Returns:
             Output of the agent
         """
-        emit(self, event=AgentExecutionStarted(agent=self, task=task))
         if self.tools_handler:
             self.tools_handler.last_used_tool = {}  # type: ignore # Incompatible types in assignment (expression has type "dict[Never, Never]", variable has type "ToolCalling")

@@ -261,14 +259,19 @@ class Agent(BaseAgent):
                 }
             )["output"]
         except Exception as e:
+            event_bus.emit(
+                self,
+                event=AgentExecutionError(
+                    agent=self,
+                    task=task,
+                    error=str(e),
+                ),
+            )
             if e.__class__.__module__.startswith("litellm"):
                 # Do not retry on litellm errors
                 raise e
             self._times_executed += 1
             if self._times_executed > self.max_retry_limit:
-                emit(
-                    self, event=AgentExecutionError(agent=self, task=task, error=str(e))
-                )
                 raise e
             result = self.execute_task(task, context, tools)

@@ -281,7 +284,9 @@ class Agent(BaseAgent):
         for tool_result in self.tools_results:  # type: ignore # Item "None" of "list[Any] | None" has no attribute "__iter__" (not iterable)
             if tool_result.get("result_as_answer", False):
                 result = tool_result["result"]
-        emit(self, event=AgentExecutionCompleted(agent=self, task=task, output=result))
+        event_bus.emit(
+            self, event=AgentExecutionCompleted(agent=self, task=task, output=result)
+        )
         return result

     def create_agent_executor(
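Because the error event is now emitted inside the `except` block, before the litellm check and the retry counter, subscribers observe every failed attempt rather than only the final one. A small usage sketch built from the imports and handler signature shown in this diff (the handler name is arbitrary):

```python
from crewai.utilities.events.agent_events import AgentExecutionError
from crewai.utilities.events.event_bus import event_bus


@event_bus.on(AgentExecutionError)
def record_agent_error(source, event):
    # `source` is the emitting Agent; the event carries agent, task and error.
    print(f"Agent '{event.agent.role}' failed: {event.error}")
```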
@@ -48,12 +48,12 @@ from crewai.utilities.events.crew_events import (
     CrewKickoffCompleted,
     CrewKickoffFailed,
     CrewKickoffStarted,
-    CrewTrainStarted,
-    CrewTrainCompleted,
-    CrewTrainFailed,
-    CrewTestStarted,
     CrewTestCompleted,
     CrewTestFailed,
+    CrewTestStarted,
+    CrewTrainCompleted,
+    CrewTrainFailed,
+    CrewTrainStarted,
 )
 from crewai.utilities.formatter import (
     aggregate_raw_outputs_from_task_outputs,
@@ -1192,13 +1192,6 @@ class Crew(BaseModel):
             ),
         )
         test_crew = self.copy()
-        # TODO: drop this
-        # self._test_execution_span = test_crew._telemetry.test_execution_span(
-        #     test_crew,
-        #     n_iterations,
-        #     inputs,
-        #     openai_model_name,
-        # )
         evaluator = CrewEvaluator(test_crew, openai_model_name)

         for i in range(1, n_iterations + 1):
@@ -1211,7 +1204,6 @@ class Crew(BaseModel):
                     self,
                     CrewTestCompleted(
                         crew_name=self.name or "crew",
-                        n_iterations=n_iterations,
                     ),
                 )
         except Exception as e:
@@ -40,8 +40,8 @@ from crewai.telemetry.telemetry import Telemetry
 from crewai.tools.base_tool import BaseTool
 from crewai.utilities.config import process_config
 from crewai.utilities.converter import Converter, convert_to_model
-from crewai.utilities.events.events import emit
-from crewai.utilities.events.task_events import TaskCompleted, TaskStarted
+from crewai.utilities.events.event_bus import event_bus
+from crewai.utilities.events.task_events import TaskCompleted, TaskFailed, TaskStarted
 from crewai.utilities.i18n import I18N
 from crewai.utilities.printer import Printer

@@ -350,97 +350,110 @@ class Task(BaseModel):
         tools: Optional[List[Any]],
     ) -> TaskOutput:
         """Run the core execution logic of the task."""
-        agent = agent or self.agent
-        self.agent = agent
-        if not agent:
-            raise Exception(
-                f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
-            )
-
-        self.start_time = datetime.datetime.now()
-        self._execution_span = self._telemetry.task_started(crew=agent.crew, task=self)
-
-        self.prompt_context = context
-        tools = tools or self.tools or []
-
-        self.processed_by_agents.add(agent.role)
-        emit(self, TaskStarted(task=self))
-        result = agent.execute_task(
-            task=self,
-            context=context,
-            tools=tools,
-        )
-
-        pydantic_output, json_output = self._export_output(result)
-        task_output = TaskOutput(
-            name=self.name,
-            description=self.description,
-            expected_output=self.expected_output,
-            raw=result,
-            pydantic=pydantic_output,
-            json_dict=json_output,
-            agent=agent.role,
-            output_format=self._get_output_format(),
-        )
-
-        if self.guardrail:
-            guardrail_result = GuardrailResult.from_tuple(self.guardrail(task_output))
-            if not guardrail_result.success:
-                if self.retry_count >= self.max_retries:
-                    raise Exception(
-                        f"Task failed guardrail validation after {self.max_retries} retries. "
-                        f"Last error: {guardrail_result.error}"
-                    )
-
-                self.retry_count += 1
-                context = self.i18n.errors("validation_error").format(
-                    guardrail_result_error=guardrail_result.error,
-                    task_output=task_output.raw,
-                )
-                printer = Printer()
-                printer.print(
-                    content=f"Guardrail blocked, retrying, due to: {guardrail_result.error}\n",
-                    color="yellow",
-                )
-                return self._execute_core(agent, context, tools)
-
-            if guardrail_result.result is None:
-                raise Exception(
-                    "Task guardrail returned None as result. This is not allowed."
-                )
-
-            if isinstance(guardrail_result.result, str):
-                task_output.raw = guardrail_result.result
-                pydantic_output, json_output = self._export_output(
-                    guardrail_result.result
-                )
-                task_output.pydantic = pydantic_output
-                task_output.json_dict = json_output
-            elif isinstance(guardrail_result.result, TaskOutput):
-                task_output = guardrail_result.result
-
-        self.output = task_output
-        self.end_time = datetime.datetime.now()
-
-        if self.callback:
-            self.callback(self.output)
-
-        if self._execution_span:
-            self._telemetry.task_ended(self._execution_span, self, agent.crew)
-            self._execution_span = None
-
-        if self.output_file:
-            content = (
-                json_output
-                if json_output
-                else pydantic_output.model_dump_json()
-                if pydantic_output
-                else result
-            )
-            self._save_file(content)
-        emit(self, TaskCompleted(task=self, output=task_output))
-        return task_output
+        try:
+            agent = agent or self.agent
+            self.agent = agent
+            if not agent:
+                raise Exception(
+                    f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
+                )
+
+            self.start_time = datetime.datetime.now()
+            self._execution_span = self._telemetry.task_started(
+                crew=agent.crew, task=self
+            )
+
+            self.prompt_context = context
+            tools = tools or self.tools or []
+
+            self.processed_by_agents.add(agent.role)
+            event_bus.emit(self, TaskStarted(task=self))
+            result = agent.execute_task(
+                task=self,
+                context=context,
+                tools=tools,
+            )
+
+            pydantic_output, json_output = self._export_output(result)
+            task_output = TaskOutput(
+                name=self.name,
+                description=self.description,
+                expected_output=self.expected_output,
+                raw=result,
+                pydantic=pydantic_output,
+                json_dict=json_output,
+                agent=agent.role,
+                output_format=self._get_output_format(),
+            )
+
+            if self.guardrail:
+                guardrail_result = GuardrailResult.from_tuple(
+                    self.guardrail(task_output)
+                )
+                if not guardrail_result.success:
+                    if self.retry_count >= self.max_retries:
+                        raise Exception(
+                            f"Task failed guardrail validation after {self.max_retries} retries. "
+                            f"Last error: {guardrail_result.error}"
+                        )
+
+                    self.retry_count += 1
+                    context = self.i18n.errors("validation_error").format(
+                        guardrail_result_error=guardrail_result.error,
+                        task_output=task_output.raw,
+                    )
+                    printer = Printer()
+                    printer.print(
+                        content=f"Guardrail blocked, retrying, due to: {guardrail_result.error}\n",
+                        color="yellow",
+                    )
+                    return self._execute_core(agent, context, tools)
+
+                if guardrail_result.result is None:
+                    raise Exception(
+                        "Task guardrail returned None as result. This is not allowed."
+                    )
+
+                if isinstance(guardrail_result.result, str):
+                    task_output.raw = guardrail_result.result
+                    pydantic_output, json_output = self._export_output(
+                        guardrail_result.result
+                    )
+                    task_output.pydantic = pydantic_output
+                    task_output.json_dict = json_output
+                elif isinstance(guardrail_result.result, TaskOutput):
+                    task_output = guardrail_result.result
+
+            self.output = task_output
+            self.end_time = datetime.datetime.now()
+
+            if self.callback:
+                self.callback(self.output)
+
+            if self._execution_span:
+                self._telemetry.task_ended(self._execution_span, self, agent.crew)
+                self._execution_span = None
+
+            if self.output_file:
+                content = (
+                    json_output
+                    if json_output
+                    else pydantic_output.model_dump_json()
+                    if pydantic_output
+                    else result
+                )
+                self._save_file(content)
+            event_bus.emit(self, TaskCompleted(task=self, output=task_output))
+            return task_output
+        except Exception as e:
+            self.end_time = datetime.datetime.now()
+            if self._execution_span:
+                if agent and agent.crew:
+                    self._telemetry.task_ended(self._execution_span, self, agent.crew)
+                self._execution_span = None
+
+            event_bus.emit(self, TaskFailed(task=self, error=str(e)))
+            raise e  # Re-raise the exception after emitting the event

     def prompt(self) -> str:
         """Prompt the task.
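`_execute_core` now emits `TaskStarted` on entry, `TaskCompleted` on success, and `TaskFailed` from the `except` block before re-raising, so a listener can follow a task through its whole lifecycle. A usage sketch, using the same `(source, event)` handler signature as the listeners elsewhere in this commit:

```python
from crewai.utilities.events.event_bus import event_bus
from crewai.utilities.events.task_events import TaskCompleted, TaskFailed, TaskStarted


@event_bus.on(TaskStarted)
def on_task_started(source, event):
    print(f"Started: {event.task.description}")


@event_bus.on(TaskCompleted)
def on_task_completed(source, event):
    print(f"Completed with output: {event.output}")


@event_bus.on(TaskFailed)
def on_task_failed(source, event):
    print(f"Failed: {event.error}")
```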
@@ -10,15 +10,18 @@ from typing import Any, Dict, List, Optional, Union
 import json5
 from json_repair import repair_json

-import crewai.utilities.events.events as events
 from crewai.agents.tools_handler import ToolsHandler
 from crewai.task import Task
 from crewai.telemetry import Telemetry
 from crewai.tools import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
-from crewai.tools.tool_usage_events import ToolUsageError, ToolUsageFinished
 from crewai.utilities import I18N, Converter, ConverterError, Printer
+from crewai.utilities.events import event_bus
+from crewai.utilities.events.event_types import (
+    ToolUsageError,
+    ToolUsageFinished,
+)

 try:
     import agentops  # type: ignore
@@ -465,7 +468,7 @@ class ToolUsage:

     def on_tool_error(self, tool: Any, tool_calling: ToolCalling, e: Exception) -> None:
         event_data = self._prepare_event_data(tool, tool_calling)
-        events.emit(self, event=ToolUsageError(**{**event_data, "error": str(e)}))
+        event_bus.emit(self, event=ToolUsageError(**{**event_data, "error": str(e)}))

     def on_tool_use_finished(
         self, tool: Any, tool_calling: ToolCalling, from_cache: bool, started_at: float
@@ -479,7 +482,7 @@ class ToolUsage:
                 "from_cache": from_cache,
             }
         )
-        events.emit(self, event=ToolUsageFinished(**event_data))
+        event_bus.emit(self, event=ToolUsageFinished(**event_data))

     def _prepare_event_data(self, tool: Any, tool_calling: ToolCalling) -> dict:
         return {
@@ -4,13 +4,13 @@ from .crew_events import (
     CrewKickoffFailed,
 )
 from .agent_events import AgentExecutionStarted, AgentExecutionCompleted
-from .task_events import TaskStarted, TaskCompleted
+from .task_events import TaskStarted, TaskCompleted, TaskFailed
 from .flow_events import FlowStarted, FlowFinished, MethodExecutionStarted, MethodExecutionFinished
 from .event_bus import event_bus, EventTypes
 from .events import emit, on
 from .event_bus import EventBus
 from .event_listener import EventListener
+from .tool_usage_events import ToolUsageFinished, ToolUsageError

 event_bus = EventBus()
 event_listener = EventListener()

@@ -22,6 +22,7 @@ __all__ = [
     CrewKickoffFailed,
     TaskStarted,
     TaskCompleted,
+    TaskFailed,
     FlowStarted,
     FlowFinished,
     MethodExecutionStarted,
@@ -29,7 +30,9 @@ __all__ = [
     EventTypes,
     emit,
     on,
-    event_bus
+    event_bus,
+    ToolUsageFinished,
+    ToolUsageError,
 ]

@@ -1,13 +1,25 @@
-from typing import Any
+from typing import Any, Dict, List

+from crewai.tools.base_tool import BaseTool
+
 from .crew_events import CrewEvent


+class AgentExecutorCreated(CrewEvent):
+    """Event emitted when an agent executor is created"""
+
+    agent: Any
+    tools: List[BaseTool]
+    type: str = "agent_executor_created"
+
+
 class AgentExecutionStarted(CrewEvent):
     """Event emitted when an agent starts executing a task"""

     agent: Any  # type: ignore
     task: Any  # type: ignore
+    tools: List[Any]
+    inputs: Dict[str, Any]
     type: str = "agent_execution_started"

     model_config = {"arbitrary_types_allowed": True}
@@ -25,7 +25,6 @@ class EventListener:
     _telemetry = Telemetry()

     def __init__(self):
-        print("Initializing EventListener")
        self._setup_listeners()
        self._telemetry.set_tracer()

@@ -64,7 +63,6 @@ class EventListener:

         @event_bus.on(TaskCompleted)
         def on_task_completed(source, event):
-            print(f"✓ Task completed: {event.task.description}")
             print(f" Output: {event.output}")
             result = TaskEvaluator(event.task.agent).evaluate(event.task, event.output)
             print(f" Evaluation: {result.quality}")
@@ -75,9 +73,7 @@ class EventListener:

         @event_bus.on(AgentExecutionStarted)
         def on_agent_execution_started(source, event):
-            print(
-                f"🤖 Agent '{event.agent.role}' started task: {event.task.description}"
-            )
+            print(f"🤖 Agent '{event.agent.role}' started task")

         @event_bus.on(AgentExecutionCompleted)
         def on_agent_execution_completed(source, event):
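The built-in listener registers its handlers once, in `_setup_listeners`, during `__init__`. A custom listener can follow the same shape; this is a hedged sketch (the class below is hypothetical, only the decorator wiring mirrors `EventListener`):

```python
from crewai.utilities.events.event_bus import event_bus
from crewai.utilities.events.task_events import TaskCompleted, TaskFailed


class TaskMetricsListener:
    """Hypothetical listener that counts task outcomes."""

    def __init__(self):
        self.completed = 0
        self.failed = 0
        self._setup_listeners()

    def _setup_listeners(self):
        @event_bus.on(TaskCompleted)
        def _on_completed(source, event):
            self.completed += 1

        @event_bus.on(TaskFailed)
        def _on_failed(source, event):
            self.failed += 1
```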
@@ -2,6 +2,7 @@ from typing import Union

 from .agent_events import (
     AgentExecutionCompleted,
+    AgentExecutionError,
     AgentExecutionStarted,
 )
 from .crew_events import (
@@ -23,8 +24,10 @@ from .flow_events import (
 )
 from .task_events import (
     TaskCompleted,
+    TaskFailed,
     TaskStarted,
 )
+from .tool_usage_events import ToolUsageError, ToolUsageFinished

 EventTypes = Union[
     CrewKickoffStarted,
@@ -40,8 +43,12 @@ EventTypes = Union[
     AgentExecutionCompleted,
     TaskStarted,
     TaskCompleted,
+    TaskFailed,
     FlowStarted,
     FlowFinished,
     MethodExecutionStarted,
     MethodExecutionFinished,
+    AgentExecutionError,
+    ToolUsageFinished,
+    ToolUsageError,
 ]
@@ -20,3 +20,11 @@ class TaskCompleted(CrewEvent):
     type: str = "task_completed"

     model_config = {"arbitrary_types_allowed": True}
+
+
+class TaskFailed(CrewEvent):
+    """Event emitted when a task fails"""
+
+    task: Any
+    error: str
+    type: str = "task_failed"
@@ -1,10 +1,12 @@
 from datetime import datetime
 from typing import Any, Dict

-from pydantic import BaseModel
+from .crew_events import CrewEvent


-class ToolUsageEvent(BaseModel):
+class ToolUsageEvent(CrewEvent):
+    """Base event for tool usage tracking"""
+
     agent_key: str
     agent_role: str
     tool_name: str
@@ -13,12 +15,20 @@ class ToolUsageEvent(BaseModel):
     run_attempts: int | None = None
     delegations: int | None = None

+    model_config = {"arbitrary_types_allowed": True}
+
+
 class ToolUsageFinished(ToolUsageEvent):
+    """Event emitted when a tool execution is completed"""
+
     started_at: datetime
     finished_at: datetime
     from_cache: bool = False
+    type: str = "tool_usage_finished"


 class ToolUsageError(ToolUsageEvent):
+    """Event emitted when a tool execution encounters an error"""
+
     error: str
+    type: str = "tool_usage_error"
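All payloads in this commit follow the same pattern: a pydantic model derived from `CrewEvent` (directly, or via a shared base such as `ToolUsageEvent`) with a string `type` discriminator and, where the fields require it, `arbitrary_types_allowed`. A hedged sketch of adding another event in that style; `ToolUsageStarted` is hypothetical and not part of this commit:

```python
from datetime import datetime

from crewai.utilities.events.tool_usage_events import ToolUsageEvent


class ToolUsageStarted(ToolUsageEvent):
    """Hypothetical event emitted just before a tool runs."""

    started_at: datetime
    type: str = "tool_usage_started"
```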
@@ -1,230 +0,0 @@
[230-line VCR cassette removed: it recorded an OTLP telemetry export POSTed to https://telemetry.crewai.com:4319/v1/traces and an OpenAI gpt-4o-mini chat completion for the "failing_agent" / "This will fail" task, which answered "I now can give a great answer / Final Answer: hi". Full request/response headers and the base64 payload are in the commit but omitted here.]
@@ -0,0 +1,243 @@
[243-line VCR cassette added: two recorded OpenAI gpt-4o-mini calls, the "base_agent" / "Just say hi" chat completion (answer "hi") and a forced TaskEvaluation function call that scores the output 10 with improvement suggestions. Full request/response headers and bodies are in the commit but omitted here.]
tests/utilities/cassettes/test_crew_emits_task_failed_event.yaml (new file, 114 lines)
@@ -0,0 +1,114 @@
[114-line VCR cassette added: a recorded OpenAI gpt-4o-mini chat completion for the "base_agent" / "Just say hi" task, answering "I now can give a great answer / Final Answer: hi". Full request/response headers and bodies are in the commit but omitted here.]
@@ -0,0 +1,111 @@
[111-line VCR cassette added: another recorded OpenAI gpt-4o-mini chat completion for the "base_agent" / "Just say hi" task, answering "I now can give a great answer / Final Answer: hi". Full request/response headers and bodies are in the commit but omitted here.]
@@ -0,0 +1,114 @@
|
|||||||
|
interactions:
|
||||||
|
- request:
|
||||||
|
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
|
||||||
|
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
|
||||||
|
give my best complete final answer to the task respond using the exact following
|
||||||
|
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||||
|
answer must be the great and the most complete as possible, it must be outcome
|
||||||
|
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||||
|
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
|
||||||
|
final answer: hi\nyou MUST return the actual complete content as the final answer,
|
||||||
|
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||||
|
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
|
||||||
|
"gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||||
|
headers:
|
||||||
|
accept:
|
||||||
|
- application/json
|
||||||
|
accept-encoding:
|
||||||
|
- gzip, deflate
|
||||||
|
connection:
|
||||||
|
- keep-alive
|
||||||
|
content-length:
|
||||||
|
- '836'
|
||||||
|
content-type:
|
||||||
|
- application/json
|
||||||
|
cookie:
|
||||||
|
- _cfuvid=gsNyCo_jrDOolzf8SXHDaxQQrEgdR3jgv4OAH8MziDE-1739291824699-0.0.1.1-604800000
|
||||||
|
host:
|
||||||
|
- api.openai.com
|
||||||
|
user-agent:
|
||||||
|
- OpenAI/Python 1.61.0
|
||||||
|
x-stainless-arch:
|
||||||
|
- arm64
|
||||||
|
x-stainless-async:
|
||||||
|
- 'false'
|
||||||
|
x-stainless-lang:
|
||||||
|
- python
|
||||||
|
x-stainless-os:
|
||||||
|
- MacOS
|
||||||
|
x-stainless-package-version:
|
||||||
|
- 1.61.0
|
||||||
|
x-stainless-raw-response:
|
||||||
|
- 'true'
|
||||||
|
x-stainless-retry-count:
|
||||||
|
- '0'
|
||||||
|
x-stainless-runtime:
|
||||||
|
- CPython
|
||||||
|
x-stainless-runtime-version:
|
||||||
|
- 3.12.8
|
||||||
|
method: POST
|
||||||
|
uri: https://api.openai.com/v1/chat/completions
|
||||||
|
response:
|
||||||
|
content: "{\n \"id\": \"chatcmpl-AzpWxLzAcRzigZuIGmjP3ckQgxAom\",\n \"object\":
|
||||||
|
\"chat.completion\",\n \"created\": 1739299455,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||||
|
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||||
|
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
|
||||||
|
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||||
|
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||||
|
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
|
||||||
|
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||||
|
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||||
|
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||||
|
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||||
|
headers:
|
||||||
|
CF-RAY:
|
||||||
|
- 91067d389e90fa16-SJC
|
||||||
|
Connection:
|
||||||
|
- keep-alive
|
||||||
|
Content-Encoding:
|
||||||
|
- gzip
|
||||||
|
Content-Type:
|
||||||
|
- application/json
|
||||||
|
Date:
|
||||||
|
- Tue, 11 Feb 2025 18:44:15 GMT
|
||||||
|
Server:
|
||||||
|
- cloudflare
|
||||||
|
Set-Cookie:
|
||||||
|
- __cf_bm=cRijYuylMGzRGxv3udQL5PhHOR5mRN_9_eLLwevlM_o-1739299455-1.0.1.1-Fszr_Msw0B1.IBMkiunP.VF2ilul1YGZZV8TqMcO3Q2SHvSlqfgm9NHgns1bJrm0wWRvHiCE7wdZfUAOx7T3Lg;
|
||||||
|
path=/; expires=Tue, 11-Feb-25 19:14:15 GMT; domain=.api.openai.com; HttpOnly;
|
||||||
|
Secure; SameSite=None
|
||||||
|
Transfer-Encoding:
|
||||||
|
- chunked
|
||||||
|
X-Content-Type-Options:
|
||||||
|
- nosniff
|
||||||
|
access-control-expose-headers:
|
||||||
|
- X-Request-ID
|
||||||
|
alt-svc:
|
||||||
|
- h3=":443"; ma=86400
|
||||||
|
cf-cache-status:
|
||||||
|
- DYNAMIC
|
||||||
|
openai-organization:
|
||||||
|
- crewai-iuxna1
|
||||||
|
openai-processing-ms:
|
||||||
|
- '716'
|
||||||
|
openai-version:
|
||||||
|
- '2020-10-01'
|
||||||
|
strict-transport-security:
|
||||||
|
- max-age=31536000; includeSubDomains; preload
|
||||||
|
x-ratelimit-limit-requests:
|
||||||
|
- '30000'
|
||||||
|
x-ratelimit-limit-tokens:
|
||||||
|
- '150000000'
|
||||||
|
x-ratelimit-remaining-requests:
|
||||||
|
- '29999'
|
||||||
|
x-ratelimit-remaining-tokens:
|
||||||
|
- '149999810'
|
||||||
|
x-ratelimit-reset-requests:
|
||||||
|
- 2ms
|
||||||
|
x-ratelimit-reset-tokens:
|
||||||
|
- 0s
|
||||||
|
x-request-id:
|
||||||
|
- req_ef807dc3223d40332aae8a313e96ef3a
|
||||||
|
http_version: HTTP/1.1
|
||||||
|
status_code: 200
|
||||||
|
version: 1
|
||||||
@@ -0,0 +1,114 @@
|
|||||||
|
interactions:
|
||||||
|
- request:
|
||||||
|
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
|
||||||
|
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
|
||||||
|
give my best complete final answer to the task respond using the exact following
|
||||||
|
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||||
|
answer must be the great and the most complete as possible, it must be outcome
|
||||||
|
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||||
|
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
|
||||||
|
final answer: hi\nyou MUST return the actual complete content as the final answer,
|
||||||
|
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||||
|
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
|
||||||
|
"gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||||
|
headers:
|
||||||
|
accept:
|
||||||
|
- application/json
|
||||||
|
accept-encoding:
|
||||||
|
- gzip, deflate
|
||||||
|
connection:
|
||||||
|
- keep-alive
|
||||||
|
content-length:
|
||||||
|
- '836'
|
||||||
|
content-type:
|
||||||
|
- application/json
|
||||||
|
host:
|
||||||
|
- api.openai.com
|
||||||
|
user-agent:
|
||||||
|
- OpenAI/Python 1.61.0
|
||||||
|
x-stainless-arch:
|
||||||
|
- arm64
|
||||||
|
x-stainless-async:
|
||||||
|
- 'false'
|
||||||
|
x-stainless-lang:
|
||||||
|
- python
|
||||||
|
x-stainless-os:
|
||||||
|
- MacOS
|
||||||
|
x-stainless-package-version:
|
||||||
|
- 1.61.0
|
||||||
|
x-stainless-raw-response:
|
||||||
|
- 'true'
|
||||||
|
x-stainless-retry-count:
|
||||||
|
- '0'
|
||||||
|
x-stainless-runtime:
|
||||||
|
- CPython
|
||||||
|
x-stainless-runtime-version:
|
||||||
|
- 3.12.8
|
||||||
|
method: POST
|
||||||
|
uri: https://api.openai.com/v1/chat/completions
|
||||||
|
response:
|
||||||
|
content: "{\n \"id\": \"chatcmpl-Azq6WJmBZbITt0acwjRonlLua3QxT\",\n \"object\":
|
||||||
|
\"chat.completion\",\n \"created\": 1739301660,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||||
|
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||||
|
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
|
||||||
|
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||||
|
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||||
|
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
|
||||||
|
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||||
|
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||||
|
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||||
|
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||||
|
headers:
|
||||||
|
CF-RAY:
|
||||||
|
- 9106b311e8c07e25-SJC
|
||||||
|
Connection:
|
||||||
|
- keep-alive
|
||||||
|
Content-Encoding:
|
||||||
|
- gzip
|
||||||
|
Content-Type:
|
||||||
|
- application/json
|
||||||
|
Date:
|
||||||
|
- Tue, 11 Feb 2025 19:21:01 GMT
|
||||||
|
Server:
|
||||||
|
- cloudflare
|
||||||
|
Set-Cookie:
|
||||||
|
- __cf_bm=BkEp5CX4ww8Vwy5oYVZjMyXHy_c9tXdEJGfsQ51m67c-1739301661-1.0.1.1-BUtQpsmDPsq5ZBEmUB.Zw8aq8D3MvPz5U8rR4oa.gsns9S9ve3SPfP8hmpYpSWYYYJPg1KJIBG0OTJwjC9lWkg;
|
||||||
|
path=/; expires=Tue, 11-Feb-25 19:51:01 GMT; domain=.api.openai.com; HttpOnly;
|
||||||
|
Secure; SameSite=None
|
||||||
|
- _cfuvid=4fxBBkCchurossCvL6LwAzMBGrP47yMs0bo0ZMKGz0I-1739301661039-0.0.1.1-604800000;
|
||||||
|
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||||
|
Transfer-Encoding:
|
||||||
|
- chunked
|
||||||
|
X-Content-Type-Options:
|
||||||
|
- nosniff
|
||||||
|
access-control-expose-headers:
|
||||||
|
- X-Request-ID
|
||||||
|
alt-svc:
|
||||||
|
- h3=":443"; ma=86400
|
||||||
|
cf-cache-status:
|
||||||
|
- DYNAMIC
|
||||||
|
openai-organization:
|
||||||
|
- crewai-iuxna1
|
||||||
|
openai-processing-ms:
|
||||||
|
- '499'
|
||||||
|
openai-version:
|
||||||
|
- '2020-10-01'
|
||||||
|
strict-transport-security:
|
||||||
|
- max-age=31536000; includeSubDomains; preload
|
||||||
|
x-ratelimit-limit-requests:
|
||||||
|
- '30000'
|
||||||
|
x-ratelimit-limit-tokens:
|
||||||
|
- '150000000'
|
||||||
|
x-ratelimit-remaining-requests:
|
||||||
|
- '29999'
|
||||||
|
x-ratelimit-remaining-tokens:
|
||||||
|
- '149999810'
|
||||||
|
x-ratelimit-reset-requests:
|
||||||
|
- 2ms
|
||||||
|
x-ratelimit-reset-tokens:
|
||||||
|
- 0s
|
||||||
|
x-request-id:
|
||||||
|
- req_92c40fec00fb98496f76bdaa7b0842a2
|
||||||
|
http_version: HTTP/1.1
|
||||||
|
status_code: 200
|
||||||
|
version: 1
|
||||||
5651 tests/utilities/cassettes/test_tools_emits_error_events.yaml (new file; diff suppressed because it is too large)
512 tests/utilities/cassettes/test_tools_emits_finished_events.yaml (new file)
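The cassette files above and below are vcr-style fixtures: each one records the HTTP interactions of a single test so the suite can replay them offline. A minimal sketch of how a test is wired to its cassette, assuming a pytest-vcr / pytest-recording style plugin (the plugin name and the cassette path convention are assumptions, not taken from this commit):

import pytest

# The marker mirrors the one used by the tests in this commit; filter_headers
# keeps the Authorization header out of the recorded YAML.
@pytest.mark.vcr(filter_headers=["authorization"])
def test_tools_emits_finished_events():
    # On the first run the plugin records real HTTP traffic into a cassette such as
    # tests/utilities/cassettes/test_tools_emits_finished_events.yaml; later runs replay it.
    ...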
@@ -0,0 +1,512 @@
|
|||||||
|
interactions:
|
||||||
|
- request:
|
||||||
|
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
|
||||||
|
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nYou
|
||||||
|
ONLY have access to the following tools, and should NEVER make up tools that
|
||||||
|
are not listed here:\n\nTool Name: say_hi\nTool Arguments: {}\nTool Description:
|
||||||
|
Say hi\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
|
||||||
|
you should always think about what to do\nAction: the action to take, only one
|
||||||
|
name of [say_hi], just the name, exactly as it''s written.\nAction Input: the
|
||||||
|
input to the action, just a simple JSON object, enclosed in curly braces, using
|
||||||
|
\" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
|
||||||
|
all necessary information is gathered, return the following format:\n\n```\nThought:
|
||||||
|
I now know the final answer\nFinal Answer: the final answer to the original
|
||||||
|
input question\n```"}, {"role": "user", "content": "\nCurrent Task: Just say
|
||||||
|
hi\n\nThis is the expect criteria for your final answer: hi\nyou MUST return
|
||||||
|
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||||
|
is VERY important to you, use the tools available and give your best Final Answer,
|
||||||
|
your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||||
|
headers:
|
||||||
|
accept:
|
||||||
|
- application/json
|
||||||
|
accept-encoding:
|
||||||
|
- gzip, deflate
|
||||||
|
connection:
|
||||||
|
- keep-alive
|
||||||
|
content-length:
|
||||||
|
- '1275'
|
||||||
|
content-type:
|
||||||
|
- application/json
|
||||||
|
cookie:
|
||||||
|
- _cfuvid=efIHP1NUsh1dFewGJBu4YoBu6hhGa8vjOOKQglYQGno-1739214901306-0.0.1.1-604800000
|
||||||
|
host:
|
||||||
|
- api.openai.com
|
||||||
|
user-agent:
|
||||||
|
- OpenAI/Python 1.61.0
|
||||||
|
x-stainless-arch:
|
||||||
|
- arm64
|
||||||
|
x-stainless-async:
|
||||||
|
- 'false'
|
||||||
|
x-stainless-lang:
|
||||||
|
- python
|
||||||
|
x-stainless-os:
|
||||||
|
- MacOS
|
||||||
|
x-stainless-package-version:
|
||||||
|
- 1.61.0
|
||||||
|
x-stainless-raw-response:
|
||||||
|
- 'true'
|
||||||
|
x-stainless-retry-count:
|
||||||
|
- '0'
|
||||||
|
x-stainless-runtime:
|
||||||
|
- CPython
|
||||||
|
x-stainless-runtime-version:
|
||||||
|
- 3.12.8
|
||||||
|
method: POST
|
||||||
|
uri: https://api.openai.com/v1/chat/completions
|
||||||
|
response:
|
||||||
|
content: "{\n \"id\": \"chatcmpl-AzUA6kJQfpUvB4CGot4gSfAIR0foh\",\n \"object\":
|
||||||
|
\"chat.completion\",\n \"created\": 1739217314,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||||
|
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||||
|
\"assistant\",\n \"content\": \"you should always think about what to
|
||||||
|
do \\nAction: say_hi \\nAction Input: {} \",\n \"refusal\": null\n
|
||||||
|
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
|
||||||
|
\ ],\n \"usage\": {\n \"prompt_tokens\": 257,\n \"completion_tokens\":
|
||||||
|
19,\n \"total_tokens\": 276,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||||
|
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||||
|
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||||
|
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||||
|
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||||
|
headers:
|
||||||
|
CF-RAY:
|
||||||
|
- 90fea7d78e1fceb9-SJC
|
||||||
|
Connection:
|
||||||
|
- keep-alive
|
||||||
|
Content-Encoding:
|
||||||
|
- gzip
|
||||||
|
Content-Type:
|
||||||
|
- application/json
|
||||||
|
Date:
|
||||||
|
- Mon, 10 Feb 2025 19:55:15 GMT
|
||||||
|
Server:
|
||||||
|
- cloudflare
|
||||||
|
Set-Cookie:
|
||||||
|
- __cf_bm=fmlg1wjOwuOwZhUUOEtL1tQYluAPumn7AHLF8s0EU2Y-1739217315-1.0.1.1-PQDvxn8TOhzaznlHjwVsqPZUzbAyJWFkvzCubfNJydTu2_AyA1cJ8hkM0khsEE4UY_xp8iPe2gSGmH1ydrDa0Q;
|
||||||
|
path=/; expires=Mon, 10-Feb-25 20:25:15 GMT; domain=.api.openai.com; HttpOnly;
|
||||||
|
Secure; SameSite=None
|
||||||
|
Transfer-Encoding:
|
||||||
|
- chunked
|
||||||
|
X-Content-Type-Options:
|
||||||
|
- nosniff
|
||||||
|
access-control-expose-headers:
|
||||||
|
- X-Request-ID
|
||||||
|
alt-svc:
|
||||||
|
- h3=":443"; ma=86400
|
||||||
|
cf-cache-status:
|
||||||
|
- DYNAMIC
|
||||||
|
openai-organization:
|
||||||
|
- crewai-iuxna1
|
||||||
|
openai-processing-ms:
|
||||||
|
- '526'
|
||||||
|
openai-version:
|
||||||
|
- '2020-10-01'
|
||||||
|
strict-transport-security:
|
||||||
|
- max-age=31536000; includeSubDomains; preload
|
||||||
|
x-ratelimit-limit-requests:
|
||||||
|
- '30000'
|
||||||
|
x-ratelimit-limit-tokens:
|
||||||
|
- '150000000'
|
||||||
|
x-ratelimit-remaining-requests:
|
||||||
|
- '29999'
|
||||||
|
x-ratelimit-remaining-tokens:
|
||||||
|
- '149999703'
|
||||||
|
x-ratelimit-reset-requests:
|
||||||
|
- 2ms
|
||||||
|
x-ratelimit-reset-tokens:
|
||||||
|
- 0s
|
||||||
|
x-request-id:
|
||||||
|
- req_f6358ff0cc7a2b8d2e167ab00a40f2a4
|
||||||
|
http_version: HTTP/1.1
|
||||||
|
status_code: 200
|
||||||
|
- request:
|
||||||
|
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
|
||||||
|
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nYou
|
||||||
|
ONLY have access to the following tools, and should NEVER make up tools that
|
||||||
|
are not listed here:\n\nTool Name: say_hi\nTool Arguments: {}\nTool Description:
|
||||||
|
Say hi\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
|
||||||
|
you should always think about what to do\nAction: the action to take, only one
|
||||||
|
name of [say_hi], just the name, exactly as it''s written.\nAction Input: the
|
||||||
|
input to the action, just a simple JSON object, enclosed in curly braces, using
|
||||||
|
\" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
|
||||||
|
all necessary information is gathered, return the following format:\n\n```\nThought:
|
||||||
|
I now know the final answer\nFinal Answer: the final answer to the original
|
||||||
|
input question\n```"}, {"role": "user", "content": "\nCurrent Task: Just say
|
||||||
|
hi\n\nThis is the expect criteria for your final answer: hi\nyou MUST return
|
||||||
|
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||||
|
is VERY important to you, use the tools available and give your best Final Answer,
|
||||||
|
your job depends on it!\n\nThought:"}, {"role": "assistant", "content": "you
|
||||||
|
should always think about what to do \nAction: say_hi \nAction Input: {} \nObservation:
|
||||||
|
hi"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||||
|
headers:
|
||||||
|
accept:
|
||||||
|
- application/json
|
||||||
|
accept-encoding:
|
||||||
|
- gzip, deflate
|
||||||
|
connection:
|
||||||
|
- keep-alive
|
||||||
|
content-length:
|
||||||
|
- '1410'
|
||||||
|
content-type:
|
||||||
|
- application/json
|
||||||
|
cookie:
|
||||||
|
- _cfuvid=efIHP1NUsh1dFewGJBu4YoBu6hhGa8vjOOKQglYQGno-1739214901306-0.0.1.1-604800000;
|
||||||
|
__cf_bm=fmlg1wjOwuOwZhUUOEtL1tQYluAPumn7AHLF8s0EU2Y-1739217315-1.0.1.1-PQDvxn8TOhzaznlHjwVsqPZUzbAyJWFkvzCubfNJydTu2_AyA1cJ8hkM0khsEE4UY_xp8iPe2gSGmH1ydrDa0Q
|
||||||
|
host:
|
||||||
|
- api.openai.com
|
||||||
|
user-agent:
|
||||||
|
- OpenAI/Python 1.61.0
|
||||||
|
x-stainless-arch:
|
||||||
|
- arm64
|
||||||
|
x-stainless-async:
|
||||||
|
- 'false'
|
||||||
|
x-stainless-lang:
|
||||||
|
- python
|
||||||
|
x-stainless-os:
|
||||||
|
- MacOS
|
||||||
|
x-stainless-package-version:
|
||||||
|
- 1.61.0
|
||||||
|
x-stainless-raw-response:
|
||||||
|
- 'true'
|
||||||
|
x-stainless-retry-count:
|
||||||
|
- '0'
|
||||||
|
x-stainless-runtime:
|
||||||
|
- CPython
|
||||||
|
x-stainless-runtime-version:
|
||||||
|
- 3.12.8
|
||||||
|
method: POST
|
||||||
|
uri: https://api.openai.com/v1/chat/completions
|
||||||
|
response:
|
||||||
|
content: "{\n \"id\": \"chatcmpl-AzUA7QdlQy1WZZijxNWUv25sZycg0\",\n \"object\":
|
||||||
|
\"chat.completion\",\n \"created\": 1739217315,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||||
|
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||||
|
\"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
|
||||||
|
Answer: hi\\n```\",\n \"refusal\": null\n },\n \"logprobs\":
|
||||||
|
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||||
|
283,\n \"completion_tokens\": 17,\n \"total_tokens\": 300,\n \"prompt_tokens_details\":
|
||||||
|
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||||
|
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||||
|
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||||
|
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||||
|
headers:
|
||||||
|
CF-RAY:
|
||||||
|
- 90fea7dc5ba6ceb9-SJC
|
||||||
|
Connection:
|
||||||
|
- keep-alive
|
||||||
|
Content-Encoding:
|
||||||
|
- gzip
|
||||||
|
Content-Type:
|
||||||
|
- application/json
|
||||||
|
Date:
|
||||||
|
- Mon, 10 Feb 2025 19:55:15 GMT
|
||||||
|
Server:
|
||||||
|
- cloudflare
|
||||||
|
Transfer-Encoding:
|
||||||
|
- chunked
|
||||||
|
X-Content-Type-Options:
|
||||||
|
- nosniff
|
||||||
|
access-control-expose-headers:
|
||||||
|
- X-Request-ID
|
||||||
|
alt-svc:
|
||||||
|
- h3=":443"; ma=86400
|
||||||
|
cf-cache-status:
|
||||||
|
- DYNAMIC
|
||||||
|
openai-organization:
|
||||||
|
- crewai-iuxna1
|
||||||
|
openai-processing-ms:
|
||||||
|
- '388'
|
||||||
|
openai-version:
|
||||||
|
- '2020-10-01'
|
||||||
|
strict-transport-security:
|
||||||
|
- max-age=31536000; includeSubDomains; preload
|
||||||
|
x-ratelimit-limit-requests:
|
||||||
|
- '30000'
|
||||||
|
x-ratelimit-limit-tokens:
|
||||||
|
- '150000000'
|
||||||
|
x-ratelimit-remaining-requests:
|
||||||
|
- '29999'
|
||||||
|
x-ratelimit-remaining-tokens:
|
||||||
|
- '149999680'
|
||||||
|
x-ratelimit-reset-requests:
|
||||||
|
- 2ms
|
||||||
|
x-ratelimit-reset-tokens:
|
||||||
|
- 0s
|
||||||
|
x-request-id:
|
||||||
|
- req_7d7c68b90b3a9c3ac6092fe17ac1185a
|
||||||
|
http_version: HTTP/1.1
|
||||||
|
status_code: 200
|
||||||
|
- request:
|
||||||
|
body: !!binary |
|
||||||
|
CoMzCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS2jIKEgoQY3Jld2FpLnRl
|
||||||
|
bGVtZXRyeRKOAgoQ2EINIGZRoXD589od63oHmBIIMfUgEWudUbIqDFRhc2sgQ3JlYXRlZDABOcjI
|
||||||
|
7lbu8CIYQZB471bu8CIYSi4KCGNyZXdfa2V5EiIKIGU1ODA3MDFkNTJlYjY1YWZmMjRlZWZlNzhj
|
||||||
|
NzQ2MjhjSjEKB2NyZXdfaWQSJgokNTE4ODdiOTktY2FlMy00Yjc4LWJjMGEtMDY4MmVmNWEzNGQ0
|
||||||
|
Si4KCHRhc2tfa2V5EiIKIDFiMTVlZjIzOTE1YjI3NTVlODlhMGVjM2IyNmExM2QySjEKB3Rhc2tf
|
||||||
|
aWQSJgokMzlmMDlmMWUtOTJmOC00ZGJiLTgzNDAtNjU2ZmVkMDk3ZjM0egIYAYUBAAEAABKkBwoQ
|
||||||
|
RzhWoF6ewSTS/qUc9yeFRhIIM3SNZCwjz5AqDENyZXcgQ3JlYXRlZDABOQjrGlru8CIYQdgbKVru
|
||||||
|
8CIYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTAwLjBKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4x
|
||||||
|
Mi44Si4KCGNyZXdfa2V5EiIKIGU1ODA3MDFkNTJlYjY1YWZmMjRlZWZlNzhjNzQ2MjhjSjEKB2Ny
|
||||||
|
ZXdfaWQSJgokYzk4ODFkY2YtMmM0MS00ZjRlLTgzMjctNjJjYjFhYjJkOTg4ShwKDGNyZXdfcHJv
|
||||||
|
Y2VzcxIMCgpzZXF1ZW50aWFsShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90
|
||||||
|
YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrRAgoLY3Jld19hZ2VudHMSwQIK
|
||||||
|
vgJbeyJrZXkiOiAiYWQxNTMxNjFjNWM1YTg1NmFhMGQwNmIyNDljNGM2NGEiLCAiaWQiOiAiNTU2
|
||||||
|
NzJiMDgtOTU4ZC00MjljLWE3ZTctY2ZlN2U4Y2MwOGZkIiwgInJvbGUiOiAiYmFzZV9hZ2VudCIs
|
||||||
|
ICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyMCwgIm1heF9ycG0iOiBudWxsLCAiZnVu
|
||||||
|
Y3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8tbWluaSIsICJkZWxlZ2F0aW9u
|
||||||
|
X2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9y
|
||||||
|
ZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfV1K/wEKCmNyZXdfdGFza3MS8AEK7QFb
|
||||||
|
eyJrZXkiOiAiMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2YTEzZDIiLCAiaWQiOiAiMzlmMDlm
|
||||||
|
MWUtOTJmOC00ZGJiLTgzNDAtNjU2ZmVkMDk3ZjM0IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxz
|
||||||
|
ZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJiYXNlX2FnZW50IiwgImFn
|
||||||
|
ZW50X2tleSI6ICJhZDE1MzE2MWM1YzVhODU2YWEwZDA2YjI0OWM0YzY0YSIsICJ0b29sc19uYW1l
|
||||||
|
cyI6IFtdfV16AhgBhQEAAQAAEo4CChB8AxWkb2Uwpdc8RpyCRqw5EggJAxbgNu81XyoMVGFzayBD
|
||||||
|
cmVhdGVkMAE5+HQ8Wu7wIhhB+PE8Wu7wIhhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmViNjVh
|
||||||
|
ZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jld19pZBImCiRjOTg4MWRjZi0yYzQxLTRmNGUtODMyNy02
|
||||||
|
MmNiMWFiMmQ5ODhKLgoIdGFza19rZXkSIgogMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2YTEz
|
||||||
|
ZDJKMQoHdGFza19pZBImCiQzOWYwOWYxZS05MmY4LTRkYmItODM0MC02NTZmZWQwOTdmMzR6AhgB
|
||||||
|
hQEAAQAAEqQHChCcXvdbsgYC+gzCMrXs3LN/EgijKwJLCRIiHioMQ3JldyBDcmVhdGVkMAE5iJqz
|
||||||
|
vu7wIhhBqKC/vu7wIhhKGwoOY3Jld2FpX3ZlcnNpb24SCQoHMC4xMDAuMEoaCg5weXRob25fdmVy
|
||||||
|
c2lvbhIICgYzLjEyLjhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmViNjVhZmYyNGVlZmU3OGM3
|
||||||
|
NDYyOGNKMQoHY3Jld19pZBImCiQ2Zjk1ZWI3Yy0wOWM5LTQxOTYtYWFiYi1kOWIxNmMxMzZjODdK
|
||||||
|
HAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNyZXdf
|
||||||
|
bnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgBStECCgtjcmV3
|
||||||
|
X2FnZW50cxLBAgq+Alt7ImtleSI6ICJhZDE1MzE2MWM1YzVhODU2YWEwZDA2YjI0OWM0YzY0YSIs
|
||||||
|
ICJpZCI6ICI1NTY3MmIwOC05NThkLTQyOWMtYTdlNy1jZmU3ZThjYzA4ZmQiLCAicm9sZSI6ICJi
|
||||||
|
YXNlX2FnZW50IiwgInZlcmJvc2U/IjogZmFsc2UsICJtYXhfaXRlciI6IDIwLCAibWF4X3JwbSI6
|
||||||
|
IG51bGwsICJmdW5jdGlvbl9jYWxsaW5nX2xsbSI6ICIiLCAibGxtIjogImdwdC00by1taW5pIiwg
|
||||||
|
ImRlbGVnYXRpb25fZW5hYmxlZD8iOiBmYWxzZSwgImFsbG93X2NvZGVfZXhlY3V0aW9uPyI6IGZh
|
||||||
|
bHNlLCAibWF4X3JldHJ5X2xpbWl0IjogMiwgInRvb2xzX25hbWVzIjogW119XUr/AQoKY3Jld190
|
||||||
|
YXNrcxLwAQrtAVt7ImtleSI6ICIxYjE1ZWYyMzkxNWIyNzU1ZTg5YTBlYzNiMjZhMTNkMiIsICJp
|
||||||
|
ZCI6ICIzOWYwOWYxZS05MmY4LTRkYmItODM0MC02NTZmZWQwOTdmMzQiLCAiYXN5bmNfZXhlY3V0
|
||||||
|
aW9uPyI6IGZhbHNlLCAiaHVtYW5faW5wdXQ/IjogZmFsc2UsICJhZ2VudF9yb2xlIjogImJhc2Vf
|
||||||
|
YWdlbnQiLCAiYWdlbnRfa2V5IjogImFkMTUzMTYxYzVjNWE4NTZhYTBkMDZiMjQ5YzRjNjRhIiwg
|
||||||
|
InRvb2xzX25hbWVzIjogW119XXoCGAGFAQABAAASjgIKEExDo5nPLyHb2H8DfYjPoX4SCLEYs+24
|
||||||
|
8EenKgxUYXNrIENyZWF0ZWQwATmI4NG+7vAiGEFYZdK+7vAiGEouCghjcmV3X2tleRIiCiBlNTgw
|
||||||
|
NzAxZDUyZWI2NWFmZjI0ZWVmZTc4Yzc0NjI4Y0oxCgdjcmV3X2lkEiYKJDZmOTVlYjdjLTA5Yzkt
|
||||||
|
NDE5Ni1hYWJiLWQ5YjE2YzEzNmM4N0ouCgh0YXNrX2tleRIiCiAxYjE1ZWYyMzkxNWIyNzU1ZTg5
|
||||||
|
YTBlYzNiMjZhMTNkMkoxCgd0YXNrX2lkEiYKJDM5ZjA5ZjFlLTkyZjgtNGRiYi04MzQwLTY1NmZl
|
||||||
|
ZDA5N2YzNHoCGAGFAQABAAASpAcKEBBQzR2bcR/7woQ+VkaJ4kQSCD1LFx3SNPPPKgxDcmV3IENy
|
||||||
|
ZWF0ZWQwATlotsW/7vAiGEEgA9C/7vAiGEobCg5jcmV3YWlfdmVyc2lvbhIJCgcwLjEwMC4wShoK
|
||||||
|
DnB5dGhvbl92ZXJzaW9uEggKBjMuMTIuOEouCghjcmV3X2tleRIiCiBlNTgwNzAxZDUyZWI2NWFm
|
||||||
|
ZjI0ZWVmZTc4Yzc0NjI4Y0oxCgdjcmV3X2lkEiYKJDJiMWI2MGYzLTNlZTMtNGNjYi05MDM2LTdk
|
||||||
|
MzE4OTJiYjVkZkocCgxjcmV3X3Byb2Nlc3MSDAoKc2VxdWVudGlhbEoRCgtjcmV3X21lbW9yeRIC
|
||||||
|
EABKGgoUY3Jld19udW1iZXJfb2ZfdGFza3MSAhgBShsKFWNyZXdfbnVtYmVyX29mX2FnZW50cxIC
|
||||||
|
GAFK0QIKC2NyZXdfYWdlbnRzEsECCr4CW3sia2V5IjogImFkMTUzMTYxYzVjNWE4NTZhYTBkMDZi
|
||||||
|
MjQ5YzRjNjRhIiwgImlkIjogIjU1NjcyYjA4LTk1OGQtNDI5Yy1hN2U3LWNmZTdlOGNjMDhmZCIs
|
||||||
|
ICJyb2xlIjogImJhc2VfYWdlbnQiLCAidmVyYm9zZT8iOiBmYWxzZSwgIm1heF9pdGVyIjogMjAs
|
||||||
|
ICJtYXhfcnBtIjogbnVsbCwgImZ1bmN0aW9uX2NhbGxpbmdfbGxtIjogIiIsICJsbG0iOiAiZ3B0
|
||||||
|
LTRvLW1pbmkiLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAiYWxsb3dfY29kZV9leGVj
|
||||||
|
dXRpb24/IjogZmFsc2UsICJtYXhfcmV0cnlfbGltaXQiOiAyLCAidG9vbHNfbmFtZXMiOiBbXX1d
|
||||||
|
Sv8BCgpjcmV3X3Rhc2tzEvABCu0BW3sia2V5IjogIjFiMTVlZjIzOTE1YjI3NTVlODlhMGVjM2Iy
|
||||||
|
NmExM2QyIiwgImlkIjogIjM5ZjA5ZjFlLTkyZjgtNGRiYi04MzQwLTY1NmZlZDA5N2YzNCIsICJh
|
||||||
|
c3luY19leGVjdXRpb24/IjogZmFsc2UsICJodW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3Jv
|
||||||
|
bGUiOiAiYmFzZV9hZ2VudCIsICJhZ2VudF9rZXkiOiAiYWQxNTMxNjFjNWM1YTg1NmFhMGQwNmIy
|
||||||
|
NDljNGM2NGEiLCAidG9vbHNfbmFtZXMiOiBbXX1degIYAYUBAAEAABKOAgoQmT07KMiFRgzOOPQf
|
||||||
|
I4bJPhIIqzN+pCYM6IUqDFRhc2sgQ3JlYXRlZDABOYjr3r/u8CIYQehY37/u8CIYSi4KCGNyZXdf
|
||||||
|
a2V5EiIKIGU1ODA3MDFkNTJlYjY1YWZmMjRlZWZlNzhjNzQ2MjhjSjEKB2NyZXdfaWQSJgokMmIx
|
||||||
|
YjYwZjMtM2VlMy00Y2NiLTkwMzYtN2QzMTg5MmJiNWRmSi4KCHRhc2tfa2V5EiIKIDFiMTVlZjIz
|
||||||
|
OTE1YjI3NTVlODlhMGVjM2IyNmExM2QySjEKB3Rhc2tfaWQSJgokMzlmMDlmMWUtOTJmOC00ZGJi
|
||||||
|
LTgzNDAtNjU2ZmVkMDk3ZjM0egIYAYUBAAEAABKkBwoQE53vZNAWshkoNK1bqTvovRII83djkBUL
|
||||||
|
EbcqDENyZXcgQ3JlYXRlZDABORBBzsDu8CIYQbAU2MDu8CIYShsKDmNyZXdhaV92ZXJzaW9uEgkK
|
||||||
|
BzAuMTAwLjBKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4xMi44Si4KCGNyZXdfa2V5EiIKIGU1ODA3
|
||||||
|
MDFkNTJlYjY1YWZmMjRlZWZlNzhjNzQ2MjhjSjEKB2NyZXdfaWQSJgokNTQ0MWY0MWYtOTVjMC00
|
||||||
|
YzdkLTkxM2QtNDUxODcwY2YyZjYzShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEKC2Ny
|
||||||
|
ZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJf
|
||||||
|
b2ZfYWdlbnRzEgIYAUrRAgoLY3Jld19hZ2VudHMSwQIKvgJbeyJrZXkiOiAiYWQxNTMxNjFjNWM1
|
||||||
|
YTg1NmFhMGQwNmIyNDljNGM2NGEiLCAiaWQiOiAiNTU2NzJiMDgtOTU4ZC00MjljLWE3ZTctY2Zl
|
||||||
|
N2U4Y2MwOGZkIiwgInJvbGUiOiAiYmFzZV9hZ2VudCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4
|
||||||
|
X2l0ZXIiOiAyMCwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwg
|
||||||
|
ImxsbSI6ICJncHQtNG8tbWluaSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxv
|
||||||
|
d19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19u
|
||||||
|
YW1lcyI6IFtdfV1K/wEKCmNyZXdfdGFza3MS8AEK7QFbeyJrZXkiOiAiMWIxNWVmMjM5MTViMjc1
|
||||||
|
NWU4OWEwZWMzYjI2YTEzZDIiLCAiaWQiOiAiMzlmMDlmMWUtOTJmOC00ZGJiLTgzNDAtNjU2ZmVk
|
||||||
|
MDk3ZjM0IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNl
|
||||||
|
LCAiYWdlbnRfcm9sZSI6ICJiYXNlX2FnZW50IiwgImFnZW50X2tleSI6ICJhZDE1MzE2MWM1YzVh
|
||||||
|
ODU2YWEwZDA2YjI0OWM0YzY0YSIsICJ0b29sc19uYW1lcyI6IFtdfV16AhgBhQEAAQAAEo4CChBV
|
||||||
|
JNEz3VIdOlQM9VT3bctVEgisogN707a2AioMVGFzayBDcmVhdGVkMAE5kGbnwO7wIhhBaMDnwO7w
|
||||||
|
IhhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmViNjVhZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jl
|
||||||
|
d19pZBImCiQ1NDQxZjQxZi05NWMwLTRjN2QtOTEzZC00NTE4NzBjZjJmNjNKLgoIdGFza19rZXkS
|
||||||
|
IgogMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2YTEzZDJKMQoHdGFza19pZBImCiQzOWYwOWYx
|
||||||
|
ZS05MmY4LTRkYmItODM0MC02NTZmZWQwOTdmMzR6AhgBhQEAAQAAErQHChDA7zaLCfy56rd5t3oS
|
||||||
|
rDPZEgjYoSW3mq6WJyoMQ3JldyBDcmVhdGVkMAE5cP/5we7wIhhBIH0Dwu7wIhhKGwoOY3Jld2Fp
|
||||||
|
X3ZlcnNpb24SCQoHMC4xMDAuMEoaCg5weXRob25fdmVyc2lvbhIICgYzLjEyLjhKLgoIY3Jld19r
|
||||||
|
ZXkSIgogZTU4MDcwMWQ1MmViNjVhZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jld19pZBImCiRmNjcz
|
||||||
|
MTc1ZS04Y2Q1LTQ1ZWUtYTZiOS0xYWFjMTliODQxZWJKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVl
|
||||||
|
bnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVj
|
||||||
|
cmV3X251bWJlcl9vZl9hZ2VudHMSAhgBStkCCgtjcmV3X2FnZW50cxLJAgrGAlt7ImtleSI6ICJh
|
||||||
|
ZDE1MzE2MWM1YzVhODU2YWEwZDA2YjI0OWM0YzY0YSIsICJpZCI6ICJmMGUwMGIzZi0wZWNmLTQ2
|
||||||
|
OGQtYjdjMC0yZmJhN2I5OTc5YjMiLCAicm9sZSI6ICJiYXNlX2FnZW50IiwgInZlcmJvc2U/Ijog
|
||||||
|
ZmFsc2UsICJtYXhfaXRlciI6IDIwLCAibWF4X3JwbSI6IG51bGwsICJmdW5jdGlvbl9jYWxsaW5n
|
||||||
|
X2xsbSI6ICIiLCAibGxtIjogImdwdC00by1taW5pIiwgImRlbGVnYXRpb25fZW5hYmxlZD8iOiBm
|
||||||
|
YWxzZSwgImFsbG93X2NvZGVfZXhlY3V0aW9uPyI6IGZhbHNlLCAibWF4X3JldHJ5X2xpbWl0Ijog
|
||||||
|
MiwgInRvb2xzX25hbWVzIjogWyJzYXlfaGkiXX1dSocCCgpjcmV3X3Rhc2tzEvgBCvUBW3sia2V5
|
||||||
|
IjogIjFiMTVlZjIzOTE1YjI3NTVlODlhMGVjM2IyNmExM2QyIiwgImlkIjogImFhMGFmMmE2LTdm
|
||||||
|
MTktNDZmNi1iMjMxLTg1M2JjYzYxYzhiZiIsICJhc3luY19leGVjdXRpb24/IjogZmFsc2UsICJo
|
||||||
|
dW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAiYmFzZV9hZ2VudCIsICJhZ2VudF9r
|
||||||
|
ZXkiOiAiYWQxNTMxNjFjNWM1YTg1NmFhMGQwNmIyNDljNGM2NGEiLCAidG9vbHNfbmFtZXMiOiBb
|
||||||
|
InNheV9oaSJdfV16AhgBhQEAAQAAEo4CChBH8NUZY1Cv8sM2lfQLaEogEgiFlW7Wp7QpdyoMVGFz
|
||||||
|
ayBDcmVhdGVkMAE5MNkPwu7wIhhBUCcQwu7wIhhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmVi
|
||||||
|
NjVhZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jld19pZBImCiRmNjczMTc1ZS04Y2Q1LTQ1ZWUtYTZi
|
||||||
|
OS0xYWFjMTliODQxZWJKLgoIdGFza19rZXkSIgogMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2
|
||||||
|
YTEzZDJKMQoHdGFza19pZBImCiRhYTBhZjJhNi03ZjE5LTQ2ZjYtYjIzMS04NTNiY2M2MWM4YmZ6
|
||||||
|
AhgBhQEAAQAAEooBChCJg/wSACw+HIDy4vvYISP/EgjoC/oI/1V0cCoKVG9vbCBVc2FnZTABOWA0
|
||||||
|
ifTu8CIYQTD0lPTu8CIYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTAwLjBKFQoJdG9vbF9uYW1l
|
||||||
|
EggKBnNheV9oaUoOCghhdHRlbXB0cxICGAF6AhgBhQEAAQAA
|
||||||
|
headers:
|
||||||
|
Accept:
|
||||||
|
- '*/*'
|
||||||
|
Accept-Encoding:
|
||||||
|
- gzip, deflate
|
||||||
|
Connection:
|
||||||
|
- keep-alive
|
||||||
|
Content-Length:
|
||||||
|
- '6534'
|
||||||
|
Content-Type:
|
||||||
|
- application/x-protobuf
|
||||||
|
User-Agent:
|
||||||
|
- OTel-OTLP-Exporter-Python/1.27.0
|
||||||
|
method: POST
|
||||||
|
uri: https://telemetry.crewai.com:4319/v1/traces
|
||||||
|
response:
|
||||||
|
body:
|
||||||
|
string: "\n\0"
|
||||||
|
headers:
|
||||||
|
Content-Length:
|
||||||
|
- '2'
|
||||||
|
Content-Type:
|
||||||
|
- application/x-protobuf
|
||||||
|
Date:
|
||||||
|
- Mon, 10 Feb 2025 19:55:17 GMT
|
||||||
|
status:
|
||||||
|
code: 200
|
||||||
|
message: OK
|
||||||
|
- request:
|
||||||
|
body: '{"messages": [{"role": "user", "content": "Assess the quality of the task
|
||||||
|
completed based on the description, expected output, and actual results.\n\nTask
|
||||||
|
Description:\nJust say hi\n\nExpected Output:\nhi\n\nActual Output:\nhi\n```\n\nPlease
|
||||||
|
provide:\n- Bullet points suggestions to improve future similar tasks\n- A score
|
||||||
|
from 0 to 10 evaluating on completion, quality, and overall performance- Entities
|
||||||
|
extracted from the task output, if any, their type, description, and relationships"}],
|
||||||
|
"model": "gpt-4o-mini", "tool_choice": {"type": "function", "function": {"name":
|
||||||
|
"TaskEvaluation"}}, "tools": [{"type": "function", "function": {"name": "TaskEvaluation",
|
||||||
|
"description": "Correctly extracted `TaskEvaluation` with all the required parameters
|
||||||
|
with correct types", "parameters": {"$defs": {"Entity": {"properties": {"name":
|
||||||
|
{"description": "The name of the entity.", "title": "Name", "type": "string"},
|
||||||
|
"type": {"description": "The type of the entity.", "title": "Type", "type":
|
||||||
|
"string"}, "description": {"description": "Description of the entity.", "title":
|
||||||
|
"Description", "type": "string"}, "relationships": {"description": "Relationships
|
||||||
|
of the entity.", "items": {"type": "string"}, "title": "Relationships", "type":
|
||||||
|
"array"}}, "required": ["name", "type", "description", "relationships"], "title":
|
||||||
|
"Entity", "type": "object"}}, "properties": {"suggestions": {"description":
|
||||||
|
"Suggestions to improve future similar tasks.", "items": {"type": "string"},
|
||||||
|
"title": "Suggestions", "type": "array"}, "quality": {"description": "A score
|
||||||
|
from 0 to 10 evaluating on completion, quality, and overall performance, all
|
||||||
|
taking into account the task description, expected output, and the result of
|
||||||
|
the task.", "title": "Quality", "type": "number"}, "entities": {"description":
|
||||||
|
"Entities extracted from the task output.", "items": {"$ref": "#/$defs/Entity"},
|
||||||
|
"title": "Entities", "type": "array"}}, "required": ["entities", "quality",
|
||||||
|
"suggestions"], "type": "object"}}}]}'
|
||||||
|
headers:
|
||||||
|
accept:
|
||||||
|
- application/json
|
||||||
|
accept-encoding:
|
||||||
|
- gzip, deflate
|
||||||
|
connection:
|
||||||
|
- keep-alive
|
||||||
|
content-length:
|
||||||
|
- '1967'
|
||||||
|
content-type:
|
||||||
|
- application/json
|
||||||
|
cookie:
|
||||||
|
- _cfuvid=efIHP1NUsh1dFewGJBu4YoBu6hhGa8vjOOKQglYQGno-1739214901306-0.0.1.1-604800000;
|
||||||
|
__cf_bm=fmlg1wjOwuOwZhUUOEtL1tQYluAPumn7AHLF8s0EU2Y-1739217315-1.0.1.1-PQDvxn8TOhzaznlHjwVsqPZUzbAyJWFkvzCubfNJydTu2_AyA1cJ8hkM0khsEE4UY_xp8iPe2gSGmH1ydrDa0Q
|
||||||
|
host:
|
||||||
|
- api.openai.com
|
||||||
|
user-agent:
|
||||||
|
- OpenAI/Python 1.61.0
|
||||||
|
x-stainless-arch:
|
||||||
|
- arm64
|
||||||
|
x-stainless-async:
|
||||||
|
- 'false'
|
||||||
|
x-stainless-lang:
|
||||||
|
- python
|
||||||
|
x-stainless-os:
|
||||||
|
- MacOS
|
||||||
|
x-stainless-package-version:
|
||||||
|
- 1.61.0
|
||||||
|
x-stainless-raw-response:
|
||||||
|
- 'true'
|
||||||
|
x-stainless-retry-count:
|
||||||
|
- '0'
|
||||||
|
x-stainless-runtime:
|
||||||
|
- CPython
|
||||||
|
x-stainless-runtime-version:
|
||||||
|
- 3.12.8
|
||||||
|
method: POST
|
||||||
|
uri: https://api.openai.com/v1/chat/completions
|
||||||
|
response:
|
||||||
|
content: "{\n \"id\": \"chatcmpl-AzUA8oE0A2d99i1Khpu0CI7fSgRtZ\",\n \"object\":
|
||||||
|
\"chat.completion\",\n \"created\": 1739217316,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||||
|
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||||
|
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||||
|
\ \"id\": \"call_bk3duHRErK1qCyvWJ1uVmmGl\",\n \"type\":
|
||||||
|
\"function\",\n \"function\": {\n \"name\": \"TaskEvaluation\",\n
|
||||||
|
\ \"arguments\": \"{\\\"suggestions\\\":[\\\"Provide more context
|
||||||
|
or details for similar tasks to enhance clarity.\\\",\\\"Specify desired tone
|
||||||
|
or style for the output.\\\",\\\"Consider adding more variety in tasks to keep
|
||||||
|
engagement high.\\\"],\\\"quality\\\":10,\\\"entities\\\":[{\\\"name\\\":\\\"hi\\\",\\\"type\\\":\\\"greeting\\\",\\\"description\\\":\\\"A
|
||||||
|
casual way to say hello or acknowledge someone's presence.\\\",\\\"relationships\\\":[\\\"used
|
||||||
|
as a greeting\\\",\\\"expresses friendliness\\\"]}]}\"\n }\n }\n
|
||||||
|
\ ],\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||||
|
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||||
|
275,\n \"completion_tokens\": 80,\n \"total_tokens\": 355,\n \"prompt_tokens_details\":
|
||||||
|
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||||
|
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||||
|
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||||
|
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||||
|
headers:
|
||||||
|
CF-RAY:
|
||||||
|
- 90fea7dfef41ceb9-SJC
|
||||||
|
Connection:
|
||||||
|
- keep-alive
|
||||||
|
Content-Encoding:
|
||||||
|
- gzip
|
||||||
|
Content-Type:
|
||||||
|
- application/json
|
||||||
|
Date:
|
||||||
|
- Mon, 10 Feb 2025 19:55:17 GMT
|
||||||
|
Server:
|
||||||
|
- cloudflare
|
||||||
|
Transfer-Encoding:
|
||||||
|
- chunked
|
||||||
|
X-Content-Type-Options:
|
||||||
|
- nosniff
|
||||||
|
access-control-expose-headers:
|
||||||
|
- X-Request-ID
|
||||||
|
alt-svc:
|
||||||
|
- h3=":443"; ma=86400
|
||||||
|
cf-cache-status:
|
||||||
|
- DYNAMIC
|
||||||
|
openai-organization:
|
||||||
|
- crewai-iuxna1
|
||||||
|
openai-processing-ms:
|
||||||
|
- '1535'
|
||||||
|
openai-version:
|
||||||
|
- '2020-10-01'
|
||||||
|
strict-transport-security:
|
||||||
|
- max-age=31536000; includeSubDomains; preload
|
||||||
|
x-ratelimit-limit-requests:
|
||||||
|
- '30000'
|
||||||
|
x-ratelimit-limit-tokens:
|
||||||
|
- '150000000'
|
||||||
|
x-ratelimit-remaining-requests:
|
||||||
|
- '29999'
|
||||||
|
x-ratelimit-remaining-tokens:
|
||||||
|
- '149999874'
|
||||||
|
x-ratelimit-reset-requests:
|
||||||
|
- 2ms
|
||||||
|
x-ratelimit-reset-tokens:
|
||||||
|
- 0s
|
||||||
|
x-request-id:
|
||||||
|
- req_55d8eb91b4318245556b73d3f4c1e7c4
|
||||||
|
http_version: HTTP/1.1
|
||||||
|
status_code: 200
|
||||||
|
version: 1
|
||||||
@@ -1,18 +1,35 @@
-import pytest
 from datetime import datetime
-from crewai.utilities.events.events import on, emit
+from unittest import mock
+from unittest.mock import patch
+
+import pytest
+from pydantic import Field
+
+from crewai.agent import Agent
+from crewai.agents.crew_agent_executor import CrewAgentExecutor
+from crewai.crew import Crew
+from crewai.flow.flow import Flow, listen, start
+from crewai.task import Task
+from crewai.tools import BaseTool
 from crewai.utilities.events.agent_events import (
-    AgentExecutionStarted,
     AgentExecutionCompleted,
     AgentExecutionError,
+    AgentExecutionStarted,
 )
-from crewai.utilities.events.task_events import TaskStarted, TaskCompleted
-from crewai.utilities.events.crew_events import CrewKickoffStarted, CrewKickoffCompleted
-from crewai.crew import Crew
-from crewai.agent import Agent
-from crewai.task import Task
-from unittest.mock import patch
-from unittest import mock
+from crewai.utilities.events.crew_events import (
+    CrewKickoffCompleted,
+    CrewKickoffFailed,
+    CrewKickoffStarted,
+)
+from crewai.utilities.events.event_bus import event_bus
+from crewai.utilities.events.event_types import ToolUsageFinished
+from crewai.utilities.events.flow_events import (
+    FlowFinished,
+    FlowStarted,
+    MethodExecutionStarted,
+)
+from crewai.utilities.events.task_events import TaskCompleted, TaskFailed, TaskStarted
+from crewai.utilities.events.tool_usage_events import ToolUsageError
+
 
 base_agent = Agent(
     role="base_agent",
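The import hunk above is the heart of the migration: the free functions from crewai.utilities.events.events are gone and handlers now hang off the shared bus object. A minimal before/after sketch, using only names that appear in this diff (the handler body itself is illustrative):

# Old style (left-hand side of the hunk above):
#
#     from crewai.utilities.events.events import on, emit
#
#     @on(CrewKickoffStarted)
#     def handle_crew_start(source, event): ...
#
# New style: register through the shared bus instance.
from crewai.utilities.events.crew_events import CrewKickoffStarted
from crewai.utilities.events.event_bus import event_bus


@event_bus.on(CrewKickoffStarted)
def handle_crew_start(source, event):
    # Handlers always receive the emitting object and the event instance.
    print(event.type, event.timestamp)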
@@ -30,42 +47,36 @@ base_task = Task(
 
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_crew_emits_start_kickoff_event():
-    # Setup event listener
     received_events = []
 
-    @on(CrewKickoffStarted)
-    def handle_crew_start(source, event):
-        received_events.append(event)
+    with event_bus.scoped_handlers():
 
-    # Create a simple crew
-    crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
+        @event_bus.on(CrewKickoffStarted)
+        def handle_crew_start(source, event):
+            received_events.append(event)
 
-    # Run the crew
-    crew.kickoff()
+        crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
 
-    # Verify the event was emitted
-    assert len(received_events) == 1
-    assert received_events[0].crew_name == "TestCrew"
-    assert isinstance(received_events[0].timestamp, datetime)
-    assert received_events[0].type == "crew_kickoff_started"
+        crew.kickoff()
+
+        assert len(received_events) == 1
+        assert received_events[0].crew_name == "TestCrew"
+        assert isinstance(received_events[0].timestamp, datetime)
+        assert received_events[0].type == "crew_kickoff_started"
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_crew_emits_end_kickoff_event():
-    # Setup event listener
     received_events = []
 
-    @on(CrewKickoffCompleted)
+    @event_bus.on(CrewKickoffCompleted)
     def handle_crew_end(source, event):
         received_events.append(event)
 
-    # Create a simple crew
     crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
 
-    # Run the crew
     crew.kickoff()
 
-    # Verify the event was emitted
     assert len(received_events) == 1
     assert received_events[0].crew_name == "TestCrew"
     assert isinstance(received_events[0].timestamp, datetime)
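For readers who want the mental model behind event_bus.on and event_bus.emit without digging into the crewAI source, here is a self-contained sketch of a bus with the same surface the tests above exercise. It is illustrative only and is not the crewAI implementation:

from collections import defaultdict


class SketchEventBus:
    """Illustrative stand-in for the bus used by the tests above."""

    def __init__(self):
        self._handlers = defaultdict(list)

    def register_handler(self, event_type, handler):
        self._handlers[event_type].append(handler)

    def on(self, event_type):
        # Decorator form: @bus.on(SomeEvent)
        def decorator(handler):
            self.register_handler(event_type, handler)
            return handler

        return decorator

    def emit(self, source, event):
        # Handlers are invoked as handler(source, event), matching the test signatures.
        for handler in self._handlers[type(event)]:
            handler(source, event)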
@@ -73,21 +84,42 @@ def test_crew_emits_end_kickoff_event():
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_emits_start_task_event():
-    # Setup event listener
+def test_crew_emits_kickoff_failed_event():
     received_events = []
 
-    @on(TaskStarted)
+    with event_bus.scoped_handlers():
+
+        @event_bus.on(CrewKickoffFailed)
+        def handle_crew_failed(source, event):
+            received_events.append(event)
+
+        crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
+
+        with patch.object(Crew, "_execute_tasks") as mock_execute:
+            error_message = "Simulated crew kickoff failure"
+            mock_execute.side_effect = Exception(error_message)
+
+            with pytest.raises(Exception):
+                crew.kickoff()
+
+            assert len(received_events) == 1
+            assert received_events[0].error == error_message
+            assert isinstance(received_events[0].timestamp, datetime)
+            assert received_events[0].type == "crew_kickoff_failed"
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_crew_emits_start_task_event():
+    received_events = []
+
+    @event_bus.on(TaskStarted)
     def handle_task_start(source, event):
         received_events.append(event)
 
-    # Create a simple crew
     crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
 
-    # Run the crew
     crew.kickoff()
 
-    # Verify the event was emitted
     assert len(received_events) == 1
     assert isinstance(received_events[0].timestamp, datetime)
     assert received_events[0].type == "task_started"
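The new test_crew_emits_kickoff_failed_event above also doubles as a recipe for application code: anything that wants to be notified when a kickoff fails can subscribe the same way. A short sketch, assuming only the module paths already imported in this test file:

from crewai.utilities.events.crew_events import CrewKickoffFailed
from crewai.utilities.events.event_bus import event_bus


@event_bus.on(CrewKickoffFailed)
def alert_on_kickoff_failure(source, event):
    # event.error and event.timestamp are the fields the test asserts on.
    print(f"Crew kickoff failed at {event.timestamp}: {event.error}")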
@@ -95,90 +127,308 @@ def test_crew_emits_start_task_event():
|
|||||||
|
|
||||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||||
def test_crew_emits_end_task_event():
|
def test_crew_emits_end_task_event():
|
||||||
# Setup event listener
|
|
||||||
received_events = []
|
received_events = []
|
||||||
|
|
||||||
@on(TaskCompleted)
|
@event_bus.on(TaskCompleted)
|
||||||
def handle_task_end(source, event):
|
def handle_task_end(source, event):
|
||||||
received_events.append(event)
|
received_events.append(event)
|
||||||
|
|
||||||
# Create a simple crew
|
|
||||||
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
|
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
|
||||||
|
|
||||||
# Run the crew
|
|
||||||
crew.kickoff()
|
crew.kickoff()
|
||||||
|
|
||||||
# Verify the event was emitted
|
|
||||||
assert len(received_events) == 1
|
assert len(received_events) == 1
|
||||||
assert isinstance(received_events[0].timestamp, datetime)
|
assert isinstance(received_events[0].timestamp, datetime)
|
||||||
assert received_events[0].type == "task_completed"
|
assert received_events[0].type == "task_completed"
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||||
def test_agent_emits_execution_error_event():
|
def test_task_emits_failed_event_on_execution_error():
|
||||||
# Setup event listener
|
|
||||||
received_events = []
|
received_events = []
|
||||||
|
|
||||||
@on(AgentExecutionError)
|
@event_bus.on(TaskFailed)
|
||||||
def handle_agent_error(source, event):
|
def handle_task_failed(source, event):
|
||||||
received_events.append(event)
|
received_events.append(event)
|
||||||
|
|
||||||
# Create an agent that will fail
|
task = Task(
|
||||||
failing_agent = Agent(
|
description="Just say hi",
|
||||||
role="failing_agent",
|
expected_output="hi",
|
||||||
goal="Fail execution",
|
agent=None,
|
||||||
backstory="You are an agent that will fail",
|
|
||||||
max_retry_limit=1, # Set low retry limit for testing
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Create a task that will trigger an error
|
with pytest.raises(Exception) as exc_info:
|
||||||
failing_task = Task(
|
task._execute_core(agent=None, context=None, tools=None)
|
||||||
description="This will fail", agent=failing_agent, expected_output="hi"
|
|
||||||
)
|
|
||||||
|
|
||||||
error_message = "Forced error for testing"
|
assert "has no agent assigned" in str(exc_info.value)
|
||||||
# Mock the agent executor to raise an exception
|
|
||||||
with patch.object(failing_agent.agent_executor, "invoke") as mock_invoke:
|
|
||||||
mock_invoke.side_effect = Exception(error_message)
|
|
||||||
assert failing_agent._times_executed == 0
|
|
||||||
assert failing_agent.max_retry_limit == 1
|
|
||||||
|
|
||||||
# Execute task which should fail and emit error
|
|
||||||
with pytest.raises(Exception) as e:
|
|
||||||
failing_agent.execute_task(failing_task)
|
|
||||||
|
|
||||||
print("error message: ", e.value.args[0])
|
|
||||||
|
|
||||||
# assert e.value.args[0] == error_message
|
|
||||||
# assert failing_agent._times_executed == 2 # Initial attempt + 1 retry
|
|
||||||
|
|
||||||
# Verify the invoke was called twice (initial + retry)
|
|
||||||
mock_invoke.assert_has_calls(
|
|
||||||
[
|
|
||||||
mock.call(
|
|
||||||
{
|
|
||||||
"input": "This will fail\n\nThis is the expect criteria for your final answer: hi\nyou MUST return the actual complete content as the final answer, not a summary.",
|
|
||||||
"tool_names": "",
|
|
||||||
"tools": "",
|
|
||||||
"ask_for_human_input": False,
|
|
||||||
}
|
|
||||||
),
|
|
||||||
mock.call(
|
|
||||||
{
|
|
||||||
"input": "This will fail\n\nThis is the expect criteria for your final answer: hi\nyou MUST return the actual complete content as the final answer, not a summary.",
|
|
||||||
"tool_names": "",
|
|
||||||
"tools": "",
|
|
||||||
"ask_for_human_input": False,
|
|
||||||
}
|
|
||||||
),
|
|
||||||
]
|
|
||||||
)
|
|
||||||
print("made it here")
|
|
||||||
|
|
||||||
# Verify the error event was emitted
|
|
||||||
assert len(received_events) == 1
|
assert len(received_events) == 1
|
||||||
|
assert received_events[0].task == task
|
||||||
|
assert "has no agent assigned" in received_events[0].error
|
||||||
assert isinstance(received_events[0].timestamp, datetime)
|
assert isinstance(received_events[0].timestamp, datetime)
|
||||||
assert received_events[0].type == "agent_execution_error"
|
assert received_events[0].type == "task_failed"
|
||||||
assert received_events[0].agent == failing_agent
|
|
||||||
assert received_events[0].task == failing_task
|
|
||||||
assert error_message in received_events[0].error
|
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||||
|
def test_agent_emits_execution_started_and_completed_events():
|
||||||
|
received_events = []
|
||||||
|
|
||||||
|
@event_bus.on(AgentExecutionStarted)
|
||||||
|
def handle_agent_start(source, event):
|
||||||
|
received_events.append(event)
|
||||||
|
|
||||||
|
@event_bus.on(AgentExecutionCompleted)
|
||||||
|
def handle_agent_completed(source, event):
|
||||||
|
received_events.append(event)
|
||||||
|
|
||||||
|
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
|
||||||
|
crew.kickoff()
|
||||||
|
assert len(received_events) == 2
|
||||||
|
assert received_events[0].agent == base_agent
|
||||||
|
assert received_events[0].task == base_task
|
||||||
|
assert received_events[0].tools == []
|
||||||
|
assert received_events[0].inputs == {
|
||||||
|
"ask_for_human_input": False,
|
||||||
|
"input": "Just say hi\n"
|
||||||
|
"\n"
|
||||||
|
"This is the expect criteria for your final answer: hi\n"
|
||||||
|
"you MUST return the actual complete content as the final answer, not a "
|
||||||
|
"summary.",
|
||||||
|
"tool_names": "",
|
||||||
|
"tools": "",
|
||||||
|
}
|
||||||
|
assert isinstance(received_events[0].timestamp, datetime)
|
||||||
|
assert received_events[0].type == "agent_execution_started"
|
||||||
|
assert isinstance(received_events[1].timestamp, datetime)
|
||||||
|
assert received_events[1].type == "agent_execution_completed"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||||
|
def test_agent_emits_execution_error_event():
|
||||||
|
received_events = []
|
||||||
|
|
||||||
|
@event_bus.on(AgentExecutionError)
|
||||||
|
def handle_agent_start(source, event):
|
||||||
|
received_events.append(event)
|
||||||
|
|
||||||
|
error_message = "Error happening while sending prompt to model."
|
||||||
|
base_agent.max_retry_limit = 0
|
||||||
|
with patch.object(
|
||||||
|
CrewAgentExecutor, "invoke", wraps=base_agent.agent_executor.invoke
|
||||||
|
) as invoke_mock:
|
||||||
|
invoke_mock.side_effect = Exception(error_message)
|
||||||
|
|
||||||
|
with pytest.raises(Exception) as e:
|
||||||
|
base_agent.execute_task(
|
||||||
|
task=base_task,
|
||||||
|
)
|
||||||
|
|
||||||
|
assert len(received_events) == 1
|
||||||
|
assert received_events[0].agent == base_agent
|
||||||
|
assert received_events[0].task == base_task
|
||||||
|
assert received_events[0].error == error_message
|
||||||
|
assert isinstance(received_events[0].timestamp, datetime)
|
||||||
|
assert received_events[0].type == "agent_execution_error"
|
||||||
|
|
||||||
|
|
||||||
|
class SayHiTool(BaseTool):
|
||||||
|
name: str = Field(default="say_hi", description="The name of the tool")
|
||||||
|
description: str = Field(
|
||||||
|
default="Say hi", description="The description of the tool"
|
||||||
|
)
|
||||||
|
|
||||||
|
def _run(self) -> str:
|
||||||
|
return "hi"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||||
|
def test_tools_emits_finished_events():
|
||||||
|
received_events = []
|
||||||
|
|
||||||
|
@event_bus.on(ToolUsageFinished)
|
||||||
|
def handle_tool_end(source, event):
|
||||||
|
received_events.append(event)
|
||||||
|
|
||||||
|
agent = Agent(
|
||||||
|
role="base_agent",
|
||||||
|
goal="Just say hi",
|
||||||
|
backstory="You are a helpful assistant that just says hi",
|
||||||
|
tools=[SayHiTool()],
|
||||||
|
)
|
||||||
|
|
||||||
|
task = Task(
|
||||||
|
description="Just say hi",
|
||||||
|
expected_output="hi",
|
||||||
|
agent=agent,
|
||||||
|
)
|
||||||
|
crew = Crew(agents=[agent], tasks=[task], name="TestCrew")
|
||||||
|
crew.kickoff()
|
||||||
|
assert len(received_events) == 1
|
||||||
|
assert received_events[0].agent_key == agent.key
|
||||||
|
assert received_events[0].agent_role == agent.role
|
||||||
|
assert received_events[0].tool_name == SayHiTool().name
|
||||||
|
assert received_events[0].tool_args == {}
|
||||||
|
assert received_events[0].type == "tool_usage_finished"
|
||||||
|
assert isinstance(received_events[0].timestamp, datetime)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||||
|
def test_tools_emits_error_events():
|
||||||
|
received_events = []
|
||||||
|
|
||||||
|
@event_bus.on(ToolUsageError)
|
||||||
|
def handle_tool_end(source, event):
|
||||||
|
received_events.append(event)
|
||||||
|
|
||||||
|
class ErrorTool(BaseTool):
|
||||||
|
name: str = Field(
|
||||||
|
default="error_tool", description="A tool that raises an error"
|
||||||
|
)
|
||||||
|
description: str = Field(
|
||||||
|
default="This tool always raises an error",
|
||||||
|
description="The description of the tool",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _run(self) -> str:
|
||||||
|
raise Exception("Simulated tool error")
|
||||||
|
|
||||||
|
agent = Agent(
|
||||||
|
role="base_agent",
|
||||||
|
goal="Try to use the error tool",
|
||||||
|
backstory="You are an assistant that tests error handling",
|
||||||
|
tools=[ErrorTool()],
|
||||||
|
)
|
||||||
|
|
||||||
|
task = Task(
|
||||||
|
description="Use the error tool",
|
||||||
|
expected_output="This should error",
|
||||||
|
agent=agent,
|
||||||
|
)
|
||||||
|
|
||||||
|
crew = Crew(agents=[agent], tasks=[task], name="TestCrew")
|
||||||
|
crew.kickoff()
|
||||||
|
|
||||||
|
assert len(received_events) == 60
|
||||||
|
assert received_events[0].agent_key == agent.key
|
||||||
|
assert received_events[0].agent_role == agent.role
|
||||||
|
assert received_events[0].tool_name == "error_tool"
|
||||||
|
assert received_events[0].tool_args == {}
|
||||||
|
assert received_events[0].error == "Simulated tool error"
|
||||||
|
assert received_events[0].type == "tool_usage_error"
|
||||||
|
assert isinstance(received_events[0].timestamp, datetime)
|
||||||
|
|
||||||
|
|
||||||
|
def test_flow_emits_start_event():
    received_events = []

    with event_bus.scoped_handlers():

        @event_bus.on(FlowStarted)
        def handle_flow_start(source, event):
            received_events.append(event)

        class TestFlow(Flow[dict]):
            @start()
            def begin(self):
                return "started"

        flow = TestFlow()
        flow.kickoff()

        assert len(received_events) == 1
        assert received_events[0].flow_name == "TestFlow"
        assert received_events[0].type == "flow_started"


def test_flow_emits_finish_event():
    received_events = []

    with event_bus.scoped_handlers():

        @event_bus.on(FlowFinished)
        def handle_flow_finish(source, event):
            received_events.append(event)

        class TestFlow(Flow[dict]):
            @start()
            def begin(self):
                return "completed"

        flow = TestFlow()
        result = flow.kickoff()

        assert len(received_events) == 1
        assert received_events[0].flow_name == "TestFlow"
        assert received_events[0].type == "flow_finished"
        assert received_events[0].result == "completed"
        assert result == "completed"


def test_flow_emits_method_execution_started_event():
    received_events = []

    with event_bus.scoped_handlers():

        @event_bus.on(MethodExecutionStarted)
        def handle_method_start(source, event):
            print("event in method name", event.method_name)
            received_events.append(event)

        class TestFlow(Flow[dict]):
            @start()
            def begin(self):
                return "started"

            @listen("begin")
            def second_method(self):
                return "executed"

        flow = TestFlow()
        flow.kickoff()

        assert len(received_events) == 1

        assert received_events[0].method_name == "second_method"
        assert received_events[0].flow_name == "TestFlow"
        assert received_events[0].type == "method_execution_started"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_register_handler_adds_new_handler():
    received_events = []

    def custom_handler(source, event):
        received_events.append(event)

    with event_bus.scoped_handlers():
        event_bus.register_handler(CrewKickoffStarted, custom_handler)

        crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
        crew.kickoff()

        assert len(received_events) == 1
        assert isinstance(received_events[0].timestamp, datetime)
        assert received_events[0].type == "crew_kickoff_started"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_multiple_handlers_for_same_event():
    received_events_1 = []
    received_events_2 = []

    def handler_1(source, event):
        received_events_1.append(event)

    def handler_2(source, event):
        received_events_2.append(event)

    with event_bus.scoped_handlers():
        event_bus.register_handler(CrewKickoffStarted, handler_1)
        event_bus.register_handler(CrewKickoffStarted, handler_2)

        crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
        crew.kickoff()

        assert len(received_events_1) == 1
        assert len(received_events_2) == 1
        assert received_events_1[0].type == "crew_kickoff_started"
        assert received_events_2[0].type == "crew_kickoff_started"