Mirror of https://github.com/crewAIInc/crewAI.git (synced 2025-12-16 20:38:29 +00:00)

Compare commits: devin/1744 ... better/eve (47 commits)
| SHA1 |
|---|
| a88e910a7a |
| 73ee7ce1c9 |
| 74727592bc |
| 5a022fe6c0 |
| 34f5469490 |
| f68a85a6f5 |
| 390031026a |
| ae4c4cffc4 |
| b623960a94 |
| d368efdeda |
| 4dc258a590 |
| c64c0698c5 |
| 6fea26d223 |
| 1b5cc08abe |
| e9dc68723f |
| d0f9abaa85 |
| 935da884ed |
| 64569ce130 |
| 1603a1d9ac |
| 6d1bcff6d1 |
| aa2e7c888e |
| ec048cf6fe |
| 18791eadd3 |
| 6eab0e3d3b |
| fe7c8b2049 |
| 1c2903abea |
| f4547648b4 |
| a557275112 |
| e17159f877 |
| 7d168d6d61 |
| 766422dd5e |
| 3e3e68ed75 |
| 43064e2a0e |
| 184d08e6e7 |
| 00a98cd5c9 |
| 62a20426a5 |
| 097ed1f0df |
| fa5d7a2e05 |
| 779db3c3dd |
| 9debd3a6da |
| 1250388635 |
| 25453f7cb1 |
| f70162c064 |
| 3a89b9feab |
| 9eb5b361dd |
| 676cabfdd6 |
| 95bae8bba3 |
@@ -19,25 +19,17 @@ from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.utilities import Converter, Prompts
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.converter import generate_model_description
from crewai.utilities.events.agent_events import (
    AgentExecutionCompletedEvent,
    AgentExecutionErrorEvent,
    AgentExecutionStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler

agentops = None

try:
    import agentops  # type: ignore # Name "agentops" is already defined
    from agentops import track_agent  # type: ignore
except ImportError:

    def track_agent():
        def noop(f):
            return f

        return noop


@track_agent()
class Agent(BaseAgent):
    """Represents an agent in a system.

@@ -240,6 +232,15 @@ class Agent(BaseAgent):
            task_prompt = self._use_trained_data(task_prompt=task_prompt)

        try:
            crewai_event_bus.emit(
                self,
                event=AgentExecutionStartedEvent(
                    agent=self,
                    tools=self.tools,
                    task_prompt=task_prompt,
                    task=task,
                ),
            )
            result = self.agent_executor.invoke(
                {
                    "input": task_prompt,
@@ -251,9 +252,25 @@
        except Exception as e:
            if e.__class__.__module__.startswith("litellm"):
                # Do not retry on litellm errors
                crewai_event_bus.emit(
                    self,
                    event=AgentExecutionErrorEvent(
                        agent=self,
                        task=task,
                        error=str(e),
                    ),
                )
                raise e
            self._times_executed += 1
            if self._times_executed > self.max_retry_limit:
                crewai_event_bus.emit(
                    self,
                    event=AgentExecutionErrorEvent(
                        agent=self,
                        task=task,
                        error=str(e),
                    ),
                )
                raise e
            result = self.execute_task(task, context, tools)

@@ -266,7 +283,10 @@
        for tool_result in self.tools_results:  # type: ignore # Item "None" of "list[Any] | None" has no attribute "__iter__" (not iterable)
            if tool_result.get("result_as_answer", False):
                result = tool_result["result"]

        crewai_event_bus.emit(
            self,
            event=AgentExecutionCompletedEvent(agent=self, task=task, output=result),
        )
        return result

    def create_agent_executor(
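The diff above routes agent execution through the new event bus. A minimal sketch of how a consumer might observe those events, assuming the import paths introduced in this changeset (handler bodies are illustrative only):

```python
from typing import Any

from crewai.utilities.events import (
    AgentExecutionCompletedEvent,
    AgentExecutionStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus


@crewai_event_bus.on(AgentExecutionStartedEvent)
def log_agent_started(source: Any, event: AgentExecutionStartedEvent) -> None:
    # event.agent and event.task_prompt are fields defined in agent_events.py
    print(f"Agent '{event.agent.role}' started: {event.task_prompt[:80]}")


@crewai_event_bus.on(AgentExecutionCompletedEvent)
def log_agent_completed(source: Any, event: AgentExecutionCompletedEvent) -> None:
    # event.output is the raw string result emitted by Agent.execute_task
    print(f"Agent '{event.agent.role}' finished: {event.output[:80]}")
```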
@@ -20,8 +20,7 @@ from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.tools_handler import ToolsHandler
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.tools import BaseTool
from crewai.tools.base_tool import Tool
from crewai.tools.base_tool import BaseTool, Tool
from crewai.utilities import I18N, Logger, RPMController
from crewai.utilities.config import process_config
from crewai.utilities.converter import Converter
@@ -112,7 +111,7 @@ class BaseAgent(ABC, BaseModel):
        default=False,
        description="Enable agent to delegate and ask questions among each other.",
    )
    tools: Optional[List[Any]] = Field(
    tools: Optional[List[BaseTool]] = Field(
        default_factory=list, description="Tools at agents' disposal"
    )
    max_iter: int = Field(
@@ -18,6 +18,12 @@ from crewai.tools.base_tool import BaseTool
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.utilities import I18N, Printer
from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
from crewai.utilities.events import (
    ToolUsageErrorEvent,
    ToolUsageStartedEvent,
    crewai_event_bus,
)
from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)
@@ -107,11 +113,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                )
                raise
            except Exception as e:
                self._handle_unknown_error(e)
                if e.__class__.__module__.startswith("litellm"):
                    # Do not retry on litellm errors
                    raise e
                else:
                    self._handle_unknown_error(e)
                    raise e

        if self.ask_for_human_input:
@@ -349,40 +355,68 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        )

    def _execute_tool_and_check_finality(self, agent_action: AgentAction) -> ToolResult:
        tool_usage = ToolUsage(
            tools_handler=self.tools_handler,
            tools=self.tools,
            original_tools=self.original_tools,
            tools_description=self.tools_description,
            tools_names=self.tools_names,
            function_calling_llm=self.function_calling_llm,
            task=self.task,  # type: ignore[arg-type]
            agent=self.agent,
            action=agent_action,
        )
        tool_calling = tool_usage.parse_tool_calling(agent_action.text)

        if isinstance(tool_calling, ToolUsageErrorException):
            tool_result = tool_calling.message
            return ToolResult(result=tool_result, result_as_answer=False)
        else:
            if tool_calling.tool_name.casefold().strip() in [
                name.casefold().strip() for name in self.tool_name_to_tool_map
            ] or tool_calling.tool_name.casefold().replace("_", " ") in [
                name.casefold().strip() for name in self.tool_name_to_tool_map
            ]:
                tool_result = tool_usage.use(tool_calling, agent_action.text)
                tool = self.tool_name_to_tool_map.get(tool_calling.tool_name)
                if tool:
                    return ToolResult(
                        result=tool_result, result_as_answer=tool.result_as_answer
                    )
            else:
                tool_result = self._i18n.errors("wrong_tool_name").format(
                    tool=tool_calling.tool_name,
                    tools=", ".join([tool.name.casefold() for tool in self.tools]),
        try:
            if self.agent:
                crewai_event_bus.emit(
                    self,
                    event=ToolUsageStartedEvent(
                        agent_key=self.agent.key,
                        agent_role=self.agent.role,
                        tool_name=agent_action.tool,
                        tool_args=agent_action.tool_input,
                        tool_class=agent_action.tool,
                    ),
                )
            return ToolResult(result=tool_result, result_as_answer=False)
            tool_usage = ToolUsage(
                tools_handler=self.tools_handler,
                tools=self.tools,
                original_tools=self.original_tools,
                tools_description=self.tools_description,
                tools_names=self.tools_names,
                function_calling_llm=self.function_calling_llm,
                task=self.task,  # type: ignore[arg-type]
                agent=self.agent,
                action=agent_action,
            )
            tool_calling = tool_usage.parse_tool_calling(agent_action.text)

            if isinstance(tool_calling, ToolUsageErrorException):
                tool_result = tool_calling.message
                return ToolResult(result=tool_result, result_as_answer=False)
            else:
                if tool_calling.tool_name.casefold().strip() in [
                    name.casefold().strip() for name in self.tool_name_to_tool_map
                ] or tool_calling.tool_name.casefold().replace("_", " ") in [
                    name.casefold().strip() for name in self.tool_name_to_tool_map
                ]:
                    tool_result = tool_usage.use(tool_calling, agent_action.text)
                    tool = self.tool_name_to_tool_map.get(tool_calling.tool_name)
                    if tool:
                        return ToolResult(
                            result=tool_result, result_as_answer=tool.result_as_answer
                        )
                else:
                    tool_result = self._i18n.errors("wrong_tool_name").format(
                        tool=tool_calling.tool_name,
                        tools=", ".join([tool.name.casefold() for tool in self.tools]),
                    )
                return ToolResult(result=tool_result, result_as_answer=False)

        except Exception as e:
            # TODO: drop
            if self.agent:
                crewai_event_bus.emit(
                    self,
                    event=ToolUsageErrorEvent(  # validation error
                        agent_key=self.agent.key,
                        agent_role=self.agent.role,
                        tool_name=agent_action.tool,
                        tool_args=agent_action.tool_input,
                        tool_class=agent_action.tool,
                        error=str(e),
                    ),
                )
            raise e

    def _summarize_messages(self) -> None:
        messages_groups = []
@@ -44,6 +44,18 @@ from crewai.utilities import I18N, FileHandler, Logger, RPMController
from crewai.utilities.constants import TRAINING_DATA_FILE
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.events.crew_events import (
    CrewKickoffCompletedEvent,
    CrewKickoffFailedEvent,
    CrewKickoffStartedEvent,
    CrewTestCompletedEvent,
    CrewTestFailedEvent,
    CrewTestStartedEvent,
    CrewTrainCompletedEvent,
    CrewTrainFailedEvent,
    CrewTrainStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.formatter import (
    aggregate_raw_outputs_from_task_outputs,
    aggregate_raw_outputs_from_tasks,
@@ -53,12 +65,6 @@ from crewai.utilities.planning_handler import CrewPlanner
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
from crewai.utilities.training_handler import CrewTrainingHandler

try:
    import agentops  # type: ignore
except ImportError:
    agentops = None


warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")


@@ -522,10 +528,19 @@ class Crew(BaseModel):
        self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {}
    ) -> None:
        """Trains the crew for a given number of iterations."""
        train_crew = self.copy()
        train_crew._setup_for_training(filename)

        try:
            crewai_event_bus.emit(
                self,
                CrewTrainStartedEvent(
                    crew_name=self.name or "crew",
                    n_iterations=n_iterations,
                    filename=filename,
                    inputs=inputs,
                ),
            )
            train_crew = self.copy()
            train_crew._setup_for_training(filename)

            for n_iteration in range(n_iterations):
                train_crew._train_iteration = n_iteration
                train_crew.kickoff(inputs=inputs)
@@ -540,7 +555,20 @@
                CrewTrainingHandler(filename).save_trained_data(
                    agent_id=str(agent.role), trained_data=result.model_dump()
                )

            crewai_event_bus.emit(
                self,
                CrewTrainCompletedEvent(
                    crew_name=self.name or "crew",
                    n_iterations=n_iterations,
                    filename=filename,
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                CrewTrainFailedEvent(error=str(e), crew_name=self.name or "crew"),
            )
            self._logger.log("error", f"Training failed: {e}", color="red")
            CrewTrainingHandler(TRAINING_DATA_FILE).clear()
            CrewTrainingHandler(filename).clear()
@@ -551,60 +579,70 @@
        self,
        inputs: Optional[Dict[str, Any]] = None,
    ) -> CrewOutput:
        for before_callback in self.before_kickoff_callbacks:
            if inputs is None:
                inputs = {}
            inputs = before_callback(inputs)
        try:
            for before_callback in self.before_kickoff_callbacks:
                if inputs is None:
                    inputs = {}
                inputs = before_callback(inputs)

        """Starts the crew to work on its assigned tasks."""
        self._execution_span = self._telemetry.crew_execution_span(self, inputs)
        self._task_output_handler.reset()
        self._logging_color = "bold_purple"

        if inputs is not None:
            self._inputs = inputs
            self._interpolate_inputs(inputs)
        self._set_tasks_callbacks()

        i18n = I18N(prompt_file=self.prompt_file)

        for agent in self.agents:
            agent.i18n = i18n
            # type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
            agent.crew = self  # type: ignore[attr-defined]
            # TODO: Create an AgentFunctionCalling protocol for future refactoring
            if not agent.function_calling_llm:  # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
                agent.function_calling_llm = self.function_calling_llm  # type: ignore # "BaseAgent" has no attribute "function_calling_llm"

            if not agent.step_callback:  # type: ignore # "BaseAgent" has no attribute "step_callback"
                agent.step_callback = self.step_callback  # type: ignore # "BaseAgent" has no attribute "step_callback"

            agent.create_agent_executor()

        if self.planning:
            self._handle_crew_planning()

        metrics: List[UsageMetrics] = []

        if self.process == Process.sequential:
            result = self._run_sequential_process()
        elif self.process == Process.hierarchical:
            result = self._run_hierarchical_process()
        else:
            raise NotImplementedError(
                f"The process '{self.process}' is not implemented yet."
            crewai_event_bus.emit(
                self,
                CrewKickoffStartedEvent(crew_name=self.name or "crew", inputs=inputs),
            )

        for after_callback in self.after_kickoff_callbacks:
            result = after_callback(result)
            # Starts the crew to work on its assigned tasks.
            self._task_output_handler.reset()
            self._logging_color = "bold_purple"

        metrics += [agent._token_process.get_summary() for agent in self.agents]
            if inputs is not None:
                self._inputs = inputs
                self._interpolate_inputs(inputs)
            self._set_tasks_callbacks()

        self.usage_metrics = UsageMetrics()
        for metric in metrics:
            self.usage_metrics.add_usage_metrics(metric)
            i18n = I18N(prompt_file=self.prompt_file)

        return result
            for agent in self.agents:
                agent.i18n = i18n
                # type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
                agent.crew = self  # type: ignore[attr-defined]
                # TODO: Create an AgentFunctionCalling protocol for future refactoring
                if not agent.function_calling_llm:  # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
                    agent.function_calling_llm = self.function_calling_llm  # type: ignore # "BaseAgent" has no attribute "function_calling_llm"

                if not agent.step_callback:  # type: ignore # "BaseAgent" has no attribute "step_callback"
                    agent.step_callback = self.step_callback  # type: ignore # "BaseAgent" has no attribute "step_callback"

                agent.create_agent_executor()

            if self.planning:
                self._handle_crew_planning()

            metrics: List[UsageMetrics] = []

            if self.process == Process.sequential:
                result = self._run_sequential_process()
            elif self.process == Process.hierarchical:
                result = self._run_hierarchical_process()
            else:
                raise NotImplementedError(
                    f"The process '{self.process}' is not implemented yet."
                )

            for after_callback in self.after_kickoff_callbacks:
                result = after_callback(result)

            metrics += [agent._token_process.get_summary() for agent in self.agents]

            self.usage_metrics = UsageMetrics()
            for metric in metrics:
                self.usage_metrics.add_usage_metrics(metric)
            return result
        except Exception as e:
            crewai_event_bus.emit(
                self,
                CrewKickoffFailedEvent(error=str(e), crew_name=self.name or "crew"),
            )
            raise

    def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List[CrewOutput]:
        """Executes the Crew's workflow for each input in the list and aggregates results."""
@@ -952,7 +990,12 @@
        final_string_output = final_task_output.raw
        self._finish_execution(final_string_output)
        token_usage = self.calculate_usage_metrics()

        crewai_event_bus.emit(
            self,
            CrewKickoffCompletedEvent(
                crew_name=self.name or "crew", output=final_task_output
            ),
        )
        return CrewOutput(
            raw=final_task_output.raw,
            pydantic=final_task_output.pydantic,
@@ -1138,13 +1181,6 @@
    def _finish_execution(self, final_string_output: str) -> None:
        if self.max_rpm:
            self._rpm_controller.stop_rpm_counter()
        if agentops:
            agentops.end_session(
                end_state="Success",
                end_state_reason="Finished Execution",
                is_auto_end=True,
            )
        self._telemetry.end_crew(self, final_string_output)

    def calculate_usage_metrics(self) -> UsageMetrics:
        """Calculates and returns the usage metrics."""
@@ -1166,26 +1202,41 @@
        inputs: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
        test_crew = self.copy()
        try:
            eval_llm = create_llm(eval_llm)
            if not eval_llm:
                raise ValueError("Failed to create LLM instance.")

        eval_llm = create_llm(eval_llm)
            crewai_event_bus.emit(
                self,
                CrewTestStartedEvent(
                    crew_name=self.name or "crew",
                    n_iterations=n_iterations,
                    eval_llm=eval_llm,
                    inputs=inputs,
                ),
            )
            test_crew = self.copy()
            evaluator = CrewEvaluator(test_crew, eval_llm)  # type: ignore[arg-type]

        if not eval_llm:
            raise ValueError("Failed to create LLM instance.")
            for i in range(1, n_iterations + 1):
                evaluator.set_iteration(i)
                test_crew.kickoff(inputs=inputs)

        self._test_execution_span = test_crew._telemetry.test_execution_span(
            test_crew,
            n_iterations,
            inputs,
            eval_llm.model,  # type: ignore[arg-type]
        )  # type: ignore[arg-type]
        evaluator = CrewEvaluator(test_crew, eval_llm)  # type: ignore[arg-type]
            evaluator.print_crew_evaluation_result()

        for i in range(1, n_iterations + 1):
            evaluator.set_iteration(i)
            test_crew.kickoff(inputs=inputs)

        evaluator.print_crew_evaluation_result()
            crewai_event_bus.emit(
                self,
                CrewTestCompletedEvent(
                    crew_name=self.name or "crew",
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                CrewTestFailedEvent(error=str(e), crew_name=self.name or "crew"),
            )
            raise

    def __repr__(self):
        return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
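The crew kickoff, train, and test paths above now emit lifecycle events through the same bus. A hedged sketch of subscribing to the kickoff events (event names and fields come from this changeset; the logging is illustrative only):

```python
from typing import Any

from crewai.utilities.events import (
    CrewKickoffCompletedEvent,
    CrewKickoffFailedEvent,
    CrewKickoffStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus


@crewai_event_bus.on(CrewKickoffStartedEvent)
def on_kickoff_started(source: Any, event: CrewKickoffStartedEvent) -> None:
    print(f"Crew '{event.crew_name}' started with inputs: {event.inputs}")


@crewai_event_bus.on(CrewKickoffCompletedEvent)
def on_kickoff_completed(source: Any, event: CrewKickoffCompletedEvent) -> None:
    # event.output is the final TaskOutput passed in Crew._finish_execution's caller
    print(f"Crew '{event.crew_name}' finished: {event.output}")


@crewai_event_bus.on(CrewKickoffFailedEvent)
def on_kickoff_failed(source: Any, event: CrewKickoffFailedEvent) -> None:
    print(f"Crew '{event.crew_name}' failed: {event.error}")
```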
@@ -17,23 +17,25 @@ from typing import (
)
from uuid import uuid4

from blinker import Signal
from pydantic import BaseModel, Field, ValidationError

from crewai.flow.flow_events import (
    FlowFinishedEvent,
    FlowStartedEvent,
    MethodExecutionFinishedEvent,
    MethodExecutionStartedEvent,
)
from crewai.flow.flow_visualizer import plot_flow
from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.utils import get_possible_return_constants
from crewai.telemetry import Telemetry
from crewai.traces.unified_trace_controller import (
    init_flow_main_trace,
    trace_flow_step,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.flow_events import (
    FlowCreatedEvent,
    FlowFinishedEvent,
    FlowPlotEvent,
    FlowStartedEvent,
    MethodExecutionFailedEvent,
    MethodExecutionFinishedEvent,
    MethodExecutionStartedEvent,
)
from crewai.utilities.printer import Printer

logger = logging.getLogger(__name__)
@@ -431,7 +433,6 @@ class Flow(Generic[T], metaclass=FlowMeta):

    Type parameter T must be either Dict[str, Any] or a subclass of BaseModel."""

    _telemetry = Telemetry()
    _printer = Printer()

    _start_methods: List[str] = []
@@ -439,7 +440,6 @@
    _routers: Set[str] = set()
    _router_paths: Dict[str, List[str]] = {}
    initial_state: Union[Type[T], T, None] = None
    event_emitter = Signal("event_emitter")

    def __class_getitem__(cls: Type["Flow"], item: Type[T]) -> Type["Flow"]:
        class _FlowGeneric(cls):  # type: ignore
@@ -473,7 +473,13 @@
        if kwargs:
            self._initialize_state(kwargs)

        self._telemetry.flow_creation_span(self.__class__.__name__)
        crewai_event_bus.emit(
            self,
            FlowCreatedEvent(
                type="flow_created",
                flow_name=self.__class__.__name__,
            ),
        )

        # Register all flow-related methods
        for method_name in dir(self):
@@ -742,9 +748,9 @@
            self._initialize_state(filtered_inputs)

        # Start flow execution
        self.event_emitter.send(
        crewai_event_bus.emit(
            self,
            event=FlowStartedEvent(
            FlowStartedEvent(
                type="flow_started",
                flow_name=self.__class__.__name__,
                inputs=inputs,
@@ -767,10 +773,6 @@
        if not self._start_methods:
            raise ValueError("No start method defined")

        self._telemetry.flow_execution_span(
            self.__class__.__name__, list(self._methods.keys())
        )

        tasks = [
            self._execute_start_method(start_method)
            for start_method in self._start_methods
@@ -779,9 +781,9 @@

        final_output = self._method_outputs[-1] if self._method_outputs else None

        self.event_emitter.send(
        crewai_event_bus.emit(
            self,
            event=FlowFinishedEvent(
            FlowFinishedEvent(
                type="flow_finished",
                flow_name=self.__class__.__name__,
                result=final_output,
@@ -816,40 +818,55 @@
    async def _execute_method(
        self, method_name: str, method: Callable, *args: Any, **kwargs: Any
    ) -> Any:
        dumped_params = {f"_{i}": arg for i, arg in enumerate(args)} | (kwargs or {})
        self.event_emitter.send(
            self,
            event=MethodExecutionStartedEvent(
                type="method_execution_started",
                method_name=method_name,
                flow_name=self.__class__.__name__,
                params=dumped_params,
                state=self._copy_state(),
            ),
        )
        try:
            dumped_params = {f"_{i}": arg for i, arg in enumerate(args)} | (
                kwargs or {}
            )
            crewai_event_bus.emit(
                self,
                MethodExecutionStartedEvent(
                    type="method_execution_started",
                    method_name=method_name,
                    flow_name=self.__class__.__name__,
                    params=dumped_params,
                    state=self._copy_state(),
                ),
            )

        result = (
            await method(*args, **kwargs)
            if asyncio.iscoroutinefunction(method)
            else method(*args, **kwargs)
        )
        self._method_outputs.append(result)
        self._method_execution_counts[method_name] = (
            self._method_execution_counts.get(method_name, 0) + 1
        )
            result = (
                await method(*args, **kwargs)
                if asyncio.iscoroutinefunction(method)
                else method(*args, **kwargs)
            )

        self.event_emitter.send(
            self,
            event=MethodExecutionFinishedEvent(
                type="method_execution_finished",
                method_name=method_name,
                flow_name=self.__class__.__name__,
                state=self._copy_state(),
                result=result,
            ),
        )
            self._method_outputs.append(result)
            self._method_execution_counts[method_name] = (
                self._method_execution_counts.get(method_name, 0) + 1
            )

        return result
            crewai_event_bus.emit(
                self,
                MethodExecutionFinishedEvent(
                    type="method_execution_finished",
                    method_name=method_name,
                    flow_name=self.__class__.__name__,
                    state=self._copy_state(),
                    result=result,
                ),
            )

            return result
        except Exception as e:
            crewai_event_bus.emit(
                self,
                MethodExecutionFailedEvent(
                    type="method_execution_failed",
                    method_name=method_name,
                    flow_name=self.__class__.__name__,
                    error=e,
                ),
            )
            raise e

    async def _execute_listeners(self, trigger_method: str, result: Any) -> None:
        """
@@ -987,6 +1004,7 @@
        """
        try:
            method = self._methods[listener_name]

            sig = inspect.signature(method)
            params = list(sig.parameters.values())
            method_params = [p for p in params if p.name != "self"]
@@ -1036,7 +1054,11 @@
            logger.warning(message)

    def plot(self, filename: str = "crewai_flow") -> None:
        self._telemetry.flow_plotting_span(
            self.__class__.__name__, list(self._methods.keys())
        crewai_event_bus.emit(
            self,
            FlowPlotEvent(
                type="flow_plot",
                flow_name=self.__class__.__name__,
            ),
        )
        plot_flow(self, filename)
@@ -1,39 +0,0 @@
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, Optional, Union

from pydantic import BaseModel


@dataclass
class Event:
    type: str
    flow_name: str
    timestamp: datetime = field(init=False)

    def __post_init__(self):
        self.timestamp = datetime.now()


@dataclass
class FlowStartedEvent(Event):
    inputs: Optional[Dict[str, Any]] = None


@dataclass
class MethodExecutionStartedEvent(Event):
    method_name: str
    state: Union[Dict[str, Any], BaseModel]
    params: Optional[Dict[str, Any]] = None


@dataclass
class MethodExecutionFinishedEvent(Event):
    method_name: str
    state: Union[Dict[str, Any], BaseModel]
    result: Any = None


@dataclass
class FlowFinishedEvent(Event):
    result: Optional[Any] = None
@@ -76,7 +76,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
                        "context": fetched["documents"][0][i],  # type: ignore
                        "score": fetched["distances"][0][i],  # type: ignore
                    }
                    if result["score"] >= score_threshold:  # type: ignore
                    if result["score"] >= score_threshold:
                        results.append(result)
                return results
            else:
@@ -21,6 +21,8 @@ from typing import (
from dotenv import load_dotenv
from pydantic import BaseModel

from crewai.utilities.events.tool_usage_events import ToolExecutionErrorEvent

with warnings.catch_warnings():
    warnings.simplefilter("ignore", UserWarning)
    import litellm
@@ -30,6 +32,7 @@ with warnings.catch_warnings():


from crewai.traces.unified_trace_controller import trace_llm_call
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)
@@ -335,7 +338,7 @@ class LLM:
            # --- 5) Handle the tool call
            tool_call = tool_calls[0]
            function_name = tool_call.function.name

            print("function_name", function_name)
            if function_name in available_functions:
                try:
                    function_args = json.loads(tool_call.function.arguments)
@@ -353,6 +356,15 @@
                    logging.error(
                        f"Error executing function '{function_name}': {e}"
                    )
                    crewai_event_bus.emit(
                        self,
                        event=ToolExecutionErrorEvent(
                            tool_name=function_name,
                            tool_args=function_args,
                            tool_class=fn,
                            error=str(e),
                        ),
                    )
                    return text_response

            else:
@@ -21,7 +21,6 @@ from typing import (
    Union,
)

from opentelemetry.trace import Span
from pydantic import (
    UUID4,
    BaseModel,
@@ -36,10 +35,15 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tasks.guardrail_result import GuardrailResult
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry.telemetry import Telemetry
from crewai.tools.base_tool import BaseTool
from crewai.utilities.config import process_config
from crewai.utilities.converter import Converter, convert_to_model
from crewai.utilities.events import (
    TaskCompletedEvent,
    TaskFailedEvent,
    TaskStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.i18n import I18N
from crewai.utilities.printer import Printer

@@ -183,8 +187,6 @@ class Task(BaseModel):
        )
        return v

    _telemetry: Telemetry = PrivateAttr(default_factory=Telemetry)
    _execution_span: Optional[Span] = PrivateAttr(default=None)
    _original_description: Optional[str] = PrivateAttr(default=None)
    _original_expected_output: Optional[str] = PrivateAttr(default=None)
    _original_output_file: Optional[str] = PrivateAttr(default=None)
@@ -348,100 +350,102 @@
        tools: Optional[List[Any]],
    ) -> TaskOutput:
        """Run the core execution logic of the task."""
        agent = agent or self.agent
        self.agent = agent
        if not agent:
            raise Exception(
                f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
        try:
            agent = agent or self.agent
            self.agent = agent
            if not agent:
                raise Exception(
                    f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
                )

            self.start_time = datetime.datetime.now()

            self.prompt_context = context
            tools = tools or self.tools or []

            self.processed_by_agents.add(agent.role)
            crewai_event_bus.emit(self, TaskStartedEvent(context=context))
            result = agent.execute_task(
                task=self,
                context=context,
                tools=tools,
            )

        self.start_time = datetime.datetime.now()
        self._execution_span = self._telemetry.task_started(crew=agent.crew, task=self)
            pydantic_output, json_output = self._export_output(result)
            task_output = TaskOutput(
                name=self.name,
                description=self.description,
                expected_output=self.expected_output,
                raw=result,
                pydantic=pydantic_output,
                json_dict=json_output,
                agent=agent.role,
                output_format=self._get_output_format(),
            )

        self.prompt_context = context
        tools = tools or self.tools or []
            if self.guardrail:
                guardrail_result = GuardrailResult.from_tuple(
                    self.guardrail(task_output)
                )
                if not guardrail_result.success:
                    if self.retry_count >= self.max_retries:
                        raise Exception(
                            f"Task failed guardrail validation after {self.max_retries} retries. "
                            f"Last error: {guardrail_result.error}"
                        )

        self.processed_by_agents.add(agent.role)
                    self.retry_count += 1
                    context = self.i18n.errors("validation_error").format(
                        guardrail_result_error=guardrail_result.error,
                        task_output=task_output.raw,
                    )
                    printer = Printer()
                    printer.print(
                        content=f"Guardrail blocked, retrying, due to: {guardrail_result.error}\n",
                        color="yellow",
                    )
                    return self._execute_core(agent, context, tools)

        result = agent.execute_task(
            task=self,
            context=context,
            tools=tools,
        )

        pydantic_output, json_output = self._export_output(result)
        task_output = TaskOutput(
            name=self.name,
            description=self.description,
            expected_output=self.expected_output,
            raw=result,
            pydantic=pydantic_output,
            json_dict=json_output,
            agent=agent.role,
            output_format=self._get_output_format(),
        )

        if self.guardrail:
            guardrail_result = GuardrailResult.from_tuple(self.guardrail(task_output))
            if not guardrail_result.success:
                if self.retry_count >= self.max_retries:
                    if guardrail_result.result is None:
                    raise Exception(
                        f"Task failed guardrail validation after {self.max_retries} retries. "
                        f"Last error: {guardrail_result.error}"
                        "Task guardrail returned None as result. This is not allowed."
                    )

                self.retry_count += 1
                context = self.i18n.errors("validation_error").format(
                    guardrail_result_error=guardrail_result.error,
                    task_output=task_output.raw,
                    if isinstance(guardrail_result.result, str):
                        task_output.raw = guardrail_result.result
                        pydantic_output, json_output = self._export_output(
                            guardrail_result.result
                        )
                        task_output.pydantic = pydantic_output
                        task_output.json_dict = json_output
                    elif isinstance(guardrail_result.result, TaskOutput):
                        task_output = guardrail_result.result

            self.output = task_output
            self.end_time = datetime.datetime.now()

            if self.callback:
                self.callback(self.output)

            crew = self.agent.crew  # type: ignore[union-attr]
            if crew and crew.task_callback and crew.task_callback != self.callback:
                crew.task_callback(self.output)

            if self.output_file:
                content = (
                    json_output
                    if json_output
                    else pydantic_output.model_dump_json()
                    if pydantic_output
                    else result
                )
                printer = Printer()
                printer.print(
                    content=f"Guardrail blocked, retrying, due to: {guardrail_result.error}\n",
                    color="yellow",
                )
                return self._execute_core(agent, context, tools)

            if guardrail_result.result is None:
                raise Exception(
                    "Task guardrail returned None as result. This is not allowed."
                )

            if isinstance(guardrail_result.result, str):
                task_output.raw = guardrail_result.result
                pydantic_output, json_output = self._export_output(
                    guardrail_result.result
                )
                task_output.pydantic = pydantic_output
                task_output.json_dict = json_output
            elif isinstance(guardrail_result.result, TaskOutput):
                task_output = guardrail_result.result

        self.output = task_output
        self.end_time = datetime.datetime.now()

        if self.callback:
            self.callback(self.output)

        crew = self.agent.crew  # type: ignore[union-attr]
        if crew and crew.task_callback and crew.task_callback != self.callback:
            crew.task_callback(self.output)

        if self._execution_span:
            self._telemetry.task_ended(self._execution_span, self, agent.crew)
            self._execution_span = None

        if self.output_file:
            content = (
                json_output
                if json_output
                else pydantic_output.model_dump_json()
                if pydantic_output
                else result
            )
            self._save_file(content)

        return task_output
                self._save_file(content)
            crewai_event_bus.emit(self, TaskCompletedEvent(output=task_output))
            return task_output
        except Exception as e:
            self.end_time = datetime.datetime.now()
            crewai_event_bus.emit(self, TaskFailedEvent(error=str(e)))
            raise e  # Re-raise the exception after emitting the event

    def prompt(self) -> str:
        """Prompt the task.
@@ -716,10 +720,9 @@
                file.write(str(result))
        except (OSError, IOError) as e:
            raise RuntimeError(
                "\n".join([
                    f"Failed to save output file: {e}",
                    FILEWRITER_RECOMMENDATION
                ])
                "\n".join(
                    [f"Failed to save output file: {e}", FILEWRITER_RECOMMENDATION]
                )
            )
        return None
@@ -11,20 +11,21 @@ from typing import Any, Dict, List, Optional, Union
import json5
from json_repair import repair_json

import crewai.utilities.events as events
from crewai.agents.tools_handler import ToolsHandler
from crewai.task import Task
from crewai.telemetry import Telemetry
from crewai.tools import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
from crewai.tools.tool_usage_events import ToolUsageError, ToolUsageFinished
from crewai.utilities import I18N, Converter, ConverterError, Printer
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.tool_usage_events import (
    ToolSelectionErrorEvent,
    ToolUsageErrorEvent,
    ToolUsageFinishedEvent,
    ToolValidateInputErrorEvent,
)

try:
    import agentops  # type: ignore
except ImportError:
    agentops = None
OPENAI_BIGGER_MODELS = [
    "gpt-4",
    "gpt-4o",
@@ -140,7 +141,6 @@ class ToolUsage:
        tool: Any,
        calling: Union[ToolCalling, InstructorToolCalling],
    ) -> str:  # TODO: Fix this return type
        tool_event = agentops.ToolEvent(name=calling.tool_name) if agentops else None  # type: ignore
        if self._check_tool_repeated_usage(calling=calling):  # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
            try:
                result = self._i18n.errors("task_repeated_usage").format(
@@ -219,10 +219,6 @@
                return error  # type: ignore # No return value expected

            self.task.increment_tools_errors()
            if agentops:
                agentops.record(
                    agentops.ErrorEvent(exception=e, trigger_event=tool_event)
                )
            return self.use(calling=calling, tool_string=tool_string)  # type: ignore # No return value expected

        if self.tools_handler:
@@ -238,9 +234,6 @@
            self.tools_handler.on_tool_use(
                calling=calling, output=result, should_cache=should_cache
            )

        if agentops:
            agentops.record(tool_event)
        self._telemetry.tool_usage(
            llm=self.function_calling_llm,
            tool_name=tool.name,
@@ -316,14 +309,33 @@
            ):
                return tool
        self.task.increment_tools_errors()
        tool_selection_data = {
            "agent_key": self.agent.key,
            "agent_role": self.agent.role,
            "tool_name": tool_name,
            "tool_args": {},
            "tool_class": self.tools_description,
        }
        if tool_name and tool_name != "":
            raise Exception(
                f"Action '{tool_name}' don't exist, these are the only available Actions:\n{self.tools_description}"
            error = f"Action '{tool_name}' don't exist, these are the only available Actions:\n{self.tools_description}"
            crewai_event_bus.emit(
                self,
                ToolSelectionErrorEvent(
                    **tool_selection_data,
                    error=error,
                ),
            )
            raise Exception(error)
        else:
            raise Exception(
                f"I forgot the Action name, these are the only available Actions: {self.tools_description}"
            error = f"I forgot the Action name, these are the only available Actions: {self.tools_description}"
            crewai_event_bus.emit(
                self,
                ToolSelectionErrorEvent(
                    **tool_selection_data,
                    error=error,
                ),
            )
            raise Exception(error)

    def _render(self) -> str:
        """Render the tool name and description in plain text."""
@@ -459,18 +471,33 @@
            if isinstance(arguments, dict):
                return arguments
        except Exception as e:
            self._printer.print(content=f"Failed to repair JSON: {e}", color="red")
            error = f"Failed to repair JSON: {e}"
            self._printer.print(content=error, color="red")

        # If all parsing attempts fail, raise an error
        raise Exception(
        error_message = (
            "Tool input must be a valid dictionary in JSON or Python literal format"
        )
        self._emit_validate_input_error(error_message)
        # If all parsing attempts fail, raise an error
        raise Exception(error_message)

    def _emit_validate_input_error(self, final_error: str):
        tool_selection_data = {
            "agent_key": self.agent.key,
            "agent_role": self.agent.role,
            "tool_name": self.action.tool,
            "tool_args": str(self.action.tool_input),
            "tool_class": self.__class__.__name__,
        }

        crewai_event_bus.emit(
            self,
            ToolValidateInputErrorEvent(**tool_selection_data, error=final_error),
        )

    def on_tool_error(self, tool: Any, tool_calling: ToolCalling, e: Exception) -> None:
        event_data = self._prepare_event_data(tool, tool_calling)
        events.emit(
            source=self, event=ToolUsageError(**{**event_data, "error": str(e)})
        )
        crewai_event_bus.emit(self, ToolUsageErrorEvent(**{**event_data, "error": e}))

    def on_tool_use_finished(
        self, tool: Any, tool_calling: ToolCalling, from_cache: bool, started_at: float
@@ -484,7 +511,7 @@
                "from_cache": from_cache,
            }
        )
        events.emit(source=self, event=ToolUsageFinished(**event_data))
        crewai_event_bus.emit(self, ToolUsageFinishedEvent(**event_data))

    def _prepare_event_data(self, tool: Any, tool_calling: ToolCalling) -> dict:
        return {
@@ -1,24 +0,0 @@
from datetime import datetime
from typing import Any, Dict

from pydantic import BaseModel


class ToolUsageEvent(BaseModel):
    agent_key: str
    agent_role: str
    tool_name: str
    tool_args: Dict[str, Any]
    tool_class: str
    run_attempts: int | None = None
    delegations: int | None = None


class ToolUsageFinished(ToolUsageEvent):
    started_at: datetime
    finished_at: datetime
    from_cache: bool = False


class ToolUsageError(ToolUsageEvent):
    error: str
@@ -4,3 +4,4 @@ DEFAULT_SCORE_THRESHOLD = 0.35
KNOWLEDGE_DIRECTORY = "knowledge"
MAX_LLM_RETRY = 3
MAX_FILE_NAME_LENGTH = 255
EMITTER_COLOR = "bold_blue"
@@ -3,19 +3,9 @@ from typing import List
from pydantic import BaseModel, Field

from crewai.utilities import Converter
from crewai.utilities.events import TaskEvaluationEvent, crewai_event_bus
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser

agentops = None
try:
    from agentops import track_agent  # type: ignore
except ImportError:

    def track_agent(name):
        def noop(f):
            return f

        return noop


class Entity(BaseModel):
    name: str = Field(description="The name of the entity.")
@@ -48,12 +38,15 @@ class TrainingTaskEvaluation(BaseModel):
    )


@track_agent(name="Task Evaluator")
class TaskEvaluator:
    def __init__(self, original_agent):
        self.llm = original_agent.llm
        self.original_agent = original_agent

    def evaluate(self, task, output) -> TaskEvaluation:
        crewai_event_bus.emit(
            self, TaskEvaluationEvent(evaluation_type="task_evaluation")
        )
        evaluation_query = (
            f"Assess the quality of the task completed based on the description, expected output, and actual results.\n\n"
            f"Task Description:\n{task.description}\n\n"
@@ -90,6 +83,9 @@ class TaskEvaluator:
        - training_data (dict): The training data to be evaluated.
        - agent_id (str): The ID of the agent.
        """
        crewai_event_bus.emit(
            self, TaskEvaluationEvent(evaluation_type="training_data_evaluation")
        )

        output_training_data = training_data[agent_id]
        final_aggregated_data = ""
|
||||
from functools import wraps
|
||||
from typing import Any, Callable, Dict, Generic, List, Type, TypeVar
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
T = TypeVar("T")
|
||||
EVT = TypeVar("EVT", bound=BaseModel)
|
||||
|
||||
|
||||
class Emitter(Generic[T, EVT]):
|
||||
_listeners: Dict[Type[EVT], List[Callable]] = {}
|
||||
|
||||
def on(self, event_type: Type[EVT]):
|
||||
def decorator(func: Callable):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
return func(*args, **kwargs)
|
||||
|
||||
self._listeners.setdefault(event_type, []).append(wrapper)
|
||||
return wrapper
|
||||
|
||||
return decorator
|
||||
|
||||
def emit(self, source: T, event: EVT) -> None:
|
||||
event_type = type(event)
|
||||
for func in self._listeners.get(event_type, []):
|
||||
func(source, event)
|
||||
|
||||
|
||||
default_emitter = Emitter[Any, BaseModel]()
|
||||
|
||||
|
||||
def emit(source: Any, event: BaseModel, raise_on_error: bool = False) -> None:
|
||||
try:
|
||||
default_emitter.emit(source, event)
|
||||
except Exception as e:
|
||||
if raise_on_error:
|
||||
raise e
|
||||
else:
|
||||
print(f"Error emitting event: {e}")
|
||||
|
||||
|
||||
def on(event_type: Type[BaseModel]) -> Callable:
|
||||
return default_emitter.on(event_type)
|
||||
src/crewai/utilities/events/__init__.py (new file, 40 lines)
@@ -0,0 +1,40 @@
from .crew_events import (
    CrewKickoffStartedEvent,
    CrewKickoffCompletedEvent,
    CrewKickoffFailedEvent,
    CrewTrainStartedEvent,
    CrewTrainCompletedEvent,
    CrewTrainFailedEvent,
    CrewTestStartedEvent,
    CrewTestCompletedEvent,
    CrewTestFailedEvent,
)
from .agent_events import (
    AgentExecutionStartedEvent,
    AgentExecutionCompletedEvent,
    AgentExecutionErrorEvent,
)
from .task_events import TaskStartedEvent, TaskCompletedEvent, TaskFailedEvent, TaskEvaluationEvent
from .flow_events import (
    FlowCreatedEvent,
    FlowStartedEvent,
    FlowFinishedEvent,
    FlowPlotEvent,
    MethodExecutionStartedEvent,
    MethodExecutionFinishedEvent,
    MethodExecutionFailedEvent,
)
from .crewai_event_bus import CrewAIEventsBus, crewai_event_bus
from .tool_usage_events import (
    ToolUsageFinishedEvent,
    ToolUsageErrorEvent,
    ToolUsageStartedEvent,
    ToolExecutionErrorEvent,
    ToolSelectionErrorEvent,
    ToolUsageEvent,
    ToolValidateInputErrorEvent,
)

# events
from .event_listener import EventListener
from .third_party.agentops_listener import agentops_listener
src/crewai/utilities/events/agent_events.py (new file, 40 lines)
@@ -0,0 +1,40 @@
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Union

from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool

from .base_events import CrewEvent

if TYPE_CHECKING:
    from crewai.agents.agent_builder.base_agent import BaseAgent


class AgentExecutionStartedEvent(CrewEvent):
    """Event emitted when an agent starts executing a task"""

    agent: BaseAgent
    task: Any
    tools: Optional[Sequence[Union[BaseTool, CrewStructuredTool]]]
    task_prompt: str
    type: str = "agent_execution_started"

    model_config = {"arbitrary_types_allowed": True}


class AgentExecutionCompletedEvent(CrewEvent):
    """Event emitted when an agent completes executing a task"""

    agent: BaseAgent
    task: Any
    output: str
    type: str = "agent_execution_completed"


class AgentExecutionErrorEvent(CrewEvent):
    """Event emitted when an agent encounters an error during execution"""

    agent: BaseAgent
    task: Any
    error: str
    type: str = "agent_execution_error"
src/crewai/utilities/events/base_event_listener.py (new file, 14 lines)
@@ -0,0 +1,14 @@
from abc import ABC, abstractmethod
from logging import Logger

from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus, crewai_event_bus


class BaseEventListener(ABC):
    def __init__(self):
        super().__init__()
        self.setup_listeners(crewai_event_bus)

    @abstractmethod
    def setup_listeners(self, crewai_event_bus: CrewAIEventsBus):
        pass
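A hypothetical listener built on the BaseEventListener base class above. The task events it subscribes to are re-exported from crewai.utilities.events elsewhere in this changeset, but the handler logic and class name here are illustrative only:

```python
from typing import Any

from crewai.utilities.events import TaskCompletedEvent, TaskFailedEvent
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus


class TaskOutcomeListener(BaseEventListener):
    def setup_listeners(self, crewai_event_bus: CrewAIEventsBus) -> None:
        @crewai_event_bus.on(TaskCompletedEvent)
        def on_task_completed(source: Any, event: TaskCompletedEvent) -> None:
            print(f"Task completed: {event.output}")

        @crewai_event_bus.on(TaskFailedEvent)
        def on_task_failed(source: Any, event: TaskFailedEvent) -> None:
            print(f"Task failed: {event.error}")


# Instantiating the listener registers its handlers on the global bus,
# because BaseEventListener.__init__ calls setup_listeners(crewai_event_bus).
task_outcome_listener = TaskOutcomeListener()
```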
src/crewai/utilities/events/base_events.py (new file, 10 lines)
@@ -0,0 +1,10 @@
from datetime import datetime

from pydantic import BaseModel, Field


class CrewEvent(BaseModel):
    """Base class for all crew events"""

    timestamp: datetime = Field(default_factory=datetime.now)
    type: str
src/crewai/utilities/events/crew_events.py (new file, 81 lines)
@@ -0,0 +1,81 @@
from typing import Any, Dict, Optional, Union

from pydantic import InstanceOf

from crewai.utilities.events.base_events import CrewEvent


class CrewKickoffStartedEvent(CrewEvent):
    """Event emitted when a crew starts execution"""

    crew_name: Optional[str]
    inputs: Optional[Dict[str, Any]]
    type: str = "crew_kickoff_started"


class CrewKickoffCompletedEvent(CrewEvent):
    """Event emitted when a crew completes execution"""

    crew_name: Optional[str]
    output: Any
    type: str = "crew_kickoff_completed"


class CrewKickoffFailedEvent(CrewEvent):
    """Event emitted when a crew fails to complete execution"""

    error: str
    crew_name: Optional[str]
    type: str = "crew_kickoff_failed"


class CrewTrainStartedEvent(CrewEvent):
    """Event emitted when a crew starts training"""

    crew_name: Optional[str]
    n_iterations: int
    filename: str
    inputs: Optional[Dict[str, Any]]
    type: str = "crew_train_started"


class CrewTrainCompletedEvent(CrewEvent):
    """Event emitted when a crew completes training"""

    crew_name: Optional[str]
    n_iterations: int
    filename: str
    type: str = "crew_train_completed"


class CrewTrainFailedEvent(CrewEvent):
    """Event emitted when a crew fails to complete training"""

    error: str
    crew_name: Optional[str]
    type: str = "crew_train_failed"


class CrewTestStartedEvent(CrewEvent):
    """Event emitted when a crew starts testing"""

    crew_name: Optional[str]
    n_iterations: int
    eval_llm: Optional[Union[str, Any]]
    inputs: Optional[Dict[str, Any]]
    type: str = "crew_test_started"


class CrewTestCompletedEvent(CrewEvent):
    """Event emitted when a crew completes testing"""

    crew_name: Optional[str]
    type: str = "crew_test_completed"


class CrewTestFailedEvent(CrewEvent):
    """Event emitted when a crew fails to complete testing"""

    error: str
    crew_name: Optional[str]
    type: str = "crew_test_failed"

src/crewai/utilities/events/crewai_event_bus.py (new file, 113 lines)
@@ -0,0 +1,113 @@
import threading
from contextlib import contextmanager
from typing import Any, Callable, Dict, List, Type, TypeVar, cast

from blinker import Signal

from crewai.utilities.events.base_events import CrewEvent
from crewai.utilities.events.event_types import EventTypes

EventT = TypeVar("EventT", bound=CrewEvent)


class CrewAIEventsBus:
    """
    A singleton event bus that uses blinker signals for event handling.
    Allows both internal (Flow/Crew) and external event handling.
    """

    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:  # prevent race condition
                    cls._instance = super(CrewAIEventsBus, cls).__new__(cls)
                    cls._instance._initialize()
        return cls._instance

    def _initialize(self) -> None:
        """Initialize the event bus internal state"""
        self._signal = Signal("crewai_event_bus")
        self._handlers: Dict[Type[CrewEvent], List[Callable]] = {}

    def on(
        self, event_type: Type[EventT]
    ) -> Callable[[Callable[[Any, EventT], None]], Callable[[Any, EventT], None]]:
        """
        Decorator to register an event handler for a specific event type.

        Usage:
            @crewai_event_bus.on(AgentExecutionCompletedEvent)
            def on_agent_execution_completed(
                source: Any, event: AgentExecutionCompletedEvent
            ):
                print(f"👍 Agent '{event.agent}' completed task")
                print(f"   Output: {event.output}")
        """

        def decorator(
            handler: Callable[[Any, EventT], None],
        ) -> Callable[[Any, EventT], None]:
            if event_type not in self._handlers:
                self._handlers[event_type] = []
            self._handlers[event_type].append(
                cast(Callable[[Any, EventT], None], handler)
            )
            return handler

        return decorator

    def emit(self, source: Any, event: CrewEvent) -> None:
        """
        Emit an event to all registered handlers

        Args:
            source: The object emitting the event
            event: The event instance to emit
        """
        event_type = type(event)
        if event_type in self._handlers:
            for handler in self._handlers[event_type]:
                handler(source, event)
        self._signal.send(source, event=event)

    def clear_handlers(self) -> None:
        """Clear all registered event handlers - useful for testing"""
        self._handlers.clear()

    def register_handler(
        self, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
    ) -> None:
        """Register an event handler for a specific event type"""
        if event_type not in self._handlers:
            self._handlers[event_type] = []
        self._handlers[event_type].append(
            cast(Callable[[Any, EventTypes], None], handler)
        )

    @contextmanager
    def scoped_handlers(self):
        """
        Context manager for temporary event handling scope.
        Useful for testing or temporary event handling.

        Usage:
            with crewai_event_bus.scoped_handlers():
                @crewai_event_bus.on(CrewKickoffStarted)
                def temp_handler(source, event):
                    print("Temporary handler")
                # Do stuff...
            # Handlers are cleared after the context
        """
        previous_handlers = self._handlers.copy()
        self._handlers.clear()
        try:
            yield
        finally:
            self._handlers = previous_handlers


# Global instance
crewai_event_bus = CrewAIEventsBus()
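
The bus is meant to be exercised directly: `on` registers handlers, `emit` dispatches them, and `scoped_handlers` keeps test registrations from leaking. A minimal sketch of that round trip, assuming `CrewEvent` behaves as a plain pydantic model; `DemoEvent` is a hypothetical event type defined only for this illustration:

from crewai.utilities.events.base_events import CrewEvent
from crewai.utilities.events.crewai_event_bus import crewai_event_bus


class DemoEvent(CrewEvent):
    # hypothetical event type, defined only for this sketch
    message: str
    type: str = "demo_event"


with crewai_event_bus.scoped_handlers():

    @crewai_event_bus.on(DemoEvent)
    def on_demo(source, event: DemoEvent):
        print(f"got '{event.message}' from {source!r}")

    crewai_event_bus.emit(source=None, event=DemoEvent(message="hello"))
# handlers registered inside the block are discarded on exit
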

src/crewai/utilities/events/event_listener.py (new file, 257 lines)
@@ -0,0 +1,257 @@
from pydantic import PrivateAttr

from crewai.telemetry.telemetry import Telemetry
from crewai.utilities import Logger
from crewai.utilities.constants import EMITTER_COLOR
from crewai.utilities.events.base_event_listener import BaseEventListener

from .agent_events import AgentExecutionCompletedEvent, AgentExecutionStartedEvent
from .crew_events import (
    CrewKickoffCompletedEvent,
    CrewKickoffFailedEvent,
    CrewKickoffStartedEvent,
    CrewTestCompletedEvent,
    CrewTestFailedEvent,
    CrewTestStartedEvent,
    CrewTrainCompletedEvent,
    CrewTrainFailedEvent,
    CrewTrainStartedEvent,
)
from .flow_events import (
    FlowCreatedEvent,
    FlowFinishedEvent,
    FlowStartedEvent,
    MethodExecutionFailedEvent,
    MethodExecutionFinishedEvent,
    MethodExecutionStartedEvent,
)
from .task_events import TaskCompletedEvent, TaskFailedEvent, TaskStartedEvent
from .tool_usage_events import (
    ToolUsageErrorEvent,
    ToolUsageFinishedEvent,
    ToolUsageStartedEvent,
)


class EventListener(BaseEventListener):
    _instance = None
    _telemetry: Telemetry = PrivateAttr(default_factory=lambda: Telemetry())
    logger = Logger(verbose=True, default_color=EMITTER_COLOR)

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        if not hasattr(self, "_initialized") or not self._initialized:
            super().__init__()
            self._telemetry = Telemetry()
            self._telemetry.set_tracer()
            self._initialized = True

    # ----------- CREW EVENTS -----------

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_crew_started(source, event: CrewKickoffStartedEvent):
            self.logger.log(
                f"🚀 Crew '{event.crew_name}' started",
                event.timestamp,
            )
            self._telemetry.crew_execution_span(source, event.inputs)

        @crewai_event_bus.on(CrewKickoffCompletedEvent)
        def on_crew_completed(source, event: CrewKickoffCompletedEvent):
            final_string_output = event.output.raw
            self._telemetry.end_crew(source, final_string_output)
            self.logger.log(
                f"✅ Crew '{event.crew_name}' completed",
                event.timestamp,
            )

        @crewai_event_bus.on(CrewKickoffFailedEvent)
        def on_crew_failed(source, event: CrewKickoffFailedEvent):
            self.logger.log(
                f"❌ Crew '{event.crew_name}' failed",
                event.timestamp,
            )

        @crewai_event_bus.on(CrewTestStartedEvent)
        def on_crew_test_started(source, event: CrewTestStartedEvent):
            cloned_crew = source.copy()
            cloned_crew._telemetry.test_execution_span(
                cloned_crew,
                event.n_iterations,
                event.inputs,
                event.eval_llm,
            )
            self.logger.log(
                f"🚀 Crew '{event.crew_name}' started test",
                event.timestamp,
            )

        @crewai_event_bus.on(CrewTestCompletedEvent)
        def on_crew_test_completed(source, event: CrewTestCompletedEvent):
            self.logger.log(
                f"✅ Crew '{event.crew_name}' completed test",
                event.timestamp,
            )

        @crewai_event_bus.on(CrewTestFailedEvent)
        def on_crew_test_failed(source, event: CrewTestFailedEvent):
            self.logger.log(
                f"❌ Crew '{event.crew_name}' failed test",
                event.timestamp,
            )

        @crewai_event_bus.on(CrewTrainStartedEvent)
        def on_crew_train_started(source, event: CrewTrainStartedEvent):
            self.logger.log(
                f"📋 Crew '{event.crew_name}' started train",
                event.timestamp,
            )

        @crewai_event_bus.on(CrewTrainCompletedEvent)
        def on_crew_train_completed(source, event: CrewTrainCompletedEvent):
            self.logger.log(
                f"✅ Crew '{event.crew_name}' completed train",
                event.timestamp,
            )

        @crewai_event_bus.on(CrewTrainFailedEvent)
        def on_crew_train_failed(source, event: CrewTrainFailedEvent):
            self.logger.log(
                f"❌ Crew '{event.crew_name}' failed train",
                event.timestamp,
            )

        # ----------- TASK EVENTS -----------

        @crewai_event_bus.on(TaskStartedEvent)
        def on_task_started(source, event: TaskStartedEvent):
            source._execution_span = self._telemetry.task_started(
                crew=source.agent.crew, task=source
            )
            self.logger.log(
                f"📋 Task started: {source.description}",
                event.timestamp,
            )

        @crewai_event_bus.on(TaskCompletedEvent)
        def on_task_completed(source, event: TaskCompletedEvent):
            if source._execution_span:
                self._telemetry.task_ended(
                    source._execution_span, source, source.agent.crew
                )
            self.logger.log(
                f"✅ Task completed: {source.description}",
                event.timestamp,
            )
            source._execution_span = None

        @crewai_event_bus.on(TaskFailedEvent)
        def on_task_failed(source, event: TaskFailedEvent):
            if source._execution_span:
                if source.agent and source.agent.crew:
                    self._telemetry.task_ended(
                        source._execution_span, source, source.agent.crew
                    )
                source._execution_span = None
            self.logger.log(
                f"❌ Task failed: {source.description}",
                event.timestamp,
            )

        # ----------- AGENT EVENTS -----------

        @crewai_event_bus.on(AgentExecutionStartedEvent)
        def on_agent_execution_started(source, event: AgentExecutionStartedEvent):
            self.logger.log(
                f"🤖 Agent '{event.agent.role}' started task",
                event.timestamp,
            )

        @crewai_event_bus.on(AgentExecutionCompletedEvent)
        def on_agent_execution_completed(source, event: AgentExecutionCompletedEvent):
            self.logger.log(
                f"✅ Agent '{event.agent.role}' completed task",
                event.timestamp,
            )

        # ----------- FLOW EVENTS -----------

        @crewai_event_bus.on(FlowCreatedEvent)
        def on_flow_created(source, event: FlowCreatedEvent):
            self._telemetry.flow_creation_span(self.__class__.__name__)
            self.logger.log(
                f"🌊 Flow Created: '{event.flow_name}'",
                event.timestamp,
            )

        @crewai_event_bus.on(FlowStartedEvent)
        def on_flow_started(source, event: FlowStartedEvent):
            self._telemetry.flow_execution_span(
                source.__class__.__name__, list(source._methods.keys())
            )
            self.logger.log(
                f"🤖 Flow Started: '{event.flow_name}'",
                event.timestamp,
            )

        @crewai_event_bus.on(FlowFinishedEvent)
        def on_flow_finished(source, event: FlowFinishedEvent):
            self.logger.log(
                f"👍 Flow Finished: '{event.flow_name}'",
                event.timestamp,
            )

        @crewai_event_bus.on(MethodExecutionStartedEvent)
        def on_method_execution_started(source, event: MethodExecutionStartedEvent):
            self.logger.log(
                f"🤖 Flow Method Started: '{event.method_name}'",
                event.timestamp,
            )

        @crewai_event_bus.on(MethodExecutionFailedEvent)
        def on_method_execution_failed(source, event: MethodExecutionFailedEvent):
            self.logger.log(
                f"❌ Flow Method Failed: '{event.method_name}'",
                event.timestamp,
            )

        @crewai_event_bus.on(MethodExecutionFinishedEvent)
        def on_method_execution_finished(source, event: MethodExecutionFinishedEvent):
            self.logger.log(
                f"👍 Flow Method Finished: '{event.method_name}'",
                event.timestamp,
            )

        # ----------- TOOL USAGE EVENTS -----------

        @crewai_event_bus.on(ToolUsageStartedEvent)
        def on_tool_usage_started(source, event: ToolUsageStartedEvent):
            self.logger.log(
                f"🤖 Tool Usage Started: '{event.tool_name}'",
                event.timestamp,
            )

        @crewai_event_bus.on(ToolUsageFinishedEvent)
        def on_tool_usage_finished(source, event: ToolUsageFinishedEvent):
            self.logger.log(
                f"✅ Tool Usage Finished: '{event.tool_name}'",
                event.timestamp,
            )

        @crewai_event_bus.on(ToolUsageErrorEvent)
        def on_tool_usage_error(source, event: ToolUsageErrorEvent):
            self.logger.log(
                f"❌ Tool Usage Error: '{event.tool_name}'",
                event.timestamp,
            )


event_listener = EventListener()
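
EventListener doubles as a template for project-specific listeners: subclass BaseEventListener, register handlers in setup_listeners, and instantiate the class once. The sketch below assumes, as the AgentOps listener further down suggests, that instantiating a BaseEventListener is enough to wire it to the global bus; the timing logic and names are illustrative only.

from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import (
    CrewKickoffCompletedEvent,
    CrewKickoffStartedEvent,
)


class CrewDurationListener(BaseEventListener):
    """Illustrative listener that reports how long each crew run takes."""

    def __init__(self):
        super().__init__()
        self._started_at = {}

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_started(source, event: CrewKickoffStartedEvent):
            # key timings by the emitting crew object
            self._started_at[id(source)] = event.timestamp

        @crewai_event_bus.on(CrewKickoffCompletedEvent)
        def on_completed(source, event: CrewKickoffCompletedEvent):
            started = self._started_at.pop(id(source), None)
            if started is not None:
                print(f"Crew '{event.crew_name}' finished in {event.timestamp - started}")


crew_duration_listener = CrewDurationListener()
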

src/crewai/utilities/events/event_types.py (new file, 61 lines)
@@ -0,0 +1,61 @@
from typing import Union

from .agent_events import (
    AgentExecutionCompletedEvent,
    AgentExecutionErrorEvent,
    AgentExecutionStartedEvent,
)
from .crew_events import (
    CrewKickoffCompletedEvent,
    CrewKickoffFailedEvent,
    CrewKickoffStartedEvent,
    CrewTestCompletedEvent,
    CrewTestFailedEvent,
    CrewTestStartedEvent,
    CrewTrainCompletedEvent,
    CrewTrainFailedEvent,
    CrewTrainStartedEvent,
)
from .flow_events import (
    FlowFinishedEvent,
    FlowStartedEvent,
    MethodExecutionFailedEvent,
    MethodExecutionFinishedEvent,
    MethodExecutionStartedEvent,
)
from .task_events import (
    TaskCompletedEvent,
    TaskFailedEvent,
    TaskStartedEvent,
)
from .tool_usage_events import (
    ToolUsageErrorEvent,
    ToolUsageFinishedEvent,
    ToolUsageStartedEvent,
)

EventTypes = Union[
    CrewKickoffStartedEvent,
    CrewKickoffCompletedEvent,
    CrewKickoffFailedEvent,
    CrewTestStartedEvent,
    CrewTestCompletedEvent,
    CrewTestFailedEvent,
    CrewTrainStartedEvent,
    CrewTrainCompletedEvent,
    CrewTrainFailedEvent,
    AgentExecutionStartedEvent,
    AgentExecutionCompletedEvent,
    TaskStartedEvent,
    TaskCompletedEvent,
    TaskFailedEvent,
    FlowStartedEvent,
    FlowFinishedEvent,
    MethodExecutionStartedEvent,
    MethodExecutionFinishedEvent,
    MethodExecutionFailedEvent,
    AgentExecutionErrorEvent,
    ToolUsageFinishedEvent,
    ToolUsageErrorEvent,
    ToolUsageStartedEvent,
]
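
Because EventTypes enumerates every event the bus knows about, it is the natural annotation for handlers registered programmatically with register_handler rather than with the decorator. A small sketch using only the API shown above; the handler body relies on the attributes shown in the `on` docstring and is illustrative:

from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.agent_events import AgentExecutionCompletedEvent


def record_agent_output(source, event):
    # illustrative handler registered without the decorator form
    print(f"Agent '{event.agent}' finished with output: {event.output}")


crewai_event_bus.register_handler(AgentExecutionCompletedEvent, record_agent_output)
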

src/crewai/utilities/events/flow_events.py (new file, 71 lines)
@@ -0,0 +1,71 @@
from typing import Any, Dict, Optional, Union

from pydantic import BaseModel

from .base_events import CrewEvent


class FlowEvent(CrewEvent):
    """Base class for all flow events"""

    type: str
    flow_name: str


class FlowStartedEvent(FlowEvent):
    """Event emitted when a flow starts execution"""

    flow_name: str
    inputs: Optional[Dict[str, Any]] = None
    type: str = "flow_started"


class FlowCreatedEvent(FlowEvent):
    """Event emitted when a flow is created"""

    flow_name: str
    type: str = "flow_created"


class MethodExecutionStartedEvent(FlowEvent):
    """Event emitted when a flow method starts execution"""

    flow_name: str
    method_name: str
    state: Union[Dict[str, Any], BaseModel]
    params: Optional[Dict[str, Any]] = None
    type: str = "method_execution_started"


class MethodExecutionFinishedEvent(FlowEvent):
    """Event emitted when a flow method completes execution"""

    flow_name: str
    method_name: str
    result: Any = None
    state: Union[Dict[str, Any], BaseModel]
    type: str = "method_execution_finished"


class MethodExecutionFailedEvent(FlowEvent):
    """Event emitted when a flow method fails execution"""

    flow_name: str
    method_name: str
    error: Any
    type: str = "method_execution_failed"


class FlowFinishedEvent(FlowEvent):
    """Event emitted when a flow completes execution"""

    flow_name: str
    result: Optional[Any] = None
    type: str = "flow_finished"


class FlowPlotEvent(FlowEvent):
    """Event emitted when a flow plot is created"""

    flow_name: str
    type: str = "flow_plot"
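
These flow events are emitted as Flow.kickoff runs each decorated method, which makes them handy for lightweight tracing, as the flow tests later in this change demonstrate. A minimal sketch (the flow itself is hypothetical; the handler relies only on the fields declared above):

from crewai.flow.flow import Flow, start
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.flow_events import MethodExecutionFinishedEvent


@crewai_event_bus.on(MethodExecutionFinishedEvent)
def trace_method(source, event: MethodExecutionFinishedEvent):
    # print one line per finished flow method
    print(f"{event.flow_name}.{event.method_name} -> {event.result!r}")


class GreetFlow(Flow):
    @start()
    def greet(self):
        return "hello"


GreetFlow().kickoff()  # expected to print: GreetFlow.greet -> 'hello'
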

src/crewai/utilities/events/task_events.py (new file, 32 lines)
@@ -0,0 +1,32 @@
from typing import Any, Optional

from crewai.tasks.task_output import TaskOutput
from crewai.utilities.events.base_events import CrewEvent


class TaskStartedEvent(CrewEvent):
    """Event emitted when a task starts"""

    type: str = "task_started"
    context: Optional[str]


class TaskCompletedEvent(CrewEvent):
    """Event emitted when a task completes"""

    output: TaskOutput
    type: str = "task_completed"


class TaskFailedEvent(CrewEvent):
    """Event emitted when a task fails"""

    error: str
    type: str = "task_failed"


class TaskEvaluationEvent(CrewEvent):
    """Event emitted when a task evaluation is completed"""

    type: str = "task_evaluation"
    evaluation_type: str

src/crewai/utilities/events/third_party/__init__.py (new file, vendored, 1 line)
@@ -0,0 +1 @@
from .agentops_listener import agentops_listener

src/crewai/utilities/events/third_party/agentops_listener.py (new file, vendored, 67 lines)
@@ -0,0 +1,67 @@
from typing import Optional

from crewai.utilities.events import (
    CrewKickoffCompletedEvent,
    ToolUsageErrorEvent,
    ToolUsageStartedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
from crewai.utilities.events.task_events import TaskEvaluationEvent

try:
    import agentops

    AGENTOPS_INSTALLED = True
except ImportError:
    AGENTOPS_INSTALLED = False


class AgentOpsListener(BaseEventListener):
    tool_event: Optional["agentops.ToolEvent"] = None
    session: Optional["agentops.Session"] = None

    def __init__(self):
        super().__init__()

    def setup_listeners(self, crewai_event_bus):
        if not AGENTOPS_INSTALLED:
            return

        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
            self.session = agentops.init()
            for agent in source.agents:
                if self.session:
                    self.session.create_agent(
                        name=agent.role,
                        agent_id=str(agent.id),
                    )

        @crewai_event_bus.on(CrewKickoffCompletedEvent)
        def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
            if self.session:
                self.session.end_session(
                    end_state="Success",
                    end_state_reason="Finished Execution",
                )

        @crewai_event_bus.on(ToolUsageStartedEvent)
        def on_tool_usage_started(source, event: ToolUsageStartedEvent):
            self.tool_event = agentops.ToolEvent(name=event.tool_name)
            if self.session:
                self.session.record(self.tool_event)

        @crewai_event_bus.on(ToolUsageErrorEvent)
        def on_tool_usage_error(source, event: ToolUsageErrorEvent):
            agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)

        @crewai_event_bus.on(TaskEvaluationEvent)
        def on_task_evaluation(source, event: TaskEvaluationEvent):
            if self.session:
                self.session.create_agent(
                    name="Task Evaluator", agent_id=str(source.original_agent.id)
                )


agentops_listener = AgentOpsListener()

src/crewai/utilities/events/tool_usage_events.py (new file, 64 lines)
@@ -0,0 +1,64 @@
from datetime import datetime
from typing import Any, Callable, Dict

from .base_events import CrewEvent


class ToolUsageEvent(CrewEvent):
    """Base event for tool usage tracking"""

    agent_key: str
    agent_role: str
    tool_name: str
    tool_args: Dict[str, Any] | str
    tool_class: str
    run_attempts: int | None = None
    delegations: int | None = None

    model_config = {"arbitrary_types_allowed": True}


class ToolUsageStartedEvent(ToolUsageEvent):
    """Event emitted when a tool execution is started"""

    type: str = "tool_usage_started"


class ToolUsageFinishedEvent(ToolUsageEvent):
    """Event emitted when a tool execution is completed"""

    started_at: datetime
    finished_at: datetime
    from_cache: bool = False
    type: str = "tool_usage_finished"


class ToolUsageErrorEvent(ToolUsageEvent):
    """Event emitted when a tool execution encounters an error"""

    error: Any
    type: str = "tool_usage_error"


class ToolValidateInputErrorEvent(ToolUsageEvent):
    """Event emitted when a tool input validation encounters an error"""

    error: Any
    type: str = "tool_validate_input_error"


class ToolSelectionErrorEvent(ToolUsageEvent):
    """Event emitted when a tool selection encounters an error"""

    error: Any
    type: str = "tool_selection_error"


class ToolExecutionErrorEvent(CrewEvent):
    """Event emitted when a tool execution encounters an error"""

    error: Any
    type: str = "tool_execution_error"
    tool_name: str
    tool_args: Dict[str, Any]
    tool_class: Callable

@@ -8,8 +8,11 @@ from crewai.utilities.printer import Printer
class Logger(BaseModel):
    verbose: bool = Field(default=False)
    _printer: Printer = PrivateAttr(default_factory=Printer)
    default_color: str = Field(default="bold_yellow")

    def log(self, level, message, color="bold_yellow"):
    def log(self, level, message, color=None):
        if color is None:
            color = self.default_color
        if self.verbose:
            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            self._printer.print(

@@ -17,9 +17,9 @@ from crewai.llm import LLM
from crewai.tools import tool
from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage
from crewai.tools.tool_usage_events import ToolUsageFinished
from crewai.utilities import RPMController
from crewai.utilities.events import Emitter
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolUsageFinishedEvent


def test_agent_llm_creation_with_env_vars():

@@ -155,15 +155,19 @@ def test_agent_execution_with_tools():
        agent=agent,
        expected_output="The result of the multiplication.",
    )
    with patch.object(Emitter, "emit") as emit:
        output = agent.execute_task(task)
        assert output == "The result of the multiplication is 12."
        assert emit.call_count == 1
        args, _ = emit.call_args
        assert isinstance(args[1], ToolUsageFinished)
        assert not args[1].from_cache
        assert args[1].tool_name == "multiplier"
        assert args[1].tool_args == {"first_number": 3, "second_number": 4}
    received_events = []

    @crewai_event_bus.on(ToolUsageFinishedEvent)
    def handle_tool_end(source, event):
        received_events.append(event)

    output = agent.execute_task(task)
    assert output == "The result of the multiplication is 12."

    assert len(received_events) == 1
    assert isinstance(received_events[0], ToolUsageFinishedEvent)
    assert received_events[0].tool_name == "multiplier"
    assert received_events[0].tool_args == {"first_number": 3, "second_number": 4}


@pytest.mark.vcr(filter_headers=["authorization"])

@@ -250,10 +254,14 @@ def test_cache_hitting():
        "multiplier-{'first_number': 3, 'second_number': 3}": 9,
        "multiplier-{'first_number': 12, 'second_number': 3}": 36,
    }
    received_events = []

    @crewai_event_bus.on(ToolUsageFinishedEvent)
    def handle_tool_end(source, event):
        received_events.append(event)

    with (
        patch.object(CacheHandler, "read") as read,
        patch.object(Emitter, "emit") as emit,
    ):
        read.return_value = "0"
        task = Task(
@@ -266,10 +274,9 @@ def test_cache_hitting():
        read.assert_called_with(
            tool="multiplier", input={"first_number": 2, "second_number": 6}
        )
        assert emit.call_count == 1
        args, _ = emit.call_args
        assert isinstance(args[1], ToolUsageFinished)
        assert args[1].from_cache
        assert len(received_events) == 1
        assert isinstance(received_events[0], ToolUsageFinishedEvent)
        assert received_events[0].from_cache


@pytest.mark.vcr(filter_headers=["authorization"])
112
tests/cassettes/test_tool_execution_error_event.yaml
Normal file
112
tests/cassettes/test_tool_execution_error_event.yaml
Normal file
@@ -0,0 +1,112 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Use the failing tool"}], "model":
|
||||
"gpt-4o-mini", "stop": [], "tools": [{"type": "function", "function": {"name":
|
||||
"failing_tool", "description": "This tool always fails.", "parameters": {"type":
|
||||
"object", "properties": {"param": {"type": "string", "description": "A test
|
||||
parameter"}}, "required": ["param"]}}}]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '353'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.61.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.61.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.8
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-B2P4zoJZuES7Aom8ugEq1modz5Vsl\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1739912761,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_F6fJxISpMKUBIGV6dd2vjRNG\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"failing_tool\",\n
|
||||
\ \"arguments\": \"{\\\"param\\\":\\\"test\\\"}\"\n }\n
|
||||
\ }\n ],\n \"refusal\": null\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n
|
||||
\ \"prompt_tokens\": 51,\n \"completion_tokens\": 15,\n \"total_tokens\":
|
||||
66,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\":
|
||||
0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
|
||||
0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\":
|
||||
0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":
|
||||
\"fp_00428b782a\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 9140fa827f38eb1e-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 18 Feb 2025 21:06:02 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=xbuu3IQpCMh.43ZrqL1TRMECOc6QldgHV0hzOX1GrWI-1739912762-1.0.1.1-t7iyq5xMioPrwfeaHLvPT9rwRPp7Q9A9uIm69icH9dPxRD4xMA3cWqb1aXj1_e2IyAEQQWFe1UWjlmJ22aHh3Q;
|
||||
path=/; expires=Tue, 18-Feb-25 21:36:02 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=x9l.Rhja8_wXDN.j8qcEU1PvvEqAwZp4Fd3s_aj4qwM-1739912762161-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '861'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999978'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_8666ec3aa6677cb346ba00993556051d
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||

@@ -6,7 +6,6 @@ from concurrent.futures import Future
from unittest import mock
from unittest.mock import MagicMock, patch

import instructor
import pydantic_core
import pytest

@@ -18,13 +17,21 @@ from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSourc
from crewai.llm import LLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.process import Process
from crewai.project import crew
from crewai.task import Task
from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.types.usage_metrics import UsageMetrics
from crewai.utilities import Logger
from crewai.utilities.events import (
    CrewTrainCompletedEvent,
    CrewTrainStartedEvent,
    crewai_event_bus,
)
from crewai.utilities.events.crew_events import (
    CrewTestCompletedEvent,
    CrewTestStartedEvent,
)
from crewai.utilities.rpm_controller import RPMController
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler

@@ -844,8 +851,21 @@ def test_crew_verbose_output(capsys):
    crew.verbose = False
    crew._logger = Logger(verbose=False)
    crew.kickoff()
    expected_listener_logs = [
        "[🚀 CREW 'CREW' STARTED]",
        "[📋 TASK STARTED: RESEARCH AI ADVANCEMENTS.]",
        "[🤖 AGENT 'RESEARCHER' STARTED TASK]",
        "[✅ AGENT 'RESEARCHER' COMPLETED TASK]",
        "[✅ TASK COMPLETED: RESEARCH AI ADVANCEMENTS.]",
        "[📋 TASK STARTED: WRITE ABOUT AI IN HEALTHCARE.]",
        "[🤖 AGENT 'SENIOR WRITER' STARTED TASK]",
        "[✅ AGENT 'SENIOR WRITER' COMPLETED TASK]",
        "[✅ TASK COMPLETED: WRITE ABOUT AI IN HEALTHCARE.]",
        "[✅ CREW 'CREW' COMPLETED]",
    ]
    captured = capsys.readouterr()
    assert captured.out == ""
    for log in expected_listener_logs:
        assert log in captured.out


@pytest.mark.vcr(filter_headers=["authorization"])

@@ -1283,9 +1303,9 @@ def test_kickoff_for_each_invalid_input():

    crew = Crew(agents=[agent], tasks=[task])

    with pytest.raises(TypeError):
    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
        # Pass a string instead of a list
        crew.kickoff_for_each("invalid input")
        crew.kickoff_for_each(["invalid input"])


def test_kickoff_for_each_error_handling():

@@ -2569,6 +2589,16 @@ def test_crew_train_success(
    # Create a mock for the copied crew
    copy_mock.return_value = crew

    received_events = []

    @crewai_event_bus.on(CrewTrainStartedEvent)
    def on_crew_train_started(source, event: CrewTrainStartedEvent):
        received_events.append(event)

    @crewai_event_bus.on(CrewTrainCompletedEvent)
    def on_crew_train_completed(source, event: CrewTrainCompletedEvent):
        received_events.append(event)

    crew.train(
        n_iterations=2, inputs={"topic": "AI"}, filename="trained_agents_data.pkl"
    )
@@ -2614,6 +2644,10 @@ def test_crew_train_success(
        ]
    )

    assert len(received_events) == 2
    assert isinstance(received_events[0], CrewTrainStartedEvent)
    assert isinstance(received_events[1], CrewTrainCompletedEvent)


def test_crew_train_error():
    task = Task(

@@ -3342,7 +3376,18 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
    copy_mock.return_value = crew

    n_iterations = 2
    llm_instance = LLM('gpt-4o-mini')
    llm_instance = LLM("gpt-4o-mini")

    received_events = []

    @crewai_event_bus.on(CrewTestStartedEvent)
    def on_crew_test_started(source, event: CrewTestStartedEvent):
        received_events.append(event)

    @crewai_event_bus.on(CrewTestCompletedEvent)
    def on_crew_test_completed(source, event: CrewTestCompletedEvent):
        received_events.append(event)

    crew.test(n_iterations, llm_instance, inputs={"topic": "AI"})

    # Ensure kickoff is called on the copied crew
@@ -3352,13 +3397,17 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):

    crew_evaluator.assert_has_calls(
        [
            mock.call(crew,llm_instance),
            mock.call(crew, llm_instance),
            mock.call().set_iteration(1),
            mock.call().set_iteration(2),
            mock.call().print_crew_evaluation_result(),
        ]
    )

    assert len(received_events) == 2
    assert isinstance(received_events[0], CrewTestStartedEvent)
    assert isinstance(received_events[1], CrewTestCompletedEvent)


@pytest.mark.vcr(filter_headers=["authorization"])
def test_hierarchical_verbose_manager_agent():

@@ -7,12 +7,14 @@ import pytest
from pydantic import BaseModel

from crewai.flow.flow import Flow, and_, listen, or_, router, start
from crewai.flow.flow_events import (
from crewai.utilities.events import (
    FlowFinishedEvent,
    FlowStartedEvent,
    MethodExecutionFinishedEvent,
    MethodExecutionStartedEvent,
    crewai_event_bus,
)
from crewai.utilities.events.flow_events import FlowPlotEvent


def test_simple_sequential_flow():
@@ -434,90 +436,65 @@ def test_unstructured_flow_event_emission():
|
||||
@listen(finish_poem)
|
||||
def save_poem_to_database(self):
|
||||
# A method without args/kwargs to ensure events are sent correctly
|
||||
pass
|
||||
|
||||
event_log = []
|
||||
|
||||
def handle_event(_, event):
|
||||
event_log.append(event)
|
||||
return "roses are red\nviolets are blue"
|
||||
|
||||
flow = PoemFlow()
|
||||
flow.event_emitter.connect(handle_event)
|
||||
received_events = []
|
||||
|
||||
@crewai_event_bus.on(FlowStartedEvent)
|
||||
def handle_flow_start(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
@crewai_event_bus.on(MethodExecutionStartedEvent)
|
||||
def handle_method_start(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
@crewai_event_bus.on(FlowFinishedEvent)
|
||||
def handle_flow_end(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
flow.kickoff(inputs={"separator": ", "})
|
||||
assert isinstance(received_events[0], FlowStartedEvent)
|
||||
assert received_events[0].flow_name == "PoemFlow"
|
||||
assert received_events[0].inputs == {"separator": ", "}
|
||||
assert isinstance(received_events[0].timestamp, datetime)
|
||||
|
||||
assert isinstance(event_log[0], FlowStartedEvent)
|
||||
assert event_log[0].flow_name == "PoemFlow"
|
||||
assert event_log[0].inputs == {"separator": ", "}
|
||||
assert isinstance(event_log[0].timestamp, datetime)
|
||||
|
||||
# Asserting for concurrent start method executions in a for loop as you
|
||||
# can't guarantee ordering in asynchronous executions
|
||||
for i in range(1, 5):
|
||||
event = event_log[i]
|
||||
# All subsequent events are MethodExecutionStartedEvent
|
||||
for event in received_events[1:-1]:
|
||||
assert isinstance(event, MethodExecutionStartedEvent)
|
||||
assert event.flow_name == "PoemFlow"
|
||||
assert isinstance(event.state, dict)
|
||||
assert isinstance(event.state["id"], str)
|
||||
assert event.state["separator"] == ", "
|
||||
|
||||
if event.method_name == "prepare_flower":
|
||||
if isinstance(event, MethodExecutionStartedEvent):
|
||||
assert event.params == {}
|
||||
assert event.state["separator"] == ", "
|
||||
elif isinstance(event, MethodExecutionFinishedEvent):
|
||||
assert event.result == "foo"
|
||||
assert event.state["flower"] == "roses"
|
||||
assert event.state["separator"] == ", "
|
||||
else:
|
||||
assert False, "Unexpected event type for prepare_flower"
|
||||
elif event.method_name == "prepare_color":
|
||||
if isinstance(event, MethodExecutionStartedEvent):
|
||||
assert event.params == {}
|
||||
assert event.state["separator"] == ", "
|
||||
elif isinstance(event, MethodExecutionFinishedEvent):
|
||||
assert event.result == "bar"
|
||||
assert event.state["color"] == "red"
|
||||
assert event.state["separator"] == ", "
|
||||
else:
|
||||
assert False, "Unexpected event type for prepare_color"
|
||||
else:
|
||||
assert False, f"Unexpected method {event.method_name} in prepare events"
|
||||
assert received_events[1].method_name == "prepare_flower"
|
||||
assert received_events[1].params == {}
|
||||
assert "flower" not in received_events[1].state
|
||||
|
||||
assert isinstance(event_log[5], MethodExecutionStartedEvent)
|
||||
assert event_log[5].method_name == "write_first_sentence"
|
||||
assert event_log[5].params == {}
|
||||
assert isinstance(event_log[5].state, dict)
|
||||
assert event_log[5].state["flower"] == "roses"
|
||||
assert event_log[5].state["color"] == "red"
|
||||
assert event_log[5].state["separator"] == ", "
|
||||
assert received_events[2].method_name == "prepare_color"
|
||||
assert received_events[2].params == {}
|
||||
print("received_events[2]", received_events[2])
|
||||
assert "flower" in received_events[2].state
|
||||
|
||||
assert isinstance(event_log[6], MethodExecutionFinishedEvent)
|
||||
assert event_log[6].method_name == "write_first_sentence"
|
||||
assert event_log[6].result == "roses are red"
|
||||
assert received_events[3].method_name == "write_first_sentence"
|
||||
assert received_events[3].params == {}
|
||||
assert received_events[3].state["flower"] == "roses"
|
||||
assert received_events[3].state["color"] == "red"
|
||||
|
||||
assert isinstance(event_log[7], MethodExecutionStartedEvent)
|
||||
assert event_log[7].method_name == "finish_poem"
|
||||
assert event_log[7].params == {"_0": "roses are red"}
|
||||
assert isinstance(event_log[7].state, dict)
|
||||
assert event_log[7].state["flower"] == "roses"
|
||||
assert event_log[7].state["color"] == "red"
|
||||
assert received_events[4].method_name == "finish_poem"
|
||||
assert received_events[4].params == {"_0": "roses are red"}
|
||||
assert received_events[4].state["flower"] == "roses"
|
||||
assert received_events[4].state["color"] == "red"
|
||||
|
||||
assert isinstance(event_log[8], MethodExecutionFinishedEvent)
|
||||
assert event_log[8].method_name == "finish_poem"
|
||||
assert event_log[8].result == "roses are red, violets are blue"
|
||||
assert received_events[5].method_name == "save_poem_to_database"
|
||||
assert received_events[5].params == {}
|
||||
assert received_events[5].state["flower"] == "roses"
|
||||
assert received_events[5].state["color"] == "red"
|
||||
|
||||
assert isinstance(event_log[9], MethodExecutionStartedEvent)
|
||||
assert event_log[9].method_name == "save_poem_to_database"
|
||||
assert event_log[9].params == {}
|
||||
assert isinstance(event_log[9].state, dict)
|
||||
assert event_log[9].state["flower"] == "roses"
|
||||
assert event_log[9].state["color"] == "red"
|
||||
|
||||
assert isinstance(event_log[10], MethodExecutionFinishedEvent)
|
||||
assert event_log[10].method_name == "save_poem_to_database"
|
||||
assert event_log[10].result is None
|
||||
|
||||
assert isinstance(event_log[11], FlowFinishedEvent)
|
||||
assert event_log[11].flow_name == "PoemFlow"
|
||||
assert event_log[11].result is None
|
||||
assert isinstance(event_log[11].timestamp, datetime)
|
||||
assert isinstance(received_events[6], FlowFinishedEvent)
|
||||
assert received_events[6].flow_name == "PoemFlow"
|
||||
assert received_events[6].result == "roses are red\nviolets are blue"
|
||||
assert isinstance(received_events[6].timestamp, datetime)
|
||||
|
||||
|
||||
def test_structured_flow_event_emission():
|
||||
@@ -538,40 +515,54 @@ def test_structured_flow_event_emission():
|
||||
self.state.sent = True
|
||||
return f"Welcome, {self.state.name}!"
|
||||
|
||||
event_log = []
|
||||
|
||||
def handle_event(_, event):
|
||||
event_log.append(event)
|
||||
|
||||
flow = OnboardingFlow()
|
||||
flow.event_emitter.connect(handle_event)
|
||||
flow.kickoff(inputs={"name": "Anakin"})
|
||||
|
||||
assert isinstance(event_log[0], FlowStartedEvent)
|
||||
assert event_log[0].flow_name == "OnboardingFlow"
|
||||
assert event_log[0].inputs == {"name": "Anakin"}
|
||||
assert isinstance(event_log[0].timestamp, datetime)
|
||||
received_events = []
|
||||
|
||||
assert isinstance(event_log[1], MethodExecutionStartedEvent)
|
||||
assert event_log[1].method_name == "user_signs_up"
|
||||
@crewai_event_bus.on(FlowStartedEvent)
|
||||
def handle_flow_start(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
assert isinstance(event_log[2], MethodExecutionFinishedEvent)
|
||||
assert event_log[2].method_name == "user_signs_up"
|
||||
@crewai_event_bus.on(MethodExecutionStartedEvent)
|
||||
def handle_method_start(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
assert isinstance(event_log[3], MethodExecutionStartedEvent)
|
||||
assert event_log[3].method_name == "send_welcome_message"
|
||||
assert event_log[3].params == {}
|
||||
assert getattr(event_log[3].state, "sent") is False
|
||||
@crewai_event_bus.on(MethodExecutionFinishedEvent)
|
||||
def handle_method_end(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
assert isinstance(event_log[4], MethodExecutionFinishedEvent)
|
||||
assert event_log[4].method_name == "send_welcome_message"
|
||||
assert getattr(event_log[4].state, "sent") is True
|
||||
assert event_log[4].result == "Welcome, Anakin!"
|
||||
@crewai_event_bus.on(FlowFinishedEvent)
|
||||
def handle_flow_end(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
assert isinstance(event_log[5], FlowFinishedEvent)
|
||||
assert event_log[5].flow_name == "OnboardingFlow"
|
||||
assert event_log[5].result == "Welcome, Anakin!"
|
||||
assert isinstance(event_log[5].timestamp, datetime)
|
||||
flow.kickoff(inputs={"name": "Anakin"})
|
||||
|
||||
assert isinstance(received_events[0], FlowStartedEvent)
|
||||
assert received_events[0].flow_name == "OnboardingFlow"
|
||||
assert received_events[0].inputs == {"name": "Anakin"}
|
||||
assert isinstance(received_events[0].timestamp, datetime)
|
||||
|
||||
assert isinstance(received_events[1], MethodExecutionStartedEvent)
|
||||
assert received_events[1].method_name == "user_signs_up"
|
||||
|
||||
assert isinstance(received_events[2], MethodExecutionFinishedEvent)
|
||||
assert received_events[2].method_name == "user_signs_up"
|
||||
|
||||
assert isinstance(received_events[3], MethodExecutionStartedEvent)
|
||||
assert received_events[3].method_name == "send_welcome_message"
|
||||
assert received_events[3].params == {}
|
||||
assert getattr(received_events[3].state, "sent") is False
|
||||
|
||||
assert isinstance(received_events[4], MethodExecutionFinishedEvent)
|
||||
assert received_events[4].method_name == "send_welcome_message"
|
||||
assert getattr(received_events[4].state, "sent") is True
|
||||
assert received_events[4].result == "Welcome, Anakin!"
|
||||
|
||||
assert isinstance(received_events[5], FlowFinishedEvent)
|
||||
assert received_events[5].flow_name == "OnboardingFlow"
|
||||
assert received_events[5].result == "Welcome, Anakin!"
|
||||
assert isinstance(received_events[5].timestamp, datetime)
|
||||
|
||||
|
||||
def test_stateless_flow_event_emission():
|
||||
@@ -593,30 +584,73 @@ def test_stateless_flow_event_emission():
|
||||
event_log.append(event)
|
||||
|
||||
flow = StatelessFlow()
|
||||
flow.event_emitter.connect(handle_event)
|
||||
received_events = []
|
||||
|
||||
@crewai_event_bus.on(FlowStartedEvent)
|
||||
def handle_flow_start(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
@crewai_event_bus.on(MethodExecutionStartedEvent)
|
||||
def handle_method_start(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
@crewai_event_bus.on(MethodExecutionFinishedEvent)
|
||||
def handle_method_end(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
@crewai_event_bus.on(FlowFinishedEvent)
|
||||
def handle_flow_end(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
flow.kickoff()
|
||||
|
||||
assert isinstance(event_log[0], FlowStartedEvent)
|
||||
assert event_log[0].flow_name == "StatelessFlow"
|
||||
assert event_log[0].inputs is None
|
||||
assert isinstance(event_log[0].timestamp, datetime)
|
||||
assert isinstance(received_events[0], FlowStartedEvent)
|
||||
assert received_events[0].flow_name == "StatelessFlow"
|
||||
assert received_events[0].inputs is None
|
||||
assert isinstance(received_events[0].timestamp, datetime)
|
||||
|
||||
assert isinstance(event_log[1], MethodExecutionStartedEvent)
|
||||
assert event_log[1].method_name == "init"
|
||||
assert isinstance(received_events[1], MethodExecutionStartedEvent)
|
||||
assert received_events[1].method_name == "init"
|
||||
|
||||
assert isinstance(event_log[2], MethodExecutionFinishedEvent)
|
||||
assert event_log[2].method_name == "init"
|
||||
assert isinstance(received_events[2], MethodExecutionFinishedEvent)
|
||||
assert received_events[2].method_name == "init"
|
||||
|
||||
assert isinstance(event_log[3], MethodExecutionStartedEvent)
|
||||
assert event_log[3].method_name == "process"
|
||||
assert isinstance(received_events[3], MethodExecutionStartedEvent)
|
||||
assert received_events[3].method_name == "process"
|
||||
|
||||
assert isinstance(event_log[4], MethodExecutionFinishedEvent)
|
||||
assert event_log[4].method_name == "process"
|
||||
assert isinstance(received_events[4], MethodExecutionFinishedEvent)
|
||||
assert received_events[4].method_name == "process"
|
||||
|
||||
assert isinstance(event_log[5], FlowFinishedEvent)
|
||||
assert event_log[5].flow_name == "StatelessFlow"
|
||||
assert isinstance(received_events[5], FlowFinishedEvent)
|
||||
assert received_events[5].flow_name == "StatelessFlow"
|
||||
assert (
|
||||
event_log[5].result
|
||||
received_events[5].result
|
||||
== "Deeds will not be less valiant because they are unpraised."
|
||||
)
|
||||
assert isinstance(event_log[5].timestamp, datetime)
|
||||
assert isinstance(received_events[5].timestamp, datetime)
|
||||
|
||||
|
||||
def test_flow_plotting():
|
||||
class StatelessFlow(Flow):
|
||||
@start()
|
||||
def init(self):
|
||||
return "Initializing flow..."
|
||||
|
||||
@listen(init)
|
||||
def process(self):
|
||||
return "Deeds will not be less valiant because they are unpraised."
|
||||
|
||||
flow = StatelessFlow()
|
||||
flow.kickoff()
|
||||
received_events = []
|
||||
|
||||
@crewai_event_bus.on(FlowPlotEvent)
|
||||
def handle_flow_plot(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
flow.plot("test_flow")
|
||||
|
||||
assert len(received_events) == 1
|
||||
assert isinstance(received_events[0], FlowPlotEvent)
|
||||
assert received_events[0].flow_name == "StatelessFlow"
|
||||
assert isinstance(received_events[0].timestamp, datetime)
|
||||
|
||||
@@ -7,7 +7,8 @@ from pydantic import BaseModel
|
||||
|
||||
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
|
||||
from crewai.llm import LLM
|
||||
from crewai.tools import tool
|
||||
from crewai.utilities.events import crewai_event_bus
|
||||
from crewai.utilities.events.tool_usage_events import ToolExecutionErrorEvent
|
||||
from crewai.utilities.token_counter_callback import TokenCalcHandler
|
||||
|
||||
|
||||
@@ -291,32 +292,36 @@ def anthropic_llm():
|
||||
"""Fixture providing an Anthropic LLM instance."""
|
||||
return LLM(model="anthropic/claude-3-sonnet")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def system_message():
|
||||
"""Fixture providing a system message."""
|
||||
return {"role": "system", "content": "test"}
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def user_message():
|
||||
"""Fixture providing a user message."""
|
||||
return {"role": "user", "content": "test"}
|
||||
|
||||
|
||||
def test_anthropic_message_formatting_edge_cases(anthropic_llm):
|
||||
"""Test edge cases for Anthropic message formatting."""
|
||||
# Test None messages
|
||||
with pytest.raises(TypeError, match="Messages cannot be None"):
|
||||
anthropic_llm._format_messages_for_provider(None)
|
||||
|
||||
|
||||
# Test empty message list
|
||||
formatted = anthropic_llm._format_messages_for_provider([])
|
||||
assert len(formatted) == 1
|
||||
assert formatted[0]["role"] == "user"
|
||||
assert formatted[0]["content"] == "."
|
||||
|
||||
|
||||
# Test invalid message format
|
||||
with pytest.raises(TypeError, match="Invalid message format"):
|
||||
anthropic_llm._format_messages_for_provider([{"invalid": "message"}])
|
||||
|
||||
|
||||
def test_anthropic_model_detection():
|
||||
"""Test Anthropic model detection with various formats."""
|
||||
models = [
|
||||
@@ -327,11 +332,12 @@ def test_anthropic_model_detection():
|
||||
("", False),
|
||||
("anthropomorphic", False), # Should not match partial words
|
||||
]
|
||||
|
||||
|
||||
for model, expected in models:
|
||||
llm = LLM(model=model)
|
||||
assert llm.is_anthropic == expected, f"Failed for model: {model}"
|
||||
|
||||
|
||||
def test_anthropic_message_formatting(anthropic_llm, system_message, user_message):
|
||||
"""Test Anthropic message formatting with fixtures."""
|
||||
# Test when first message is system
|
||||
@@ -371,3 +377,51 @@ def test_deepseek_r1_with_open_router():
|
||||
result = llm.call("What is the capital of France?")
|
||||
assert isinstance(result, str)
|
||||
assert "Paris" in result
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_tool_execution_error_event():
|
||||
llm = LLM(model="gpt-4o-mini")
|
||||
|
||||
def failing_tool(param: str) -> str:
|
||||
"""This tool always fails."""
|
||||
raise Exception("Tool execution failed!")
|
||||
|
||||
tool_schema = {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "failing_tool",
|
||||
"description": "This tool always fails.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"param": {"type": "string", "description": "A test parameter"}
|
||||
},
|
||||
"required": ["param"],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
received_events = []
|
||||
|
||||
@crewai_event_bus.on(ToolExecutionErrorEvent)
|
||||
def event_handler(source, event):
|
||||
received_events.append(event)
|
||||
|
||||
available_functions = {"failing_tool": failing_tool}
|
||||
|
||||
messages = [{"role": "user", "content": "Use the failing tool"}]
|
||||
|
||||
llm.call(
|
||||
messages,
|
||||
tools=[tool_schema],
|
||||
available_functions=available_functions,
|
||||
)
|
||||
|
||||
assert len(received_events) == 1
|
||||
event = received_events[0]
|
||||
assert isinstance(event, ToolExecutionErrorEvent)
|
||||
assert event.tool_name == "failing_tool"
|
||||
assert event.tool_args == {"param": "test"}
|
||||
assert event.tool_class == failing_tool
|
||||
assert "Tool execution failed!" in event.error
|
||||
|
||||
@@ -13,6 +13,7 @@ from crewai.flow.persistence.sqlite import SQLiteFlowPersistence
|
||||
|
||||
class TestState(FlowState):
|
||||
"""Test state model with required id field."""
|
||||
|
||||
counter: int = 0
|
||||
message: str = ""
|
||||
|
||||
@@ -73,7 +74,6 @@ def test_flow_state_restoration(tmp_path):
|
||||
|
||||
# First flow execution to create initial state
|
||||
class RestorableFlow(Flow[TestState]):
|
||||
|
||||
@start()
|
||||
@persist(persistence)
|
||||
def set_message(self):
|
||||
@@ -89,10 +89,7 @@ def test_flow_state_restoration(tmp_path):
|
||||
|
||||
# Test case 1: Restore using restore_uuid with field override
|
||||
flow2 = RestorableFlow(persistence=persistence)
|
||||
flow2.kickoff(inputs={
|
||||
"id": original_uuid,
|
||||
"counter": 43
|
||||
})
|
||||
flow2.kickoff(inputs={"id": original_uuid, "counter": 43})
|
||||
|
||||
# Verify state restoration and selective field override
|
||||
assert flow2.state.id == original_uuid
|
||||
@@ -101,10 +98,7 @@ def test_flow_state_restoration(tmp_path):
|
||||
|
||||
# Test case 2: Restore using kwargs['id']
|
||||
flow3 = RestorableFlow(persistence=persistence)
|
||||
flow3.kickoff(inputs={
|
||||
"id": original_uuid,
|
||||
"message": "Updated message"
|
||||
})
|
||||
flow3.kickoff(inputs={"id": original_uuid, "message": "Updated message"})
|
||||
|
||||
# Verify state restoration and selective field override
|
||||
assert flow3.state.id == original_uuid
|
||||
@@ -175,8 +169,12 @@ def test_multiple_method_persistence(tmp_path):
|
||||
assert final_state.counter == 99999
|
||||
assert final_state.message == "Step 99999"
|
||||
|
||||
|
||||
def test_persist_decorator_verbose_logging(tmp_path, caplog):
|
||||
"""Test that @persist decorator's verbose parameter controls logging."""
|
||||
# Set logging level to ensure we capture all logs
|
||||
caplog.set_level("INFO")
|
||||
|
||||
db_path = os.path.join(tmp_path, "test_flows.db")
|
||||
persistence = SQLiteFlowPersistence(db_path)
|
||||
|
||||
@@ -192,7 +190,7 @@ def test_persist_decorator_verbose_logging(tmp_path, caplog):
|
||||
|
||||
flow = QuietFlow(persistence=persistence)
|
||||
flow.kickoff()
|
||||
assert "Saving flow state to memory for ID: test-uuid-1" not in caplog.text
|
||||
assert "Saving flow state" not in caplog.text
|
||||
|
||||
# Clear the log
|
||||
caplog.clear()
|
||||
@@ -209,4 +207,4 @@ def test_persist_decorator_verbose_logging(tmp_path, caplog):
|
||||
|
||||
flow = VerboseFlow(persistence=persistence)
|
||||
flow.kickoff()
|
||||
assert "Saving flow state to memory for ID: test-uuid-2" in caplog.text
|
||||
assert "Saving flow state" in caplog.text
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import json
|
||||
import random
|
||||
from unittest.mock import MagicMock
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from pydantic import BaseModel, Field
|
||||
@@ -8,6 +8,11 @@ from pydantic import BaseModel, Field
|
||||
from crewai import Agent, Task
|
||||
from crewai.tools import BaseTool
|
||||
from crewai.tools.tool_usage import ToolUsage
|
||||
from crewai.utilities.events import crewai_event_bus
|
||||
from crewai.utilities.events.tool_usage_events import (
|
||||
ToolSelectionErrorEvent,
|
||||
ToolValidateInputErrorEvent,
|
||||
)
|
||||
|
||||
|
||||
class RandomNumberToolInput(BaseModel):
|
||||
@@ -226,7 +231,7 @@ def test_validate_tool_input_with_special_characters():
|
||||
)
|
||||
|
||||
# Input with special characters
|
||||
tool_input = '{"message": "Hello, world! \u263A", "valid": True}'
|
||||
tool_input = '{"message": "Hello, world! \u263a", "valid": True}'
|
||||
expected_arguments = {"message": "Hello, world! ☺", "valid": True}
|
||||
|
||||
arguments = tool_usage._validate_tool_input(tool_input)
|
||||
@@ -331,6 +336,19 @@ def test_validate_tool_input_with_trailing_commas():
|
||||
|
||||
|
||||
def test_validate_tool_input_invalid_input():
|
||||
# Create mock agent with proper string values
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.key = "test_agent_key" # Must be a string
|
||||
mock_agent.role = "test_agent_role" # Must be a string
|
||||
mock_agent._original_role = "test_agent_role" # Must be a string
|
||||
mock_agent.i18n = MagicMock()
|
||||
mock_agent.verbose = False
|
||||
|
||||
# Create mock action with proper string value
|
||||
mock_action = MagicMock()
|
||||
mock_action.tool = "test_tool" # Must be a string
|
||||
mock_action.tool_input = "test_input" # Must be a string
|
||||
|
||||
tool_usage = ToolUsage(
|
||||
tools_handler=MagicMock(),
|
||||
tools=[],
|
||||
@@ -339,8 +357,8 @@ def test_validate_tool_input_invalid_input():
|
||||
tools_names="",
|
||||
task=MagicMock(),
|
||||
function_calling_llm=None,
|
||||
agent=MagicMock(),
|
||||
action=MagicMock(),
|
||||
agent=mock_agent,
|
||||
action=mock_action,
|
||||
)
|
||||
|
||||
invalid_inputs = [
|
||||
@@ -360,7 +378,7 @@ def test_validate_tool_input_invalid_input():
|
||||
|
||||
# Test for None input separately
|
||||
arguments = tool_usage._validate_tool_input(None)
|
||||
assert arguments == {} # Expecting an empty dictionary
|
||||
assert arguments == {}
|
||||
|
||||
|
||||
def test_validate_tool_input_complex_structure():
|
||||
@@ -468,18 +486,141 @@ def test_validate_tool_input_large_json_content():
    assert arguments == expected_arguments


def test_validate_tool_input_none_input():
def test_tool_selection_error_event_direct():
    """Test tool selection error event emission directly from ToolUsage class."""
    mock_agent = MagicMock()
    mock_agent.key = "test_key"
    mock_agent.role = "test_role"
    mock_agent.i18n = MagicMock()
    mock_agent.verbose = False

    mock_task = MagicMock()
    mock_tools_handler = MagicMock()

    class TestTool(BaseTool):
        name: str = "Test Tool"
        description: str = "A test tool"

        def _run(self, input: dict) -> str:
            return "test result"

    test_tool = TestTool()

    tool_usage = ToolUsage(
        tools_handler=MagicMock(),
        tools=[],
        original_tools=[],
        tools_description="",
        tools_names="",
        task=MagicMock(),
        tools_handler=mock_tools_handler,
        tools=[test_tool],
        original_tools=[test_tool],
        tools_description="Test Tool Description",
        tools_names="Test Tool",
        task=mock_task,
        function_calling_llm=None,
        agent=MagicMock(),
        agent=mock_agent,
        action=MagicMock(),
    )

    arguments = tool_usage._validate_tool_input(None)
    assert arguments == {}  # Expecting an empty dictionary
    received_events = []

    @crewai_event_bus.on(ToolSelectionErrorEvent)
    def event_handler(source, event):
        received_events.append(event)

    with pytest.raises(Exception) as exc_info:
        tool_usage._select_tool("Non Existent Tool")
    assert len(received_events) == 1
    event = received_events[0]
    assert isinstance(event, ToolSelectionErrorEvent)
    assert event.agent_key == "test_key"
    assert event.agent_role == "test_role"
    assert event.tool_name == "Non Existent Tool"
    assert event.tool_args == {}
    assert event.tool_class == "Test Tool Description"
    assert "don't exist" in event.error

    received_events.clear()
    with pytest.raises(Exception) as exc_info:
        tool_usage._select_tool("")

    assert len(received_events) == 1
    event = received_events[0]
    assert isinstance(event, ToolSelectionErrorEvent)
    assert event.agent_key == "test_key"
    assert event.agent_role == "test_role"
    assert event.tool_name == ""
    assert event.tool_args == {}
    assert event.tool_class == "Test Tool Description"
    assert "forgot the Action name" in event.error

def test_tool_validate_input_error_event():
    """Test tool validation input error event emission from ToolUsage class."""
    # Mock agent and required components
    mock_agent = MagicMock()
    mock_agent.key = "test_key"
    mock_agent.role = "test_role"
    mock_agent.verbose = False
    mock_agent._original_role = "test_role"

    # Mock i18n with error message
    mock_i18n = MagicMock()
    mock_i18n.errors.return_value = (
        "Tool input must be a valid dictionary in JSON or Python literal format"
    )
    mock_agent.i18n = mock_i18n

    # Mock task and tools handler
    mock_task = MagicMock()
    mock_tools_handler = MagicMock()

    # Mock printer
    mock_printer = MagicMock()

    # Create test tool
    class TestTool(BaseTool):
        name: str = "Test Tool"
        description: str = "A test tool"

        def _run(self, input: dict) -> str:
            return "test result"

    test_tool = TestTool()

    # Create ToolUsage instance
    tool_usage = ToolUsage(
        tools_handler=mock_tools_handler,
        tools=[test_tool],
        original_tools=[test_tool],
        tools_description="Test Tool Description",
        tools_names="Test Tool",
        task=mock_task,
        function_calling_llm=None,
        agent=mock_agent,
        action=MagicMock(tool="test_tool"),
    )
    tool_usage._printer = mock_printer

    # Mock all parsing attempts to fail
    with (
        patch("json.loads", side_effect=json.JSONDecodeError("Test Error", "", 0)),
        patch("ast.literal_eval", side_effect=ValueError),
        patch("json5.loads", side_effect=json.JSONDecodeError("Test Error", "", 0)),
        patch("json_repair.repair_json", side_effect=Exception("Failed to repair")),
    ):
        received_events = []

        @crewai_event_bus.on(ToolValidateInputErrorEvent)
        def event_handler(source, event):
            received_events.append(event)

        # Test invalid input
        invalid_input = "invalid json {[}"
        with pytest.raises(Exception) as exc_info:
            tool_usage._validate_tool_input(invalid_input)

        # Verify event was emitted
        assert len(received_events) == 1, "Expected one event to be emitted"
        event = received_events[0]
        assert isinstance(event, ToolValidateInputErrorEvent)
        assert event.agent_key == "test_key"
        assert event.agent_role == "test_role"
        assert event.tool_name == "test_tool"
        assert "must be a valid dictionary" in event.error
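
The same listener pattern these tests exercise can be used outside the test suite. A minimal sketch, using only the event bus decorator and event fields shown in the diff above; the logging sink is illustrative and not part of this change.

# Minimal sketch: subscribe to the tool-usage error events from application code.
import logging

from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import (
    ToolSelectionErrorEvent,
    ToolValidateInputErrorEvent,
)

logger = logging.getLogger("tool_errors")


@crewai_event_bus.on(ToolSelectionErrorEvent)
def log_tool_selection_error(source, event):
    # Same fields the tests assert on: agent_key, agent_role, tool_name, error.
    logger.warning(
        "Agent %s (%s) selected unknown tool %r: %s",
        event.agent_role, event.agent_key, event.tool_name, event.error,
    )


@crewai_event_bus.on(ToolValidateInputErrorEvent)
def log_tool_input_error(source, event):
    logger.warning(
        "Agent %s could not validate input for %r: %s",
        event.agent_role, event.tool_name, event.error,
    )
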
@@ -0,0 +1,243 @@
interactions:
- request:
    method: POST
    uri: https://api.openai.com/v1/chat/completions
    body: gpt-4o-mini chat completion for the base_agent "Just say hi" task
      (system prompt "You are base_agent. You are a helpful assistant that just says hi...",
      user message "Current Task: Just say hi", stop ["\nObservation:"])
  response:
    content: chatcmpl-AzTXAk4GatJOmLO9sEOCCITIjf1Dx, assistant message
      "I now can give a great answer \nFinal Answer: hi", 173 total tokens
    status_code: 200
- request:
    method: POST
    uri: https://api.openai.com/v1/chat/completions
    body: gpt-4o-mini TaskEvaluation tool call assessing the completed "Just say hi" task
  response:
    content: chatcmpl-AzTXDcgKWq3yosIyBal8LcY8dDrn1, TaskEvaluation arguments with two
      suggestions, quality 10 and no entities, 316 total tokens
    status_code: 200
version: 1
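
These cassettes back the new crew event tests by replaying the recorded OpenAI calls. A hedged sketch of the kind of test that consumes them, assuming pytest-vcr-style cassette playback and crew kickoff event classes named after these cassette files; the @pytest.mark.vcr decorator and the crew_events import path are assumptions, not part of this diff.

import pytest

from crewai import Agent, Crew, Task
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.crew_events import (
    CrewKickoffCompletedEvent,
    CrewKickoffStartedEvent,
)


@pytest.mark.vcr(filter_headers=["authorization"])  # assumed cassette playback setup
def test_crew_emits_kickoff_events():
    received = []

    @crewai_event_bus.on(CrewKickoffStartedEvent)
    def on_start(source, event):
        received.append(event)

    @crewai_event_bus.on(CrewKickoffCompletedEvent)
    def on_end(source, event):
        received.append(event)

    # Agent, task, and expected output mirror the prompts recorded in the cassettes.
    agent = Agent(
        role="base_agent",
        goal="Just say hi",
        backstory="You are a helpful assistant that just says hi",
    )
    task = Task(description="Just say hi", expected_output="hi", agent=agent)
    Crew(agents=[agent], tasks=[task]).kickoff()

    assert any(isinstance(e, CrewKickoffStartedEvent) for e in received)
    assert any(isinstance(e, CrewKickoffCompletedEvent) for e in received)
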
315 tests/utilities/cassettes/test_crew_emits_end_kickoff_event.yaml Normal file
@@ -0,0 +1,315 @@
interactions:
- request:
    method: POST
    uri: https://api.openai.com/v1/chat/completions
    body: gpt-4o-mini chat completion for the base_agent "Just say hi" task
      (system prompt "You are base_agent. You are a helpful assistant that just says hi...",
      user message "Current Task: Just say hi", stop ["\nObservation:"])
  response:
    content: chatcmpl-AxJK2OCJSkUj1plgbj59b4dC39QV2, assistant message
      "I now can give a great answer \nFinal Answer: hi", 173 total tokens
    status_code: 200
- request:
    method: POST
    uri: https://telemetry.crewai.com:4319/v1/traces
    body: !!binary protobuf payload of crewAI-telemetry "Crew Created" / "Task Created" spans
      (crewai_version 0.100.0, python_version 3.12.8, sequential process, one agent, one task)
  response:
    body:
      string: "\n\0"
    status:
      code: 200
      message: OK
- request:
    method: POST
    uri: https://api.openai.com/v1/chat/completions
    body: gpt-4o-mini TaskEvaluation tool call assessing the completed "Just say hi" task
  response:
    content: chatcmpl-AxJK3bJiyqGhPeqdCcCjoeNavGHrR, TaskEvaluation arguments with three
      suggestions, quality 10 and no entities, 323 total tokens
    status_code: 200
version: 1
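
Each cassette ends with a TaskEvaluation tool call whose JSON schema is embedded in the recorded request body. A sketch of the Pydantic models that schema implies, reconstructed from the field names and descriptions in the cassettes; the exact class layout in the library is an assumption.

from pydantic import BaseModel, Field


class Entity(BaseModel):
    name: str = Field(description="The name of the entity.")
    type: str = Field(description="The type of the entity.")
    description: str = Field(description="Description of the entity.")
    relationships: list[str] = Field(description="Relationships of the entity.")


class TaskEvaluation(BaseModel):
    suggestions: list[str] = Field(
        description="Suggestions to improve future similar tasks."
    )
    quality: float = Field(
        description=(
            "A score from 0 to 10 evaluating on completion, quality, and overall "
            "performance, all taking into account the task description, expected "
            "output, and the result of the task."
        )
    )
    entities: list[Entity] = Field(
        description="Entities extracted from the task output."
    )
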
357 tests/utilities/cassettes/test_crew_emits_end_task_event.yaml Normal file
@@ -0,0 +1,357 @@
interactions:
- request:
    method: POST
    uri: https://api.openai.com/v1/chat/completions
    body: gpt-4o-mini chat completion for the base_agent "Just say hi" task
      (system prompt "You are base_agent. You are a helpful assistant that just says hi...",
      user message "Current Task: Just say hi", stop ["\nObservation:"])
  response:
    content: chatcmpl-AxJlK2np8dMxYgsDIuyz2TSKKELWh, assistant message
      "I now can give a great answer \nFinal Answer: hi", 173 total tokens
    status_code: 200
- request:
    method: POST
    uri: https://telemetry.crewai.com:4319/v1/traces
    body: !!binary protobuf payload of crewAI-telemetry "Crew Created" / "Task Created" spans
      (crewai_version 0.100.0, python_version 3.12.8, sequential process, one agent, one task)
  response:
    body:
      string: "\n\0"
    status:
      code: 200
      message: OK
- request:
    method: POST
    uri: https://api.openai.com/v1/chat/completions
    body: gpt-4o-mini TaskEvaluation tool call assessing the completed "Just say hi" task
  response:
    content: chatcmpl-AxJlLVC3gCB9gRI0ZSkoPCZY7EwpQ, TaskEvaluation arguments with two
      suggestions, quality 10 and one "hi" greeting entity, 344 total tokens
    status_code: 200
version: 1
245 tests/utilities/cassettes/test_crew_emits_kickoff_events.yaml Normal file
@@ -0,0 +1,245 @@
interactions:
- request:
    method: POST
    uri: https://api.openai.com/v1/chat/completions
    body: gpt-4o-mini chat completion for the base_agent "Just say hi" task
      (system prompt "You are base_agent. You are a helpful assistant that just says hi...",
      user message "Current Task: Just say hi", stop ["\nObservation:"])
  response:
    content: chatcmpl-AxJIrSWAFqDEsNtLRhcM8vMHO9Ejw, assistant message
      "I now can give a great answer \nFinal Answer: hi", 173 total tokens
    status_code: 200
- request:
    method: POST
    uri: https://api.openai.com/v1/chat/completions
    body: gpt-4o-mini TaskEvaluation tool call assessing the completed "Just say hi" task
  response:
    content: chatcmpl-AxJIsVEppA04iGQh0k6sanKnVObrO, TaskEvaluation arguments with three
      suggestions, quality 10 and one "hi" greeting entity, 357 total tokens
    status_code: 200
version: 1
@@ -0,0 +1,243 @@
interactions:
- request:
    method: POST
    uri: https://api.openai.com/v1/chat/completions
    body: gpt-4o-mini chat completion for the base_agent "Just say hi" task
      (system prompt "You are base_agent. You are a helpful assistant that just says hi...",
      user message "Current Task: Just say hi", stop ["\nObservation:"])
  response:
    content: chatcmpl-AxJJzafmayYpGTsTAWbOyZkmQJNa5, assistant message
      "I now can give a great answer \nFinal Answer: hi", 173 total tokens
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 90cd395b0e641698-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 04 Feb 2025 19:56:27 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=4s6sWmJ49B9F_wNc1STtdZF1nikfl6uN9_ov3Xzfa8U-1738698987-1.0.1.1-lmbRRS1MHrDbnU93Gh16CP3qNczxxIrQnyBU7vpHSwNf6PdmuWOHKd1mkl5SBx6rg7p1NLaNUMyqDDcE0Mvjzw;
|
||||
path=/; expires=Tue, 04-Feb-25 20:26:27 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=Cl48aI8.jSRja0Pqr6Jrh3mAnigd4rDn6lhGicyjMPY-1738698987673-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '839'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999810'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_577b484a927b455c40ed80f9fd4d9106
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Assess the quality of the task
|
||||
completed based on the description, expected output, and actual results.\n\nTask
|
||||
Description:\nJust say hi\n\nExpected Output:\nhi\n\nActual Output:\nhi\n\nPlease
|
||||
provide:\n- Bullet points suggestions to improve future similar tasks\n- A score
|
||||
from 0 to 10 evaluating on completion, quality, and overall performance- Entities
|
||||
extracted from the task output, if any, their type, description, and relationships"}],
|
||||
"model": "gpt-4o-mini", "tool_choice": {"type": "function", "function": {"name":
|
||||
"TaskEvaluation"}}, "tools": [{"type": "function", "function": {"name": "TaskEvaluation",
|
||||
"description": "Correctly extracted `TaskEvaluation` with all the required parameters
|
||||
with correct types", "parameters": {"$defs": {"Entity": {"properties": {"name":
|
||||
{"description": "The name of the entity.", "title": "Name", "type": "string"},
|
||||
"type": {"description": "The type of the entity.", "title": "Type", "type":
|
||||
"string"}, "description": {"description": "Description of the entity.", "title":
|
||||
"Description", "type": "string"}, "relationships": {"description": "Relationships
|
||||
of the entity.", "items": {"type": "string"}, "title": "Relationships", "type":
|
||||
"array"}}, "required": ["name", "type", "description", "relationships"], "title":
|
||||
"Entity", "type": "object"}}, "properties": {"suggestions": {"description":
|
||||
"Suggestions to improve future similar tasks.", "items": {"type": "string"},
|
||||
"title": "Suggestions", "type": "array"}, "quality": {"description": "A score
|
||||
from 0 to 10 evaluating on completion, quality, and overall performance, all
|
||||
taking into account the task description, expected output, and the result of
|
||||
the task.", "title": "Quality", "type": "number"}, "entities": {"description":
|
||||
"Entities extracted from the task output.", "items": {"$ref": "#/$defs/Entity"},
|
||||
"title": "Entities", "type": "array"}}, "required": ["entities", "quality",
|
||||
"suggestions"], "type": "object"}}}]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1962'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=4s6sWmJ49B9F_wNc1STtdZF1nikfl6uN9_ov3Xzfa8U-1738698987-1.0.1.1-lmbRRS1MHrDbnU93Gh16CP3qNczxxIrQnyBU7vpHSwNf6PdmuWOHKd1mkl5SBx6rg7p1NLaNUMyqDDcE0Mvjzw;
|
||||
_cfuvid=Cl48aI8.jSRja0Pqr6Jrh3mAnigd4rDn6lhGicyjMPY-1738698987673-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.61.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.61.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.8
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AxJJz10KP7iadNPdKsbcsvHBa7cic\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1738698987,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_czeHQgy5eiOVa0zlrtcfwepe\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"TaskEvaluation\",\n
|
||||
\ \"arguments\": \"{\\\"suggestions\\\":[\\\"Provide more context
|
||||
or details for similar tasks to enhance output expectations.\\\",\\\"Encourage
|
||||
creativity in responses for simple tasks to engage users more effectively.\\\"],\\\"quality\\\":10,\\\"entities\\\":[]
|
||||
}\"\n }\n }\n ],\n \"refusal\": null\n },\n
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
|
||||
\ \"usage\": {\n \"prompt_tokens\": 273,\n \"completion_tokens\": 40,\n
|
||||
\ \"total_tokens\": 313,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_bd83329f63\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 90cd39615b281698-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 04 Feb 2025 19:56:29 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1411'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999876'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_3e717a80c7d9c5ea19893dd990aaae26
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
245 tests/utilities/cassettes/test_crew_emits_start_task_event.yaml Normal file
@@ -0,0 +1,245 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
|
||||
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
|
||||
final answer: hi\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
|
||||
"gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '836'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=4s6sWmJ49B9F_wNc1STtdZF1nikfl6uN9_ov3Xzfa8U-1738698987-1.0.1.1-lmbRRS1MHrDbnU93Gh16CP3qNczxxIrQnyBU7vpHSwNf6PdmuWOHKd1mkl5SBx6rg7p1NLaNUMyqDDcE0Mvjzw;
|
||||
_cfuvid=Cl48aI8.jSRja0Pqr6Jrh3mAnigd4rDn6lhGicyjMPY-1738698987673-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.61.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.61.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.8
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AxJiiHEQwIXsiG0Sd5wofcuhxVbo9\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1738700520,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
|
||||
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 90cd5ecd0f7667ee-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 04 Feb 2025 20:22:01 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=nedOdWE1YnKQYt1kSbrcA.zhwa3bZDzmZqTOjZYER0c-1738700521-1.0.1.1-xQk9iXOvqvyXNhkIOgc8Ws2WYcT1mJFkDCvCC8xA5joFD8QfNrBIAr_Qs6sIxt2EzXyeFwBA6gA8ZgWApCHx0Q;
|
||||
path=/; expires=Tue, 04-Feb-25 20:52:01 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '450'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999810'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_10eaafc81640a98a0a4789d270dd94d9
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Assess the quality of the task
|
||||
completed based on the description, expected output, and actual results.\n\nTask
|
||||
Description:\nJust say hi\n\nExpected Output:\nhi\n\nActual Output:\nhi\n\nPlease
|
||||
provide:\n- Bullet points suggestions to improve future similar tasks\n- A score
|
||||
from 0 to 10 evaluating on completion, quality, and overall performance- Entities
|
||||
extracted from the task output, if any, their type, description, and relationships"}],
|
||||
"model": "gpt-4o-mini", "tool_choice": {"type": "function", "function": {"name":
|
||||
"TaskEvaluation"}}, "tools": [{"type": "function", "function": {"name": "TaskEvaluation",
|
||||
"description": "Correctly extracted `TaskEvaluation` with all the required parameters
|
||||
with correct types", "parameters": {"$defs": {"Entity": {"properties": {"name":
|
||||
{"description": "The name of the entity.", "title": "Name", "type": "string"},
|
||||
"type": {"description": "The type of the entity.", "title": "Type", "type":
|
||||
"string"}, "description": {"description": "Description of the entity.", "title":
|
||||
"Description", "type": "string"}, "relationships": {"description": "Relationships
|
||||
of the entity.", "items": {"type": "string"}, "title": "Relationships", "type":
|
||||
"array"}}, "required": ["name", "type", "description", "relationships"], "title":
|
||||
"Entity", "type": "object"}}, "properties": {"suggestions": {"description":
|
||||
"Suggestions to improve future similar tasks.", "items": {"type": "string"},
|
||||
"title": "Suggestions", "type": "array"}, "quality": {"description": "A score
|
||||
from 0 to 10 evaluating on completion, quality, and overall performance, all
|
||||
taking into account the task description, expected output, and the result of
|
||||
the task.", "title": "Quality", "type": "number"}, "entities": {"description":
|
||||
"Entities extracted from the task output.", "items": {"$ref": "#/$defs/Entity"},
|
||||
"title": "Entities", "type": "array"}}, "required": ["entities", "quality",
|
||||
"suggestions"], "type": "object"}}}]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1962'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=nedOdWE1YnKQYt1kSbrcA.zhwa3bZDzmZqTOjZYER0c-1738700521-1.0.1.1-xQk9iXOvqvyXNhkIOgc8Ws2WYcT1mJFkDCvCC8xA5joFD8QfNrBIAr_Qs6sIxt2EzXyeFwBA6gA8ZgWApCHx0Q;
|
||||
_cfuvid=Cl48aI8.jSRja0Pqr6Jrh3mAnigd4rDn6lhGicyjMPY-1738698987673-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.61.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.61.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.8
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AxJijOhk12Ua6lS23IwtZTachfjq9\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1738700521,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_DSteeMHHPf5RanJb8qjCo4qx\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"TaskEvaluation\",\n
|
||||
\ \"arguments\": \"{\\\"suggestions\\\":[\\\"Consider adding context
|
||||
for the greeting to make it more engaging.\\\",\\\"Specify if any additional
|
||||
information or tone is desired in the greeting.\\\"],\\\"quality\\\":10,\\\"entities\\\":[{\\\"name\\\":\\\"greeting\\\",\\\"type\\\":\\\"text\\\",\\\"description\\\":\\\"A
|
||||
simple greeting phrase\\\",\\\"relationships\\\":[\\\"is a\\\",\\\"is part of
|
||||
a conversation\\\"]}]}\"\n }\n }\n ],\n \"refusal\":
|
||||
null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 273,\n \"completion_tokens\":
|
||||
67,\n \"total_tokens\": 340,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_bd83329f63\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 90cd5ed20cb267ee-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 04 Feb 2025 20:22:02 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1624'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999876'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_4ee944acdd3928afbf6c5562403b064a
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
114 tests/utilities/cassettes/test_crew_emits_task_failed_event.yaml Normal file
@@ -0,0 +1,114 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
|
||||
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
|
||||
final answer: hi\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
|
||||
"gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '836'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.61.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.61.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.8
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AzpkZLpCyjKT5d6Udfx4zAme2sOMy\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1739300299,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
|
||||
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 910691d3ab90ebef-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Feb 2025 18:58:20 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=MOH5EY6n3p8JKY53.yz7qzLuLYsEB8QdQXH09loUMBM-1739300300-1.0.1.1-hjb4mk04sMygPFhoFyiySKZSqB_fN5PbhbOyn.kipa3.eLvk7EtriDyjvGkBFIAV13DYnc08BfF_l2kxdx9hfQ;
|
||||
path=/; expires=Tue, 11-Feb-25 19:28:20 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=uu.cEiV.FfgvSvCdKOooDYJWrwjVEuFeGdQodijGUUI-1739300300232-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1357'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999810'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_2277503f851195e7d7a43b66eb044454
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
@@ -0,0 +1,111 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
|
||||
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
|
||||
final answer: hi\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
|
||||
"gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '836'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=gsNyCo_jrDOolzf8SXHDaxQQrEgdR3jgv4OAH8MziDE-1739291824699-0.0.1.1-604800000;
|
||||
__cf_bm=cRijYuylMGzRGxv3udQL5PhHOR5mRN_9_eLLwevlM_o-1739299455-1.0.1.1-Fszr_Msw0B1.IBMkiunP.VF2ilul1YGZZV8TqMcO3Q2SHvSlqfgm9NHgns1bJrm0wWRvHiCE7wdZfUAOx7T3Lg
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.61.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.61.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.8
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AzpWx6pctOvzu6xsbyg0XfSAc0q9V\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1739299455,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
|
||||
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 91067d3ddc68fa16-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Feb 2025 18:44:16 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '703'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999810'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_89222c00e4608e8557a135e91b223556
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
@@ -0,0 +1,114 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
|
||||
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Just say hi\n\nThis is the expect criteria for your
|
||||
final answer: hi\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
|
||||
"gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '836'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=gsNyCo_jrDOolzf8SXHDaxQQrEgdR3jgv4OAH8MziDE-1739291824699-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.61.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.61.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.8
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AzpWxLzAcRzigZuIGmjP3ckQgxAom\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1739299455,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
|
||||
Answer: hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 91067d389e90fa16-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Feb 2025 18:44:15 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=cRijYuylMGzRGxv3udQL5PhHOR5mRN_9_eLLwevlM_o-1739299455-1.0.1.1-Fszr_Msw0B1.IBMkiunP.VF2ilul1YGZZV8TqMcO3Q2SHvSlqfgm9NHgns1bJrm0wWRvHiCE7wdZfUAOx7T3Lg;
|
||||
path=/; expires=Tue, 11-Feb-25 19:14:15 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '716'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999810'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_ef807dc3223d40332aae8a313e96ef3a
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
File diff suppressed because it is too large
7984 tests/utilities/cassettes/test_tools_emits_error_events.yaml Normal file
File diff suppressed because it is too large
512 tests/utilities/cassettes/test_tools_emits_finished_events.yaml Normal file
@@ -0,0 +1,512 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
|
||||
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nYou
|
||||
ONLY have access to the following tools, and should NEVER make up tools that
|
||||
are not listed here:\n\nTool Name: say_hi\nTool Arguments: {}\nTool Description:
|
||||
Say hi\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
|
||||
you should always think about what to do\nAction: the action to take, only one
|
||||
name of [say_hi], just the name, exactly as it''s written.\nAction Input: the
|
||||
input to the action, just a simple JSON object, enclosed in curly braces, using
|
||||
\" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
|
||||
all necessary information is gathered, return the following format:\n\n```\nThought:
|
||||
I now know the final answer\nFinal Answer: the final answer to the original
|
||||
input question\n```"}, {"role": "user", "content": "\nCurrent Task: Just say
|
||||
hi\n\nThis is the expect criteria for your final answer: hi\nyou MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1275'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=efIHP1NUsh1dFewGJBu4YoBu6hhGa8vjOOKQglYQGno-1739214901306-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.61.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.61.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.8
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AzUA6kJQfpUvB4CGot4gSfAIR0foh\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1739217314,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"you should always think about what to
|
||||
do \\nAction: say_hi \\nAction Input: {} \",\n \"refusal\": null\n
|
||||
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
|
||||
\ ],\n \"usage\": {\n \"prompt_tokens\": 257,\n \"completion_tokens\":
|
||||
19,\n \"total_tokens\": 276,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 90fea7d78e1fceb9-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 10 Feb 2025 19:55:15 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=fmlg1wjOwuOwZhUUOEtL1tQYluAPumn7AHLF8s0EU2Y-1739217315-1.0.1.1-PQDvxn8TOhzaznlHjwVsqPZUzbAyJWFkvzCubfNJydTu2_AyA1cJ8hkM0khsEE4UY_xp8iPe2gSGmH1ydrDa0Q;
|
||||
path=/; expires=Mon, 10-Feb-25 20:25:15 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '526'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999703'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_f6358ff0cc7a2b8d2e167ab00a40f2a4
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
|
||||
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nYou
|
||||
ONLY have access to the following tools, and should NEVER make up tools that
|
||||
are not listed here:\n\nTool Name: say_hi\nTool Arguments: {}\nTool Description:
|
||||
Say hi\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
|
||||
you should always think about what to do\nAction: the action to take, only one
|
||||
name of [say_hi], just the name, exactly as it''s written.\nAction Input: the
|
||||
input to the action, just a simple JSON object, enclosed in curly braces, using
|
||||
\" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
|
||||
all necessary information is gathered, return the following format:\n\n```\nThought:
|
||||
I now know the final answer\nFinal Answer: the final answer to the original
|
||||
input question\n```"}, {"role": "user", "content": "\nCurrent Task: Just say
|
||||
hi\n\nThis is the expect criteria for your final answer: hi\nyou MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"}, {"role": "assistant", "content": "you
|
||||
should always think about what to do \nAction: say_hi \nAction Input: {} \nObservation:
|
||||
hi"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1410'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=efIHP1NUsh1dFewGJBu4YoBu6hhGa8vjOOKQglYQGno-1739214901306-0.0.1.1-604800000;
|
||||
__cf_bm=fmlg1wjOwuOwZhUUOEtL1tQYluAPumn7AHLF8s0EU2Y-1739217315-1.0.1.1-PQDvxn8TOhzaznlHjwVsqPZUzbAyJWFkvzCubfNJydTu2_AyA1cJ8hkM0khsEE4UY_xp8iPe2gSGmH1ydrDa0Q
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.61.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.61.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.8
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AzUA7QdlQy1WZZijxNWUv25sZycg0\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1739217315,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
|
||||
Answer: hi\\n```\",\n \"refusal\": null\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
283,\n \"completion_tokens\": 17,\n \"total_tokens\": 300,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 90fea7dc5ba6ceb9-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 10 Feb 2025 19:55:15 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '388'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999680'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_7d7c68b90b3a9c3ac6092fe17ac1185a
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: !!binary |
|
||||
CoMzCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS2jIKEgoQY3Jld2FpLnRl
|
||||
bGVtZXRyeRKOAgoQ2EINIGZRoXD589od63oHmBIIMfUgEWudUbIqDFRhc2sgQ3JlYXRlZDABOcjI
|
||||
7lbu8CIYQZB471bu8CIYSi4KCGNyZXdfa2V5EiIKIGU1ODA3MDFkNTJlYjY1YWZmMjRlZWZlNzhj
|
||||
NzQ2MjhjSjEKB2NyZXdfaWQSJgokNTE4ODdiOTktY2FlMy00Yjc4LWJjMGEtMDY4MmVmNWEzNGQ0
|
||||
Si4KCHRhc2tfa2V5EiIKIDFiMTVlZjIzOTE1YjI3NTVlODlhMGVjM2IyNmExM2QySjEKB3Rhc2tf
|
||||
aWQSJgokMzlmMDlmMWUtOTJmOC00ZGJiLTgzNDAtNjU2ZmVkMDk3ZjM0egIYAYUBAAEAABKkBwoQ
|
||||
RzhWoF6ewSTS/qUc9yeFRhIIM3SNZCwjz5AqDENyZXcgQ3JlYXRlZDABOQjrGlru8CIYQdgbKVru
|
||||
8CIYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTAwLjBKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4x
|
||||
Mi44Si4KCGNyZXdfa2V5EiIKIGU1ODA3MDFkNTJlYjY1YWZmMjRlZWZlNzhjNzQ2MjhjSjEKB2Ny
|
||||
ZXdfaWQSJgokYzk4ODFkY2YtMmM0MS00ZjRlLTgzMjctNjJjYjFhYjJkOTg4ShwKDGNyZXdfcHJv
|
||||
Y2VzcxIMCgpzZXF1ZW50aWFsShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90
|
||||
YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrRAgoLY3Jld19hZ2VudHMSwQIK
|
||||
vgJbeyJrZXkiOiAiYWQxNTMxNjFjNWM1YTg1NmFhMGQwNmIyNDljNGM2NGEiLCAiaWQiOiAiNTU2
|
||||
NzJiMDgtOTU4ZC00MjljLWE3ZTctY2ZlN2U4Y2MwOGZkIiwgInJvbGUiOiAiYmFzZV9hZ2VudCIs
|
||||
ICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyMCwgIm1heF9ycG0iOiBudWxsLCAiZnVu
|
||||
Y3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8tbWluaSIsICJkZWxlZ2F0aW9u
|
||||
X2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9y
|
||||
ZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfV1K/wEKCmNyZXdfdGFza3MS8AEK7QFb
|
||||
eyJrZXkiOiAiMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2YTEzZDIiLCAiaWQiOiAiMzlmMDlm
|
||||
MWUtOTJmOC00ZGJiLTgzNDAtNjU2ZmVkMDk3ZjM0IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxz
|
||||
ZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJiYXNlX2FnZW50IiwgImFn
|
||||
ZW50X2tleSI6ICJhZDE1MzE2MWM1YzVhODU2YWEwZDA2YjI0OWM0YzY0YSIsICJ0b29sc19uYW1l
|
||||
cyI6IFtdfV16AhgBhQEAAQAAEo4CChB8AxWkb2Uwpdc8RpyCRqw5EggJAxbgNu81XyoMVGFzayBD
|
||||
cmVhdGVkMAE5+HQ8Wu7wIhhB+PE8Wu7wIhhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmViNjVh
|
||||
ZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jld19pZBImCiRjOTg4MWRjZi0yYzQxLTRmNGUtODMyNy02
|
||||
MmNiMWFiMmQ5ODhKLgoIdGFza19rZXkSIgogMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2YTEz
|
||||
ZDJKMQoHdGFza19pZBImCiQzOWYwOWYxZS05MmY4LTRkYmItODM0MC02NTZmZWQwOTdmMzR6AhgB
|
||||
hQEAAQAAEqQHChCcXvdbsgYC+gzCMrXs3LN/EgijKwJLCRIiHioMQ3JldyBDcmVhdGVkMAE5iJqz
|
||||
vu7wIhhBqKC/vu7wIhhKGwoOY3Jld2FpX3ZlcnNpb24SCQoHMC4xMDAuMEoaCg5weXRob25fdmVy
|
||||
c2lvbhIICgYzLjEyLjhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmViNjVhZmYyNGVlZmU3OGM3
|
||||
NDYyOGNKMQoHY3Jld19pZBImCiQ2Zjk1ZWI3Yy0wOWM5LTQxOTYtYWFiYi1kOWIxNmMxMzZjODdK
|
||||
HAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNyZXdf
|
||||
bnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgBStECCgtjcmV3
|
||||
X2FnZW50cxLBAgq+Alt7ImtleSI6ICJhZDE1MzE2MWM1YzVhODU2YWEwZDA2YjI0OWM0YzY0YSIs
|
||||
ICJpZCI6ICI1NTY3MmIwOC05NThkLTQyOWMtYTdlNy1jZmU3ZThjYzA4ZmQiLCAicm9sZSI6ICJi
|
||||
YXNlX2FnZW50IiwgInZlcmJvc2U/IjogZmFsc2UsICJtYXhfaXRlciI6IDIwLCAibWF4X3JwbSI6
|
||||
IG51bGwsICJmdW5jdGlvbl9jYWxsaW5nX2xsbSI6ICIiLCAibGxtIjogImdwdC00by1taW5pIiwg
|
||||
ImRlbGVnYXRpb25fZW5hYmxlZD8iOiBmYWxzZSwgImFsbG93X2NvZGVfZXhlY3V0aW9uPyI6IGZh
|
||||
bHNlLCAibWF4X3JldHJ5X2xpbWl0IjogMiwgInRvb2xzX25hbWVzIjogW119XUr/AQoKY3Jld190
|
||||
YXNrcxLwAQrtAVt7ImtleSI6ICIxYjE1ZWYyMzkxNWIyNzU1ZTg5YTBlYzNiMjZhMTNkMiIsICJp
|
||||
ZCI6ICIzOWYwOWYxZS05MmY4LTRkYmItODM0MC02NTZmZWQwOTdmMzQiLCAiYXN5bmNfZXhlY3V0
|
||||
aW9uPyI6IGZhbHNlLCAiaHVtYW5faW5wdXQ/IjogZmFsc2UsICJhZ2VudF9yb2xlIjogImJhc2Vf
|
||||
YWdlbnQiLCAiYWdlbnRfa2V5IjogImFkMTUzMTYxYzVjNWE4NTZhYTBkMDZiMjQ5YzRjNjRhIiwg
|
||||
InRvb2xzX25hbWVzIjogW119XXoCGAGFAQABAAASjgIKEExDo5nPLyHb2H8DfYjPoX4SCLEYs+24
|
||||
8EenKgxUYXNrIENyZWF0ZWQwATmI4NG+7vAiGEFYZdK+7vAiGEouCghjcmV3X2tleRIiCiBlNTgw
|
||||
NzAxZDUyZWI2NWFmZjI0ZWVmZTc4Yzc0NjI4Y0oxCgdjcmV3X2lkEiYKJDZmOTVlYjdjLTA5Yzkt
|
||||
NDE5Ni1hYWJiLWQ5YjE2YzEzNmM4N0ouCgh0YXNrX2tleRIiCiAxYjE1ZWYyMzkxNWIyNzU1ZTg5
|
||||
YTBlYzNiMjZhMTNkMkoxCgd0YXNrX2lkEiYKJDM5ZjA5ZjFlLTkyZjgtNGRiYi04MzQwLTY1NmZl
|
||||
ZDA5N2YzNHoCGAGFAQABAAASpAcKEBBQzR2bcR/7woQ+VkaJ4kQSCD1LFx3SNPPPKgxDcmV3IENy
|
||||
ZWF0ZWQwATlotsW/7vAiGEEgA9C/7vAiGEobCg5jcmV3YWlfdmVyc2lvbhIJCgcwLjEwMC4wShoK
|
||||
DnB5dGhvbl92ZXJzaW9uEggKBjMuMTIuOEouCghjcmV3X2tleRIiCiBlNTgwNzAxZDUyZWI2NWFm
|
||||
ZjI0ZWVmZTc4Yzc0NjI4Y0oxCgdjcmV3X2lkEiYKJDJiMWI2MGYzLTNlZTMtNGNjYi05MDM2LTdk
|
||||
MzE4OTJiYjVkZkocCgxjcmV3X3Byb2Nlc3MSDAoKc2VxdWVudGlhbEoRCgtjcmV3X21lbW9yeRIC
|
||||
EABKGgoUY3Jld19udW1iZXJfb2ZfdGFza3MSAhgBShsKFWNyZXdfbnVtYmVyX29mX2FnZW50cxIC
|
||||
GAFK0QIKC2NyZXdfYWdlbnRzEsECCr4CW3sia2V5IjogImFkMTUzMTYxYzVjNWE4NTZhYTBkMDZi
|
||||
MjQ5YzRjNjRhIiwgImlkIjogIjU1NjcyYjA4LTk1OGQtNDI5Yy1hN2U3LWNmZTdlOGNjMDhmZCIs
|
||||
ICJyb2xlIjogImJhc2VfYWdlbnQiLCAidmVyYm9zZT8iOiBmYWxzZSwgIm1heF9pdGVyIjogMjAs
|
||||
ICJtYXhfcnBtIjogbnVsbCwgImZ1bmN0aW9uX2NhbGxpbmdfbGxtIjogIiIsICJsbG0iOiAiZ3B0
|
||||
LTRvLW1pbmkiLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAiYWxsb3dfY29kZV9leGVj
|
||||
dXRpb24/IjogZmFsc2UsICJtYXhfcmV0cnlfbGltaXQiOiAyLCAidG9vbHNfbmFtZXMiOiBbXX1d
|
||||
Sv8BCgpjcmV3X3Rhc2tzEvABCu0BW3sia2V5IjogIjFiMTVlZjIzOTE1YjI3NTVlODlhMGVjM2Iy
|
||||
NmExM2QyIiwgImlkIjogIjM5ZjA5ZjFlLTkyZjgtNGRiYi04MzQwLTY1NmZlZDA5N2YzNCIsICJh
|
||||
c3luY19leGVjdXRpb24/IjogZmFsc2UsICJodW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3Jv
|
||||
bGUiOiAiYmFzZV9hZ2VudCIsICJhZ2VudF9rZXkiOiAiYWQxNTMxNjFjNWM1YTg1NmFhMGQwNmIy
|
||||
NDljNGM2NGEiLCAidG9vbHNfbmFtZXMiOiBbXX1degIYAYUBAAEAABKOAgoQmT07KMiFRgzOOPQf
|
||||
I4bJPhIIqzN+pCYM6IUqDFRhc2sgQ3JlYXRlZDABOYjr3r/u8CIYQehY37/u8CIYSi4KCGNyZXdf
|
||||
a2V5EiIKIGU1ODA3MDFkNTJlYjY1YWZmMjRlZWZlNzhjNzQ2MjhjSjEKB2NyZXdfaWQSJgokMmIx
|
||||
YjYwZjMtM2VlMy00Y2NiLTkwMzYtN2QzMTg5MmJiNWRmSi4KCHRhc2tfa2V5EiIKIDFiMTVlZjIz
|
||||
OTE1YjI3NTVlODlhMGVjM2IyNmExM2QySjEKB3Rhc2tfaWQSJgokMzlmMDlmMWUtOTJmOC00ZGJi
|
||||
LTgzNDAtNjU2ZmVkMDk3ZjM0egIYAYUBAAEAABKkBwoQE53vZNAWshkoNK1bqTvovRII83djkBUL
|
||||
EbcqDENyZXcgQ3JlYXRlZDABORBBzsDu8CIYQbAU2MDu8CIYShsKDmNyZXdhaV92ZXJzaW9uEgkK
|
||||
BzAuMTAwLjBKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4xMi44Si4KCGNyZXdfa2V5EiIKIGU1ODA3
|
||||
MDFkNTJlYjY1YWZmMjRlZWZlNzhjNzQ2MjhjSjEKB2NyZXdfaWQSJgokNTQ0MWY0MWYtOTVjMC00
|
||||
YzdkLTkxM2QtNDUxODcwY2YyZjYzShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEKC2Ny
|
||||
ZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJf
|
||||
b2ZfYWdlbnRzEgIYAUrRAgoLY3Jld19hZ2VudHMSwQIKvgJbeyJrZXkiOiAiYWQxNTMxNjFjNWM1
|
||||
YTg1NmFhMGQwNmIyNDljNGM2NGEiLCAiaWQiOiAiNTU2NzJiMDgtOTU4ZC00MjljLWE3ZTctY2Zl
|
||||
N2U4Y2MwOGZkIiwgInJvbGUiOiAiYmFzZV9hZ2VudCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4
|
||||
X2l0ZXIiOiAyMCwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwg
|
||||
ImxsbSI6ICJncHQtNG8tbWluaSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxv
|
||||
d19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19u
|
||||
YW1lcyI6IFtdfV1K/wEKCmNyZXdfdGFza3MS8AEK7QFbeyJrZXkiOiAiMWIxNWVmMjM5MTViMjc1
|
||||
NWU4OWEwZWMzYjI2YTEzZDIiLCAiaWQiOiAiMzlmMDlmMWUtOTJmOC00ZGJiLTgzNDAtNjU2ZmVk
|
||||
MDk3ZjM0IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNl
|
||||
LCAiYWdlbnRfcm9sZSI6ICJiYXNlX2FnZW50IiwgImFnZW50X2tleSI6ICJhZDE1MzE2MWM1YzVh
|
||||
ODU2YWEwZDA2YjI0OWM0YzY0YSIsICJ0b29sc19uYW1lcyI6IFtdfV16AhgBhQEAAQAAEo4CChBV
|
||||
JNEz3VIdOlQM9VT3bctVEgisogN707a2AioMVGFzayBDcmVhdGVkMAE5kGbnwO7wIhhBaMDnwO7w
|
||||
IhhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmViNjVhZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jl
|
||||
d19pZBImCiQ1NDQxZjQxZi05NWMwLTRjN2QtOTEzZC00NTE4NzBjZjJmNjNKLgoIdGFza19rZXkS
|
||||
IgogMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2YTEzZDJKMQoHdGFza19pZBImCiQzOWYwOWYx
|
||||
ZS05MmY4LTRkYmItODM0MC02NTZmZWQwOTdmMzR6AhgBhQEAAQAAErQHChDA7zaLCfy56rd5t3oS
|
||||
rDPZEgjYoSW3mq6WJyoMQ3JldyBDcmVhdGVkMAE5cP/5we7wIhhBIH0Dwu7wIhhKGwoOY3Jld2Fp
|
||||
X3ZlcnNpb24SCQoHMC4xMDAuMEoaCg5weXRob25fdmVyc2lvbhIICgYzLjEyLjhKLgoIY3Jld19r
|
||||
ZXkSIgogZTU4MDcwMWQ1MmViNjVhZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jld19pZBImCiRmNjcz
|
||||
MTc1ZS04Y2Q1LTQ1ZWUtYTZiOS0xYWFjMTliODQxZWJKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVl
|
||||
bnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVj
|
||||
cmV3X251bWJlcl9vZl9hZ2VudHMSAhgBStkCCgtjcmV3X2FnZW50cxLJAgrGAlt7ImtleSI6ICJh
|
||||
ZDE1MzE2MWM1YzVhODU2YWEwZDA2YjI0OWM0YzY0YSIsICJpZCI6ICJmMGUwMGIzZi0wZWNmLTQ2
|
||||
OGQtYjdjMC0yZmJhN2I5OTc5YjMiLCAicm9sZSI6ICJiYXNlX2FnZW50IiwgInZlcmJvc2U/Ijog
|
||||
ZmFsc2UsICJtYXhfaXRlciI6IDIwLCAibWF4X3JwbSI6IG51bGwsICJmdW5jdGlvbl9jYWxsaW5n
|
||||
X2xsbSI6ICIiLCAibGxtIjogImdwdC00by1taW5pIiwgImRlbGVnYXRpb25fZW5hYmxlZD8iOiBm
|
||||
YWxzZSwgImFsbG93X2NvZGVfZXhlY3V0aW9uPyI6IGZhbHNlLCAibWF4X3JldHJ5X2xpbWl0Ijog
|
||||
MiwgInRvb2xzX25hbWVzIjogWyJzYXlfaGkiXX1dSocCCgpjcmV3X3Rhc2tzEvgBCvUBW3sia2V5
|
||||
IjogIjFiMTVlZjIzOTE1YjI3NTVlODlhMGVjM2IyNmExM2QyIiwgImlkIjogImFhMGFmMmE2LTdm
|
||||
MTktNDZmNi1iMjMxLTg1M2JjYzYxYzhiZiIsICJhc3luY19leGVjdXRpb24/IjogZmFsc2UsICJo
|
||||
      dW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAiYmFzZV9hZ2VudCIsICJhZ2VudF9r
      ZXkiOiAiYWQxNTMxNjFjNWM1YTg1NmFhMGQwNmIyNDljNGM2NGEiLCAidG9vbHNfbmFtZXMiOiBb
      InNheV9oaSJdfV16AhgBhQEAAQAAEo4CChBH8NUZY1Cv8sM2lfQLaEogEgiFlW7Wp7QpdyoMVGFz
      ayBDcmVhdGVkMAE5MNkPwu7wIhhBUCcQwu7wIhhKLgoIY3Jld19rZXkSIgogZTU4MDcwMWQ1MmVi
      NjVhZmYyNGVlZmU3OGM3NDYyOGNKMQoHY3Jld19pZBImCiRmNjczMTc1ZS04Y2Q1LTQ1ZWUtYTZi
      OS0xYWFjMTliODQxZWJKLgoIdGFza19rZXkSIgogMWIxNWVmMjM5MTViMjc1NWU4OWEwZWMzYjI2
      YTEzZDJKMQoHdGFza19pZBImCiRhYTBhZjJhNi03ZjE5LTQ2ZjYtYjIzMS04NTNiY2M2MWM4YmZ6
      AhgBhQEAAQAAEooBChCJg/wSACw+HIDy4vvYISP/EgjoC/oI/1V0cCoKVG9vbCBVc2FnZTABOWA0
      ifTu8CIYQTD0lPTu8CIYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTAwLjBKFQoJdG9vbF9uYW1l
      EggKBnNheV9oaUoOCghhdHRlbXB0cxICGAF6AhgBhQEAAQAA
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '6534'
      Content-Type:
      - application/x-protobuf
      User-Agent:
      - OTel-OTLP-Exporter-Python/1.27.0
    method: POST
    uri: https://telemetry.crewai.com:4319/v1/traces
  response:
    body:
      string: "\n\0"
    headers:
      Content-Length:
      - '2'
      Content-Type:
      - application/x-protobuf
      Date:
      - Mon, 10 Feb 2025 19:55:17 GMT
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"role": "user", "content": "Assess the quality of the task
      completed based on the description, expected output, and actual results.\n\nTask
      Description:\nJust say hi\n\nExpected Output:\nhi\n\nActual Output:\nhi\n```\n\nPlease
      provide:\n- Bullet points suggestions to improve future similar tasks\n- A score
      from 0 to 10 evaluating on completion, quality, and overall performance- Entities
      extracted from the task output, if any, their type, description, and relationships"}],
      "model": "gpt-4o-mini", "tool_choice": {"type": "function", "function": {"name":
      "TaskEvaluation"}}, "tools": [{"type": "function", "function": {"name": "TaskEvaluation",
      "description": "Correctly extracted `TaskEvaluation` with all the required parameters
      with correct types", "parameters": {"$defs": {"Entity": {"properties": {"name":
      {"description": "The name of the entity.", "title": "Name", "type": "string"},
      "type": {"description": "The type of the entity.", "title": "Type", "type":
      "string"}, "description": {"description": "Description of the entity.", "title":
      "Description", "type": "string"}, "relationships": {"description": "Relationships
      of the entity.", "items": {"type": "string"}, "title": "Relationships", "type":
      "array"}}, "required": ["name", "type", "description", "relationships"], "title":
      "Entity", "type": "object"}}, "properties": {"suggestions": {"description":
      "Suggestions to improve future similar tasks.", "items": {"type": "string"},
      "title": "Suggestions", "type": "array"}, "quality": {"description": "A score
      from 0 to 10 evaluating on completion, quality, and overall performance, all
      taking into account the task description, expected output, and the result of
      the task.", "title": "Quality", "type": "number"}, "entities": {"description":
      "Entities extracted from the task output.", "items": {"$ref": "#/$defs/Entity"},
      "title": "Entities", "type": "array"}}, "required": ["entities", "quality",
      "suggestions"], "type": "object"}}}]}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '1967'
      content-type:
      - application/json
      cookie:
      - _cfuvid=efIHP1NUsh1dFewGJBu4YoBu6hhGa8vjOOKQglYQGno-1739214901306-0.0.1.1-604800000;
        __cf_bm=fmlg1wjOwuOwZhUUOEtL1tQYluAPumn7AHLF8s0EU2Y-1739217315-1.0.1.1-PQDvxn8TOhzaznlHjwVsqPZUzbAyJWFkvzCubfNJydTu2_AyA1cJ8hkM0khsEE4UY_xp8iPe2gSGmH1ydrDa0Q
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.61.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.61.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.8
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AzUA8oE0A2d99i1Khpu0CI7fSgRtZ\",\n \"object\":
      \"chat.completion\",\n \"created\": 1739217316,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
      \ \"id\": \"call_bk3duHRErK1qCyvWJ1uVmmGl\",\n \"type\":
      \"function\",\n \"function\": {\n \"name\": \"TaskEvaluation\",\n
      \ \"arguments\": \"{\\\"suggestions\\\":[\\\"Provide more context
      or details for similar tasks to enhance clarity.\\\",\\\"Specify desired tone
      or style for the output.\\\",\\\"Consider adding more variety in tasks to keep
      engagement high.\\\"],\\\"quality\\\":10,\\\"entities\\\":[{\\\"name\\\":\\\"hi\\\",\\\"type\\\":\\\"greeting\\\",\\\"description\\\":\\\"A
      casual way to say hello or acknowledge someone's presence.\\\",\\\"relationships\\\":[\\\"used
      as a greeting\\\",\\\"expresses friendliness\\\"]}]}\"\n }\n }\n
      \ ],\n \"refusal\": null\n },\n \"logprobs\": null,\n
      \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
      275,\n \"completion_tokens\": 80,\n \"total_tokens\": 355,\n \"prompt_tokens_details\":
      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
      \"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
    headers:
      CF-RAY:
      - 90fea7dfef41ceb9-SJC
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Mon, 10 Feb 2025 19:55:17 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '1535'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999874'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_55d8eb91b4318245556b73d3f4c1e7c4
    http_version: HTTP/1.1
    status_code: 200
version: 1
tests/utilities/test_events.py (new file, 497 lines)
@@ -0,0 +1,497 @@
import json
from datetime import datetime
from unittest.mock import MagicMock, patch

import pytest
from pydantic import Field

from crewai.agent import Agent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.crew import Crew
from crewai.flow.flow import Flow, listen, start
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
from crewai.tools.tool_usage import ToolUsage
from crewai.utilities.events.agent_events import (
    AgentExecutionCompletedEvent,
    AgentExecutionErrorEvent,
    AgentExecutionStartedEvent,
)
from crewai.utilities.events.crew_events import (
    CrewKickoffCompletedEvent,
    CrewKickoffFailedEvent,
    CrewKickoffStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.event_types import ToolUsageFinishedEvent
from crewai.utilities.events.flow_events import (
    FlowCreatedEvent,
    FlowFinishedEvent,
    FlowStartedEvent,
    MethodExecutionFailedEvent,
    MethodExecutionStartedEvent,
)
from crewai.utilities.events.task_events import (
    TaskCompletedEvent,
    TaskFailedEvent,
    TaskStartedEvent,
)
from crewai.utilities.events.tool_usage_events import (
    ToolUsageErrorEvent,
)

base_agent = Agent(
    role="base_agent",
    llm="gpt-4o-mini",
    goal="Just say hi",
    backstory="You are a helpful assistant that just says hi",
)

base_task = Task(
    description="Just say hi",
    expected_output="hi",
    agent=base_agent,
)


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_start_kickoff_event():
    received_events = []

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def handle_crew_start(source, event):
            received_events.append(event)

        crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")

        crew.kickoff()

        assert len(received_events) == 1
        assert received_events[0].crew_name == "TestCrew"
        assert isinstance(received_events[0].timestamp, datetime)
        assert received_events[0].type == "crew_kickoff_started"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_end_kickoff_event():
    received_events = []

    @crewai_event_bus.on(CrewKickoffCompletedEvent)
    def handle_crew_end(source, event):
        received_events.append(event)

    crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")

    crew.kickoff()

    assert len(received_events) == 1
    assert received_events[0].crew_name == "TestCrew"
    assert isinstance(received_events[0].timestamp, datetime)
    assert received_events[0].type == "crew_kickoff_completed"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_kickoff_failed_event():
    received_events = []

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(CrewKickoffFailedEvent)
        def handle_crew_failed(source, event):
            received_events.append(event)

        crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")

        with patch.object(Crew, "_execute_tasks") as mock_execute:
            error_message = "Simulated crew kickoff failure"
            mock_execute.side_effect = Exception(error_message)

            with pytest.raises(Exception):
                crew.kickoff()

            assert len(received_events) == 1
            assert received_events[0].error == error_message
            assert isinstance(received_events[0].timestamp, datetime)
            assert received_events[0].type == "crew_kickoff_failed"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_start_task_event():
    received_events = []

    @crewai_event_bus.on(TaskStartedEvent)
    def handle_task_start(source, event):
        received_events.append(event)

    crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")

    crew.kickoff()

    assert len(received_events) == 1
    assert isinstance(received_events[0].timestamp, datetime)
    assert received_events[0].type == "task_started"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_end_task_event():
    received_events = []

    @crewai_event_bus.on(TaskCompletedEvent)
    def handle_task_end(source, event):
        received_events.append(event)

    crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")

    crew.kickoff()

    assert len(received_events) == 1
    assert isinstance(received_events[0].timestamp, datetime)
    assert received_events[0].type == "task_completed"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_emits_failed_event_on_execution_error():
    received_events = []
    received_sources = []

    @crewai_event_bus.on(TaskFailedEvent)
    def handle_task_failed(source, event):
        received_events.append(event)
        received_sources.append(source)

    with patch.object(
        Task,
        "_execute_core",
    ) as mock_execute:
        error_message = "Simulated task failure"
        mock_execute.side_effect = Exception(error_message)
        agent = Agent(
            role="base_agent",
            goal="Just say hi",
            backstory="You are a helpful assistant that just says hi",
        )
        task = Task(
            description="Just say hi",
            expected_output="hi",
            agent=agent,
        )

        with pytest.raises(Exception):
            agent.execute_task(task=task)

        assert len(received_events) == 1
        assert received_sources[0] == task
        assert received_events[0].error == error_message
        assert isinstance(received_events[0].timestamp, datetime)
        assert received_events[0].type == "task_failed"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_emits_execution_started_and_completed_events():
    received_events = []

    @crewai_event_bus.on(AgentExecutionStartedEvent)
    def handle_agent_start(source, event):
        received_events.append(event)

    @crewai_event_bus.on(AgentExecutionCompletedEvent)
    def handle_agent_completed(source, event):
        received_events.append(event)

    crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
    crew.kickoff()
    assert len(received_events) == 2
    assert received_events[0].agent == base_agent
    assert received_events[0].task == base_task
    assert received_events[0].tools == []
    assert isinstance(received_events[0].task_prompt, str)
    assert (
        received_events[0].task_prompt
        == "Just say hi\n\nThis is the expected criteria for your final answer: hi\nyou MUST return the actual complete content as the final answer, not a summary."
    )
    assert isinstance(received_events[0].timestamp, datetime)
    assert received_events[0].type == "agent_execution_started"
    assert isinstance(received_events[1].timestamp, datetime)
    assert received_events[1].type == "agent_execution_completed"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_emits_execution_error_event():
    received_events = []

    @crewai_event_bus.on(AgentExecutionErrorEvent)
    def handle_agent_start(source, event):
        received_events.append(event)

    error_message = "Error happening while sending prompt to model."
    base_agent.max_retry_limit = 0
    with patch.object(
        CrewAgentExecutor, "invoke", wraps=base_agent.agent_executor.invoke
    ) as invoke_mock:
        invoke_mock.side_effect = Exception(error_message)

        with pytest.raises(Exception) as e:
            base_agent.execute_task(
                task=base_task,
            )

        assert len(received_events) == 1
        assert received_events[0].agent == base_agent
        assert received_events[0].task == base_task
        assert received_events[0].error == error_message
        assert isinstance(received_events[0].timestamp, datetime)
        assert received_events[0].type == "agent_execution_error"


class SayHiTool(BaseTool):
    name: str = Field(default="say_hi", description="The name of the tool")
    description: str = Field(
        default="Say hi", description="The description of the tool"
    )

    def _run(self) -> str:
        return "hi"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_tools_emits_finished_events():
    received_events = []

    @crewai_event_bus.on(ToolUsageFinishedEvent)
    def handle_tool_end(source, event):
        received_events.append(event)

    agent = Agent(
        role="base_agent",
        goal="Just say hi",
        backstory="You are a helpful assistant that just says hi",
        tools=[SayHiTool()],
    )

    task = Task(
        description="Just say hi",
        expected_output="hi",
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task], name="TestCrew")
    crew.kickoff()
    assert len(received_events) == 1
    assert received_events[0].agent_key == agent.key
    assert received_events[0].agent_role == agent.role
    assert received_events[0].tool_name == SayHiTool().name
    assert received_events[0].tool_args == {}
    assert received_events[0].type == "tool_usage_finished"
    assert isinstance(received_events[0].timestamp, datetime)


@pytest.mark.vcr(filter_headers=["authorization"])
def test_tools_emits_error_events():
    received_events = []

    @crewai_event_bus.on(ToolUsageErrorEvent)
    def handle_tool_end(source, event):
        received_events.append(event)

    class ErrorTool(BaseTool):
        name: str = Field(
            default="error_tool", description="A tool that raises an error"
        )
        description: str = Field(
            default="This tool always raises an error",
            description="The description of the tool",
        )

        def _run(self) -> str:
            raise Exception("Simulated tool error")

    agent = Agent(
        role="base_agent",
        goal="Try to use the error tool",
        backstory="You are an assistant that tests error handling",
        tools=[ErrorTool()],
    )

    task = Task(
        description="Use the error tool",
        expected_output="This should error",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task], name="TestCrew")
    crew.kickoff()

    assert len(received_events) == 75
    assert received_events[0].agent_key == agent.key
    assert received_events[0].agent_role == agent.role
    assert received_events[0].tool_name == "error_tool"
    assert received_events[0].tool_args == {}
    assert str(received_events[0].error) == "Simulated tool error"
    assert received_events[0].type == "tool_usage_error"
    assert isinstance(received_events[0].timestamp, datetime)


def test_flow_emits_start_event():
    received_events = []

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(FlowStartedEvent)
        def handle_flow_start(source, event):
            received_events.append(event)

        class TestFlow(Flow[dict]):
            @start()
            def begin(self):
                return "started"

        flow = TestFlow()
        flow.kickoff()

        assert len(received_events) == 1
        assert received_events[0].flow_name == "TestFlow"
        assert received_events[0].type == "flow_started"


def test_flow_emits_finish_event():
    received_events = []

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(FlowFinishedEvent)
        def handle_flow_finish(source, event):
            received_events.append(event)

        class TestFlow(Flow[dict]):
            @start()
            def begin(self):
                return "completed"

        flow = TestFlow()
        result = flow.kickoff()

        assert len(received_events) == 1
        assert received_events[0].flow_name == "TestFlow"
        assert received_events[0].type == "flow_finished"
        assert received_events[0].result == "completed"
        assert result == "completed"


def test_flow_emits_method_execution_started_event():
    received_events = []

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(MethodExecutionStartedEvent)
        def handle_method_start(source, event):
            print("event in method name", event.method_name)
            received_events.append(event)

        class TestFlow(Flow[dict]):
            @start()
            def begin(self):
                return "started"

            @listen("begin")
            def second_method(self):
                return "executed"

        flow = TestFlow()
        flow.kickoff()

        assert len(received_events) == 2

        assert received_events[0].method_name == "begin"
        assert received_events[0].flow_name == "TestFlow"
        assert received_events[0].type == "method_execution_started"

        assert received_events[1].method_name == "second_method"
        assert received_events[1].flow_name == "TestFlow"
        assert received_events[1].type == "method_execution_started"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_register_handler_adds_new_handler():
    received_events = []

    def custom_handler(source, event):
        received_events.append(event)

    with crewai_event_bus.scoped_handlers():
        crewai_event_bus.register_handler(CrewKickoffStartedEvent, custom_handler)

        crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
        crew.kickoff()

        assert len(received_events) == 1
        assert isinstance(received_events[0].timestamp, datetime)
        assert received_events[0].type == "crew_kickoff_started"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_multiple_handlers_for_same_event():
    received_events_1 = []
    received_events_2 = []

    def handler_1(source, event):
        received_events_1.append(event)

    def handler_2(source, event):
        received_events_2.append(event)

    with crewai_event_bus.scoped_handlers():
        crewai_event_bus.register_handler(CrewKickoffStartedEvent, handler_1)
        crewai_event_bus.register_handler(CrewKickoffStartedEvent, handler_2)

        crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
        crew.kickoff()

        assert len(received_events_1) == 1
        assert len(received_events_2) == 1
        assert received_events_1[0].type == "crew_kickoff_started"
        assert received_events_2[0].type == "crew_kickoff_started"


def test_flow_emits_created_event():
    received_events = []

    @crewai_event_bus.on(FlowCreatedEvent)
    def handle_flow_created(source, event):
        received_events.append(event)

    class TestFlow(Flow[dict]):
        @start()
        def begin(self):
            return "started"

    flow = TestFlow()
    flow.kickoff()

    assert len(received_events) == 1
    assert received_events[0].flow_name == "TestFlow"
    assert received_events[0].type == "flow_created"


def test_flow_emits_method_execution_failed_event():
    received_events = []
    error = Exception("Simulated method failure")

    @crewai_event_bus.on(MethodExecutionFailedEvent)
    def handle_method_failed(source, event):
        received_events.append(event)

    class TestFlow(Flow[dict]):
        @start()
        def begin(self):
            raise error

    flow = TestFlow()
    with pytest.raises(Exception):
        flow.kickoff()

    assert len(received_events) == 1
    assert received_events[0].method_name == "begin"
    assert received_events[0].flow_name == "TestFlow"
    assert received_events[0].type == "method_execution_failed"
    assert received_events[0].error == error