Merge branch 'main' into aa

Brandon Hancock (bhancock_ai)
2025-02-18 11:59:21 -05:00
committed by GitHub
19 changed files with 604 additions and 663 deletions

View File

@@ -14,7 +14,7 @@ warnings.filterwarnings(
category=UserWarning,
module="pydantic.main",
)
__version__ = "0.100.1"
__version__ = "0.102.0"
__all__ = [
"Agent",
"Crew",

View File

@@ -56,7 +56,8 @@ def test():
Test the crew execution and returns the results.
"""
inputs = {
"topic": "AI LLMs"
"topic": "AI LLMs",
"current_year": str(datetime.now().year)
}
try:
{{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.13"
dependencies = [
"crewai[tools]>=0.100.1,<1.0.0"
"crewai[tools]>=0.102.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.13"
dependencies = [
"crewai[tools]>=0.100.1,<1.0.0",
"crewai[tools]>=0.102.0,<1.0.0",
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.13"
dependencies = [
"crewai[tools]>=0.100.1"
"crewai[tools]>=0.102.0"
]
[tool.crewai]

View File

@@ -1148,19 +1148,24 @@ class Crew(BaseModel):
def test(
self,
n_iterations: int,
-openai_model_name: Optional[str] = None,
+eval_llm: Union[str, InstanceOf[LLM]],
inputs: Optional[Dict[str, Any]] = None,
) -> None:
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
test_crew = self.copy()
+eval_llm = create_llm(eval_llm)
+if not eval_llm:
+    raise ValueError("Failed to create LLM instance.")
self._test_execution_span = test_crew._telemetry.test_execution_span(
test_crew,
n_iterations,
inputs,
-openai_model_name, # type: ignore[arg-type]
+eval_llm.model, # type: ignore[arg-type]
) # type: ignore[arg-type]
-evaluator = CrewEvaluator(test_crew, openai_model_name) # type: ignore[arg-type]
+evaluator = CrewEvaluator(test_crew, eval_llm) # type: ignore[arg-type]
for i in range(1, n_iterations + 1):
evaluator.set_iteration(i)
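
The `test()` signature above replaces `openai_model_name` with `eval_llm`, which accepts either a model-name string or an `LLM` instance (strings are resolved through `create_llm`). A minimal usage sketch, assuming an existing `Crew` instance named `my_crew` and an illustrative model name:

```python
from crewai import LLM

# eval_llm accepts an LLM instance; a plain model string such as "gpt-4o-mini"
# is also accepted and resolved internally via create_llm().
my_crew.test(
    n_iterations=2,
    eval_llm=LLM(model="gpt-4o-mini"),
    inputs={"topic": "AI LLMs", "current_year": "2025"},
)
```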

View File

@@ -1,4 +1,5 @@
import asyncio
+import copy
import inspect
import logging
from typing import (
@@ -394,7 +395,6 @@ class FlowMeta(type):
or hasattr(attr_value, "__trigger_methods__")
or hasattr(attr_value, "__is_router__")
):
# Register start methods
if hasattr(attr_value, "__is_start_method__"):
start_methods.append(attr_name)
@@ -569,6 +569,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
f"Initial state must be dict or BaseModel, got {type(self.initial_state)}"
)
+def _copy_state(self) -> T:
+    return copy.deepcopy(self._state)
@property
def state(self) -> T:
return self._state
@@ -740,6 +743,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
event=FlowStartedEvent(
type="flow_started",
flow_name=self.__class__.__name__,
+inputs=inputs,
),
)
self._log_flow_event(
@@ -803,6 +807,18 @@ class Flow(Generic[T], metaclass=FlowMeta):
async def _execute_method(
self, method_name: str, method: Callable, *args: Any, **kwargs: Any
) -> Any:
dumped_params = {f"_{i}": arg for i, arg in enumerate(args)} | (kwargs or {})
self.event_emitter.send(
self,
event=MethodExecutionStartedEvent(
type="method_execution_started",
method_name=method_name,
flow_name=self.__class__.__name__,
params=dumped_params,
state=self._copy_state(),
),
)
result = (
await method(*args, **kwargs)
if asyncio.iscoroutinefunction(method)
@@ -812,6 +828,18 @@ class Flow(Generic[T], metaclass=FlowMeta):
self._method_execution_counts[method_name] = (
self._method_execution_counts.get(method_name, 0) + 1
)
+self.event_emitter.send(
+    self,
+    event=MethodExecutionFinishedEvent(
+        type="method_execution_finished",
+        method_name=method_name,
+        flow_name=self.__class__.__name__,
+        state=self._copy_state(),
+        result=result,
+    ),
+)
return result
async def _execute_listeners(self, trigger_method: str, result: Any) -> None:
@@ -950,16 +978,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
"""
try:
method = self._methods[listener_name]
-self.event_emitter.send(
-    self,
-    event=MethodExecutionStartedEvent(
-        type="method_execution_started",
-        method_name=listener_name,
-        flow_name=self.__class__.__name__,
-    ),
-)
sig = inspect.signature(method)
params = list(sig.parameters.values())
method_params = [p for p in params if p.name != "self"]
@@ -971,15 +989,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
else:
listener_result = await self._execute_method(listener_name, method)
-self.event_emitter.send(
-    self,
-    event=MethodExecutionFinishedEvent(
-        type="method_execution_finished",
-        method_name=listener_name,
-        flow_name=self.__class__.__name__,
-    ),
-)
# Execute listeners (and possibly routers) of this listener
await self._execute_listeners(listener_name, listener_result)
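
With emission centralized in `_execute_method`, both method-execution events are sent from a single place and now carry the call's parameters, a deep-copied state snapshot (via `_copy_state`), and, for the finished event, the return value. A sketch of an observer, assuming `event_emitter` is the blinker-style signal the `Flow` class exposes; `GreetFlow` is a hypothetical subclass:

```python
from crewai.flow.flow import Flow, start


class GreetFlow(Flow):
    @start()
    def begin(self):
        return "hello"


def on_flow_event(sender, *, event, **kwargs):
    # Started events carry `params`, finished events carry `result`,
    # and both carry a snapshot in `state`; FlowStartedEvent carries `inputs`.
    print(
        type(event).__name__,
        getattr(event, "method_name", ""),
        getattr(event, "state", None),
    )


flow = GreetFlow()
flow.event_emitter.connect(on_flow_event)
flow.kickoff()
```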

View File

@@ -1,6 +1,8 @@
from dataclasses import dataclass, field
from datetime import datetime
-from typing import Any, Optional
+from typing import Any, Dict, Optional, Union
+from pydantic import BaseModel
@dataclass
@@ -15,17 +17,21 @@ class Event:
@dataclass
class FlowStartedEvent(Event):
-pass
+inputs: Optional[Dict[str, Any]] = None
@dataclass
class MethodExecutionStartedEvent(Event):
method_name: str
+state: Union[Dict[str, Any], BaseModel]
+params: Optional[Dict[str, Any]] = None
@dataclass
class MethodExecutionFinishedEvent(Event):
method_name: str
+state: Union[Dict[str, Any], BaseModel]
result: Any = None
@dataclass
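
Since `state` is typed as `Union[Dict[str, Any], BaseModel]`, event consumers need to handle both shapes. An illustrative helper (not part of this diff):

```python
from typing import Any, Dict, Union

from pydantic import BaseModel


def state_as_dict(state: Union[Dict[str, Any], BaseModel]) -> Dict[str, Any]:
    # Structured flows carry a BaseModel snapshot; unstructured flows carry a dict.
    if isinstance(state, BaseModel):
        return state.model_dump()
    return dict(state)
```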

View File

@@ -0,0 +1,52 @@
from datetime import date, datetime
from typing import Any
from pydantic import BaseModel
from crewai.flow import Flow
def export_state(flow: Flow) -> dict[str, Any]:
    """Exports the Flow's internal state as JSON-compatible data structures.

    Performs a one-way transformation of a Flow's state into basic Python types
    that can be safely serialized to JSON. To prevent infinite recursion with
    circular references, the conversion is limited to a depth of 5 levels.

    Args:
        flow: The Flow object whose state needs to be exported

    Returns:
        dict[str, Any]: The transformed state using JSON-compatible Python
            types.
    """
    return _to_serializable(flow._state)


def _to_serializable(obj: Any, max_depth: int = 5, _current_depth: int = 0) -> Any:
    if _current_depth >= max_depth:
        return repr(obj)

    if isinstance(obj, (str, int, float, bool, type(None))):
        return obj
    elif isinstance(obj, (date, datetime)):
        return obj.isoformat()
    elif isinstance(obj, (list, tuple, set)):
        return [_to_serializable(item, max_depth, _current_depth + 1) for item in obj]
    elif isinstance(obj, dict):
        return {
            _to_serializable_key(key): _to_serializable(
                value, max_depth, _current_depth + 1
            )
            for key, value in obj.items()
        }
    elif isinstance(obj, BaseModel):
        return _to_serializable(obj.model_dump(), max_depth, _current_depth + 1)
    else:
        return repr(obj)


def _to_serializable_key(key: Any) -> str:
    if isinstance(key, (str, int)):
        return str(key)
    return f"key_{id(key)}_{repr(key)}"

View File

@@ -1,11 +1,12 @@
from collections import defaultdict
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, InstanceOf
from rich.box import HEAVY_EDGE
from rich.console import Console
from rich.table import Table
from crewai.agent import Agent
+from crewai.llm import LLM
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry import Telemetry
@@ -23,7 +24,7 @@ class CrewEvaluator:
Attributes:
crew (Crew): The crew of agents to evaluate.
-openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
+eval_llm (LLM): Language model instance to use for evaluations
tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
iteration (int): The current iteration of the evaluation.
"""
@@ -32,9 +33,9 @@ class CrewEvaluator:
run_execution_times: defaultdict = defaultdict(list)
iteration: int = 0
-def __init__(self, crew, openai_model_name: str):
+def __init__(self, crew, eval_llm: InstanceOf[LLM]):
self.crew = crew
-self.openai_model_name = openai_model_name
+self.llm = eval_llm
self._telemetry = Telemetry()
self._setup_for_evaluating()
@@ -51,7 +52,7 @@ class CrewEvaluator:
),
backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
verbose=False,
-llm=self.openai_model_name,
+llm=self.llm,
)
def _evaluation_task(
@@ -181,7 +182,7 @@ class CrewEvaluator:
self.crew,
evaluation_result.pydantic.quality,
current_task.execution_duration,
-self.openai_model_name,
+self.llm.model,
)
self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
self.run_execution_times[self.iteration].append(
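
With this change, `CrewEvaluator` receives a ready `LLM` instance rather than a model-name string; string inputs are resolved earlier by `create_llm` in `Crew.test`. A direct-construction sketch, assuming an existing crew object `my_crew` (the evaluator's import path is not shown in this diff and is an assumption):

```python
from crewai import LLM
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator

evaluator = CrewEvaluator(my_crew, LLM(model="gpt-4o-mini"))
evaluator.set_iteration(1)
```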