Compare commits

..

1 Commits

Author SHA1 Message Date
Devin AI
3efc5f67fb Fix pyright LSP errors in example code
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-04-08 22:29:10 +00:00
11 changed files with 103 additions and 122 deletions

View File

@@ -23,6 +23,7 @@ from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_F
from crewai.utilities.converter import generate_model_description
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler
from crewai.utilities.typing import AgentConfig
agentops = None
@@ -88,6 +89,7 @@ class Agent(BaseAgent):
function_calling_llm: Optional[Any] = Field(
description="Language model that will run the agent.", default=None
)
config: Optional[Union[Dict[str, Any], AgentConfig]] = Field(default=None)
system_template: Optional[str] = Field(
default=None, description="System format for the agent."
)

View File

@@ -1,9 +1,6 @@
from importlib.metadata import version as get_version
from typing import Optional
from typing import Union
from crewai.llm import LLM
import click
from crewai.cli.add_crew_to_flow import add_crew_to_flow
@@ -183,15 +180,8 @@ def reset_memories(
default="gpt-4o-mini",
help="LLM Model to run the tests on the Crew. For now only accepting only OpenAI models.",
)
def test(n_iterations: int, model: Union[str, LLM]):
"""Test the crew and evaluate the results using either a model name or LLM instance.
Args:
n_iterations: The number of iterations to run the test.
model: Either a model name string or an LLM instance to use for evaluating
the performance of the agents. If a string is provided, it will be used
to create an LLM instance.
"""
def test(n_iterations: int, model: str):
"""Test the crew and evaluate the results."""
click.echo(f"Testing the crew for {n_iterations} iterations with model {model}")
evaluate_crew(n_iterations, model)

View File

@@ -18,9 +18,6 @@ from pydantic import (
)
from pydantic_core import PydanticCustomError
from typing import Union
from crewai.llm import LLM
from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.cache import CacheHandler
@@ -1078,30 +1075,19 @@ class Crew(BaseModel):
def test(
self,
n_iterations: int,
openai_model_name: Optional[Union[str, LLM]] = None,
openai_model_name: Optional[str] = None,
inputs: Optional[Dict[str, Any]] = None,
) -> None:
"""Test and evaluate the Crew with the given inputs for n iterations.
Args:
n_iterations: The number of iterations to run the test.
openai_model_name: Either a model name string or an LLM instance to use for evaluating
the performance of the agents. If a string is provided, it will be used to create
an LLM instance.
inputs: The inputs to use for the test.
Raises:
ValueError: If openai_model_name is not a string or LLM instance.
"""
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
test_crew = self.copy()
self._test_execution_span = test_crew._telemetry.test_execution_span(
test_crew,
n_iterations,
inputs,
openai_model_name,
)
evaluator = CrewEvaluator(test_crew, openai_model_name)
openai_model_name, # type: ignore[arg-type]
) # type: ignore[arg-type]
evaluator = CrewEvaluator(test_crew, openai_model_name) # type: ignore[arg-type]
for i in range(1, n_iterations + 1):
evaluator.set_iteration(i)

View File

@@ -16,6 +16,12 @@ def after_kickoff(func):
def task(func):
"""Decorator to mark a method as a task creator.
When applied to a method in a class decorated with @CrewBase,
this makes the method's return value accessible as an element
of the self.tasks list.
"""
func.is_task = True
@wraps(func)
@@ -29,6 +35,12 @@ def task(func):
def agent(func):
"""Decorator to mark a method as an agent creator.
When applied to a method in a class decorated with @CrewBase,
this makes the method's return value accessible as an element
of the self.agents list.
"""
func.is_agent = True
func = memoize(func)
return func

View File

@@ -1,6 +1,6 @@
import inspect
from pathlib import Path
from typing import Any, Callable, Dict, TypeVar, cast
from typing import Any, Callable, Dict, List, TypeVar, cast
import yaml
from dotenv import load_dotenv
@@ -66,6 +66,9 @@ def CrewBase(cls: T) -> T:
self._kickoff = self._filter_functions(
self._original_functions, "is_kickoff"
)
self.agents = [] # type: List[Any]
self.tasks = [] # type: List[Any]
@staticmethod
def load_yaml(config_path: Path):

View File

@@ -41,6 +41,7 @@ from crewai.tools.base_tool import BaseTool
from crewai.utilities.config import process_config
from crewai.utilities.converter import Converter, convert_to_model
from crewai.utilities.i18n import I18N
from crewai.utilities.typing import TaskConfig
class Task(BaseModel):
@@ -74,7 +75,7 @@ class Task(BaseModel):
expected_output: str = Field(
description="Clear definition of expected output for the task."
)
config: Optional[Dict[str, Any]] = Field(
config: Optional[Union[Dict[str, Any], TaskConfig]] = Field(
description="Configuration for the agent",
default=None,
)

View File

@@ -1,10 +1,6 @@
from typing import Union
from crewai.llm import LLM
from collections import defaultdict
from pydantic import BaseModel, Field
from crewai.utilities.logger import Logger
from rich.box import HEAVY_EDGE
from rich.console import Console
from rich.table import Table
@@ -27,7 +23,7 @@ class CrewEvaluator:
Attributes:
crew (Crew): The crew of agents to evaluate.
openai_model_name (Union[str, LLM]): Either a model name string or an LLM instance to use for evaluating the performance of the agents.
openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
iteration (int): The current iteration of the evaluation.
"""
@@ -36,29 +32,10 @@ class CrewEvaluator:
run_execution_times: defaultdict = defaultdict(list)
iteration: int = 0
def __init__(self, crew, openai_model_name: Union[str, LLM]):
"""Initialize the CrewEvaluator.
Args:
crew (Crew): The crew to evaluate
openai_model_name (Union[str, LLM]): Either a model name string or an LLM instance
to use for evaluation. If a string is provided, it will be used to create an
LLM instance with default settings. If an LLM instance is provided, its settings
(like temperature) will be preserved.
Raises:
ValueError: If openai_model_name is not a string or LLM instance.
"""
def __init__(self, crew, openai_model_name: str):
self.crew = crew
if not isinstance(openai_model_name, (str, LLM)):
raise ValueError(f"Invalid model type '{type(openai_model_name)}'. Expected str or LLM instance.")
self.model_instance = openai_model_name if isinstance(openai_model_name, LLM) else LLM(model=openai_model_name)
self.openai_model_name = openai_model_name
self._telemetry = Telemetry()
self._logger = Logger()
self._logger.log(
"info",
f"Initializing CrewEvaluator with model: {openai_model_name if isinstance(openai_model_name, str) else openai_model_name.model}"
)
self._setup_for_evaluating()
def _setup_for_evaluating(self) -> None:
@@ -74,7 +51,7 @@ class CrewEvaluator:
),
backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
verbose=False,
llm=self.model_instance,
llm=self.openai_model_name,
)
def _evaluation_task(
@@ -204,11 +181,7 @@ class CrewEvaluator:
self.crew,
evaluation_result.pydantic.quality,
current_task._execution_time,
self.model_instance.model,
)
self._logger.log(
"info",
f"Task evaluation completed with quality score: {evaluation_result.pydantic.quality}"
self.openai_model_name,
)
self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
self.run_execution_times[self.iteration].append(

View File

@@ -0,0 +1,14 @@
from typing import Dict, List, Optional, Any, TypedDict, Union
class AgentConfig(TypedDict, total=False):
"""TypedDict for agent configuration loaded from YAML."""
role: str
goal: str
backstory: str
verbose: bool
class TaskConfig(TypedDict, total=False):
"""TypedDict for task configuration loaded from YAML."""
description: str
expected_output: str
agent: str # Role of the agent to execute this task

View File

@@ -10,7 +10,6 @@ import instructor
import pydantic_core
import pytest
from crewai.llm import LLM
from crewai.agent import Agent
from crewai.agents.cache import CacheHandler
from crewai.crew import Crew
@@ -301,35 +300,6 @@ def test_hierarchical_process():
)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_test_with_custom_llm():
"""Test that Crew.test() works correctly with custom LLM instances."""
task = Task(
description="Test task",
expected_output="Test output",
agent=researcher,
)
custom_llm = LLM(model="gpt-4", temperature=0.5)
crew = Crew(agents=[researcher], tasks=[task], process=Process.sequential)
with mock.patch('crewai.crew.CrewEvaluator') as mock_evaluator:
crew.test(n_iterations=1, openai_model_name=custom_llm)
mock_evaluator.assert_called_once_with(mock.ANY, custom_llm)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_test_backward_compatibility():
"""Test that Crew.test() maintains backward compatibility with string model names."""
task = Task(
description="Test task",
expected_output="Test output",
agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[task], process=Process.sequential)
with mock.patch('crewai.crew.CrewEvaluator') as mock_evaluator:
crew.test(n_iterations=1, openai_model_name="gpt-4")
mock_evaluator.assert_called_once_with(mock.ANY, "gpt-4")
def test_manager_llm_requirement_for_hierarchical_process():
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
@@ -1153,7 +1123,7 @@ def test_kickoff_for_each_empty_input():
assert results == []
@pytest.mark.vcr(filter_headeruvs=["authorization"])
@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_invalid_input():
"""Tests if kickoff_for_each raises TypeError for invalid input types."""
@@ -3155,4 +3125,4 @@ def test_multimodal_agent_live_image_analysis():
# Verify we got a meaningful response
assert isinstance(result.raw, str)
assert len(result.raw) > 100 # Expecting a detailed analysis
assert "error" not in result.raw.lower() # No error messages in response
assert "error" not in result.raw.lower() # No error messages in response

55
tests/typing_test.py Normal file
View File

@@ -0,0 +1,55 @@
from typing import Dict, Any
import pytest
from crewai.agent import Agent
from crewai.task import Task
from crewai.utilities.typing import AgentConfig, TaskConfig
def test_agent_with_config_dict():
config: AgentConfig = {
"role": "Test Agent",
"goal": "Test Goal",
"backstory": "Test Backstory",
"verbose": True
}
agent = Agent(config=config)
assert agent.role == "Test Agent"
assert agent.goal == "Test Goal"
assert agent.backstory == "Test Backstory"
assert agent.verbose is True
def test_agent_with_yaml_config():
config: Dict[str, Any] = {
"researcher": {
"role": "Researcher",
"goal": "Research Goal",
"backstory": "Researcher Backstory",
"verbose": True
}
}
agent = Agent(config=config["researcher"])
assert agent.role == "Researcher"
assert agent.goal == "Research Goal"
assert agent.backstory == "Researcher Backstory"
def test_task_with_config_dict():
config: TaskConfig = {
"description": "Test Task",
"expected_output": "Test Output",
"agent": "researcher"
}
agent = Agent(role="Researcher", goal="Goal", backstory="Backstory")
task = Task(config=config, agent=agent)
assert task.description == "Test Task"
assert task.expected_output == "Test Output"
assert task.agent == agent

View File

@@ -2,7 +2,6 @@ from unittest import mock
import pytest
from crewai.llm import LLM
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task
@@ -132,30 +131,6 @@ class TestCrewEvaluator:
# Ensure the console prints the table
console.assert_has_calls([mock.call(), mock.call().print(table())])
def test_evaluator_with_custom_llm(self, crew_planner):
"""Test that CrewEvaluator correctly handles custom LLM instances."""
custom_llm = LLM(model="gpt-4", temperature=0.5)
evaluator = CrewEvaluator(crew_planner.crew, custom_llm)
assert evaluator.model_instance == custom_llm
assert evaluator.model_instance.temperature == 0.5
def test_evaluator_with_invalid_model_type(self, crew_planner):
"""Test that CrewEvaluator raises error for invalid model type."""
with pytest.raises(ValueError, match="Invalid model type"):
CrewEvaluator(crew_planner.crew, 123)
def test_evaluator_preserves_model_settings(self, crew_planner):
"""Test that CrewEvaluator preserves model settings."""
custom_llm = LLM(model="gpt-4", temperature=0.7)
evaluator = CrewEvaluator(crew_planner.crew, custom_llm)
assert evaluator.model_instance.temperature == 0.7
def test_evaluator_with_model_name(self, crew_planner):
"""Test that CrewEvaluator correctly handles string model names."""
evaluator = CrewEvaluator(crew_planner.crew, "gpt-4")
assert isinstance(evaluator.model_instance, LLM)
assert evaluator.model_instance.model == "gpt-4"
def test_evaluate(self, crew_planner):
task_output = TaskOutput(
description="Task 1", agent=str(crew_planner.crew.agents[0])