From ac819bcb6ee6ad95888ba051e173a31492a1f59d Mon Sep 17 00:00:00 2001
From: Vidit Ostwal <110953813+Vidit-Ostwal@users.noreply.github.com>
Date: Tue, 18 Feb 2025 22:15:26 +0530
Subject: [PATCH 1/2] Added functionality to have any llm run test functionality (#2071)

* Added functionality to have any llm run test functionality

* Fixed lint issues

* Fixed Linting issues

* Fixed unit test case

* Fixed unit test

* Fixed test case

* Fixed unit test case

---------

Co-authored-by: Brandon Hancock (bhancock_ai) <109994880+bhancockio@users.noreply.github.com>
---
 src/crewai/crew.py                                 | 11 ++++++++---
 .../utilities/evaluators/crew_evaluator_handler.py | 13 +++++++------
 tests/crew_test.py                                 |  6 ++++--
 3 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/src/crewai/crew.py b/src/crewai/crew.py
index 9ae9ce2c0..9eb93a16c 100644
--- a/src/crewai/crew.py
+++ b/src/crewai/crew.py
@@ -1148,19 +1148,24 @@ class Crew(BaseModel):
     def test(
         self,
         n_iterations: int,
-        openai_model_name: Optional[str] = None,
+        eval_llm: Union[str, InstanceOf[LLM]],
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
         test_crew = self.copy()
 
+        eval_llm = create_llm(eval_llm)
+
+        if not eval_llm:
+            raise ValueError("Failed to create LLM instance.")
+
         self._test_execution_span = test_crew._telemetry.test_execution_span(
             test_crew,
             n_iterations,
             inputs,
-            openai_model_name,  # type: ignore[arg-type]
+            eval_llm.model,  # type: ignore[arg-type]
         )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
+        evaluator = CrewEvaluator(test_crew, eval_llm)  # type: ignore[arg-type]
 
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)
diff --git a/src/crewai/utilities/evaluators/crew_evaluator_handler.py b/src/crewai/utilities/evaluators/crew_evaluator_handler.py
index ef9b908e1..9fcd2886d 100644
--- a/src/crewai/utilities/evaluators/crew_evaluator_handler.py
+++ b/src/crewai/utilities/evaluators/crew_evaluator_handler.py
@@ -1,11 +1,12 @@
 from collections import defaultdict
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, InstanceOf
 from rich.box import HEAVY_EDGE
 from rich.console import Console
 from rich.table import Table
 
 from crewai.agent import Agent
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
@@ -23,7 +24,7 @@ class CrewEvaluator:
 
     Attributes:
         crew (Crew): The crew of agents to evaluate.
-        openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
+        eval_llm (LLM): Language model instance to use for evaluations
         tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
         iteration (int): The current iteration of the evaluation.
     """
@@ -32,9 +33,9 @@ class CrewEvaluator:
     run_execution_times: defaultdict = defaultdict(list)
     iteration: int = 0
 
-    def __init__(self, crew, openai_model_name: str):
+    def __init__(self, crew, eval_llm: InstanceOf[LLM]):
         self.crew = crew
-        self.openai_model_name = openai_model_name
+        self.llm = eval_llm
         self._telemetry = Telemetry()
         self._setup_for_evaluating()
 
@@ -51,7 +52,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=self.openai_model_name,
+            llm=self.llm,
         )
 
     def _evaluation_task(
@@ -181,7 +182,7 @@ class CrewEvaluator:
                 self.crew,
                 evaluation_result.pydantic.quality,
                 current_task.execution_duration,
-                self.openai_model_name,
+                self.llm.model,
             )
             self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
             self.run_execution_times[self.iteration].append(
diff --git a/tests/crew_test.py b/tests/crew_test.py
index 0539ea347..398be37de 100644
--- a/tests/crew_test.py
+++ b/tests/crew_test.py
@@ -15,6 +15,7 @@ from crewai.agents.cache import CacheHandler
 from crewai.crew import Crew
 from crewai.crews.crew_output import CrewOutput
 from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
+from crewai.llm import LLM
 from crewai.memory.contextual.contextual_memory import ContextualMemory
 from crewai.process import Process
 from crewai.project import crew
@@ -3341,7 +3342,8 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
     copy_mock.return_value = crew
 
     n_iterations = 2
-    crew.test(n_iterations, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"})
+    llm_instance = LLM('gpt-4o-mini')
+    crew.test(n_iterations, llm_instance, inputs={"topic": "AI"})
 
     # Ensure kickoff is called on the copied crew
     kickoff_mock.assert_has_calls(
@@ -3350,7 +3352,7 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
 
     crew_evaluator.assert_has_calls(
        [
-            mock.call(crew, "gpt-4o-mini"),
+            mock.call(crew,llm_instance),
             mock.call().set_iteration(1),
             mock.call().set_iteration(2),
             mock.call().print_crew_evaluation_result(),
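
Usage sketch for the patch above: after this change, Crew.test takes eval_llm instead of openai_model_name, and create_llm normalizes the value, so either a model-name string or an LLM instance should work. The agent, task, and model names below are illustrative assumptions and not part of the patch; running it requires valid provider credentials.

from crewai import Agent, Crew, Task
from crewai.llm import LLM

# Illustrative crew; the role/goal/task wording is assumed for this sketch.
researcher = Agent(
    role="Researcher",
    goal="Summarize a topic",
    backstory="An experienced analyst.",
)
summary_task = Task(
    description="Write a short summary about {topic}.",
    expected_output="A three-sentence summary.",
    agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[summary_task])

# Either form is accepted for eval_llm after this patch: a model-name string...
crew.test(n_iterations=2, eval_llm="gpt-4o-mini", inputs={"topic": "AI"})

# ...or a pre-configured LLM instance.
crew.test(n_iterations=2, eval_llm=LLM("gpt-4o-mini", temperature=0), inputs={"topic": "AI"})
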
""" @@ -32,9 +33,9 @@ class CrewEvaluator: run_execution_times: defaultdict = defaultdict(list) iteration: int = 0 - def __init__(self, crew, openai_model_name: str): + def __init__(self, crew, eval_llm: InstanceOf[LLM]): self.crew = crew - self.openai_model_name = openai_model_name + self.llm = eval_llm self._telemetry = Telemetry() self._setup_for_evaluating() @@ -51,7 +52,7 @@ class CrewEvaluator: ), backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed", verbose=False, - llm=self.openai_model_name, + llm=self.llm, ) def _evaluation_task( @@ -181,7 +182,7 @@ class CrewEvaluator: self.crew, evaluation_result.pydantic.quality, current_task.execution_duration, - self.openai_model_name, + self.llm.model, ) self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality) self.run_execution_times[self.iteration].append( diff --git a/tests/crew_test.py b/tests/crew_test.py index 0539ea347..398be37de 100644 --- a/tests/crew_test.py +++ b/tests/crew_test.py @@ -15,6 +15,7 @@ from crewai.agents.cache import CacheHandler from crewai.crew import Crew from crewai.crews.crew_output import CrewOutput from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource +from crewai.llm import LLM from crewai.memory.contextual.contextual_memory import ContextualMemory from crewai.process import Process from crewai.project import crew @@ -3341,7 +3342,8 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator): copy_mock.return_value = crew n_iterations = 2 - crew.test(n_iterations, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"}) + llm_instance = LLM('gpt-4o-mini') + crew.test(n_iterations, llm_instance, inputs={"topic": "AI"}) # Ensure kickoff is called on the copied crew kickoff_mock.assert_has_calls( @@ -3350,7 +3352,7 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator): crew_evaluator.assert_has_calls( [ - mock.call(crew, "gpt-4o-mini"), + mock.call(crew,llm_instance), mock.call().set_iteration(1), mock.call().set_iteration(2), mock.call().print_crew_evaluation_result(), From 7dc47adb5c28b140caa14ad74abbce29e8727058 Mon Sep 17 00:00:00 2001 From: sharmasundip <59015684+sharmasundip@users.noreply.github.com> Date: Tue, 18 Feb 2025 22:29:29 +0530 Subject: [PATCH 2/2] fix user memory config issue (#2086) Co-authored-by: Brandon Hancock (bhancock_ai) <109994880+bhancockio@users.noreply.github.com> --- src/crewai/crew.py | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/src/crewai/crew.py b/src/crewai/crew.py index 9eb93a16c..d331599b5 100644 --- a/src/crewai/crew.py +++ b/src/crewai/crew.py @@ -275,12 +275,26 @@ class Crew(BaseModel): if self.entity_memory else EntityMemory(crew=self, embedder_config=self.embedder) ) - if hasattr(self, "memory_config") and self.memory_config is not None: - self._user_memory = ( - self.user_memory if self.user_memory else UserMemory(crew=self) - ) + if ( + self.memory_config and "user_memory" in self.memory_config + ): # Check for user_memory in config + user_memory_config = self.memory_config["user_memory"] + if isinstance( + user_memory_config, UserMemory + ): # Check if it is already an instance + self._user_memory = user_memory_config + elif isinstance( + user_memory_config, dict + ): # Check if it's a configuration dict + self._user_memory = UserMemory( + crew=self, **user_memory_config + ) # Initialize with config + else: + raise 
@@ -455,8 +469,6 @@ class Crew(BaseModel):
         )
         return self
 
-
-
     @property
     def key(self) -> str:
         source = [agent.key for agent in self.agents] + [
@@ -928,13 +940,13 @@ class Crew(BaseModel):
     def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
         if not task_outputs:
             raise ValueError("No task outputs available to create crew output.")
-        
+
         # Filter out empty outputs and get the last valid one as the main output
         valid_outputs = [t for t in task_outputs if t.raw]
         if not valid_outputs:
             raise ValueError("No valid task outputs available to create crew output.")
         final_task_output = valid_outputs[-1]
-        
+
         final_string_output = final_task_output.raw
         self._finish_execution(final_string_output)
         token_usage = self.calculate_usage_metrics()
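
Configuration sketch for the fix above: memory_config may now carry "user_memory" either as a ready-made UserMemory instance or as a dict that is expanded into UserMemory(crew=self, **user_memory_config); any other type raises the TypeError shown in the diff. The agent, task, and mem0-style provider settings below are illustrative assumptions, and the mem0 path needs the mem0 package plus its API key at runtime.

from crewai import Agent, Crew, Task

# Illustrative agent/task; not part of the patch.
assistant = Agent(role="Assistant", goal="Answer questions", backstory="Concise and helpful.")
qa_task = Task(description="Answer: {question}", expected_output="A short answer.", agent=assistant)

# Dict form: the "user_memory" value is forwarded as keyword arguments to UserMemory.
# Provider settings here are an assumption for illustration (mem0-style user memory).
crew = Crew(
    agents=[assistant],
    tasks=[qa_task],
    memory=True,
    memory_config={
        "provider": "mem0",
        "config": {"user_id": "john"},
        "user_memory": {},  # empty dict -> UserMemory(crew=crew) with defaults
    },
)

# Instance form: an already-built UserMemory object is used as-is, e.g.
#     memory_config={"user_memory": UserMemory(crew=some_crew)}
# Anything else under "user_memory" (a string, a list, ...) now fails fast with:
#     TypeError: user_memory must be a UserMemory instance or a configuration dictionary
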