mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-10 08:38:30 +00:00
Added functionality to have any llm run test functionality (#2071)
* Added functionality to have any llm run test functionality
* Fixed lint issues
* Fixed Linting issues
* Fixed unit test case
* Fixed unit test
* Fixed test case
* Fixed unit test case

---------

Co-authored-by: Brandon Hancock (bhancock_ai) <109994880+bhancockio@users.noreply.github.com>
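This change replaces the OpenAI-only openai_model_name argument of Crew.test() with an eval_llm parameter that accepts either a model name string or an LLM instance, so any configured LLM can act as the evaluation model. A minimal usage sketch under assumptions (the agent, task, model names, and inputs below are illustrative and not part of this commit):

    from crewai import Agent, Task, Crew, LLM

    # Illustrative crew; any existing crew is tested the same way.
    researcher = Agent(
        role="Researcher",
        goal="Summarize a topic",
        backstory="An analyst who writes concise summaries.",
    )
    summary_task = Task(
        description="Summarize the topic: {topic}",
        expected_output="A short summary.",
        agent=researcher,
    )
    crew = Crew(agents=[researcher], tasks=[summary_task])

    # eval_llm accepts a plain model name string...
    crew.test(n_iterations=2, eval_llm="gpt-4o", inputs={"topic": "vector databases"})

    # ...or any LLM instance, which is what this commit enables.
    local_judge = LLM(model="ollama/llama3.1")
    crew.test(n_iterations=2, eval_llm=local_judge, inputs={"topic": "vector databases"})

The hunk below shows the signature and call-site changes.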
@@ -1148,19 +1148,24 @@ class Crew(BaseModel):
     def test(
         self,
         n_iterations: int,
-        openai_model_name: Optional[str] = None,
+        eval_llm: Union[str, InstanceOf[LLM]],
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
         test_crew = self.copy()
 
+        eval_llm = create_llm(eval_llm)
+
+        if not eval_llm:
+            raise ValueError("Failed to create LLM instance.")
+
         self._test_execution_span = test_crew._telemetry.test_execution_span(
             test_crew,
             n_iterations,
             inputs,
-            openai_model_name,  # type: ignore[arg-type]
+            eval_llm.model,  # type: ignore[arg-type]
         )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
+        evaluator = CrewEvaluator(test_crew, eval_llm)  # type: ignore[arg-type]
 
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)
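After this hunk, eval_llm is always an LLM object by the time it reaches CrewEvaluator and the telemetry span: create_llm resolves a model name string into an LLM instance (or passes an existing instance through), the guard raises if nothing could be resolved, and telemetry records eval_llm.model rather than the old openai_model_name string. A rough sketch of that normalization pattern, assuming simple isinstance checks (this is not the actual create_llm implementation, which lives outside this hunk):

    from typing import Optional, Union

    from crewai import LLM

    def normalize_llm(value: Union[str, LLM, None]) -> Optional[LLM]:
        # Pass an existing LLM instance through unchanged.
        if isinstance(value, LLM):
            return value
        # Wrap a non-empty model name string in an LLM instance.
        if isinstance(value, str) and value:
            return LLM(model=value)
        # Anything else cannot be resolved; the caller raises ValueError.
        return None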