mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-07 07:08:31 +00:00)

Compare commits: brandon/ge...devin/1739 (15 commits)

| SHA1 |
|---|
| 10af8e35fd |
| e343017414 |
| dad121a692 |
| 8dc07febc7 |
| 09240a7b62 |
| 0423dd8134 |
| f4efdc55e2 |
| 0ab66041da |
| b8f2603bf3 |
| ff620f0ad6 |
| 1caf45ad9b |
| 4a216d1f15 |
| 5f5a1b3687 |
| f3704a44b3 |
| 9bd39464cc |
@@ -91,7 +91,7 @@ result = crew.kickoff(inputs={"question": "What city does John live in and how o
 ```
 
-Here's another example with the `CrewDoclingSource`. The CrewDoclingSource is actually quite versatile and can handle multiple file formats including MD, PDF, DOCX, HTML, and more.
+Here's another example with the `CrewDoclingSource`. The CrewDoclingSource is actually quite versatile and can handle multiple file formats including TXT, PDF, DOCX, HTML, and more.
 
 <Note>
 You need to install `docling` for the following example to work: `uv add docling`
@@ -152,10 +152,10 @@ Here are examples of how to use different types of knowledge sources:
 
 ### Text File Knowledge Source
 ```python
-from crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource
+from crewai.knowledge.source.crew_docling_source import CrewDoclingSource
 
 # Create a text file knowledge source
-text_source = TextFileKnowledgeSource(
+text_source = CrewDoclingSource(
     file_paths=["document.txt", "another.txt"]
 )
 
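For orientation, a source like this is attached to a crew through the `knowledge_sources` parameter. A minimal sketch under that assumption; the agent/task wording and the question input are illustrative placeholders, not taken from this diff:

```python
from crewai import Agent, Crew, Process, Task
from crewai.knowledge.source.crew_docling_source import CrewDoclingSource

content_source = CrewDoclingSource(file_paths=["document.txt", "another.txt"])

agent = Agent(
    role="Researcher",
    goal="Answer questions using the provided documents",
    backstory="You answer strictly from the attached knowledge sources.",
)
task = Task(
    description="Answer the user question: {question}",
    expected_output="A short, sourced answer.",
    agent=agent,
)
crew = Crew(
    agents=[agent],
    tasks=[task],
    process=Process.sequential,
    knowledge_sources=[content_source],  # content is chunked, embedded, and queried at runtime
)
result = crew.kickoff(inputs={"question": "What does the document say about X?"})
```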
@@ -282,19 +282,6 @@ my_crew = Crew(
 
 ### Using Google AI embeddings
 
-#### Prerequisites
-Before using Google AI embeddings, ensure you have:
-- Access to the Gemini API
-- The necessary API keys and permissions
-
-You will need to update your *pyproject.toml* dependencies:
-```YAML
-dependencies = [
-    "google-generativeai>=0.8.4", #main version in January/2025 - crewai v.0.100.0 and crewai-tools 0.33.0
-    "crewai[tools]>=0.100.0,<1.0.0"
-]
-```
 
 ```python Code
 from crewai import Crew, Agent, Task, Process
 
@@ -268,7 +268,7 @@ analysis_task = Task(
 
 Task guardrails provide a way to validate and transform task outputs before they
 are passed to the next task. This feature helps ensure data quality and provides
-efeedback to agents when their output doesn't meet specific criteria.
+feedback to agents when their output doesn't meet specific criteria.
 
 ### Using Task Guardrails
 
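For context, a guardrail in this API is a callable attached to a `Task` that receives the `TaskOutput` and returns a `(passed, value_or_feedback)` tuple; on failure, the feedback string is sent back to the agent for a retry. A minimal sketch assuming that contract (`analyst` is a hypothetical agent defined elsewhere):

```python
import json
from typing import Any, Tuple

from crewai import Task
from crewai.tasks.task_output import TaskOutput


def validate_json_output(result: TaskOutput) -> Tuple[bool, Any]:
    """Return (True, transformed_value) to pass the output to the next task,
    or (False, feedback) to feed the error back to the agent for a retry."""
    try:
        return (True, json.loads(result.raw))
    except json.JSONDecodeError:
        return (False, "Output must be valid JSON")


analysis_task = Task(
    description="Summarize the data as JSON",
    expected_output="A JSON object",
    agent=analyst,  # hypothetical agent defined elsewhere
    guardrail=validate_json_output,
)
```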
@@ -1147,20 +1147,32 @@ class Crew(BaseModel):
 
     def test(
         self,
-        n_iterations: int,
+        n_iterations: int = 1,
         openai_model_name: Optional[str] = None,
+        llm: Optional[Union[str, LLM]] = None,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
-        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
+        """Test and evaluate the Crew with the given inputs for n iterations.
+
+        Args:
+            n_iterations: Number of iterations to run the test
+            openai_model_name: OpenAI model name to use for evaluation (deprecated)
+            llm: LLM instance or model name to use for evaluation
+            inputs: Optional dictionary of inputs to pass to the crew
+        """
+        if not llm and not openai_model_name:
+            raise ValueError("Must provide either 'llm' or 'openai_model_name' parameter")
+
+        model_to_use = self._get_llm_instance(llm, openai_model_name)
         test_crew = self.copy()
 
         self._test_execution_span = test_crew._telemetry.test_execution_span(
             test_crew,
             n_iterations,
             inputs,
-            openai_model_name,  # type: ignore[arg-type]
-        )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
+            str(model_to_use.model),
+        )
+        evaluator = CrewEvaluator(test_crew, model_to_use)
 
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)
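A quick sketch of the call styles the updated signature supports; `crew` is a `Crew` built elsewhere, and the model names are placeholders:

```python
from crewai import LLM

# New: pass a model name string or a configured LLM instance.
crew.test(n_iterations=2, llm="gpt-4o-mini", inputs={"topic": "AI"})
crew.test(n_iterations=2, llm=LLM(model="gpt-4o-mini"))

# Deprecated parameter still accepted during the transition:
crew.test(n_iterations=2, openai_model_name="gpt-4o-mini")
```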
@@ -1168,6 +1180,28 @@ class Crew(BaseModel):
 
         evaluator.print_crew_evaluation_result()
 
+    def _get_llm_instance(self, llm: Optional[Union[str, LLM]], openai_model_name: Optional[str]) -> LLM:
+        """Get an LLM instance from either llm or openai_model_name parameter.
+
+        Args:
+            llm: LLM instance or model name
+            openai_model_name: OpenAI model name (deprecated)
+
+        Returns:
+            LLM instance
+
+        Raises:
+            ValueError: If neither llm nor openai_model_name is provided
+        """
+        model = llm if llm is not None else openai_model_name
+        if model is None:
+            raise ValueError("Must provide either 'llm' or 'openai_model_name' parameter")
+        if isinstance(model, str):
+            return LLM(model=model)
+        if not isinstance(model, LLM):
+            raise ValueError("Model must be either a string or an LLM instance")
+        return model
+
     def __repr__(self):
         return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
 
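To make the helper's three branches concrete, a small sketch; this is illustrative only, since the method is private and is normally reached through `test()`:

```python
from crewai import LLM

llm_a = crew._get_llm_instance("gpt-4o-mini", None)             # str -> LLM(model="gpt-4o-mini")
llm_b = crew._get_llm_instance(LLM(model="gpt-4o-mini"), None)  # LLM instance passes through unchanged
llm_c = crew._get_llm_instance(None, "gpt-4o-mini")             # deprecated name still honored
# crew._get_llm_instance(None, None) raises ValueError
```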
@@ -1,138 +1,28 @@
 from pathlib import Path
-from typing import Dict, Iterator, List, Optional, Union
-from urllib.parse import urlparse
+from typing import Dict, List
 
-from pydantic import Field, field_validator
-
-from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
-from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
-from crewai.utilities.logger import Logger
+from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource
 
 
-class ExcelKnowledgeSource(BaseKnowledgeSource):
+class ExcelKnowledgeSource(BaseFileKnowledgeSource):
     """A knowledge source that stores and queries Excel file content using embeddings."""
 
-    # override content to be a dict of file paths to sheet names to csv content
-
-    _logger: Logger = Logger(verbose=True)
-
-    file_path: Optional[Union[Path, List[Path], str, List[str]]] = Field(
-        default=None,
-        description="[Deprecated] The path to the file. Use file_paths instead.",
-    )
-    file_paths: Optional[Union[Path, List[Path], str, List[str]]] = Field(
-        default_factory=list, description="The path to the file"
-    )
-    chunks: List[str] = Field(default_factory=list)
-    content: Dict[Path, Dict[str, str]] = Field(default_factory=dict)
-    safe_file_paths: List[Path] = Field(default_factory=list)
-
-    @field_validator("file_path", "file_paths", mode="before")
-    def validate_file_path(cls, v, info):
-        """Validate that at least one of file_path or file_paths is provided."""
-        # Single check if both are None, O(1) instead of nested conditions
-        if (
-            v is None
-            and info.data.get(
-                "file_path" if info.field_name == "file_paths" else "file_paths"
-            )
-            is None
-        ):
-            raise ValueError("Either file_path or file_paths must be provided")
-        return v
-
-    def _process_file_paths(self) -> List[Path]:
-        """Convert file_path to a list of Path objects."""
-
-        if hasattr(self, "file_path") and self.file_path is not None:
-            self._logger.log(
-                "warning",
-                "The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.",
-                color="yellow",
-            )
-            self.file_paths = self.file_path
-
-        if self.file_paths is None:
-            raise ValueError("Your source must be provided with a file_paths: []")
-
-        # Convert single path to list
-        path_list: List[Union[Path, str]] = (
-            [self.file_paths]
-            if isinstance(self.file_paths, (str, Path))
-            else list(self.file_paths)
-            if isinstance(self.file_paths, list)
-            else []
-        )
-
-        if not path_list:
-            raise ValueError(
-                "file_path/file_paths must be a Path, str, or a list of these types"
-            )
-
-        return [self.convert_to_path(path) for path in path_list]
-
-    def validate_content(self):
-        """Validate the paths."""
-        for path in self.safe_file_paths:
-            if not path.exists():
-                self._logger.log(
-                    "error",
-                    f"File not found: {path}. Try adding sources to the knowledge directory. If it's inside the knowledge directory, use the relative path.",
-                    color="red",
-                )
-                raise FileNotFoundError(f"File not found: {path}")
-            if not path.is_file():
-                self._logger.log(
-                    "error",
-                    f"Path is not a file: {path}",
-                    color="red",
-                )
-
-    def model_post_init(self, _) -> None:
-        if self.file_path:
-            self._logger.log(
-                "warning",
-                "The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.",
-                color="yellow",
-            )
-            self.file_paths = self.file_path
-        self.safe_file_paths = self._process_file_paths()
-        self.validate_content()
-        self.content = self._load_content()
-
-    def _load_content(self) -> Dict[Path, Dict[str, str]]:
-        """Load and preprocess Excel file content from multiple sheets.
-
-        Each sheet's content is converted to CSV format and stored.
-
-        Returns:
-            Dict[Path, Dict[str, str]]: A mapping of file paths to their respective sheet contents.
-
-        Raises:
-            ImportError: If required dependencies are missing.
-            FileNotFoundError: If the specified Excel file cannot be opened.
-        """
+    def load_content(self) -> Dict[Path, str]:
+        """Load and preprocess Excel file content."""
         pd = self._import_dependencies()
 
         content_dict = {}
         for file_path in self.safe_file_paths:
-            file_path = self.convert_to_path(file_path)
-            with pd.ExcelFile(file_path) as xl:
-                sheet_dict = {
-                    str(sheet_name): str(
-                        pd.read_excel(xl, sheet_name).to_csv(index=False)
-                    )
-                    for sheet_name in xl.sheet_names
-                }
-                content_dict[file_path] = sheet_dict
+            df = pd.read_excel(file_path)
+            content = df.to_csv(index=False)
+            content_dict[file_path] = content
         return content_dict
 
     def convert_to_path(self, path: Union[Path, str]) -> Path:
         """Convert a path to a Path object."""
         return Path(KNOWLEDGE_DIRECTORY + "/" + path) if isinstance(path, str) else path
 
     def _import_dependencies(self):
         """Dynamically import dependencies."""
         try:
             import openpyxl  # noqa
             import pandas as pd
 
             return pd
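For reference, instantiating the source looks the same on both sides of this diff; string paths resolve relative to the knowledge directory, `pathlib.Path` values are used as-is, and pandas plus openpyxl must be installed for `_import_dependencies` to succeed. The file name below is a placeholder:

```python
from crewai.knowledge.source.excel_knowledge_source import ExcelKnowledgeSource

# "sales.xlsx" is resolved under the knowledge directory ("knowledge/" by default).
excel_source = ExcelKnowledgeSource(file_paths=["sales.xlsx"])
```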
@@ -148,14 +38,10 @@ class ExcelKnowledgeSource(BaseKnowledgeSource):
         and save the embeddings.
         """
         # Convert dictionary values to a single string if content is a dictionary
-        # Updated to account for .xlsx workbooks with multiple tabs/sheets
-        content_str = ""
-        for value in self.content.values():
-            if isinstance(value, dict):
-                for sheet_value in value.values():
-                    content_str += str(sheet_value) + "\n"
-            else:
-                content_str += str(value) + "\n"
+        if isinstance(self.content, dict):
+            content_str = "\n".join(str(value) for value in self.content.values())
+        else:
+            content_str = str(self.content)
 
         new_chunks = self._chunk_text(content_str)
         self.chunks.extend(new_chunks)
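A toy illustration of why the removed nested branch existed: with the multi-sheet loader, `self.content` values are dicts mapping sheet names to CSV text, so flattening every sheet into the chunking input takes an inner loop. The data below is made up:

```python
# Old shape: {path: {sheet_name: csv}}; new shape: {path: csv}.
nested = {"report.xlsx": {"Q1": "a,b\n1,2", "Q2": "a,b\n3,4"}}
flat = {"report.xlsx": "a,b\n1,2"}


def flatten(content: dict) -> str:
    content_str = ""
    for value in content.values():
        if isinstance(value, dict):  # one CSV blob per sheet
            for sheet_value in value.values():
                content_str += str(sheet_value) + "\n"
        else:
            content_str += str(value) + "\n"
    return content_str


assert flatten(nested).count("a,b") == 2  # every sheet contributes its CSV header
assert flatten(flat).count("a,b") == 1
```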
@@ -1,4 +1,5 @@
 from collections import defaultdict
+from typing import Union
 
 from pydantic import BaseModel, Field
 from rich.box import HEAVY_EDGE
@@ -6,6 +7,7 @@ from rich.console import Console
 from rich.table import Table
 
 from crewai.agent import Agent
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
@@ -32,9 +34,9 @@ class CrewEvaluator:
     run_execution_times: defaultdict = defaultdict(list)
     iteration: int = 0
 
-    def __init__(self, crew, openai_model_name: str):
+    def __init__(self, crew, llm: Union[str, LLM]):
         self.crew = crew
-        self.openai_model_name = openai_model_name
+        self.llm = LLM(model=llm) if isinstance(llm, str) else llm
         self._telemetry = Telemetry()
         self._setup_for_evaluating()
 
@@ -51,7 +53,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=self.openai_model_name,
+            llm=self.llm,
         )
 
     def _evaluation_task(
@@ -181,7 +183,7 @@ class CrewEvaluator:
             self.crew,
             evaluation_result.pydantic.quality,
             current_task.execution_duration,
-            self.openai_model_name,
+            self.llm.model,
         )
         self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
         self.run_execution_times[self.iteration].append(
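Both accepted forms end up as an `LLM` instance, so downstream code can rely on `self.llm.model` being a plain string. A sketch; the import path is my assumption based on the `crewai.crew.CrewEvaluator` patch target used in the tests below, and `crew` is a placeholder:

```python
from crewai.llm import LLM
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator  # assumed path

evaluator = CrewEvaluator(crew, "gpt-4o-mini")             # string is coerced to LLM(model="gpt-4o-mini")
evaluator = CrewEvaluator(crew, LLM(model="gpt-4o-mini"))  # LLM instance is used as-is
print(evaluator.llm.model)  # plain model name, as passed to telemetry above
```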
@@ -51,7 +51,6 @@ writer = Agent(
 
 def test_crew_with_only_conditional_tasks_raises_error():
     """Test that creating a crew with only conditional tasks raises an error."""
-
     def condition_func(task_output: TaskOutput) -> bool:
         return True
 
@@ -83,7 +82,6 @@ def test_crew_with_only_conditional_tasks_raises_error():
         tasks=[conditional1, conditional2, conditional3],
     )
 
-
 def test_crew_config_conditional_requirement():
     with pytest.raises(ValueError):
         Crew(process=Process.sequential)
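For readers skimming the test diff, a minimal sketch of the `ConditionalTask` shape these tests exercise; `researcher` stands in for the agent fixture defined earlier in the module, and the import path is an assumption:

```python
from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput


def needs_follow_up(task_output: TaskOutput) -> bool:
    # The condition receives the previous task's output and decides execution.
    return "follow up" in task_output.raw.lower()


follow_up = ConditionalTask(
    description="Dig deeper into the flagged findings",
    expected_output="A follow-up analysis",
    agent=researcher,  # agent fixture from the test module
    condition=needs_follow_up,
)
```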
@@ -591,12 +589,12 @@ def test_crew_with_delegating_agents_should_not_override_task_tools():
     _, kwargs = mock_execute_sync.call_args
     tools = kwargs["tools"]
 
-    assert any(
-        isinstance(tool, TestTool) for tool in tools
-    ), "TestTool should be present"
-    assert any(
-        "delegate" in tool.name.lower() for tool in tools
-    ), "Delegation tool should be present"
+    assert any(isinstance(tool, TestTool) for tool in tools), (
+        "TestTool should be present"
+    )
+    assert any("delegate" in tool.name.lower() for tool in tools), (
+        "Delegation tool should be present"
+    )
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -655,12 +653,12 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():
     _, kwargs = mock_execute_sync.call_args
     tools = kwargs["tools"]
 
-    assert any(
-        isinstance(tool, TestTool) for tool in new_ceo.tools
-    ), "TestTool should be present"
-    assert any(
-        "delegate" in tool.name.lower() for tool in tools
-    ), "Delegation tool should be present"
+    assert any(isinstance(tool, TestTool) for tool in new_ceo.tools), (
+        "TestTool should be present"
+    )
+    assert any("delegate" in tool.name.lower() for tool in tools), (
+        "Delegation tool should be present"
+    )
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -784,17 +782,17 @@ def test_task_tools_override_agent_tools_with_allow_delegation():
     used_tools = kwargs["tools"]
 
     # Confirm AnotherTestTool is present but TestTool is not
-    assert any(
-        isinstance(tool, AnotherTestTool) for tool in used_tools
-    ), "AnotherTestTool should be present"
-    assert not any(
-        isinstance(tool, TestTool) for tool in used_tools
-    ), "TestTool should not be present among used tools"
+    assert any(isinstance(tool, AnotherTestTool) for tool in used_tools), (
+        "AnotherTestTool should be present"
+    )
+    assert not any(isinstance(tool, TestTool) for tool in used_tools), (
+        "TestTool should not be present among used tools"
+    )
 
     # Confirm delegation tool(s) are present
-    assert any(
-        "delegate" in tool.name.lower() for tool in used_tools
-    ), "Delegation tool should be present"
+    assert any("delegate" in tool.name.lower() for tool in used_tools), (
+        "Delegation tool should be present"
+    )
 
     # Finally, make sure the agent's original tools remain unchanged
     assert len(researcher_with_delegation.tools) == 1
@@ -1595,9 +1593,9 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():
 
     # Verify that exactly one tool was used and it was a CodeInterpreterTool
     assert len(used_tools) == 1, "Should have exactly one tool"
-    assert isinstance(
-        used_tools[0], CodeInterpreterTool
-    ), "Tool should be CodeInterpreterTool"
+    assert isinstance(used_tools[0], CodeInterpreterTool), (
+        "Tool should be CodeInterpreterTool"
+    )
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -1954,7 +1952,6 @@ def test_task_callback_on_crew():
 
 def test_task_callback_both_on_task_and_crew():
     from unittest.mock import MagicMock, patch
-
    mock_callback_on_task = MagicMock()
     mock_callback_on_crew = MagicMock()
 
@@ -2104,22 +2101,21 @@ def test_conditional_task_uses_last_output():
         expected_output="First output",
         agent=researcher,
     )
 
     def condition_fails(task_output: TaskOutput) -> bool:
         # This condition will never be met
         return "never matches" in task_output.raw.lower()
 
     def condition_succeeds(task_output: TaskOutput) -> bool:
         # This condition will match first task's output
         return "first success" in task_output.raw.lower()
 
     conditional_task1 = ConditionalTask(
         description="Second task - conditional that fails condition",
         expected_output="Second output",
         agent=researcher,
         condition=condition_fails,
     )
 
     conditional_task2 = ConditionalTask(
         description="Third task - conditional that succeeds using first task output",
         expected_output="Third output",
@@ -2138,37 +2134,35 @@ def test_conditional_task_uses_last_output():
         raw="First success output",  # Will be used by third task's condition
         agent=researcher.role,
     )
     mock_skipped = TaskOutput(
         description="Second task output",
         raw="",  # Empty output since condition fails
         agent=researcher.role,
     )
     mock_third = TaskOutput(
         description="Third task output",
         raw="Third task executed",  # Output when condition succeeds using first task output
         agent=writer.role,
     )
 
     # Set up mocks for task execution and conditional logic
     with patch.object(ConditionalTask, "should_execute") as mock_should_execute:
         # First conditional fails, second succeeds
         mock_should_execute.side_effect = [False, True]
 
         with patch.object(Task, "execute_sync") as mock_execute:
             mock_execute.side_effect = [mock_first, mock_third]
             result = crew.kickoff()
 
     # Verify execution behavior
     assert mock_execute.call_count == 2  # Only first and third tasks execute
     assert mock_should_execute.call_count == 2  # Both conditionals checked
 
-    # Verify outputs collection:
-    # First executed task output, followed by an automatically generated (skipped) output, then the conditional execution
+    # Verify outputs collection
     assert len(result.tasks_output) == 3
-    assert (
-        result.tasks_output[0].raw == "First success output"
-    )  # First task succeeded
-    assert (
-        result.tasks_output[1].raw == ""
-    )  # Second task skipped (condition failed)
-    assert (
-        result.tasks_output[2].raw == "Third task executed"
-    )  # Third task used first task's output
+    assert result.tasks_output[0].raw == "First success output"  # First task succeeded
+    assert result.tasks_output[1].raw == ""  # Second task skipped (condition failed)
+    assert result.tasks_output[2].raw == "Third task executed"  # Third task used first task's output
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_conditional_tasks_result_collection():
@@ -2178,20 +2172,20 @@ def test_conditional_tasks_result_collection():
         expected_output="First output",
         agent=researcher,
     )
 
     def condition_never_met(task_output: TaskOutput) -> bool:
         return "never matches" in task_output.raw.lower()
 
     def condition_always_met(task_output: TaskOutput) -> bool:
         return "success" in task_output.raw.lower()
 
     task2 = ConditionalTask(
         description="Conditional task that never executes",
         expected_output="Second output",
         agent=researcher,
         condition=condition_never_met,
     )
 
     task3 = ConditionalTask(
         description="Conditional task that always executes",
         expected_output="Third output",
@@ -2210,46 +2204,35 @@ def test_conditional_tasks_result_collection():
         raw="Success output",  # Triggers third task's condition
         agent=researcher.role,
     )
     mock_skipped = TaskOutput(
         description="Skipped output",
         raw="",  # Empty output for skipped task
         agent=researcher.role,
     )
     mock_conditional = TaskOutput(
         description="Conditional output",
         raw="Conditional task executed",
         agent=writer.role,
     )
 
     # Set up mocks for task execution and conditional logic
     with patch.object(ConditionalTask, "should_execute") as mock_should_execute:
         # First conditional fails, second succeeds
         mock_should_execute.side_effect = [False, True]
 
         with patch.object(Task, "execute_sync") as mock_execute:
             mock_execute.side_effect = [mock_success, mock_conditional]
             result = crew.kickoff()
 
     # Verify execution behavior
     assert mock_execute.call_count == 2  # Only first and third tasks execute
     assert mock_should_execute.call_count == 2  # Both conditionals checked
 
-    # Verify task output collection:
-    # There should be three outputs: normal task, skipped conditional task (empty output),
-    # and the conditional task that executed.
-    assert len(result.tasks_output) == 3
-    assert (
-        result.tasks_output[0].raw == "Success output"
-    )  # Normal task executed
-    assert result.tasks_output[1].raw == ""  # Second task skipped
-    assert (
-        result.tasks_output[2].raw == "Conditional task executed"
-    )  # Third task executed
-
     # Verify task output collection
     assert len(result.tasks_output) == 3
-    assert (
-        result.tasks_output[0].raw == "Success output"
-    )  # Normal task executed
-    assert result.tasks_output[1].raw == ""  # Second task skipped
-    assert (
-        result.tasks_output[2].raw == "Conditional task executed"
-    )  # Third task executed
+    assert result.tasks_output[0].raw == "Success output"  # Normal task executed
+    assert result.tasks_output[1].raw == ""  # Second task skipped
+    assert result.tasks_output[2].raw == "Conditional task executed"  # Third task executed
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_multiple_conditional_tasks():
@@ -2259,20 +2242,20 @@ def test_multiple_conditional_tasks():
         expected_output="Research output",
         agent=researcher,
     )
 
     def condition1(task_output: TaskOutput) -> bool:
         return "success" in task_output.raw.lower()
 
     def condition2(task_output: TaskOutput) -> bool:
         return "proceed" in task_output.raw.lower()
 
     task2 = ConditionalTask(
         description="First conditional task",
         expected_output="Conditional output 1",
         agent=writer,
         condition=condition1,
     )
 
     task3 = ConditionalTask(
         description="Second conditional task",
         expected_output="Conditional output 2",
@@ -2291,7 +2274,7 @@ def test_multiple_conditional_tasks():
         raw="Success and proceed output",
         agent=researcher.role,
     )
 
     # Set up mocks for task execution
     with patch.object(Task, "execute_sync", return_value=mock_success) as mock_execute:
         result = crew.kickoff()
@@ -2299,7 +2282,6 @@ def test_multiple_conditional_tasks():
     assert mock_execute.call_count == 3
     assert len(result.tasks_output) == 3
 
-
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_using_contextual_memory():
     from unittest.mock import patch
@@ -3324,8 +3306,7 @@ def test_conditional_should_execute():
 
 @mock.patch("crewai.crew.CrewEvaluator")
 @mock.patch("crewai.crew.Crew.copy")
-@mock.patch("crewai.crew.Crew.kickoff")
-def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
+def test_crew_testing_function(copy_mock, crew_evaluator_mock):
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
         expected_output="5 bullet points with a paragraph for each idea.",
@@ -3337,25 +3318,28 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
         tasks=[task],
     )
 
-    # Create a mock for the copied crew
-    copy_mock.return_value = crew
+    # Create a mock for the copied crew with a mock kickoff method
+    copied_crew = MagicMock()
+    copy_mock.return_value = copied_crew
+
+    # Create a mock for the CrewEvaluator instance
+    evaluator_instance = MagicMock()
+    crew_evaluator_mock.return_value = evaluator_instance
 
     n_iterations = 2
     crew.test(n_iterations, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"})
 
     # Ensure kickoff is called on the copied crew
-    kickoff_mock.assert_has_calls(
+    copied_crew.kickoff.assert_has_calls(
         [mock.call(inputs={"topic": "AI"}), mock.call(inputs={"topic": "AI"})]
     )
 
-    crew_evaluator.assert_has_calls(
-        [
-            mock.call(crew, "gpt-4o-mini"),
-            mock.call().set_iteration(1),
-            mock.call().set_iteration(2),
-            mock.call().print_crew_evaluation_result(),
-        ]
-    )
+    # Verify CrewEvaluator interactions
+    # We don't check the exact LLM object since it's created internally
+    assert len(crew_evaluator_mock.mock_calls) == 4
+    assert crew_evaluator_mock.mock_calls[1] == mock.call().set_iteration(1)
+    assert crew_evaluator_mock.mock_calls[2] == mock.call().set_iteration(2)
+    assert crew_evaluator_mock.mock_calls[3] == mock.call().print_crew_evaluation_result()
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
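The switch from `assert_has_calls` to indexing `mock_calls` is subtle: calling the patched class records the constructor call at index 0, and calls on its return value are recorded as `call().method(...)` entries, which compare equal regardless of the constructor's arguments. A self-contained illustration:

```python
from unittest import mock
from unittest.mock import MagicMock

evaluator_cls = MagicMock()                      # stands in for the patched CrewEvaluator class
instance = evaluator_cls("crew", "gpt-4o-mini")  # mock_calls[0] is the constructor call
instance.set_iteration(1)                        # recorded as call().set_iteration(1)
instance.print_crew_evaluation_result()          # recorded as call().print_crew_evaluation_result()

assert len(evaluator_cls.mock_calls) == 3
assert evaluator_cls.mock_calls[1] == mock.call().set_iteration(1)
assert evaluator_cls.mock_calls[2] == mock.call().print_crew_evaluation_result()
```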
@@ -3418,9 +3402,9 @@ def test_fetch_inputs():
     expected_placeholders = {"role_detail", "topic", "field"}
     actual_placeholders = crew.fetch_inputs()
 
-    assert (
-        actual_placeholders == expected_placeholders
-    ), f"Expected {expected_placeholders}, but got {actual_placeholders}"
+    assert actual_placeholders == expected_placeholders, (
+        f"Expected {expected_placeholders}, but got {actual_placeholders}"
+    )
 
 
 def test_task_tools_preserve_code_execution_tools():
@@ -3493,20 +3477,20 @@ def test_task_tools_preserve_code_execution_tools():
     used_tools = kwargs["tools"]
 
     # Verify all expected tools are present
-    assert any(
-        isinstance(tool, TestTool) for tool in used_tools
-    ), "Task's TestTool should be present"
-    assert any(
-        isinstance(tool, CodeInterpreterTool) for tool in used_tools
-    ), "CodeInterpreterTool should be present"
-    assert any(
-        "delegate" in tool.name.lower() for tool in used_tools
-    ), "Delegation tool should be present"
+    assert any(isinstance(tool, TestTool) for tool in used_tools), (
+        "Task's TestTool should be present"
+    )
+    assert any(isinstance(tool, CodeInterpreterTool) for tool in used_tools), (
+        "CodeInterpreterTool should be present"
+    )
+    assert any("delegate" in tool.name.lower() for tool in used_tools), (
+        "Delegation tool should be present"
+    )
 
     # Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools)
-    assert (
-        len(used_tools) == 4
-    ), "Should have TestTool, CodeInterpreter, and 2 delegation tools"
+    assert len(used_tools) == 4, (
+        "Should have TestTool, CodeInterpreter, and 2 delegation tools"
+    )
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -3550,9 +3534,9 @@ def test_multimodal_flag_adds_multimodal_tools():
     used_tools = kwargs["tools"]
 
     # Check that the multimodal tool was added
-    assert any(
-        isinstance(tool, AddImageTool) for tool in used_tools
-    ), "AddImageTool should be present when agent is multimodal"
+    assert any(isinstance(tool, AddImageTool) for tool in used_tools), (
+        "AddImageTool should be present when agent is multimodal"
+    )
 
     # Verify we have exactly one tool (just the AddImageTool)
     assert len(used_tools) == 1, "Should only have the AddImageTool"
@@ -3778,9 +3762,9 @@ def test_crew_guardrail_feedback_in_context():
     assert len(execution_contexts) > 1, "Task should have been executed multiple times"
 
     # Verify that the second execution included the guardrail feedback
-    assert (
-        "Output must contain the keyword 'IMPORTANT'" in execution_contexts[1]
-    ), "Guardrail feedback should be included in retry context"
+    assert "Output must contain the keyword 'IMPORTANT'" in execution_contexts[1], (
+        "Guardrail feedback should be included in retry context"
+    )
 
     # Verify final output meets guardrail requirements
     assert "IMPORTANT" in result.raw, "Final output should contain required keyword"
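The block below is a VCR cassette, the fixture format that the `@pytest.mark.vcr(filter_headers=["authorization"])` decorators above record and replay. A sketch of how such a test is typically written, assuming pytest-vcr/pytest-recording conventions; the test name and crew fixture are illustrative:

```python
import pytest


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_kickoff_replays_http():
    # On the first run, HTTP traffic is recorded into a cassette like the one
    # below; subsequent runs replay it instead of hitting the network.
    result = crew.kickoff()  # `crew` is assumed to be built by a fixture
    assert result.raw
```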
@@ -0,0 +1,942 @@
interactions:
- request:
    body: '{"messages": [{"role": "system", "content": "You are test. test\nYour personal
      goal is: test\nTo give my best complete final answer to the task respond using
      the exact following format:\n\nThought: I now can give a great answer\nFinal
      Answer: Your final answer must be the great and the most complete as possible,
      it must be outcome described.\n\nI MUST use these formats, my job depends on
      it!"}, {"role": "user", "content": "\nCurrent Task: test\n\nThis is the expected
      criteria for your final answer: test output\nyou MUST return the actual complete
      content as the final answer, not a summary.\n\nBegin! This is VERY important
      to you, use the tools available and give your best Final Answer, your job depends
      on it!\n\nThought:"}], "model": "gpt-4", "stop": ["\nObservation:"]}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      authorization:
      - Bearer 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
      connection:
      - keep-alive
      content-length:
      - '780'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.61.0
      x-stainless-arch:
      - x64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - Linux
      x-stainless-package-version:
      - 1.61.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AzAMqMWFX8hC1szIKxWNSyXm0SPFi\",\n \"object\":
      \"chat.completion\",\n \"created\": 1739141224,\n \"model\": \"gpt-4-0613\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"I am prepared to conduct the test efficiently.\\n\\nFinal
      Answer: The test output that aligns with the given criteria is a detailed description
      of the testing process, providing a thorough understanding for anyone reviewing
      it. The output not only contains the raw data or results but also includes step-by-step
      documentation of the process employed, thoughts and reasoning behind each step,
      deviations if any from the original plan, and how these deviations impacted
      the results. In addition, it captures any errors or unexpected occurrences during
      the course of the test, and proposes possible explanations or solutions for
      these. It is detailed yet comprehensible, catering to both technical and non-technical
      audiences. It is a result of meticulous planning, diligent execution, and robust
      post-test analysis, making it a complete content.\",\n \"refusal\": null\n
      \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
      \ ],\n \"usage\": {\n \"prompt_tokens\": 149,\n \"completion_tokens\":
      151,\n \"total_tokens\": 300,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
      0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
      \ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
      \"default\",\n \"system_fingerprint\": null\n}\n"
    headers:
      CF-RAY:
      - 90f7662cab11ba33-SEA
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sun, 09 Feb 2025 22:47:09 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=p1aGVyahvfLAvEwvbX0FMmrN5o18PpVAu2dG_dTgMSU-1739141229-1.0.1.1-_q7aCslZTr11IMFZ81VgyuqsGiqTARFPANUvBEWM_0dZdb97Py78KE1omxdNv5F1pFKoWUqA1kEF2wzQ2wz4aA;
        path=/; expires=Sun, 09-Feb-25 23:17:09 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=bsF0jwE67cS.ywAaQU59jKPFC03S1dvynClHm_wTQik-1739141229143-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '4585'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '1000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '999822'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 10ms
      x-request-id:
      - req_1ba81a80018602119b871a7a42d7becf
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: !!binary |
      Ct8LCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkStgsKEgoQY3Jld2FpLnRl
      bGVtZXRyeRL5AQoQT8uAOJ+suOhFs22RW56o6BII0Ob64+TP3XQqE0NyZXcgVGVzdCBFeGVjdXRp
      b24wATk1IlKguqsiGEGGgWOguqsiGEobCg5jcmV3YWlfdmVyc2lvbhIJCgcwLjEwMC4xSi4KCGNy
      ZXdfa2V5EiIKIGZlYjFlMjFiMzI1NmM1OWE2NDcxNTJhZmRkNjYzMjJlSjEKB2NyZXdfaWQSJgok
      M2Q1MGJkYWItZDI1NS00MjFiLThkMzMtZjZmOTAzMThhOWQwShEKCml0ZXJhdGlvbnMSAwoBMUoV
      Cgptb2RlbF9uYW1lEgcKBWdwdC00egIYAYUBAAEAABKSBwoQgzrB2KxaHe9FwPNktJHbFRIIT8gM
      r7rSvJUqDENyZXcgQ3JlYXRlZDABOW4kaqC6qyIYQe+yeaC6qyIYShsKDmNyZXdhaV92ZXJzaW9u
      EgkKBzAuMTAwLjFKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4xMi43Si4KCGNyZXdfa2V5EiIKIGZl
      YjFlMjFiMzI1NmM1OWE2NDcxNTJhZmRkNjYzMjJlSjEKB2NyZXdfaWQSJgokM2Q1MGJkYWItZDI1
      NS00MjFiLThkMzMtZjZmOTAzMThhOWQwShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEK
      C2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1i
      ZXJfb2ZfYWdlbnRzEgIYAUrFAgoLY3Jld19hZ2VudHMStQIKsgJbeyJrZXkiOiAiOTc2ZjhmNTBh
      Y2NmZWJhMjIzZTQ5YzQyYjE2ZTk5ZTYiLCAiaWQiOiAiN2E3NmZjNmYtZTI5YS00MDBlLWI0NGEt
      NzAyMDNlMzg1Y2RmIiwgInJvbGUiOiAidGVzdCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0
      ZXIiOiAyNSwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxs
      bSI6ICJncHQtNCIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4
      ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtd
      fV1K+QEKCmNyZXdfdGFza3MS6gEK5wFbeyJrZXkiOiAiZGE5NWViZGIzNmU0Y2RmOTJkZjZhNmRk
      MTZiY2VlMGUiLCAiaWQiOiAiNTcwYmJlYjQtYzkzNi00NTNkLTg2MjktYzhjMDM0ODA5NDhjIiwg
      ImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRf
      cm9sZSI6ICJ0ZXN0IiwgImFnZW50X2tleSI6ICI5NzZmOGY1MGFjY2ZlYmEyMjNlNDljNDJiMTZl
      OTllNiIsICJ0b29sc19uYW1lcyI6IFtdfV16AhgBhQEAAQAAEo4CChAus8hZAJcezzXdP2XqhVyF
      Egi1wnliqIdQdSoMVGFzayBDcmVhdGVkMAE5nneIoLqrIhhBKDyJoLqrIhhKLgoIY3Jld19rZXkS
      IgogZmViMWUyMWIzMjU2YzU5YTY0NzE1MmFmZGQ2NjMyMmVKMQoHY3Jld19pZBImCiQzZDUwYmRh
      Yi1kMjU1LTQyMWItOGQzMy1mNmY5MDMxOGE5ZDBKLgoIdGFza19rZXkSIgogZGE5NWViZGIzNmU0
      Y2RmOTJkZjZhNmRkMTZiY2VlMGVKMQoHdGFza19pZBImCiQ1NzBiYmViNC1jOTM2LTQ1M2QtODYy
      OS1jOGMwMzQ4MDk0OGN6AhgBhQEAAQAA
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '1506'
      Content-Type:
      - application/x-protobuf
      User-Agent:
      - OTel-OTLP-Exporter-Python/1.27.0
    method: POST
    uri: https://telemetry.crewai.com:4319/v1/traces
  response:
    body:
      string: "\n\0"
    headers:
      Content-Length:
      - '2'
      Content-Type:
      - application/x-protobuf
      Date:
      - Sun, 09 Feb 2025 22:47:09 GMT
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"role": "system", "content": "You are Task Execution Evaluator.
      Evaluator agent for crew evaluation with precise capabilities to evaluate the
      performance of the agents in the crew based on the tasks they have performed\nYour
      personal goal is: Your goal is to evaluate the performance of the agents in
      the crew based on the tasks they have performed using score from 1 to 10 evaluating
      on completion, quality, and overall performance.\nTo give my best complete final
      answer to the task respond using the exact following format:\n\nThought: I now
      can give a great answer\nFinal Answer: Your final answer must be the great and
      the most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
      Based on the task description and the expected output, compare and evaluate
      the performance of the agents in the crew based on the Task Output they have
      performed using score from 1 to 10 evaluating on completion, quality, and overall
      performance.task_description: test task_expected_output: test output agent:
      test agent_goal: test Task Output: The test output that aligns with the given
      criteria is a detailed description of the testing process, providing a thorough
      understanding for anyone reviewing it. The output not only contains the raw
      data or results but also includes step-by-step documentation of the process
      employed, thoughts and reasoning behind each step, deviations if any from the
      original plan, and how these deviations impacted the results. In addition, it
      captures any errors or unexpected occurrences during the course of the test,
      and proposes possible explanations or solutions for these. It is detailed yet
      comprehensible, catering to both technical and non-technical audiences. It is
      a result of meticulous planning, diligent execution, and robust post-test analysis,
      making it a complete content.\n\nThis is the expected criteria for your final
      answer: Evaluation Score from 1 to 10 based on the performance of the agents
      on the tasks\nyou MUST return the actual complete content as the final answer,
      not a summary.\nEnsure your final answer contains only the content in the following
      format: {\n \"quality\": float\n}\n\nEnsure the final output does not include
      any code block markers like ```json or ```python.\n\nBegin! This is VERY important
      to you, use the tools available and give your best Final Answer, your job depends
      on it!\n\nThought:"}], "model": "gpt-4", "stop": ["\nObservation:"]}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      authorization:
      - Bearer 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
      connection:
      - keep-alive
      content-length:
      - '2523'
      content-type:
      - application/json
      cookie:
      - __cf_bm=p1aGVyahvfLAvEwvbX0FMmrN5o18PpVAu2dG_dTgMSU-1739141229-1.0.1.1-_q7aCslZTr11IMFZ81VgyuqsGiqTARFPANUvBEWM_0dZdb97Py78KE1omxdNv5F1pFKoWUqA1kEF2wzQ2wz4aA;
        _cfuvid=bsF0jwE67cS.ywAaQU59jKPFC03S1dvynClHm_wTQik-1739141229143-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.61.0
      x-stainless-arch:
      - x64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - Linux
      x-stainless-package-version:
      - 1.61.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AzAMvAOSw5847reo2vh61focjnyK2\",\n \"object\":
      \"chat.completion\",\n \"created\": 1739141229,\n \"model\": \"gpt-4-0613\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"Based on the given task output, I can
      determine that the test agent has performed impressively well. Their work is
      comprehensive, catering to both non-technical and technical audiences and includes
      complete and detailed process documentation. Further, the way they detect and
      elaborate deviances and errors shows their meticulousness and efficiency. Their
      planning, execution, and analysis are sound.\\n\\nFinal Answer: The quality
      of this task is admirable, paying attention to details and meticulously planning
      and reasoning behind each step. Considering all these, on a scale from 1 to
      10, I would rate the task performed by the test agent as follows:\\n\\n{\\n
      \ \\\"quality\\\": 9.5\\n}\",\n \"refusal\": null\n },\n \"logprobs\":
      null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
      471,\n \"completion_tokens\": 133,\n \"total_tokens\": 604,\n \"prompt_tokens_details\":
      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
      \"default\",\n \"system_fingerprint\": null\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 90f7664a581eba33-SEA
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sun, 09 Feb 2025 22:47:14 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '4884'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '1000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '999388'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 36ms
      x-request-id:
      - req_0335ff13c1777c1bcdbee89879bc132c
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: !!binary |
      CvsNCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS0g0KEgoQY3Jld2FpLnRl
      bGVtZXRyeRKZAgoQJQy0LAglHxA7Ok+n0Gmi9hIIybabc5KDQlkqG0NyZXcgSW5kaXZpZHVhbCBU
      ZXN0IFJlc3VsdDABOQK5tem8qyIYQVTw0+m8qyIYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTAw
      LjFKLgoIY3Jld19rZXkSIgogZmViMWUyMWIzMjU2YzU5YTY0NzE1MmFmZGQ2NjMyMmVKMQoHY3Jl
      d19pZBImCiQzZDUwYmRhYi1kMjU1LTQyMWItOGQzMy1mNmY5MDMxOGE5ZDBKEAoHcXVhbGl0eRIF
      CgM5LjVKFwoJZXhlY190aW1lEgoKCDQuODA5MTc4ShUKCm1vZGVsX25hbWUSBwoFZ3B0LTR6AhgB
      hQEAAQAAEvkBChCnePSvJJg/cFeYF3HlEvyVEgh3YAewpHkssyoTQ3JldyBUZXN0IEV4ZWN1dGlv
      bjABOeiBSOq8qyIYQQWTV+q8qyIYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTAwLjFKLgoIY3Jl
      d19rZXkSIgogZmViMWUyMWIzMjU2YzU5YTY0NzE1MmFmZGQ2NjMyMmVKMQoHY3Jld19pZBImCiQ2
      YzYyNmEyZi05OGRlLTQ2ODAtOWJhNC01NWVkYzdmODhiZTNKEQoKaXRlcmF0aW9ucxIDCgExShUK
      Cm1vZGVsX25hbWUSBwoFZ3B0LTR6AhgBhQEAAQAAEpIHChB0Gz3vlppAjams1hbMI/RQEggPCufR
      e9thfSoMQ3JldyBDcmVhdGVkMAE5X3td6ryrIhhBWNhr6ryrIhhKGwoOY3Jld2FpX3ZlcnNpb24S
      CQoHMC4xMDAuMUoaCg5weXRob25fdmVyc2lvbhIICgYzLjEyLjdKLgoIY3Jld19rZXkSIgogZmVi
      MWUyMWIzMjU2YzU5YTY0NzE1MmFmZGQ2NjMyMmVKMQoHY3Jld19pZBImCiQ2YzYyNmEyZi05OGRl
      LTQ2ODAtOWJhNC01NWVkYzdmODhiZTNKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoL
      Y3Jld19tZW1vcnkSAhAAShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJl
      cl9vZl9hZ2VudHMSAhgBSsUCCgtjcmV3X2FnZW50cxK1AgqyAlt7ImtleSI6ICI5NzZmOGY1MGFj
      Y2ZlYmEyMjNlNDljNDJiMTZlOTllNiIsICJpZCI6ICJkYTA4M2Q5ZS0xOWU5LTQyMzAtYjZmNC0y
      NjlhNzM1NzViOWQiLCAicm9sZSI6ICJ0ZXN0IiwgInZlcmJvc2U/IjogZmFsc2UsICJtYXhfaXRl
      ciI6IDI1LCAibWF4X3JwbSI6IG51bGwsICJmdW5jdGlvbl9jYWxsaW5nX2xsbSI6ICIiLCAibGxt
      IjogImdwdC00IiwgImRlbGVnYXRpb25fZW5hYmxlZD8iOiBmYWxzZSwgImFsbG93X2NvZGVfZXhl
      Y3V0aW9uPyI6IGZhbHNlLCAibWF4X3JldHJ5X2xpbWl0IjogMiwgInRvb2xzX25hbWVzIjogW119
      XUr5AQoKY3Jld190YXNrcxLqAQrnAVt7ImtleSI6ICJkYTk1ZWJkYjM2ZTRjZGY5MmRmNmE2ZGQx
      NmJjZWUwZSIsICJpZCI6ICJhNGUwYjM1Ny0zMDBlLTQ0MjMtYTU1My0yZTZlMWQxODg1M2MiLCAi
      YXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiaHVtYW5faW5wdXQ/IjogZmFsc2UsICJhZ2VudF9y
      b2xlIjogInRlc3QiLCAiYWdlbnRfa2V5IjogIjk3NmY4ZjUwYWNjZmViYTIyM2U0OWM0MmIxNmU5
      OWU2IiwgInRvb2xzX25hbWVzIjogW119XXoCGAGFAQABAAASjgIKEC5jZek+sSlZP8lSwF5zTSYS
      CIHcVJhIpsWuKgxUYXNrIENyZWF0ZWQwATl9vnrqvKsiGEFgjnvqvKsiGEouCghjcmV3X2tleRIi
      CiBmZWIxZTIxYjMyNTZjNTlhNjQ3MTUyYWZkZDY2MzIyZUoxCgdjcmV3X2lkEiYKJDZjNjI2YTJm
      LTk4ZGUtNDY4MC05YmE0LTU1ZWRjN2Y4OGJlM0ouCgh0YXNrX2tleRIiCiBkYTk1ZWJkYjM2ZTRj
      ZGY5MmRmNmE2ZGQxNmJjZWUwZUoxCgd0YXNrX2lkEiYKJGE0ZTBiMzU3LTMwMGUtNDQyMy1hNTUz
      LTJlNmUxZDE4ODUzY3oCGAGFAQABAAA=
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '1790'
      Content-Type:
      - application/x-protobuf
      User-Agent:
      - OTel-OTLP-Exporter-Python/1.27.0
    method: POST
    uri: https://telemetry.crewai.com:4319/v1/traces
  response:
    body:
      string: "\n\0"
    headers:
      Content-Length:
      - '2'
      Content-Type:
      - application/x-protobuf
      Date:
      - Sun, 09 Feb 2025 22:47:14 GMT
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"role": "system", "content": "You are test. test\nYour personal
      goal is: test\nTo give my best complete final answer to the task respond using
      the exact following format:\n\nThought: I now can give a great answer\nFinal
      Answer: Your final answer must be the great and the most complete as possible,
      it must be outcome described.\n\nI MUST use these formats, my job depends on
      it!"}, {"role": "user", "content": "\nCurrent Task: test\n\nThis is the expected
      criteria for your final answer: test output\nyou MUST return the actual complete
      content as the final answer, not a summary.\n\nBegin! This is VERY important
      to you, use the tools available and give your best Final Answer, your job depends
      on it!\n\nThought:"}], "model": "gpt-4", "stop": ["\nObservation:"]}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      authorization:
      - Bearer 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
      connection:
      - keep-alive
      content-length:
      - '780'
      content-type:
      - application/json
      cookie:
      - __cf_bm=p1aGVyahvfLAvEwvbX0FMmrN5o18PpVAu2dG_dTgMSU-1739141229-1.0.1.1-_q7aCslZTr11IMFZ81VgyuqsGiqTARFPANUvBEWM_0dZdb97Py78KE1omxdNv5F1pFKoWUqA1kEF2wzQ2wz4aA;
        _cfuvid=bsF0jwE67cS.ywAaQU59jKPFC03S1dvynClHm_wTQik-1739141229143-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.61.0
      x-stainless-arch:
      - x64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - Linux
      x-stainless-package-version:
      - 1.61.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AzAN0cgAktzQnGukedPNpZsTy461c\",\n \"object\":
      \"chat.completion\",\n \"created\": 1739141234,\n \"model\": \"gpt-4-0613\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"I have an understanding of the task at
      hand and am ready to provide an in-depth and comprehensive answer.\\n\\nFinal
      Answer: As per the requirement of the task to provide a complete output, I am
      returning this test output as my final answer. It is not a summary, but rather
      a full and comprehensive response that fully addresses the question and expectations
      set forth. Your test output is ready.\",\n \"refusal\": null\n },\n
      \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
      \ \"usage\": {\n \"prompt_tokens\": 149,\n \"completion_tokens\": 77,\n
      \ \"total_tokens\": 226,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
      0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
      \ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
      \"default\",\n \"system_fingerprint\": null\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 90f76669bd9eba33-SEA
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sun, 09 Feb 2025 22:47:17 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '3379'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '1000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '999822'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 10ms
      x-request-id:
      - req_977371ae262154885689d766016ed132
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: '{"messages": [{"role": "system", "content": "You are Task Execution Evaluator.
      Evaluator agent for crew evaluation with precise capabilities to evaluate the
      performance of the agents in the crew based on the tasks they have performed\nYour
      personal goal is: Your goal is to evaluate the performance of the agents in
      the crew based on the tasks they have performed using score from 1 to 10 evaluating
      on completion, quality, and overall performance.\nTo give my best complete final
      answer to the task respond using the exact following format:\n\nThought: I now
      can give a great answer\nFinal Answer: Your final answer must be the great and
      the most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
      Based on the task description and the expected output, compare and evaluate
      the performance of the agents in the crew based on the Task Output they have
      performed using score from 1 to 10 evaluating on completion, quality, and overall
      performance.task_description: test task_expected_output: test output agent:
      test agent_goal: test Task Output: As per the requirement of the task to provide
      a complete output, I am returning this test output as my final answer. It is
      not a summary, but rather a full and comprehensive response that fully addresses
      the question and expectations set forth. Your test output is ready.\n\nThis
      is the expected criteria for your final answer: Evaluation Score from 1 to 10
      based on the performance of the agents on the tasks\nyou MUST return the actual
      complete content as the final answer, not a summary.\nEnsure your final answer
      contains only the content in the following format: {\n \"quality\": float\n}\n\nEnsure
      the final output does not include any code block markers like ```json or ```python.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4", "stop": ["\nObservation:"]}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      authorization:
      - Bearer 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
      connection:
      - keep-alive
      content-length:
      - '2017'
      content-type:
      - application/json
      cookie:
      - __cf_bm=p1aGVyahvfLAvEwvbX0FMmrN5o18PpVAu2dG_dTgMSU-1739141229-1.0.1.1-_q7aCslZTr11IMFZ81VgyuqsGiqTARFPANUvBEWM_0dZdb97Py78KE1omxdNv5F1pFKoWUqA1kEF2wzQ2wz4aA;
        _cfuvid=bsF0jwE67cS.ywAaQU59jKPFC03S1dvynClHm_wTQik-1739141229143-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.61.0
      x-stainless-arch:
      - x64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - Linux
      x-stainless-package-version:
      - 1.61.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AzAN3PPgDvH836sMJBXCVRc3im99S\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1739141237,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Based on the information provided, the
|
||||
agent appears to have completed the task, providing an output that they have
|
||||
defined as 'full and comprehensive'. It appears that the agent has attempted
|
||||
to meet all the expectations of the task description and has reached the goal
|
||||
of returning a 'test output' as the final answer.\\n\\nFinal Answer: Considering
|
||||
these aspects, for task completion, the agent receives a 10 as they have successfully
|
||||
generated an output. For quality, the agent again receives a significant score
|
||||
of 10, because the full and comprehensive nature of the output matches the task's
|
||||
expectations. Finally, taking into account both the completion and quality aspects,
|
||||
the overall performance evaluation is also 10, recognizing the perfect alignment
|
||||
between the task's expected output and the output delivered by the agent. \\nTherefore,
|
||||
the final evaluation score can be summarized in the below format:\\n{\\n\\\"completion\\\":
|
||||
10,\\n\\\"quality\\\": 10,\\n\\\"overall performance\\\": 10\\n}\",\n \"refusal\":
|
||||
null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 385,\n \"completion_tokens\":
|
||||
190,\n \"total_tokens\": 575,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 90f7667f9bfdba33-SEA
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 09 Feb 2025 22:47:24 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '6909'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '1000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '999515'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 29ms
|
||||
x-request-id:
|
||||
- req_3e283ae8c1cd132001ecf2d96198bbd6
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are test. test\nYour personal
|
||||
goal is: test\nTo give my best complete final answer to the task respond using
|
||||
the exact following format:\n\nThought: I now can give a great answer\nFinal
|
||||
Answer: Your final answer must be the great and the most complete as possible,
|
||||
it must be outcome described.\n\nI MUST use these formats, my job depends on
|
||||
it!"}, {"role": "user", "content": "\nCurrent Task: test\n\nThis is the expected
|
||||
criteria for your final answer: test output\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\n\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}], "model": "gpt-4", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
authorization:
|
||||
- Bearer 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
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '780'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=p1aGVyahvfLAvEwvbX0FMmrN5o18PpVAu2dG_dTgMSU-1739141229-1.0.1.1-_q7aCslZTr11IMFZ81VgyuqsGiqTARFPANUvBEWM_0dZdb97Py78KE1omxdNv5F1pFKoWUqA1kEF2wzQ2wz4aA;
|
||||
_cfuvid=bsF0jwE67cS.ywAaQU59jKPFC03S1dvynClHm_wTQik-1739141229143-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.61.0
|
||||
x-stainless-arch:
|
||||
- x64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- Linux
|
||||
x-stainless-package-version:
|
||||
- 1.61.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AzANAznjrZdppuFIsRnEouHG8WuM0\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1739141244,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I am ready to prepare my final answer
|
||||
based on the test output criteria provided\\nFinal Answer: I have followed all
|
||||
the instructions provided in the task to the best of my ability, and the outcome
|
||||
of the test is as described in the final answer. It is complete, detailed, and
|
||||
accurate.\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
149,\n \"completion_tokens\": 59,\n \"total_tokens\": 208,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 90f766abaaa3ba33-SEA
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 09 Feb 2025 22:47:27 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '2533'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '1000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '999822'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 10ms
|
||||
x-request-id:
|
||||
- req_6ea4c81627695f58de56727aa8d8cc59
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: !!binary |
|
||||
CvwNCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS0w0KEgoQY3Jld2FpLnRl
|
||||
bGVtZXRyeRKaAgoQGJwIgEdh/Dq2y8ue+Gl/XxIInTNNpEL8yjQqG0NyZXcgSW5kaXZpZHVhbCBU
|
||||
ZXN0IFJlc3VsdDABOaAPzl6/qyIYQYnY6V6/qyIYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTAw
|
||||
LjFKLgoIY3Jld19rZXkSIgogZmViMWUyMWIzMjU2YzU5YTY0NzE1MmFmZGQ2NjMyMmVKMQoHY3Jl
|
||||
d19pZBImCiQ2YzYyNmEyZi05OGRlLTQ2ODAtOWJhNC01NWVkYzdmODhiZTNKEQoHcXVhbGl0eRIG
|
||||
CgQxMC4wShcKCWV4ZWNfdGltZRIKCggzLjUwMzUyNkoVCgptb2RlbF9uYW1lEgcKBWdwdC00egIY
|
||||
AYUBAAEAABL5AQoQsfCFg6/ZkEo2LShWV3X+WhII4I9o90lQzxMqE0NyZXcgVGVzdCBFeGVjdXRp
|
||||
b24wATmWhUlfv6siGEFeYlZfv6siGEobCg5jcmV3YWlfdmVyc2lvbhIJCgcwLjEwMC4xSi4KCGNy
|
||||
ZXdfa2V5EiIKIGZlYjFlMjFiMzI1NmM1OWE2NDcxNTJhZmRkNjYzMjJlSjEKB2NyZXdfaWQSJgok
|
||||
ZDU2ZjljMWEtYmRkMS00MDI3LWI1ZjctMzg1ZGVlMWU2YjljShEKCml0ZXJhdGlvbnMSAwoBMUoV
|
||||
Cgptb2RlbF9uYW1lEgcKBWdwdC00egIYAYUBAAEAABKSBwoQlucrHD/mwnCU8Dl9QKzgYhIIGyix
|
||||
8K7RcoAqDENyZXcgQ3JlYXRlZDABOTQxXF+/qyIYQamXaV+/qyIYShsKDmNyZXdhaV92ZXJzaW9u
|
||||
EgkKBzAuMTAwLjFKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4xMi43Si4KCGNyZXdfa2V5EiIKIGZl
|
||||
YjFlMjFiMzI1NmM1OWE2NDcxNTJhZmRkNjYzMjJlSjEKB2NyZXdfaWQSJgokZDU2ZjljMWEtYmRk
|
||||
MS00MDI3LWI1ZjctMzg1ZGVlMWU2YjljShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEK
|
||||
C2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1i
|
||||
ZXJfb2ZfYWdlbnRzEgIYAUrFAgoLY3Jld19hZ2VudHMStQIKsgJbeyJrZXkiOiAiOTc2ZjhmNTBh
|
||||
Y2NmZWJhMjIzZTQ5YzQyYjE2ZTk5ZTYiLCAiaWQiOiAiMzcwMzA5YTQtMDU5OS00MWVlLWFiMTgt
|
||||
YWE1ZmQ1Mjg2ZGQ1IiwgInJvbGUiOiAidGVzdCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0
|
||||
ZXIiOiAyNSwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxs
|
||||
bSI6ICJncHQtNCIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4
|
||||
ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtd
|
||||
fV1K+QEKCmNyZXdfdGFza3MS6gEK5wFbeyJrZXkiOiAiZGE5NWViZGIzNmU0Y2RmOTJkZjZhNmRk
|
||||
MTZiY2VlMGUiLCAiaWQiOiAiZTBmNDgzNjAtYzNjNS00ZGY1LThkZjEtNDg2ZTc4OWNiZWUyIiwg
|
||||
ImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRf
|
||||
cm9sZSI6ICJ0ZXN0IiwgImFnZW50X2tleSI6ICI5NzZmOGY1MGFjY2ZlYmEyMjNlNDljNDJiMTZl
|
||||
OTllNiIsICJ0b29sc19uYW1lcyI6IFtdfV16AhgBhQEAAQAAEo4CChA4OLnKHp32b0EUM2g5rs+r
|
||||
EgjifMpu5dQ6xCoMVGFzayBDcmVhdGVkMAE5x1d3X7+rIhhBMxF4X7+rIhhKLgoIY3Jld19rZXkS
|
||||
IgogZmViMWUyMWIzMjU2YzU5YTY0NzE1MmFmZGQ2NjMyMmVKMQoHY3Jld19pZBImCiRkNTZmOWMx
|
||||
YS1iZGQxLTQwMjctYjVmNy0zODVkZWUxZTZiOWNKLgoIdGFza19rZXkSIgogZGE5NWViZGIzNmU0
|
||||
Y2RmOTJkZjZhNmRkMTZiY2VlMGVKMQoHdGFza19pZBImCiRlMGY0ODM2MC1jM2M1LTRkZjUtOGRm
|
||||
MS00ODZlNzg5Y2JlZTJ6AhgBhQEAAQAA
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '1791'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
User-Agent:
|
||||
- OTel-OTLP-Exporter-Python/1.27.0
|
||||
method: POST
|
||||
uri: https://telemetry.crewai.com:4319/v1/traces
|
||||
response:
|
||||
body:
|
||||
string: "\n\0"
|
||||
headers:
|
||||
Content-Length:
|
||||
- '2'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
Date:
|
||||
- Sun, 09 Feb 2025 22:47:29 GMT
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Task Execution Evaluator.
|
||||
Evaluator agent for crew evaluation with precise capabilities to evaluate the
|
||||
performance of the agents in the crew based on the tasks they have performed\nYour
|
||||
personal goal is: Your goal is to evaluate the performance of the agents in
|
||||
the crew based on the tasks they have performed using score from 1 to 10 evaluating
|
||||
on completion, quality, and overall performance.\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
|
||||
Based on the task description and the expected output, compare and evaluate
|
||||
the performance of the agents in the crew based on the Task Output they have
|
||||
performed using score from 1 to 10 evaluating on completion, quality, and overall
|
||||
performance.task_description: test task_expected_output: test output agent:
|
||||
test agent_goal: test Task Output: I have followed all the instructions provided
|
||||
in the task to the best of my ability, and the outcome of the test is as described
|
||||
in the final answer. It is complete, detailed, and accurate.\n\nThis is the
|
||||
expected criteria for your final answer: Evaluation Score from 1 to 10 based
|
||||
on the performance of the agents on the tasks\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\nEnsure your final answer contains
|
||||
only the content in the following format: {\n \"quality\": float\n}\n\nEnsure
|
||||
the final output does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
authorization:
|
||||
- Bearer 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
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1935'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=p1aGVyahvfLAvEwvbX0FMmrN5o18PpVAu2dG_dTgMSU-1739141229-1.0.1.1-_q7aCslZTr11IMFZ81VgyuqsGiqTARFPANUvBEWM_0dZdb97Py78KE1omxdNv5F1pFKoWUqA1kEF2wzQ2wz4aA;
|
||||
_cfuvid=bsF0jwE67cS.ywAaQU59jKPFC03S1dvynClHm_wTQik-1739141229143-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.61.0
|
||||
x-stainless-arch:
|
||||
- x64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- Linux
|
||||
x-stainless-package-version:
|
||||
- 1.61.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AzANDGSiRIu1XO56ZMfLuO2SuL4l0\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1739141247,\n \"model\": \"gpt-4-0613\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Looking at the task output provided by
|
||||
the agent, the agent has expressed a high level of confidence in their ability
|
||||
to follow the instructions provided to the best of their ability. The agent
|
||||
seems to have executed the task with detailed attention and accuracy.\\n\\nI
|
||||
need to evaluate both the quality and the overall performance of the agent keeping
|
||||
in mind the task description and the expected output. Given the agent's output
|
||||
and goal, I can deduce the quality of their work as well as their overall performance.\\n\\nFinal
|
||||
Answer: \\n{\\n \\\"quality\\\": 8.5\\n}\",\n \"refusal\": null\n },\n
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
|
||||
\ \"usage\": {\n \"prompt_tokens\": 372,\n \"completion_tokens\": 112,\n
|
||||
\ \"total_tokens\": 484,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 90f766bc4d87ba33-SEA
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sun, 09 Feb 2025 22:47:32 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '5056'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '1000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '999535'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 27ms
|
||||
x-request-id:
|
||||
- req_ff2926b015823a70e2173c71f8d63209
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
71 tests/utilities/evaluators/test_custom_llm_support.py Normal file
@@ -0,0 +1,71 @@
from unittest.mock import MagicMock

import pytest

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.llm import LLM
from crewai.task import Task
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator


@pytest.mark.vcr()
def test_crew_test_with_custom_llm():
    """Test Crew.test() with both string model name and LLM instance."""

    # Setup
    agent = Agent(
        role="test",
        goal="test",
        backstory="test",
        llm=LLM(model="gpt-4"),
    )
    task = Task(
        description="test",
        expected_output="test output",
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task])

    # Test with string model name
    crew.test(n_iterations=1, llm="gpt-4")

    # Test with LLM instance
    custom_llm = LLM(model="gpt-4")
    crew.test(n_iterations=1, llm=custom_llm)

    # Test backward compatibility
    crew.test(n_iterations=1, openai_model_name="gpt-4")

    # Test error when neither parameter is provided
    with pytest.raises(ValueError, match="Must provide either 'llm' or 'openai_model_name' parameter"):
        crew.test(n_iterations=1)


def test_crew_evaluator_with_custom_llm():
    # Setup
    agent = Agent(
        role="test",
        goal="test",
        backstory="test",
        llm=LLM(model="gpt-4"),
    )
    task = Task(
        description="test",
        expected_output="test output",
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task])

    # Test with string model name
    evaluator = CrewEvaluator(crew, "gpt-4")
    assert isinstance(evaluator.llm, LLM)
    assert evaluator.llm.model == "gpt-4"

    # Test with LLM instance
    custom_llm = LLM(model="gpt-4")
    evaluator = CrewEvaluator(crew, custom_llm)
    assert evaluator.llm == custom_llm

    # Test that evaluator agent uses the correct LLM
    evaluator_agent = evaluator._evaluator_agent()
    assert evaluator_agent.llm == evaluator.llm
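The two tests above pin down the contract rather than the implementation: `CrewEvaluator` must accept either a model-name string or an `LLM` instance and expose the result as `evaluator.llm`. A minimal sketch of the normalization those assertions imply, assuming a constructor of this shape (the real implementation in `crew_evaluator_handler.py` is not part of this diff):

```python
from typing import Union

from crewai.crew import Crew
from crewai.llm import LLM


class CrewEvaluator:
    """Illustrative sketch only, not the shipped implementation."""

    def __init__(self, crew: Crew, llm: Union[str, LLM]):
        self.crew = crew
        # A bare model-name string is wrapped into an LLM instance so the
        # evaluator agent can always rely on an LLM object downstream.
        self.llm = LLM(model=llm) if isinstance(llm, str) else llm
```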