fix: correct default model (gpt-4o), correct token counts, and correct TaskOutput attributes (added agent) (#749)

* fix: add the missing 'from datetime import datetime' import so the logger can print timestamps

* fix: correct default model (gpt-4o), correct token counts, and correct TaskOutput attributes (added agent)

* test: verify Task callback data is an instance of TaskOutput
Author: Matt Thompson
Date: 2024-06-11 20:29:22 +02:00
Committed by: GitHub
Parent: 946c56494e
Commit: bb622bf747
4 changed files with 59 additions and 9 deletions


@@ -1,3 +1,4 @@
+from datetime import datetime
 from crewai.utilities.printer import Printer
@@ -13,6 +14,7 @@ class Logger:
     def log(self, level, message, color="bold_green"):
         level_map = {"debug": 1, "info": 2}
         if self.verbose_level and level_map.get(level, 0) <= self.verbose_level:
-            timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
-            self._printer.print(f"[{timestamp}][{level.upper()}]: {message}", color=color)
+            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            self._printer.print(
+                f"[{timestamp}][{level.upper()}]: {message}", color=color
+            )
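
For context, a minimal self-contained sketch of the patched log method (the Printer stub and the Logger constructor below are assumptions standing in for the real crewai.utilities wiring; only the log body is taken from the diff):

from datetime import datetime


class Printer:
    # Hypothetical stand-in for crewai.utilities.printer.Printer;
    # the real class also maps color names to terminal escape codes.
    def print(self, content, color=None):
        print(content)


class Logger:
    def __init__(self, verbose_level=2):
        # Hypothetical constructor; the real Logger's wiring may differ.
        self.verbose_level = verbose_level
        self._printer = Printer()

    def log(self, level, message, color="bold_green"):
        level_map = {"debug": 1, "info": 2}
        if self.verbose_level and level_map.get(level, 0) <= self.verbose_level:
            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            self._printer.print(
                f"[{timestamp}][{level.upper()}]: {message}", color=color
            )


Logger().log("info", "crew execution started")
# Prints something like: [2024-06-11 20:29:22][INFO]: crew execution started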


@@ -29,7 +29,7 @@ def test_agent_default_values():
     agent = Agent(role="test role", goal="test goal", backstory="test backstory")
     assert isinstance(agent.llm, ChatOpenAI)
-    assert agent.llm.model_name == "gpt-4"
+    assert agent.llm.model_name == "gpt-4o"
     assert agent.llm.temperature == 0.7
     assert agent.llm.verbose is False
     assert agent.allow_delegation is True
@@ -732,7 +732,7 @@ def test_agent_llm_uses_token_calc_handler_with_llm_has_model_name():
     assert len(agent1.llm.callbacks) == 1
     assert agent1.llm.callbacks[0].__class__.__name__ == "TokenCalcHandler"
-    assert agent1.llm.callbacks[0].model == "gpt-4"
+    assert agent1.llm.callbacks[0].model == "gpt-4o"
     assert (
         agent1.llm.callbacks[0].token_cost_process.__class__.__name__ == "TokenProcess"
     )
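
To make these assertions concrete, a hedged sketch of the callback/counter pair they exercise (the class and attribute names come from the test; the method bodies are assumptions, not crewAI's actual implementation):

class TokenProcess:
    # Hypothetical accumulator; only the class name comes from the test.
    def __init__(self):
        self.total_tokens = 0
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.successful_requests = 0

    def record(self, prompt_tokens, completion_tokens):
        # Tally one successful LLM call.
        self.prompt_tokens += prompt_tokens
        self.completion_tokens += completion_tokens
        self.total_tokens += prompt_tokens + completion_tokens
        self.successful_requests += 1


class TokenCalcHandler:
    # Hypothetical LLM callback; the test only inspects .model and
    # .token_cost_process, which is exactly what this sketch provides.
    def __init__(self, model, token_cost_process):
        self.model = model
        self.token_cost_process = token_cost_process


handler = TokenCalcHandler("gpt-4o", TokenProcess())
assert handler.model == "gpt-4o"
assert handler.token_cost_process.__class__.__name__ == "TokenProcess"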


@@ -445,10 +445,14 @@ def test_async_task_execution():
         start.return_value = thread
         with patch.object(threading.Thread, "join", wraps=thread.join()) as join:
             list_ideas.output = TaskOutput(
-                description="A 4 paragraph article about AI.", raw_output="ok"
+                description="A 4 paragraph article about AI.",
+                raw_output="ok",
+                agent="writer",
             )
             list_important_history.output = TaskOutput(
-                description="A 4 paragraph article about AI.", raw_output="ok"
+                description="A 4 paragraph article about AI.",
+                raw_output="ok",
+                agent="writer",
             )
             crew.kickoff()
             start.assert_called()
@@ -677,9 +681,10 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():
     result = crew.kickoff()
     assert result == '"Howdy!"'
     print(crew.usage_metrics)
     assert crew.usage_metrics == {
-        "total_tokens": 1666,
-        "prompt_tokens": 1383,
+        "total_tokens": 1664,
+        "prompt_tokens": 1381,
         "completion_tokens": 283,
         "successful_requests": 3,
     }


@@ -1,5 +1,7 @@
 """Test Agent creation and execution basic functionality."""
+import json
+
 from unittest.mock import MagicMock, patch
 import pytest
@@ -7,6 +9,7 @@ from pydantic import BaseModel
 from pydantic_core import ValidationError
 from crewai import Agent, Crew, Process, Task
+from crewai.tasks.task_output import TaskOutput

 def test_task_tool_reflect_agent_tools():
@@ -105,6 +108,46 @@ def test_task_callback():
     task_completed.assert_called_once_with(task.output)


+def test_task_callback_returns_task_ouput():
+    researcher = Agent(
+        role="Researcher",
+        goal="Make the best research and analysis on content about AI and AI agents",
+        backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.",
+        allow_delegation=False,
+    )
+
+    task_completed = MagicMock(return_value="done")
+    task = Task(
+        description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
+        expected_output="Bullet point list of 5 interesting ideas.",
+        agent=researcher,
+        callback=task_completed,
+    )
+
+    with patch.object(Agent, "execute_task") as execute:
+        execute.return_value = "exported_ok"
+        task.execute()
+        # Ensure the callback is called with a TaskOutput object serialized to JSON
+        task_completed.assert_called_once()
+        callback_data = task_completed.call_args[0][0]
+
+        # Check if callback_data is TaskOutput object or JSON string
+        if isinstance(callback_data, TaskOutput):
+            callback_data = json.dumps(callback_data.model_dump())
+
+        assert isinstance(callback_data, str)
+        output_dict = json.loads(callback_data)
+        expected_output = {
+            "description": task.description,
+            "exported_output": "exported_ok",
+            "raw_output": "exported_ok",
+            "agent": researcher.role,
+            "summary": "Give me a list of 5 interesting ideas to explore...",
+        }
+        assert output_dict == expected_output
+
+
 def test_execute_with_agent():
     researcher = Agent(
         role="Researcher",