diff --git a/src/crewai/crew.py b/src/crewai/crew.py
index 5e08f59b8..5a1fc87fe 100644
--- a/src/crewai/crew.py
+++ b/src/crewai/crew.py
@@ -6,15 +6,15 @@ from typing import Any, Dict, List, Optional, Tuple, Union
 
 from langchain_core.callbacks import BaseCallbackHandler
 from pydantic import (
-    UUID4,
-    BaseModel,
-    ConfigDict,
-    Field,
-    InstanceOf,
-    Json,
-    PrivateAttr,
-    field_validator,
-    model_validator,
+    UUID4,
+    BaseModel,
+    ConfigDict,
+    Field,
+    InstanceOf,
+    Json,
+    PrivateAttr,
+    field_validator,
+    model_validator,
 )
 from pydantic_core import PydanticCustomError
@@ -656,5 +656,27 @@ class Crew(BaseModel):
         )
         self._telemetry.end_crew(self, final_string_output)
 
+    def calculate_usage_metrics(self) -> Dict[str, int]:
+        """Calculates and returns the usage metrics."""
+        total_usage_metrics = {
+            "total_tokens": 0,
+            "prompt_tokens": 0,
+            "completion_tokens": 0,
+            "successful_requests": 0,
+        }
+
+        for agent in self.agents:
+            if hasattr(agent, "_token_process"):
+                token_sum = agent._token_process.get_summary()
+                for key in total_usage_metrics:
+                    total_usage_metrics[key] += token_sum.get(key, 0)
+
+        if self.manager_agent and hasattr(self.manager_agent, "_token_process"):
+            token_sum = self.manager_agent._token_process.get_summary()
+            for key in total_usage_metrics:
+                total_usage_metrics[key] += token_sum.get(key, 0)
+
+        return total_usage_metrics
+
     def __repr__(self):
         return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
diff --git a/src/crewai/crews/crew_output.py b/src/crewai/crews/crew_output.py
index 99a8e37bc..018fc76ff 100644
--- a/src/crewai/crews/crew_output.py
+++ b/src/crewai/crews/crew_output.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Union
+from typing import Any, Dict, List
 
 from pydantic import BaseModel, Field
 
@@ -18,7 +18,7 @@ class CrewOutput(BaseModel):
     # TODO: Ask @joao what is the desired behavior here
     def result(
         self,
-    ) -> List[str | BaseModel | Dict[str, Any]]]:
+    ) -> List[str | BaseModel | Dict[str, Any]]:
         """Return the result of the task based on the available output."""
         results = [output.result() for output in self.output]
         return results
diff --git a/src/crewai/task.py b/src/crewai/task.py
index 5ac6fad63..06d2808df 100644
--- a/src/crewai/task.py
+++ b/src/crewai/task.py
@@ -217,7 +217,7 @@ class Task(BaseModel):
                 f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
             )
 
-        self._execution_span = self._telemetry.task_started(self)
+        self._execution_span = self._telemetry.task_started(crew=agent.crew, task=self)
 
         if self.context:
             task_outputs: List[TaskOutput] = []
diff --git a/tests/agent_test.py b/tests/agent_test.py
index 7c0f3c242..cb4a63bc9 100644
--- a/tests/agent_test.py
+++ b/tests/agent_test.py
@@ -4,10 +4,6 @@ from unittest import mock
 from unittest.mock import patch
 
 import pytest
-from langchain.tools import tool
-from langchain_core.exceptions import OutputParserException
-from langchain_openai import ChatOpenAI
-
 from crewai import Agent, Crew, Task
 from crewai.agents.cache import CacheHandler
 from crewai.agents.executor import CrewAgentExecutor
@@ -15,6 +11,9 @@ from crewai.agents.parser import CrewAgentParser
 from crewai.tools.tool_calling import InstructorToolCalling
 from crewai.tools.tool_usage import ToolUsage
 from crewai.utilities import RPMController
+from langchain.tools import tool
+from langchain_core.exceptions import OutputParserException
+from langchain_openai import ChatOpenAI
 
 
 def test_agent_creation():
@@ -631,8 +630,8 @@ def test_agent_use_specific_tasks_output_as_context(capsys):
     crew = Crew(agents=[agent1, agent2], tasks=tasks)
 
     result = crew.kickoff()
-    assert "bye" not in result.lower()
-    assert "hi" in result.lower() or "hello" in result.lower()
+    assert "bye" not in result.raw_output().lower()
+    assert "hi" in result.raw_output().lower() or "hello" in result.raw_output().lower()
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -750,7 +749,8 @@ def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
     crew = Crew(agents=[agent1], tasks=tasks)
 
     result = crew.kickoff()
-    assert result == "Howdy!"
+    print("RESULT: ", result.raw_output())
+    assert result.raw_output() == "Howdy!"
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
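Reviewer note: a minimal sketch of how the two user-visible changes above fit together, assuming the standard crewai Agent/Task/Crew constructors; the role/goal/backstory/description values are illustrative placeholders, while calculate_usage_metrics() and raw_output() are the APIs this diff adds and exercises in the updated tests.

    from crewai import Agent, Crew, Task

    # Illustrative agent/task; any valid configuration works here.
    greeter = Agent(
        role="Greeter",
        goal="Greet the user",
        backstory="A friendly assistant.",
    )
    say_hi = Task(description="Say hi.", agent=greeter)

    crew = Crew(agents=[greeter], tasks=[say_hi])
    result = crew.kickoff()

    # kickoff() now returns an output object rather than a plain string,
    # so the updated tests read the final text via raw_output() instead
    # of comparing the result directly.
    print(result.raw_output())

    # The new Crew.calculate_usage_metrics() sums each agent's
    # _token_process summary (plus the manager agent's, if present) into
    # one dict with total_tokens, prompt_tokens, completion_tokens and
    # successful_requests keys.
    print(crew.calculate_usage_metrics())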