Fixing issues brought about by merge

Brandon Hancock
2024-07-08 09:00:36 -04:00
parent 10b84955ad
commit 363ce5e9ce
4 changed files with 41 additions and 19 deletions

View File

@@ -6,15 +6,15 @@ from typing import Any, Dict, List, Optional, Tuple, Union
from langchain_core.callbacks import BaseCallbackHandler
from pydantic import (
    UUID4,
    BaseModel,
    ConfigDict,
    Field,
    InstanceOf,
    Json,
    PrivateAttr,
    field_validator,
    model_validator,
)
from pydantic_core import PydanticCustomError
@@ -656,5 +656,27 @@ class Crew(BaseModel):
        )
        self._telemetry.end_crew(self, final_string_output)
+
+    def calculate_usage_metrics(self) -> Dict[str, int]:
+        """Calculates and returns the usage metrics."""
+        total_usage_metrics = {
+            "total_tokens": 0,
+            "prompt_tokens": 0,
+            "completion_tokens": 0,
+            "successful_requests": 0,
+        }
+        for agent in self.agents:
+            if hasattr(agent, "_token_process"):
+                token_sum = agent._token_process.get_summary()
+                for key in total_usage_metrics:
+                    total_usage_metrics[key] += token_sum.get(key, 0)
+        if self.manager_agent and hasattr(self.manager_agent, "_token_process"):
+            token_sum = self.manager_agent._token_process.get_summary()
+            for key in total_usage_metrics:
+                total_usage_metrics[key] += token_sum.get(key, 0)
+        return total_usage_metrics
+
+    def __repr__(self):
+        return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"

View File

@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Union
+from typing import Any, Dict, List
from pydantic import BaseModel, Field
@@ -18,7 +18,7 @@ class CrewOutput(BaseModel):
    # TODO: Ask @joao what is the desired behavior here
    def result(
        self,
-    ) -> List[str | BaseModel | Dict[str, Any]]]:
+    ) -> List[str | BaseModel | Dict[str, Any]]:
        """Return the result of the task based on the available output."""
        results = [output.result() for output in self.output]
        return results
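
The whole fix here is one stray "]": the removed line had three closing brackets in the return annotation, which is a SyntaxError. A small sketch of the corrected signature, assuming each element of output is a TaskOutput-like object with a result() method (FakeTaskOutput below is hypothetical, and the union syntax needs Python 3.10+):

from typing import Any, Dict, List

from pydantic import BaseModel

class FakeTaskOutput:
    """Hypothetical stand-in for crewAI's TaskOutput."""
    def __init__(self, value: str) -> None:
        self._value = value

    def result(self) -> str:
        return self._value

def result(output: List[FakeTaskOutput]) -> List[str | BaseModel | Dict[str, Any]]:
    # With the extra `]` removed the annotation parses; the body is unchanged.
    return [o.result() for o in output]

print(result([FakeTaskOutput("hi"), FakeTaskOutput("hello")]))  # ['hi', 'hello']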

View File

@@ -217,7 +217,7 @@ class Task(BaseModel):
f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
)
self._execution_span = self._telemetry.task_started(self)
self._execution_span = self._telemetry.task_started(crew=agent.crew, task=self)
if self.context:
task_outputs: List[TaskOutput] = []
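
The added line does two things: it threads the crew through to telemetry (crew=agent.crew) and switches to keyword arguments, so the recorded span knows which crew the task ran in and the two parameters can't be swapped silently. A sketch with a hypothetical FakeTelemetry stand-in (the real method returns a span object):

class FakeTelemetry:
    """Hypothetical stand-in for crewAI's Telemetry."""
    def task_started(self, crew, task):
        return {"crew": crew, "task": task}

telemetry = FakeTelemetry()
# Keyword arguments keep the call site self-documenting:
span = telemetry.task_started(crew="research_crew", task="summarize findings")
print(span)  # {'crew': 'research_crew', 'task': 'summarize findings'}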

View File

@@ -4,10 +4,6 @@ from unittest import mock
from unittest.mock import patch
import pytest
-from langchain.tools import tool
-from langchain_core.exceptions import OutputParserException
-from langchain_openai import ChatOpenAI
from crewai import Agent, Crew, Task
from crewai.agents.cache import CacheHandler
from crewai.agents.executor import CrewAgentExecutor
@@ -15,6 +11,9 @@ from crewai.agents.parser import CrewAgentParser
from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage
from crewai.utilities import RPMController
+from langchain.tools import tool
+from langchain_core.exceptions import OutputParserException
+from langchain_openai import ChatOpenAI
def test_agent_creation():
@@ -631,8 +630,8 @@ def test_agent_use_specific_tasks_output_as_context(capsys):
    crew = Crew(agents=[agent1, agent2], tasks=tasks)
    result = crew.kickoff()
-    assert "bye" not in result.lower()
-    assert "hi" in result.lower() or "hello" in result.lower()
+    assert "bye" not in result.raw_output().lower()
+    assert "hi" in result.raw_output().lower() or "hello" in result.raw_output().lower()
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -750,7 +749,8 @@ def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
    crew = Crew(agents=[agent1], tasks=tasks)
    result = crew.kickoff()
-    assert result == "Howdy!"
+    print("RESULT: ", result.raw_output())
+    assert result.raw_output() == "Howdy!"
@pytest.mark.vcr(filter_headers=["authorization"])
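
Both test updates follow from the same change: crew.kickoff() now returns an output object rather than a plain string, so string assertions have to go through raw_output(). A sketch with a hypothetical FakeCrewOutput standing in for the real return type:

class FakeCrewOutput:
    """Hypothetical stand-in for the object kickoff() now returns."""
    def __init__(self, raw: str) -> None:
        self._raw = raw

    def raw_output(self) -> str:
        return self._raw

result = FakeCrewOutput("Howdy!")
assert result != "Howdy!"               # comparing the object itself no longer matches
assert result.raw_output() == "Howdy!"  # the updated tests compare the raw string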