Fixing missing function. Working on tests.

Brandon Hancock
2024-07-02 15:31:32 -04:00
parent 053d8a0449
commit e745094d73
6 changed files with 234413 additions and 18770 deletions

View File

@@ -55,7 +55,6 @@ class Crew(BaseModel):
         max_rpm: Maximum number of requests per minute for the crew execution to be respected.
         prompt_file: Path to the prompt json file to be used for the crew.
         id: A unique identifier for the crew instance.
-        full_output: Whether the crew should return the full output with all tasks outputs and token usage metrics or just the final output.
         task_callback: Callback to be executed after each task for every agents execution.
         step_callback: Callback to be executed after each step for every agents execution.
         share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models.
@@ -91,10 +90,6 @@ class Crew(BaseModel):
         default=None,
         description="Metrics for the LLM usage during all tasks execution.",
     )
-    full_output: Optional[bool] = Field(
-        default=False,
-        description="Whether the crew should return the full output with all tasks outputs and token usage metrics or just the final output.",
-    )
     manager_llm: Optional[Any] = Field(
         description="Language model that will run the agent.", default=None
     )
@@ -598,7 +593,7 @@ class Crew(BaseModel):
         return CrewOutput(
             output=output,
             tasks_output=[task.output for task in self.tasks if task],
-            token_output=token_usage,
+            token_usage=token_usage,
         )

     def _finish_execution(self, final_string_output: str) -> None:
@@ -606,5 +601,27 @@ class Crew(BaseModel):
         self._rpm_controller.stop_rpm_counter()
         self._telemetry.end_crew(self, final_string_output)

+    def calculate_usage_metrics(self) -> Dict[str, int]:
+        """Calculates and returns the usage metrics."""
+        total_usage_metrics = {
+            "total_tokens": 0,
+            "prompt_tokens": 0,
+            "completion_tokens": 0,
+            "successful_requests": 0,
+        }
+
+        for agent in self.agents:
+            if hasattr(agent, "_token_process"):
+                token_sum = agent._token_process.get_summary()
+                for key in total_usage_metrics:
+                    total_usage_metrics[key] += token_sum.get(key, 0)
+
+        if self.manager_agent and hasattr(self.manager_agent, "_token_process"):
+            token_sum = self.manager_agent._token_process.get_summary()
+            for key in total_usage_metrics:
+                total_usage_metrics[key] += token_sum.get(key, 0)
+
+        return total_usage_metrics
+
     def __repr__(self):
         return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
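The new calculate_usage_metrics method just sums each agent's token summary into a single dict. Below is a minimal, self-contained Python sketch of that aggregation; StubAgent and StubTokenProcess are illustrative stand-ins that expose a _token_process.get_summary() shaped like the one the method expects, not crewAI classes.

# Sketch of the aggregation performed by calculate_usage_metrics().
# StubTokenProcess / StubAgent are stand-ins, not crewAI classes.
from typing import Dict, List


class StubTokenProcess:
    def __init__(self, summary: Dict[str, int]):
        self._summary = summary

    def get_summary(self) -> Dict[str, int]:
        return self._summary


class StubAgent:
    def __init__(self, summary: Dict[str, int]):
        self._token_process = StubTokenProcess(summary)


def aggregate(agents: List[StubAgent]) -> Dict[str, int]:
    # Same keys and summing loop as the method added in the hunk above.
    totals = {
        "total_tokens": 0,
        "prompt_tokens": 0,
        "completion_tokens": 0,
        "successful_requests": 0,
    }
    for agent in agents:
        if hasattr(agent, "_token_process"):
            summary = agent._token_process.get_summary()
            for key in totals:
                totals[key] += summary.get(key, 0)
    return totals


agents = [
    StubAgent({"total_tokens": 120, "prompt_tokens": 90, "completion_tokens": 30, "successful_requests": 2}),
    StubAgent({"total_tokens": 80, "prompt_tokens": 50, "completion_tokens": 30, "successful_requests": 1}),
]
print(aggregate(agents))
# {'total_tokens': 200, 'prompt_tokens': 140, 'completion_tokens': 60, 'successful_requests': 3}

The resulting dict has the same shape as the token_usage value passed to CrewOutput in the hunk above.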

View File

@@ -11,13 +11,15 @@ class CrewOutput(BaseModel):
     tasks_output: list[TaskOutput] = Field(
         description="Output of each task", default=[]
     )
-    token_output: Dict[str, Any] = Field(
+    token_usage: Dict[str, Any] = Field(
         description="Processed token summary", default={}
     )

+    # TODO: Ask @joao what is the desired behavior here
     def result(self) -> Union[str, BaseModel, Dict[str, Any]]:
         """Return the result of the task based on the available output."""
-        return self.output.result()
+        results = [output.result() for output in self.output]
+        return results if len(results) > 1 else results[0]

     def raw_output(self) -> str:
         """Return the raw output of the task."""
@@ -29,5 +31,6 @@ class CrewOutput(BaseModel):
     def __getitem__(self, key: str) -> Any:
         self.output[key]

+    # TODO: Confirm with Joao that we want to print the raw output and not the object
     def __str__(self):
         return str(self.raw_output())
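Together with the crew.py hunk, callers now read token usage from CrewOutput.token_usage, and result() returns a list only when more than one output is present. A small stand-alone Python sketch of that behavior; FakeTaskOutput and FakeCrewOutput are stand-ins mirroring the changed methods, not the crewAI classes.

# Stand-ins mirroring the CrewOutput changes above (not the crewAI classes).
from typing import Any, Dict, List


class FakeTaskOutput:
    def __init__(self, text: str):
        self.text = text

    def result(self) -> str:
        return self.text


class FakeCrewOutput:
    def __init__(self, output: List[FakeTaskOutput], token_usage: Dict[str, Any]):
        self.output = output
        self.token_usage = token_usage  # renamed from token_output in this commit

    def result(self):
        # List only when several outputs exist, otherwise the single result.
        results = [o.result() for o in self.output]
        return results if len(results) > 1 else results[0]


single = FakeCrewOutput([FakeTaskOutput("final answer")], {"total_tokens": 200})
many = FakeCrewOutput([FakeTaskOutput("a"), FakeTaskOutput("b")], {"total_tokens": 350})

print(single.result())                    # "final answer"
print(many.result())                      # ["a", "b"]
print(many.token_usage["total_tokens"])   # 350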

View File

@@ -3,7 +3,7 @@ import re
 import threading
 import uuid
 from concurrent.futures import Future
-from copy import copy, deepcopy
+from copy import copy
 from typing import Any, Dict, List, Optional, Type, Union

 from langchain_openai import ChatOpenAI
@@ -209,8 +209,8 @@ class Task(BaseModel):
         if self.context:
             context_list = []
             for task in self.context:
-                if task.async_execution and task.thread:
-                    task.thread.join()
+                if task.async_execution and task._thread:
+                    task._thread.join()
                 if task and task.output:
                     context_list.append(task.output.raw_output)
             context = "\n".join(context_list)
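The context loop now joins the private _thread handle before reading an async task's output. A self-contained Python sketch of that join-before-read pattern; FakeAsyncTask is illustrative and is not crewAI's Task class.

# Sketch of the join-before-read pattern used in the hunk above.
# FakeAsyncTask is a stand-in, not crewAI's Task.
import threading
import time


class FakeAsyncTask:
    def __init__(self):
        self.async_execution = True
        self.output = None
        self._thread = None

    def _run(self):
        time.sleep(0.1)          # simulate slow work
        self.output = "task output"

    def start(self):
        self._thread = threading.Thread(target=self._run)
        self._thread.start()


task = FakeAsyncTask()
task.start()

# Mirror of the context-building loop: wait for async tasks before reading output.
if task.async_execution and task._thread:
    task._thread.join()
if task and task.output:
    print(task.output)           # "task output", guaranteed because we joined first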