Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-10 08:38:30 +00:00)
feat: enhance per-agent token metrics accuracy by aggregating task data
@@ -1644,43 +1644,57 @@ class Crew(FlowTrackable, BaseModel):
         else:
             workflow_metrics = WorkflowTokenMetrics()
 
+        # Build per-agent metrics from per-task data (more accurate)
+        # This avoids the cumulative token issue where all agents show the same total
+        agent_token_sums = {}
+
+        if workflow_metrics.per_task:
+            # Sum up tokens for each agent from their tasks
+            for task_name, task_metrics in workflow_metrics.per_task.items():
+                agent_name = task_metrics.agent_name
+                if agent_name not in agent_token_sums:
+                    agent_token_sums[agent_name] = {
+                        'total_tokens': 0,
+                        'prompt_tokens': 0,
+                        'cached_prompt_tokens': 0,
+                        'completion_tokens': 0,
+                        'successful_requests': 0
+                    }
+                agent_token_sums[agent_name]['total_tokens'] += task_metrics.total_tokens
+                agent_token_sums[agent_name]['prompt_tokens'] += task_metrics.prompt_tokens
+                agent_token_sums[agent_name]['cached_prompt_tokens'] += task_metrics.cached_prompt_tokens
+                agent_token_sums[agent_name]['completion_tokens'] += task_metrics.completion_tokens
+                agent_token_sums[agent_name]['successful_requests'] += task_metrics.successful_requests
+
+        # Create per-agent metrics from the summed task data
         for agent in self.agents:
             agent_role = getattr(agent, 'role', 'Unknown Agent')
             agent_id = str(getattr(agent, 'id', ''))
 
-            if isinstance(agent.llm, BaseLLM):
-                llm_usage = agent.llm.get_token_usage_summary()
-                total_usage_metrics.add_usage_metrics(llm_usage)
-
-                # Create per-agent metrics
-                agent_metrics = AgentTokenMetrics(
-                    agent_name=agent_role,
-                    agent_id=agent_id,
-                    total_tokens=llm_usage.total_tokens,
-                    prompt_tokens=llm_usage.prompt_tokens,
-                    cached_prompt_tokens=llm_usage.cached_prompt_tokens,
-                    completion_tokens=llm_usage.completion_tokens,
-                    successful_requests=llm_usage.successful_requests
-                )
-                workflow_metrics.per_agent[agent_role] = agent_metrics
-            else:
-                # fallback litellm
-                if hasattr(agent, "_token_process"):
-                    token_sum = agent._token_process.get_summary()
-                    total_usage_metrics.add_usage_metrics(token_sum)
-
-                    # Create per-agent metrics from litellm
-                    agent_metrics = AgentTokenMetrics(
-                        agent_name=agent_role,
-                        agent_id=agent_id,
-                        total_tokens=token_sum.total_tokens,
-                        prompt_tokens=token_sum.prompt_tokens,
-                        cached_prompt_tokens=token_sum.cached_prompt_tokens,
-                        completion_tokens=token_sum.completion_tokens,
-                        successful_requests=token_sum.successful_requests
-                    )
-                    workflow_metrics.per_agent[agent_role] = agent_metrics
+            if agent_role in agent_token_sums:
+                # Use accurate per-task summed data
+                sums = agent_token_sums[agent_role]
+                agent_metrics = AgentTokenMetrics(
+                    agent_name=agent_role,
+                    agent_id=agent_id,
+                    total_tokens=sums['total_tokens'],
+                    prompt_tokens=sums['prompt_tokens'],
+                    cached_prompt_tokens=sums['cached_prompt_tokens'],
+                    completion_tokens=sums['completion_tokens'],
+                    successful_requests=sums['successful_requests']
+                )
+                workflow_metrics.per_agent[agent_role] = agent_metrics
+
+            # Still get total usage for overall metrics
+            if isinstance(agent.llm, BaseLLM):
+                llm_usage = agent.llm.get_token_usage_summary()
+                total_usage_metrics.add_usage_metrics(llm_usage)
+            else:
+                # fallback litellm
+                if hasattr(agent, "_token_process"):
+                    token_sum = agent._token_process.get_summary()
+                    total_usage_metrics.add_usage_metrics(token_sum)
 
         if self.manager_agent:
             manager_role = getattr(self.manager_agent, 'role', 'Manager Agent')
             manager_id = str(getattr(self.manager_agent, 'id', ''))
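For readers skimming the diff, the aggregation it introduces can be illustrated in isolation. The sketch below is a minimal, self-contained model of the idea, not crewAI's actual API: the TaskMetrics dataclass, the sum_tokens_per_agent helper, and the sample numbers are hypothetical stand-ins for the per-task data held in WorkflowTokenMetrics.per_task. Summing per task and then grouping by agent is what keeps every agent from reporting the same cumulative total.

# Hypothetical stand-ins for per-task token data; not the crewAI classes.
from collections import defaultdict
from dataclasses import dataclass

@dataclass
class TaskMetrics:
    agent_name: str
    total_tokens: int
    prompt_tokens: int
    completion_tokens: int
    successful_requests: int

def sum_tokens_per_agent(per_task: dict[str, TaskMetrics]) -> dict[str, dict[str, int]]:
    """Group per-task token usage into per-agent buckets."""
    sums: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
    for task_metrics in per_task.values():
        bucket = sums[task_metrics.agent_name]
        bucket["total_tokens"] += task_metrics.total_tokens
        bucket["prompt_tokens"] += task_metrics.prompt_tokens
        bucket["completion_tokens"] += task_metrics.completion_tokens
        bucket["successful_requests"] += task_metrics.successful_requests
    return {agent: dict(bucket) for agent, bucket in sums.items()}

if __name__ == "__main__":
    per_task = {
        "research_task": TaskMetrics("Researcher", 1200, 900, 300, 2),
        "summary_task": TaskMetrics("Researcher", 800, 600, 200, 1),
        "report_task": TaskMetrics("Writer", 1500, 1000, 500, 3),
    }
    # Researcher: 2000 total tokens over 3 requests; Writer: 1500 over 3.
    print(sum_tokens_per_agent(per_task))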