mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-09 08:08:32 +00:00
* better spacing * works with llama index * works on langchain custom just need delegation to work * cleanup for custom_agent class * works with different argument expectations for agent_executor * cleanup for hierarchial process, better agent_executor args handler and added to the crew agent doc page * removed code examples for langchain + llama index, added to docs instead * added key output if return is not a str for and added some tests * added hinting for CustomAgent class * removed pass as it was not needed * closer just need to figuire ou agentTools * running agents - llamaindex and langchain with base agent * some cleanup on baseAgent * minimum for agent to run for base class and ensure it works with hierarchical process * cleanup for original agent to take on BaseAgent class * Agent takes on langchainagent and cleanup across * token handling working for usage_metrics to continue working * installed llama-index, updated docs and added better name * fixed some type errors * base agent holds token_process * heirarchail process uses proper tools and no longer relies on hasattr for token_processes * removal of test_custom_agent_executions * this fixes copying agents * leveraging an executor class for trigger llamaindex agent * llama index now has ask_human * executor mixins added * added output converter base class * type listed * cleanup for output conversions and tokenprocess eliminated redundancy * properly handling tokens * simplified token calc handling * original agent with base agent builder structure setup * better docs * no more llama-index dep * cleaner docs * test fixes * poetry reverts and better docs * base_agent_tools set for third party agents * updated task and test fix
28 lines
911 B
Python
28 lines
911 B
Python
from typing import Any, Dict
|
|
|
|
|
|
class TokenProcess:
    """Accumulates LLM token-usage statistics across requests.

    Tracks prompt tokens, completion tokens, their running total, and the
    number of successful requests. Counters start at zero; each ``sum_*``
    method adds to the relevant counter(s) in place.
    """

    # Default counter values; rebinding via ``self.x += n`` in the methods
    # below creates per-instance attributes, so instances never share state.
    total_tokens: int = 0
    prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0

    def sum_prompt_tokens(self, tokens: int):
        """Add ``tokens`` to the prompt-token count and the running total."""
        self.prompt_tokens += tokens
        self.total_tokens += tokens

    def sum_completion_tokens(self, tokens: int):
        """Add ``tokens`` to the completion-token count and the running total."""
        self.completion_tokens += tokens
        self.total_tokens += tokens

    def sum_successful_requests(self, requests: int):
        """Add ``requests`` to the successful-request count."""
        self.successful_requests += requests

    def get_summary(self) -> Dict[str, Any]:
        """Return a snapshot of all counters as a plain dict."""
        return {
            "total_tokens": self.total_tokens,
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "successful_requests": self.successful_requests,
        }