Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 08:08:32 +00:00
* WIP. Figuring out the disconnect issue.
* Cleaned up logs now that I've isolated the issue to the LLM.
* More WIP.
* WIP. It looks like usage metrics have always been broken for async.
* Update the parent crew that manages the for_each loop.
* Merge main into bugfix/kickoff-for-each-usage-metrics.
* Clean up code for review.
* Add new tests.
* Final cleanup. Ready for review.
* Move copy functionality from Agent to BaseAgent.
* Fix renaming issue.
* Fix linting errors.
* Use BaseAgent instead of Agent where applicable.
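Per the messages above, the core of the fix is having the parent crew aggregate usage metrics from the per-item crew copies spawned by the for_each loop. A minimal, hypothetical sketch of that aggregation pattern in plain Python; UsageMetrics and run_copy here are illustrative stand-ins, not crewAI's actual API:

import asyncio
from dataclasses import dataclass


@dataclass
class UsageMetrics:
    # Hypothetical stand-in for a usage-metrics model.
    prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0

    def add(self, other: "UsageMetrics") -> None:
        self.prompt_tokens += other.prompt_tokens
        self.completion_tokens += other.completion_tokens
        self.successful_requests += other.successful_requests


async def run_copy(inputs: dict) -> UsageMetrics:
    # Stand-in for running one crew copy; returns that copy's metrics.
    await asyncio.sleep(0)
    return UsageMetrics(prompt_tokens=10, completion_tokens=5, successful_requests=1)


async def kickoff_for_each_async(inputs_list: list[dict]) -> UsageMetrics:
    # Each input runs on its own copy; the parent sums the copies' metrics,
    # so totals survive even when the copies run concurrently.
    total = UsageMetrics()
    for metrics in await asyncio.gather(*(run_copy(i) for i in inputs_list)):
        total.add(metrics)
    return total


# Example: asyncio.run(kickoff_for_each_async([{"topic": "a"}, {"topic": "b"}]))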
37 lines
1.2 KiB
Python
from typing import Any, Dict, List

import tiktoken
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult

from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess


class TokenCalcHandler(BaseCallbackHandler):
    """LangChain callback that accumulates token usage into a TokenProcess."""

    model_name: str = ""
    token_cost_process: TokenProcess

    def __init__(self, model_name, token_cost_process):
        self.model_name = model_name
        self.token_cost_process = token_cost_process

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        # Prefer the model's own tokenizer; fall back to cl100k_base for
        # model names tiktoken does not recognize.
        try:
            encoding = tiktoken.encoding_for_model(self.model_name)
        except KeyError:
            encoding = tiktoken.get_encoding("cl100k_base")

        if self.token_cost_process is None:
            return

        # Count each prompt's tokens toward the running prompt-token total.
        for prompt in prompts:
            self.token_cost_process.sum_prompt_tokens(len(encoding.encode(prompt)))

    async def on_llm_new_token(self, token: str, **kwargs) -> None:
        # Each streamed token counts as one completion token.
        self.token_cost_process.sum_completion_tokens(1)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        self.token_cost_process.sum_successful_requests(1)