Cached prompt tokens on usage metrics

Author: Thiago Moretto
Date:   2024-11-13 10:16:30 -03:00
Parent: bcfcf88e78
Commit: 36aa4bcb46
5 changed files with 137 additions and 95 deletions


@@ -4,6 +4,7 @@ from crewai.types.usage_metrics import UsageMetrics
 class TokenProcess:
     total_tokens: int = 0
     prompt_tokens: int = 0
+    cached_prompt_tokens: int = 0
     completion_tokens: int = 0
     successful_requests: int = 0
@@ -15,6 +16,10 @@ class TokenProcess:
         self.completion_tokens = self.completion_tokens + tokens
         self.total_tokens = self.total_tokens + tokens
 
+    def sum_cached_prompt_tokens(self, tokens: int):
+        self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
+        self.total_tokens = self.total_tokens + tokens
+
     def sum_successful_requests(self, requests: int):
         self.successful_requests = self.successful_requests + requests
@@ -22,6 +27,7 @@ class TokenProcess:
         return UsageMetrics(
            total_tokens=self.total_tokens,
            prompt_tokens=self.prompt_tokens,
+           cached_prompt_tokens=self.cached_prompt_tokens,
            completion_tokens=self.completion_tokens,
            successful_requests=self.successful_requests,
        )
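
For reference, below is a minimal, self-contained sketch of how the new counter behaves once this change is applied. It is an illustration rather than the library source: UsageMetrics is stubbed here as a dataclass (the real model presumably gains a cached_prompt_tokens field in one of the other changed files), and the sum_prompt_tokens / get_summary method names are assumptions, since those signatures are not visible in the hunks above.

# Illustrative sketch only; names mirror the diff where shown, but
# UsageMetrics is stubbed and the summary method name is assumed.
from dataclasses import dataclass


@dataclass
class UsageMetrics:  # stand-in for crewai.types.usage_metrics.UsageMetrics
    total_tokens: int = 0
    prompt_tokens: int = 0
    cached_prompt_tokens: int = 0  # assumed new field on the real model
    completion_tokens: int = 0
    successful_requests: int = 0


class TokenProcess:
    total_tokens: int = 0
    prompt_tokens: int = 0
    cached_prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0

    def sum_prompt_tokens(self, tokens: int):  # name assumed, not shown in the hunks
        self.prompt_tokens += tokens
        self.total_tokens += tokens

    def sum_cached_prompt_tokens(self, tokens: int):
        # New in this commit: cached prompt tokens are tracked separately
        # but still count toward the running total.
        self.cached_prompt_tokens += tokens
        self.total_tokens += tokens

    def get_summary(self) -> UsageMetrics:  # method name assumed
        return UsageMetrics(
            total_tokens=self.total_tokens,
            prompt_tokens=self.prompt_tokens,
            cached_prompt_tokens=self.cached_prompt_tokens,
            completion_tokens=self.completion_tokens,
            successful_requests=self.successful_requests,
        )


if __name__ == "__main__":
    tp = TokenProcess()
    tp.sum_prompt_tokens(100)
    tp.sum_cached_prompt_tokens(40)  # e.g. tokens served from the provider's prompt cache
    print(tp.get_summary())
    # UsageMetrics(total_tokens=140, prompt_tokens=100, cached_prompt_tokens=40,
    #              completion_tokens=0, successful_requests=0)

As in the diff, cached prompt tokens are also added to total_tokens, so the running total reflects both cached and uncached prompt usage.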