mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-11 00:58:30 +00:00
removing logs and prepping new version
@@ -113,7 +113,6 @@ class ToolUsage:
         if not result:
             try:
-                print(f"Calling tool: {calling.tool_name}")
                 if calling.tool_name in [
                     "Delegate work to co-worker",
                     "Ask question to co-worker",
@@ -121,9 +120,7 @@ class ToolUsage:
                     self.task.increment_delegations()

                 if calling.arguments:
-                    print(f"Calling tool NOW: {calling.tool_name}")
                     result = tool._run(**calling.arguments)
-                    print("Got result back from tool")
                 else:
                     result = tool._run()
             except Exception as e:
@@ -224,9 +221,7 @@ class ToolUsage:
                 ),
                 max_attemps=1,
             )
-            print(f"Converter: {converter}")
             calling = converter.to_pydantic()
-            print(f"Calling: {calling}")

             if isinstance(calling, ConverterError):
                 raise calling
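A note on the hunk above: converter.to_pydantic() reports failure by returning a ConverterError value rather than raising it, and the caller raises it explicitly. A minimal standalone sketch of that error-as-value pattern; the classes here are placeholders that only approximate the crewAI ones:

import json

class ConverterError(Exception):
    # Stands in for crewAI's ConverterError; the real class may differ.
    pass

class Converter:
    def __init__(self, text: str):
        self.text = text

    def to_pydantic(self):
        # On success return the parsed object; on failure return
        # (not raise) the error so the caller decides what to do with it.
        try:
            return json.loads(self.text)
        except json.JSONDecodeError as e:
            return ConverterError(f"failed to parse tool call: {e}")

calling = Converter("not valid json").to_pydantic()
if isinstance(calling, ConverterError):
    raise calling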
@@ -27,7 +27,9 @@ class PydanticSchemaParser(BaseModel):
         field_type = field.annotation
         if get_origin(field_type) is list:
             list_item_type = get_args(field_type)[0]
-            if issubclass(list_item_type, BaseModel):
+            if isinstance(list_item_type, type) and issubclass(
+                list_item_type, BaseModel
+            ):
                 nested_schema = self._get_model_schema(list_item_type, depth + 1)
                 return f"List[\n{nested_schema}\n{' ' * 4 * depth}]"
             else:
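Why the PydanticSchemaParser change matters: get_args(...)[0] is not always a plain class. For a nested annotation such as List[List[int]], the item type is typing.List[int], and passing that straight to issubclass() raises TypeError ("issubclass() arg 1 must be a class"). Checking isinstance(list_item_type, type) first avoids the crash. A standalone sketch of the fixed check; the helper name is illustrative, not from the codebase:

from typing import List, get_args, get_origin
from pydantic import BaseModel

def is_model_list_item(annotation) -> bool:
    # Mirrors the fixed check: only call issubclass on actual classes.
    if get_origin(annotation) is list:
        item = get_args(annotation)[0]
        return isinstance(item, type) and issubclass(item, BaseModel)
    return False

class Item(BaseModel):
    name: str

print(is_model_list_item(List[Item]))       # True
print(is_model_list_item(List[int]))        # False
print(is_model_list_item(List[List[int]]))  # False; bare issubclass would raise TypeError here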
60    src/crewai/utilities/token_counter_callback.py    Normal file
@@ -0,0 +1,60 @@
+from typing import Any, Dict, List
+
+import tiktoken
+from langchain.callbacks.base import BaseCallbackHandler
+from langchain.schema import LLMResult
+
+
+class TokenProcess:
+    total_tokens: int = 0
+    prompt_tokens: int = 0
+    completion_tokens: int = 0
+    successful_requests: int = 0
+
+    def sum_prompt_tokens(self, tokens: int):
+        self.prompt_tokens = self.prompt_tokens + tokens
+        self.total_tokens = self.total_tokens + tokens
+
+    def sum_completion_tokens(self, tokens: int):
+        self.completion_tokens = self.completion_tokens + tokens
+        self.total_tokens = self.total_tokens + tokens
+
+    def sum_successful_requests(self, requests: int):
+        self.successful_requests = self.successful_requests + requests
+
+    def get_summary(self) -> Dict[str, int]:
+        return {
+            "total_tokens": self.total_tokens,
+            "prompt_tokens": self.prompt_tokens,
+            "completion_tokens": self.completion_tokens,
+            "successful_requests": self.successful_requests,
+        }
+
+
+class TokenCalcHandler(BaseCallbackHandler):
+    model: str = ""
+    token_cost_process: TokenProcess
+
+    def __init__(self, model, token_cost_process):
+        self.model = model
+        self.token_cost_process = token_cost_process
+
+    def on_llm_start(
+        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
+    ) -> None:
+        if "gpt" in self.model:
+            encoding = tiktoken.encoding_for_model(self.model)
+        else:
+            encoding = tiktoken.get_encoding("cl100k_base")
+
+        if self.token_cost_process is None:
+            return
+
+        for prompt in prompts:
+            self.token_cost_process.sum_prompt_tokens(len(encoding.encode(prompt)))
+
+    async def on_llm_new_token(self, token: str, **kwargs) -> None:
+        self.token_cost_process.sum_completion_tokens(1)
+
+    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
+        self.token_cost_process.sum_successful_requests(1)
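For context, the new handler is meant to be attached to a LangChain LLM through its callbacks. A rough usage sketch, not part of this commit; it assumes a LangChain version whose ChatOpenAI accepts callbacks= and can stream tokens (import paths and constructor arguments vary across versions):

from langchain.chat_models import ChatOpenAI  # import path varies by langchain version

# Accumulator plus handler from the new file above.
token_process = TokenProcess()
handler = TokenCalcHandler(model="gpt-4", token_cost_process=token_process)

# streaming=True matters here: on_llm_new_token fires once per streamed
# token, which is how completion tokens get counted (one per callback).
llm = ChatOpenAI(model="gpt-4", streaming=True, callbacks=[handler])
llm.invoke("Say hello in five words.")

# e.g. {'total_tokens': ..., 'prompt_tokens': ..., 'completion_tokens': ..., 'successful_requests': 1}
print(token_process.get_summary())

Note that prompt tokens are estimated locally with tiktoken (falling back to the cl100k_base encoding for non-GPT model names) rather than read from the API response, so the counts are approximations.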