Removing LangChain and Rebuilding Executor (#1322)

* rebuilding executor

* removing langchain

* Making all tests good

* fixing types and adding ability to not use system prompts

* improving types

* pleasing the types gods

* fixing parser, tools and executor

* making sure all tests pass

* final pass

* fixing type

* Updating Docs

* preparing to cut new version
Author: João Moura
Date: 2024-09-16 14:14:04 -03:00
Committed by: GitHub
Parent: 322780a5f3
Commit: e77442cf34
177 changed files with 27272 additions and 1618561 deletions

@@ -1,36 +1,17 @@
-from typing import Any, Dict, List
-
-import tiktoken
-from langchain.callbacks.base import BaseCallbackHandler
-from langchain.schema import LLMResult
+from litellm.integrations.custom_logger import CustomLogger
 
 from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
 
 
-class TokenCalcHandler(BaseCallbackHandler):
-    model_name: str = ""
-    token_cost_process: TokenProcess
-    encoding: tiktoken.Encoding
-
-    def __init__(self, model_name, token_cost_process):
-        self.model_name = model_name
+class TokenCalcHandler(CustomLogger):
+    def __init__(self, token_cost_process: TokenProcess):
         self.token_cost_process = token_cost_process
-        try:
-            self.encoding = tiktoken.encoding_for_model(self.model_name)
-        except KeyError:
-            self.encoding = tiktoken.get_encoding("cl100k_base")
 
-    def on_llm_start(
-        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
-    ) -> None:
+    def log_success_event(self, kwargs, response_obj, start_time, end_time):
         if self.token_cost_process is None:
             return
 
-        for prompt in prompts:
-            self.token_cost_process.sum_prompt_tokens(len(self.encoding.encode(prompt)))
-
-    async def on_llm_new_token(self, token: str, **kwargs) -> None:
-        self.token_cost_process.sum_completion_tokens(1)
-
-    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
-        self.token_cost_process.sum_successful_requests(1)
+        self.token_cost_process.sum_successful_requests(1)
+        self.token_cost_process.sum_prompt_tokens(response_obj["usage"].prompt_tokens)
+        self.token_cost_process.sum_completion_tokens(
+            response_obj["usage"].completion_tokens
+        )
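
For context on how the rewritten handler is driven: instead of estimating token counts locally with tiktoken inside LangChain callbacks, the new handler reads the provider-reported usage that litellm attaches to each successful response. Below is a minimal standalone sketch of wiring such a handler up. The TokenProcess stand-in is an assumption inferred from the counter methods the diff calls (the real class lives in crewai.agents.agent_builder.utilities.base_token_process), the model name is a hypothetical choice, and the registration uses litellm's documented callbacks list rather than crewai's internal setup.

import litellm
from litellm.integrations.custom_logger import CustomLogger


class TokenProcess:
    # Assumed stand-in: just accumulates the three counters the handler updates.
    def __init__(self):
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.successful_requests = 0

    def sum_prompt_tokens(self, n: int):
        self.prompt_tokens += n

    def sum_completion_tokens(self, n: int):
        self.completion_tokens += n

    def sum_successful_requests(self, n: int):
        self.successful_requests += n


class TokenCalcHandler(CustomLogger):
    # Mirrors the diff above: litellm calls log_success_event after each
    # successful completion, with provider-reported usage on the response.
    def __init__(self, token_cost_process: TokenProcess):
        self.token_cost_process = token_cost_process

    def log_success_event(self, kwargs, response_obj, start_time, end_time):
        if self.token_cost_process is None:
            return
        usage = response_obj["usage"]
        self.token_cost_process.sum_successful_requests(1)
        self.token_cost_process.sum_prompt_tokens(usage.prompt_tokens)
        self.token_cost_process.sum_completion_tokens(usage.completion_tokens)


token_process = TokenProcess()
litellm.callbacks = [TokenCalcHandler(token_process)]  # register the logger

response = litellm.completion(
    model="gpt-4o-mini",  # hypothetical model for the example
    messages=[{"role": "user", "content": "Hello"}],
)
print(token_process.prompt_tokens, token_process.completion_tokens)

One caveat on the usage pattern: depending on the litellm version, success callbacks may be dispatched on a worker thread, so reading the counters immediately after completion() returns is best-effort in this sketch.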