not overriding llm callbacks

João Moura
2024-04-05 08:24:20 -03:00
parent d17bc33bfb
commit 65824ef814

@@ -156,9 +156,12 @@ class Agent(BaseModel):
def set_agent_executor(self) -> "Agent": def set_agent_executor(self) -> "Agent":
"""set agent executor is set.""" """set agent executor is set."""
if hasattr(self.llm, "model_name"): if hasattr(self.llm, "model_name"):
self.llm.callbacks = [ token_handler = TokenCalcHandler(self.llm.model_name, self._token_process)
TokenCalcHandler(self.llm.model_name, self._token_process) if isinstance(self.llm.callbacks, list):
] self.llm.callbacks.append(token_handler)
else:
self.llm.callbacks = [token_handler]
if not self.agent_executor: if not self.agent_executor:
if not self.cache_handler: if not self.cache_handler:
self.cache_handler = CacheHandler() self.cache_handler = CacheHandler()
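
For readers skimming the change: assigning `self.llm.callbacks = [...]` replaced whatever callbacks a caller had already registered on the LLM, while the patched branch appends the token handler when a callbacks list is already present. The following is a minimal standalone sketch of that pattern, not crewAI's actual code; `FakeLLM`, `attach_token_handler`, and the simplified `TokenCalcHandler` are hypothetical stand-ins for illustration only.

class TokenCalcHandler:
    # Illustrative stand-in for the real TokenCalcHandler.
    def __init__(self, model_name, token_process):
        self.model_name = model_name
        self.token_process = token_process


class FakeLLM:
    # Hypothetical LLM object that may already carry user-supplied callbacks.
    def __init__(self, model_name, callbacks=None):
        self.model_name = model_name
        self.callbacks = callbacks  # may be None, or a list set by the caller


def attach_token_handler(llm, token_process):
    # Mirrors the patched logic: append when a list exists, otherwise create one.
    token_handler = TokenCalcHandler(llm.model_name, token_process)
    if isinstance(llm.callbacks, list):
        llm.callbacks.append(token_handler)  # keeps caller-registered callbacks
    else:
        llm.callbacks = [token_handler]      # nothing to preserve, start a new list
    return llm


llm = FakeLLM("gpt-4", callbacks=["user_defined_handler"])
attach_token_handler(llm, token_process=object())
print(llm.callbacks)  # ['user_defined_handler', <TokenCalcHandler ...>] -- existing callback preserved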