Use LangChain callback handler to support all LLMs

This commit is contained in:
Braelyn Boynton
2024-05-03 15:07:17 -07:00
parent 498bf77f08
commit b9d6ec5721

View File

@@ -25,7 +25,9 @@ from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.utilities import I18N, Logger, Prompts, RPMController
from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess
agentops = None
try:
import agentops
from agentops import track_agent
except ImportError:
@@ -195,6 +197,12 @@ class Agent(BaseModel):
):
self.llm.callbacks.append(token_handler)
if agentops and not any(
isinstance(handler, agentops.LangchainCallbackHandler) for handler in self.llm.callbacks
):
agentops.stop_instrumenting()
self.llm.callbacks.append(agentops.LangchainCallbackHandler())
if not self.agent_executor:
if not self.cache_handler:
self.cache_handler = CacheHandler()