This commit is contained in:
Brandon Hancock
2025-02-13 14:55:11 -05:00
parent a38483e1b4
commit ff32880a54
5 changed files with 16 additions and 43 deletions

View File

@@ -1,22 +0,0 @@
# Imports first (PEP 8): the original placed these after the first statements.
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent

# Initialize a ChatOpenAI model (temperature=0 for deterministic output).
# NOTE(review): `ChatOpenAI`, `tools`, and `LangChainAgentAdapter` are assumed
# to be imported/defined earlier in the original file — confirm.
llm = ChatOpenAI(model="gpt-4o", temperature=0)

# Create the agent with LangGraph, checkpointing conversation state in memory.
memory = MemorySaver()
agent_executor = create_react_agent(
    llm,
    tools,
    checkpointer=memory,
)

# Pass the LangGraph agent to the adapter so CrewAI can drive it as an agent.
wrapped_agent = LangChainAgentAdapter(
    langchain_agent=agent_executor,
    tools=tools,
    role="San Francisco Travel Advisor",
    goal="Curate a detailed list of the best neighborhoods to live in, restaurants to dine at, and attractions to visit in San Francisco.",
    backstory="An expert travel advisor with insider knowledge of San Francisco's vibrant culture, culinary delights, and hidden gems.",
)

View File

@@ -78,6 +78,8 @@ class BaseAgent(ABC, BaseModel):
Set the rpm controller for the agent.
set_private_attrs() -> "BaseAgent":
Set private attributes.
configure_executor(cache_handler: CacheHandler, rpm_controller: RPMController) -> None:
Configure the agent's executor with both cache and RPM handling.
"""
__hash__ = object.__hash__ # type: ignore
@@ -163,7 +165,6 @@ class BaseAgent(ABC, BaseModel):
tool meets these criteria, it is processed and added to the list of
tools. Otherwise, a ValueError is raised.
"""
print(f"Validating tools: {tools}")
processed_tools = []
for tool in tools:
if isinstance(tool, BaseTool):
@@ -181,7 +182,6 @@ class BaseAgent(ABC, BaseModel):
"Tool must be an instance of BaseTool or "
"an object with 'name', 'func', and 'description' attributes."
)
print(f"Processed tools: {processed_tools}")
return processed_tools
@model_validator(mode="after")
@@ -343,10 +343,6 @@ class BaseAgent(ABC, BaseModel):
# Only create the executor if it hasn't been created yet.
if self.agent_executor is None:
self.create_agent_executor()
else:
print(
"Agent executor already exists, skipping creation in set_cache_handler."
)
def increment_formatting_errors(self) -> None:
    """Bump this agent's running count of formatting errors by one."""
    self.formatting_errors = self.formatting_errors + 1
@@ -362,7 +358,16 @@ class BaseAgent(ABC, BaseModel):
# Only create the executor if it hasn't been created yet.
if self.agent_executor is None:
self.create_agent_executor()
else:
print(
"Agent executor already exists, skipping creation in set_rpm_controller."
)
def configure_executor(
    self, cache_handler: "CacheHandler", rpm_controller: "RPMController"
) -> None:
    """Configure the agent's executor with both cache and RPM handling.

    Each handler is applied only when the corresponding agent setting is
    enabled: the cache handler when `self.cache` is truthy, the RPM
    controller when `self.max_rpm` is truthy. Delegates to
    `set_cache_handler` and `set_rpm_controller`, in that order.
    """
    # (flag, setter, handler) triples — applied in cache-then-RPM order.
    configuration_steps = (
        (self.cache, self.set_cache_handler, cache_handler),
        (self.max_rpm, self.set_rpm_controller, rpm_controller),
    )
    for enabled, apply_handler, handler in configuration_steps:
        if enabled:
            apply_handler(handler)

View File

@@ -161,7 +161,6 @@ class LangChainAgentAdapter(BaseAgent):
else:
task_prompt = self._use_trained_data(task_prompt=task_prompt)
# IMPORTANT: create an initial state using "messages" (not "input")
init_state = {"messages": [("user", task_prompt)]}
state = self.agent_executor.invoke(init_state)
@@ -176,7 +175,6 @@ class LangChainAgentAdapter(BaseAgent):
else:
current_output = ""
# If human feedback is required, enter a feedback loop
if task.human_input:
current_output = self._handle_human_feedback(current_output)
@@ -206,7 +204,6 @@ class LangChainAgentAdapter(BaseAgent):
f"Updated answer:"
)
try:
# Use "messages" key for the prompt, like we do in execute_task.
new_state = self.agent_executor.invoke(
{"messages": [("user", new_prompt)]}
)
@@ -309,9 +306,6 @@ class LangChainAgentAdapter(BaseAgent):
else:
used_tools.append(tool)
print("Raw tools:", raw_tools)
print("Used tools:", used_tools)
# Sanitize the agent's role for the "name" field. The allowed pattern is ^[a-zA-Z0-9_-]+$
import re

View File

@@ -340,10 +340,7 @@ class Crew(BaseModel):
if self.agents:
for agent in self.agents:
if self.cache:
agent.set_cache_handler(self._cache_handler)
if self.max_rpm:
agent.set_rpm_controller(self._rpm_controller)
agent.configure_executor(self._cache_handler, self._rpm_controller)
return self
@model_validator(mode="after")

View File

@@ -92,7 +92,6 @@ class BaseTool(BaseModel, ABC):
@classmethod
def from_langchain(cls, tool: Any) -> "BaseTool":
print("CREATING TOOL FROM LANGCHAIN")
"""Create a Tool instance from a CrewStructuredTool.
This method takes a CrewStructuredTool object and converts it into a