refactor: Simplify tool handling in agent adapters

- Changed default value of `tools` parameter in LangGraphAgentAdapter to None, avoiding the mutable-default-argument pitfall and handling empty tool lists more cleanly.
- Updated tool initialization in both LangGraphAgentAdapter and OpenAIAgentAdapter to directly pass the `tools` parameter, removing unnecessary list handling.
- Cleaned up commented-out code in OpenAIConverterAdapter to improve readability.
This commit is contained in:
lorenzejay
2025-04-16 15:53:03 -07:00
parent ef424d3dae
commit d02de6ea38
3 changed files with 4 additions and 14 deletions

View File

@@ -52,7 +52,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
role: str,
goal: str,
backstory: str,
tools: Optional[List[BaseTool]] = [],
tools: Optional[List[BaseTool]] = None,
llm: Any = None,
max_iterations: int = 10,
agent_config: Optional[Dict[str, Any]] = None,
@@ -72,8 +72,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
agent_config=agent_config,
**kwargs,
)
self.tools = tools or []
self._tool_adapter = LangGraphToolAdapter(tools=tools or [])
self._tool_adapter = LangGraphToolAdapter(tools=tools)
self._converter_adapter = LangGraphConverterAdapter(self)
self._max_iterations = max_iterations
self._setup_graph()
@@ -87,7 +86,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
if self._agent_config:
self._graph = create_react_agent(
model=self.llm,
tools=converted_tools or [],
tools=converted_tools,
checkpointer=self._memory,
debug=self.verbose,
**self._agent_config,

View File

@@ -60,11 +60,10 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
role=role,
goal=goal,
backstory=backstory,
tools=tools,
agent_config=agent_config,
**kwargs,
)
self.tools = tools
self._tool_adapter = OpenAIAgentToolAdapter(tools=tools)
self.llm = model
self._converter_adapter = OpenAIConverterAdapter(self)

View File

@@ -73,13 +73,6 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
.format(output_format=self._schema)
)
# output_instructions = f"""
# Your response MUST conform to the following {self._output_format.upper()} schema:
# {self._schema}
# Ensure your final response is properly formatted according to this schema.
# """
# print("output_schema", output_schema)
return f"{base_prompt}\n\n{output_schema}"
def post_process_result(self, result: str) -> str:
@@ -96,7 +89,6 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
"""
if not self._output_format:
return result
print("openai converter adapter result", result)
# Try to extract valid JSON if it's wrapped in code blocks or other text
if isinstance(result, str) and self._output_format in ["json", "pydantic"]:
# First, try to parse as is