diff --git a/src/crewai/agents/agent_adapters/openai_agents/openai_adapter.py b/src/crewai/agents/agent_adapters/openai_agents/openai_adapter.py index f93b6317d..d2db44d65 100644 --- a/src/crewai/agents/agent_adapters/openai_agents/openai_adapter.py +++ b/src/crewai/agents/agent_adapters/openai_agents/openai_adapter.py @@ -3,7 +3,7 @@ from typing import Any, List, Optional from pydantic import Field, PrivateAttr from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter -from crewai.agents.agent_adapters.openai_agents.structured_output_adapter import ( +from crewai.agents.agent_adapters.openai_agents.structured_output_converter import ( OpenAIConverterAdapter, ) from crewai.agents.agent_builder.base_agent import BaseAgent @@ -89,8 +89,8 @@ class OpenAIAgentAdapter(BaseAgentAdapter): tools: Optional[List[BaseTool]] = None, ) -> str: """Execute a task using the OpenAI Assistant""" - self.create_agent_executor(tools) self._converter_adapter.configure_structured_output(task) + self.create_agent_executor(tools) if self.verbose: enable_verbose_stdout_logging() @@ -140,9 +140,10 @@ class OpenAIAgentAdapter(BaseAgentAdapter): """ all_tools = list(self.tools or []) + list(tools or []) + instructions = self._build_system_prompt() self._openai_agent = OpenAIAgent( name=self.role, - instructions=self._build_system_prompt(), + instructions=instructions, model=self.llm, **self._agent_config or {}, ) @@ -169,14 +170,6 @@ class OpenAIAgentAdapter(BaseAgentAdapter): tools = agent_tools.tools() return tools - def get_output_converter( - self, llm: Any, text: str, model: Any, instructions: str - ) -> Any: - """Convert output format if needed""" - from crewai.utilities.converter import Converter - - return Converter(llm=llm, text=text, model=model, instructions=instructions) - def configure_structured_output(self, task) -> None: """Configure the structured output for the specific agent implementation. 
diff --git a/src/crewai/agents/agent_adapters/openai_agents/structured_output_adapter.py b/src/crewai/agents/agent_adapters/openai_agents/structured_output_converter.py similarity index 87% rename from src/crewai/agents/agent_adapters/openai_agents/structured_output_adapter.py rename to src/crewai/agents/agent_adapters/openai_agents/structured_output_converter.py index 8c0524c01..7f969c63b 100644 --- a/src/crewai/agents/agent_adapters/openai_agents/structured_output_adapter.py +++ b/src/crewai/agents/agent_adapters/openai_agents/structured_output_converter.py @@ -3,6 +3,7 @@ import re from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter from crewai.utilities.converter import generate_model_description +from crewai.utilities.i18n import I18N class OpenAIConverterAdapter(BaseConverterAdapter): @@ -13,7 +14,6 @@ class OpenAIConverterAdapter(BaseConverterAdapter): and post-processes the results when needed. Attributes: - agent_adapter: Reference to the parent OpenAIAgentAdapter _output_format: The expected output format (json, pydantic, or None) _schema: The schema description for the expected output _output_model: The Pydantic model for the output @@ -46,13 +46,13 @@ class OpenAIConverterAdapter(BaseConverterAdapter): if task.output_json: self._output_format = "json" self._schema = generate_model_description(task.output_json) - self._output_model = task.output_json self.agent_adapter._openai_agent.output_type = task.output_json + self._output_model = task.output_json elif task.output_pydantic: self._output_format = "pydantic" self._schema = generate_model_description(task.output_pydantic) - self._output_model = task.output_pydantic self.agent_adapter._openai_agent.output_type = task.output_pydantic + self._output_model = task.output_pydantic def enhance_system_prompt(self, base_prompt: str) -> str: """ @@ -67,14 +67,13 @@ class OpenAIConverterAdapter(BaseConverterAdapter): if not self._output_format: return base_prompt - 
output_instructions = f""" - Your response MUST conform to the following {self._output_format.upper()} schema: - {self._schema} - - Ensure your final response is properly formatted according to this schema. - """ + output_schema = ( + I18N() + .slice("formatted_task_instructions") + .format(output_format=self._schema) + ) - return f"{base_prompt}\n\n{output_instructions}" + return f"{base_prompt}\n\n{output_schema}" def post_process_result(self, result: str) -> str: """ diff --git a/tests/agents/agent_adapters/test_base_agent_adapter.py b/tests/agents/agent_adapters/test_base_agent_adapter.py new file mode 100644 index 000000000..2da90b719 --- /dev/null +++ b/tests/agents/agent_adapters/test_base_agent_adapter.py @@ -0,0 +1,113 @@ +from typing import Any, Dict, List, Optional + +import pytest +from pydantic import BaseModel + +from crewai.agent import BaseAgent +from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter +from crewai.tools import BaseTool +from crewai.utilities.token_counter_callback import TokenProcess + + +# Concrete implementation for testing +class ConcreteAgentAdapter(BaseAgentAdapter): + def configure_tools( + self, tools: Optional[List[BaseTool]] = None, **kwargs: Any + ) -> None: + # Simple implementation for testing + self.tools = tools or [] + + def execute_task( + self, + task: Any, + context: Optional[str] = None, + 
tools: Optional[List[Any]] = None, + ) -> str: + # Dummy implementation needed due to BaseAgent inheritance + return "Task executed" + + def create_agent_executor(self, tools: Optional[List[BaseTool]] = None) -> Any: + # Dummy implementation + return None + + def get_delegation_tools( + self, tools: List[BaseTool], tool_map: Optional[Dict[str, BaseTool]] + ) -> List[BaseTool]: + # Dummy implementation + return [] + + def _parse_output(self, agent_output: Any, token_process: TokenProcess): + # Dummy implementation + pass + + def get_output_converter(self, tools: Optional[List[BaseTool]] = None) -> Any: + # Dummy implementation + return None + + +def test_base_agent_adapter_initialization(): + """Test initialization of the concrete agent adapter.""" + adapter = ConcreteAgentAdapter( + role="test role", goal="test goal", backstory="test backstory" + ) + assert isinstance(adapter, BaseAgent) + assert isinstance(adapter, BaseAgentAdapter) + assert adapter.role == "test role" + assert adapter._agent_config is None + assert adapter.adapted_structured_output is False + + +def test_base_agent_adapter_initialization_with_config(): + """Test initialization with agent_config.""" + config = {"model": "gpt-4"} + adapter = ConcreteAgentAdapter( + agent_config=config, + role="test role", + goal="test goal", + backstory="test backstory", + ) + assert adapter._agent_config == config + + +def test_configure_tools_method_exists(): + """Test that configure_tools method exists and can be called.""" + adapter = ConcreteAgentAdapter( + role="test role", goal="test goal", backstory="test backstory" + ) + # Create dummy tools if needed, or pass None + tools = [] + adapter.configure_tools(tools) + assert hasattr(adapter, "tools") + assert adapter.tools == tools + + +def test_configure_structured_output_method_exists(): + """Test that configure_structured_output method exists and can be called.""" + adapter = ConcreteAgentAdapter( + role="test role", goal="test goal", backstory="test 
backstory" + ) + + # Define a dummy structure or pass None/Any + class DummyOutput(BaseModel): + data: str + + structured_output = DummyOutput + adapter.configure_structured_output(structured_output) + # Add assertions here if configure_structured_output modifies state + # For now, just ensuring it runs without error is sufficient + pass + + +def test_base_agent_adapter_inherits_base_agent(): + """Test that BaseAgentAdapter inherits from BaseAgent.""" + assert issubclass(BaseAgentAdapter, BaseAgent) + + +class ConcreteAgentAdapterWithoutRequiredMethods(BaseAgentAdapter): + pass + + +def test_base_agent_adapter_fails_without_required_methods(): + """Test that BaseAgentAdapter fails without required methods.""" + with pytest.raises(TypeError): + ConcreteAgentAdapterWithoutRequiredMethods() # type: ignore diff --git a/tests/agents/agent_adapters/test_base_tool_adapter.py b/tests/agents/agent_adapters/test_base_tool_adapter.py new file mode 100644 index 000000000..3003d92c3 --- /dev/null +++ b/tests/agents/agent_adapters/test_base_tool_adapter.py @@ -0,0 +1,94 @@ +from typing import Any, List +from unittest.mock import Mock + +import pytest + +from crewai.agents.agent_adapters.base_tool_adapter import BaseToolAdapter +from crewai.tools.base_tool import BaseTool + + +class ConcreteToolAdapter(BaseToolAdapter): + def configure_tools(self, tools: List[BaseTool]) -> None: + self.converted_tools = [f"converted_{tool.name}" for tool in tools] + + +@pytest.fixture +def mock_tool_1(): + tool = Mock(spec=BaseTool) + tool.name = "Mock Tool 1" + return tool + + +@pytest.fixture +def mock_tool_2(): + tool = Mock(spec=BaseTool) + tool.name = "MockTool2" + return tool + + +@pytest.fixture +def tools_list(mock_tool_1, mock_tool_2): + return [mock_tool_1, mock_tool_2] + + +def test_initialization_with_tools(tools_list): + adapter = ConcreteToolAdapter(tools=tools_list) + assert adapter.original_tools == tools_list + assert adapter.converted_tools == [] # Conversion happens in 
configure_tools + + +def test_initialization_without_tools(): + adapter = ConcreteToolAdapter() + assert adapter.original_tools == [] + assert adapter.converted_tools == [] + + +def test_configure_tools(tools_list): + adapter = ConcreteToolAdapter() + adapter.configure_tools(tools_list) + assert adapter.converted_tools == ["converted_Mock Tool 1", "converted_MockTool2"] + assert adapter.original_tools == [] # original_tools is only set in init + + adapter_with_init_tools = ConcreteToolAdapter(tools=tools_list) + adapter_with_init_tools.configure_tools(tools_list) + assert adapter_with_init_tools.converted_tools == [ + "converted_Mock Tool 1", + "converted_MockTool2", + ] + assert adapter_with_init_tools.original_tools == tools_list + + +def test_tools_method(tools_list): + adapter = ConcreteToolAdapter() + adapter.configure_tools(tools_list) + assert adapter.tools() == ["converted_Mock Tool 1", "converted_MockTool2"] + + +def test_tools_method_empty(): + adapter = ConcreteToolAdapter() + assert adapter.tools() == [] + + +def test_sanitize_tool_name_with_spaces(): + adapter = ConcreteToolAdapter() + assert adapter.sanitize_tool_name("Tool With Spaces") == "Tool_With_Spaces" + + +def test_sanitize_tool_name_without_spaces(): + adapter = ConcreteToolAdapter() + assert adapter.sanitize_tool_name("ToolWithoutSpaces") == "ToolWithoutSpaces" + + +def test_sanitize_tool_name_empty(): + adapter = ConcreteToolAdapter() + assert adapter.sanitize_tool_name("") == "" + + +class ConcreteToolAdapterWithoutRequiredMethods(BaseToolAdapter): + pass + + +def test_tool_adapted_fails_without_required_methods(): + """Test that BaseToolAdapter fails without required methods.""" + with pytest.raises(TypeError): + ConcreteToolAdapterWithoutRequiredMethods() # type: ignore