diff --git a/src/crewai/crew.py b/src/crewai/crew.py
index 932e443f1..eb486bd4f 100644
--- a/src/crewai/crew.py
+++ b/src/crewai/crew.py
@@ -589,6 +589,23 @@ class Crew(BaseModel):
         self,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> CrewOutput:
+        """
+        Starts the crew to work on its assigned tasks.
+
+        This method initializes all agents, sets up their configurations, and executes
+        the tasks according to the specified process (sequential or hierarchical).
+
+        For each agent, if no function_calling_llm is specified:
+        - Uses the crew's function_calling_llm if available
+        - Otherwise uses the agent's own LLM for function calling, enabling
+          non-OpenAI models to work without requiring OpenAI credentials
+
+        Args:
+            inputs: Optional dictionary of inputs to be used in task execution
+
+        Returns:
+            CrewOutput: The result of the crew's execution
+        """
         try:
             for before_callback in self.before_kickoff_callbacks:
                 if inputs is None:
diff --git a/tests/agent_test.py b/tests/agent_test.py
index 5d46645d4..3e93f1800 100644
--- a/tests/agent_test.py
+++ b/tests/agent_test.py
@@ -1827,6 +1827,9 @@ def test_agent_uses_own_llm_for_function_calling_when_not_specified():
     Test that an agent uses its own LLM for function calling when no function_calling_llm
     is specified, ensuring that non-OpenAI models like Gemini can be used without
     requiring OpenAI API keys.
+
+    This test verifies the fix for issue #2517, where users would get OpenAI authentication
+    errors even when using non-OpenAI models like Gemini.
     """
     @tool
     def simple_tool(input_text: str) -> str:
@@ -1853,8 +1856,8 @@ def test_agent_uses_own_llm_for_function_calling_when_not_specified():
             agent.execute_task(task, tools=[simple_tool])
 
             args, kwargs = mock_tool_usage.call_args
-            assert kwargs['function_calling_llm'] == agent.llm
-            assert kwargs['function_calling_llm'].model.startswith("gemini")
+            assert kwargs['function_calling_llm'] == agent.llm, "Agent should use its own LLM for function calling"
+            assert kwargs['function_calling_llm'].model.startswith("gemini"), "Function calling LLM should be Gemini"
     except Exception as e:
         if "OPENAI_API_KEY" in str(e):
             pytest.fail("Test failed with OpenAI API key error despite using Gemini model")
diff --git a/tests/crew_test.py b/tests/crew_test.py
index 2ee1ccb2a..51a6eb2e3 100644
--- a/tests/crew_test.py
+++ b/tests/crew_test.py
@@ -4127,6 +4127,10 @@ def test_crew_agents_use_own_llm_for_function_calling():
     """
     Test that agents in a crew use their own LLM for function calling when no
    function_calling_llm is specified for either the agent or the crew.
+
+    This test verifies the fix for issue #2517, where users would get OpenAI authentication
+    errors even when using non-OpenAI models like Gemini. The fix ensures that when no
+    function_calling_llm is specified, the agent uses its own LLM for function calling.
     """
     @tool
     def simple_tool(input_text: str) -> str:
@@ -4160,8 +4164,8 @@ def test_crew_agents_use_own_llm_for_function_calling():
             crew.kickoff()
 
             args, kwargs = mock_tool_usage.call_args
-            assert kwargs['function_calling_llm'] == gemini_agent.llm
-            assert kwargs['function_calling_llm'].model.startswith("gemini")
+            assert kwargs['function_calling_llm'] == gemini_agent.llm, "Agent should use its own LLM for function calling"
+            assert kwargs['function_calling_llm'].model.startswith("gemini"), "Function calling LLM should be Gemini"
     except Exception as e:
         if "OPENAI_API_KEY" in str(e):
             pytest.fail("Test failed with OpenAI API key error despite using Gemini model")
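
For reference, a minimal usage sketch of the fallback behavior that the kickoff() docstring and tests above describe: a crew whose agent runs on a non-OpenAI model and where neither the agent nor the crew sets function_calling_llm, so tool calls should go through the agent's own LLM. The agent, task, and model name below are illustrative assumptions, not taken from this diff.

    from crewai import Agent, Crew, Task, LLM

    # Hypothetical Gemini-backed LLM (LiteLLM-style model name assumed).
    gemini_llm = LLM(model="gemini/gemini-1.5-flash")

    researcher = Agent(
        role="Researcher",
        goal="Answer questions using the available tools",
        backstory="A minimal example agent.",
        llm=gemini_llm,  # agent's own LLM
        # function_calling_llm intentionally omitted
    )

    task = Task(
        description="Summarize the input text.",
        expected_output="A one-sentence summary.",
        agent=researcher,
    )

    # No crew-level function_calling_llm either; with this change, tool/function
    # calling falls back to gemini_llm, so no OpenAI credentials are required.
    crew = Crew(agents=[researcher], tasks=[task])
    result = crew.kickoff()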