Mirror of https://github.com/crewAIInc/crewAI.git
Improve docstrings and test clarity as requested in PR review
Co-Authored-By: Joe Moura <joao@crewai.com>
@@ -589,6 +589,23 @@ class Crew(BaseModel):
         self,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> CrewOutput:
+        """
+        Starts the crew to work on its assigned tasks.
+
+        This method initializes all agents, sets up their configurations, and executes
+        the tasks according to the specified process (sequential or hierarchical).
+
+        For each agent, if no function_calling_llm is specified:
+        - Uses the crew's function_calling_llm if available
+        - Otherwise uses the agent's own LLM for function calling, enabling
+          non-OpenAI models to work without requiring OpenAI credentials
+
+        Args:
+            inputs: Optional dictionary of inputs to be used in task execution
+
+        Returns:
+            CrewOutput: The result of the crew's execution
+        """
         try:
             for before_callback in self.before_kickoff_callbacks:
                 if inputs is None:
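The docstring above describes a two-step fallback: an explicit agent-level function_calling_llm wins, then the crew-level one, and only then the agent's own LLM. A minimal sketch of that resolution order follows; it is an illustration only, not the actual crewAI source, and the attribute names are taken from the docstring.

```python
# Illustration only -- not the actual crewAI implementation. The attribute
# names (agent.function_calling_llm, crew.function_calling_llm, agent.llm)
# follow the docstring added in the hunk above.
def resolve_function_calling_llm(crew, agent):
    """Return the LLM an agent should use for tool/function calling."""
    if getattr(agent, "function_calling_llm", None) is not None:
        return agent.function_calling_llm   # explicit per-agent override
    if getattr(crew, "function_calling_llm", None) is not None:
        return crew.function_calling_llm    # crew-wide default, when set
    return agent.llm                        # otherwise: the agent's own LLM
```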
@@ -1827,6 +1827,9 @@ def test_agent_uses_own_llm_for_function_calling_when_not_specified():
     Test that an agent uses its own LLM for function calling when no function_calling_llm
     is specified, ensuring that non-OpenAI models like Gemini can be used without
     requiring OpenAI API keys.
+
+    This test verifies the fix for issue #2517, where users would get OpenAI authentication
+    errors even when using non-OpenAI models like Gemini.
     """
     @tool
     def simple_tool(input_text: str) -> str:
@@ -1853,8 +1856,8 @@ def test_agent_uses_own_llm_for_function_calling_when_not_specified():
             agent.execute_task(task, tools=[simple_tool])

             args, kwargs = mock_tool_usage.call_args
-            assert kwargs['function_calling_llm'] == agent.llm
-            assert kwargs['function_calling_llm'].model.startswith("gemini")
+            assert kwargs['function_calling_llm'] == agent.llm, "Agent should use its own LLM for function calling"
+            assert kwargs['function_calling_llm'].model.startswith("gemini"), "Function calling LLM should be Gemini"
         except Exception as e:
             if "OPENAI_API_KEY" in str(e):
                 pytest.fail("Test failed with OpenAI API key error despite using Gemini model")
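For context, this is the agent-level usage the test protects: a Gemini-backed agent calling a tool with no OpenAI credentials configured. The sketch below assumes the public crewai API (Agent, Task, LLM and the `tool` decorator from crewai.tools); running it needs a Gemini key (e.g. GEMINI_API_KEY, resolved via LiteLLM) but no OPENAI_API_KEY.

```python
# Sketch of the scenario covered by the test above (assumed public API usage,
# not part of this diff). Only a Gemini key is required at runtime.
from crewai import Agent, LLM, Task
from crewai.tools import tool


@tool
def echo(input_text: str) -> str:
    """Return the input text unchanged."""
    return input_text


gemini_agent = Agent(
    role="Researcher",
    goal="Answer questions using the available tools",
    backstory="A concise assistant backed by a Gemini model",
    llm=LLM(model="gemini/gemini-1.5-pro"),
    # No function_calling_llm here: the agent's own Gemini LLM is used for tool calls.
)

task = Task(
    description="Echo the phrase 'hello world' using the echo tool.",
    expected_output="The phrase 'hello world'.",
    agent=gemini_agent,
)

result = gemini_agent.execute_task(task, tools=[echo])
print(result)
```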
@@ -4127,6 +4127,10 @@ def test_crew_agents_use_own_llm_for_function_calling():
     """
     Test that agents in a crew use their own LLM for function calling when no
     function_calling_llm is specified for either the agent or the crew.
+
+    This test verifies the fix for issue #2517, where users would get OpenAI authentication
+    errors even when using non-OpenAI models like Gemini. The fix ensures that when no
+    function_calling_llm is specified, the agent uses its own LLM for function calling.
     """
     @tool
     def simple_tool(input_text: str) -> str:
@@ -4160,8 +4164,8 @@ def test_crew_agents_use_own_llm_for_function_calling():
             crew.kickoff()

             args, kwargs = mock_tool_usage.call_args
-            assert kwargs['function_calling_llm'] == gemini_agent.llm
-            assert kwargs['function_calling_llm'].model.startswith("gemini")
+            assert kwargs['function_calling_llm'] == gemini_agent.llm, "Agent should use its own LLM for function calling"
+            assert kwargs['function_calling_llm'].model.startswith("gemini"), "Function calling LLM should be Gemini"
         except Exception as e:
             if "OPENAI_API_KEY" in str(e):
                 pytest.fail("Test failed with OpenAI API key error despite using Gemini model")
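The second test covers the crew-level path: neither the agents nor the crew set function_calling_llm, so each agent falls back to its own LLM during kickoff. A hedged sketch of that setup, again assuming the public crewai API (Crew, Process, placeholder inputs interpolated by kickoff) rather than anything added in this diff:

```python
# Crew-level variant of the same behaviour, mirroring the second test above.
# Assumed public crewai API usage; only a Gemini key is needed at runtime.
from crewai import Agent, Crew, LLM, Process, Task
from crewai.tools import tool


@tool
def simple_tool(input_text: str) -> str:
    """Return the input text unchanged."""
    return input_text


gemini_agent = Agent(
    role="Analyst",
    goal="Summarise the given input",
    backstory="Runs entirely on a Gemini model",
    llm=LLM(model="gemini/gemini-1.5-flash"),
    tools=[simple_tool],
)

task = Task(
    description="Summarise: {topic}",
    expected_output="A one-sentence summary.",
    agent=gemini_agent,
)

crew = Crew(
    agents=[gemini_agent],
    tasks=[task],
    process=Process.sequential,
    # No crew-level function_calling_llm: each agent uses its own LLM for tool
    # calls, so no OPENAI_API_KEY is required anywhere in this setup.
)

print(crew.kickoff(inputs={"topic": "tool calling with non-OpenAI models"}))
```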