Fix test mocking approach to use real LLM with mocked call method

Co-Authored-By: Joe Moura <joao@crewai.com>
This commit is contained in:
Devin AI
2025-02-26 04:19:09 +00:00
parent 0a0a46f972
commit 0fa021dea8

View File

@@ -1675,25 +1675,25 @@ def test_agent_uses_task_knowledge():
     mock_knowledge = MockKnowledge.return_value
     mock_knowledge.query.return_value = [{"content": content}]

-    # Create an agent with a simple mocked LLM
-    with patch("crewai.llm.LLM", autospec=True) as MockLLM:
-        mock_llm = MockLLM.return_value
-        mock_llm.call.return_value = "The capital of France is Paris, where the Eiffel Tower is located."
-
-        agent = Agent(
-            role="Geography Teacher",
-            goal="Provide accurate geographic information",
-            backstory="You are a geography expert who teaches students about world capitals.",
-            llm=mock_llm,
-        )
-
-        # Create a task with knowledge
-        task = Task(
-            description="What is the capital of France?",
-            expected_output="The capital of France.",
-            agent=agent,
-            knowledge=mock_knowledge,
-        )
+    # Create a real LLM but patch its call method
+    agent = Agent(
+        role="Geography Teacher",
+        goal="Provide accurate geographic information",
+        backstory="You are a geography expert who teaches students about world capitals.",
+        llm=LLM(model="gpt-4o-mini"),
+    )
+
+    # Create a task with knowledge
+    task = Task(
+        description="What is the capital of France?",
+        expected_output="The capital of France.",
+        agent=agent,
+        knowledge=mock_knowledge,
+    )
+
+    # Mock the agent's execute_task method to avoid actual LLM calls
+    with patch.object(agent.llm, "call") as mock_llm_call:
+        mock_llm_call.return_value = "The capital of France is Paris, where the Eiffel Tower is located."
         # Execute the task
         result = agent.execute_task(task)
@@ -1723,25 +1723,25 @@ def test_agent_with_empty_task_knowledge():
     mock_knowledge = MockKnowledge.return_value
     mock_knowledge.query.return_value = []

-    # Create an agent with a simple mocked LLM
-    with patch("crewai.llm.LLM", autospec=True) as MockLLM:
-        mock_llm = MockLLM.return_value
-        mock_llm.call.return_value = "The capital of France is Paris."
-
-        agent = Agent(
-            role="Geography Teacher",
-            goal="Provide accurate geographic information",
-            backstory="You are a geography expert who teaches students about world capitals.",
-            llm=mock_llm,
-        )
-
-        # Create a task with empty knowledge
-        task = Task(
-            description="What is the capital of France?",
-            expected_output="The capital of France.",
-            agent=agent,
-            knowledge=mock_knowledge,
-        )
+    # Create a real LLM but patch its call method
+    agent = Agent(
+        role="Geography Teacher",
+        goal="Provide accurate geographic information",
+        backstory="You are a geography expert who teaches students about world capitals.",
+        llm=LLM(model="gpt-4o-mini"),
+    )
+
+    # Create a task with empty knowledge
+    task = Task(
+        description="What is the capital of France?",
+        expected_output="The capital of France.",
+        agent=agent,
+        knowledge=mock_knowledge,
+    )
+
+    # Mock the agent's execute_task method to avoid actual LLM calls
+    with patch.object(agent.llm, "call") as mock_llm_call:
+        mock_llm_call.return_value = "The capital of France is Paris."
         # Execute the task
         result = agent.execute_task(task)