Fix test mocking approach to use real LLM with mocked call method

Co-Authored-By: Joe Moura <joao@crewai.com>
Devin AI
2025-02-26 04:19:09 +00:00
parent 0a0a46f972
commit 0fa021dea8


@@ -1675,25 +1675,25 @@ def test_agent_uses_task_knowledge():
         mock_knowledge = MockKnowledge.return_value
         mock_knowledge.query.return_value = [{"content": content}]

-        # Create an agent with a simple mocked LLM
-        with patch("crewai.llm.LLM", autospec=True) as MockLLM:
-            mock_llm = MockLLM.return_value
-            mock_llm.call.return_value = "The capital of France is Paris, where the Eiffel Tower is located."
-
-            agent = Agent(
-                role="Geography Teacher",
-                goal="Provide accurate geographic information",
-                backstory="You are a geography expert who teaches students about world capitals.",
-                llm=mock_llm,
-            )
-
-            # Create a task with knowledge
-            task = Task(
-                description="What is the capital of France?",
-                expected_output="The capital of France.",
-                agent=agent,
-                knowledge=mock_knowledge,
-            )
+        # Create a real LLM but patch its call method
+        agent = Agent(
+            role="Geography Teacher",
+            goal="Provide accurate geographic information",
+            backstory="You are a geography expert who teaches students about world capitals.",
+            llm=LLM(model="gpt-4o-mini"),
+        )
+
+        # Create a task with knowledge
+        task = Task(
+            description="What is the capital of France?",
+            expected_output="The capital of France.",
+            agent=agent,
+            knowledge=mock_knowledge,
+        )
+
+        # Patch the LLM's call method so the test makes no real API call
+        with patch.object(agent.llm, "call") as mock_llm_call:
+            mock_llm_call.return_value = "The capital of France is Paris, where the Eiffel Tower is located."

             # Execute the task
             result = agent.execute_task(task)
@@ -1723,25 +1723,25 @@ def test_agent_with_empty_task_knowledge():
         mock_knowledge = MockKnowledge.return_value
         mock_knowledge.query.return_value = []

-        # Create an agent with a simple mocked LLM
-        with patch("crewai.llm.LLM", autospec=True) as MockLLM:
-            mock_llm = MockLLM.return_value
-            mock_llm.call.return_value = "The capital of France is Paris."
-
-            agent = Agent(
-                role="Geography Teacher",
-                goal="Provide accurate geographic information",
-                backstory="You are a geography expert who teaches students about world capitals.",
-                llm=mock_llm,
-            )
-
-            # Create a task with empty knowledge
-            task = Task(
-                description="What is the capital of France?",
-                expected_output="The capital of France.",
-                agent=agent,
-                knowledge=mock_knowledge,
-            )
+        # Create a real LLM but patch its call method
+        agent = Agent(
+            role="Geography Teacher",
+            goal="Provide accurate geographic information",
+            backstory="You are a geography expert who teaches students about world capitals.",
+            llm=LLM(model="gpt-4o-mini"),
+        )
+
+        # Create a task with empty knowledge
+        task = Task(
+            description="What is the capital of France?",
+            expected_output="The capital of France.",
+            agent=agent,
+            knowledge=mock_knowledge,
+        )
+
+        # Patch the LLM's call method so the test makes no real API call
+        with patch.object(agent.llm, "call") as mock_llm_call:
+            mock_llm_call.return_value = "The capital of France is Paris."

             # Execute the task
             result = agent.execute_task(task)
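
For reference, a minimal stand-alone sketch of the pattern this commit adopts, not the project's actual test: build the agent around a real LLM instance and stub only its call method with patch.object, so model configuration and prompt construction still run while no network request is made. The top-level imports, the example model name, and the test name are assumptions; the knowledge wiring from the diff is omitted, and depending on the crewai version the stubbed reply may need to follow the executor's expected output format.

    from unittest.mock import patch

    from crewai import LLM, Agent, Task


    def test_agent_answers_without_real_api_call():
        # Real LLM instance: configuration and prompt assembly behave as in production.
        agent = Agent(
            role="Geography Teacher",
            goal="Provide accurate geographic information",
            backstory="You are a geography expert who teaches students about world capitals.",
            llm=LLM(model="gpt-4o-mini"),  # example model name, assumed here
        )
        task = Task(
            description="What is the capital of France?",
            expected_output="The capital of France.",
            agent=agent,
        )

        # Stub only the LLM's call method; the rest of the execution path stays real.
        with patch.object(agent.llm, "call") as mock_llm_call:
            mock_llm_call.return_value = "The capital of France is Paris."
            result = agent.execute_task(task)

        mock_llm_call.assert_called()
        assert "Paris" in result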