updating tests

commit b856b21fc6
parent 72a0d1edef
Author: João Moura
Date: 2024-03-02 13:50:05 -03:00

43 changed files with 55864 additions and 45688 deletions

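The change repeated through every hunk below is the addition of an expected_output argument to Task, with the string assertions updated to whatever the model now returns under the new prompt. A minimal sketch of the new pattern, assuming crewAI's public Agent/Task API as exercised by these tests (the role/goal values here are made up):

from crewai import Agent, Task

agent = Agent(role="calculator", goal="answer math questions", backstory="test backstory")
task = Task(
    description="How much is 1 + 1?",
    agent=agent,
    expected_output="the result of the math operation.",  # now passed throughout this suite
)
result = agent.execute_task(task)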

@@ -66,10 +66,14 @@ def test_agent_without_memory():
         llm=ChatOpenAI(temperature=0, model="gpt-4"),
     )
-    task = Task(description="How much is 1 + 1?", agent=no_memory_agent)
+    task = Task(
+        description="How much is 1 + 1?",
+        agent=no_memory_agent,
+        expected_output="the result of the math operation.",
+    )
     result = no_memory_agent.execute_task(task)
-    assert result == "1 + 1 equals 2."
+    assert result == "The result of the math operation 1 + 1 is 2."
     assert no_memory_agent.agent_executor.memory is None
     assert memory_agent.agent_executor.memory is not None
@@ -83,10 +87,14 @@ def test_agent_execution():
         allow_delegation=False,
     )
-    task = Task(description="How much is 1 + 1?", agent=agent)
+    task = Task(
+        description="How much is 1 + 1?",
+        agent=agent,
+        expected_output="the result of the math operation.",
+    )
     output = agent.execute_task(task)
-    assert output == "1 + 1 equals 2."
+    assert output == "The result of the math operation 1 + 1 is 2."

 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -104,7 +112,11 @@ def test_agent_execution_with_tools():
         allow_delegation=False,
     )
-    task = Task(description="What is 3 times 4?", agent=agent)
+    task = Task(
+        description="What is 3 times 4?",
+        agent=agent,
+        expected_output="The result of the multiplication.",
+    )
     output = agent.execute_task(task)
     assert output == "The result of 3 times 4 is 12."
@@ -126,14 +138,18 @@ def test_logging_tool_usage():
     )
     assert agent.tools_handler.last_used_tool == {}
-    task = Task(description="What is 3 times 4?", agent=agent)
+    task = Task(
+        description="What is 3 times 4?",
+        agent=agent,
+        expected_output="The result of the multiplication.",
+    )

     # force cleaning cache
     agent.tools_handler.cache = CacheHandler()
     output = agent.execute_task(task)
     tool_usage = InstructorToolCalling(
         tool_name=multiplier.name, arguments={"first_number": 3, "second_number": 4}
     )
-    assert output == "The result of multiplying 3 by 4 is 12."
+    assert output == "12"
     assert agent.tools_handler.last_used_tool.tool_name == tool_usage.tool_name
     assert agent.tools_handler.last_used_tool.arguments == tool_usage.arguments
@@ -157,8 +173,16 @@ def test_cache_hitting():
         verbose=True,
     )
-    task1 = Task(description="What is 2 times 6?", agent=agent)
-    task2 = Task(description="What is 3 times 3?", agent=agent)
+    task1 = Task(
+        description="What is 2 times 6?",
+        agent=agent,
+        expected_output="The result of the multiplication.",
+    )
+    task2 = Task(
+        description="What is 3 times 3?",
+        agent=agent,
+        expected_output="The result of the multiplication.",
+    )
     output = agent.execute_task(task1)
     output = agent.execute_task(task2)
@@ -168,7 +192,9 @@ def test_cache_hitting():
     }
     task = Task(
-        description="What is 2 times 6 times 3? Return only the number", agent=agent
+        description="What is 2 times 6 times 3? Return only the number",
+        agent=agent,
+        expected_output="The result of the multiplication.",
     )
     output = agent.execute_task(task)
     assert output == "36"
@@ -184,6 +210,7 @@ def test_cache_hitting():
     task = Task(
         description="What is 2 times 6? Ignore correctness and just return the result of the multiplication tool.",
         agent=agent,
+        expected_output="The result of the multiplication.",
     )
     output = agent.execute_task(task)
     assert output == "0"
@@ -206,9 +233,13 @@ def test_agent_execution_with_specific_tools():
         allow_delegation=False,
     )
-    task = Task(description="What is 3 times 4", agent=agent)
+    task = Task(
+        description="What is 3 times 4",
+        agent=agent,
+        expected_output="The result of the multiplication.",
+    )
     output = agent.execute_task(task=task, tools=[multiplier])
-    assert output == "12"
+    assert output == "The result of the multiplication is 12."

 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -232,6 +263,7 @@ def test_agent_custom_max_iterations():
     ) as private_mock:
         task = Task(
             description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
+            expected_output="The final answer",
         )
         agent.execute_task(
             task=task,
@@ -259,7 +291,8 @@ def test_agent_repeated_tool_usage(capsys):
     )
     task = Task(
-        description="The final answer is 42. But don't give it until I tell you so, instead keep using the `get_final_answer` tool."
+        description="The final answer is 42. But don't give it until I tell you so, instead keep using the `get_final_answer` tool.",
+        expected_output="The final answer",
     )

     # force cleaning cache
     agent.tools_handler.cache = CacheHandler()
@@ -270,7 +303,7 @@ def test_agent_repeated_tool_usage(capsys):
     captured = capsys.readouterr()
-    assert "Final Answer: 42" in captured.out
+    assert "The final answer is 42." in captured.out

 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -290,7 +323,8 @@ def test_agent_moved_on_after_max_iterations():
     )
     task = Task(
-        description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool. Until you're told you could give my final answer if I'm ready."
+        description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool over and over until you're told you can give yout final answer.",
+        expected_output="The final answer",
     )
     output = agent.execute_task(
         task=task,
@@ -320,16 +354,14 @@ def test_agent_respect_the_max_rpm_set(capsys):
     with patch.object(RPMController, "_wait_for_next_minute") as moveon:
         moveon.return_value = True
         task = Task(
-            description="Use tool logic for `get_final_answer` but fon't give you final answer yet, instead keep using it unless you're told to give your final answer"
+            description="Use tool logic for `get_final_answer` but fon't give you final answer yet, instead keep using it unless you're told to give your final answer",
+            expected_output="The final answer",
         )
         output = agent.execute_task(
             task=task,
             tools=[get_final_answer],
         )
-        assert (
-            output
-            == 'The result of using the `get_final_answer` tool with the input "test input" is 42.'
-        )
+        assert output == "42"
         captured = capsys.readouterr()
         assert "Max RPM reached, waiting for next minute to start." in captured.out
         moveon.assert_called()
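These max_rpm tests patch RPMController._wait_for_next_minute so the suite never actually sleeps; the assertion on captured.out then proves the throttle fired. A sketch of the mechanism under test, assuming RPMController lives in crewAI's utilities (the exact import path is a guess):

from unittest.mock import patch
from crewai.utilities import RPMController  # assumed path

with patch.object(RPMController, "_wait_for_next_minute") as moveon:
    moveon.return_value = True  # skip the real one-minute wait
    # ... execute a tool-looping task on an agent with max_rpm=1 ...
    moveon.assert_called()  # the limiter had to wait at least once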
@@ -357,7 +389,8 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
     )
     task = Task(
-        description="Don't give a Final Answer, instead keep using the `get_final_answer` tool.",
+        description="Use tool logic for `get_final_answer` but fon't give you final answer yet, instead keep using it unless you're told to give your final answer",
+        expected_output="The final answer",
         tools=[get_final_answer],
         agent=agent,
     )
@@ -390,6 +423,7 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
         backstory="test backstory",
         max_rpm=10,
         verbose=True,
+        allow_delegation=False,
     )

     agent2 = Agent(
@@ -398,15 +432,16 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
         backstory="test backstory2",
         max_iter=2,
         verbose=True,
+        allow_delegation=False,
     )

     tasks = [
         Task(
-            description="Just say hi.",
-            agent=agent1,
+            description="Just say hi.", agent=agent1, expected_output="Your greeting."
         ),
         Task(
-            description="Don't give a Final Answer, instead keep using the `get_final_answer` tool non-stop",
+            description="NEVER give a Final Answer, instead keep using the `get_final_answer` tool non-stop",
+            expected_output="The final answer",
             tools=[get_final_answer],
             agent=agent2,
         ),
@@ -430,7 +465,7 @@ def test_agent_error_on_parsing_tool(capsys):
     from langchain.tools import tool

     @tool
-    def get_final_answer(anything: str) -> float:
+    def get_final_answer() -> float:
         """Get the final answer but don't give it yet, just re-use this
         tool non-stop."""
         return 42
@@ -444,12 +479,18 @@ def test_agent_error_on_parsing_tool(capsys):
     tasks = [
         Task(
             description="Use the get_final_answer tool.",
+            expected_output="The final answer",
             agent=agent1,
             tools=[get_final_answer],
         )
     ]
-    crew = Crew(agents=[agent1], tasks=tasks, verbose=2)
+    crew = Crew(
+        agents=[agent1],
+        tasks=tasks,
+        verbose=2,
+        function_calling_llm=ChatOpenAI(model="gpt-4-0125-preview"),
+    )

     with patch.object(ToolUsage, "_render") as force_exception:
         force_exception.side_effect = Exception("Error on parsing tool.")
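This hunk also gives the crew a dedicated function-calling model, presumably so structured tool-call extraction runs on a separate LLM from the agents' main one. A sketch of the pattern, assuming ChatOpenAI is imported from the langchain_openai package (the import location is an assumption):

from crewai import Crew
from langchain_openai import ChatOpenAI  # assumed import; older code used langchain.chat_models

crew = Crew(
    agents=[agent1],
    tasks=tasks,
    verbose=2,
    # a separate model handles tool-call parsing for all agents in the crew
    function_calling_llm=ChatOpenAI(model="gpt-4-0125-preview"),
)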
@@ -480,6 +521,7 @@ def test_agent_remembers_output_format_after_using_tools_too_many_times():
     tasks = [
         Task(
             description="Use tool logic for `get_final_answer` but fon't give you final answer yet, instead keep using it unless you're told to give your final answer",
+            expected_output="The final answer",
             agent=agent1,
             tools=[get_final_answer],
         )
@@ -498,10 +540,15 @@ def test_agent_use_specific_tasks_output_as_context(capsys):
     agent2 = Agent(role="test role2", goal="test goal2", backstory="test backstory2")

-    say_hi_task = Task(description="Just say hi.", agent=agent1)
-    say_bye_task = Task(description="Just say bye.", agent=agent1)
+    say_hi_task = Task(
+        description="Just say hi.", agent=agent1, expected_output="Your greeting."
+    )
+    say_bye_task = Task(
+        description="Just say bye.", agent=agent1, expected_output="Your farewell."
+    )
     answer_task = Task(
         description="Answer accordingly to the context you got.",
+        expected_output="Your answer.",
         context=[say_hi_task],
         agent=agent2,
     )
@@ -537,6 +584,7 @@ def test_agent_step_callback():
     essay = Task(
         description="Write and then review an small paragraph on AI until it's AMAZING",
+        expected_output="The final paragraph.",
         agent=agent1,
     )
     tasks = [essay]
@@ -571,6 +619,7 @@ def test_agent_function_calling_llm():
     essay = Task(
         description="Write and then review an small paragraph on AI until it's AMAZING",
+        expected_output="The final paragraph.",
         agent=agent1,
     )
     tasks = [essay]
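All of these tests run under @pytest.mark.vcr, so changing task descriptions and expected outputs invalidates the recorded HTTP cassettes; the commit's large addition/deletion counts are consistent with cassettes being re-recorded alongside the test edits. Assuming the suite uses pytest-recording, cassettes for a changed test can be refreshed with something like `pytest -k test_agent_execution --record-mode=rewrite` and then committed together with the new assertions.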