Removing LangChain and Rebuilding Executor (#1322)
* rebuilding executor
* removing langchain
* Making all tests good
* fixing types and adding ability for not using system prompts
* improving types
* pleasing the types gods
* pleasing the types gods
* fixing parser, tools and executor
* making sure all tests pass
* final pass
* fixing type
* Updating Docs
* preparing to cut new version
@@ -6,15 +6,14 @@ from unittest.mock import patch
import pytest

from crewai import Agent, Crew, Task
from crewai.agents.cache import CacheHandler
-from crewai.agents.executor import CrewAgentExecutor
-from crewai.agents.parser import CrewAgentParser
+from crewai.agents.crew_agent_executor import CrewAgentExecutor
+from crewai.llm import LLM
+from crewai.agents.parser import CrewAgentParser, OutputParserException
from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage
from crewai.utilities import RPMController
-from langchain.schema import AgentAction
-from langchain.tools import tool
-from langchain_core.exceptions import OutputParserException
-from langchain_openai import ChatOpenAI
+from crewai_tools import tool
+from crewai.agents.parser import AgentAction


def test_agent_creation():
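The import swap above is the whole PR in miniature: everything these tests previously pulled from langchain, langchain_core, and langchain_openai now ships in crewai itself (executor, parser, AgentAction, OutputParserException) or in crewai_tools (the @tool decorator). A minimal sketch of the post-PR style, assuming only the APIs visible in this diff; the model and temperature values are illustrative:

    # Sketch of the new import surface; values are placeholders, not
    # taken verbatim from this diff.
    from crewai import Agent
    from crewai.llm import LLM

    # LLM configuration no longer goes through a LangChain chat model.
    llm = LLM(model="gpt-4o", temperature=0.7)

    agent = Agent(
        role="researcher",
        goal="summarize findings",
        backstory="an experienced analyst",
        llm=llm,  # the tests below mostly pass a plain string like "gpt-4o" instead
    )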
@@ -28,15 +27,20 @@ def test_agent_creation():

def test_agent_default_values():
    agent = Agent(role="test role", goal="test goal", backstory="test backstory")

-    assert isinstance(agent.llm, ChatOpenAI)
-    assert agent.llm.model_name == "gpt-4o"
-    assert agent.llm.temperature == 0.7
-    assert agent.llm.verbose is False
-    assert agent.allow_delegation is True
+    assert agent.llm == "gpt-4o"
+    assert agent.allow_delegation is False


def test_custom_llm():
    agent = Agent(
+        role="test role", goal="test goal", backstory="test backstory", llm="gpt-4"
+    )
+    assert agent.llm == "gpt-4"
+
+
+def test_custom_llm_with_langchain():
+    from langchain_openai import ChatOpenAI
+
+    agent = Agent(
        role="test role",
        goal="test goal",

@@ -44,9 +48,7 @@ def test_custom_llm():
        llm=ChatOpenAI(temperature=0, model="gpt-4"),
    )

-    assert isinstance(agent.llm, ChatOpenAI)
-    assert agent.llm.model_name == "gpt-4"
-    assert agent.llm.temperature == 0
+    assert agent.llm == "gpt-4"


@pytest.mark.vcr(filter_headers=["authorization"])
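These assertions capture the core behavioral change: `agent.llm` now holds a plain model identifier string rather than a `ChatOpenAI` instance, and passing a LangChain object is still tolerated but gets reduced to its model name. Note that the delegation default also flips from True to False here. A short sketch restating the new contract:

    # Restates the assertions above; runs offline since no task executes.
    from crewai import Agent

    agent = Agent(role="r", goal="g", backstory="b")
    assert agent.llm == "gpt-4o"            # a string, not a LangChain object
    assert agent.allow_delegation is False  # default changed in this PR

    custom = Agent(role="r", goal="g", backstory="b", llm="gpt-4")
    assert custom.llm == "gpt-4"            # any model id litellm understands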
@@ -89,7 +91,7 @@ def test_agent_execution_with_tools():
        expected_output="The result of the multiplication.",
    )
    output = agent.execute_task(task)
-    assert output == "The result of 3 times 4 is 12."
+    assert output == "The result of the multiplication is 12."


@pytest.mark.vcr(filter_headers=["authorization"])

@@ -104,7 +106,6 @@ def test_logging_tool_usage():
        goal="test goal",
        backstory="test backstory",
        tools=[multiplier],
        allow_delegation=False,
-        verbose=True,
    )

@@ -120,7 +121,7 @@ def test_logging_tool_usage():
    tool_usage = InstructorToolCalling(
        tool_name=multiplier.name, arguments={"first_number": 3, "second_number": 4}
    )
-    assert output == "12"
+    assert output == "The result of 3 times 4 is 12."
    assert agent.tools_handler.last_used_tool.tool_name == tool_usage.tool_name
    assert agent.tools_handler.last_used_tool.arguments == tool_usage.arguments
@@ -274,13 +275,67 @@ def test_agent_execution_with_specific_tools():
        expected_output="The result of the multiplication.",
    )
    output = agent.execute_task(task=task, tools=[multiplier])
-    assert output == "The result of the multiplication is 12."
+    assert output == "The result of the multiplication of 3 times 4 is 12."


+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_agent_powered_by_new_o_model_family_that_allows_skipping_tool():
+    @tool
+    def multiplier(first_number: int, second_number: int) -> float:
+        """Useful for when you need to multiply two numbers together."""
+        return first_number * second_number
+
+    agent = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+        llm="o1-preview",
+        max_iter=3,
+        use_system_prompt=False,
+        allow_delegation=False,
+        use_stop_words=False,
+    )
+
+    task = Task(
+        description="What is 3 times 4?",
+        agent=agent,
+        expected_output="The result of the multiplication.",
+    )
+    output = agent.execute_task(task=task, tools=[multiplier])
+    assert output == "12"
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_agent_powered_by_new_o_model_family_that_uses_tool():
+    @tool
+    def comapny_customer_data() -> float:
+        """Useful for getting customer related data."""
+        return "The company has 42 customers"
+
+    agent = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+        llm="o1-preview",
+        max_iter=3,
+        use_system_prompt=False,
+        allow_delegation=False,
+        use_stop_words=False,
+    )
+
+    task = Task(
+        description="How many customers does the company have?",
+        agent=agent,
+        expected_output="The number of customers",
+    )
+    output = agent.execute_task(task=task, tools=[comapny_customer_data])
+    assert output == "The company has 42 customers"
+
+
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_custom_max_iterations():
    @tool
-    def get_final_answer(numbers) -> float:
+    def get_final_answer() -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42
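The two new tests above exercise the knobs this PR adds for OpenAI's o1-family models, which reject system messages and stop sequences: `use_system_prompt=False` folds everything into the user prompt, and `use_stop_words=False` keeps stop tokens out of the request. A hedged sketch of the pattern, with the role, goal, and backstory strings as placeholders:

    # Illustrative configuration for reasoning models; the flags are the
    # ones the tests above use, everything else is a placeholder.
    from crewai import Agent

    o1_agent = Agent(
        role="analyst",
        goal="answer numeric questions",
        backstory="a careful analyst",
        llm="o1-preview",
        use_system_prompt=False,  # o1 models do not accept a system message
        use_stop_words=False,     # o1 models do not support stop sequences
        allow_delegation=False,
        max_iter=3,
    )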
@@ -294,7 +349,7 @@ def test_agent_custom_max_iterations():
    )

    with patch.object(
-        CrewAgentExecutor, "_iter_next_step", wraps=agent.agent_executor._iter_next_step
+        LLM, "call", wraps=LLM("gpt-4o", stop=["\nObservation:"]).call
    ) as private_mock:
        task = Task(
            description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",

@@ -304,13 +359,13 @@ def test_agent_custom_max_iterations():
            task=task,
            tools=[get_final_answer],
        )
-        private_mock.assert_called_once()
+        assert private_mock.call_count == 2


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_repeated_tool_usage(capsys):
    @tool
-    def get_final_answer(anything: str) -> float:
+    def get_final_answer() -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42
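With the LangChain executor gone there is no `_iter_next_step` to spy on; each agent iteration now funnels through `LLM.call`, so the test counts those calls instead. A minimal offline sketch of the same spying trick, assuming `LLM.call` accepts a list of chat-style message dicts (inferred from this diff, not checked against the release); the real test uses `wraps=` plus a VCR cassette so the calls go through:

    # Count LLM round-trips by patching the class method with a spy.
    from unittest.mock import patch

    from crewai.llm import LLM

    llm = LLM("gpt-4o", stop=["\nObservation:"])

    with patch.object(LLM, "call", return_value="Hi!") as spy:
        assert llm.call([{"role": "user", "content": "Say hi"}]) == "Hi!"
        assert spy.call_count == 1  # one executor iteration == one LLM.call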
@@ -320,7 +375,7 @@ def test_agent_repeated_tool_usage(capsys):
        goal="test goal",
        backstory="test backstory",
        max_iter=4,
-        llm=ChatOpenAI(model="gpt-4"),
+        llm="gpt-4",
        allow_delegation=False,
        verbose=True,
    )

@@ -357,7 +412,7 @@ def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys):
        goal="test goal",
        backstory="test backstory",
        max_iter=4,
-        llm=ChatOpenAI(model="gpt-4"),
+        llm="gpt-4",
        allow_delegation=False,
        verbose=True,
        cache=False,

@@ -383,7 +438,7 @@ def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_moved_on_after_max_iterations():
    @tool
-    def get_final_answer(numbers) -> float:
+    def get_final_answer() -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42
@@ -404,13 +459,13 @@ def test_agent_moved_on_after_max_iterations():
            task=task,
            tools=[get_final_answer],
        )
-        assert output == "42"
+        assert output == "The final answer is 42."


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_respect_the_max_rpm_set(capsys):
    @tool
-    def get_final_answer(anything: str) -> float:
+    def get_final_answer() -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42
@@ -444,11 +499,10 @@ def test_agent_respect_the_max_rpm_set(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
    from unittest.mock import patch

-    from langchain.tools import tool
+    from crewai_tools import tool

    @tool
-    def get_final_answer(numbers) -> float:
+    def get_final_answer() -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42

@@ -483,10 +537,10 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
def test_agent_without_max_rpm_respet_crew_rpm(capsys):
    from unittest.mock import patch

-    from langchain.tools import tool
+    from crewai_tools import tool

    @tool
-    def get_final_answer(numbers) -> float:
+    def get_final_answer() -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42
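Every `from langchain.tools import tool` in this file becomes `from crewai_tools import tool`; the decorator plays the same role, turning a plain function with a docstring into a tool the agent can call. A minimal sketch (the tool name and body are illustrative):

    # The docstring doubles as the tool description shown to the LLM.
    from crewai_tools import tool

    @tool
    def word_counter(text: str) -> int:
        """Count the number of words in a piece of text."""
        return len(text.split())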
@@ -496,6 +550,7 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
        goal="test goal",
        backstory="test backstory",
        max_rpm=10,
+        max_iter=2,
        verbose=True,
        allow_delegation=False,
    )

@@ -504,7 +559,7 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
        role="test role2",
        goal="test goal2",
        backstory="test backstory2",
-        max_iter=2,
+        max_iter=1,
        verbose=True,
        allow_delegation=False,
    )

@@ -514,7 +569,7 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
            description="Just say hi.", agent=agent1, expected_output="Your greeting."
        ),
        Task(
-            description="NEVER give a Final Answer, instead keep using the `get_final_answer` tool non-stop",
+            description="NEVER give a Final Answer, unless you are told otherwise, instead keep using the `get_final_answer` tool non-stop, until you must give you best final answer",
            expected_output="The final answer",
            tools=[get_final_answer],
            agent=agent2,
@@ -535,8 +590,7 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_error_on_parsing_tool(capsys):
    from unittest.mock import patch

-    from langchain.tools import tool
+    from crewai_tools import tool

    @tool
    def get_final_answer() -> float:

@@ -548,6 +602,7 @@ def test_agent_error_on_parsing_tool(capsys):
        role="test role",
        goal="test goal",
        backstory="test backstory",
+        max_iter=1,
        verbose=True,
    )
    tasks = [

@@ -563,7 +618,7 @@ def test_agent_error_on_parsing_tool(capsys):
        agents=[agent1],
        tasks=tasks,
        verbose=True,
-        function_calling_llm=ChatOpenAI(model="gpt-4-0125-preview"),
+        function_calling_llm="gpt-4o",
    )

    with patch.object(ToolUsage, "_render") as force_exception:
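`function_calling_llm` follows the same pattern as `llm`: a bare model string instead of a LangChain chat model, settable on the crew (as here) or per agent. A hedged sketch, with the role and task text as placeholders:

    # Crew-level function_calling_llm is used when extracting structured
    # tool arguments; names and strings below are placeholders.
    from crewai import Agent, Crew, Task

    agent = Agent(role="worker", goal="do the task", backstory="diligent")
    task = Task(description="Say hi.", expected_output="A greeting.", agent=agent)

    crew = Crew(
        agents=[agent],
        tasks=[task],
        function_calling_llm="gpt-4o",  # plain model id, resolved via litellm
    )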
@@ -577,10 +632,10 @@ def test_agent_error_on_parsing_tool(capsys):
def test_agent_remembers_output_format_after_using_tools_too_many_times():
    from unittest.mock import patch

-    from langchain.tools import tool
+    from crewai_tools import tool

    @tool
-    def get_final_answer(anything: str) -> float:
+    def get_final_answer() -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42

@@ -611,7 +666,6 @@ def test_agent_remembers_output_format_after_using_tools_too_many_times():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_use_specific_tasks_output_as_context(capsys):
    agent1 = Agent(role="test role", goal="test goal", backstory="test backstory")
-
    agent2 = Agent(role="test role2", goal="test goal2", backstory="test backstory2")

    say_hi_task = Task(

@@ -631,7 +685,7 @@ def test_agent_use_specific_tasks_output_as_context(capsys):

    crew = Crew(agents=[agent1, agent2], tasks=tasks)
    result = crew.kickoff()
+    print("LOWER RESULT", result.raw)

    assert "bye" not in result.raw.lower()
    assert "hi" in result.raw.lower() or "hello" in result.raw.lower()

@@ -640,12 +694,12 @@ def test_agent_use_specific_tasks_output_as_context(capsys):
def test_agent_step_callback():
    class StepCallback:
        def callback(self, step):
-            print(step)
+            pass

    with patch.object(StepCallback, "callback") as callback:

        @tool
-        def learn_about_AI(topic) -> str:
+        def learn_about_AI() -> str:
            """Useful for when you need to learn about AI to write an paragraph about it."""
            return "AI is a very broad field."
@@ -672,36 +726,51 @@ def test_agent_step_callback():

@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_function_calling_llm():
-    from langchain_openai import ChatOpenAI
-
-    llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
-
-    @tool
-    def learn_about_AI(topic) -> str:
-        """Useful for when you need to learn about AI to write an paragraph about it."""
-        return "AI is a very broad field."
-
-    agent1 = Agent(
-        role="test role",
-        goal="test goal",
-        backstory="test backstory",
-        tools=[learn_about_AI],
-        llm=ChatOpenAI(model="gpt-4-0125-preview"),
-        function_calling_llm=llm,
-    )
-
-    essay = Task(
-        description="Write and then review an small paragraph on AI until it's AMAZING",
-        expected_output="The final paragraph.",
-        agent=agent1,
-    )
-    tasks = [essay]
-    crew = Crew(agents=[agent1], tasks=tasks)
-
-    with patch.object(llm.client, "create", wraps=llm.client.create) as private_mock:
-        crew.kickoff()
-        private_mock.assert_called()
+    llm = "gpt-4o"
+
+    @tool
+    def learn_about_AI() -> str:
+        """Useful for when you need to learn about AI to write an paragraph about it."""
+        return "AI is a very broad field."
+
+    agent1 = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+        tools=[learn_about_AI],
+        llm="gpt-4o",
+        max_iter=2,
+        function_calling_llm=llm,
+    )
+
+    essay = Task(
+        description="Write and then review an small paragraph on AI until it's AMAZING",
+        expected_output="The final paragraph.",
+        agent=agent1,
+    )
+    tasks = [essay]
+    crew = Crew(agents=[agent1], tasks=tasks)
+
+    from unittest.mock import patch, Mock
+    import instructor
+
+    with patch.object(instructor, "from_litellm") as mock_from_litellm:
+        mock_client = Mock()
+        mock_from_litellm.return_value = mock_client
+        mock_chat = Mock()
+        mock_client.chat = mock_chat
+        mock_completions = Mock()
+        mock_chat.completions = mock_completions
+        mock_create = Mock()
+        mock_completions.create = mock_create
+
+        crew.kickoff()
+
+        mock_from_litellm.assert_called()
+        mock_create.assert_called()
+        calls = mock_create.call_args_list
+        assert any(
+            call.kwargs.get("model") == "gpt-4o" for call in calls
+        ), "Instructor was not created with the expected model"


def test_agent_count_formatting_error():
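The rewritten test above is the clearest picture of the new function-calling path: instead of spying on a LangChain client, it stubs out `instructor.from_litellm` and checks that the completion call is issued against the configured model. A stripped-down sketch of the same mocking pattern, standalone and without the crew (the direct call at the end stands in for what the crew does internally):

    # Standalone illustration of the mocking pattern used above.
    from unittest.mock import Mock, patch

    import instructor

    with patch.object(instructor, "from_litellm") as mock_from_litellm:
        mock_client = Mock()
        mock_from_litellm.return_value = mock_client

        client = instructor.from_litellm(Mock())       # returns mock_client
        client.chat.completions.create(model="gpt-4o")

        mock_from_litellm.assert_called()
        assert mock_client.chat.completions.create.call_args.kwargs["model"] == "gpt-4o"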
@@ -714,8 +783,7 @@ def test_agent_count_formatting_error():
        verbose=True,
    )

-    parser = CrewAgentParser()
-    parser.agent = agent1
+    parser = CrewAgentParser(agent=agent1)

    with patch.object(Agent, "increment_formatting_errors") as mock_count_errors:
        test_text = "This text does not match expected formats."
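`CrewAgentParser` now takes its agent in the constructor, and both `AgentAction` and `OutputParserException` live in `crewai.agents.parser` rather than in LangChain. A hedged sketch of exercising the parser directly; the ReAct-style text and the parse behavior shown are assumptions based on this diff, not a documented API:

    # Parse a ReAct-style completion into an action; the input is made up.
    from crewai import Agent
    from crewai.agents.parser import AgentAction, CrewAgentParser

    agent = Agent(role="r", goal="g", backstory="b")
    parser = CrewAgentParser(agent=agent)

    result = parser.parse(
        "Thought: I should multiply\n"
        "Action: multiplier\n"
        'Action Input: {"first_number": 3, "second_number": 4}'
    )
    assert isinstance(result, AgentAction)
    assert result.tool == "multiplier"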
@@ -751,7 +819,6 @@ def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
    crew = Crew(agents=[agent1], tasks=tasks)

    result = crew.kickoff()
-    print("RESULT: ", result.raw)
    assert result.raw == "Howdy!"


@@ -792,22 +859,6 @@ def test_tool_usage_information_is_appended_to_agent():
    ]


-def test_agent_llm_uses_token_calc_handler_with_llm_has_model_name():
-    agent1 = Agent(
-        role="test role",
-        goal="test goal",
-        backstory="test backstory",
-        verbose=True,
-    )
-
-    assert len(agent1.llm.callbacks) == 1
-    assert agent1.llm.callbacks[0].__class__.__name__ == "TokenCalcHandler"
-    assert agent1.llm.callbacks[0].model_name == "gpt-4o"
-    assert (
-        agent1.llm.callbacks[0].token_cost_process.__class__.__name__ == "TokenProcess"
-    )
-
-
def test_agent_definition_based_on_dict():
    config = {
        "role": "test role",
@@ -846,7 +897,7 @@ def test_agent_human_input():
    )

    with patch.object(CrewAgentExecutor, "_ask_human_input") as mock_human_input:
-        mock_human_input.return_value = "Hello"
+        mock_human_input.return_value = "Don't say hi, say Hello instead!"
        output = agent.execute_task(task)
        mock_human_input.assert_called_once()
        assert output == "Hello"
@@ -870,6 +921,31 @@ def test_interpolate_inputs():
    assert agent.backstory == "I am the master of nothing"


+def test_not_using_system_prompt():
+    agent = Agent(
+        role="{topic} specialist",
+        goal="Figure {goal} out",
+        backstory="I am the master of {role}",
+        use_system_prompt=False,
+    )
+
+    agent.create_agent_executor()
+    assert not agent.agent_executor.prompt.get("user")
+    assert not agent.agent_executor.prompt.get("system")
+
+
+def test_using_system_prompt():
+    agent = Agent(
+        role="{topic} specialist",
+        goal="Figure {goal} out",
+        backstory="I am the master of {role}",
+    )
+
+    agent.create_agent_executor()
+    assert agent.agent_executor.prompt.get("user")
+    assert agent.agent_executor.prompt.get("system")
+
+
def test_system_and_prompt_template():
    agent = Agent(
        role="{topic} specialist",
@@ -886,13 +962,11 @@ def test_system_and_prompt_template():
{{ .Response }}<|eot_id|>""",
    )

-    template = agent.agent_executor.agent.dict()["runnable"]["middle"][0]["template"]
-    assert (
-        template
-        == """<|start_header_id|>system<|end_header_id|>
+    expected_prompt = """<|start_header_id|>system<|end_header_id|>

You are {role}. {backstory}
-Your personal goal is: {goal}To give my best complete final answer to the task use the exact following format:
+Your personal goal is: {goal}
+To give my best complete final answer to the task use the exact following format:

Thought: I now can give a great answer
Final Answer: my best complete final answer to the task.

@@ -906,12 +980,22 @@ Current Task: {input}

Begin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!

-Thought:
-{agent_scratchpad}<|eot_id|>
+Thought:<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>

"""
-    )
+
+    with patch.object(CrewAgentExecutor, "_format_prompt") as mock_format_prompt:
+        mock_format_prompt.return_value = expected_prompt
+
+        # Trigger the _format_prompt method
+        agent.agent_executor._format_prompt("dummy_prompt", {})
+
+        # Assert that _format_prompt was called
+        mock_format_prompt.assert_called_once()
+
+        # Assert that the returned prompt matches the expected prompt
+        assert mock_format_prompt.return_value == expected_prompt


@patch("crewai.agent.CrewTrainingHandler")
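The two new tests and the template rewrite above pin down how the rebuilt executor assembles prompts: `create_agent_executor()` builds a `prompt` dict, and `use_system_prompt` decides whether it is split into "system"/"user" parts or collapsed into a single prompt string. A hedged sketch of inspecting this, with attribute names taken from the tests rather than from documented API:

    # Inspect how the executor splits the prompt; runs offline, no LLM call.
    from crewai import Agent

    split = Agent(role="r", goal="g", backstory="b")
    split.create_agent_executor()
    assert split.agent_executor.prompt.get("system")  # system/user pair

    merged = Agent(role="r", goal="g", backstory="b", use_system_prompt=False)
    merged.create_agent_executor()
    assert not merged.agent_executor.prompt.get("system")  # single prompt instead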
@@ -1000,16 +1084,18 @@ def test_agent_max_retry_limit():
        [
            mock.call(
                {
-                    "input": "Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi \n you MUST return the actual complete content as the final answer, not a summary.",
+                    "input": "Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.",
                    "tool_names": "",
                    "tools": "",
+                    "ask_for_human_input": True,
                }
            ),
            mock.call(
                {
-                    "input": "Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi \n you MUST return the actual complete content as the final answer, not a summary.",
+                    "input": "Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.",
                    "tool_names": "",
                    "tools": "",
+                    "ask_for_human_input": True,
                }
            ),
        ]
@@ -1024,11 +1110,14 @@ def test_handle_context_length_exceeds_limit():
        backstory="test backstory",
    )
    original_action = AgentAction(
-        tool="test_tool", tool_input="test_input", log="test_log"
+        tool="test_tool",
+        tool_input="test_input",
+        text="test_log",
+        thought="test_thought",
    )

    with patch.object(
-        CrewAgentExecutor, "_iter_next_step", wraps=agent.agent_executor._iter_next_step
+        CrewAgentExecutor, "invoke", wraps=agent.agent_executor.invoke
    ) as private_mock:
        task = Task(
            description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
@@ -1038,25 +1127,23 @@ def test_handle_context_length_exceeds_limit():
            task=task,
        )
        private_mock.assert_called_once()
-        with patch("crewai.agents.executor.click") as mock_prompt:
-            mock_prompt.return_value = "y"
-            with patch.object(
-                CrewAgentExecutor, "_handle_context_length"
-            ) as mock_handle_context:
-                mock_handle_context.side_effect = ValueError(
-                    "Context length limit exceeded"
-                )
-
-                long_input = "This is a very long input. " * 10000
-
-                # Attempt to handle context length, expecting the mocked error
-                with pytest.raises(ValueError) as excinfo:
-                    agent.agent_executor._handle_context_length(
-                        [(original_action, long_input)]
-                    )
-
-                assert "Context length limit exceeded" in str(excinfo.value)
-                mock_handle_context.assert_called_once()
+        with patch.object(
+            CrewAgentExecutor, "_handle_context_length"
+        ) as mock_handle_context:
+            mock_handle_context.side_effect = ValueError(
+                "Context length limit exceeded"
+            )
+
+            long_input = "This is a very long input. " * 10000
+
+            # Attempt to handle context length, expecting the mocked error
+            with pytest.raises(ValueError) as excinfo:
+                agent.agent_executor._handle_context_length(
+                    [(original_action, long_input)]
+                )
+
+            assert "Context length limit exceeded" in str(excinfo.value)
+            mock_handle_context.assert_called_once()


@pytest.mark.vcr(filter_headers=["authorization"])
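`AgentAction` is now crewai's own type, imported from `crewai.agents.parser` at the top of the file: LangChain's single `log` field becomes separate `text` and `thought` fields, and the executor's public entry point is `invoke` rather than the internal `_iter_next_step`. A minimal sketch of constructing one, with field values that are illustrative:

    # crewai's own action type; `text` carries the raw completion and
    # `thought` the parsed reasoning line.
    from crewai.agents.parser import AgentAction

    action = AgentAction(
        tool="multiplier",
        tool_input='{"first_number": 3, "second_number": 4}',
        text="Thought: multiply the numbers\nAction: multiplier",
        thought="multiply the numbers",
    )
    assert action.tool == "multiplier"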
@@ -1065,11 +1152,12 @@ def test_handle_context_length_exceeds_limit_cli_no():
        role="test role",
        goal="test goal",
        backstory="test backstory",
+        sliding_context_window=False,
    )
    task = Task(description="test task", agent=agent, expected_output="test output")

    with patch.object(
-        CrewAgentExecutor, "_iter_next_step", wraps=agent.agent_executor._iter_next_step
+        CrewAgentExecutor, "invoke", wraps=agent.agent_executor.invoke
    ) as private_mock:
        task = Task(
            description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",

@@ -1079,10 +1167,8 @@ def test_handle_context_length_exceeds_limit_cli_no():
            task=task,
        )
        private_mock.assert_called_once()
-        with patch("crewai.agents.executor.click") as mock_prompt:
-            mock_prompt.return_value = "n"
-            pytest.raises(SystemExit)
-            with patch.object(
-                CrewAgentExecutor, "_handle_context_length"
-            ) as mock_handle_context:
-                mock_handle_context.assert_not_called()
+        pytest.raises(SystemExit)
+        with patch.object(
+            CrewAgentExecutor, "_handle_context_length"
+        ) as mock_handle_context:
+            mock_handle_context.assert_not_called()
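The new `sliding_context_window=False` flag replaces the old interactive click prompt: instead of asking y/n on the CLI when the context limit is hit, the agent either summarizes the history automatically (the default, per the earlier test that expects `_handle_context_length` to run) or fails outright. A hedged sketch of opting out, with the flag name taken from the test above and the surrounding values as placeholders:

    # Opt out of automatic context-window summarization; with the flag off,
    # exceeding the model's context raises instead of sliding the window.
    from crewai import Agent

    strict_agent = Agent(
        role="worker",
        goal="do the task",
        backstory="placeholder",
        sliding_context_window=False,  # default behavior summarizes and retries
    )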