"""Test Agent creation and execution basic functionality."""

import json
from unittest import mock
from unittest.mock import patch

import pydantic_core
import pytest

from crewai.agent import Agent
from crewai.agents.cache import CacheHandler
from crewai.crew import Crew
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.process import Process
from crewai.task import Task
from crewai.utilities import Logger, RPMController
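
# Shared fixtures: these module-level agents are reused by most tests below;
# tests that need different roles or settings define their own agents inline.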
ceo = Agent(
    role="CEO",
    goal="Make sure the writers in your company produce amazing content.",
    backstory="You're a long-time CEO of a content creation agency with a Senior Writer on the team. You're now working on a new project and want to make sure the content produced is amazing.",
    allow_delegation=True,
)

researcher = Agent(
    role="Researcher",
    goal="Make the best research and analysis on content about AI and AI agents",
    backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on doing research and analysis for a new customer.",
    allow_delegation=False,
)

writer = Agent(
    role="Senior Writer",
    goal="Write the best content about AI and AI agents.",
    backstory="You're a senior writer, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on writing content for a new customer.",
    allow_delegation=False,
)


def test_crew_config_conditional_requirement():
    with pytest.raises(ValueError):
        Crew(process=Process.sequential)

    config = json.dumps(
        {
            "agents": [
                {
                    "role": "Senior Researcher",
                    "goal": "Make the best research and analysis on content about AI and AI agents",
                    "backstory": "You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on doing research and analysis for a new customer.",
                },
                {
                    "role": "Senior Writer",
                    "goal": "Write the best content about AI and AI agents.",
                    "backstory": "You're a senior writer, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on writing content for a new customer.",
                },
            ],
            "tasks": [
                {
                    "description": "Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
                    "expected_output": "Bullet point list of 5 important events.",
                    "agent": "Senior Researcher",
                },
                {
                    "description": "Write a 1 amazing paragraph highlight for each idea that showcases how good an article about this topic could be, check references if necessary or search for more content but make sure it's unique, interesting and well written. Return the list of ideas with their paragraph and your notes.",
                    "expected_output": "A 4 paragraph article about AI.",
                    "agent": "Senior Writer",
                },
            ],
        }
    )
    parsed_config = json.loads(config)

    try:
        crew = Crew(process=Process.sequential, config=config)
    except ValueError:
        pytest.fail("Unexpected ValidationError raised")

    assert [agent.role for agent in crew.agents] == [
        agent["role"] for agent in parsed_config["agents"]
    ]
    assert [task.description for task in crew.tasks] == [
        task["description"] for task in parsed_config["tasks"]
    ]


def test_crew_config_with_wrong_keys():
    no_tasks_config = json.dumps(
        {
            "agents": [
                {
                    "role": "Senior Researcher",
                    "goal": "Make the best research and analysis on content about AI and AI agents",
                    "backstory": "You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on doing research and analysis for a new customer.",
                }
            ]
        }
    )

    no_agents_config = json.dumps(
        {
            "tasks": [
                {
                    "description": "Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
                    "agent": "Senior Researcher",
                }
            ]
        }
    )
    with pytest.raises(ValueError):
        Crew(process=Process.sequential, config='{"wrong_key": "wrong_value"}')
    with pytest.raises(ValueError):
        Crew(process=Process.sequential, config=no_tasks_config)
    with pytest.raises(ValueError):
        Crew(process=Process.sequential, config=no_agents_config)


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_creation():
    tasks = [
        Task(
            description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
            expected_output="Bullet point list of 5 important events.",
            agent=researcher,
        ),
        Task(
            description="Write a 1 amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
            expected_output="A 4 paragraph article about AI.",
            agent=writer,
        ),
    ]

    crew = Crew(
        agents=[researcher, writer],
        process=Process.sequential,
        tasks=tasks,
    )

    assert (
        crew.kickoff()
        == "1. **The Rise of AI in Healthcare**: The convergence of AI and healthcare is a promising frontier, offering unprecedented opportunities for disease diagnosis and patient outcome prediction. AI's potential to revolutionize healthcare lies in its capacity to synthesize vast amounts of data, generating precise and efficient results. This technological breakthrough, however, is not just about improving accuracy and efficiency; it's about saving lives. As we stand on the precipice of this transformative era, we must prepare for the complex challenges and ethical questions it poses, while embracing its ability to reshape healthcare as we know it.\n\n2. **Ethical Implications of AI**: As AI intertwines with our daily lives, it presents a complex web of ethical dilemmas. This fusion of technology, philosophy, and ethics is not merely academically intriguing but profoundly impacts the fabric of our society. The questions raised range from decision-making transparency to accountability, and from privacy to potential biases. As we navigate this ethical labyrinth, it is crucial to establish robust frameworks and regulations to ensure that AI serves humanity, and not the other way around.\n\n3. **AI and Data Privacy**: The rise of AI brings with it an insatiable appetite for data, spawning new debates around privacy rights. Balancing the potential benefits of AI with the right to privacy is a unique challenge that intersects technology, law, and human rights. In an increasingly digital world, where personal information forms the backbone of many services, we must grapple with these issues. It's time to redefine the concept of privacy and devise innovative solutions that ensure our digital footprints are not abused.\n\n4. **AI in Job Market**: The discourse around AI's impact on employment is a narrative of contrast, a tale of displacement and creation. On one hand, AI threatens to automate a multitude of jobs, on the other, it promises to create new roles that we cannot yet imagine. This intersection of technology, economics, and labor rights is a critical dialogue that will shape our future. As we stand at this crossroads, we must not only brace ourselves for the changes but also seize the opportunities that this technological wave brings.\n\n5. **Future of AI Agents**: The evolution of AI agents signifies a leap towards a future where AI is not just a tool, but a partner. These sophisticated AI agents, employed in customer service to personal assistants, are redefining our interactions with technology. As we gaze into the future of AI agents, we see a landscape of possibilities and challenges. This journey will be about harnessing the potential of AI agents while navigating the issues of trust, dependence, and ethical use."
    )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_hierarchical_process():
    from langchain_openai import ChatOpenAI

    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
        expected_output="5 bullet points with a paragraph for each idea.",
    )

    crew = Crew(
        agents=[researcher, writer],
        process=Process.hierarchical,
        manager_llm=ChatOpenAI(temperature=0, model="gpt-4"),
        tasks=[task],
    )
    result = crew.kickoff()
    assert (
        result
        == "1. 'Demystifying AI: An in-depth exploration of Artificial Intelligence for the layperson' - In this piece, we will unravel the enigma of AI, simplifying its complexities into digestible information for the everyday individual. By using relatable examples and analogies, we will journey through the neural networks and machine learning algorithms that define AI, without the jargon and convoluted explanations that often accompany such topics.\n\n2. 'The Role of AI in Startups: A Game Changer?' - Startups today are harnessing the power of AI to revolutionize their businesses. This article will delve into how AI, as an innovative force, is shaping the startup ecosystem, transforming everything from customer service to product development. We'll explore real-life case studies of startups that have leveraged AI to accelerate their growth and disrupt their respective industries.\n\n3. 'AI and Ethics: Navigating the Complex Landscape' - AI brings with it not just technological advancements, but ethical dilemmas as well. This article will engage readers in a thought-provoking discussion on the ethical implications of AI, exploring issues like bias in algorithms, privacy concerns, job displacement, and the moral responsibility of AI developers. We will also discuss potential solutions and frameworks to address these challenges.\n\n4. 'Unveiling the AI Agents: The Future of Customer Service' - AI agents are poised to reshape the customer service landscape, offering businesses the ability to provide round-the-clock support and personalized experiences. In this article, we'll dive deep into the world of AI agents, examining how they work, their benefits and limitations, and how they're set to redefine customer interactions in the digital age.\n\n5. 'From Science Fiction to Reality: AI in Everyday Life' - AI, once a concept limited to the realm of sci-fi, has now permeated our daily lives. This article will highlight the ubiquitous presence of AI, from voice assistants and recommendation algorithms, to autonomous vehicles and smart homes. We'll explore how AI, in its various forms, is transforming our everyday experiences, making the future seem a lot closer than we imagined."
    )


def test_manager_llm_requirement_for_hierarchical_process():
    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
        expected_output="5 bullet points with a paragraph for each idea.",
    )

    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
        Crew(
            agents=[researcher, writer],
            process=Process.hierarchical,
            tasks=[task],
        )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents():
    tasks = [
        Task(
            description="Produce an amazing 1 paragraph draft of an article about AI Agents.",
            expected_output="A 4 paragraph article about AI.",
            agent=ceo,
        )
    ]

    crew = Crew(
        agents=[ceo, writer],
        process=Process.sequential,
        tasks=tasks,
    )

    assert (
        crew.kickoff()
        == "AI Agents, simply put, are intelligent systems that can perceive their environment and take actions to reach specific goals. Imagine them as digital assistants that can learn, adapt and make decisions. They operate in the realms of software or hardware, like a chatbot on a website or a self-driving car. The key to their intelligence is their ability to learn from their experiences, making them better at their tasks over time. In today's interconnected world, AI agents are transforming our lives. They enhance customer service experiences, streamline business processes, and even predict trends in data. Vehicles equipped with AI agents are making transportation safer. In healthcare, AI agents are helping to diagnose diseases, personalizing treatment plans, and monitoring patient health. As we embrace the digital era, these AI agents are not just important, they're becoming indispensable, shaping a future where technology works intuitively and intelligently to meet our needs."
    )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_verbose_output(capsys):
    tasks = [
        Task(
            description="Research AI advancements.",
            expected_output="A full report on AI advancements.",
            agent=researcher,
        ),
        Task(
            description="Write about AI in healthcare.",
            expected_output="A 4 paragraph article about AI.",
            agent=writer,
        ),
    ]

    crew = Crew(
        agents=[researcher, writer],
        tasks=tasks,
        process=Process.sequential,
        verbose=True,
    )
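
    # verbose=True should emit the [DEBUG]/[INFO] progress lines asserted below.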
    crew.kickoff()
    captured = capsys.readouterr()
    expected_strings = [
        "[DEBUG]: == Working Agent: Researcher",
        "[INFO]: == Starting Task: Research AI advancements.",
        "[DEBUG]: == [Researcher] Task output:",
        "[DEBUG]: == Working Agent: Senior Writer",
        "[INFO]: == Starting Task: Write about AI in healthcare.",
        "[DEBUG]: == [Senior Writer] Task output:",
    ]

    for expected_string in expected_strings:
        assert expected_string in captured.out

    # Now test with verbose set to False
    crew._logger = Logger(verbose_level=False)
    crew.kickoff()
    captured = capsys.readouterr()
    assert captured.out == ""


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_verbose_levels_output(capsys):
    tasks = [
        Task(
            description="Write about AI advancements.",
            expected_output="A 4 paragraph article about AI.",
            agent=researcher,
        )
    ]

    crew = Crew(agents=[researcher], tasks=tasks, process=Process.sequential, verbose=1)

    crew.kickoff()
    captured = capsys.readouterr()
    expected_strings = ["Working Agent: Researcher", "[Researcher] Task output:"]

    for expected_string in expected_strings:
        assert expected_string in captured.out

    # Now test with verbose set to 2
    crew._logger = Logger(verbose_level=2)
    crew.kickoff()
    captured = capsys.readouterr()
    expected_strings = [
        "Working Agent: Researcher",
        "Starting Task: Write about AI advancements.",
        "[Researcher] Task output:",
    ]

    for expected_string in expected_strings:
        assert expected_string in captured.out


@pytest.mark.vcr(filter_headers=["authorization"])
def test_cache_hitting_between_agents():
    from unittest.mock import call, patch

    from langchain.tools import tool

    @tool
    def multiplier(first_number: int, second_number: int) -> float:
        """Useful for when you need to multiply two numbers together."""
        return first_number * second_number

    tasks = [
        Task(
            description="What is 2 times 6? Return only the number.",
            expected_output="the result of multiplication",
            tools=[multiplier],
            agent=ceo,
        ),
        Task(
            description="What is 2 times 6? Return only the number.",
            expected_output="the result of multiplication",
            tools=[multiplier],
            agent=researcher,
        ),
    ]

    crew = Crew(
        agents=[ceo, researcher],
        tasks=tasks,
    )
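
    # Both tasks issue the identical tool call, so the cache is consulted once
    # per task even though the tasks belong to different agents.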
    with patch.object(CacheHandler, "read") as read:
        read.return_value = "12"
        crew.kickoff()
        assert read.call_count == 2, "read was not called exactly twice"
        # Check if read was called with the expected arguments
        expected_calls = [
            call(tool="multiplier", input={"first_number": 2, "second_number": 6}),
            call(tool="multiplier", input={"first_number": 2, "second_number": 6}),
        ]
        read.assert_has_calls(expected_calls, any_order=False)


@pytest.mark.vcr(filter_headers=["authorization"])
def test_api_calls_throttling(capsys):
    from unittest.mock import patch

    from langchain.tools import tool
    from langchain_openai import ChatOpenAI

    @tool
    def get_final_answer(anything) -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42

    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        max_iter=5,
        allow_delegation=False,
        verbose=True,
        llm=ChatOpenAI(model="gpt-4-0125-preview"),
    )

    task = Task(
        description="Don't give a Final Answer, instead keep using the `get_final_answer` tool.",
        expected_output="The final answer.",
        tools=[get_final_answer],
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task], max_rpm=2, verbose=2)
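
    # max_rpm=2 should trip the RPMController after two requests; the wait is
    # patched below so the test logs the throttling message without sleeping.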
    with patch.object(RPMController, "_wait_for_next_minute") as moveon:
        moveon.return_value = True
        crew.kickoff()
        captured = capsys.readouterr()
        assert "Max RPM reached, waiting for next minute to start." in captured.out
        moveon.assert_called()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_full_output():
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        allow_delegation=False,
        verbose=True,
    )

    task1 = Task(
        description="just say hi!",
        expected_output="your greeting",
        agent=agent,
    )
    task2 = Task(
        description="just say hello!",
        expected_output="your greeting",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task1, task2], full_output=True)
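
    # With full_output=True, kickoff returns a dict carrying the final output,
    # each task's output object, and aggregated token usage metrics.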
    result = crew.kickoff()

    assert result == {
        "final_output": "Hello!",
        "tasks_outputs": [task1.output, task2.output],
        "usage_metrics": {
            "total_tokens": 348,
            "prompt_tokens": 314,
            "completion_tokens": 34,
            "successful_requests": 2,
        },
    }


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_kickoff_for_each_full_output():
    inputs = [
        {"topic": "dog"},
        {"topic": "cat"},
        {"topic": "apple"},
    ]

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task], full_output=True)
    results = crew.kickoff_for_each(inputs=inputs)

    assert len(results) == len(inputs)
    for result in results:
        assert "usage_metrics" in result
        assert isinstance(result["usage_metrics"], dict)

        # Assert that all required keys are in usage_metrics and their values are not None
        for key in [
            "total_tokens",
            "prompt_tokens",
            "completion_tokens",
            "successful_requests",
        ]:
            assert key in result["usage_metrics"]
            assert result["usage_metrics"][key] > 0


@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_crew_async_kickoff_for_each_full_output():
    inputs = [
        {"topic": "dog"},
        {"topic": "cat"},
        {"topic": "apple"},
    ]

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task], full_output=True)
    results = await crew.kickoff_for_each_async(inputs=inputs)

    assert len(results) == len(inputs)
    for result in results:
        assert "usage_metrics" in result
        assert isinstance(result["usage_metrics"], dict)

        # Assert that all required keys are in usage_metrics and their values are not None
        for key in [
            "total_tokens",
            "prompt_tokens",
            "completion_tokens",
            "successful_requests",
        ]:
            assert key in result["usage_metrics"]
            # TODO: FIX THIS WHEN USAGE METRICS ARE RE-DONE
            # assert result["usage_metrics"][key] > 0


def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        allow_delegation=False,
        verbose=True,
    )

    task = Task(
        description="just say hi!",
        expected_output="your greeting",
        agent=agent,
    )

    Crew(agents=[agent], tasks=[task], verbose=2)

    assert agent._rpm_controller is None


def test_async_task_execution():
    import threading
    from unittest.mock import patch

    from crewai.tasks.task_output import TaskOutput

    list_ideas = Task(
        description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
        expected_output="Bullet point list of 5 important events.",
        agent=researcher,
        async_execution=True,
    )
    list_important_history = Task(
        description="Research the history of AI and give me the 5 most important events that shaped the technology.",
        expected_output="Bullet point list of 5 important events.",
        agent=researcher,
        async_execution=True,
    )
    write_article = Task(
        description="Write an article about the history of AI and its most important events.",
        expected_output="A 4 paragraph article about AI.",
        agent=writer,
        context=[list_ideas, list_important_history],
    )

    crew = Crew(
        agents=[researcher, writer],
        process=Process.sequential,
        tasks=[list_ideas, list_important_history, write_article],
    )
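
    # Async tasks are executed on background threads; Thread.start/join are
    # patched so the test can verify they are called without real execution.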
    with patch.object(Agent, "execute_task") as execute:
        execute.return_value = "ok"
        with patch.object(threading.Thread, "start") as start:
            thread = threading.Thread(target=lambda: None, args=()).start()
            start.return_value = thread
            with patch.object(threading.Thread, "join", wraps=thread.join()) as join:
                list_ideas.output = TaskOutput(
                    description="A 4 paragraph article about AI.",
                    raw_output="ok",
                    agent="writer",
                )
                list_important_history.output = TaskOutput(
                    description="A 4 paragraph article about AI.",
                    raw_output="ok",
                    agent="writer",
                )
                crew.kickoff()
                start.assert_called()
                join.assert_called()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_single_input():
    """Tests if kickoff_for_each works with a single input."""
    from unittest.mock import patch

    inputs = [{"topic": "dog"}]
    expected_outputs = ["Dogs are loyal companions and popular pets."]

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )
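
    # Patch execute_task below so kickoff_for_each returns deterministic
    # outputs without calling a real LLM.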
    with patch.object(Agent, "execute_task") as mock_execute_task:
        mock_execute_task.side_effect = expected_outputs
        crew = Crew(agents=[agent], tasks=[task])
        results = crew.kickoff_for_each(inputs=inputs)

    assert len(results) == 1
    assert results == expected_outputs


@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_multiple_inputs():
    """Tests if kickoff_for_each works with multiple inputs."""
    from unittest.mock import patch

    inputs = [
        {"topic": "dog"},
        {"topic": "cat"},
        {"topic": "apple"},
    ]
    expected_outputs = [
        "Dogs are loyal companions and popular pets.",
        "Cats are independent and low-maintenance pets.",
        "Apples are a rich source of dietary fiber and vitamin C.",
    ]

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    with patch.object(Agent, "execute_task") as mock_execute_task:
        mock_execute_task.side_effect = expected_outputs
        crew = Crew(agents=[agent], tasks=[task])
        results = crew.kickoff_for_each(inputs=inputs)

    assert len(results) == len(inputs)
    for i, res in enumerate(results):
        assert res == expected_outputs[i]


@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_empty_input():
    """Tests if kickoff_for_each handles an empty input list."""
    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task])
    results = crew.kickoff_for_each(inputs=[])
    assert results == []


@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_invalid_input():
    """Tests if kickoff_for_each raises TypeError for invalid input types."""

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task])

    with pytest.raises(TypeError):
        # Pass a string instead of a list
        crew.kickoff_for_each("invalid input")


@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_error_handling():
    """Tests error handling in kickoff_for_each when kickoff raises an error."""
    from unittest.mock import patch

    inputs = [
        {"topic": "dog"},
        {"topic": "cat"},
        {"topic": "apple"},
    ]
    expected_outputs = [
        "Dogs are loyal companions and popular pets.",
        "Cats are independent and low-maintenance pets.",
        "Apples are a rich source of dietary fiber and vitamin C.",
    ]
    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task])

    with patch.object(Crew, "kickoff") as mock_kickoff:
        mock_kickoff.side_effect = expected_outputs[:2] + [
            Exception("Simulated kickoff error")
        ]
        with pytest.raises(Exception, match="Simulated kickoff error"):
            crew.kickoff_for_each(inputs=inputs)


@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_kickoff_async_basic_functionality_and_output():
    """Tests the basic functionality and output of kickoff_async."""
    from unittest.mock import patch

    inputs = {"topic": "dog"}

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    # Create the crew
    crew = Crew(
        agents=[agent],
        tasks=[task],
    )

    expected_output = "This is a sample output from kickoff."
    with patch.object(Crew, "kickoff", return_value=expected_output) as mock_kickoff:
        result = await crew.kickoff_async(inputs)

        assert isinstance(result, str), "Result should be a string"
        assert result == expected_output, "Result should match expected output"
        mock_kickoff.assert_called_once_with(inputs)


@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_async_kickoff_for_each_async_basic_functionality_and_output():
    """Tests the basic functionality and output of kickoff_for_each_async."""
    from unittest.mock import patch

    inputs = [
        {"topic": "dog"},
        {"topic": "cat"},
        {"topic": "apple"},
    ]

    # Define expected outputs for each input
    expected_outputs = [
        "Dogs are loyal companions and popular pets.",
        "Cats are independent and low-maintenance pets.",
        "Apples are a rich source of dietary fiber and vitamin C.",
    ]

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    with patch.object(
        Crew, "kickoff_async", side_effect=expected_outputs
    ) as mock_kickoff_async:
        crew = Crew(agents=[agent], tasks=[task])

        results = await crew.kickoff_for_each_async(inputs)

        assert len(results) == len(inputs)
        assert results == expected_outputs
        for input_data in inputs:
            mock_kickoff_async.assert_any_call(inputs=input_data)


@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_async_kickoff_for_each_async_empty_input():
    """Tests if kickoff_for_each_async handles an empty input list."""

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    # Create the crew
    crew = Crew(
        agents=[agent],
        tasks=[task],
    )

    # Call the function we are testing
    results = await crew.kickoff_for_each_async([])

    # Assertion
    assert results == [], "Result should be an empty list when input is empty"


def test_set_agents_step_callback():
    from unittest.mock import patch

    researcher_agent = Agent(
        role="Researcher",
        goal="Make the best research and analysis on content about AI and AI agents",
        backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on doing research and analysis for a new customer.",
        allow_delegation=False,
    )

    list_ideas = Task(
        description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
        expected_output="Bullet point list of 5 important events.",
        agent=researcher_agent,
        async_execution=True,
    )

    crew = Crew(
        agents=[researcher_agent],
        process=Process.sequential,
        tasks=[list_ideas],
        step_callback=lambda: None,
    )

    with patch.object(Agent, "execute_task") as execute:
        execute.return_value = "ok"
        crew.kickoff()
        assert researcher_agent.step_callback is not None


def test_dont_set_agents_step_callback_if_already_set():
    from unittest.mock import patch

    def agent_callback(_):
        pass

    def crew_callback(_):
        pass

    researcher_agent = Agent(
        role="Researcher",
        goal="Make the best research and analysis on content about AI and AI agents",
        backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on doing research and analysis for a new customer.",
        allow_delegation=False,
        step_callback=agent_callback,
    )

    list_ideas = Task(
        description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
        expected_output="Bullet point list of 5 important events.",
        agent=researcher_agent,
        async_execution=True,
    )

    crew = Crew(
        agents=[researcher_agent],
        process=Process.sequential,
        tasks=[list_ideas],
        step_callback=crew_callback,
    )

    with patch.object(Agent, "execute_task") as execute:
        execute.return_value = "ok"
        crew.kickoff()
        assert researcher_agent.step_callback is not crew_callback
        assert researcher_agent.step_callback is agent_callback


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_function_calling_llm():
    from unittest.mock import patch

    from langchain.tools import tool
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-3.5-turbo-0125")

    with patch.object(llm.client, "create", wraps=llm.client.create) as private_mock:

        @tool
        def learn_about_AI(topic) -> str:
            """Useful for when you need to learn about AI to write a paragraph about it."""
            return "AI is a very broad field."

        agent1 = Agent(
            role="test role",
            goal="test goal",
            backstory="test backstory",
            llm=ChatOpenAI(model="gpt-4-0125-preview"),
            tools=[learn_about_AI],
        )

        essay = Task(
            description="Write and then review a small paragraph on AI until it's AMAZING",
            expected_output="A 4 paragraph article about AI.",
            agent=agent1,
        )
        tasks = [essay]
        crew = Crew(agents=[agent1], tasks=tasks, function_calling_llm=llm)
        crew.kickoff()
        private_mock.assert_called()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_with_no_arguments():
    from langchain.tools import tool

    @tool
    def return_data() -> str:
        "Useful to get the sales related data"
        return "January: 5, February: 10, March: 15, April: 20, May: 25"

    researcher = Agent(
        role="Researcher",
        goal="Make the best research and analysis on content about AI and AI agents",
        backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on doing research and analysis for a new customer.",
        tools=[return_data],
        allow_delegation=False,
    )

    task = Task(
        description="Look at the available data and give me a sense on the total number of sales.",
        expected_output="The total number of sales as an integer",
        agent=researcher,
    )

    crew = Crew(agents=[researcher], tasks=[task])

    result = crew.kickoff()
    assert result == "75"


def test_code_execution_flag_adds_code_tool_upon_kickoff():
    from crewai_tools import CodeInterpreterTool

    programmer = Agent(
        role="Programmer",
        goal="Write code to solve problems.",
        backstory="You're a programmer who loves to solve problems with code.",
        allow_delegation=False,
        allow_code_execution=True,
    )
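
    # allow_code_execution=True should make kickoff attach a CodeInterpreterTool
    # to this agent, which the assertions below verify.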
    task = Task(
        description="How much is 2 + 2?",
        expected_output="The result of the sum as an integer.",
        agent=programmer,
    )

    crew = Crew(agents=[programmer], tasks=[task])
    crew.kickoff()
    assert len(programmer.tools) == 1
    assert programmer.tools[0].__class__ == CodeInterpreterTool


@pytest.mark.vcr(filter_headers=["authorization"])
def test_delegation_is_not_enabled_if_there_is_only_one_agent():
    from unittest.mock import patch

    researcher = Agent(
        role="Researcher",
        goal="Make the best research and analysis on content about AI and AI agents",
        backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on doing research and analysis for a new customer.",
        allow_delegation=True,
    )

    task = Task(
        description="Look at the available data and give me a sense on the total number of sales.",
        expected_output="The total number of sales as an integer",
        agent=researcher,
    )

    crew = Crew(agents=[researcher], tasks=[task])
    with patch.object(Task, "execute") as execute:
        execute.return_value = "ok"
        crew.kickoff()
        assert task.tools == []


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agents_do_not_get_delegation_tools_when_there_is_only_one_agent():
    agent = Agent(
        role="Researcher",
        goal="Be super empathetic.",
        backstory="You love to say howdy.",
        allow_delegation=False,
    )

    task = Task(description="say howdy", expected_output="Howdy!", agent=agent)

    crew = Crew(agents=[agent], tasks=[task])

    result = crew.kickoff()
    assert (
        result
        == "Howdy! I hope this message finds you well and brings a smile to your face. Have a fantastic day!"
    )
    assert len(agent.tools) == 0


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_usage_metrics_are_captured_for_sequential_process():
    agent = Agent(
        role="Researcher",
        goal="Be super empathetic.",
        backstory="You love to say howdy.",
        allow_delegation=False,
    )

    task = Task(description="say howdy", expected_output="Howdy!", agent=agent)

    crew = Crew(agents=[agent], tasks=[task])

    result = crew.kickoff()
    assert result == "Howdy!"
    assert crew.usage_metrics == {
        "completion_tokens": 17,
        "prompt_tokens": 158,
        "successful_requests": 1,
        "total_tokens": 175,
    }


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_usage_metrics_are_captured_for_hierarchical_process():
    from langchain_openai import ChatOpenAI

    agent = Agent(
        role="Researcher",
        goal="Be super empathetic.",
        backstory="You love to say howdy.",
        allow_delegation=False,
    )

    task = Task(description="Ask the researcher to say hi!", expected_output="Howdy!")

    crew = Crew(
        agents=[agent],
        tasks=[task],
        process=Process.hierarchical,
        manager_llm=ChatOpenAI(temperature=0, model="gpt-4o"),
    )

    result = crew.kickoff()
    assert result == '"Howdy!"'

    assert crew.usage_metrics == {
        "total_tokens": 1927,
        "prompt_tokens": 1557,
        "completion_tokens": 370,
        "successful_requests": 4,
    }


def test_crew_inputs_interpolate_both_agents_and_tasks():
    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="{points} bullet points about {topic}.",
    )

    crew = Crew(agents=[agent], tasks=[task], inputs={"topic": "AI", "points": 5})
    inputs = {"topic": "AI", "points": 5}
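    # Interpolation should substitute {topic} and {points} in the task fields as
    # well as in the agent's role, goal, and backstory.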
    crew._interpolate_inputs(inputs=inputs)  # Manual call for now

    assert crew.tasks[0].description == "Give me an analysis around AI."
    assert crew.tasks[0].expected_output == "5 bullet points about AI."
    assert crew.agents[0].role == "AI Researcher"
    assert crew.agents[0].goal == "Express hot takes on AI."
    assert crew.agents[0].backstory == "You have a lot of experience with AI."


def test_crew_inputs_interpolate_both_agents_and_tasks_diff():
    from unittest.mock import patch

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="{points} bullet points about {topic}.",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task])

    with patch.object(Agent, "execute_task") as execute:
        with patch.object(
            Agent, "interpolate_inputs", wraps=agent.interpolate_inputs
        ) as interpolate_agent_inputs:
            with patch.object(
                Task, "interpolate_inputs", wraps=task.interpolate_inputs
            ) as interpolate_task_inputs:
                execute.return_value = "ok"
                crew.kickoff(inputs={"topic": "AI", "points": 5})
                interpolate_agent_inputs.assert_called()
                interpolate_task_inputs.assert_called()


def test_task_callback_on_crew():
    from unittest.mock import patch

    researcher_agent = Agent(
        role="Researcher",
        goal="Make the best research and analysis on content about AI and AI agents",
        backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on doing research and analysis for a new customer.",
        allow_delegation=False,
    )

    list_ideas = Task(
        description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
        expected_output="Bullet point list of 5 important events.",
        agent=researcher_agent,
        async_execution=True,
    )

    crew = Crew(
        agents=[researcher_agent],
        process=Process.sequential,
        tasks=[list_ideas],
        task_callback=lambda: None,
    )

    with patch.object(Agent, "execute_task") as execute:
        execute.return_value = "ok"
        crew.kickoff()
        assert list_ideas.callback is not None


@pytest.mark.vcr(filter_headers=["authorization"])
def test_tools_with_custom_caching():
    from unittest.mock import patch

    from crewai_tools import tool

    @tool
    def multiplication_tool(first_number: int, second_number: int) -> int:
        """Useful for when you need to multiply two numbers together."""
        return first_number * second_number

    def cache_func(args, result):
        cache = result % 2 == 0
        return cache

    multiplication_tool.cache_function = cache_func
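
    # cache_func caches only even results: 2 * 6 = 12 gets cached, 3 * 1 = 3
    # does not, so exactly one cache entry is expected below.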
    writer1 = Agent(
        role="Writer",
        goal="You write lessons of math for kids.",
        backstory="You're an expert in writing and you love to teach kids but you know nothing of math.",
        tools=[multiplication_tool],
        allow_delegation=False,
    )

    writer2 = Agent(
        role="Writer",
        goal="You write lessons of math for kids.",
        backstory="You're an expert in writing and you love to teach kids but you know nothing of math.",
        tools=[multiplication_tool],
        allow_delegation=False,
    )

    task1 = Task(
        description="What is 2 times 6? Return only the number after using the multiplication tool.",
        expected_output="the result of multiplication",
        agent=writer1,
    )

    task2 = Task(
        description="What is 3 times 1? Return only the number after using the multiplication tool.",
        expected_output="the result of multiplication",
        agent=writer1,
    )

    task3 = Task(
        description="What is 2 times 6? Return only the number after using the multiplication tool.",
        expected_output="the result of multiplication",
        agent=writer2,
    )

    task4 = Task(
        description="What is 3 times 1? Return only the number after using the multiplication tool.",
        expected_output="the result of multiplication",
        agent=writer2,
    )

    crew = Crew(agents=[writer1, writer2], tasks=[task1, task2, task3, task4])

    with patch.object(
        CacheHandler, "add", wraps=crew._cache_handler.add
    ) as add_to_cache:
        with patch.object(CacheHandler, "read", wraps=crew._cache_handler.read) as _:
            result = crew.kickoff()
            add_to_cache.assert_called_once_with(
                tool="multiplication_tool",
                input={"first_number": 2, "second_number": 6},
                output=12,
            )
            assert result == "3"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_using_contextual_memory():
    from unittest.mock import patch

    math_researcher = Agent(
        role="Researcher",
        goal="You research about math.",
        backstory="You're an expert in research and you love to learn new things.",
        allow_delegation=False,
    )

    task1 = Task(
        description="Research a topic to teach a kid aged 6 about math.",
        expected_output="A topic, explanation, angle, and examples.",
        agent=math_researcher,
    )

    crew = Crew(
        agents=[math_researcher],
        tasks=[task1],
        memory=True,
    )

    with patch.object(ContextualMemory, "build_context_for_task") as contextual_mem:
        crew.kickoff()
        contextual_mem.assert_called_once()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_disabled_memory_using_contextual_memory():
    from unittest.mock import patch

    math_researcher = Agent(
        role="Researcher",
        goal="You research about math.",
        backstory="You're an expert in research and you love to learn new things.",
        allow_delegation=False,
    )

    task1 = Task(
        description="Research a topic to teach a kid aged 6 about math.",
        expected_output="A topic, explanation, angle, and examples.",
        agent=math_researcher,
    )

    crew = Crew(
        agents=[math_researcher],
        tasks=[task1],
        memory=False,
    )

    with patch.object(ContextualMemory, "build_context_for_task") as contextual_mem:
        crew.kickoff()
        contextual_mem.assert_not_called()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_log_file_output(tmp_path):
    test_file = tmp_path / "logs.txt"
    tasks = [
        Task(
            description="Say Hi",
            expected_output="The word: Hi",
            agent=researcher,
        )
    ]

    crew = Crew(agents=[researcher], tasks=tasks, output_log_file=str(test_file))
    crew.kickoff()
    assert test_file.exists()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_manager_agent():
    from unittest.mock import patch

    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
        expected_output="5 bullet points with a paragraph for each idea.",
    )

    manager = Agent(
        role="Manager",
        goal="Manage the crew and ensure the tasks are completed efficiently.",
        backstory="You're an experienced manager, skilled in overseeing complex projects and guiding teams to success. Your role is to coordinate the efforts of the crew members, ensuring that each task is completed on time and to the highest standard.",
        allow_delegation=False,
    )

    crew = Crew(
        agents=[researcher, writer],
        process=Process.hierarchical,
        manager_agent=manager,
        tasks=[task],
    )
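
    # The manager was created with allow_delegation=False, but wiring it in as
    # manager_agent should flip delegation on so it can assign work to the crew.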
    with patch.object(Task, "execute") as execute:
        crew.kickoff()
        assert manager.allow_delegation is True
        execute.assert_called()


def test_manager_agent_in_agents_raises_exception():
    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
        expected_output="5 bullet points with a paragraph for each idea.",
    )

    manager = Agent(
        role="Manager",
        goal="Manage the crew and ensure the tasks are completed efficiently.",
        backstory="You're an experienced manager, skilled in overseeing complex projects and guiding teams to success. Your role is to coordinate the efforts of the crew members, ensuring that each task is completed on time and to the highest standard.",
        allow_delegation=False,
    )

    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
        Crew(
            agents=[researcher, writer, manager],
            process=Process.hierarchical,
            manager_agent=manager,
            tasks=[task],
        )


def test_manager_agent_with_tools_raises_exception():
    from crewai_tools import tool

    @tool
    def testing_tool(first_number: int, second_number: int) -> int:
        """Useful for when you need to multiply two numbers together."""
        return first_number * second_number

    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
        expected_output="5 bullet points with a paragraph for each idea.",
    )

    manager = Agent(
        role="Manager",
        goal="Manage the crew and ensure the tasks are completed efficiently.",
        backstory="You're an experienced manager, skilled in overseeing complex projects and guiding teams to success. Your role is to coordinate the efforts of the crew members, ensuring that each task is completed on time and to the highest standard.",
        allow_delegation=False,
        tools=[testing_tool],
    )

    crew = Crew(
        agents=[researcher, writer],
        process=Process.hierarchical,
        manager_agent=manager,
        tasks=[task],
    )

    with pytest.raises(Exception):
        crew.kickoff()


@patch("crewai.crew.Crew.kickoff")
@patch("crewai.crew.CrewTrainingHandler")
@patch("crewai.crew.TaskEvaluator")
def test_crew_train_success(task_evaluator, crew_training_handler, kickoff):
    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
        expected_output="5 bullet points with a paragraph for each idea.",
    )

    crew = Crew(
        agents=[researcher, writer],
        tasks=[task],
    )
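
    # train() should run kickoff once per iteration, evaluate the collected
    # training data for each agent, and persist the evaluated results.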
    crew.train(n_iterations=2, inputs={"topic": "AI"})
    task_evaluator.assert_has_calls(
        [
            mock.call(researcher),
            mock.call().evaluate_training_data(
                training_data=crew_training_handler().load(),
                agent_id=str(researcher.id),
            ),
            mock.call().evaluate_training_data().model_dump(),
            mock.call(writer),
            mock.call().evaluate_training_data(
                training_data=crew_training_handler().load(),
                agent_id=str(writer.id),
            ),
            mock.call().evaluate_training_data().model_dump(),
        ]
    )

    crew_training_handler.assert_has_calls(
        [
            mock.call("training_data.pkl"),
            mock.call().load(),
            mock.call("trained_agents_data.pkl"),
            mock.call().save_trained_data(
                agent_id="Researcher",
                trained_data=task_evaluator().evaluate_training_data().model_dump(),
            ),
            mock.call("trained_agents_data.pkl"),
            mock.call().save_trained_data(
                agent_id="Senior Writer",
                trained_data=task_evaluator().evaluate_training_data().model_dump(),
            ),
            mock.call(),
            mock.call().load(),
            mock.call(),
            mock.call().load(),
        ]
    )

    kickoff.assert_has_calls(
        [mock.call(inputs={"topic": "AI"}), mock.call(inputs={"topic": "AI"})]
    )


def test_crew_train_error():
    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article",
        expected_output="5 bullet points with a paragraph for each idea.",
    )

    crew = Crew(
        agents=[researcher, writer],
        tasks=[task],
    )

    with pytest.raises(TypeError) as e:
        crew.train()
    assert "train() missing 1 required positional argument: 'n_iterations'" in str(e)


def test__setup_for_training():
    researcher.allow_delegation = True
    writer.allow_delegation = True
    agents = [researcher, writer]
    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article",
        expected_output="5 bullet points with a paragraph for each idea.",
    )

    crew = Crew(
        agents=agents,
        tasks=[task],
    )

    assert crew._train is False
    assert task.human_input is False

    for agent in agents:
        assert agent.allow_delegation is True
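
    # Entering training mode should enable human input on tasks and disable
    # delegation for every agent.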
    crew._setup_for_training()

    assert crew._train is True
    assert task.human_input is True

    for agent in agents:
        assert agent.allow_delegation is False