feat: change human input for unit testing
added documentation and unit test
@@ -23,6 +23,7 @@ Tasks in CrewAI can be designed to require collaboration between agents. For exa
 | **Output Pydantic** *(optional)* | Takes a pydantic model and returns the output as a pydantic object. **Agent LLM needs to be using an OpenAI client, could be Ollama for example but using the OpenAI wrapper** |
 | **Output File** *(optional)* | Takes a file path and saves the output of the task on it. |
 | **Callback** *(optional)* | A function to be executed after the task is completed. |
+| **Human Input** *(optional)* | Indicates whether the agent should ask for feedback at the end of the task. |
 
 ## Creating a Task
 
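To make the table above concrete, here is a minimal sketch of a `Task` that combines several of the listed attributes. It assumes the standard `crewai` `Agent`/`Task` constructors; the agent definition, callback function, and file path are purely illustrative and not part of the commit.

```python
from crewai import Agent, Task

# Illustrative agent; role, goal, and backstory are placeholders.
researcher = Agent(
    role="Researcher",
    goal="Summarize the latest AI announcements",
    backstory="An analyst who tracks AI developments.",
)

def on_task_done(output):
    # Callback: runs after the task completes, receiving the task output.
    print("Task finished:", output)

summary_task = Task(
    description="Summarize this week's most important AI announcements.",
    expected_output="A short bulleted summary.",
    agent=researcher,
    output_file="summary.md",  # Output File: save the result to this path
    callback=on_task_done,     # Callback: executed after the task completes
    human_input=True,          # Human Input: ask for feedback at the end of the task
)
```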
@@ -224,4 +225,4 @@ These validations help in maintaining the consistency and reliability of task ex
 
 ## Conclusion
 
 Tasks are the driving force behind the actions of agents in crewAI. By properly defining tasks and their outcomes, you set the stage for your AI agents to work effectively, either independently or as a collaborative unit. Equipping tasks with appropriate tools, understanding the execution process, and following robust validation practices are crucial for maximizing CrewAI's potential, ensuring agents are effectively prepared for their assignments and that tasks are executed as intended.
@@ -1,6 +1,6 @@
 ---
 title: Human Input on Execution
 description: Comprehensive guide on integrating CrewAI with human input during execution, for complex decision-making processes or when help is needed during complex tasks.
 ---
 
 # Human Input in Agent Execution
@@ -9,7 +9,7 @@ Human input plays a pivotal role in several agent execution scenarios, enabling
 
 ## Using Human Input with CrewAI
 
-Incorporating human input with CrewAI is straightforward, enhancing the agent's ability to make informed decisions. While the documentation previously mentioned using a "LangChain Tool" and a specific "DuckDuckGoSearchRun" tool from `langchain_community.tools`, it's important to clarify that the integration of such tools should align with the actual capabilities and configurations defined within your `Agent` class setup.
+Incorporating human input with CrewAI is straightforward, enhancing the agent's ability to make informed decisions. While the documentation previously mentioned using a "LangChain Tool" and a specific "DuckDuckGoSearchRun" tool from `langchain_community.tools`, it's important to clarify that the integration of such tools should align with the actual capabilities and configurations defined within your `Agent` class setup. Human input is now enabled by a simple flag on the task itself.
 
 ### Example:
 
@@ -23,14 +23,10 @@ import os
 from crewai import Agent, Task, Crew
 from crewai_tools import SerperDevTool
 
-from langchain.agents import load_tools
-
 os.environ["SERPER_API_KEY"] = "Your Key" # serper.dev API key
 os.environ["OPENAI_API_KEY"] = "Your Key"
 
-# Loading Human Tools
-human_tools = load_tools(["human"])
 # Loading Tools
 search_tool = SerperDevTool()
 
 # Define your agents with roles, goals, and tools
@@ -44,7 +40,7 @@ researcher = Agent(
   ),
   verbose=True,
   allow_delegation=False,
-  tools=[search_tool]+human_tools # Passing human tools to the agent
+  tools=[search_tool]
 )
 writer = Agent(
   role='Tech Content Strategist',
@@ -67,6 +63,7 @@ task1 = Task(
   ),
   expected_output='A comprehensive full report on the latest AI advancements in 2024, leave nothing out',
   agent=researcher,
+  human_input=True, # setting the flag on for human input in this task
 )
 
 task2 = Task(
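The example hunk above is cut off at `task2 = Task(`. Assuming `task2` follows the same pattern as `task1` and is assigned to the `writer` agent, wiring everything into a crew might look like the following sketch (a minimal illustration, not part of the commit):

```python
from crewai import Crew, Process

# Assemble the agents and tasks defined above into a sequential crew.
crew = Crew(
    agents=[researcher, writer],
    tasks=[task1, task2],
    process=Process.sequential,
    verbose=True,
)

result = crew.kickoff()
print(result)
```

With `human_input=True` on `task1`, execution pauses after the researcher produces its answer and prompts for feedback on the console before the task result is finalized.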
@@ -18,7 +18,7 @@ from crewai.utilities import I18N
 
 class CrewAgentExecutor(AgentExecutor):
     _i18n: I18N = I18N()
-    _should_ask_for_human_input: bool = False
+    should_ask_for_human_input: bool = False
     llm: Any = None
     iterations: int = 0
     task: Any = None
@@ -57,7 +57,7 @@ class CrewAgentExecutor(AgentExecutor):
         intermediate_steps: List[Tuple[AgentAction, str]] = []
         # Allowing human input given task setting
         if self.task.human_input:
-            self._should_ask_for_human_input = True
+            self.should_ask_for_human_input = True
         # Let's start tracking the number of iterations and time elapsed
         self.iterations = 0
         time_elapsed = 0.0
@@ -173,9 +173,9 @@ class CrewAgentExecutor(AgentExecutor):
 
         # If the tool chosen is the finishing tool, then we end and return.
         if isinstance(output, AgentFinish):
-            if self._should_ask_for_human_input:
+            if self.should_ask_for_human_input:
                 # Making sure we only ask for it once, so disabling for the next thought loop
-                self._should_ask_for_human_input = False
+                self.should_ask_for_human_input = False
                 human_feedback = self._ask_human_input(output.return_values["output"])
                 action = AgentAction(
                     tool="Human Input", tool_input=human_feedback, log=output.log
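The executor changes above are easier to follow as a standalone flow: when the task has `human_input` enabled, the agent asks for feedback exactly once after it would otherwise finish, and the feedback is fed back into the reasoning loop. The sketch below illustrates that idea only; it is not the actual CrewAgentExecutor code.

```python
# Simplified illustration of the human-input loop, not the real CrewAgentExecutor.
def run_agent_loop(step, ask_human, human_input_enabled):
    """`step(feedback)` returns ("finish", answer) or ("action", observation)."""
    should_ask_for_human_input = human_input_enabled
    feedback = None
    while True:
        kind, value = step(feedback)
        feedback = None
        if kind == "finish":
            if should_ask_for_human_input:
                # Ask only once, then disable for the next thought loop.
                should_ask_for_human_input = False
                feedback = ask_human(value)
                continue
            return value
        # Otherwise the agent took a tool action; keep iterating.
```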
@@ -680,3 +680,30 @@ def test_agent_definition_based_on_dict():
     assert agent.backstory == "test backstory"
     assert agent.verbose == True
     assert agent.tools == []
+
+
+# test for human input
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_agent_human_input():
+    from unittest.mock import patch
+
+    config = {
+        "role": "test role",
+        "goal": "test goal",
+        "backstory": "test backstory",
+    }
+
+    agent = Agent(config=config)
+
+    task = Task(
+        agent=agent,
+        description="Say the word: Hi",
+        expected_output="The word: Hi",
+        human_input=True,
+    )
+
+    with patch.object(CrewAgentExecutor, "_ask_human_input") as mock_human_input:
+        mock_human_input.return_value = "Hello"
+        output = agent.execute_task(task)
+        mock_human_input.assert_called_once()
+        assert output == "Hello"
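The same patching trick works outside the library's own test suite whenever a task sets `human_input=True` and you need `kickoff()` to run unattended. A rough sketch follows; the `CrewAgentExecutor` import path is assumed and may differ between versions, and `crew` stands for your own `Crew` instance.

```python
from unittest.mock import patch

# Assumed module path; adjust to wherever CrewAgentExecutor lives in your version.
from crewai.agents.executor import CrewAgentExecutor

with patch.object(CrewAgentExecutor, "_ask_human_input", return_value="Looks good, ship it"):
    result = crew.kickoff()  # runs without blocking on a console prompt
```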