Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-10 00:28:31 +00:00
feat: change human input for unit testing
Added documentation and a unit test.
@@ -23,6 +23,7 @@ Tasks in CrewAI can be designed to require collaboration between agents. For exa
 | **Output Pydantic** *(optional)* | Takes a pydantic model and returns the output as a pydantic object. **The agent's LLM needs to be using an OpenAI client; it could be Ollama, for example, but via the OpenAI wrapper.** |
 | **Output File** *(optional)* | Takes a file path and saves the output of the task to it. |
 | **Callback** *(optional)* | A function to be executed after the task is completed. |
+| **Human Input** *(optional)* | Indicates whether the agent should ask for human feedback at the end of the task. |
 
 ## Creating a Task
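For orientation, here is a minimal sketch of how the attributes in this table compose on a single Task. The `save_report` callback, the file path, and the agent fields are illustrative placeholders, not part of this commit:

from crewai import Agent, Task

def save_report(output):
    # Illustrative callback: invoked after the task completes.
    print("Task finished:", output)

reporter = Agent(
    role="Reporter",
    goal="Summarize findings",
    backstory="A concise analyst",  # placeholder backstory
)

task = Task(
    description="Summarize the findings",
    expected_output="A short summary",
    agent=reporter,
    output_file="report.md",  # Output File: save the result to this path
    callback=save_report,     # Callback: run after the task completes
    human_input=True,         # Human Input: ask for feedback at the end
)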
@@ -9,7 +9,7 @@ Human input plays a pivotal role in several agent execution scenarios, enabling
 
 ## Using Human Input with CrewAI
 
-Incorporating human input with CrewAI is straightforward, enhancing the agent's ability to make informed decisions. While the documentation previously mentioned using a "LangChain Tool" and a specific "DuckDuckGoSearchRun" tool from `langchain_community.tools`, it's important to clarify that the integration of such tools should align with the actual capabilities and configurations defined within your `Agent` class setup.
+Incorporating human input with CrewAI is straightforward, enhancing the agent's ability to make informed decisions. While the documentation previously mentioned using a "LangChain Tool" and a specific "DuckDuckGoSearchRun" tool from `langchain_community.tools`, it's important to clarify that the integration of such tools should align with the actual capabilities and configurations defined within your `Agent` class setup. Human input is now enabled by a simple flag on the task itself.
 
 ### Example:
@@ -23,14 +23,10 @@ import os
 from crewai import Agent, Task, Crew
 from crewai_tools import SerperDevTool
 
-from langchain.agents import load_tools
-
 os.environ["SERPER_API_KEY"] = "Your Key" # serper.dev API key
 os.environ["OPENAI_API_KEY"] = "Your Key"
 
-# Loading Human Tools
-human_tools = load_tools(["human"])
+# Loading Tools
 search_tool = SerperDevTool()
 
 # Define your agents with roles, goals, and tools
@@ -44,7 +40,7 @@ researcher = Agent(
   ),
   verbose=True,
   allow_delegation=False,
-  tools=[search_tool]+human_tools # Passing human tools to the agent
+  tools=[search_tool]
 )
 writer = Agent(
   role='Tech Content Strategist',
@@ -67,6 +63,7 @@ task1 = Task(
   ),
   expected_output='A comprehensive full report on the latest AI advancements in 2024, leave nothing out',
   agent=researcher,
+  human_input=True, # setting the flag on for human input in this task
 )
 
 task2 = Task(
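Pieced together, the updated docs example from the three hunks above reads roughly as follows, trimmed to the researcher path. The agent fields, the task description, and the Crew wiring outside the shown context lines are assumptions based on the surrounding docs, not verbatim from this commit:

import os
from crewai import Agent, Task, Crew
from crewai_tools import SerperDevTool

os.environ["SERPER_API_KEY"] = "Your Key"  # serper.dev API key
os.environ["OPENAI_API_KEY"] = "Your Key"

# Loading Tools
search_tool = SerperDevTool()

researcher = Agent(
    role='Senior Research Analyst',                       # assumed; not shown in the hunks
    goal='Uncover the latest AI advancements',            # assumed
    backstory='An expert analyst at a tech think tank',   # assumed
    verbose=True,
    allow_delegation=False,
    tools=[search_tool],
)

task1 = Task(
    description='Research the latest AI advancements in 2024',  # assumed
    expected_output='A comprehensive full report on the latest AI advancements in 2024, leave nothing out',
    agent=researcher,
    human_input=True,  # setting the flag on for human input in this task
)

# Assumed wiring: run the single task through a crew.
crew = Crew(agents=[researcher], tasks=[task1])
result = crew.kickoff()
print(result)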
@@ -18,7 +18,7 @@ from crewai.utilities import I18N
 
 class CrewAgentExecutor(AgentExecutor):
     _i18n: I18N = I18N()
-    _should_ask_for_human_input: bool = False
+    should_ask_for_human_input: bool = False
     llm: Any = None
     iterations: int = 0
     task: Any = None
@@ -57,7 +57,7 @@ class CrewAgentExecutor(AgentExecutor):
         intermediate_steps: List[Tuple[AgentAction, str]] = []
         # Allowing human input given task setting
         if self.task.human_input:
-            self._should_ask_for_human_input = True
+            self.should_ask_for_human_input = True
         # Let's start tracking the number of iterations and time elapsed
         self.iterations = 0
         time_elapsed = 0.0
@@ -173,9 +173,9 @@ class CrewAgentExecutor(AgentExecutor):
 
             # If the tool chosen is the finishing tool, then we end and return.
             if isinstance(output, AgentFinish):
-                if self._should_ask_for_human_input:
+                if self.should_ask_for_human_input:
                     # Making sure we only ask for it once, so disabling for the next thought loop
-                    self._should_ask_for_human_input = False
+                    self.should_ask_for_human_input = False
                     human_feedback = self._ask_human_input(output.return_values["output"])
                     action = AgentAction(
                         tool="Human Input", tool_input=human_feedback, log=output.log
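The executor hands the actual prompt off to self._ask_human_input, whose body is not shown in this diff. Purely as a hedged sketch of what such a method might do, not the code from this commit:

def _ask_human_input(self, final_answer: str) -> str:
    # Hypothetical sketch; the real method body is not part of this diff.
    print(f"Agent's final answer:\n{final_answer}")
    return input("Provide feedback on the result (press Enter to accept): ")

Whatever string the human returns is then wrapped in an AgentAction with tool="Human Input", so the agent runs one more thought loop incorporating the feedback before finishing.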
@@ -680,3 +680,30 @@ def test_agent_definition_based_on_dict():
     assert agent.backstory == "test backstory"
     assert agent.verbose == True
     assert agent.tools == []
+
+
+# test for human input
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_agent_human_input():
+    from unittest.mock import patch
+
+    config = {
+        "role": "test role",
+        "goal": "test goal",
+        "backstory": "test backstory",
+    }
+
+    agent = Agent(config=config)
+
+    task = Task(
+        agent=agent,
+        description="Say the word: Hi",
+        expected_output="The word: Hi",
+        human_input=True,
+    )
+
+    with patch.object(CrewAgentExecutor, "_ask_human_input") as mock_human_input:
+        mock_human_input.return_value = "Hello"
+        output = agent.execute_task(task)
+        mock_human_input.assert_called_once()
+        assert output == "Hello"
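The test stubs the interactive prompt with unittest.mock.patch.object, so no real stdin read occurs, while @pytest.mark.vcr replays recorded HTTP interactions instead of hitting the live LLM API. The same mocking pattern in isolation looks like this; the Prompter class here is illustrative, not from the codebase:

from unittest.mock import patch

class Prompter:
    def ask(self) -> str:
        # Normally blocks waiting for keyboard input.
        return input("feedback: ")

def run(p: Prompter) -> str:
    return p.ask().upper()

# patch.object swaps Prompter.ask for a MagicMock inside the block,
# so no real input() call happens and every call is recorded.
with patch.object(Prompter, "ask") as mock_ask:
    mock_ask.return_value = "hello"
    assert run(Prompter()) == "HELLO"
    mock_ask.assert_called_once()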