feat: change human input for unit testing

Added documentation and a unit test.
GabeKoga
2024-04-01 10:01:54 -03:00
parent 1774fe8561
commit a07c255e06
4 changed files with 38 additions and 13 deletions

View File

@@ -23,6 +23,7 @@ Tasks in CrewAI can be designed to require collaboration between agents. For exa
| **Output Pydantic** *(optional)* | Takes a Pydantic model and returns the output as a Pydantic object. **The agent's LLM must use an OpenAI-compatible client; it can be Ollama, for example, as long as it goes through the OpenAI wrapper.** |
| **Output File** *(optional)* | Takes a file path and saves the task output to it. |
| **Callback** *(optional)* | A function to be executed after the task is completed. |
| **Human Input** *(optional)* | Indicates whether the agent should ask for human feedback at the end of the task. |
## Creating a Task
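
For reference, a minimal sketch of how several of these attributes, including the new `human_input` flag, are set when constructing a `Task` (illustrative only; the agent and the other values are made up and not part of this diff):

```python
from crewai import Agent, Task

# Hypothetical agent, defined only so the sketch is self-contained.
researcher = Agent(
    role="Researcher",
    goal="Track the latest AI developments",
    backstory="An analyst who follows AI research closely.",
)

task = Task(
    description="Summarize this week's AI news",
    expected_output="A short bullet-point summary",
    agent=researcher,
    output_file="summary.md",  # assumed: saves the task output to this path
    human_input=True,          # the flag introduced by this commit
)
```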

View File

@@ -9,7 +9,7 @@ Human input plays a pivotal role in several agent execution scenarios, enabling
## Using Human Input with CrewAI
Incorporating human input with CrewAI is straightforward, enhancing the agent's ability to make informed decisions. While the documentation previously mentioned using a "LangChain Tool" and a specific "DuckDuckGoSearchRun" tool from `langchain_community.tools`, it's important to clarify that the integration of such tools should align with the actual capabilities and configurations defined within your `Agent` class setup.
Incorporating human input with CrewAI is straightforward, enhancing the agent's ability to make informed decisions. While the documentation previously mentioned using a "LangChain Tool" and a specific "DuckDuckGoSearchRun" tool from `langchain_community.tools`, it's important to clarify that the integration of such tools should align with the actual capabilities and configurations defined within your `Agent` class setup. Human input is now enabled by a simple flag that is turned on in the task itself.
### Example:
@@ -23,14 +23,10 @@ import os
from crewai import Agent, Task, Crew
from crewai_tools import SerperDevTool
from langchain.agents import load_tools
os.environ["SERPER_API_KEY"] = "Your Key" # serper.dev API key
os.environ["OPENAI_API_KEY"] = "Your Key"
# Loading Human Tools
human_tools = load_tools(["human"])
# Loading Tools
search_tool = SerperDevTool()
# Define your agents with roles, goals, and tools
@@ -44,7 +40,7 @@ researcher = Agent(
  ),
  verbose=True,
  allow_delegation=False,
  tools=[search_tool]+human_tools # Passing human tools to the agent
  tools=[search_tool]
)
writer = Agent(
  role='Tech Content Strategist',
@@ -67,6 +63,7 @@ task1 = Task(
  ),
  expected_output='A comprehensive full report on the latest AI advancements in 2024, leave nothing out',
  agent=researcher,
  human_input=True, # setting the flag on for human input in this task
)
task2 = Task(
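
The hunk ends partway through `task2`; for orientation, a sketch of how the rest of this docs example typically continues (assumed, not shown in this commit):

```python
task2 = Task(
  description=(
    "Using the research findings, write an engaging blog post about the latest AI advancements."
  ),
  expected_output='A well-structured, four-paragraph blog post',
  agent=writer,
)

crew = Crew(
  agents=[researcher, writer],
  tasks=[task1, task2],
  verbose=2,
)

result = crew.kickoff()
print(result)
```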

View File

@@ -18,7 +18,7 @@ from crewai.utilities import I18N
class CrewAgentExecutor(AgentExecutor):
    _i18n: I18N = I18N()
    _should_ask_for_human_input: bool = False
    should_ask_for_human_input: bool = False
    llm: Any = None
    iterations: int = 0
    task: Any = None
@@ -57,7 +57,7 @@ class CrewAgentExecutor(AgentExecutor):
        intermediate_steps: List[Tuple[AgentAction, str]] = []
        # Allowing human input given task setting
        if self.task.human_input:
            self._should_ask_for_human_input = True
            self.should_ask_for_human_input = True
        # Let's start tracking the number of iterations and time elapsed
        self.iterations = 0
        time_elapsed = 0.0
@@ -173,9 +173,9 @@ class CrewAgentExecutor(AgentExecutor):
        # If the tool chosen is the finishing tool, then we end and return.
        if isinstance(output, AgentFinish):
            if self._should_ask_for_human_input:
            if self.should_ask_for_human_input:
                # Making sure we only ask for it once, so disabling for the next thought loop
                self._should_ask_for_human_input = False
                self.should_ask_for_human_input = False
                human_feedback = self._ask_human_input(output.return_values["output"])
                action = AgentAction(
                    tool="Human Input", tool_input=human_feedback, log=output.log

View File

@@ -680,3 +680,30 @@ def test_agent_definition_based_on_dict():
    assert agent.backstory == "test backstory"
    assert agent.verbose == True
    assert agent.tools == []
# test for human input
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_human_input():
    from unittest.mock import patch

    config = {
        "role": "test role",
        "goal": "test goal",
        "backstory": "test backstory",
    }

    agent = Agent(config=config)

    task = Task(
        agent=agent,
        description="Say the word: Hi",
        expected_output="The word: Hi",
        human_input=True,
    )

    with patch.object(CrewAgentExecutor, "_ask_human_input") as mock_human_input:
        mock_human_input.return_value = "Hello"
        output = agent.execute_task(task)
        mock_human_input.assert_called_once()
        assert output == "Hello"