fix: update CrewAgentExecutor.invoke type signature

- Change the inputs parameter from Dict[str, str] to Dict[str, Union[str, bool, None]]
- Match actual usage, where ask_for_human_input can be a bool or None
Greyson LaLonde
2025-07-22 10:27:58 -04:00
parent a893e6030b
commit cf0a17f099
2 changed files with 3 additions and 4 deletions


@@ -557,7 +557,7 @@ class Agent(BaseAgent):
                 "input": task_prompt,
                 "tool_names": self.agent_executor.tools_names,
                 "tools": self.agent_executor.tools_description,
-                "ask_for_human_input": bool(task.human_input),
+                "ask_for_human_input": task.human_input,
             }
         )["output"]


@@ -96,7 +96,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             )
         )
-    def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
+    def invoke(self, inputs: Dict[str, Union[str, bool, None]]) -> Dict[str, Any]:
         if "system" in self.prompt:
             system_prompt = self._format_prompt(self.prompt.get("system", ""), inputs)
             user_prompt = self._format_prompt(self.prompt.get("user", ""), inputs)
@@ -122,7 +122,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             handle_unknown_error(self._printer, e)
             raise
         if self.ask_for_human_input:
             formatted_answer = self._handle_human_feedback(formatted_answer)
@@ -156,7 +155,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 messages=self.messages,
                 callbacks=self.callbacks,
                 printer=self._printer,
-                from_task=self.task
+                from_task=self.task,
             )
             formatted_answer = process_llm_response(answer, self.use_stop_words)
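
Taken together, the two files stay consistent: the payload built in Agent may now carry a bool or None, and the executor's annotation accepts it. A small self-contained sketch of the widened signature follows; StubExecutor and the literal values are hypothetical, and only the dict keys and the Dict[str, Union[str, bool, None]] annotation come from the diff above:

from typing import Any, Dict, Union

# Alias for the widened inputs type from the diff.
InvokeInputs = Dict[str, Union[str, bool, None]]


class StubExecutor:
    """Hypothetical stand-in for CrewAgentExecutor; only the invoke()
    signature mirrors the change above."""

    def invoke(self, inputs: InvokeInputs) -> Dict[str, Any]:
        # The real executor formats prompts and runs the LLM loop; the stub
        # just echoes the flag to show that bool/None values type-check.
        return {"output": f"ask_for_human_input={inputs['ask_for_human_input']!r}"}


inputs: InvokeInputs = {
    "input": "example task prompt",     # str
    "tool_names": "search",             # str
    "tools": "search: look things up",  # str
    "ask_for_human_input": None,        # bool or None, passed through un-coerced
}

print(StubExecutor().invoke(inputs)["output"])  # ask_for_human_input=None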