v1 of HITL working

Brandon Hancock
2024-12-03 17:05:28 -05:00
parent 1af95f5146
commit 00197b9690
2 changed files with 52 additions and 7 deletions


@@ -87,35 +87,79 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             self.llm.stop = self.stop
 
     def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
+        print("Invoke method called with inputs:", inputs)
         if "system" in self.prompt:
             system_prompt = self._format_prompt(self.prompt.get("system", ""), inputs)
             user_prompt = self._format_prompt(self.prompt.get("user", ""), inputs)
+            print("System prompt:", system_prompt)
+            print("User prompt:", user_prompt)
             self.messages.append(self._format_msg(system_prompt, role="system"))
             self.messages.append(self._format_msg(user_prompt))
         else:
             user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
+            print("User prompt:", user_prompt)
             self.messages.append(self._format_msg(user_prompt))
 
         self._show_start_logs()
 
         self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
-        formatted_answer = self._invoke_loop()
+        print("Initial ask_for_human_input:", self.ask_for_human_input)
 
-        if self.ask_for_human_input:
+        formatted_answer = self._invoke_loop()
+        print("Initial formatted answer:", formatted_answer)
+
+        while self.ask_for_human_input:
+            print("Entering feedback loop")
             human_feedback = self._ask_human_input(formatted_answer.output)
+            print("Human feedback received:", human_feedback)
+
             if self.crew and self.crew._train:
                 self._handle_crew_training_output(formatted_answer, human_feedback)
-            # Making sure we only ask for it once, so disabling for the next thought loop
-            self.ask_for_human_input = False
-            self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
-            formatted_answer = self._invoke_loop()
+
+            # Make an LLM call to verify if additional changes are requested based on human feedback
+            additional_changes_requested_prompt = self._i18n.slice(
+                "human_feedback_classification"
+            ).format(feedback=human_feedback)
+            print(
+                "Additional changes prompt for LLM:",
+                additional_changes_requested_prompt,
+            )
+
+            additional_changes_requested_response = (
+                self.llm.call(
+                    [
+                        self._format_msg(
+                            additional_changes_requested_prompt, role="system"
+                        )
+                    ],
+                    callbacks=self.callbacks,
+                )
+                .strip()
+                .lower()
+            )
+            print(
+                "Additional changes response from LLM:",
+                additional_changes_requested_response,
+            )
+
+            if additional_changes_requested_response == "false":
+                print("Human is satisfied, exiting loop")
+                self.ask_for_human_input = False
+            else:
+                print("Human wants changes, continuing loop")
+                self.ask_for_human_input = True
+                self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
+                formatted_answer = self._invoke_loop()
+                print("Updated formatted answer:", formatted_answer)
 
         if self.crew and self.crew._train:
             self._handle_crew_training_output(formatted_answer)
 
         self._create_short_term_memory(formatted_answer)
         self._create_long_term_memory(formatted_answer)
+        print("Final output:", formatted_answer.output)
         return {"output": formatted_answer.output}
 
     def _invoke_loop(self, formatted_answer=None):
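
The hunk above inlines the whole feedback loop into invoke: produce an answer, ask the human, classify the feedback with an LLM, and re-run the agent until the classifier reports satisfaction. A minimal standalone sketch of that control flow follows; the helper names (run_agent, ask_human, classify_feedback) and the max_rounds safety cap are illustrative additions, not part of this commit.

    from typing import Callable, Dict, List

    def hitl_feedback_loop(
        run_agent: Callable[[List[Dict[str, str]]], str],
        ask_human: Callable[[str], str],
        classify_feedback: Callable[[str], str],
        messages: List[Dict[str, str]],
        max_rounds: int = 5,
    ) -> str:
        # First pass: produce an initial answer from the current message history.
        answer = run_agent(messages)
        for _ in range(max_rounds):
            # Show the answer to the human and collect free-form feedback.
            feedback = ask_human(answer)
            # Ask the LLM-based classifier whether further changes are requested.
            # By the convention in this commit, "false" means the human is satisfied.
            if classify_feedback(feedback).strip().lower() == "false":
                break
            # Feed the feedback back into the conversation and re-run the agent.
            messages.append({"role": "user", "content": f"Feedback: {feedback}"})
            answer = run_agent(messages)
        return answer

In the commit itself, self._invoke_loop, self._ask_human_input, and the self.llm.call against the new human_feedback_classification slice play those three roles, and the loop keeps going until the classifier answers "false".
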


@@ -22,7 +22,8 @@
"sumamrize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
"summary": "This is a summary of our conversation so far:\n{merged_summary}",
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python."
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
"human_feedback_classification": "Determine if the following feedback indicates that the user is satisfied or if further changes are needed. Respond with 'True' if further changes are needed, or 'False' if the user is satisfied. **Important** Do not include any additional commentary outside of your 'True' or 'False' response.\n\nFeedback: \"{feedback}\""
},
"errors": {
"force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",