mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-11 17:18:29 +00:00
Clean up

@@ -87,20 +87,15 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        self.llm.stop = self.stop

    def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        print("prompt: ", self.prompt)
        print("inputs: ", inputs)
        if "system" in self.prompt:
            system_prompt = self._format_prompt(self.prompt.get("system", ""), inputs)
            print("system_prompt: ", system_prompt)
            user_prompt = self._format_prompt(self.prompt.get("user", ""), inputs)
            print("user_prompt: ", user_prompt)
            self.messages.append(self._format_msg(system_prompt, role="system"))
            self.messages.append(self._format_msg(user_prompt))
        else:
            user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
            self.messages.append(self._format_msg(user_prompt))

        print("total messages at invoke: ", len(self.messages))
        self._show_start_logs()

        self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))

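The invoke() hunk above splits the agent prompt into a system template and a user template when a "system" key is present, substitutes the task inputs into each, and appends the results to the running message list. A minimal sketch of that step, assuming the unshown _format_prompt/_format_msg helpers do simple placeholder substitution and chat-message wrapping (their real bodies are not part of this diff):

from typing import Dict, List

def format_prompt(template: str, inputs: Dict[str, str]) -> str:
    # Assumed behaviour: replace {placeholders} in the template with task inputs.
    for key, value in inputs.items():
        template = template.replace(f"{{{key}}}", value)
    return template

def format_msg(content: str, role: str = "user") -> Dict[str, str]:
    # Assumed behaviour: wrap text in the {"role": ..., "content": ...} shape chat APIs expect.
    return {"role": role, "content": content}

def build_messages(prompt: Dict[str, str], inputs: Dict[str, str]) -> List[Dict[str, str]]:
    messages: List[Dict[str, str]] = []
    if "system" in prompt:
        # Split templates become two separate chat messages, system first.
        messages.append(format_msg(format_prompt(prompt["system"], inputs), role="system"))
        messages.append(format_msg(format_prompt(prompt["user"], inputs)))
    else:
        # A single combined template becomes one user message.
        messages.append(format_msg(format_prompt(prompt["prompt"], inputs)))
    return messages

# Example:
# build_messages({"system": "You are {role}.", "user": "Task: {input}"},
#                {"role": "a researcher", "input": "summarize X"})
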
@@ -417,12 +412,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
            AgentFinish: The final output after incorporating human feedback.
        """
        while self.ask_for_human_input:
            print("Messages at human feedback:")
            for idx, message in enumerate(self.messages, start=1):
                print(f"Message {idx}: {message}")
            print("Total messages at human feedback: ", len(self.messages))
            human_feedback = self._ask_human_input(formatted_answer.output)
            print("Human feedback: ", human_feedback)

            if self.crew and self.crew._train:
                self._handle_crew_training_output(formatted_answer, human_feedback)

@@ -473,9 +463,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                self.ask_for_human_input = True
                # Add human feedback to messages
                self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
                print("Messages after human feedback:")
                for idx, message in enumerate(self.messages, start=1):
                    print(f"Message {idx}: {message}")
                # Invoke the loop again with updated messages
                formatted_answer = self._invoke_loop()

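The two hunks above belong to the human-feedback path: the executor asks the human for feedback on the current answer, optionally records it for training, and, when further changes are requested, appends the feedback as a new message and re-runs the agent loop. A simplified sketch of that control flow, with callables standing in for crewAI internals (_ask_human_input, the LLM-based "does this require changes?" check, and _invoke_loop) whose bodies are not shown in this diff:

from typing import Callable, Dict, List

def feedback_loop(
    answer: str,
    messages: List[Dict[str, str]],
    ask_human: Callable[[str], str],       # stand-in for _ask_human_input
    wants_changes: Callable[[str], bool],  # stand-in for the feedback-classification check
    rerun: Callable[[], str],              # stand-in for _invoke_loop
) -> str:
    # Keep collecting feedback and re-running until the human accepts the answer.
    while True:
        feedback = ask_human(answer)
        if not wants_changes(feedback):
            return answer
        # Add human feedback to the conversation and invoke the loop again.
        messages.append({"role": "user", "content": f"Feedback: {feedback}"})
        answer = rerun()
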
@@ -167,8 +167,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
    _routers: Dict[str, str] = {}
    _router_paths: Dict[str, List[str]] = {}
    initial_state: Union[Type[T], T, None] = None

    # Define a single event emitter signal
    event_emitter = Signal("event_emitter")

    def __class_getitem__(cls: Type["Flow"], item: Type[T]) -> Type["Flow"]:

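event_emitter above is a single shared signal on the Flow class, and the hunks below send every lifecycle event through it. Assuming the Signal here is blinker's (the usual library behind this Signal(name) / send(sender, **kwargs) API shape), the basic pattern is:

from blinker import Signal

# One shared signal; any number of receivers; send() delivers keyword
# arguments to every connected receiver.
event_emitter = Signal("event_emitter")

def on_event(sender, **kwargs):
    # Receivers get the sender plus whatever keyword arguments send() passed.
    print(f"received from {sender!r}: {kwargs}")

event_emitter.connect(on_event)

# Same shape as the self.event_emitter.send(self, event=...) calls below:
event_emitter.send("some_flow_instance", event={"type": "flow_started"})
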
@@ -264,7 +262,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
        Returns:
            The final output from the flow execution.
        """
        # Emit flow_started event
        self.event_emitter.send(
            self,
            event=FlowStartedEvent(

@@ -306,7 +303,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
        # Determine the final output (from the last executed method)
        final_output = self._method_outputs[-1] if self._method_outputs else None

        # Emit flow_finished event
        self.event_emitter.send(
            self,
            event=FlowFinishedEvent(

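The final_output line above is what makes a flow's result simply the return value of the last method that executed. A small illustrative flow using the decorator-based API from crewAI's flow examples (TinyFlow and its method names are invented for the illustration):

from crewai.flow.flow import Flow, listen, start

class TinyFlow(Flow):
    @start()
    def first(self):
        return "raw data"

    @listen(first)
    def second(self, data):
        # Runs last, so its return value becomes the flow's final output.
        return f"processed: {data}"

result = TinyFlow().kickoff()
print(result)  # "processed: raw data"
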
@@ -380,7 +376,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
        try:
            method = self._methods[listener_name]

            # Emit method_execution_started event
            self.event_emitter.send(
                self,
                event=MethodExecutionStartedEvent(

@@ -405,7 +400,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
                # If listener does not expect parameters, call without arguments
                listener_result = await self._execute_method(listener_name, method)

            # Emit method_execution_finished event
            self.event_emitter.send(
                self,
                event=MethodExecutionFinishedEvent(

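Because the started/finished events above all pass through the same class-level signal, outside code can observe a flow by connecting a receiver to it. A hedged sketch; the exact attributes on FlowStartedEvent and the other event classes are not visible in these hunks, so the receiver below only reports the event's type:

def log_flow_events(sender, event=None, **kwargs):
    # Every emit above passes the flow instance as the sender and the
    # event object via the `event` keyword argument.
    print(f"{type(event).__name__} from {type(sender).__name__}")

# Usage (assuming `flow` is a Flow instance like those in the hunks above):
# flow.event_emitter.connect(log_flow_events)
# flow.kickoff()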