mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-19 12:58:14 +00:00
Making all tests good
@@ -63,7 +63,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        self.name_to_tool_map = {tool.name: tool for tool in self.tools}

    def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        print("starting invoke")
        if "system" in self.prompt:
            system_prompt = self._format_prompt(self.prompt["system"], inputs)
            user_prompt = self._format_prompt(self.prompt["user"], inputs)
@@ -73,13 +72,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        else:
            user_prompt = self._format_prompt(self.prompt["prompt"], inputs)
        self.messages.append(self._format_msg(user_prompt))
        print("after messages")
        print(self.messages)
        self.ask_for_human_input = inputs.get("ask_for_human_input", False)

        formatted_answer = self._invoke_loop()
        print("111after formatted_answer")
        print(formatted_answer)

        if self.ask_for_human_input:
            human_feedback = self._ask_human_input(formatted_answer.output)
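
For orientation while reading this hunk: invoke() formats the configured prompt templates with the task inputs, seeds self.messages, and then defers to _invoke_loop(). A minimal sketch of that message assembly, assuming OpenAI-style role/content dicts; build_messages is a hypothetical helper, not part of crewAI:

def build_messages(prompt: dict, inputs: dict) -> list:
    # Fill the prompt templates with task inputs, then wrap them as chat
    # messages (hypothetical stand-in for _format_prompt/_format_msg).
    if "system" in prompt:
        return [
            {"role": "system", "content": prompt["system"].format(**inputs)},
            {"role": "user", "content": prompt["user"].format(**inputs)},
        ]
    return [{"role": "user", "content": prompt["prompt"].format(**inputs)}]
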
@@ -93,36 +88,25 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        return {"output": formatted_answer.output}

    def _invoke_loop(self, formatted_answer=None):
        print("starting _invoke_loop")
        try:
            while not isinstance(formatted_answer, AgentFinish):
                if not self.request_within_rpm_limit or self.request_within_rpm_limit():
                    print("******* messages")
                    print(self.messages)
                    answer = LLM(
                        self.llm, stop=self.stop, callbacks=self.callbacks
                    ).call(self.messages)
                    print("after answer")
                    print(answer)

                    self.iterations += 1
                    formatted_answer = self._format_answer(answer)
                    print("222after formatted_answer")
                    print(formatted_answer)

                    if isinstance(formatted_answer, AgentAction):
                        action_result = self._use_tool(formatted_answer)
                        formatted_answer.text += f"\nObservation: {action_result}"
                        print("after formatted_answer.text")
                        print(formatted_answer.text)

                        if self.step_callback:
                            formatted_answer.result = action_result
                            self.step_callback(formatted_answer)
                        if self._should_force_answer():
                            print("starting _should_force_answer")
                            if self.have_forced_answer:
                                print("forcing answer")
                                return {
                                    "output": self._i18n.errors(
                                        "force_final_answer_error"
@@ -139,13 +123,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
            )

        except OutputParserException as e:
            print("********* ERROR1")
            self.messages.append({"role": "assistant", "content": e.error})
            self._invoke_loop(formatted_answer)

        except Exception as e:
            print("********* ERRORw")
            print(e)
            if LLMContextLengthExceededException(str(e))._is_context_limit_error(
                str(e)
            ):
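
Read together, _invoke_loop() is a standard agent loop: call the model, parse the reply, execute a tool when the parser yields an AgentAction, append the observation, and stop once it yields an AgentFinish. A self-contained sketch of that control flow, with toy call_model/parse/run_tool callables standing in for crewAI's internals:

from dataclasses import dataclass
from typing import Callable

@dataclass
class AgentAction:
    tool: str
    tool_input: str
    text: str

@dataclass
class AgentFinish:
    output: str

def invoke_loop(call_model: Callable, parse: Callable, run_tool: Callable,
                messages: list) -> str:
    # Loop until the parsed model reply is a final answer.
    answer = None
    while not isinstance(answer, AgentFinish):
        reply = call_model(messages)        # one LLM call per iteration
        answer = parse(reply)               # AgentAction or AgentFinish
        if isinstance(answer, AgentAction):
            observation = run_tool(answer)  # execute the chosen tool
            # Feed the tool result back so the next call can use it.
            messages.append({
                "role": "assistant",
                "content": answer.text + f"\nObservation: {observation}",
            })
    return answer.output
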
@@ -8,7 +8,6 @@ class LLM:
        self.stop = stop
        self.model = model
        litellm.callbacks = callbacks
        litellm.set_verbose = True

    def call(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        response = completion(
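
The LLM class in this hunk is a thin wrapper over litellm's completion(), which normalizes many providers behind an OpenAI-style interface. A minimal sketch of the same pattern, assuming litellm is installed; SimpleLLM is illustrative, not crewAI's actual class:

from typing import Dict, List, Optional
from litellm import completion

class SimpleLLM:
    def __init__(self, model: str, stop: Optional[List[str]] = None):
        self.model = model
        self.stop = stop

    def call(self, messages: List[Dict[str, str]]) -> str:
        # litellm routes to the provider implied by the model name and
        # returns an OpenAI-shaped response object.
        response = completion(model=self.model, messages=messages, stop=self.stop)
        return response.choices[0].message.content
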
@@ -243,18 +243,13 @@ class Task(BaseModel):
        tools = tools or self.tools or []

        self.processed_by_agents.add(agent.role)
        print("====================================================")
        print("context", self.prompt_context)
        print("context", agent.role)
        print("context", context)

        result = agent.execute_task(
            task=self,
            context=context,
            tools=tools,
        )
        print("result", result)
        print("====================================================")

        pydantic_output, json_output = self._export_output(result)

        task_output = TaskOutput(
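
This Task hunk follows an execute-and-wrap shape: resolve the tool list, record which agent handled the task, run the agent, and package the raw result as structured output. A compact sketch of that shape; TaskOutput's fields here are assumed for illustration:

from dataclasses import dataclass
from typing import Callable, List, Optional

@dataclass
class TaskOutput:
    raw: str
    agent: str

def execute(agent_role: str, run: Callable[[List[str]], str],
            tools: Optional[List[str]] = None) -> TaskOutput:
    tools = tools or []              # fall back to an empty tool list
    raw = run(tools)                 # delegate the actual work to the agent
    # Structured export (pydantic/JSON parsing) would be attempted here.
    return TaskOutput(raw=raw, agent=agent_role)
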
@@ -24,17 +24,26 @@ class RPMController(BaseModel):
        return self

    def check_or_wait(self):
        print("check_or_waitcheck_or_waitcheck_or_waitcheck_or_wait")
        if self.max_rpm is None:
            return True

        def _check_and_increment():
            print(
                "_check_and_increment_check_and_increment_check_and_increment_check_and_increment"
            )
            if self.max_rpm is not None and self._current_rpm < self.max_rpm:
                self._current_rpm += 1
                print("111111111111111")
                print("self._current_rpm", self._current_rpm)
                print("self.max_rpm", self.max_rpm)
                return True
            elif self.max_rpm is not None:
                print("22222222222222")
                self.logger.log(
                    "info", "Max RPM reached, waiting for next minute to start."
                )
                print("CARALHO")
                self._wait_for_next_minute()
                self._current_rpm = 1
                return True
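
check_or_wait() implements a fixed-window rate limiter: count calls in the current minute and, once max_rpm is hit, sleep out the window before admitting the next call. A minimal thread-safe sketch of the idea, independent of crewAI's pydantic-based RPMController:

import threading
import time

class RpmLimiter:
    def __init__(self, max_rpm: int):
        self.max_rpm = max_rpm
        self._current_rpm = 0
        self._lock = threading.Lock()

    def check_or_wait(self) -> bool:
        # Fixed-window limiter: admit up to max_rpm calls, then block
        # until the next window before letting the caller proceed.
        with self._lock:
            if self._current_rpm < self.max_rpm:
                self._current_rpm += 1
                return True
            time.sleep(60)           # wait out the rest of the window
            self._current_rpm = 1    # this call starts the new window
            return True
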
@@ -52,7 +61,7 @@ class RPMController(BaseModel):
        self._timer = None

    def _wait_for_next_minute(self):
        time.sleep(60)
        time.sleep(1)
        self._current_rpm = 0

    def _reset_request_count(self):