mirror of https://github.com/crewAIInc/crewAI.git · synced 2026-01-09 08:08:32 +00:00
432 lines · 13 KiB · Python

import json
import re
from typing import Any, Callable, Dict, List, Optional, Sequence, Union

from crewai.agents.parser import (
    FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
    AgentAction,
    AgentFinish,
    CrewAgentParser,
    OutputParserException,
)
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.tools import BaseTool as CrewAITool
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.tools.tool_types import ToolResult
from crewai.utilities import I18N, Printer
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)


def parse_tools(tools: List[BaseTool]) -> List[CrewStructuredTool]:
    """Parse tools to be used for the task."""
    tools_list = []

    for tool in tools:
        if isinstance(tool, CrewAITool):
            tools_list.append(tool.to_structured_tool())
        else:
            raise ValueError("Tool is not a CrewStructuredTool or BaseTool")

    return tools_list
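
# Illustrative usage (a sketch, not part of the module): any crewai BaseTool,
# e.g. one built with the @tool decorator, is converted to its structured
# form; the "adder" tool below is a hypothetical example.
#
#   from crewai.tools import tool
#
#   @tool("adder")
#   def adder(a: int, b: int) -> int:
#       """Add two integers."""
#       return a + b
#
#   structured = parse_tools([adder])  # -> [CrewStructuredTool]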


def get_tool_names(tools: Sequence[Union[CrewStructuredTool, BaseTool]]) -> str:
    """Get the names of the tools."""
    return ", ".join([t.name for t in tools])


def render_text_description_and_args(
    tools: Sequence[Union[CrewStructuredTool, BaseTool]],
) -> str:
    """Render the tool name, description, and args in plain text.

    Example output:

        search: This tool is used for search, args: {"query": {"type": "string"}}
        calculator: This tool is used for math, args: {"expression": {"type": "string"}}
    """
    tool_strings = []
    for tool in tools:
        tool_strings.append(tool.description)

    return "\n".join(tool_strings)
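
# Illustrative only: search_tool and calculator_tool are placeholders whose
# description fields follow the docstring's example shape; the rendered block
# is simply one description per line, so the exact wording comes from each
# tool's own description.
#
#   render_text_description_and_args([search_tool, calculator_tool])
#   # search: This tool is used for search, args: {"query": {"type": "string"}}
#   # calculator: This tool is used for math, args: {"expression": {"type": "string"}}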


def has_reached_max_iterations(iterations: int, max_iterations: int) -> bool:
    """Check if the maximum number of iterations has been reached."""
    return iterations >= max_iterations


def handle_max_iterations_exceeded(
    formatted_answer: Union[AgentAction, AgentFinish, None],
    printer: Printer,
    i18n: I18N,
    messages: List[Dict[str, str]],
    llm: Union[LLM, BaseLLM],
    callbacks: List[Any],
) -> Union[AgentAction, AgentFinish]:
    """Handle the case where the maximum number of iterations is exceeded.

    Performs one more LLM call to get the final answer.

    Parameters:
        formatted_answer: The last formatted answer from the agent.
        printer: Printer instance for output.
        i18n: I18N instance providing the forced-final-answer prompt.
        messages: Conversation history to append to.
        llm: LLM instance used for the final call.
        callbacks: Callbacks passed through to the LLM call.

    Returns:
        The final formatted answer after exceeding max iterations.
    """
    printer.print(
        content="Maximum iterations reached. Requesting final answer.",
        color="yellow",
    )

    if formatted_answer and hasattr(formatted_answer, "text"):
        assistant_message = (
            formatted_answer.text + f'\n{i18n.errors("force_final_answer")}'
        )
    else:
        assistant_message = i18n.errors("force_final_answer")

    messages.append(format_message_for_llm(assistant_message, role="assistant"))

    # Perform one more LLM call to get the final answer.
    answer = llm.call(
        messages,
        callbacks=callbacks,
    )

    if answer is None or answer == "":
        printer.print(
            content="Received None or empty response from LLM call.",
            color="red",
        )
        raise ValueError("Invalid response from LLM call - None or empty.")

    # Return the parsed answer, whether it is an AgentAction or AgentFinish.
    return format_answer(answer)


def format_message_for_llm(prompt: str, role: str = "user") -> Dict[str, str]:
    """Format a prompt string as a chat message dict for the LLM."""
    prompt = prompt.rstrip()
    return {"role": role, "content": prompt}
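
# Example (doctest-style, illustrative): trailing whitespace is stripped and
# the prompt is wrapped in the chat-message dict shape that llm.call expects.
#
#   >>> format_message_for_llm("Summarize the report.\n")
#   {'role': 'user', 'content': 'Summarize the report.'}
#   >>> format_message_for_llm("Done.", role="assistant")
#   {'role': 'assistant', 'content': 'Done.'}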


def format_answer(answer: str) -> Union[AgentAction, AgentFinish]:
    """Format a response from the LLM into an AgentAction or AgentFinish."""
    try:
        return CrewAgentParser.parse_text(answer)
    except Exception:
        # If parsing fails, return a default AgentFinish wrapping the raw text.
        return AgentFinish(
            thought="Failed to parse LLM response",
            output=answer,
            text=answer,
        )
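
# Illustrative only, assuming the ReAct-style layout crewAI prompts the model
# for: text containing "Action:"/"Action Input:" parses to an AgentAction,
# text containing "Final Answer:" to an AgentFinish, and anything unparsable
# falls back to an AgentFinish wrapping the raw response.
#
#   format_answer('Thought: look it up\nAction: search\nAction Input: {"query": "crewai"}')
#   format_answer("Thought: done\nFinal Answer: CrewAI is a multi-agent framework.")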


def enforce_rpm_limit(
    request_within_rpm_limit: Optional[Callable[[], bool]] = None,
) -> None:
    """Enforce the requests per minute (RPM) limit if applicable."""
    if request_within_rpm_limit:
        request_within_rpm_limit()
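
# Illustrative only: the limiter callable is expected to block (or return)
# once a request slot is free. For example, with crewAI's RPMController:
#
#   from crewai.utilities.rpm_controller import RPMController
#
#   controller = RPMController(max_rpm=10)
#   enforce_rpm_limit(controller.check_or_wait)  # waits if over the limit
#   enforce_rpm_limit(None)                      # no-op when no limiter is set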


def get_llm_response(
    llm: Union[LLM, BaseLLM],
    messages: List[Dict[str, str]],
    callbacks: List[Any],
    printer: Printer,
) -> str:
    """Call the LLM and return the response, handling any invalid responses."""
    try:
        answer = llm.call(
            messages,
            callbacks=callbacks,
        )
    except Exception as e:
        printer.print(
            content=f"Error during LLM call: {e}",
            color="red",
        )
        raise

    if not answer:
        printer.print(
            content="Received None or empty response from LLM call.",
            color="red",
        )
        raise ValueError("Invalid response from LLM call - None or empty.")

    return answer


def process_llm_response(
    answer: str, use_stop_words: bool
) -> Union[AgentAction, AgentFinish]:
    """Process the LLM response and format it into an AgentAction or AgentFinish."""
    if not use_stop_words:
        try:
            # Preliminary parsing to check for errors.
            format_answer(answer)
        except OutputParserException as e:
            if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
                # The model emitted both an action and a final answer; keep
                # only the text before the fabricated "Observation:" block.
                answer = answer.split("Observation:")[0].strip()

    return format_answer(answer)
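
# Illustrative only: when stop words are disabled, a response that contains
# both a parsable action and a final answer is truncated at the first
# hallucinated "Observation:" before being parsed again.
#
#   raw = 'Action: search\nAction Input: {"q": "x"}\nObservation: made up\nFinal Answer: x'
#   process_llm_response(raw, use_stop_words=False)  # parses only the action part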


def handle_agent_action_core(
    formatted_answer: AgentAction,
    tool_result: ToolResult,
    messages: Optional[List[Dict[str, str]]] = None,
    step_callback: Optional[Callable] = None,
    show_logs: Optional[Callable] = None,
) -> Union[AgentAction, AgentFinish]:
    """Core logic for handling agent actions and tool results.

    Args:
        formatted_answer: The agent's action.
        tool_result: The result of executing the tool.
        messages: Optional list of messages to append results to.
        step_callback: Optional callback to execute after processing.
        show_logs: Optional function to show logs.

    Returns:
        Either an AgentAction or AgentFinish.
    """
    if step_callback:
        step_callback(tool_result)

    formatted_answer.text += f"\nObservation: {tool_result.result}"
    formatted_answer.result = tool_result.result

    # A tool configured with result_as_answer returns its output directly
    # as the final answer, ending the agent loop.
    if tool_result.result_as_answer:
        return AgentFinish(
            thought="",
            output=tool_result.result,
            text=formatted_answer.text,
        )

    if show_logs:
        show_logs(formatted_answer)

    if messages is not None:
        messages.append({"role": "assistant", "content": tool_result.result})

    return formatted_answer
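
# Illustrative only (ToolResult fields as used above): a tool marked
# result_as_answer short-circuits the loop with an AgentFinish; otherwise the
# observation is appended and the same AgentAction comes back.
#
#   action = AgentAction(thought="", tool="search", tool_input="{}", text="...")
#   done = handle_agent_action_core(action, ToolResult(result="42", result_as_answer=True))
#   isinstance(done, AgentFinish)  # True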


def handle_unknown_error(printer: Any, exception: Exception) -> None:
    """Handle unknown errors by informing the user.

    Args:
        printer: Printer instance for output.
        exception: The exception that occurred.
    """
    printer.print(
        content="An unknown error occurred. Please check the details below.",
        color="red",
    )
    printer.print(
        content=f"Error details: {exception}",
        color="red",
    )


def handle_output_parser_exception(
    e: OutputParserException,
    messages: List[Dict[str, str]],
    iterations: int,
    log_error_after: int = 3,
    printer: Optional[Any] = None,
) -> AgentAction:
    """Handle an OutputParserException by feeding the error back to the LLM.

    Args:
        e: The OutputParserException that occurred.
        messages: List of messages to append to.
        iterations: Current iteration count.
        log_error_after: Number of iterations after which to log errors.
        printer: Optional printer instance for logging.

    Returns:
        AgentAction: A placeholder action carrying the error text.
    """
    messages.append({"role": "user", "content": e.error})

    formatted_answer = AgentAction(
        text=e.error,
        tool="",
        tool_input="",
        thought="",
    )

    if iterations > log_error_after and printer:
        printer.print(
            content=f"Error parsing LLM output, agent will retry: {e.error}",
            color="red",
        )

    return formatted_answer
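
# Illustrative only: the parser's error text is echoed back to the model as a
# user message so it can self-correct, and a placeholder AgentAction keeps the
# loop running ("err" is a hypothetical OutputParserException).
#
#   messages: List[Dict[str, str]] = []
#   retry = handle_output_parser_exception(err, messages, iterations=1)
#   messages[-1]["content"] == err.error  # True — error fed back to the LLM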


def is_context_length_exceeded(exception: Exception) -> bool:
    """Check if the exception is due to the context length being exceeded.

    Args:
        exception: The exception to check.

    Returns:
        bool: True if the exception indicates a context-length error.
    """
    return LLMContextLengthExceededException(str(exception))._is_context_limit_error(
        str(exception)
    )
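
# Illustrative only: detection is pattern-based on the provider's error text
# (exact wording varies by provider), so an exception like the one below is
# expected to be recognized:
#
#   is_context_length_exceeded(
#       Exception("This model's maximum context length is 8192 tokens")
#   )  # True for messages matching the known context-limit patterns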


def handle_context_length(
    respect_context_window: bool,
    printer: Any,
    messages: List[Dict[str, str]],
    llm: Any,
    callbacks: List[Any],
    i18n: Any,
) -> None:
    """Handle an exceeded context length by either summarizing or raising an error.

    Args:
        respect_context_window: Whether to summarize to fit the context window.
        printer: Printer instance for output.
        messages: List of messages to summarize.
        llm: LLM instance for summarization.
        callbacks: List of callbacks for the LLM.
        i18n: I18N instance for messages.
    """
    if respect_context_window:
        printer.print(
            content="Context length exceeded. Summarizing content to fit the model context window.",
            color="yellow",
        )
        summarize_messages(messages, llm, callbacks, i18n)
    else:
        printer.print(
            content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
            color="red",
        )
        raise SystemExit(
            "Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
        )


def summarize_messages(
    messages: List[Dict[str, str]],
    llm: Any,
    callbacks: List[Any],
    i18n: Any,
) -> None:
    """Summarize messages in place so they fit within the context window.

    Args:
        messages: List of messages to summarize.
        llm: LLM instance for summarization.
        callbacks: List of callbacks for the LLM.
        i18n: I18N instance for the summarizer prompts.
    """
    messages_groups = []
    for message in messages:
        content = message["content"]
        cut_size = llm.get_context_window_size()
        # Slice the content into window-sized chunks so each chunk can be
        # summarized in a single call.
        for i in range(0, len(content), cut_size):
            messages_groups.append({"content": content[i : i + cut_size]})

    summarized_contents = []
    for group in messages_groups:
        summary = llm.call(
            [
                format_message_for_llm(
                    i18n.slice("summarizer_system_message"), role="system"
                ),
                format_message_for_llm(
                    i18n.slice("summarize_instruction").format(group=group["content"]),
                ),
            ],
            callbacks=callbacks,
        )
        summarized_contents.append({"content": str(summary)})

    merged_summary = " ".join(content["content"] for content in summarized_contents)

    # Replace the full history with a single summary message.
    messages.clear()
    messages.append(
        format_message_for_llm(
            i18n.slice("summary").format(merged_summary=merged_summary)
        )
    )
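
# Illustrative sketch of the chunking above: the raw content string is sliced
# into pieces of the model's context window size, each piece is summarized
# separately, and the joined summaries replace the original history. With a
# hypothetical cut_size of 8000 and a 20000-character message:
#
#   [(i, min(i + 8000, 20000)) for i in range(0, 20000, 8000)]
#   # -> [(0, 8000), (8000, 16000), (16000, 20000)]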


def show_agent_logs(
    printer: Printer,
    agent_role: str,
    formatted_answer: Optional[Union[AgentAction, AgentFinish]] = None,
    task_description: Optional[str] = None,
    verbose: bool = False,
) -> None:
    """Show agent logs for both start and execution states.

    Args:
        printer: Printer instance for output.
        agent_role: Role of the agent.
        formatted_answer: Optional AgentAction or AgentFinish for execution logs.
        task_description: Optional task description for start logs.
        verbose: Whether to show verbose output.
    """
    if not verbose:
        return

    agent_role = agent_role.split("\n")[0]

    if formatted_answer is None:
        # Start logs
        printer.print(
            content=f"\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
        )
        if task_description:
            printer.print(
                content=f"\033[95m## Task:\033[00m \033[92m{task_description}\033[00m"
            )
    else:
        # Execution logs
        printer.print(
            content=f"\n\n\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
        )

        if isinstance(formatted_answer, AgentAction):
            thought = re.sub(r"\n+", "\n", formatted_answer.thought)
            formatted_json = json.dumps(
                formatted_answer.tool_input,
                indent=2,
                ensure_ascii=False,
            )
            if thought:
                printer.print(
                    content=f"\033[95m## Thought:\033[00m \033[92m{thought}\033[00m"
                )
            printer.print(
                content=f"\033[95m## Using tool:\033[00m \033[92m{formatted_answer.tool}\033[00m"
            )
            printer.print(
                content=f"\033[95m## Tool Input:\033[00m \033[92m\n{formatted_json}\033[00m"
            )
            printer.print(
                content=f"\033[95m## Tool Output:\033[00m \033[92m\n{formatted_answer.result}\033[00m"
            )
        elif isinstance(formatted_answer, AgentFinish):
            printer.print(
                content=f"\033[95m## Final Answer:\033[00m \033[92m\n{formatted_answer.output}\033[00m\n\n"
            )
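
# Illustrative only: typical calls from the agent executor. The first form
# prints the start banner, the second an execution step ("finish" here is a
# hypothetical AgentFinish); both are no-ops unless verbose is True.
#
#   show_agent_logs(Printer(), "Researcher", task_description="Find sources", verbose=True)
#   show_agent_logs(Printer(), "Researcher", formatted_answer=finish, verbose=True)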