It's working but needs a massive cleanup

Brandon Hancock
2025-03-25 13:38:52 -04:00
parent 998afcd498
commit 06854fff86
7 changed files with 355 additions and 302 deletions

View File

@@ -79,61 +79,61 @@ async def main():
     result = await agent.kickoff_async("What is the population of Tokyo in 2023?")
     print(f"Raw response: {result.raw}")
 
-    # Example 2: Query with structured output
-    print("\n=== Example 2: Structured Output ===")
-    structured_query = """
-    Research the impact of climate change on coral reefs.
-    YOU MUST format your response as a valid JSON object with the following structure:
-    {
-        "main_findings": "A summary of the main findings",
-        "key_points": ["Point 1", "Point 2", "Point 3"],
-        "sources": ["Source 1", "Source 2"]
-    }
-    Include at least 3 key points and 2 sources. Wrap your JSON in ```json and ``` tags.
-    """
-    result = await agent.kickoff_async(structured_query)
-
-    if result.pydantic:
-        # Cast to the specific type for better IDE support
-        research_result = cast(ResearchResult, result.pydantic)
-        print(f"Main findings: {research_result.main_findings}")
-        print("\nKey points:")
-        for i, point in enumerate(research_result.key_points, 1):
-            print(f"{i}. {point}")
-        print("\nSources:")
-        for i, source in enumerate(research_result.sources, 1):
-            print(f"{i}. {source}")
-    else:
-        print(f"Raw response: {result.raw}")
-        print(
-            "\nNote: Structured output was not generated. The LLM may need more explicit instructions to format the response as JSON."
-        )
-
-    # Example 3: Multi-turn conversation
-    print("\n=== Example 3: Multi-turn Conversation ===")
-    messages = [
-        {"role": "user", "content": "I'm planning a trip to Japan."},
-        {
-            "role": "assistant",
-            "content": "That sounds exciting! Japan is a beautiful country with rich culture, delicious food, and stunning landscapes. What would you like to know about Japan to help with your trip planning?",
-        },
-        {
-            "role": "user",
-            "content": "What are the best times to visit Tokyo and Kyoto?",
-        },
-    ]
-    result = await agent.kickoff_async(messages)
-    print(f"Response: {result.raw}")
-
-    # Print usage metrics if available
-    if result.usage_metrics:
-        print("\nUsage metrics:")
-        for key, value in result.usage_metrics.items():
-            print(f"{key}: {value}")
+    # # Example 2: Query with structured output
+    # print("\n=== Example 2: Structured Output ===")
+    # structured_query = """
+    # Research the impact of climate change on coral reefs.
+    # YOU MUST format your response as a valid JSON object with the following structure:
+    # {
+    #     "main_findings": "A summary of the main findings",
+    #     "key_points": ["Point 1", "Point 2", "Point 3"],
+    #     "sources": ["Source 1", "Source 2"]
+    # }
+    # Include at least 3 key points and 2 sources. Wrap your JSON in ```json and ``` tags.
+    # """
+    # result = await agent.kickoff_async(structured_query)
+
+    # if result.pydantic:
+    #     # Cast to the specific type for better IDE support
+    #     research_result = cast(ResearchResult, result.pydantic)
+    #     print(f"Main findings: {research_result.main_findings}")
+    #     print("\nKey points:")
+    #     for i, point in enumerate(research_result.key_points, 1):
+    #         print(f"{i}. {point}")
+    #     print("\nSources:")
+    #     for i, source in enumerate(research_result.sources, 1):
+    #         print(f"{i}. {source}")
+    # else:
+    #     print(f"Raw response: {result.raw}")
+    #     print(
+    #         "\nNote: Structured output was not generated. The LLM may need more explicit instructions to format the response as JSON."
+    #     )
+
+    # # Example 3: Multi-turn conversation
+    # print("\n=== Example 3: Multi-turn Conversation ===")
+    # messages = [
+    #     {"role": "user", "content": "I'm planning a trip to Japan."},
+    #     {
+    #         "role": "assistant",
+    #         "content": "That sounds exciting! Japan is a beautiful country with rich culture, delicious food, and stunning landscapes. What would you like to know about Japan to help with your trip planning?",
+    #     },
+    #     {
+    #         "role": "user",
+    #         "content": "What are the best times to visit Tokyo and Kyoto?",
+    #     },
+    # ]
+    # result = await agent.kickoff_async(messages)
+    # print(f"Response: {result.raw}")
+
+    # # Print usage metrics if available
+    # if result.usage_metrics:
+    #     print("\nUsage metrics:")
+    #     for key, value in result.usage_metrics.items():
+    #         print(f"{key}: {value}")
 
 if __name__ == "__main__":
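For reference, a minimal standalone sketch of the extraction pattern the now-disabled structured-output example relied on, mirroring the `_extract_structured_output` helper removed from lite_agent.py later in this commit. `ResearchResult` is a hypothetical Pydantic model matching the JSON shape requested in the prompt above:

import json
import re
from typing import List, Optional

from pydantic import BaseModel, ValidationError


class ResearchResult(BaseModel):
    # Hypothetical schema matching the JSON structure requested in the prompt.
    main_findings: str
    key_points: List[str]
    sources: List[str]


def extract_structured_output(text: str) -> Optional[ResearchResult]:
    """Pull a ```json fenced block (or a bare JSON object) out of LLM text."""
    match = re.search(r"```json\s*([\s\S]*?)\s*```", text) or re.search(
        r"(\{[\s\S]*\})", text
    )
    if not match:
        return None
    try:
        return ResearchResult.model_validate(json.loads(match.group(1)))
    except (json.JSONDecodeError, ValidationError):
        return None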

View File

@@ -2,7 +2,7 @@ import uuid
 from abc import ABC, abstractmethod
 from copy import copy as shallow_copy
 from hashlib import md5
-from typing import Any, Dict, List, Optional, TypeVar
+from typing import Any, Callable, Dict, List, Optional, TypeVar
 
 from pydantic import (
     UUID4,
@@ -150,6 +150,9 @@ class BaseAgent(ABC, BaseModel):
         default_factory=SecurityConfig,
         description="Security configuration for the agent, including fingerprinting.",
     )
+    callbacks: List[Callable] = Field(
+        default=[], description="Callbacks to be used for the agent"
+    )
 
     @model_validator(mode="before")
     @classmethod
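The new `callbacks` field on BaseAgent is a plain `List[Callable]`. A hedged sketch of how a caller might populate it, assuming `Agent` is a concrete `BaseAgent` subclass; the callback signature is illustrative, since the wiring that invokes these callables is not shown in this hunk:

from typing import Any

from crewai import Agent  # concrete BaseAgent subclass


def log_callback(payload: Any) -> None:
    # Illustrative only: the exact payload is defined by the executor.
    print(f"callback payload: {payload}")


agent = Agent(
    role="Researcher",
    goal="Answer factual questions",
    backstory="An analyst.",
    callbacks=[log_callback],
)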

View File

@@ -1,5 +1,5 @@
 import time
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING
 
 from crewai.memory.entity.entity_memory_item import EntityMemoryItem
 from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem

View File

@@ -6,14 +6,11 @@ from typing import Any, Callable, Dict, List, Optional, Union
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
 from crewai.agents.parser import (
-    FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
     AgentAction,
     AgentFinish,
-    CrewAgentParser,
     OutputParserException,
 )
 from crewai.agents.tools_handler import ToolsHandler
-from crewai.lite_agent import LiteAgent
 from crewai.llm import LLM
 from crewai.tools.base_tool import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool
@@ -21,7 +18,6 @@ from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
 from crewai.utilities import I18N, Printer
 from crewai.utilities.agent_utils import (
     enforce_rpm_limit,
-    format_answer,
     format_message_for_llm,
     get_llm_response,
     handle_max_iterations_exceeded,
@@ -59,7 +55,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         agent: BaseAgent,
         prompt: dict[str, str],
         max_iter: int,
-        tools: List[Union[CrewStructuredTool, BaseTool]],
+        tools: List[CrewStructuredTool],
         tools_names: str,
         stop_words: List[str],
         tools_description: str,

View File

@@ -4,7 +4,7 @@ import re
 import uuid
 from typing import Any, Callable, Dict, List, Optional, Type, Union, cast
 
-from pydantic import BaseModel, Field, PrivateAttr, model_validator
+from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator
 
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
@@ -12,17 +12,18 @@ from crewai.agents.cache import CacheHandler
 from crewai.agents.parser import (
     AgentAction,
     AgentFinish,
-    CrewAgentParser,
     OutputParserException,
 )
 from crewai.agents.tools_handler import ToolsHandler
 from crewai.llm import LLM
 from crewai.tools.base_tool import BaseTool
+from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
 from crewai.types.usage_metrics import UsageMetrics
 from crewai.utilities import I18N
 from crewai.utilities.agent_utils import (
     enforce_rpm_limit,
+    format_message_for_llm,
     get_llm_response,
     get_tool_names,
     handle_max_iterations_exceeded,
@@ -32,12 +33,16 @@ from crewai.utilities.agent_utils import (
     render_text_description_and_args,
 )
 from crewai.utilities.events.agent_events import (
-    LiteAgentExecutionCompletedEvent,
-    LiteAgentExecutionErrorEvent,
     LiteAgentExecutionStartedEvent,
 )
 from crewai.utilities.events.crewai_event_bus import crewai_event_bus
-from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
+from crewai.utilities.events.tool_usage_events import (
+    ToolUsageErrorEvent,
+    ToolUsageStartedEvent,
+)
+from crewai.utilities.exceptions.context_window_exceeding_exception import (
+    LLMContextLengthExceededException,
+)
 from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.printer import Printer
 from crewai.utilities.token_counter_callback import TokenCalcHandler
@@ -103,7 +108,9 @@ class LiteAgent(BaseModel):
     role: str = Field(description="Role of the agent")
     goal: str = Field(description="Goal of the agent")
     backstory: str = Field(description="Backstory of the agent")
-    llm: LLM = Field(description="Language model that will run the agent")
+    llm: Union[str, InstanceOf[LLM], Any] = Field(
+        description="Language model that will run the agent"
+    )
     tools: List[BaseTool] = Field(
         default_factory=list, description="Tools at agent's disposal"
     )
@@ -119,15 +126,17 @@ class LiteAgent(BaseModel):
     response_format: Optional[Type[BaseModel]] = Field(
         default=None, description="Pydantic model for structured output"
     )
-    step_callback: Optional[Any] = Field(
-        default=None,
-        description="Callback to be executed after each step of the agent execution.",
-    )
     tools_results: List[Dict[str, Any]] = Field(
         default=[], description="Results of the tools used by the agent."
     )
+    respect_context_window: bool = Field(
+        default=True,
+        description="Whether to respect the context window of the LLM",
+    )
+    callbacks: List[Callable] = Field(
+        default=[], description="Callbacks to be used for the agent"
+    )
+    _parsed_tools: List[CrewStructuredTool] = PrivateAttr(default_factory=list)
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
     _cache_handler: CacheHandler = PrivateAttr(default_factory=CacheHandler)
     _times_executed: int = PrivateAttr(default=0)
@@ -143,6 +152,7 @@ class LiteAgent(BaseModel):
     _delegations: Dict[str, int] = PrivateAttr(default_factory=dict)
 
     # Internationalization
     _printer: Printer = PrivateAttr(default_factory=Printer)
     i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
     request_within_rpm_limit: Optional[Callable[[], bool]] = Field(
         default=None,
@@ -152,16 +162,31 @@ class LiteAgent(BaseModel):
         default=True,
         description="Whether to use stop words to prevent the LLM from using tools",
     )
+    tool_name_to_tool_map: Dict[str, Union[CrewStructuredTool, BaseTool]] = Field(
+        default_factory=dict,
+        description="Mapping of tool names to tool instances",
+    )
 
     @model_validator(mode="after")
     def setup_llm(self):
         """Set up the LLM and other components after initialization."""
-        if self.llm is None:
-            raise ValueError("LLM must be provided")
+        self.llm = create_llm(self.llm)
         if not isinstance(self.llm, LLM):
-            self.llm = create_llm(self.llm)
+            raise ValueError("Unable to create LLM instance")
+        self.use_stop_words = self.llm.supports_stop_words()
+
+        # Initialize callbacks
+        token_callback = TokenCalcHandler(token_cost_process=self._token_process)
+        self._callbacks = [token_callback]
+        return self
+
+    @model_validator(mode="after")
+    def parse_tools(self):
+        """Parse the tools and convert them to CrewStructuredTool instances."""
+        self._parsed_tools = parse_tools(self.tools)
+
+        # Initialize tool name to tool mapping
+        self.tool_name_to_tool_map = {tool.name: tool for tool in self._parsed_tools}
         return self
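With the widened `llm` field and the reworked `setup_llm` validator, a LiteAgent can now be constructed from a plain model-name string and rely on `create_llm` to normalize it. A hedged sketch of that usage; the model name is a placeholder, and the assumption is that the fields shown are the only required ones:

from crewai.lite_agent import LiteAgent

agent = LiteAgent(
    role="Researcher",
    goal="Answer factual questions",
    backstory="An analyst.",
    llm="gpt-4o",  # normalized to an LLM instance by create_llm in setup_llm
)
# With no tools supplied, the parse_tools validator leaves the mapping empty.
assert agent.tool_name_to_tool_map == {}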
@@ -177,14 +202,14 @@ class LiteAgent(BaseModel):
     def _get_default_system_prompt(self) -> str:
         """Get the default system prompt for the agent."""
-        if self.tools:
+        if self._parsed_tools:
             # Use the prompt template for agents with tools
             return self.i18n.slice("lite_agent_system_prompt_with_tools").format(
                 role=self.role,
                 backstory=self.backstory,
                 goal=self.goal,
-                tools=render_text_description_and_args(self.tools),
-                tool_names=get_tool_names(self.tools),
+                tools=render_text_description_and_args(self._parsed_tools),
+                tool_names=get_tool_names(self._parsed_tools),
             )
         else:
             # Use the prompt template for agents without tools
@@ -211,102 +236,6 @@ class LiteAgent(BaseModel):
         return formatted_messages
 
-    def _extract_structured_output(self, text: str) -> Optional[BaseModel]:
-        """Extract structured output from text if response_format is set."""
-        if not self.response_format:
-            return None
-
-        try:
-            # Try to extract JSON from the text
-            json_match = re.search(r"```json\s*([\s\S]*?)\s*```", text)
-            if json_match:
-                json_str = json_match.group(1)
-                json_data = json.loads(json_str)
-            else:
-                # Try to parse the entire text as JSON
-                try:
-                    json_data = json.loads(text)
-                except json.JSONDecodeError:
-                    # If that fails, use a more lenient approach to find JSON-like content
-                    potential_json = re.search(r"(\{[\s\S]*\})", text)
-                    if potential_json:
-                        json_data = json.loads(potential_json.group(1))
-                    else:
-                        return None
-
-            # Convert to Pydantic model
-            return self.response_format.model_validate(json_data)
-        except Exception as e:
-            if self.verbose:
-                print(f"Error extracting structured output: {e}")
-            return None
-
-    def _preprocess_model_output(self, text: str) -> str:
-        """Preprocess the model output to correct common formatting issues."""
-        # Skip if the text is empty
-        if not text or text.strip() == "":
-            return "Thought: I need to provide an answer.\n\nFinal Answer: I don't have enough information to provide a complete answer."
-
-        # Remove 'Action' or 'Final Answer' from anywhere after a proper Thought
-        if "Thought:" in text and ("Action:" in text and "Final Answer:" in text):
-            # This is a case where both Action and Final Answer appear - clear conflict
-            # Check which one appears first and keep only that one
-            action_index = text.find("Action:")
-            final_answer_index = text.find("Final Answer:")
-            if action_index != -1 and final_answer_index != -1:
-                if action_index < final_answer_index:
-                    # Keep only the Action part
-                    text = text[:final_answer_index]
-                else:
-                    # Keep only the Final Answer part
-                    text = text[:action_index] + text[final_answer_index:]
-                if self.verbose:
-                    print("Removed conflicting Action/Final Answer parts")
-
-        # Check if this looks like a tool usage attempt without proper formatting
-        if any(tool.name in text for tool in self.tools) and "Action:" not in text:
-            # Try to extract tool name and input
-            for tool in self.tools:
-                if tool.name in text:
-                    # Find the tool name in the text
-                    parts = text.split(tool.name, 1)
-                    if len(parts) > 1:
-                        # Try to extract input as JSON
-                        input_text = parts[1]
-                        json_match = re.search(r"(\{[\s\S]*\})", input_text)
-                        if json_match:
-                            # Construct a properly formatted response
-                            formatted = "Thought: I need to use a tool to help with this task.\n\n"
-                            formatted += f"Action: {tool.name}\n\n"
-                            formatted += f"Action Input: {json_match.group(1)}\n"
-                            if self.verbose:
-                                print(f"Reformatted tool usage: {tool.name}")
-                            return formatted
-
-        # Check if this looks like a final answer without proper formatting
-        if (
-            "Final Answer:" not in text
-            and not any(tool.name in text for tool in self.tools)
-            and "Action:" not in text
-        ):
-            # This might be a direct response, format it as a final answer
-            # Don't format if text already has a "Thought:" section
-            if "Thought:" not in text:
-                formatted = "Thought: I can now provide the final answer.\n\n"
-                formatted += f"Final Answer: {text}\n"
-                if self.verbose:
-                    print("Reformatted as final answer")
-                return formatted
-
-        return text
-
     def kickoff(self, messages: Union[str, List[Dict[str, str]]]) -> LiteAgentOutput:
         """
         Execute the agent with the given messages.
@@ -347,7 +276,7 @@ class LiteAgent(BaseModel):
"role": self.role, "role": self.role,
"goal": self.goal, "goal": self.goal,
"backstory": self.backstory, "backstory": self.backstory,
"tools": self.tools, "tools": self._parsed_tools,
"verbose": self.verbose, "verbose": self.verbose,
} }
@@ -356,7 +285,7 @@ class LiteAgent(BaseModel):
             self,
             event=LiteAgentExecutionStartedEvent(
                 agent_info=agent_info,
-                tools=self.tools,
+                tools=self._parsed_tools,
                 messages=messages,
             ),
         )
@@ -364,61 +293,29 @@ class LiteAgent(BaseModel):
         try:
             # Execute the agent using invoke loop
             result = await self._invoke()
-
-            # Extract structured output if response_format is set
-            pydantic_output = None
-            if self.response_format:
-                structured_output = self._extract_structured_output(result)
-                if isinstance(structured_output, BaseModel):
-                    pydantic_output = structured_output
-
-            # Create output object
-            usage_metrics = {}
-            if hasattr(self._token_process, "get_summary"):
-                usage_metrics_obj = self._token_process.get_summary()
-                if isinstance(usage_metrics_obj, UsageMetrics):
-                    usage_metrics = usage_metrics_obj.model_dump()
-
-            output = LiteAgentOutput(
-                raw=result,
-                pydantic=pydantic_output,
-                agent_role=self.role,
-                usage_metrics=usage_metrics,
-            )
-
-            # Emit event for agent execution completion
-            crewai_event_bus.emit(
-                self,
-                event=LiteAgentExecutionCompletedEvent(
-                    agent_info=agent_info,
-                    output=result,
-                ),
-            )
-            return output
+        except AssertionError:
+            self._printer.print(
+                content="Agent failed to reach a final answer. This is likely a bug - please report it.",
+                color="red",
+            )
+            raise
         except Exception as e:
-            # Emit event for agent execution error
-            crewai_event_bus.emit(
-                self,
-                event=LiteAgentExecutionErrorEvent(
-                    agent_info=agent_info,
-                    error=str(e),
-                ),
-            )
-            # Retry if we haven't exceeded the retry limit
-            self._times_executed += 1
-            if self._times_executed <= self._max_retry_limit:
-                if self.verbose:
-                    print(
-                        f"Retrying agent execution ({self._times_executed}/{self._max_retry_limit})..."
-                    )
-                return await self.kickoff_async(messages)
-            raise e
-
-    async def _invoke(self) -> str:
+            self._handle_unknown_error(e)
+            if e.__class__.__module__.startswith("litellm"):
+                # Do not retry on litellm errors
+                raise e
+            else:
+                raise e
+
+        # TODO: CREATE AND RETURN LiteAgentOutput
+        return LiteAgentOutput(
+            raw=result.text,
+            pydantic=None,  # TODO: Add pydantic output
+            agent_role=self.role,
+            usage_metrics=None,  # TODO: Add usage metrics
+        )
+
+    async def _invoke(self) -> AgentFinish:
         """
         Run the agent's thought process until it reaches a conclusion or max iterations.
         Similar to _invoke_loop in CrewAgentExecutor.
@@ -426,18 +323,8 @@ class LiteAgent(BaseModel):
         Returns:
             str: The final result of the agent execution.
         """
-        # # Set up tools handler for tool execution
-        # tools_handler = ToolsHandler(cache=self._cache_handler)
-
-        # TODO: MOVE TO INIT
-        # Set up callbacks for token tracking
-        token_callback = TokenCalcHandler(token_cost_process=self._token_process)
-        callbacks = [token_callback]
-
-        # # Prepare tool configurations
-        # parsed_tools = parse_tools(self.tools)
-        # tools_description = render_text_description_and_args(parsed_tools)
-        # tools_names = get_tool_names(parsed_tools)
+        # Use the stored callbacks
+        callbacks = self._callbacks
 
         # Execute the agent loop
         formatted_answer = None
@@ -449,14 +336,14 @@ class LiteAgent(BaseModel):
                 printer=self._printer,
                 i18n=self.i18n,
                 messages=self._messages,
-                llm=self.llm,
+                llm=cast(LLM, self.llm),
                 callbacks=callbacks,
             )
 
             enforce_rpm_limit(self.request_within_rpm_limit)
 
             answer = get_llm_response(
-                llm=self.llm,
+                llm=cast(LLM, self.llm),
                 messages=self._messages,
                 callbacks=callbacks,
                 printer=self._printer,
@@ -471,11 +358,31 @@ class LiteAgent(BaseModel):
                         formatted_answer, tool_result
                     )
-                    self._invoke_step_callback(formatted_answer)
                     self._append_message(formatted_answer.text, role="assistant")
+            except OutputParserException as e:
+                formatted_answer = self._handle_output_parser_exception(e)
             except Exception as e:
-                print(f"Error: {e}")
+                if e.__class__.__module__.startswith("litellm"):
+                    # Do not retry on litellm errors
+                    raise e
+                if self._is_context_length_exceeded(e):
+                    self._handle_context_length()
+                    continue
+                else:
+                    self._handle_unknown_error(e)
+                    raise e
+            finally:
+                self._iterations += 1
+
+        # During the invoke loop, formatted_answer alternates between AgentAction
+        # (when the agent is using tools) and eventually becomes AgentFinish
+        # (when the agent reaches a final answer). This assertion confirms we've
+        # reached a final answer and helps type checking understand this transition.
+        assert isinstance(formatted_answer, AgentFinish)
+
+        self._show_logs(formatted_answer)
+        return formatted_answer
 
     def _execute_tool_and_check_finality(self, agent_action: AgentAction) -> ToolResult:
         try:
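The rewritten loop filters exceptions by their defining module rather than by type, so anything raised from litellm propagates immediately instead of being retried or summarized away. A small self-contained illustration of that predicate:

def is_litellm_error(exc: Exception) -> bool:
    # Matches "litellm", "litellm.exceptions", etc.; no litellm import needed.
    return exc.__class__.__module__.startswith("litellm")


class FakeLiteLLMError(Exception):
    pass


FakeLiteLLMError.__module__ = "litellm.exceptions"  # simulate a litellm-raised error

assert is_litellm_error(FakeLiteLLMError())
assert not is_litellm_error(ValueError("other"))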
@@ -490,12 +397,12 @@ class LiteAgent(BaseModel):
                 ),
             )
             tool_usage = ToolUsage(
-                tools=self.tools,
-                original_tools=self.tools,  # TODO: INVESTIGATE DIFF BETWEEN THIS AND ABOVE
-                tools_description=render_text_description_and_args(self.tools),
-                tools_names=get_tool_names(self.tools),
                 agent=self,
+                tools=self._parsed_tools,
                 action=agent_action,
-                tools_handler=None,
-                task=None,
-                function_calling_llm=None,
             )
 
             tool_calling = tool_usage.parse_tool_calling(agent_action.text)
@@ -504,9 +411,9 @@ class LiteAgent(BaseModel):
                 return ToolResult(result=tool_result, result_as_answer=False)
             else:
                 if tool_calling.tool_name.casefold().strip() in [
-                    name.casefold().strip() for name in self.tool_name_to_tool_map
+                    tool.name.casefold().strip() for tool in self._parsed_tools
                 ] or tool_calling.tool_name.casefold().replace("_", " ") in [
-                    name.casefold().strip() for name in self.tool_name_to_tool_map
+                    tool.name.casefold().strip() for tool in self._parsed_tools
                 ]:
                     tool_result = tool_usage.use(tool_calling, agent_action.text)
                     tool = self.tool_name_to_tool_map.get(tool_calling.tool_name)
@@ -515,23 +422,164 @@ class LiteAgent(BaseModel):
                         result=tool_result, result_as_answer=tool.result_as_answer
                     )
                 else:
-                    tool_result = self._i18n.errors("wrong_tool_name").format(
+                    tool_result = self.i18n.errors("wrong_tool_name").format(
                         tool=tool_calling.tool_name,
-                        tools=", ".join([tool.name.casefold() for tool in self.tools]),
+                        tools=", ".join(
+                            [tool.name.casefold() for tool in self._parsed_tools]
+                        ),
                     )
                 return ToolResult(result=tool_result, result_as_answer=False)
 
         except Exception as e:
-            if self.agent:
-                crewai_event_bus.emit(
-                    self,
-                    event=ToolUsageErrorEvent(  # validation error
-                        agent_key=self.agent.key,
-                        agent_role=self.agent.role,
-                        tool_name=agent_action.tool,
-                        tool_args=agent_action.tool_input,
-                        tool_class=agent_action.tool,
-                        error=str(e),
-                    ),
-                )
+            crewai_event_bus.emit(
+                self,
+                event=ToolUsageErrorEvent(
+                    agent_key=self.key,
+                    agent_role=self.role,
+                    tool_name=agent_action.tool,
+                    tool_args=agent_action.tool_input,
+                    tool_class=agent_action.tool,
+                    error=str(e),
+                ),
+            )
             raise e
+
+    def _handle_agent_action(
+        self, formatted_answer: AgentAction, tool_result: ToolResult
+    ) -> Union[AgentAction, AgentFinish]:
+        """Handle the AgentAction, execute tools, and process the results."""
+        formatted_answer.text += f"\nObservation: {tool_result.result}"
+        formatted_answer.result = tool_result.result
+
+        if tool_result.result_as_answer:
+            return AgentFinish(
+                thought="",
+                output=tool_result.result,
+                text=formatted_answer.text,
+            )
+
+        self._show_logs(formatted_answer)
+        return formatted_answer
+
+    def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
+        if self.verbose:
+            agent_role = self.role.split("\n")[0]
+            if isinstance(formatted_answer, AgentAction):
+                thought = re.sub(r"\n+", "\n", formatted_answer.thought)
+                formatted_json = json.dumps(
+                    formatted_answer.tool_input,
+                    indent=2,
+                    ensure_ascii=False,
+                )
+                self._printer.print(
+                    content=f"\n\n\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
+                )
+                if thought and thought != "":
+                    self._printer.print(
+                        content=f"\033[95m## Thought:\033[00m \033[92m{thought}\033[00m"
+                    )
+                self._printer.print(
+                    content=f"\033[95m## Using tool:\033[00m \033[92m{formatted_answer.tool}\033[00m"
+                )
+                self._printer.print(
+                    content=f"\033[95m## Tool Input:\033[00m \033[92m\n{formatted_json}\033[00m"
+                )
+                self._printer.print(
+                    content=f"\033[95m## Tool Output:\033[00m \033[92m\n{formatted_answer.result}\033[00m"
+                )
+            elif isinstance(formatted_answer, AgentFinish):
+                self._printer.print(
+                    content=f"\n\n\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
+                )
+                self._printer.print(
+                    content=f"\033[95m## Final Answer:\033[00m \033[92m\n{formatted_answer.output}\033[00m\n\n"
+                )
+
+    def _append_message(self, text: str, role: str = "assistant") -> None:
+        """Append a message to the message list with the given role."""
+        self._messages.append(format_message_for_llm(text, role=role))
+
+    def _handle_output_parser_exception(self, e: OutputParserException) -> AgentAction:
+        """Handle OutputParserException by updating messages and formatted_answer."""
+        self._messages.append({"role": "user", "content": e.error})
+
+        formatted_answer = AgentAction(
+            text=e.error,
+            tool="",
+            tool_input="",
+            thought="",
+        )
+
+        MAX_ITERATIONS = 3
+        if self._iterations > MAX_ITERATIONS:
+            self._printer.print(
+                content=f"Error parsing LLM output, agent will retry: {e.error}",
+                color="red",
+            )
+
+        return formatted_answer
+
+    def _is_context_length_exceeded(self, exception: Exception) -> bool:
+        """Check if the exception is due to context length exceeding."""
+        return LLMContextLengthExceededException(
+            str(exception)
+        )._is_context_limit_error(str(exception))
+
+    def _handle_context_length(self) -> None:
+        if self.respect_context_window:
+            self._printer.print(
+                content="Context length exceeded. Summarizing content to fit the model context window.",
+                color="yellow",
+            )
+            self._summarize_messages()
+        else:
+            self._printer.print(
+                content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
+                color="red",
+            )
+            raise SystemExit(
+                "Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
+            )
+
+    def _summarize_messages(self) -> None:
+        messages_groups = []
+        for message in self.messages:
+            content = message["content"]
+            cut_size = cast(LLM, self.llm).get_context_window_size()
+            for i in range(0, len(content), cut_size):
+                messages_groups.append(content[i : i + cut_size])
+
+        summarized_contents = []
+        for group in messages_groups:
+            summary = cast(LLM, self.llm).call(
+                [
+                    format_message_for_llm(
+                        self.i18n.slice("summarizer_system_message"), role="system"
+                    ),
+                    format_message_for_llm(
+                        self.i18n.slice("summarize_instruction").format(group=group),
+                    ),
+                ],
+                callbacks=self.callbacks,
+            )
+            summarized_contents.append(summary)
+
+        merged_summary = " ".join(str(content) for content in summarized_contents)
+
+        self.messages = [
+            format_message_for_llm(
+                self.i18n.slice("summary").format(merged_summary=merged_summary)
+            )
+        ]
+
+    def _handle_unknown_error(self, exception: Exception) -> None:
+        """Handle unknown errors by informing the user."""
+        self._printer.print(
+            content="An unknown error occurred. Please check the details below.",
+            color="red",
+        )
+        self._printer.print(
+            content=f"Error details: {exception}",
+            color="red",
+        )
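The `_summarize_messages` helper added above chunks each message by the model's context-window size, summarizes every chunk, then joins the summaries. A standalone sketch of that chunk-and-merge shape, with `summarize` standing in for the LLM call:

from typing import Callable, List


def summarize_messages(
    contents: List[str], window: int, summarize: Callable[[str], str]
) -> str:
    """Split each message into window-sized slices, summarize each, merge."""
    groups: List[str] = []
    for content in contents:
        for i in range(0, len(content), window):
            groups.append(content[i : i + window])
    return " ".join(summarize(group) for group in groups)


# Example with a trivial "summarizer" that keeps the first 10 characters.
merged = summarize_messages(["a" * 25, "b" * 5], window=10, summarize=lambda s: s[:10])
print(merged)  # "aaaaaaaaaa aaaaaaaaaa aaaaa bbbbb"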

View File

@@ -5,14 +5,12 @@ import time
 from difflib import SequenceMatcher
 from json import JSONDecodeError
 from textwrap import dedent
-from typing import Any, Dict, List, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
 
 import json5
 from json_repair import repair_json
 
-from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.tools_handler import ToolsHandler
-from crewai.lite_agent import LiteAgent
 from crewai.task import Task
 from crewai.telemetry import Telemetry
 from crewai.tools import BaseTool
@@ -31,6 +29,10 @@ from crewai.utilities.events.tool_usage_events import (
     ToolValidateInputErrorEvent,
 )
 
+if TYPE_CHECKING:
+    from crewai.agents.agent_builder.base_agent import BaseAgent
+    from crewai.lite_agent import LiteAgent
+
 OPENAI_BIGGER_MODELS = [
     "gpt-4",
     "gpt-4o",
@@ -67,10 +69,10 @@ class ToolUsage:
     def __init__(
         self,
         tools_handler: Optional[ToolsHandler],
-        tools: List[Union[CrewStructuredTool, BaseTool]],
-        task: Task,
+        tools: List[CrewStructuredTool],
+        task: Optional[Task],
         function_calling_llm: Any,
-        agent: Union[BaseAgent, LiteAgent],
+        agent: Union["BaseAgent", "LiteAgent"],
         action: Any,
     ) -> None:
         self._i18n: I18N = agent.i18n
@@ -103,18 +105,21 @@ class ToolUsage:
     def use(
         self, calling: Union[ToolCalling, InstructorToolCalling], tool_string: str
     ) -> str:
+        print("USING A TOOL", calling, tool_string)
         if isinstance(calling, ToolUsageErrorException):
             error = calling.message
             if self.agent.verbose:
                 self._printer.print(content=f"\n\n{error}\n", color="red")
-            self.task.increment_tools_errors()
+            if self.task:
+                self.task.increment_tools_errors()
             return error
 
         try:
             tool = self._select_tool(calling.tool_name)
         except Exception as e:
             error = getattr(e, "message", str(e))
-            self.task.increment_tools_errors()
+            if self.task:
+                self.task.increment_tools_errors()
             if self.agent.verbose:
                 self._printer.print(content=f"\n\n{error}\n", color="red")
             return error
@@ -126,7 +131,8 @@ class ToolUsage:
         except Exception as e:
             error = getattr(e, "message", str(e))
-            self.task.increment_tools_errors()
+            if self.task:
+                self.task.increment_tools_errors()
             if self.agent.verbose:
                 self._printer.print(content=f"\n\n{error}\n", color="red")
             return error
@@ -139,6 +145,8 @@ class ToolUsage:
         tool: CrewStructuredTool,
         calling: Union[ToolCalling, InstructorToolCalling],
     ) -> str:
+        print("USING A TOOL: ", tool)
+        print("Type of tool: ", type(tool))
         if self._check_tool_repeated_usage(calling=calling):  # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
             try:
                 result = self._i18n.errors("task_repeated_usage").format(
@@ -153,7 +161,8 @@ class ToolUsage:
                 return result  # type: ignore # Fix the return type of this function
             except Exception:
-                self.task.increment_tools_errors()
+                if self.task:
+                    self.task.increment_tools_errors()
 
         started_at = time.time()
         from_cache = False
@@ -184,7 +193,8 @@ class ToolUsage:
                 coworker = (
                     calling.arguments.get("coworker") if calling.arguments else None
                 )
-                self.task.increment_delegations(coworker)
+                if self.task:
+                    self.task.increment_delegations(coworker)
 
             if calling.arguments:
                 try:
@@ -211,14 +221,16 @@ class ToolUsage:
                     error = ToolUsageErrorException(
                         f'\n{error_message}.\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
                     ).message
-                    self.task.increment_tools_errors()
+                    if self.task:
+                        self.task.increment_tools_errors()
                     if self.agent.verbose:
                         self._printer.print(
                             content=f"\n\n{error_message}\n", color="red"
                         )
                     return error  # type: ignore # No return value expected
 
-            self.task.increment_tools_errors()
+            if self.task:
+                self.task.increment_tools_errors()
             return self.use(calling=calling, tool_string=tool_string)  # type: ignore # No return value expected
 
         if self.tools_handler:
@@ -266,13 +278,16 @@ class ToolUsage:
         return result  # type: ignore # No return value expected
 
     def _format_result(self, result: Any) -> None:
-        self.task.used_tools += 1
+        if self.task:
+            self.task.used_tools += 1
         if self._should_remember_format():  # type: ignore # "_should_remember_format" of "ToolUsage" does not return a value (it only ever returns None)
             result = self._remember_format(result=result)  # type: ignore # "_remember_format" of "ToolUsage" does not return a value (it only ever returns None)
         return result
 
     def _should_remember_format(self) -> bool:
-        return self.task.used_tools % self._remember_format_after_usages == 0
+        if self.task:
+            return self.task.used_tools % self._remember_format_after_usages == 0
+        return False
 
     def _remember_format(self, result: str) -> None:
         result = str(result)
@@ -308,7 +323,8 @@ class ToolUsage:
                 > 0.85
             ):
                 return tool
-        self.task.increment_tools_errors()
+        if self.task:
+            self.task.increment_tools_errors()
         tool_selection_data = {
             "agent_key": self.agent.key,
             "agent_role": self.agent.role,
@@ -421,7 +437,8 @@ class ToolUsage:
             self._run_attempts += 1
             if self._run_attempts > self._max_parsing_attempts:
                 self._telemetry.tool_usage_error(llm=self.function_calling_llm)
-                self.task.increment_tools_errors()
+                if self.task:
+                    self.task.increment_tools_errors()
                 if self.agent.verbose:
                     self._printer.print(content=f"\n\n{e}\n", color="red")
                 return ToolUsageErrorException(  # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
@@ -452,6 +469,7 @@ class ToolUsage:
             if isinstance(arguments, dict):
                 return arguments
         except (ValueError, SyntaxError):
+            repaired_input = repair_json(tool_input)
             pass  # Continue to the next parsing attempt
 
         # Attempt 3: Parse as JSON5
@@ -530,7 +548,7 @@ class ToolUsage:
"agent_key": self.agent.key, "agent_key": self.agent.key,
"agent_role": (self.agent._original_role or self.agent.role), "agent_role": (self.agent._original_role or self.agent.role),
"run_attempts": self._run_attempts, "run_attempts": self._run_attempts,
"delegations": self.task.delegations, "delegations": self.task.delegations if self.task else 0,
"tool_name": tool.name, "tool_name": tool.name,
"tool_args": tool_calling.arguments, "tool_args": tool_calling.arguments,
"tool_class": tool.__class__.__name__, "tool_class": tool.__class__.__name__,

View File

@@ -1,6 +1,5 @@
-from typing import Any, Callable, Dict, List, Optional, Union
+from typing import Any, Callable, Dict, List, Optional, Sequence, Union
 
-from crewai.agent import Agent
 from crewai.agents.parser import (
     FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
     AgentAction,
@@ -8,45 +7,34 @@ from crewai.agents.parser import (
     CrewAgentParser,
     OutputParserException,
 )
-from crewai.lite_agent import ToolResult
 from crewai.llm import LLM
 from crewai.tools import BaseTool as CrewAITool
 from crewai.tools.base_tool import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool
-from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
-from crewai.utilities.events import crewai_event_bus
-from crewai.utilities.events.tool_usage_events import (
-    ToolUsageErrorEvent,
-    ToolUsageStartedEvent,
-)
 from crewai.utilities.i18n import I18N
 from crewai.utilities.printer import Printer
 
 
-def parse_tools(tools: List[BaseTool]) -> List[Union[CrewStructuredTool, BaseTool]]:
+def parse_tools(tools: List[BaseTool]) -> List[CrewStructuredTool]:
     """Parse tools to be used for the task."""
     tools_list = []
-    try:
-        for tool in tools:
-            if isinstance(tool, CrewAITool):
-                tools_list.append(tool.to_structured_tool())
-            else:
-                tools_list.append(tool)
-    except ModuleNotFoundError:
-        tools_list = []
-        for tool in tools:
-            tools_list.append(tool)
+    for tool in tools:
+        if isinstance(tool, CrewAITool):
+            tools_list.append(tool.to_structured_tool())
+        else:
+            raise ValueError("Tool is not a CrewStructuredTool or BaseTool")
     return tools_list
 
 
-def get_tool_names(tools: List[Union[CrewStructuredTool, BaseTool]]) -> str:
+def get_tool_names(tools: Sequence[Union[CrewStructuredTool, BaseTool]]) -> str:
     """Get the names of the tools."""
     return ", ".join([t.name for t in tools])
 
 
 def render_text_description_and_args(
-    tools: List[Union[CrewStructuredTool, BaseTool]]
+    tools: Sequence[Union[CrewStructuredTool, BaseTool]]
 ) -> str:
     """Render the tool name, description, and args in plain text.