Merge remote-tracking branch 'origin/main' into devin/1744191265-fix-taskoutput-import

This commit is contained in:
Lucas Gomide
2025-04-09 11:21:16 -03:00
330 changed files with 74765 additions and 46500 deletions

View File

@@ -6,6 +6,7 @@ from crewai.crews.crew_output import CrewOutput
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
@@ -16,15 +17,16 @@ warnings.filterwarnings(
category=UserWarning,
module="pydantic.main",
)
__version__ = "0.86.0"
__version__ = "0.108.0"
__all__ = [
"Agent",
"Crew",
"CrewOutput",
"Flow",
"Knowledge",
"LLM",
"Process",
"Task",
"LLM",
"BaseLLM",
"Flow",
"Knowledge",
"TaskOutput",
]
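
With BaseLLM now imported and exported at the package root, custom model wrappers can be passed anywhere an LLM is accepted. A minimal sketch, assuming BaseLLM can be subclassed by implementing the call method the executor below relies on (messages as role/content dicts, optional callbacks) and that its constructor takes a model identifier; the EchoLLM class is purely illustrative:

from crewai import Agent, BaseLLM

class EchoLLM(BaseLLM):
    """Toy wrapper that never calls a provider; it just echoes the prompt."""

    def __init__(self):
        # Assumption: BaseLLM wants a model identifier at construction.
        super().__init__(model="echo")

    def call(self, messages, callbacks=None) -> str:
        # A real subclass would send `messages` to an API and return its reply.
        return messages[-1]["content"]

agent = Agent(
    role="Researcher",
    goal="Summarize findings",
    backstory="Illustrative only",
    llm=EchoLLM(),  # valid now that Agent.llm accepts InstanceOf[BaseLLM]
)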

View File

@@ -1,44 +1,41 @@
import os
import re
import shutil
import subprocess
from typing import Any, Dict, List, Literal, Optional, Union
from typing import Any, Dict, List, Literal, Optional, Sequence, Union
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.cli.constants import ENV_VARS, LITELLM_PARAMS
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
from crewai.llm import LLM
from crewai.llm import BaseLLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.security import Fingerprint
from crewai.task import Task
from crewai.tools import BaseTool
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.tools.base_tool import Tool
from crewai.utilities import Converter, Prompts
from crewai.utilities.agent_utils import (
get_tool_names,
parse_tools,
render_text_description_and_args,
)
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.converter import generate_model_description
from crewai.utilities.events.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentExecutionStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler
agentops = None
try:
import agentops # type: ignore # Name "agentops" is already defined
from agentops import track_agent # type: ignore
except ImportError:
def track_agent():
def noop(f):
return f
return noop
@track_agent()
class Agent(BaseAgent):
"""Represents an agent in a system.
@@ -55,13 +52,13 @@ class Agent(BaseAgent):
llm: The language model that will run the agent.
function_calling_llm: The language model that will handle the tool calling for this agent; it overrides the crew's function_calling_llm.
max_iter: Maximum number of iterations for an agent to execute a task.
memory: Whether the agent should have memory.
max_rpm: Maximum number of requests per minute to respect during the agent's execution.
verbose: Whether the agent execution should be in verbose mode.
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
tools: Tools at the agent's disposal.
step_callback: Callback to be executed after each step of the agent execution.
knowledge_sources: Knowledge sources for the agent.
embedder: Embedder configuration for the agent.
"""
_times_executed: int = PrivateAttr(default=0)
@@ -71,9 +68,6 @@ class Agent(BaseAgent):
)
agent_ops_agent_name: str = None # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
agent_ops_agent_id: str = None # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
cache_handler: InstanceOf[CacheHandler] = Field(
default=None, description="An instance of the CacheHandler class."
)
step_callback: Optional[Any] = Field(
default=None,
description="Callback to be executed after each step of the agent execution.",
@@ -82,10 +76,10 @@ class Agent(BaseAgent):
default=True,
description="Use system prompt for the agent.",
)
llm: Union[str, InstanceOf[LLM], Any] = Field(
llm: Union[str, InstanceOf[BaseLLM], Any] = Field(
description="Language model that will run the agent.", default=None
)
function_calling_llm: Optional[Any] = Field(
function_calling_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
description="Language model that will run the agent.", default=None
)
system_template: Optional[str] = Field(
@@ -97,9 +91,6 @@ class Agent(BaseAgent):
response_template: Optional[str] = Field(
default=None, description="Response format for the agent."
)
tools_results: Optional[List[Any]] = Field(
default=[], description="Results of the tools used by the agent."
)
allow_code_execution: Optional[bool] = Field(
default=False, description="Enable code execution for the agent."
)
@@ -107,10 +98,6 @@ class Agent(BaseAgent):
default=True,
description="Keep messages under the context window size by summarizing content.",
)
max_iter: int = Field(
default=20,
description="Maximum number of iterations for an agent to execute a task before giving it's best answer",
)
max_retry_limit: int = Field(
default=2,
description="Maximum number of retries for an agent to execute a task when an error occurs.",
@@ -123,105 +110,20 @@ class Agent(BaseAgent):
default="safe",
description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
)
embedder_config: Optional[Dict[str, Any]] = Field(
embedder: Optional[Dict[str, Any]] = Field(
default=None,
description="Embedder configuration for the agent.",
)
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
default=None,
description="Knowledge sources for the agent.",
)
_knowledge: Optional[Knowledge] = PrivateAttr(
default=None,
)
@model_validator(mode="after")
def post_init_setup(self):
self._set_knowledge()
self.agent_ops_agent_name = self.role
unaccepted_attributes = [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_REGION_NAME",
]
# Handle different cases for self.llm
if isinstance(self.llm, str):
# If it's a string, create an LLM instance
self.llm = LLM(model=self.llm)
elif isinstance(self.llm, LLM):
# If it's already an LLM instance, keep it as is
pass
elif self.llm is None:
# Determine the model name from environment variables or use default
model_name = (
os.environ.get("OPENAI_MODEL_NAME")
or os.environ.get("MODEL")
or "gpt-4o-mini"
)
llm_params = {"model": model_name}
api_base = os.environ.get("OPENAI_API_BASE") or os.environ.get(
"OPENAI_BASE_URL"
)
if api_base:
llm_params["base_url"] = api_base
set_provider = model_name.split("/")[0] if "/" in model_name else "openai"
# Iterate over all environment variables to find matching API keys or use defaults
for provider, env_vars in ENV_VARS.items():
if provider == set_provider:
for env_var in env_vars:
# Check if the environment variable is set
key_name = env_var.get("key_name")
if key_name and key_name not in unaccepted_attributes:
env_value = os.environ.get(key_name)
if env_value:
key_name = key_name.lower()
for pattern in LITELLM_PARAMS:
if pattern in key_name:
key_name = pattern
break
llm_params[key_name] = env_value
# Check for default values if the environment variable is not set
elif env_var.get("default", False):
for key, value in env_var.items():
if key not in ["prompt", "key_name", "default"]:
# Only add default if the key is already set in os.environ
if key in os.environ:
llm_params[key] = value
self.llm = LLM(**llm_params)
else:
# For any other type, attempt to extract relevant attributes
llm_params = {
"model": getattr(self.llm, "model_name", None)
or getattr(self.llm, "deployment_name", None)
or str(self.llm),
"temperature": getattr(self.llm, "temperature", None),
"max_tokens": getattr(self.llm, "max_tokens", None),
"logprobs": getattr(self.llm, "logprobs", None),
"timeout": getattr(self.llm, "timeout", None),
"max_retries": getattr(self.llm, "max_retries", None),
"api_key": getattr(self.llm, "api_key", None),
"base_url": getattr(self.llm, "base_url", None),
"organization": getattr(self.llm, "organization", None),
}
# Remove None values to avoid passing unnecessary parameters
llm_params = {k: v for k, v in llm_params.items() if v is not None}
self.llm = LLM(**llm_params)
# Similar handling for function_calling_llm
if self.function_calling_llm:
if isinstance(self.function_calling_llm, str):
self.function_calling_llm = LLM(model=self.function_calling_llm)
elif not isinstance(self.function_calling_llm, LLM):
self.function_calling_llm = LLM(
model=getattr(self.function_calling_llm, "model_name", None)
or getattr(self.function_calling_llm, "deployment_name", None)
or str(self.function_calling_llm)
)
self.llm = create_llm(self.llm)
if self.function_calling_llm and not isinstance(
self.function_calling_llm, BaseLLM
):
self.function_calling_llm = create_llm(self.function_calling_llm)
if not self.agent_executor:
self._setup_agent_executor()
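
The roughly ninety lines of provider plumbing removed above now live behind create_llm, which normalizes everything Agent.llm accepts. A hedged sketch of the three call shapes, assuming create_llm preserves the removed code's semantics (a string becomes a model name, None falls back to OPENAI_MODEL_NAME/MODEL or "gpt-4o-mini", and an unknown object has model_name, temperature, etc. extracted); the FakeClient class is hypothetical:

from crewai import Agent

# 1. A plain model string: normalized into an LLM instance.
a1 = Agent(role="r", goal="g", backstory="b", llm="gpt-4o-mini")

# 2. No llm at all: environment variables (or the default model) decide.
a2 = Agent(role="r", goal="g", backstory="b")

# 3. A third-party client object: relevant attributes are extracted,
#    mirroring the getattr() chain in the removed block above.
class FakeClient:
    model_name = "gpt-4o-mini"
    temperature = 0.2

a3 = Agent(role="r", goal="g", backstory="b", llm=FakeClient())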
@@ -236,17 +138,20 @@ class Agent(BaseAgent):
self.cache_handler = CacheHandler()
self.set_cache_handler(self.cache_handler)
def _set_knowledge(self):
def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
try:
if self.embedder is None and crew_embedder:
self.embedder = crew_embedder
if self.knowledge_sources:
knowledge_agent_name = f"{self.role.replace(' ', '_')}"
if isinstance(self.knowledge_sources, list) and all(
isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
):
self._knowledge = Knowledge(
self.knowledge = Knowledge(
sources=self.knowledge_sources,
embedder_config=self.embedder_config,
collection_name=knowledge_agent_name,
embedder=self.embedder,
collection_name=self.role,
storage=self.knowledge_storage or None,
)
except (TypeError, ValueError) as e:
raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")
@@ -280,13 +185,15 @@ class Agent(BaseAgent):
if task.output_json:
# schema = json.dumps(task.output_json, indent=2)
schema = generate_model_description(task.output_json)
task_prompt += "\n" + self.i18n.slice(
"formatted_task_instructions"
).format(output_format=schema)
elif task.output_pydantic:
schema = generate_model_description(task.output_pydantic)
task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
output_format=schema
)
task_prompt += "\n" + self.i18n.slice(
"formatted_task_instructions"
).format(output_format=schema)
if context:
task_prompt = self.i18n.slice("task_with_context").format(
@@ -300,13 +207,14 @@ class Agent(BaseAgent):
self.crew._long_term_memory,
self.crew._entity_memory,
self.crew._user_memory,
self.crew._external_memory,
)
memory = contextual_memory.build_context_for_task(task, context)
if memory.strip() != "":
task_prompt += self.i18n.slice("memory").format(memory=memory)
if self._knowledge:
agent_knowledge_snippets = self._knowledge.query([task.prompt()])
if self.knowledge:
agent_knowledge_snippets = self.knowledge.query([task.prompt()])
if agent_knowledge_snippets:
agent_knowledge_context = extract_knowledge_context(
agent_knowledge_snippets
@@ -330,6 +238,15 @@ class Agent(BaseAgent):
task_prompt = self._use_trained_data(task_prompt=task_prompt)
try:
crewai_event_bus.emit(
self,
event=AgentExecutionStartedEvent(
agent=self,
tools=self.tools,
task_prompt=task_prompt,
task=task,
),
)
result = self.agent_executor.invoke(
{
"input": task_prompt,
@@ -339,8 +256,27 @@ class Agent(BaseAgent):
}
)["output"]
except Exception as e:
if e.__class__.__module__.startswith("litellm"):
# Do not retry on litellm errors
crewai_event_bus.emit(
self,
event=AgentExecutionErrorEvent(
agent=self,
task=task,
error=str(e),
),
)
raise e
self._times_executed += 1
if self._times_executed > self.max_retry_limit:
crewai_event_bus.emit(
self,
event=AgentExecutionErrorEvent(
agent=self,
task=task,
error=str(e),
),
)
raise e
result = self.execute_task(task, context, tools)
@@ -353,7 +289,10 @@ class Agent(BaseAgent):
for tool_result in self.tools_results: # type: ignore # Item "None" of "list[Any] | None" has no attribute "__iter__" (not iterable)
if tool_result.get("result_as_answer", False):
result = tool_result["result"]
crewai_event_bus.emit(
self,
event=AgentExecutionCompletedEvent(agent=self, task=task, output=result),
)
return result
def create_agent_executor(
@@ -364,12 +303,12 @@ class Agent(BaseAgent):
Returns:
An instance of the CrewAgentExecutor class.
"""
tools = tools or self.tools or []
parsed_tools = self._parse_tools(tools)
raw_tools: List[BaseTool] = tools or self.tools or []
parsed_tools = parse_tools(raw_tools)
prompt = Prompts(
agent=self,
tools=tools,
has_tools=len(raw_tools) > 0,
i18n=self.i18n,
use_system_prompt=self.use_system_prompt,
system_template=self.system_template,
@@ -391,12 +330,12 @@ class Agent(BaseAgent):
crew=self.crew,
tools=parsed_tools,
prompt=prompt,
original_tools=tools,
original_tools=raw_tools,
stop_words=stop_words,
max_iter=self.max_iter,
tools_handler=self.tools_handler,
tools_names=self.__tools_names(parsed_tools),
tools_description=self._render_text_description_and_args(parsed_tools),
tools_names=get_tool_names(parsed_tools),
tools_description=render_text_description_and_args(parsed_tools),
step_callback=self.step_callback,
function_calling_llm=self.function_calling_llm,
respect_context_window=self.respect_context_window,
@@ -411,13 +350,14 @@ class Agent(BaseAgent):
tools = agent_tools.tools()
return tools
def get_multimodal_tools(self) -> List[Tool]:
def get_multimodal_tools(self) -> Sequence[BaseTool]:
from crewai.tools.agent_tools.add_image_tool import AddImageTool
return [AddImageTool()]
def get_code_execution_tools(self):
try:
from crewai_tools import CodeInterpreterTool
from crewai_tools import CodeInterpreterTool # type: ignore
# Set the unsafe_mode based on the code_execution_mode attribute
unsafe_mode = self.code_execution_mode == "unsafe"
@@ -430,25 +370,6 @@ class Agent(BaseAgent):
def get_output_converter(self, llm, text, model, instructions):
return Converter(llm=llm, text=text, model=model, instructions=instructions)
def _parse_tools(self, tools: List[Any]) -> List[Any]: # type: ignore
"""Parse tools to be used for the task."""
tools_list = []
try:
# Tentatively import BaseTool from crewai.tools as CrewAITool
from crewai.tools import BaseTool as CrewAITool
for tool in tools:
if isinstance(tool, CrewAITool):
tools_list.append(tool.to_structured_tool())
else:
tools_list.append(tool)
except ModuleNotFoundError:
tools_list = []
for tool in tools:
tools_list.append(tool)
return tools_list
def _training_handler(self, task_prompt: str) -> str:
"""Handle training data for the agent task prompt to improve output on Training."""
if data := CrewTrainingHandler(TRAINING_DATA_FILE).load():
@@ -494,23 +415,6 @@ class Agent(BaseAgent):
return description
def _render_text_description_and_args(self, tools: List[BaseTool]) -> str:
"""Render the tool name, description, and args in plain text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search, args: {"query": {"type": "string"}}
calculator: This tool is used for math, \
args: {"expression": {"type": "string"}}
"""
tool_strings = []
for tool in tools:
tool_strings.append(tool.description)
return "\n".join(tool_strings)
def _validate_docker_installation(self) -> None:
"""Check if Docker is installed and running."""
if not shutil.which("docker"):
@@ -530,9 +434,18 @@ class Agent(BaseAgent):
f"Docker is not running. Please start Docker to use code execution with agent: {self.role}"
)
@staticmethod
def __tools_names(tools) -> str:
return ", ".join([t.name for t in tools])
def __repr__(self):
return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
@property
def fingerprint(self) -> Fingerprint:
"""
Get the agent's fingerprint.
Returns:
Fingerprint: The agent's fingerprint
"""
return self.security_config.fingerprint
def set_fingerprint(self, fingerprint: Fingerprint):
self.security_config.fingerprint = fingerprint
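
The fingerprint surface added above can be read from any agent and transplanted with set_fingerprint. A small sketch, assuming Fingerprint values compare by content (they come from the agent's SecurityConfig by default):

from crewai import Agent

agent = Agent(role="Auditor", goal="Trace provenance", backstory="b")
print(agent.fingerprint)  # provided by security_config's default factory

restored = Agent(role="Auditor", goal="Trace provenance", backstory="b")
restored.set_fingerprint(agent.fingerprint)
assert restored.fingerprint == agent.fingerprint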

View File

@@ -2,7 +2,7 @@ import uuid
from abc import ABC, abstractmethod
from copy import copy as shallow_copy
from hashlib import md5
from typing import Any, Dict, List, Optional, TypeVar
from typing import Any, Callable, Dict, List, Optional, TypeVar
from pydantic import (
UUID4,
@@ -18,10 +18,14 @@ from pydantic_core import PydanticCustomError
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools import BaseTool
from crewai.tools.base_tool import Tool
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.security.security_config import SecurityConfig
from crewai.tools.base_tool import BaseTool, Tool
from crewai.utilities import I18N, Logger, RPMController
from crewai.utilities.config import process_config
from crewai.utilities.converter import Converter
from crewai.utilities.string_utils import interpolate_only
T = TypeVar("T", bound="BaseAgent")
@@ -40,7 +44,7 @@ class BaseAgent(ABC, BaseModel):
max_rpm (Optional[int]): Maximum number of requests per minute for the agent execution.
allow_delegation (bool): Allow delegation of tasks to agents.
tools (Optional[List[Any]]): Tools at the agent's disposal.
max_iter (Optional[int]): Maximum iterations for an agent to execute a task.
max_iter (int): Maximum iterations for an agent to execute a task.
agent_executor (InstanceOf): An instance of the CrewAgentExecutor class.
llm (Any): Language model that will run the agent.
crew (Any): Crew to which the agent belongs.
@@ -48,6 +52,9 @@ class BaseAgent(ABC, BaseModel):
cache_handler (InstanceOf[CacheHandler]): An instance of the CacheHandler class.
tools_handler (InstanceOf[ToolsHandler]): An instance of the ToolsHandler class.
max_tokens: Maximum number of tokens for the agent to generate in a response.
knowledge_sources: Knowledge sources for the agent.
knowledge_storage: Custom knowledge storage for the agent.
security_config: Security configuration for the agent, including fingerprinting.
Methods:
@@ -65,8 +72,6 @@ class BaseAgent(ABC, BaseModel):
Interpolate inputs into the agent description and backstory.
set_cache_handler(cache_handler: CacheHandler) -> None:
Set the cache handler for the agent.
increment_formatting_errors() -> None:
Increment formatting errors.
copy() -> "BaseAgent":
Create a copy of the agent.
set_rpm_controller(rpm_controller: RPMController) -> None:
@@ -84,9 +89,6 @@ class BaseAgent(ABC, BaseModel):
_original_backstory: Optional[str] = PrivateAttr(default=None)
_token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
formatting_errors: int = Field(
default=0, description="Number of formatting errors."
)
role: str = Field(description="Role of the agent")
goal: str = Field(description="Objective of the agent")
backstory: str = Field(description="Backstory of the agent")
@@ -107,10 +109,10 @@ class BaseAgent(ABC, BaseModel):
default=False,
description="Enable agent to delegate and ask questions among each other.",
)
tools: Optional[List[Any]] = Field(
tools: Optional[List[BaseTool]] = Field(
default_factory=list, description="Tools at agents' disposal"
)
max_iter: Optional[int] = Field(
max_iter: int = Field(
default=25, description="Maximum iterations for an agent to execute a task"
)
agent_executor: InstanceOf = Field(
@@ -121,15 +123,37 @@ class BaseAgent(ABC, BaseModel):
)
crew: Any = Field(default=None, description="Crew to which the agent belongs.")
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
cache_handler: InstanceOf[CacheHandler] = Field(
cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
default=None, description="An instance of the CacheHandler class."
)
tools_handler: InstanceOf[ToolsHandler] = Field(
default=None, description="An instance of the ToolsHandler class."
default_factory=ToolsHandler,
description="An instance of the ToolsHandler class.",
)
tools_results: List[Dict[str, Any]] = Field(
default=[], description="Results of the tools used by the agent."
)
max_tokens: Optional[int] = Field(
default=None, description="Maximum number of tokens for the agent's execution."
)
knowledge: Optional[Knowledge] = Field(
default=None, description="Knowledge for the agent."
)
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
default=None,
description="Knowledge sources for the agent.",
)
knowledge_storage: Optional[Any] = Field(
default=None,
description="Custom knowledge storage for the agent.",
)
security_config: SecurityConfig = Field(
default_factory=SecurityConfig,
description="Security configuration for the agent, including fingerprinting.",
)
callbacks: List[Callable] = Field(
default=[], description="Callbacks to be used for the agent"
)
@model_validator(mode="before")
@classmethod
@@ -183,6 +207,10 @@ class BaseAgent(ABC, BaseModel):
if not self._token_process:
self._token_process = TokenProcess()
# Initialize security_config if not provided
if self.security_config is None:
self.security_config = SecurityConfig()
return self
@field_validator("id", mode="before")
@@ -227,10 +255,6 @@ class BaseAgent(ABC, BaseModel):
def create_agent_executor(self, tools=None) -> None:
pass
@abstractmethod
def _parse_tools(self, tools: List[BaseTool]) -> List[BaseTool]:
pass
@abstractmethod
def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]:
"""Set the task tools that init BaseAgenTools class."""
@@ -239,7 +263,7 @@ class BaseAgent(ABC, BaseModel):
@abstractmethod
def get_output_converter(
self, llm: Any, text: str, model: type[BaseModel] | None, instructions: str
):
) -> Converter:
"""Get the converter class for the agent to create json/pydantic outputs."""
pass
@@ -256,13 +280,44 @@ class BaseAgent(ABC, BaseModel):
"tools_handler",
"cache_handler",
"llm",
"knowledge_sources",
"knowledge_storage",
"knowledge",
}
# Copy llm and clear callbacks
# Copy llm
existing_llm = shallow_copy(self.llm)
copied_knowledge = shallow_copy(self.knowledge)
copied_knowledge_storage = shallow_copy(self.knowledge_storage)
# Properly copy knowledge sources if they exist
existing_knowledge_sources = None
if self.knowledge_sources:
# Create a shared storage instance for all knowledge sources
shared_storage = (
self.knowledge_sources[0].storage if self.knowledge_sources else None
)
existing_knowledge_sources = []
for source in self.knowledge_sources:
copied_source = (
source.model_copy()
if hasattr(source, "model_copy")
else shallow_copy(source)
)
# Ensure all copied sources use the same storage instance
copied_source.storage = shared_storage
existing_knowledge_sources.append(copied_source)
copied_data = self.model_dump(exclude=exclude)
copied_data = {k: v for k, v in copied_data.items() if v is not None}
copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools)
copied_agent = type(self)(
**copied_data,
llm=existing_llm,
tools=self.tools,
knowledge_sources=existing_knowledge_sources,
knowledge=copied_knowledge,
knowledge_storage=copied_knowledge_storage,
)
return copied_agent
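
The copying rules above are subtle: knowledge sources are copied individually, but every copy is pointed at the first source's storage. A standalone sketch of just that rule, using a stand-in Source class rather than the real BaseKnowledgeSource:

from copy import copy as shallow_copy

class Source:
    """Stand-in for BaseKnowledgeSource; only `storage` matters here."""
    def __init__(self, storage):
        self.storage = storage

def copy_sources(sources):
    # Mirrors the loop above: the first source's storage is shared by all copies.
    shared_storage = sources[0].storage if sources else None
    copied = []
    for source in sources:
        copied_source = (
            source.model_copy()
            if hasattr(source, "model_copy")
            else shallow_copy(source)
        )
        copied_source.storage = shared_storage
        copied.append(copied_source)
    return copied

a, b = Source("vector-db"), Source("other-db")
assert all(s.storage == "vector-db" for s in copy_sources([a, b]))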
@@ -276,9 +331,15 @@ class BaseAgent(ABC, BaseModel):
self._original_backstory = self.backstory
if inputs:
self.role = self._original_role.format(**inputs)
self.goal = self._original_goal.format(**inputs)
self.backstory = self._original_backstory.format(**inputs)
self.role = interpolate_only(
input_string=self._original_role, inputs=inputs
)
self.goal = interpolate_only(
input_string=self._original_goal, inputs=inputs
)
self.backstory = interpolate_only(
input_string=self._original_backstory, inputs=inputs
)
def set_cache_handler(self, cache_handler: CacheHandler) -> None:
"""Set the cache handler for the agent.
@@ -292,9 +353,6 @@ class BaseAgent(ABC, BaseModel):
self.tools_handler.cache = cache_handler
self.create_agent_executor()
def increment_formatting_errors(self) -> None:
self.formatting_errors += 1
def set_rpm_controller(self, rpm_controller: RPMController) -> None:
"""Set the rpm controller for the agent.
@@ -304,3 +362,6 @@ class BaseAgent(ABC, BaseModel):
if not self._rpm_controller:
self._rpm_controller = rpm_controller
self.create_agent_executor()
def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
pass
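
interpolate_inputs above now routes through interpolate_only instead of str.format. Assuming the helper does what its name suggests, substituting only the provided keys and leaving other braces untouched, the practical difference looks like this (hypothetical strings):

from crewai.utilities.string_utils import interpolate_only

text = 'Summarize {topic} and reply with {"status": "ok"}'
# str.format would raise KeyError on the JSON braces; interpolate_only should not.
print(interpolate_only(input_string=text, inputs={"topic": "LLM routing"}))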

View File

@@ -1,5 +1,5 @@
import time
from typing import TYPE_CHECKING, Optional
from typing import TYPE_CHECKING
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
@@ -15,19 +15,14 @@ if TYPE_CHECKING:
class CrewAgentExecutorMixin:
crew: Optional["Crew"]
agent: Optional["BaseAgent"]
task: Optional["Task"]
crew: "Crew"
agent: "BaseAgent"
task: "Task"
iterations: int
have_forced_answer: bool
max_iter: int
_i18n: I18N
_printer: Printer = Printer()
def _should_force_answer(self) -> bool:
"""Determine if a forced answer is required based on iteration count."""
return (self.iterations >= self.max_iter) and not self.have_forced_answer
def _create_short_term_memory(self, output) -> None:
"""Create and save a short-term memory item if conditions are met."""
if (
@@ -52,6 +47,27 @@ class CrewAgentExecutorMixin:
print(f"Failed to add to short term memory: {e}")
pass
def _create_external_memory(self, output) -> None:
"""Create and save a external-term memory item if conditions are met."""
if (
self.crew
and self.agent
and self.task
and hasattr(self.crew, "_external_memory")
and self.crew._external_memory
):
try:
self.crew._external_memory.save(
value=output.text,
metadata={
"description": self.task.description,
},
agent=self.agent.role,
)
except Exception as e:
print(f"Failed to add to external memory: {e}")
pass
def _create_long_term_memory(self, output) -> None:
"""Create and save long-term and entity memory items based on evaluation."""
if (
@@ -100,18 +116,34 @@ class CrewAgentExecutorMixin:
pass
def _ask_human_input(self, final_answer: str) -> str:
"""Prompt human input for final decision making."""
"""Prompt human input with mode-appropriate messaging."""
self._printer.print(
content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"
)
self._printer.print(
content=(
# Training mode prompt (single iteration)
if self.crew and getattr(self.crew, "_train", False):
prompt = (
"\n\n=====\n"
"## Please provide feedback on the Final Result and the Agent's actions. "
"Respond with 'looks good' or a similar phrase when you're satisfied.\n"
"## TRAINING MODE: Provide feedback to improve the agent's performance.\n"
"This will be used to train better versions of the agent.\n"
"Please provide detailed feedback about the result quality and reasoning process.\n"
"=====\n"
),
color="bold_yellow",
)
return input()
)
# Regular human-in-the-loop prompt (multiple iterations)
else:
prompt = (
"\n\n=====\n"
"## HUMAN FEEDBACK: Provide feedback on the Final Result and Agent's actions.\n"
"Please follow these guidelines:\n"
" - If you are happy with the result, simply hit Enter without typing anything.\n"
" - Otherwise, provide specific improvement requests.\n"
" - You can provide multiple rounds of feedback until satisfied.\n"
"=====\n"
)
self._printer.print(content=prompt, color="bold_yellow")
response = input()
if response.strip() != "":
self._printer.print(content="\nProcessing your feedback...", color="cyan")
return response

View File

@@ -25,17 +25,17 @@ class OutputConverter(BaseModel, ABC):
llm: Any = Field(description="The language model to be used to convert the text.")
model: Any = Field(description="The model to be used to convert the text.")
instructions: str = Field(description="Conversion instructions to the LLM.")
max_attempts: Optional[int] = Field(
max_attempts: int = Field(
description="Max number of attempts to try to get the output formatted.",
default=3,
)
@abstractmethod
def to_pydantic(self, current_attempt=1):
def to_pydantic(self, current_attempt=1) -> BaseModel:
"""Convert text to pydantic."""
pass
@abstractmethod
def to_json(self, current_attempt=1):
def to_json(self, current_attempt=1) -> dict:
"""Convert text to json."""
pass

View File

@@ -2,25 +2,26 @@ from crewai.types.usage_metrics import UsageMetrics
class TokenProcess:
total_tokens: int = 0
prompt_tokens: int = 0
cached_prompt_tokens: int = 0
completion_tokens: int = 0
successful_requests: int = 0
def __init__(self) -> None:
self.total_tokens: int = 0
self.prompt_tokens: int = 0
self.cached_prompt_tokens: int = 0
self.completion_tokens: int = 0
self.successful_requests: int = 0
def sum_prompt_tokens(self, tokens: int):
self.prompt_tokens = self.prompt_tokens + tokens
self.total_tokens = self.total_tokens + tokens
def sum_prompt_tokens(self, tokens: int) -> None:
self.prompt_tokens += tokens
self.total_tokens += tokens
def sum_completion_tokens(self, tokens: int):
self.completion_tokens = self.completion_tokens + tokens
self.total_tokens = self.total_tokens + tokens
def sum_completion_tokens(self, tokens: int) -> None:
self.completion_tokens += tokens
self.total_tokens += tokens
def sum_cached_prompt_tokens(self, tokens: int):
self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
def sum_cached_prompt_tokens(self, tokens: int) -> None:
self.cached_prompt_tokens += tokens
def sum_successful_requests(self, requests: int):
self.successful_requests = self.successful_requests + requests
def sum_successful_requests(self, requests: int) -> None:
self.successful_requests += requests
def get_summary(self) -> UsageMetrics:
return UsageMetrics(
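
TokenProcess now initializes its counters in __init__ instead of declaring them as class attributes; the accumulation behavior is unchanged and easy to verify:

from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess

tp = TokenProcess()
tp.sum_prompt_tokens(120)
tp.sum_completion_tokens(40)
tp.sum_cached_prompt_tokens(16)   # tracked separately; not added to total
tp.sum_successful_requests(1)
assert tp.total_tokens == 160      # prompt + completion only

assert TokenProcess().total_tokens == 0  # fresh instances start clean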

View File

@@ -1,35 +1,40 @@
import json
import re
from dataclasses import dataclass
from typing import Any, Dict, List, Union
from typing import Any, Callable, Dict, List, Optional, Union
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
from crewai.agents.parser import (
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
AgentAction,
AgentFinish,
CrewAgentParser,
OutputParserException,
)
from crewai.agents.tools_handler import ToolsHandler
from crewai.llm import BaseLLM
from crewai.tools.base_tool import BaseTool
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.tools.tool_types import ToolResult
from crewai.utilities import I18N, Printer
from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException,
from crewai.utilities.agent_utils import (
enforce_rpm_limit,
format_message_for_llm,
get_llm_response,
handle_agent_action_core,
handle_context_length,
handle_max_iterations_exceeded,
handle_output_parser_exception,
handle_unknown_error,
has_reached_max_iterations,
is_context_length_exceeded,
process_llm_response,
show_agent_logs,
)
from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
from crewai.utilities.logger import Logger
from crewai.utilities.tool_utils import execute_tool_and_check_finality
from crewai.utilities.training_handler import CrewTrainingHandler
@dataclass
class ToolResult:
result: Any
result_as_answer: bool
class CrewAgentExecutor(CrewAgentExecutorMixin):
_logger: Logger = Logger()
@@ -41,7 +46,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
agent: BaseAgent,
prompt: dict[str, str],
max_iter: int,
tools: List[BaseTool],
tools: List[CrewStructuredTool],
tools_names: str,
stop_words: List[str],
tools_description: str,
@@ -50,11 +55,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
original_tools: List[Any] = [],
function_calling_llm: Any = None,
respect_context_window: bool = False,
request_within_rpm_limit: Any = None,
request_within_rpm_limit: Optional[Callable[[], bool]] = None,
callbacks: List[Any] = [],
):
self._i18n: I18N = I18N()
self.llm = llm
self.llm: BaseLLM = llm
self.task = task
self.agent = agent
self.crew = crew
@@ -77,224 +82,210 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self.messages: List[Dict[str, str]] = []
self.iterations = 0
self.log_error_after = 3
self.have_forced_answer = False
self.tool_name_to_tool_map: Dict[str, BaseTool] = {
self.tool_name_to_tool_map: Dict[str, Union[CrewStructuredTool, BaseTool]] = {
tool.name: tool for tool in self.tools
}
if self.llm.stop:
self.llm.stop = list(set(self.llm.stop + self.stop))
else:
self.llm.stop = self.stop
existing_stop = self.llm.stop or []
self.llm.stop = list(
set(
existing_stop + self.stop
if isinstance(existing_stop, list)
else self.stop
)
)
def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
if "system" in self.prompt:
system_prompt = self._format_prompt(self.prompt.get("system", ""), inputs)
user_prompt = self._format_prompt(self.prompt.get("user", ""), inputs)
self.messages.append(self._format_msg(system_prompt, role="system"))
self.messages.append(self._format_msg(user_prompt))
self.messages.append(format_message_for_llm(system_prompt, role="system"))
self.messages.append(format_message_for_llm(user_prompt))
else:
user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
self.messages.append(self._format_msg(user_prompt))
self.messages.append(format_message_for_llm(user_prompt))
self._show_start_logs()
self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
formatted_answer = self._invoke_loop()
try:
formatted_answer = self._invoke_loop()
except AssertionError:
self._printer.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
raise
except Exception as e:
handle_unknown_error(self._printer, e)
if e.__class__.__module__.startswith("litellm"):
# Do not retry on litellm errors
raise e
else:
raise e
if self.ask_for_human_input:
formatted_answer = self._handle_human_feedback(formatted_answer)
self._create_short_term_memory(formatted_answer)
self._create_long_term_memory(formatted_answer)
self._create_external_memory(formatted_answer)
return {"output": formatted_answer.output}
def _invoke_loop(self, formatted_answer=None):
try:
while not isinstance(formatted_answer, AgentFinish):
if not self.request_within_rpm_limit or self.request_within_rpm_limit():
answer = self.llm.call(
self.messages,
def _invoke_loop(self) -> AgentFinish:
"""
Main loop to invoke the agent's thought process until it reaches a conclusion
or the maximum number of iterations is reached.
"""
formatted_answer = None
while not isinstance(formatted_answer, AgentFinish):
try:
if has_reached_max_iterations(self.iterations, self.max_iter):
formatted_answer = handle_max_iterations_exceeded(
formatted_answer,
printer=self._printer,
i18n=self._i18n,
messages=self.messages,
llm=self.llm,
callbacks=self.callbacks,
)
if answer is None or answer == "":
self._printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError(
"Invalid response from LLM call - None or empty."
)
enforce_rpm_limit(self.request_within_rpm_limit)
if not self.use_stop_words:
try:
self._format_answer(answer)
except OutputParserException as e:
if (
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE
in e.error
):
answer = answer.split("Observation:")[0].strip()
answer = get_llm_response(
llm=self.llm,
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
)
formatted_answer = process_llm_response(answer, self.use_stop_words)
self.iterations += 1
formatted_answer = self._format_answer(answer)
if isinstance(formatted_answer, AgentAction):
tool_result = self._execute_tool_and_check_finality(
formatted_answer
)
# Directly append the result to the messages if the
# tool is "Add image to content" in case of multimodal
# agents
if formatted_answer.tool == self._i18n.tools("add_image")["name"]:
self.messages.append(tool_result.result)
continue
else:
if self.step_callback:
self.step_callback(tool_result)
formatted_answer.text += f"\nObservation: {tool_result.result}"
formatted_answer.result = tool_result.result
if tool_result.result_as_answer:
return AgentFinish(
thought="",
output=tool_result.result,
text=formatted_answer.text,
if isinstance(formatted_answer, AgentAction):
# Extract agent fingerprint if available
fingerprint_context = {}
if (
self.agent
and hasattr(self.agent, "security_config")
and hasattr(self.agent.security_config, "fingerprint")
):
fingerprint_context = {
"agent_fingerprint": str(
self.agent.security_config.fingerprint
)
}
self._show_logs(formatted_answer)
if self.step_callback:
self.step_callback(formatted_answer)
if self._should_force_answer():
if self.have_forced_answer:
return AgentFinish(
thought="",
output=self._i18n.errors(
"force_final_answer_error"
).format(formatted_answer.text),
text=formatted_answer.text,
)
else:
formatted_answer.text += (
f'\n{self._i18n.errors("force_final_answer")}'
)
self.have_forced_answer = True
self.messages.append(
self._format_msg(formatted_answer.text, role="assistant")
tool_result = execute_tool_and_check_finality(
agent_action=formatted_answer,
fingerprint_context=fingerprint_context,
tools=self.tools,
i18n=self._i18n,
agent_key=self.agent.key if self.agent else None,
agent_role=self.agent.role if self.agent else None,
tools_handler=self.tools_handler,
task=self.task,
agent=self.agent,
function_calling_llm=self.function_calling_llm,
)
formatted_answer = self._handle_agent_action(
formatted_answer, tool_result
)
except OutputParserException as e:
self.messages.append({"role": "user", "content": e.error})
if self.iterations > self.log_error_after:
self._printer.print(
content=f"Error parsing LLM output, agent will retry: {e.error}",
color="red",
self._invoke_step_callback(formatted_answer)
self._append_message(formatted_answer.text, role="assistant")
except OutputParserException as e:
formatted_answer = handle_output_parser_exception(
e=e,
messages=self.messages,
iterations=self.iterations,
log_error_after=self.log_error_after,
printer=self._printer,
)
return self._invoke_loop(formatted_answer)
except Exception as e:
if LLMContextLengthExceededException(str(e))._is_context_limit_error(
str(e)
):
self._handle_context_length()
return self._invoke_loop(formatted_answer)
else:
raise e
except Exception as e:
if e.__class__.__module__.startswith("litellm"):
# Do not retry on litellm errors
raise e
if is_context_length_exceeded(e):
handle_context_length(
respect_context_window=self.respect_context_window,
printer=self._printer,
messages=self.messages,
llm=self.llm,
callbacks=self.callbacks,
i18n=self._i18n,
)
continue
else:
handle_unknown_error(self._printer, e)
raise e
finally:
self.iterations += 1
# During the invoke loop, formatted_answer alternates between AgentAction
# (when the agent is using tools) and eventually becomes AgentFinish
# (when the agent reaches a final answer). This assertion confirms we've
# reached a final answer and helps type checking understand this transition.
assert isinstance(formatted_answer, AgentFinish)
self._show_logs(formatted_answer)
return formatted_answer
def _handle_agent_action(
self, formatted_answer: AgentAction, tool_result: ToolResult
) -> Union[AgentAction, AgentFinish]:
"""Handle the AgentAction, execute tools, and process the results."""
# Special case for add_image_tool
add_image_tool = self._i18n.tools("add_image")
if (
isinstance(add_image_tool, dict)
and formatted_answer.tool.casefold().strip()
== add_image_tool.get("name", "").casefold().strip()
):
self.messages.append({"role": "assistant", "content": tool_result.result})
return formatted_answer
return handle_agent_action_core(
formatted_answer=formatted_answer,
tool_result=tool_result,
messages=self.messages,
step_callback=self.step_callback,
show_logs=self._show_logs,
)
def _invoke_step_callback(self, formatted_answer) -> None:
"""Invoke the step callback if it exists."""
if self.step_callback:
self.step_callback(formatted_answer)
def _append_message(self, text: str, role: str = "assistant") -> None:
"""Append a message to the message list with the given role."""
self.messages.append(format_message_for_llm(text, role=role))
def _show_start_logs(self):
"""Show logs for the start of agent execution."""
if self.agent is None:
raise ValueError("Agent cannot be None")
if self.agent.verbose or (
hasattr(self, "crew") and getattr(self.crew, "verbose", False)
):
agent_role = self.agent.role.split("\n")[0]
self._printer.print(
content=f"\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
)
self._printer.print(
content=f"\033[95m## Task:\033[00m \033[92m{self.task.description}\033[00m"
)
show_agent_logs(
printer=self._printer,
agent_role=self.agent.role,
task_description=(
getattr(self.task, "description") if self.task else "Not Found"
),
verbose=self.agent.verbose
or (hasattr(self, "crew") and getattr(self.crew, "verbose", False)),
)
def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
"""Show logs for the agent's execution."""
if self.agent is None:
raise ValueError("Agent cannot be None")
if self.agent.verbose or (
hasattr(self, "crew") and getattr(self.crew, "verbose", False)
):
agent_role = self.agent.role.split("\n")[0]
if isinstance(formatted_answer, AgentAction):
thought = re.sub(r"\n+", "\n", formatted_answer.thought)
formatted_json = json.dumps(
formatted_answer.tool_input,
indent=2,
ensure_ascii=False,
)
self._printer.print(
content=f"\n\n\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
)
if thought and thought != "":
self._printer.print(
content=f"\033[95m## Thought:\033[00m \033[92m{thought}\033[00m"
)
self._printer.print(
content=f"\033[95m## Using tool:\033[00m \033[92m{formatted_answer.tool}\033[00m"
)
self._printer.print(
content=f"\033[95m## Tool Input:\033[00m \033[92m\n{formatted_json}\033[00m"
)
self._printer.print(
content=f"\033[95m## Tool Output:\033[00m \033[92m\n{formatted_answer.result}\033[00m"
)
elif isinstance(formatted_answer, AgentFinish):
self._printer.print(
content=f"\n\n\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
)
self._printer.print(
content=f"\033[95m## Final Answer:\033[00m \033[92m\n{formatted_answer.output}\033[00m\n\n"
)
show_agent_logs(
printer=self._printer,
agent_role=self.agent.role,
formatted_answer=formatted_answer,
verbose=self.agent.verbose
or (hasattr(self, "crew") and getattr(self.crew, "verbose", False)),
)
def _execute_tool_and_check_finality(self, agent_action: AgentAction) -> ToolResult:
tool_usage = ToolUsage(
tools_handler=self.tools_handler,
tools=self.tools,
original_tools=self.original_tools,
tools_description=self.tools_description,
tools_names=self.tools_names,
function_calling_llm=self.function_calling_llm,
task=self.task, # type: ignore[arg-type]
agent=self.agent,
action=agent_action,
)
tool_calling = tool_usage.parse(agent_action.text)
if isinstance(tool_calling, ToolUsageErrorException):
tool_result = tool_calling.message
return ToolResult(result=tool_result, result_as_answer=False)
else:
if tool_calling.tool_name.casefold().strip() in [
name.casefold().strip() for name in self.tool_name_to_tool_map
] or tool_calling.tool_name.casefold().replace("_", " ") in [
name.casefold().strip() for name in self.tool_name_to_tool_map
]:
tool_result = tool_usage.use(tool_calling, agent_action.text)
tool = self.tool_name_to_tool_map.get(tool_calling.tool_name)
if tool:
return ToolResult(
result=tool_result, result_as_answer=tool.result_as_answer
)
else:
tool_result = self._i18n.errors("wrong_tool_name").format(
tool=tool_calling.tool_name,
tools=", ".join([tool.name.casefold() for tool in self.tools]),
)
return ToolResult(result=tool_result, result_as_answer=False)
def _summarize_messages(self) -> None:
messages_groups = []
@@ -302,100 +293,78 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
content = message["content"]
cut_size = self.llm.get_context_window_size()
for i in range(0, len(content), cut_size):
messages_groups.append(content[i : i + cut_size])
messages_groups.append({"content": content[i : i + cut_size]})
summarized_contents = []
for group in messages_groups:
summary = self.llm.call(
[
self._format_msg(
format_message_for_llm(
self._i18n.slice("summarizer_system_message"), role="system"
),
self._format_msg(
self._i18n.slice("summarize_instruction").format(group=group),
format_message_for_llm(
self._i18n.slice("summarize_instruction").format(
group=group["content"]
),
),
],
callbacks=self.callbacks,
)
summarized_contents.append(summary)
summarized_contents.append({"content": str(summary)})
merged_summary = " ".join(str(content) for content in summarized_contents)
merged_summary = " ".join(content["content"] for content in summarized_contents)
self.messages = [
self._format_msg(
format_message_for_llm(
self._i18n.slice("summary").format(merged_summary=merged_summary)
)
]
def _handle_context_length(self) -> None:
if self.respect_context_window:
def _handle_crew_training_output(
self, result: AgentFinish, human_feedback: Optional[str] = None
) -> None:
"""Handle the process of saving training data."""
agent_id = str(self.agent.id) # type: ignore
train_iteration = (
getattr(self.crew, "_train_iteration", None) if self.crew else None
)
if train_iteration is None or not isinstance(train_iteration, int):
self._printer.print(
content="Context length exceeded. Summarizing content to fit the model context window.",
color="yellow",
)
self._summarize_messages()
else:
self._printer.print(
content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
content="Invalid or missing train iteration. Cannot save training data.",
color="red",
)
raise SystemExit(
"Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
)
return
def _handle_crew_training_output(
self, result: AgentFinish, human_feedback: str | None = None
) -> None:
"""Function to handle the process of the training data."""
agent_id = str(self.agent.id) # type: ignore
# Load training data
training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)
training_data = training_handler.load()
training_data = training_handler.load() or {}
# Check if training data exists, human input is not requested, and self.crew is valid
if training_data and not self.ask_for_human_input:
if self.crew is not None and hasattr(self.crew, "_train_iteration"):
train_iteration = self.crew._train_iteration
if agent_id in training_data and isinstance(train_iteration, int):
training_data[agent_id][train_iteration][
"improved_output"
] = result.output
training_handler.save(training_data)
else:
self._printer.print(
content="Invalid train iteration type or agent_id not in training data.",
color="red",
)
else:
self._printer.print(
content="Crew is None or does not have _train_iteration attribute.",
color="red",
)
# Initialize or retrieve agent's training data
agent_training_data = training_data.get(agent_id, {})
if self.ask_for_human_input and human_feedback is not None:
training_data = {
if human_feedback is not None:
# Save initial output and human feedback
agent_training_data[train_iteration] = {
"initial_output": result.output,
"human_feedback": human_feedback,
"agent": agent_id,
"agent_role": self.agent.role, # type: ignore
}
if self.crew is not None and hasattr(self.crew, "_train_iteration"):
train_iteration = self.crew._train_iteration
if isinstance(train_iteration, int):
CrewTrainingHandler(TRAINING_DATA_FILE).append(
train_iteration, agent_id, training_data
)
else:
self._printer.print(
content="Invalid train iteration type. Expected int.",
color="red",
)
else:
# Save improved output
if train_iteration in agent_training_data:
agent_training_data[train_iteration]["improved_output"] = result.output
else:
self._printer.print(
content="Crew is None or does not have _train_iteration attribute.",
content=(
f"No existing training data for agent {agent_id} and iteration "
f"{train_iteration}. Cannot save improved output."
),
color="red",
)
return
# Update the training data and save
training_data[agent_id] = agent_training_data
training_handler.save(training_data)
def _format_prompt(self, prompt: str, inputs: Dict[str, str]) -> str:
prompt = prompt.replace("{input}", inputs["input"])
@@ -403,87 +372,83 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
prompt = prompt.replace("{tools}", inputs["tools"])
return prompt
def _format_answer(self, answer: str) -> Union[AgentAction, AgentFinish]:
return CrewAgentParser(agent=self.agent).parse(answer)
def _format_msg(self, prompt: str, role: str = "user") -> Dict[str, str]:
prompt = prompt.rstrip()
return {"role": role, "content": prompt}
def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
"""
Handles the human feedback loop, allowing the user to provide feedback
on the agent's output and determining if additional iterations are needed.
"""Handle human feedback with different flows for training vs regular use.
Parameters:
formatted_answer (AgentFinish): The initial output from the agent.
Args:
formatted_answer: The initial AgentFinish result to get feedback on
Returns:
AgentFinish: The final output after incorporating human feedback.
AgentFinish: The final answer after processing feedback
"""
human_feedback = self._ask_human_input(formatted_answer.output)
if self._is_training_mode():
return self._handle_training_feedback(formatted_answer, human_feedback)
return self._handle_regular_feedback(formatted_answer, human_feedback)
def _is_training_mode(self) -> bool:
"""Check if crew is in training mode."""
return bool(self.crew and self.crew._train)
def _handle_training_feedback(
self, initial_answer: AgentFinish, feedback: str
) -> AgentFinish:
"""Process feedback for training scenarios with single iteration."""
self._handle_crew_training_output(initial_answer, feedback)
self.messages.append(
format_message_for_llm(
self._i18n.slice("feedback_instructions").format(feedback=feedback)
)
)
improved_answer = self._invoke_loop()
self._handle_crew_training_output(improved_answer)
self.ask_for_human_input = False
return improved_answer
def _handle_regular_feedback(
self, current_answer: AgentFinish, initial_feedback: str
) -> AgentFinish:
"""Process feedback for regular use with potential multiple iterations."""
feedback = initial_feedback
answer = current_answer
while self.ask_for_human_input:
human_feedback = self._ask_human_input(formatted_answer.output)
if self.crew and self.crew._train:
self._handle_crew_training_output(formatted_answer, human_feedback)
# Make an LLM call to verify if additional changes are requested based on human feedback
additional_changes_prompt = self._i18n.slice(
"human_feedback_classification"
).format(feedback=human_feedback)
retry_count = 0
llm_call_successful = False
additional_changes_response = None
while retry_count < MAX_LLM_RETRY and not llm_call_successful:
try:
additional_changes_response = (
self.llm.call(
[
self._format_msg(
additional_changes_prompt, role="system"
)
],
callbacks=self.callbacks,
)
.strip()
.lower()
)
llm_call_successful = True
except Exception as e:
retry_count += 1
self._printer.print(
content=f"Error during LLM call to classify human feedback: {e}. Retrying... ({retry_count}/{MAX_LLM_RETRY})",
color="red",
)
if not llm_call_successful:
self._printer.print(
content="Error processing feedback after multiple attempts.",
color="red",
)
# If the user provides a blank response, assume they are happy with the result
if feedback.strip() == "":
self.ask_for_human_input = False
break
if additional_changes_response == "false":
self.ask_for_human_input = False
elif additional_changes_response == "true":
self.ask_for_human_input = True
# Add human feedback to messages
self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
# Invoke the loop again with updated messages
formatted_answer = self._invoke_loop()
if self.crew and self.crew._train:
self._handle_crew_training_output(formatted_answer)
else:
# Unexpected response
self._printer.print(
content=f"Unexpected response from LLM: '{additional_changes_response}'. Assuming no additional changes requested.",
color="red",
)
self.ask_for_human_input = False
answer = self._process_feedback_iteration(feedback)
feedback = self._ask_human_input(answer.output)
return formatted_answer
return answer
def _process_feedback_iteration(self, feedback: str) -> AgentFinish:
"""Process a single feedback iteration."""
self.messages.append(
format_message_for_llm(
self._i18n.slice("feedback_instructions").format(feedback=feedback)
)
)
return self._invoke_loop()
def _log_feedback_error(self, retry_count: int, error: Exception) -> None:
"""Log feedback processing errors."""
self._printer.print(
content=(
f"Error processing feedback: {error}. "
f"Retrying... ({retry_count + 1}/{MAX_LLM_RETRY})"
),
color="red",
)
def _log_max_retries_exceeded(self) -> None:
"""Log when max retries for feedback processing are exceeded."""
self._printer.print(
content=(
f"Failed to process feedback after {MAX_LLM_RETRY} attempts. "
"Ending feedback loop."
),
color="red",
)
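
The LLM-classification step is gone from the regular feedback path: the loop now simply re-prompts until the human submits a blank response. Sketched standalone, with hypothetical ask/improve callables standing in for _ask_human_input and _process_feedback_iteration:

def feedback_loop(ask, improve, first_answer):
    """Mirrors _handle_regular_feedback: iterate until feedback is blank."""
    answer = first_answer
    feedback = ask(answer)
    while feedback.strip() != "":
        answer = improve(feedback)   # one _process_feedback_iteration
        feedback = ask(answer)
    return answer

canned = iter(["make it shorter", ""])
result = feedback_loop(
    ask=lambda a: next(canned),
    improve=lambda fb: f"improved per: {fb}",
    first_answer="draft",
)
assert result == "improved per: make it shorter"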

View File

@@ -1,5 +1,5 @@
import re
from typing import Any, Union
from typing import Any, Optional, Union
from json_repair import repair_json
@@ -67,9 +67,23 @@ class CrewAgentParser:
_i18n: I18N = I18N()
agent: Any = None
def __init__(self, agent: Any):
def __init__(self, agent: Optional[Any] = None):
self.agent = agent
@staticmethod
def parse_text(text: str) -> Union[AgentAction, AgentFinish]:
"""
Static method to parse text into an AgentAction or AgentFinish without needing to instantiate the class.
Args:
text: The text to parse.
Returns:
Either an AgentAction or AgentFinish based on the parsed content.
"""
parser = CrewAgentParser()
return parser.parse(text)
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
thought = self._extract_thought(text)
includes_answer = FINAL_ANSWER_ACTION in text
@@ -77,11 +91,18 @@ class CrewAgentParser:
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match:
if includes_answer:
raise OutputParserException(
f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}"
)
if includes_answer:
final_answer = text.split(FINAL_ANSWER_ACTION)[-1].strip()
# Check whether the final answer ends with triple backticks.
if final_answer.endswith("```"):
# Count occurrences of triple backticks in the final answer.
count = final_answer.count("```")
# If count is odd then it's an unmatched trailing set; remove it.
if count % 2 != 0:
final_answer = final_answer[:-3].rstrip()
return AgentFinish(thought, final_answer, text)
elif action_match:
action = action_match.group(1)
clean_action = self._clean_action(action)
@@ -92,40 +113,37 @@ class CrewAgentParser:
return AgentAction(thought, clean_action, safe_tool_input, text)
elif includes_answer:
final_answer = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish(thought, final_answer, text)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
self.agent.increment_formatting_errors()
raise OutputParserException(
f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{self._i18n.slice('final_answer_format')}",
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
self.agent.increment_formatting_errors()
raise OutputParserException(
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
)
else:
format = self._i18n.slice("format_without_tools")
error = f"{format}"
self.agent.increment_formatting_errors()
raise OutputParserException(
error,
)
def _extract_thought(self, text: str) -> str:
regex = r"(.*?)(?:\n\nAction|\n\nFinal Answer)"
thought_match = re.search(regex, text, re.DOTALL)
if thought_match:
return thought_match.group(1).strip()
return ""
thought_index = text.find("\nAction")
if thought_index == -1:
thought_index = text.find("\nFinal Answer")
if thought_index == -1:
return ""
thought = text[:thought_index].strip()
# Remove any triple backticks from the thought string
thought = thought.replace("```", "").strip()
return thought
def _clean_action(self, text: str) -> str:
"""Clean action string by removing non-essential formatting characters."""
return re.sub(r"^\s*\*+\s*|\s*\*+\s*$", "", text).strip()
return text.strip().strip("*").strip()
def _safe_repair_json(self, tool_input: str) -> str:
UNABLE_TO_REPAIR_JSON_RESULTS = ['""', "{}"]
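
The new static parse_text entry point exercises the parser without constructing an agent; a short sketch in the Thought/Final Answer format this parser expects:

from crewai.agents.parser import AgentFinish, CrewAgentParser

result = CrewAgentParser.parse_text(
    "Thought: I have enough information.\n"
    "Final Answer: 42"
)
assert isinstance(result, AgentFinish)
print(result.output)  # -> 42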

View File

@@ -1,11 +1,13 @@
import os
from importlib.metadata import version as get_version
from typing import Optional
from typing import Optional, Tuple
import click
from crewai.cli.add_crew_to_flow import add_crew_to_flow
from crewai.cli.create_crew import create_crew
from crewai.cli.create_flow import create_flow
from crewai.cli.crew_chat import run_chat
from crewai.memory.storage.kickoff_task_outputs_storage import (
KickoffTaskOutputsSQLiteStorage,
)
@@ -201,7 +203,6 @@ def install(context):
@crewai.command()
def run():
"""Run the Crew."""
click.echo("Running the Crew")
run_crew()
@@ -342,5 +343,18 @@ def flow_add_crew(crew_name):
add_crew_to_flow(crew_name)
@crewai.command()
def chat():
"""
Start a conversation with the Crew, collecting user-supplied inputs,
and using the Chat LLM to generate responses.
"""
click.secho(
"\nStarting a conversation with the Crew\n" "Type 'exit' or Ctrl+C to quit.\n",
)
run_chat()
if __name__ == "__main__":
crewai()

View File

@@ -17,6 +17,12 @@ ENV_VARS = {
"key_name": "GEMINI_API_KEY",
}
],
"nvidia_nim": [
{
"prompt": "Enter your NVIDIA API key (press Enter to skip)",
"key_name": "NVIDIA_NIM_API_KEY",
}
],
"groq": [
{
"prompt": "Enter your GROQ API key (press Enter to skip)",
@@ -85,6 +91,12 @@ ENV_VARS = {
"key_name": "CEREBRAS_API_KEY",
},
],
"sambanova": [
{
"prompt": "Enter your SambaNovaCloud API key (press Enter to skip)",
"key_name": "SAMBANOVA_API_KEY",
}
],
}
@@ -92,12 +104,14 @@ PROVIDERS = [
"openai",
"anthropic",
"gemini",
"nvidia_nim",
"groq",
"ollama",
"watson",
"bedrock",
"azure",
"cerebras",
"sambanova",
]
MODELS = {
@@ -114,6 +128,75 @@ MODELS = {
"gemini/gemini-gemma-2-9b-it",
"gemini/gemini-gemma-2-27b-it",
],
"nvidia_nim": [
"nvidia_nim/nvidia/mistral-nemo-minitron-8b-8k-instruct",
"nvidia_nim/nvidia/nemotron-4-mini-hindi-4b-instruct",
"nvidia_nim/nvidia/llama-3.1-nemotron-70b-instruct",
"nvidia_nim/nvidia/llama3-chatqa-1.5-8b",
"nvidia_nim/nvidia/llama3-chatqa-1.5-70b",
"nvidia_nim/nvidia/vila",
"nvidia_nim/nvidia/neva-22",
"nvidia_nim/nvidia/nemotron-mini-4b-instruct",
"nvidia_nim/nvidia/usdcode-llama3-70b-instruct",
"nvidia_nim/nvidia/nemotron-4-340b-instruct",
"nvidia_nim/meta/codellama-70b",
"nvidia_nim/meta/llama2-70b",
"nvidia_nim/meta/llama3-8b-instruct",
"nvidia_nim/meta/llama3-70b-instruct",
"nvidia_nim/meta/llama-3.1-8b-instruct",
"nvidia_nim/meta/llama-3.1-70b-instruct",
"nvidia_nim/meta/llama-3.1-405b-instruct",
"nvidia_nim/meta/llama-3.2-1b-instruct",
"nvidia_nim/meta/llama-3.2-3b-instruct",
"nvidia_nim/meta/llama-3.2-11b-vision-instruct",
"nvidia_nim/meta/llama-3.2-90b-vision-instruct",
"nvidia_nim/meta/llama-3.1-70b-instruct",
"nvidia_nim/google/gemma-7b",
"nvidia_nim/google/gemma-2b",
"nvidia_nim/google/codegemma-7b",
"nvidia_nim/google/codegemma-1.1-7b",
"nvidia_nim/google/recurrentgemma-2b",
"nvidia_nim/google/gemma-2-9b-it",
"nvidia_nim/google/gemma-2-27b-it",
"nvidia_nim/google/gemma-2-2b-it",
"nvidia_nim/google/deplot",
"nvidia_nim/google/paligemma",
"nvidia_nim/mistralai/mistral-7b-instruct-v0.2",
"nvidia_nim/mistralai/mixtral-8x7b-instruct-v0.1",
"nvidia_nim/mistralai/mistral-large",
"nvidia_nim/mistralai/mixtral-8x22b-instruct-v0.1",
"nvidia_nim/mistralai/mistral-7b-instruct-v0.3",
"nvidia_nim/nv-mistralai/mistral-nemo-12b-instruct",
"nvidia_nim/mistralai/mamba-codestral-7b-v0.1",
"nvidia_nim/microsoft/phi-3-mini-128k-instruct",
"nvidia_nim/microsoft/phi-3-mini-4k-instruct",
"nvidia_nim/microsoft/phi-3-small-8k-instruct",
"nvidia_nim/microsoft/phi-3-small-128k-instruct",
"nvidia_nim/microsoft/phi-3-medium-4k-instruct",
"nvidia_nim/microsoft/phi-3-medium-128k-instruct",
"nvidia_nim/microsoft/phi-3.5-mini-instruct",
"nvidia_nim/microsoft/phi-3.5-moe-instruct",
"nvidia_nim/microsoft/kosmos-2",
"nvidia_nim/microsoft/phi-3-vision-128k-instruct",
"nvidia_nim/microsoft/phi-3.5-vision-instruct",
"nvidia_nim/databricks/dbrx-instruct",
"nvidia_nim/snowflake/arctic",
"nvidia_nim/aisingapore/sea-lion-7b-instruct",
"nvidia_nim/ibm/granite-8b-code-instruct",
"nvidia_nim/ibm/granite-34b-code-instruct",
"nvidia_nim/ibm/granite-3.0-8b-instruct",
"nvidia_nim/ibm/granite-3.0-3b-a800m-instruct",
"nvidia_nim/mediatek/breeze-7b-instruct",
"nvidia_nim/upstage/solar-10.7b-instruct",
"nvidia_nim/writer/palmyra-med-70b-32k",
"nvidia_nim/writer/palmyra-med-70b",
"nvidia_nim/writer/palmyra-fin-70b-32k",
"nvidia_nim/01-ai/yi-large",
"nvidia_nim/deepseek-ai/deepseek-coder-6.7b-instruct",
"nvidia_nim/rakuten/rakutenai-7b-instruct",
"nvidia_nim/rakuten/rakutenai-7b-chat",
"nvidia_nim/baichuan-inc/baichuan2-13b-chat",
],
"groq": [
"groq/llama-3.1-8b-instant",
"groq/llama-3.1-70b-versatile",
@@ -133,10 +216,43 @@ MODELS = {
"watsonx/ibm/granite-3-8b-instruct",
],
"bedrock": [
"bedrock/us.amazon.nova-pro-v1:0",
"bedrock/us.amazon.nova-micro-v1:0",
"bedrock/us.amazon.nova-lite-v1:0",
"bedrock/us.anthropic.claude-3-5-sonnet-20240620-v1:0",
"bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0",
"bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0",
"bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
"bedrock/us.anthropic.claude-3-sonnet-20240229-v1:0",
"bedrock/us.anthropic.claude-3-opus-20240229-v1:0",
"bedrock/us.anthropic.claude-3-haiku-20240307-v1:0",
"bedrock/us.meta.llama3-2-11b-instruct-v1:0",
"bedrock/us.meta.llama3-2-3b-instruct-v1:0",
"bedrock/us.meta.llama3-2-90b-instruct-v1:0",
"bedrock/us.meta.llama3-2-1b-instruct-v1:0",
"bedrock/us.meta.llama3-1-8b-instruct-v1:0",
"bedrock/us.meta.llama3-1-70b-instruct-v1:0",
"bedrock/us.meta.llama3-3-70b-instruct-v1:0",
"bedrock/us.meta.llama3-1-405b-instruct-v1:0",
"bedrock/eu.anthropic.claude-3-5-sonnet-20240620-v1:0",
"bedrock/eu.anthropic.claude-3-sonnet-20240229-v1:0",
"bedrock/eu.anthropic.claude-3-haiku-20240307-v1:0",
"bedrock/eu.meta.llama3-2-3b-instruct-v1:0",
"bedrock/eu.meta.llama3-2-1b-instruct-v1:0",
"bedrock/apac.anthropic.claude-3-5-sonnet-20240620-v1:0",
"bedrock/apac.anthropic.claude-3-5-sonnet-20241022-v2:0",
"bedrock/apac.anthropic.claude-3-sonnet-20240229-v1:0",
"bedrock/apac.anthropic.claude-3-haiku-20240307-v1:0",
"bedrock/amazon.nova-pro-v1:0",
"bedrock/amazon.nova-micro-v1:0",
"bedrock/amazon.nova-lite-v1:0",
"bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
"bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
"bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
"bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0",
"bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
"bedrock/anthropic.claude-3-haiku-20240307-v1:0",
"bedrock/anthropic.claude-3-opus-20240229-v1:0",
"bedrock/anthropic.claude-3-haiku-20240307-v1:0",
"bedrock/anthropic.claude-v2:1",
"bedrock/anthropic.claude-v2",
"bedrock/anthropic.claude-instant-v1",
@@ -151,13 +267,26 @@ MODELS = {
"bedrock/ai21.j2-mid-v1",
"bedrock/ai21.j2-ultra-v1",
"bedrock/ai21.jamba-instruct-v1:0",
"bedrock/meta.llama2-13b-chat-v1",
"bedrock/meta.llama2-70b-chat-v1",
"bedrock/mistral.mistral-7b-instruct-v0:2",
"bedrock/mistral.mixtral-8x7b-instruct-v0:1",
],
"sambanova": [
"sambanova/Meta-Llama-3.3-70B-Instruct",
"sambanova/QwQ-32B-Preview",
"sambanova/Qwen2.5-72B-Instruct",
"sambanova/Qwen2.5-Coder-32B-Instruct",
"sambanova/Meta-Llama-3.1-405B-Instruct",
"sambanova/Meta-Llama-3.1-70B-Instruct",
"sambanova/Meta-Llama-3.1-8B-Instruct",
"sambanova/Llama-3.2-90B-Vision-Instruct",
"sambanova/Llama-3.2-11B-Vision-Instruct",
"sambanova/Meta-Llama-3.2-3B-Instruct",
"sambanova/Meta-Llama-3.2-1B-Instruct",
],
}
DEFAULT_LLM_MODEL = "gpt-4o-mini"
JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"

src/crewai/cli/crew_chat.py Normal file
View File

@@ -0,0 +1,536 @@
import json
import platform
import re
import sys
import threading
import time
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple
import click
import tomli
from packaging import version
from crewai.cli.utils import read_toml
from crewai.cli.version import get_crewai_version
from crewai.crew import Crew
from crewai.llm import LLM, BaseLLM
from crewai.types.crew_chat import ChatInputField, ChatInputs
from crewai.utilities.llm_utils import create_llm
MIN_REQUIRED_VERSION = "0.98.0"
def check_conversational_crews_version(
crewai_version: str, pyproject_data: dict
) -> bool:
"""
Check if the installed crewAI version supports conversational crews.
Args:
crewai_version: The current version of crewAI.
pyproject_data: Dictionary containing pyproject.toml data.
Returns:
bool: True if version check passes, False otherwise.
"""
try:
if version.parse(crewai_version) < version.parse(MIN_REQUIRED_VERSION):
click.secho(
"You are using an older version of crewAI that doesn't support conversational crews. "
"Run 'uv upgrade crewai' to get the latest version.",
fg="red",
)
return False
except version.InvalidVersion:
click.secho("Invalid crewAI version format detected.", fg="red")
return False
return True
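The gate reduces to a single `packaging` comparison; a standalone sketch (version strings are made-up examples):

```python
from packaging import version

MIN_REQUIRED_VERSION = "0.98.0"

def supports_conversational_crews(installed: str) -> bool:
    """Mirror of the check above, without the CLI messaging."""
    try:
        return version.parse(installed) >= version.parse(MIN_REQUIRED_VERSION)
    except version.InvalidVersion:
        return False

assert supports_conversational_crews("0.108.0")
assert not supports_conversational_crews("0.86.0")
```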
def run_chat():
"""
Runs an interactive chat loop using the Crew's chat LLM with function calling.
Incorporates crew_name, crew_description, and input fields to build a tool schema.
Exits if crew_name or crew_description are missing.
"""
crewai_version = get_crewai_version()
pyproject_data = read_toml()
if not check_conversational_crews_version(crewai_version, pyproject_data):
return
crew, crew_name = load_crew_and_name()
chat_llm = initialize_chat_llm(crew)
if not chat_llm:
return
# Indicate that the crew is being analyzed
click.secho(
"\nAnalyzing crew and required inputs - this may take 3 to 30 seconds "
"depending on the complexity of your crew.",
fg="white",
)
# Start loading indicator
loading_complete = threading.Event()
loading_thread = threading.Thread(target=show_loading, args=(loading_complete,))
loading_thread.start()
try:
crew_chat_inputs = generate_crew_chat_inputs(crew, crew_name, chat_llm)
crew_tool_schema = generate_crew_tool_schema(crew_chat_inputs)
system_message = build_system_message(crew_chat_inputs)
# Call the LLM to generate the introductory message
introductory_message = chat_llm.call(
messages=[{"role": "system", "content": system_message}]
)
finally:
# Stop loading indicator
loading_complete.set()
loading_thread.join()
# Indicate that the analysis is complete
click.secho("\nFinished analyzing crew.\n", fg="white")
click.secho(f"Assistant: {introductory_message}\n", fg="green")
messages = [
{"role": "system", "content": system_message},
{"role": "assistant", "content": introductory_message},
]
available_functions = {
crew_chat_inputs.crew_name: create_tool_function(crew, messages),
}
chat_loop(chat_llm, messages, crew_tool_schema, available_functions)
def show_loading(event: threading.Event):
"""Display animated loading dots while processing."""
while not event.is_set():
print(".", end="", flush=True)
time.sleep(1)
print()
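The indicator is a plain `threading.Event` handshake; a self-contained sketch with a stubbed workload standing in for the real crew analysis:

```python
import threading
import time

def show_loading(event: threading.Event) -> None:
    # Print a dot until the main thread signals completion.
    while not event.is_set():
        print(".", end="", flush=True)
        time.sleep(0.2)
    print()

done = threading.Event()
spinner = threading.Thread(target=show_loading, args=(done,))
spinner.start()
try:
    time.sleep(1)  # stand-in for generate_crew_chat_inputs(...)
finally:
    done.set()     # stop the indicator even if the work raises
    spinner.join()
```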
def initialize_chat_llm(crew: Crew) -> Optional[LLM | BaseLLM]:
"""Initializes the chat LLM and handles exceptions."""
try:
return create_llm(crew.chat_llm)
except Exception as e:
click.secho(
f"Unable to find a Chat LLM. Please make sure you set chat_llm on the crew: {e}",
fg="red",
)
return None
def build_system_message(crew_chat_inputs: ChatInputs) -> str:
"""Builds the initial system message for the chat."""
required_fields_str = (
", ".join(
f"{field.name} (desc: {field.description or 'n/a'})"
for field in crew_chat_inputs.inputs
)
or "(No required fields detected)"
)
return (
"You are a helpful AI assistant for the CrewAI platform. "
"Your primary purpose is to assist users with the crew's specific tasks. "
"You can answer general questions, but should guide users back to the crew's purpose afterward. "
"For example, after answering a general question, remind the user of your main purpose, such as generating a research report, and prompt them to specify a topic or task related to the crew's purpose. "
"You have a function (tool) you can call by name if you have all required inputs. "
f"Those required inputs are: {required_fields_str}. "
"Once you have them, call the function. "
"Please keep your responses concise and friendly. "
"If a user asks a question outside the crew's scope, provide a brief answer and remind them of the crew's purpose. "
"After calling the tool, be prepared to take user feedback and make adjustments as needed. "
"If you are ever unsure about a user's request or need clarification, ask the user for more information. "
"Before doing anything else, introduce yourself with a friendly message like: 'Hey! I'm here to help you with [crew's purpose]. Could you please provide me with [inputs] so we can get started?' "
"For example: 'Hey! I'm here to help you with uncovering and reporting cutting-edge developments through thorough research and detailed analysis. Could you please provide me with a topic you're interested in? This will help us generate a comprehensive research report and detailed analysis.'"
f"\nCrew Name: {crew_chat_inputs.crew_name}"
f"\nCrew Description: {crew_chat_inputs.crew_description}"
)
def create_tool_function(crew: Crew, messages: List[Dict[str, str]]) -> Any:
"""Creates a wrapper function for running the crew tool with messages."""
def run_crew_tool_with_messages(**kwargs):
return run_crew_tool(crew, messages, **kwargs)
return run_crew_tool_with_messages
def flush_input():
"""Flush any pending input from the user."""
if platform.system() == "Windows":
# Windows platform
import msvcrt
while msvcrt.kbhit():
msvcrt.getch()
else:
# Unix-like platforms (Linux, macOS)
import termios
termios.tcflush(sys.stdin, termios.TCIFLUSH)
def chat_loop(chat_llm, messages, crew_tool_schema, available_functions):
"""Main chat loop for interacting with the user."""
while True:
try:
# Flush any pending input before accepting new input
flush_input()
user_input = get_user_input()
handle_user_input(
user_input, chat_llm, messages, crew_tool_schema, available_functions
)
except KeyboardInterrupt:
click.echo("\nExiting chat. Goodbye!")
break
except Exception as e:
click.secho(f"An error occurred: {e}", fg="red")
break
def get_user_input() -> str:
"""Collect multi-line user input with exit handling."""
click.secho(
"\nYou (type your message below. Press 'Enter' twice when you're done):",
fg="blue",
)
user_input_lines = []
while True:
line = input()
if line.strip().lower() == "exit":
return "exit"
if line == "":
break
user_input_lines.append(line)
return "\n".join(user_input_lines)
def handle_user_input(
user_input: str,
chat_llm: LLM,
messages: List[Dict[str, str]],
crew_tool_schema: Dict[str, Any],
available_functions: Dict[str, Any],
) -> None:
if user_input.strip().lower() == "exit":
click.echo("Exiting chat. Goodbye!")
return
if not user_input.strip():
click.echo("Empty message. Please provide input or type 'exit' to quit.")
return
messages.append({"role": "user", "content": user_input})
# Indicate that assistant is processing
click.echo()
click.secho("Assistant is processing your input. Please wait...", fg="green")
# Process assistant's response
final_response = chat_llm.call(
messages=messages,
tools=[crew_tool_schema],
available_functions=available_functions,
)
messages.append({"role": "assistant", "content": final_response})
click.secho(f"\nAssistant: {final_response}\n", fg="green")
def generate_crew_tool_schema(crew_inputs: ChatInputs) -> dict:
"""
Dynamically build a LiteLLM 'function' schema for the given crew.
crew_inputs: A ChatInputs object containing the crew_name (used for the
function 'name'), crew_description, and a list of input fields
(each with a name & description).
"""
properties = {}
for field in crew_inputs.inputs:
properties[field.name] = {
"type": "string",
"description": field.description or "No description provided",
}
required_fields = [field.name for field in crew_inputs.inputs]
return {
"type": "function",
"function": {
"name": crew_inputs.crew_name,
"description": crew_inputs.crew_description or "No crew description",
"parameters": {
"type": "object",
"properties": properties,
"required": required_fields,
},
},
}
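For illustration, a crew named `research_crew` with a single `topic` input (both hypothetical) would yield a schema shaped like this, matching the OpenAI-style function-calling format LiteLLM accepts:

```python
{
    "type": "function",
    "function": {
        "name": "research_crew",
        "description": "Researches a topic and writes a report",
        "parameters": {
            "type": "object",
            "properties": {
                "topic": {"type": "string", "description": "Topic to research"},
            },
            "required": ["topic"],
        },
    },
}
```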
def run_crew_tool(crew: Crew, messages: List[Dict[str, str]], **kwargs):
"""
Runs the crew using crew.kickoff(inputs=kwargs) and returns the output.
Args:
crew (Crew): The crew instance to run.
messages (List[Dict[str, str]]): The chat messages up to this point.
**kwargs: The inputs collected from the user.
Returns:
str: The output from the crew's execution.
Raises:
SystemExit: Exits the chat if an error occurs during crew execution.
"""
try:
# Serialize 'messages' to JSON string before adding to kwargs
kwargs["crew_chat_messages"] = json.dumps(messages)
# Run the crew with the provided inputs
crew_output = crew.kickoff(inputs=kwargs)
# Convert CrewOutput to a string to send back to the user
result = str(crew_output)
return result
except Exception as e:
# Exit the chat and show the error message
click.secho("An error occurred while running the crew:", fg="red")
click.secho(str(e), fg="red")
sys.exit(1)
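The tool call therefore reaches the crew as ordinary kickoff inputs plus the serialized transcript; a sketch of that shape (the crew instance and input values are hypothetical):

```python
import json

messages = [{"role": "user", "content": "Write about AI"}]
kwargs = {"topic": "AI"}  # inputs the LLM extracted from the conversation
kwargs["crew_chat_messages"] = json.dumps(messages)
# result = str(my_crew.kickoff(inputs=kwargs))  # my_crew: a Crew instance
```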
def load_crew_and_name() -> Tuple[Crew, str]:
"""
Loads the crew by importing the crew class from the user's project.
Returns:
Tuple[Crew, str]: A tuple containing the Crew instance and the name of the crew.
"""
# Get the current working directory
cwd = Path.cwd()
# Path to the pyproject.toml file
pyproject_path = cwd / "pyproject.toml"
if not pyproject_path.exists():
raise FileNotFoundError("pyproject.toml not found in the current directory.")
# Load the pyproject.toml file using 'tomli'
with pyproject_path.open("rb") as f:
pyproject_data = tomli.load(f)
# Get the project name from the 'project' section
project_name = pyproject_data["project"]["name"]
folder_name = project_name
# Derive the crew class name from the project name
# E.g., if project_name is 'my_project', crew_class_name is 'MyProject'
crew_class_name = project_name.replace("_", " ").title().replace(" ", "")
# Add the 'src' directory to sys.path
src_path = cwd / "src"
if str(src_path) not in sys.path:
sys.path.insert(0, str(src_path))
# Import the crew module
crew_module_name = f"{folder_name}.crew"
try:
crew_module = __import__(crew_module_name, fromlist=[crew_class_name])
except ImportError as e:
raise ImportError(f"Failed to import crew module {crew_module_name}: {e}")
# Get the crew class from the module
try:
crew_class = getattr(crew_module, crew_class_name)
except AttributeError:
raise AttributeError(
f"Crew class {crew_class_name} not found in module {crew_module_name}"
)
# Instantiate the crew
crew_instance = crew_class().crew()
return crew_instance, crew_class_name
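The class-name derivation in isolation; the project name is a made-up example:

```python
project_name = "my_research_project"
crew_class_name = project_name.replace("_", " ").title().replace(" ", "")
assert crew_class_name == "MyResearchProject"
```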
def generate_crew_chat_inputs(crew: Crew, crew_name: str, chat_llm) -> ChatInputs:
"""
Generates the ChatInputs required for the crew by analyzing the tasks and agents.
Args:
crew (Crew): The crew object containing tasks and agents.
crew_name (str): The name of the crew.
chat_llm: The chat language model to use for AI calls.
Returns:
ChatInputs: An object containing the crew's name, description, and input fields.
"""
# Extract placeholders from tasks and agents
required_inputs = fetch_required_inputs(crew)
# Generate descriptions for each input using AI
input_fields = []
for input_name in required_inputs:
description = generate_input_description_with_ai(input_name, crew, chat_llm)
input_fields.append(ChatInputField(name=input_name, description=description))
# Generate crew description using AI
crew_description = generate_crew_description_with_ai(crew, chat_llm)
return ChatInputs(
crew_name=crew_name, crew_description=crew_description, inputs=input_fields
)
def fetch_required_inputs(crew: Crew) -> Set[str]:
"""
Extracts placeholders from the crew's tasks and agents.
Args:
crew (Crew): The crew object.
Returns:
Set[str]: A set of placeholder names.
"""
placeholder_pattern = re.compile(r"\{(.+?)\}")
required_inputs: Set[str] = set()
# Scan tasks
for task in crew.tasks:
text = f"{task.description or ''} {task.expected_output or ''}"
required_inputs.update(placeholder_pattern.findall(text))
# Scan agents
for agent in crew.agents:
text = f"{agent.role or ''} {agent.goal or ''} {agent.backstory or ''}"
required_inputs.update(placeholder_pattern.findall(text))
return required_inputs
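The same extraction on hypothetical task text, showing what the regex captures:

```python
import re

placeholder_pattern = re.compile(r"\{(.+?)\}")
text = "Research {topic} and report findings for {current_year}."
assert placeholder_pattern.findall(text) == ["topic", "current_year"]
```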
def generate_input_description_with_ai(input_name: str, crew: Crew, chat_llm) -> str:
"""
Generates an input description using AI based on the context of the crew.
Args:
input_name (str): The name of the input placeholder.
crew (Crew): The crew object.
chat_llm: The chat language model to use for AI calls.
Returns:
str: A concise description of the input.
"""
# Gather context from tasks and agents where the input is used
context_texts = []
placeholder_pattern = re.compile(r"\{(.+?)\}")
for task in crew.tasks:
if (
f"{{{input_name}}}" in task.description
or f"{{{input_name}}}" in task.expected_output
):
# Replace placeholders with input names
task_description = placeholder_pattern.sub(
lambda m: m.group(1), task.description or ""
)
expected_output = placeholder_pattern.sub(
lambda m: m.group(1), task.expected_output or ""
)
context_texts.append(f"Task Description: {task_description}")
context_texts.append(f"Expected Output: {expected_output}")
for agent in crew.agents:
if (
f"{{{input_name}}}" in agent.role
or f"{{{input_name}}}" in agent.goal
or f"{{{input_name}}}" in agent.backstory
):
# Replace placeholders with input names
agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role or "")
agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal or "")
agent_backstory = placeholder_pattern.sub(
lambda m: m.group(1), agent.backstory or ""
)
context_texts.append(f"Agent Role: {agent_role}")
context_texts.append(f"Agent Goal: {agent_goal}")
context_texts.append(f"Agent Backstory: {agent_backstory}")
context = "\n".join(context_texts)
if not context:
# If no context is found for the input, raise an exception as per instruction
raise ValueError(f"No context found for input '{input_name}'.")
prompt = (
f"Based on the following context, write a concise description (15 words or less) of the input '{input_name}'.\n"
"Provide only the description, without any extra text or labels. Do not include placeholders like '{topic}' in the description.\n"
"Context:\n"
f"{context}"
)
response = chat_llm.call(messages=[{"role": "user", "content": prompt}])
description = response.strip()
return description
def generate_crew_description_with_ai(crew: Crew, chat_llm) -> str:
"""
Generates a brief description of the crew using AI.
Args:
crew (Crew): The crew object.
chat_llm: The chat language model to use for AI calls.
Returns:
str: A concise description of the crew's purpose (15 words or less).
"""
# Gather context from tasks and agents
context_texts = []
placeholder_pattern = re.compile(r"\{(.+?)\}")
for task in crew.tasks:
# Replace placeholders with input names
task_description = placeholder_pattern.sub(
lambda m: m.group(1), task.description or ""
)
expected_output = placeholder_pattern.sub(
lambda m: m.group(1), task.expected_output or ""
)
context_texts.append(f"Task Description: {task_description}")
context_texts.append(f"Expected Output: {expected_output}")
for agent in crew.agents:
# Replace placeholders with input names
agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role or "")
agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal or "")
agent_backstory = placeholder_pattern.sub(
lambda m: m.group(1), agent.backstory or ""
)
context_texts.append(f"Agent Role: {agent_role}")
context_texts.append(f"Agent Goal: {agent_goal}")
context_texts.append(f"Agent Backstory: {agent_backstory}")
context = "\n".join(context_texts)
if not context:
raise ValueError("No context found for generating crew description.")
prompt = (
"Based on the following context, write a concise, action-oriented description (15 words or less) of the crew's purpose.\n"
"Provide only the description, without any extra text or labels. Do not include placeholders like '{topic}' in the description.\n"
"Context:\n"
f"{context}"
)
response = chat_llm.call(messages=[{"role": "user", "content": prompt}])
crew_description = response.strip()
return crew_description

View File

@@ -1,4 +1,5 @@
import subprocess
from functools import lru_cache
class Repository:
@@ -35,6 +36,7 @@ class Repository:
encoding="utf-8",
).strip()
@lru_cache(maxsize=None)
def is_git_repo(self) -> bool:
"""Check if the current directory is a git repository."""
try:

View File

@@ -3,6 +3,10 @@ import subprocess
import click
# Be mindful about changing this.
# on some environments we don't use this command but instead run `uv sync` directly,
# so if you expect this to support more things you will need to replicate it there.
# Ask @joaomdmoura if you are unsure.
def install_crew(proxy_options: list[str]) -> None:
"""
Install the crew by running the UV command to lock and install.

View File

@@ -2,11 +2,7 @@ import subprocess
import click
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
from crewai.cli.utils import get_crew
def reset_memories_command(
@@ -30,30 +26,35 @@ def reset_memories_command(
"""
try:
crew = get_crew()
if not crew:
raise ValueError("No crew found.")
if all:
ShortTermMemory().reset()
EntityMemory().reset()
LongTermMemory().reset()
TaskOutputStorageHandler().reset()
KnowledgeStorage().reset()
crew.reset_memories(command_type="all")
click.echo("All memories have been reset.")
else:
if long:
LongTermMemory().reset()
click.echo("Long term memory has been reset.")
return
if short:
ShortTermMemory().reset()
click.echo("Short term memory has been reset.")
if entity:
EntityMemory().reset()
click.echo("Entity memory has been reset.")
if kickoff_outputs:
TaskOutputStorageHandler().reset()
click.echo("Latest Kickoff outputs stored has been reset.")
if knowledge:
KnowledgeStorage().reset()
click.echo("Knowledge has been reset.")
if not any([long, short, entity, kickoff_outputs, knowledge]):
click.echo(
"No memory type specified. Please specify at least one type to reset."
)
return
if long:
crew.reset_memories(command_type="long")
click.echo("Long term memory has been reset.")
if short:
crew.reset_memories(command_type="short")
click.echo("Short term memory has been reset.")
if entity:
crew.reset_memories(command_type="entity")
click.echo("Entity memory has been reset.")
if kickoff_outputs:
crew.reset_memories(command_type="kickoff_outputs")
click.echo("Latest Kickoff outputs stored has been reset.")
if knowledge:
crew.reset_memories(command_type="knowledge")
click.echo("Knowledge has been reset.")
except subprocess.CalledProcessError as e:
click.echo(f"An error occurred while resetting the memories: {e}", err=True)

View File

@@ -1,4 +1,6 @@
import subprocess
from enum import Enum
from typing import List, Optional
import click
from packaging import version
@@ -7,16 +9,24 @@ from crewai.cli.utils import read_toml
from crewai.cli.version import get_crewai_version
class CrewType(Enum):
STANDARD = "standard"
FLOW = "flow"
def run_crew() -> None:
"""
Run the crew by running a command in the UV environment.
Run the crew or flow by running a command in the UV environment.
Starting from version 0.103.0, this command can be used to run both
standard crews and flows. For flows, it detects the type from pyproject.toml
and automatically runs the appropriate command.
"""
command = ["uv", "run", "run_crew"]
crewai_version = get_crewai_version()
min_required_version = "0.71.0"
pyproject_data = read_toml()
# Check for legacy poetry configuration
if pyproject_data.get("tool", {}).get("poetry") and (
version.parse(crewai_version) < version.parse(min_required_version)
):
@@ -26,18 +36,54 @@ def run_crew() -> None:
fg="red",
)
# Determine crew type
is_flow = pyproject_data.get("tool", {}).get("crewai", {}).get("type") == "flow"
crew_type = CrewType.FLOW if is_flow else CrewType.STANDARD
# Display appropriate message
click.echo(f"Running the {'Flow' if is_flow else 'Crew'}")
# Execute the appropriate command
execute_command(crew_type)
def execute_command(crew_type: CrewType) -> None:
"""
Execute the appropriate command based on crew type.
Args:
crew_type: The type of crew to run
"""
command = ["uv", "run", "kickoff" if crew_type == CrewType.FLOW else "run_crew"]
try:
subprocess.run(command, capture_output=False, text=True, check=True)
except subprocess.CalledProcessError as e:
click.echo(f"An error occurred while running the crew: {e}", err=True)
click.echo(e.output, err=True, nl=True)
if pyproject_data.get("tool", {}).get("poetry"):
click.secho(
"It's possible that you are using an old version of crewAI that uses poetry, please run `crewai update` to update your pyproject.toml to use uv.",
fg="yellow",
)
handle_error(e, crew_type)
except Exception as e:
click.echo(f"An unexpected error occurred: {e}", err=True)
def handle_error(error: subprocess.CalledProcessError, crew_type: CrewType) -> None:
"""
Handle subprocess errors with appropriate messaging.
Args:
error: The subprocess error that occurred
crew_type: The type of crew that was being run
"""
entity_type = "flow" if crew_type == CrewType.FLOW else "crew"
click.echo(f"An error occurred while running the {entity_type}: {error}", err=True)
if error.output:
click.echo(error.output, err=True, nl=True)
pyproject_data = read_toml()
if pyproject_data.get("tool", {}).get("poetry"):
click.secho(
"It's possible that you are using an old version of crewAI that uses poetry, "
"please run `crewai update` to update your pyproject.toml to use uv.",
fg="yellow",
)
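The flow/crew dispatch above boils down to one lookup in the parsed pyproject data; a sketch with hypothetical file contents:

```python
pyproject_data = {"tool": {"crewai": {"type": "flow"}}}  # as read_toml() might return
is_flow = pyproject_data.get("tool", {}).get("crewai", {}).get("type") == "flow"
command = ["uv", "run", "kickoff" if is_flow else "run_crew"]
assert command == ["uv", "run", "kickoff"]
```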

View File

@@ -1,2 +1,3 @@
.env
__pycache__/
.DS_Store

View File

@@ -2,7 +2,7 @@ research_task:
description: >
Conduct a thorough research about {topic}
Make sure you find any interesting and relevant information given
the current year is 2024.
the current year is {current_year}.
expected_output: >
A list with 10 bullet points of the most relevant information about {topic}
agent: researcher

View File

@@ -1,62 +1,62 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
# If you want to run a snippet of code before or after the crew starts,
# If you want to run a snippet of code before or after the crew starts,
# you can use the @before_kickoff and @after_kickoff decorators
# https://docs.crewai.com/concepts/crews#example-crew-class-with-decorators
@CrewBase
class {{crew_name}}():
"""{{crew_name}} crew"""
"""{{crew_name}} crew"""
# Learn more about YAML configuration files here:
# Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
# Tasks: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended
agents_config = 'config/agents.yaml'
tasks_config = 'config/tasks.yaml'
# Learn more about YAML configuration files here:
# Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
# Tasks: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended
agents_config = 'config/agents.yaml'
tasks_config = 'config/tasks.yaml'
# If you would like to add tools to your agents, you can learn more about it here:
# https://docs.crewai.com/concepts/agents#agent-tools
@agent
def researcher(self) -> Agent:
return Agent(
config=self.agents_config['researcher'],
verbose=True
)
# If you would like to add tools to your agents, you can learn more about it here:
# https://docs.crewai.com/concepts/agents#agent-tools
@agent
def researcher(self) -> Agent:
return Agent(
config=self.agents_config['researcher'],
verbose=True
)
@agent
def reporting_analyst(self) -> Agent:
return Agent(
config=self.agents_config['reporting_analyst'],
verbose=True
)
@agent
def reporting_analyst(self) -> Agent:
return Agent(
config=self.agents_config['reporting_analyst'],
verbose=True
)
# To learn more about structured task outputs,
# task dependencies, and task callbacks, check out the documentation:
# https://docs.crewai.com/concepts/tasks#overview-of-a-task
@task
def research_task(self) -> Task:
return Task(
config=self.tasks_config['research_task'],
)
# To learn more about structured task outputs,
# task dependencies, and task callbacks, check out the documentation:
# https://docs.crewai.com/concepts/tasks#overview-of-a-task
@task
def research_task(self) -> Task:
return Task(
config=self.tasks_config['research_task'],
)
@task
def reporting_task(self) -> Task:
return Task(
config=self.tasks_config['reporting_task'],
output_file='report.md'
)
@task
def reporting_task(self) -> Task:
return Task(
config=self.tasks_config['reporting_task'],
output_file='report.md'
)
@crew
def crew(self) -> Crew:
"""Creates the {{crew_name}} crew"""
# To learn how to add knowledge sources to your crew, check out the documentation:
# https://docs.crewai.com/concepts/knowledge#what-is-knowledge
@crew
def crew(self) -> Crew:
"""Creates the {{crew_name}} crew"""
# To learn how to add knowledge sources to your crew, check out the documentation:
# https://docs.crewai.com/concepts/knowledge#what-is-knowledge
return Crew(
agents=self.agents, # Automatically created by the @agent decorator
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=True,
# process=Process.hierarchical, # In case you wanna use that instead https://docs.crewai.com/how-to/Hierarchical/
)
return Crew(
agents=self.agents, # Automatically created by the @agent decorator
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=True,
# process=Process.hierarchical, # In case you wanna use that instead https://docs.crewai.com/how-to/Hierarchical/
)

View File

@@ -2,6 +2,8 @@
import sys
import warnings
from datetime import datetime
from {{folder_name}}.crew import {{crew_name}}
warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
@@ -16,9 +18,14 @@ def run():
Run the crew.
"""
inputs = {
'topic': 'AI LLMs'
'topic': 'AI LLMs',
'current_year': str(datetime.now().year)
}
{{crew_name}}().crew().kickoff(inputs=inputs)
try:
{{crew_name}}().crew().kickoff(inputs=inputs)
except Exception as e:
raise Exception(f"An error occurred while running the crew: {e}")
def train():
@@ -49,10 +56,11 @@ def test():
Test the crew execution and returns the results.
"""
inputs = {
"topic": "AI LLMs"
"topic": "AI LLMs",
"current_year": str(datetime.now().year)
}
try:
{{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
except Exception as e:
raise Exception(f"An error occurred while replaying the crew: {e}")
raise Exception(f"An error occurred while testing the crew: {e}")

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.13"
dependencies = [
"crewai[tools]>=0.86.0,<1.0.0"
"crewai[tools]>=0.108.0,<1.0.0"
]
[project.scripts]

View File

@@ -1,3 +1,4 @@
.env
__pycache__/
lib/
.DS_Store

View File

@@ -30,13 +30,13 @@ crewai install
## Running the Project
To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:
To kickstart your flow and begin execution, run this from the root folder of your project:
```bash
crewai run
```
This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.
This command initializes the {{name}} Flow as defined in your configuration.
This example, unmodified, will create a `report.md` file in the root folder with the output of research on LLMs.

View File

@@ -3,7 +3,7 @@ from random import randint
from pydantic import BaseModel
from crewai.flow.flow import Flow, listen, start
from crewai.flow import Flow, listen, start
from {{folder_name}}.crews.poem_crew.poem_crew import PoemCrew

View File

@@ -5,11 +5,12 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.13"
dependencies = [
"crewai[tools]>=0.86.0,<1.0.0",
"crewai[tools]>=0.108.0,<1.0.0",
]
[project.scripts]
kickoff = "{{folder_name}}.main:kickoff"
run_crew = "{{folder_name}}.main:kickoff"
plot = "{{folder_name}}.main:plot"
[build-system]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.13"
dependencies = [
"crewai[tools]>=0.86.0"
"crewai[tools]>=0.108.0"
]
[tool.crewai]

View File

@@ -9,6 +9,7 @@ import tomli
from rich.console import Console
from crewai.cli.constants import ENV_VARS
from crewai.crew import Crew
if sys.version_info >= (3, 11):
import tomllib
@@ -247,3 +248,66 @@ def write_env_file(folder_path, env_vars):
with open(env_file_path, "w") as file:
for key, value in env_vars.items():
file.write(f"{key}={value}\n")
def get_crew(crew_path: str = "crew.py", require: bool = False) -> Crew | None:
"""Get the crew instance from the crew.py file."""
try:
import importlib.util
import os
for root, _, files in os.walk("."):
if crew_path in files:
crew_os_path = os.path.join(root, crew_path)
try:
spec = importlib.util.spec_from_file_location(
"crew_module", crew_os_path
)
if not spec or not spec.loader:
continue
module = importlib.util.module_from_spec(spec)
try:
sys.modules[spec.name] = module
spec.loader.exec_module(module)
for attr_name in dir(module):
attr = getattr(module, attr_name)
try:
if isinstance(attr, Crew) and hasattr(attr, "kickoff"):
print(
f"Found valid crew object in attribute '{attr_name}' at {crew_os_path}."
)
return attr
except Exception as e:
print(f"Error processing attribute {attr_name}: {e}")
continue
except Exception as exec_error:
print(f"Error executing module: {exec_error}")
import traceback
print(f"Traceback: {traceback.format_exc()}")
except (ImportError, AttributeError) as e:
if require:
console.print(
f"Error importing crew from {crew_path}: {str(e)}",
style="bold red",
)
continue
break
if require:
console.print("No valid Crew instance found in crew.py", style="bold red")
raise SystemExit
return None
except Exception as e:
if require:
console.print(
f"Unexpected error while loading crew: {str(e)}", style="bold red"
)
raise SystemExit
return None

View File

@@ -1,10 +1,12 @@
import asyncio
import json
import re
import uuid
import warnings
from concurrent.futures import Future
from copy import copy as shallow_copy
from hashlib import md5
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union, cast
from pydantic import (
UUID4,
@@ -24,37 +26,46 @@ from crewai.agents.cache import CacheHandler
from crewai.crews.crew_output import CrewOutput
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.llm import LLM
from crewai.llm import LLM, BaseLLM
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.memory.user.user_memory import UserMemory
from crewai.process import Process
from crewai.security import Fingerprint, SecurityConfig
from crewai.task import Task
from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry import Telemetry
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.tools.base_tool import Tool
from crewai.tools.base_tool import BaseTool, Tool
from crewai.types.usage_metrics import UsageMetrics
from crewai.utilities import I18N, FileHandler, Logger, RPMController
from crewai.utilities.constants import TRAINING_DATA_FILE
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.events.crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewKickoffStartedEvent,
CrewTestCompletedEvent,
CrewTestFailedEvent,
CrewTestStartedEvent,
CrewTrainCompletedEvent,
CrewTrainFailedEvent,
CrewTrainStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.event_listener import EventListener
from crewai.utilities.formatter import (
aggregate_raw_outputs_from_task_outputs,
aggregate_raw_outputs_from_tasks,
)
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.planning_handler import CrewPlanner
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
from crewai.utilities.training_handler import CrewTrainingHandler
try:
import agentops # type: ignore
except ImportError:
agentops = None
warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
@@ -81,6 +92,8 @@ class Crew(BaseModel):
step_callback: Callback to be executed after each step of every agent's execution.
share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models.
planning: Plan the crew execution and add the plan to the crew.
chat_llm: The language model used for orchestrating chat interactions with the crew.
security_config: Security configuration for the crew, including fingerprinting.
"""
__hash__ = object.__hash__ # type: ignore
@@ -93,6 +106,7 @@ class Crew(BaseModel):
_long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
_entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr()
_user_memory: Optional[InstanceOf[UserMemory]] = PrivateAttr()
_external_memory: Optional[InstanceOf[ExternalMemory]] = PrivateAttr()
_train: Optional[bool] = PrivateAttr(default=False)
_train_iteration: Optional[int] = PrivateAttr()
_inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)
@@ -133,6 +147,10 @@ class Crew(BaseModel):
default=None,
description="An instance of the UserMemory to be used by the Crew to store/fetch memories of a specific user.",
)
external_memory: Optional[InstanceOf[ExternalMemory]] = Field(
default=None,
description="An Instance of the ExternalMemory to be used by the Crew",
)
embedder: Optional[dict] = Field(
default=None,
description="Configuration for the embedder to be used for the crew.",
@@ -141,13 +159,13 @@ class Crew(BaseModel):
default=None,
description="Metrics for the LLM usage during all tasks execution.",
)
manager_llm: Optional[Any] = Field(
manager_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
description="Language model that will run the agent.", default=None
)
manager_agent: Optional[BaseAgent] = Field(
description="Custom agent that will be used as manager.", default=None
)
function_calling_llm: Optional[Any] = Field(
function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
description="Language model that will run the agent.", default=None
)
config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
@@ -175,19 +193,19 @@ class Crew(BaseModel):
default=None,
description="Maximum number of requests per minute for the crew execution to be respected.",
)
prompt_file: str = Field(
prompt_file: Optional[str] = Field(
default=None,
description="Path to the prompt json file to be used for the crew.",
)
output_log_file: Optional[str] = Field(
output_log_file: Optional[Union[bool, str]] = Field(
default=None,
description="output_log_file",
description="Path to the log file to be saved",
)
planning: Optional[bool] = Field(
default=False,
description="Plan the crew execution and add the plan to the crew.",
)
planning_llm: Optional[Any] = Field(
planning_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
default=None,
description="Language model that will run the AgentPlanner if planning is True.",
)
@@ -203,8 +221,17 @@ class Crew(BaseModel):
default=None,
description="Knowledge sources for the crew. Add knowledge sources to the knowledge object.",
)
_knowledge: Optional[Knowledge] = PrivateAttr(
chat_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
default=None,
description="LLM used to handle chatting with the crew.",
)
knowledge: Optional[Knowledge] = Field(
default=None,
description="Knowledge for the crew.",
)
security_config: SecurityConfig = Field(
default_factory=SecurityConfig,
description="Security configuration for the crew, including fingerprinting.",
)
@field_validator("id", mode="before")
@@ -234,22 +261,18 @@ class Crew(BaseModel):
@model_validator(mode="after")
def set_private_attrs(self) -> "Crew":
"""Set private attributes."""
self._cache_handler = CacheHandler()
event_listener = EventListener()
event_listener.verbose = self.verbose
event_listener.formatter.verbose = self.verbose
self._logger = Logger(verbose=self.verbose)
if self.output_log_file:
self._file_handler = FileHandler(self.output_log_file)
self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
if self.function_calling_llm:
if isinstance(self.function_calling_llm, str):
self.function_calling_llm = LLM(model=self.function_calling_llm)
elif not isinstance(self.function_calling_llm, LLM):
self.function_calling_llm = LLM(
model=getattr(self.function_calling_llm, "model_name", None)
or getattr(self.function_calling_llm, "deployment_name", None)
or str(self.function_calling_llm)
)
self._telemetry = Telemetry()
self._telemetry.set_tracer()
if self.function_calling_llm and not isinstance(self.function_calling_llm, LLM):
self.function_calling_llm = create_llm(self.function_calling_llm)
return self
@model_validator(mode="after")
@@ -272,12 +295,26 @@ class Crew(BaseModel):
if self.entity_memory
else EntityMemory(crew=self, embedder_config=self.embedder)
)
if hasattr(self, "memory_config") and self.memory_config is not None:
self._user_memory = (
self.user_memory if self.user_memory else UserMemory(crew=self)
)
self._external_memory = (
# External memory doesn't support a default value since it was designed to be managed entirely externally
self.external_memory.set_crew(self)
if self.external_memory
else None
)
if (
self.memory_config
and "user_memory" in self.memory_config
and self.memory_config.get("provider") == "mem0"
): # Check for user_memory in config
user_memory_config = self.memory_config["user_memory"]
if isinstance(
user_memory_config, dict
): # Check if it's a configuration dict
self._user_memory = UserMemory(crew=self)
else:
raise TypeError("user_memory must be a configuration dictionary")
else:
self._user_memory = None
self._user_memory = None # No user memory if not in config
return self
@model_validator(mode="after")
@@ -288,9 +325,9 @@ class Crew(BaseModel):
if isinstance(self.knowledge_sources, list) and all(
isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
):
self._knowledge = Knowledge(
self.knowledge = Knowledge(
sources=self.knowledge_sources,
embedder_config=self.embedder,
embedder=self.embedder,
collection_name="crew",
)
@@ -377,6 +414,22 @@ class Crew(BaseModel):
return self
@model_validator(mode="after")
def validate_must_have_non_conditional_task(self) -> "Crew":
"""Ensure that a crew has at least one non-conditional task."""
if not self.tasks:
return self
non_conditional_count = sum(
1 for task in self.tasks if not isinstance(task, ConditionalTask)
)
if non_conditional_count == 0:
raise PydanticCustomError(
"only_conditional_tasks",
"Crew must include at least one non-conditional task",
{},
)
return self
@model_validator(mode="after")
def validate_first_task(self) -> "Crew":
"""Ensure the first task is not a ConditionalTask."""
@@ -438,11 +491,21 @@ class Crew(BaseModel):
@property
def key(self) -> str:
source = [agent.key for agent in self.agents] + [
source: List[str] = [agent.key for agent in self.agents] + [
task.key for task in self.tasks
]
return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()
@property
def fingerprint(self) -> Fingerprint:
"""
Get the crew's fingerprint.
Returns:
Fingerprint: The crew's fingerprint
"""
return self.security_config.fingerprint
def _setup_from_config(self):
assert self.config is not None, "Config should not be None."
@@ -488,81 +551,121 @@ class Crew(BaseModel):
self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {}
) -> None:
"""Trains the crew for a given number of iterations."""
train_crew = self.copy()
train_crew._setup_for_training(filename)
try:
crewai_event_bus.emit(
self,
CrewTrainStartedEvent(
crew_name=self.name or "crew",
n_iterations=n_iterations,
filename=filename,
inputs=inputs,
),
)
train_crew = self.copy()
train_crew._setup_for_training(filename)
for n_iteration in range(n_iterations):
train_crew._train_iteration = n_iteration
train_crew.kickoff(inputs=inputs)
for n_iteration in range(n_iterations):
train_crew._train_iteration = n_iteration
train_crew.kickoff(inputs=inputs)
training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
for agent in train_crew.agents:
if training_data.get(str(agent.id)):
result = TaskEvaluator(agent).evaluate_training_data(
training_data=training_data, agent_id=str(agent.id)
)
for agent in train_crew.agents:
if training_data.get(str(agent.id)):
result = TaskEvaluator(agent).evaluate_training_data(
training_data=training_data, agent_id=str(agent.id)
)
CrewTrainingHandler(filename).save_trained_data(
agent_id=str(agent.role), trained_data=result.model_dump()
)
CrewTrainingHandler(filename).save_trained_data(
agent_id=str(agent.role), trained_data=result.model_dump()
)
crewai_event_bus.emit(
self,
CrewTrainCompletedEvent(
crew_name=self.name or "crew",
n_iterations=n_iterations,
filename=filename,
),
)
except Exception as e:
crewai_event_bus.emit(
self,
CrewTrainFailedEvent(error=str(e), crew_name=self.name or "crew"),
)
self._logger.log("error", f"Training failed: {e}", color="red")
CrewTrainingHandler(TRAINING_DATA_FILE).clear()
CrewTrainingHandler(filename).clear()
raise
def kickoff(
self,
inputs: Optional[Dict[str, Any]] = None,
) -> CrewOutput:
for before_callback in self.before_kickoff_callbacks:
inputs = before_callback(inputs)
try:
for before_callback in self.before_kickoff_callbacks:
if inputs is None:
inputs = {}
inputs = before_callback(inputs)
"""Starts the crew to work on its assigned tasks."""
self._execution_span = self._telemetry.crew_execution_span(self, inputs)
self._task_output_handler.reset()
self._logging_color = "bold_purple"
if inputs is not None:
self._inputs = inputs
self._interpolate_inputs(inputs)
self._set_tasks_callbacks()
i18n = I18N(prompt_file=self.prompt_file)
for agent in self.agents:
agent.i18n = i18n
# type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
agent.crew = self # type: ignore[attr-defined]
# TODO: Create an AgentFunctionCalling protocol for future refactoring
if not agent.function_calling_llm: # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
agent.function_calling_llm = self.function_calling_llm # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
if not agent.step_callback: # type: ignore # "BaseAgent" has no attribute "step_callback"
agent.step_callback = self.step_callback # type: ignore # "BaseAgent" has no attribute "step_callback"
agent.create_agent_executor()
if self.planning:
self._handle_crew_planning()
metrics: List[UsageMetrics] = []
if self.process == Process.sequential:
result = self._run_sequential_process()
elif self.process == Process.hierarchical:
result = self._run_hierarchical_process()
else:
raise NotImplementedError(
f"The process '{self.process}' is not implemented yet."
crewai_event_bus.emit(
self,
CrewKickoffStartedEvent(crew_name=self.name or "crew", inputs=inputs),
)
for after_callback in self.after_kickoff_callbacks:
result = after_callback(result)
# Starts the crew to work on its assigned tasks.
self._task_output_handler.reset()
self._logging_color = "bold_purple"
metrics += [agent._token_process.get_summary() for agent in self.agents]
if inputs is not None:
self._inputs = inputs
self._interpolate_inputs(inputs)
self._set_tasks_callbacks()
self.usage_metrics = UsageMetrics()
for metric in metrics:
self.usage_metrics.add_usage_metrics(metric)
i18n = I18N(prompt_file=self.prompt_file)
return result
for agent in self.agents:
agent.i18n = i18n
# type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
agent.crew = self # type: ignore[attr-defined]
agent.set_knowledge(crew_embedder=self.embedder)
# TODO: Create an AgentFunctionCalling protocol for future refactoring
if not agent.function_calling_llm: # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
agent.function_calling_llm = self.function_calling_llm # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
if not agent.step_callback: # type: ignore # "BaseAgent" has no attribute "step_callback"
agent.step_callback = self.step_callback # type: ignore # "BaseAgent" has no attribute "step_callback"
agent.create_agent_executor()
if self.planning:
self._handle_crew_planning()
metrics: List[UsageMetrics] = []
if self.process == Process.sequential:
result = self._run_sequential_process()
elif self.process == Process.hierarchical:
result = self._run_hierarchical_process()
else:
raise NotImplementedError(
f"The process '{self.process}' is not implemented yet."
)
for after_callback in self.after_kickoff_callbacks:
result = after_callback(result)
metrics += [agent._token_process.get_summary() for agent in self.agents]
self.usage_metrics = UsageMetrics()
for metric in metrics:
self.usage_metrics.add_usage_metrics(metric)
return result
except Exception as e:
crewai_event_bus.emit(
self,
CrewKickoffFailedEvent(error=str(e), crew_name=self.name or "crew"),
)
raise
def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List[CrewOutput]:
"""Executes the Crew's workflow for each input in the list and aggregates results."""
@@ -671,11 +774,7 @@ class Crew(BaseModel):
manager.tools = []
raise Exception("Manager agent should not have tools")
else:
self.manager_llm = (
getattr(self.manager_llm, "model_name", None)
or getattr(self.manager_llm, "deployment_name", None)
or self.manager_llm
)
self.manager_llm = create_llm(self.manager_llm)
manager = Agent(
role=i18n.retrieve("hierarchical_manager_agent", "role"),
goal=i18n.retrieve("hierarchical_manager_agent", "goal"),
@@ -726,10 +825,11 @@ class Crew(BaseModel):
# Determine which tools to use - task tools take precedence over agent tools
tools_for_task = task.tools or agent_to_use.tools or []
# Prepare tools and ensure they're compatible with task execution
tools_for_task = self._prepare_tools(
agent_to_use,
task,
tools_for_task
cast(Union[List[Tool], List[BaseTool]], tools_for_task),
)
self._log_task_start(task, agent_to_use.role)
@@ -739,6 +839,7 @@ class Crew(BaseModel):
task, task_outputs, futures, task_index, was_replayed
)
if skipped_task_output:
task_outputs.append(skipped_task_output)
continue
if task.async_execution:
@@ -748,7 +849,7 @@ class Crew(BaseModel):
future = task.execute_async(
agent=agent_to_use,
context=context,
tools=tools_for_task,
tools=cast(List[BaseTool], tools_for_task),
)
futures.append((task, future, task_index))
else:
@@ -760,9 +861,9 @@ class Crew(BaseModel):
task_output = task.execute_sync(
agent=agent_to_use,
context=context,
tools=tools_for_task,
tools=cast(List[BaseTool], tools_for_task),
)
task_outputs = [task_output]
task_outputs.append(task_output)
self._process_task_result(task, task_output)
self._store_execution_log(task, task_output, task_index, was_replayed)
@@ -783,7 +884,7 @@ class Crew(BaseModel):
task_outputs = self._process_async_tasks(futures, was_replayed)
futures.clear()
previous_output = task_outputs[task_index - 1] if task_outputs else None
previous_output = task_outputs[-1] if task_outputs else None
if previous_output is not None and not task.should_execute(previous_output):
self._logger.log(
"debug",
@@ -797,36 +898,53 @@ class Crew(BaseModel):
return skipped_task_output
return None
def _prepare_tools(self, agent: BaseAgent, task: Task, tools: List[Tool]) -> List[Tool]:
def _prepare_tools(
self, agent: BaseAgent, task: Task, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
# Add delegation tools if agent allows delegation
if agent.allow_delegation:
if hasattr(agent, "allow_delegation") and getattr(
agent, "allow_delegation", False
):
if self.process == Process.hierarchical:
if self.manager_agent:
tools = self._update_manager_tools(task, tools)
else:
raise ValueError("Manager agent is required for hierarchical process.")
raise ValueError(
"Manager agent is required for hierarchical process."
)
elif agent and agent.allow_delegation:
elif agent:
tools = self._add_delegation_tools(task, tools)
# Add code execution tools if agent allows code execution
if agent.allow_code_execution:
if hasattr(agent, "allow_code_execution") and getattr(
agent, "allow_code_execution", False
):
tools = self._add_code_execution_tools(agent, tools)
if agent and agent.multimodal:
if (
agent
and hasattr(agent, "multimodal")
and getattr(agent, "multimodal", False)
):
tools = self._add_multimodal_tools(agent, tools)
return tools
# Return a List[BaseTool] which is compatible with both Task.execute_sync and Task.execute_async
return cast(List[BaseTool], tools)
def _get_agent_to_use(self, task: Task) -> Optional[BaseAgent]:
if self.process == Process.hierarchical:
return self.manager_agent
return task.agent
def _merge_tools(self, existing_tools: List[Tool], new_tools: List[Tool]) -> List[Tool]:
def _merge_tools(
self,
existing_tools: Union[List[Tool], List[BaseTool]],
new_tools: Union[List[Tool], List[BaseTool]],
) -> List[BaseTool]:
"""Merge new tools into existing tools list, avoiding duplicates by tool name."""
if not new_tools:
return existing_tools
return cast(List[BaseTool], existing_tools)
# Create mapping of tool names to new tools
new_tool_map = {tool.name: tool for tool in new_tools}
@@ -837,27 +955,49 @@ class Crew(BaseModel):
# Add all new tools
tools.extend(new_tools)
return tools
return cast(List[BaseTool], tools)
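# Aside: the name-keyed merge above can be pictured with plain stand-in
# objects. This is a hypothetical sketch, not CrewAI's BaseTool.
from types import SimpleNamespace

def merge_by_name(existing, new):
    # Drop existing tools whose names are being replaced, then append all new tools.
    new_names = {tool.name for tool in new}
    merged = [tool for tool in existing if tool.name not in new_names]
    merged.extend(new)
    return merged

search_v1 = SimpleNamespace(name="search")
scrape = SimpleNamespace(name="scrape")
search_v2 = SimpleNamespace(name="search")
print([t.name for t in merge_by_name([search_v1, scrape], [search_v2])])
# ['scrape', 'search'] (the newer 'search' tool wins)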
def _inject_delegation_tools(self, tools: List[Tool], task_agent: BaseAgent, agents: List[BaseAgent]):
delegation_tools = task_agent.get_delegation_tools(agents)
return self._merge_tools(tools, delegation_tools)
def _inject_delegation_tools(
self,
tools: Union[List[Tool], List[BaseTool]],
task_agent: BaseAgent,
agents: List[BaseAgent],
) -> List[BaseTool]:
if hasattr(task_agent, "get_delegation_tools"):
delegation_tools = task_agent.get_delegation_tools(agents)
# Cast delegation_tools to the expected type for _merge_tools
return self._merge_tools(tools, cast(List[BaseTool], delegation_tools))
return cast(List[BaseTool], tools)
def _add_multimodal_tools(self, agent: BaseAgent, tools: List[Tool]):
multimodal_tools = agent.get_multimodal_tools()
return self._merge_tools(tools, multimodal_tools)
def _add_multimodal_tools(
self, agent: BaseAgent, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
if hasattr(agent, "get_multimodal_tools"):
multimodal_tools = agent.get_multimodal_tools()
# Cast multimodal_tools to the expected type for _merge_tools
return self._merge_tools(tools, cast(List[BaseTool], multimodal_tools))
return cast(List[BaseTool], tools)
def _add_code_execution_tools(self, agent: BaseAgent, tools: List[Tool]):
code_tools = agent.get_code_execution_tools()
return self._merge_tools(tools, code_tools)
def _add_code_execution_tools(
self, agent: BaseAgent, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
if hasattr(agent, "get_code_execution_tools"):
code_tools = agent.get_code_execution_tools()
# Cast code_tools to the expected type for _merge_tools
return self._merge_tools(tools, cast(List[BaseTool], code_tools))
return cast(List[BaseTool], tools)
def _add_delegation_tools(self, task: Task, tools: List[Tool]):
def _add_delegation_tools(
self, task: Task, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
agents_for_delegation = [agent for agent in self.agents if agent != task.agent]
if len(self.agents) > 1 and len(agents_for_delegation) > 0 and task.agent:
if not tools:
tools = []
tools = self._inject_delegation_tools(tools, task.agent, agents_for_delegation)
return tools
tools = self._inject_delegation_tools(
tools, task.agent, agents_for_delegation
)
return cast(List[BaseTool], tools)
def _log_task_start(self, task: Task, role: str = "None"):
if self.output_log_file:
@@ -865,13 +1005,17 @@ class Crew(BaseModel):
task_name=task.name, task=task.description, agent=role, status="started"
)
def _update_manager_tools(self, task: Task, tools: List[Tool]):
def _update_manager_tools(
self, task: Task, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
if self.manager_agent:
if task.agent:
tools = self._inject_delegation_tools(tools, task.agent, [task.agent])
else:
tools = self._inject_delegation_tools(tools, self.manager_agent, self.agents)
return tools
tools = self._inject_delegation_tools(
tools, self.manager_agent, self.agents
)
return cast(List[BaseTool], tools)
def _get_context(self, task: Task, task_outputs: List[TaskOutput]):
context = (
@@ -893,20 +1037,29 @@ class Crew(BaseModel):
)
def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
if len(task_outputs) != 1:
raise ValueError(
"Something went wrong. Kickoff should return only one task output."
)
final_task_output = task_outputs[0]
if not task_outputs:
raise ValueError("No task outputs available to create crew output.")
# Filter out empty outputs and get the last valid one as the main output
valid_outputs = [t for t in task_outputs if t.raw]
if not valid_outputs:
raise ValueError("No valid task outputs available to create crew output.")
final_task_output = valid_outputs[-1]
final_string_output = final_task_output.raw
self._finish_execution(final_string_output)
token_usage = self.calculate_usage_metrics()
crewai_event_bus.emit(
self,
CrewKickoffCompletedEvent(
crew_name=self.name or "crew", output=final_task_output
),
)
return CrewOutput(
raw=final_task_output.raw,
pydantic=final_task_output.pydantic,
json_dict=final_task_output.json_dict,
tasks_output=[task.output for task in self.tasks if task.output],
tasks_output=task_outputs,
token_usage=token_usage,
)
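# The new selection rule is easy to verify with plain stand-ins (illustrative
# only, not real TaskOutput objects): empty `raw` values are filtered out and
# the last non-empty output becomes the crew's final output.
from types import SimpleNamespace

task_outputs = [
    SimpleNamespace(raw="draft"),
    SimpleNamespace(raw=""),  # skipped: empty output
    SimpleNamespace(raw="final report"),
]
valid_outputs = [t for t in task_outputs if t.raw]
assert valid_outputs[-1].raw == "final report"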
@@ -979,12 +1132,42 @@ class Crew(BaseModel):
return result
def query_knowledge(self, query: List[str]) -> Union[List[Dict[str, Any]], None]:
if self._knowledge:
return self._knowledge.query(query)
if self.knowledge:
return self.knowledge.query(query)
return None
def fetch_inputs(self) -> Set[str]:
"""
Gathers placeholders (e.g., {something}) referenced in tasks or agents.
Scans each task's 'description' + 'expected_output', and each agent's
'role', 'goal', and 'backstory'.
Returns a set of all discovered placeholder names.
"""
placeholder_pattern = re.compile(r"\{(.+?)\}")
required_inputs: Set[str] = set()
# Scan tasks for inputs
for task in self.tasks:
# description and expected_output might contain e.g. {topic}, {user_name}, etc.
text = f"{task.description or ''} {task.expected_output or ''}"
required_inputs.update(placeholder_pattern.findall(text))
# Scan agents for inputs
for agent in self.agents:
# role, goal, backstory might have placeholders like {role_detail}, etc.
text = f"{agent.role or ''} {agent.goal or ''} {agent.backstory or ''}"
required_inputs.update(placeholder_pattern.findall(text))
return required_inputs
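# The scan reduces to a single non-greedy regex; a standalone check of the
# exact pattern used above:
import re

placeholder_pattern = re.compile(r"\{(.+?)\}")
text = "Research {topic} and summarize it for {audience}"
print(set(placeholder_pattern.findall(text)))  # {'topic', 'audience'}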
def copy(self):
"""Create a deep copy of the Crew."""
"""
Creates a deep copy of the Crew instance.
Returns:
Crew: A new instance with copied components
"""
exclude = {
"id",
@@ -996,16 +1179,26 @@ class Crew(BaseModel):
"_short_term_memory",
"_long_term_memory",
"_entity_memory",
"_external_memory",
"_telemetry",
"agents",
"tasks",
"knowledge_sources",
"knowledge",
"manager_agent",
"manager_llm",
}
cloned_agents = [agent.copy() for agent in self.agents]
manager_agent = self.manager_agent.copy() if self.manager_agent else None
manager_llm = shallow_copy(self.manager_llm) if self.manager_llm else None
task_mapping = {}
cloned_tasks = []
existing_knowledge_sources = shallow_copy(self.knowledge_sources)
existing_knowledge = shallow_copy(self.knowledge)
for task in self.tasks:
cloned_task = task.copy(cloned_agents, task_mapping)
cloned_tasks.append(cloned_task)
@@ -1025,7 +1218,15 @@ class Crew(BaseModel):
copied_data.pop("agents", None)
copied_data.pop("tasks", None)
copied_crew = Crew(**copied_data, agents=cloned_agents, tasks=cloned_tasks)
copied_crew = Crew(
**copied_data,
agents=cloned_agents,
tasks=cloned_tasks,
knowledge_sources=existing_knowledge_sources,
knowledge=existing_knowledge,
manager_agent=manager_agent,
manager_llm=manager_llm,
)
return copied_crew
@@ -1038,7 +1239,7 @@ class Crew(BaseModel):
def _interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
"""Interpolates the inputs in the tasks and agents."""
[
task.interpolate_inputs(
task.interpolate_inputs_and_add_conversation_history(
# type: ignore # "interpolate_inputs" of "Task" does not return a value (it only ever returns None)
inputs
)
@@ -1051,13 +1252,6 @@ class Crew(BaseModel):
def _finish_execution(self, final_string_output: str) -> None:
if self.max_rpm:
self._rpm_controller.stop_rpm_counter()
if agentops:
agentops.end_session(
end_state="Success",
end_state_reason="Finished Execution",
is_auto_end=True,
)
self._telemetry.end_crew(self, final_string_output)
def calculate_usage_metrics(self) -> UsageMetrics:
"""Calculates and returns the usage metrics."""
@@ -1075,25 +1269,133 @@ class Crew(BaseModel):
def test(
self,
n_iterations: int,
openai_model_name: Optional[str] = None,
eval_llm: Union[str, InstanceOf[BaseLLM]],
inputs: Optional[Dict[str, Any]] = None,
) -> None:
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
test_crew = self.copy()
try:
# Create LLM instance and ensure it's of type LLM for CrewEvaluator
llm_instance = create_llm(eval_llm)
if not llm_instance:
raise ValueError("Failed to create LLM instance.")
self._test_execution_span = test_crew._telemetry.test_execution_span(
test_crew,
n_iterations,
inputs,
openai_model_name, # type: ignore[arg-type]
) # type: ignore[arg-type]
evaluator = CrewEvaluator(test_crew, openai_model_name) # type: ignore[arg-type]
crewai_event_bus.emit(
self,
CrewTestStartedEvent(
crew_name=self.name or "crew",
n_iterations=n_iterations,
eval_llm=llm_instance,
inputs=inputs,
),
)
test_crew = self.copy()
evaluator = CrewEvaluator(test_crew, llm_instance)
for i in range(1, n_iterations + 1):
evaluator.set_iteration(i)
test_crew.kickoff(inputs=inputs)
for i in range(1, n_iterations + 1):
evaluator.set_iteration(i)
test_crew.kickoff(inputs=inputs)
evaluator.print_crew_evaluation_result()
evaluator.print_crew_evaluation_result()
crewai_event_bus.emit(
self,
CrewTestCompletedEvent(
crew_name=self.name or "crew",
),
)
except Exception as e:
crewai_event_bus.emit(
self,
CrewTestFailedEvent(error=str(e), crew_name=self.name or "crew"),
)
raise
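# Hypothetical call against the new signature, where `eval_llm` replaces the
# removed `openai_model_name` and accepts either a model string or a BaseLLM
# instance. The model name and inputs below are illustrative.
crew.test(n_iterations=2, eval_llm="gpt-4o", inputs={"topic": "AI agents"})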
def __repr__(self):
return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
def reset_memories(self, command_type: str) -> None:
"""Reset specific or all memories for the crew.
Args:
command_type: Type of memory to reset.
Valid options: 'long', 'short', 'entity', 'knowledge',
'kickoff_outputs', 'external', or 'all'
Raises:
ValueError: If an invalid command type is provided.
RuntimeError: If memory reset operation fails.
"""
VALID_TYPES = frozenset(
[
"long",
"short",
"entity",
"knowledge",
"kickoff_outputs",
"all",
"external",
]
)
if command_type not in VALID_TYPES:
raise ValueError(
f"Invalid command type. Must be one of: {', '.join(sorted(VALID_TYPES))}"
)
try:
if command_type == "all":
self._reset_all_memories()
else:
self._reset_specific_memory(command_type)
self._logger.log("info", f"{command_type} memory has been reset")
except Exception as e:
error_msg = f"Failed to reset {command_type} memory: {str(e)}"
self._logger.log("error", error_msg)
raise RuntimeError(error_msg) from e
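# Usage sketch for the validation and dispatch above, assuming a configured
# `crew` instance:
crew.reset_memories("short")   # resets only short-term memory
crew.reset_memories("all")     # resets every initialized memory system
crew.reset_memories("weekly")  # raises ValueError listing the valid types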
def _reset_all_memories(self) -> None:
"""Reset all available memory systems."""
memory_systems = [
("short term", getattr(self, "_short_term_memory", None)),
("entity", getattr(self, "_entity_memory", None)),
("external", getattr(self, "_external_memory", None)),
("long term", getattr(self, "_long_term_memory", None)),
("task output", getattr(self, "_task_output_handler", None)),
("knowledge", getattr(self, "knowledge", None)),
]
for name, system in memory_systems:
if system is not None:
try:
system.reset()
except Exception as e:
raise RuntimeError(f"Failed to reset {name} memory") from e
def _reset_specific_memory(self, memory_type: str) -> None:
"""Reset a specific memory system.
Args:
memory_type: Type of memory to reset
Raises:
RuntimeError: If the specified memory system fails to reset
"""
reset_functions = {
"long": (self._long_term_memory, "long term"),
"short": (self._short_term_memory, "short term"),
"entity": (self._entity_memory, "entity"),
"knowledge": (self.knowledge, "knowledge"),
"kickoff_outputs": (self._task_output_handler, "task output"),
"external": (self._external_memory, "external"),
}
memory_system, name = reset_functions[memory_type]
if memory_system is None:
raise RuntimeError(f"{name} memory system is not initialized")
try:
memory_system.reset()
except Exception as e:
raise RuntimeError(f"Failed to reset {name} memory") from e

View File

@@ -1,3 +1,5 @@
from crewai.flow.flow import Flow
from crewai.flow.flow import Flow, start, listen, or_, and_, router
from crewai.flow.persistence import persist
__all__ = ["Flow"]
__all__ = ["Flow", "start", "listen", "or_", "and_", "router", "persist"]

File diff suppressed because it is too large

View File

@@ -1,33 +0,0 @@
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Optional
@dataclass
class Event:
type: str
flow_name: str
timestamp: datetime = field(init=False)
def __post_init__(self):
self.timestamp = datetime.now()
@dataclass
class FlowStartedEvent(Event):
pass
@dataclass
class MethodExecutionStartedEvent(Event):
method_name: str
@dataclass
class MethodExecutionFinishedEvent(Event):
method_name: str
@dataclass
class FlowFinishedEvent(Event):
result: Optional[Any] = None

View File

@@ -1,12 +1,14 @@
# flow_visualizer.py
import os
from pathlib import Path
from pyvis.network import Network
from crewai.flow.config import COLORS, NODE_STYLES
from crewai.flow.html_template_handler import HTMLTemplateHandler
from crewai.flow.legend_generator import generate_legend_items_html, get_legend_items
from crewai.flow.path_utils import safe_path_join, validate_path_exists
from crewai.flow.utils import calculate_node_levels
from crewai.flow.visualization_utils import (
add_edges,
@@ -16,89 +18,209 @@ from crewai.flow.visualization_utils import (
class FlowPlot:
"""Handles the creation and rendering of flow visualization diagrams."""
def __init__(self, flow):
"""
Initialize FlowPlot with a flow object.
Parameters
----------
flow : Flow
A Flow instance to visualize.
Raises
------
ValueError
If flow object is invalid or missing required attributes.
"""
if not hasattr(flow, '_methods'):
raise ValueError("Invalid flow object: missing '_methods' attribute")
if not hasattr(flow, '_listeners'):
raise ValueError("Invalid flow object: missing '_listeners' attribute")
if not hasattr(flow, '_start_methods'):
raise ValueError("Invalid flow object: missing '_start_methods' attribute")
self.flow = flow
self.colors = COLORS
self.node_styles = NODE_STYLES
def plot(self, filename):
net = Network(
directed=True,
height="750px",
width="100%",
bgcolor=self.colors["bg"],
layout=None,
)
# Set options to disable physics
net.set_options(
"""
var options = {
"nodes": {
"font": {
"multi": "html"
}
},
"physics": {
"enabled": false
}
}
"""
)
"""
Generate and save an HTML visualization of the flow.
# Calculate levels for nodes
node_levels = calculate_node_levels(self.flow)
Parameters
----------
filename : str
Name of the output file (without extension).
# Compute positions
node_positions = compute_positions(self.flow, node_levels)
Raises
------
ValueError
If filename is invalid or network generation fails.
IOError
If file operations fail or visualization cannot be generated.
RuntimeError
If network visualization generation fails.
"""
if not filename or not isinstance(filename, str):
raise ValueError("Filename must be a non-empty string")
try:
# Initialize network
net = Network(
directed=True,
height="750px",
width="100%",
bgcolor=self.colors["bg"],
layout=None,
)
# Add nodes to the network
add_nodes_to_network(net, self.flow, node_positions, self.node_styles)
# Set options to disable physics
net.set_options(
"""
var options = {
"nodes": {
"font": {
"multi": "html"
}
},
"physics": {
"enabled": false
}
}
"""
)
# Add edges to the network
add_edges(net, self.flow, node_positions, self.colors)
# Calculate levels for nodes
try:
node_levels = calculate_node_levels(self.flow)
except Exception as e:
raise ValueError(f"Failed to calculate node levels: {str(e)}")
network_html = net.generate_html()
final_html_content = self._generate_final_html(network_html)
# Compute positions
try:
node_positions = compute_positions(self.flow, node_levels)
except Exception as e:
raise ValueError(f"Failed to compute node positions: {str(e)}")
# Save the final HTML content to the file
with open(f"{filename}.html", "w", encoding="utf-8") as f:
f.write(final_html_content)
print(f"Plot saved as {filename}.html")
# Add nodes to the network
try:
add_nodes_to_network(net, self.flow, node_positions, self.node_styles)
except Exception as e:
raise RuntimeError(f"Failed to add nodes to network: {str(e)}")
self._cleanup_pyvis_lib()
# Add edges to the network
try:
add_edges(net, self.flow, node_positions, self.colors)
except Exception as e:
raise RuntimeError(f"Failed to add edges to network: {str(e)}")
# Generate HTML
try:
network_html = net.generate_html()
final_html_content = self._generate_final_html(network_html)
except Exception as e:
raise RuntimeError(f"Failed to generate network visualization: {str(e)}")
# Save the final HTML content to the file
try:
with open(f"{filename}.html", "w", encoding="utf-8") as f:
f.write(final_html_content)
print(f"Plot saved as {filename}.html")
except IOError as e:
raise IOError(f"Failed to save flow visualization to {filename}.html: {str(e)}")
except (ValueError, RuntimeError, IOError) as e:
raise e
except Exception as e:
raise RuntimeError(f"Unexpected error during flow visualization: {str(e)}")
finally:
self._cleanup_pyvis_lib()
def _generate_final_html(self, network_html):
# Extract just the body content from the generated HTML
current_dir = os.path.dirname(__file__)
template_path = os.path.join(
current_dir, "assets", "crewai_flow_visual_template.html"
)
logo_path = os.path.join(current_dir, "assets", "crewai_logo.svg")
"""
Generate the final HTML content with network visualization and legend.
html_handler = HTMLTemplateHandler(template_path, logo_path)
network_body = html_handler.extract_body_content(network_html)
Parameters
----------
network_html : str
HTML content generated by pyvis Network.
# Generate the legend items HTML
legend_items = get_legend_items(self.colors)
legend_items_html = generate_legend_items_html(legend_items)
final_html_content = html_handler.generate_final_html(
network_body, legend_items_html
)
return final_html_content
Returns
-------
str
Complete HTML content with styling and legend.
Raises
------
IOError
If template or logo files cannot be accessed.
ValueError
If network_html is invalid.
"""
if not network_html:
raise ValueError("Invalid network HTML content")
try:
# Extract just the body content from the generated HTML
current_dir = os.path.dirname(__file__)
template_path = safe_path_join("assets", "crewai_flow_visual_template.html", root=current_dir)
logo_path = safe_path_join("assets", "crewai_logo.svg", root=current_dir)
if not os.path.exists(template_path):
raise IOError(f"Template file not found: {template_path}")
if not os.path.exists(logo_path):
raise IOError(f"Logo file not found: {logo_path}")
html_handler = HTMLTemplateHandler(template_path, logo_path)
network_body = html_handler.extract_body_content(network_html)
# Generate the legend items HTML
legend_items = get_legend_items(self.colors)
legend_items_html = generate_legend_items_html(legend_items)
final_html_content = html_handler.generate_final_html(
network_body, legend_items_html
)
return final_html_content
except Exception as e:
raise IOError(f"Failed to generate visualization HTML: {str(e)}")
def _cleanup_pyvis_lib(self):
# Clean up the generated lib folder
lib_folder = os.path.join(os.getcwd(), "lib")
"""
Clean up the generated lib folder from pyvis.
This method safely removes the temporary lib directory created by pyvis
during network visualization generation.
"""
try:
lib_folder = safe_path_join("lib", root=os.getcwd())
if os.path.exists(lib_folder) and os.path.isdir(lib_folder):
import shutil
shutil.rmtree(lib_folder)
except ValueError as e:
print(f"Error validating lib folder path: {e}")
except Exception as e:
print(f"Error cleaning up {lib_folder}: {e}")
print(f"Error cleaning up lib folder: {e}")
def plot_flow(flow, filename="flow_plot"):
"""
Convenience function to create and save a flow visualization.
Parameters
----------
flow : Flow
Flow instance to visualize.
filename : str, optional
Output filename without extension, by default "flow_plot".
Raises
------
ValueError
If flow object or filename is invalid.
IOError
If file operations fail.
"""
visualizer = FlowPlot(flow)
visualizer.plot(filename)

View File

@@ -1,26 +1,53 @@
import base64
import re
from pathlib import Path
from crewai.flow.path_utils import safe_path_join, validate_path_exists
class HTMLTemplateHandler:
"""Handles HTML template processing and generation for flow visualization diagrams."""
def __init__(self, template_path, logo_path):
self.template_path = template_path
self.logo_path = logo_path
"""
Initialize HTMLTemplateHandler with validated template and logo paths.
Parameters
----------
template_path : str
Path to the HTML template file.
logo_path : str
Path to the logo image file.
Raises
------
ValueError
If template or logo paths are invalid or files don't exist.
"""
try:
self.template_path = validate_path_exists(template_path, "file")
self.logo_path = validate_path_exists(logo_path, "file")
except ValueError as e:
raise ValueError(f"Invalid template or logo path: {e}")
def read_template(self):
"""Read and return the HTML template file contents."""
with open(self.template_path, "r", encoding="utf-8") as f:
return f.read()
def encode_logo(self):
"""Convert the logo SVG file to base64 encoded string."""
with open(self.logo_path, "rb") as logo_file:
logo_svg_data = logo_file.read()
return base64.b64encode(logo_svg_data).decode("utf-8")
def extract_body_content(self, html):
"""Extract and return content between body tags from HTML string."""
match = re.search("<body.*?>(.*?)</body>", html, re.DOTALL)
return match.group(1) if match else ""
def generate_legend_items_html(self, legend_items):
"""Generate HTML markup for the legend items."""
legend_items_html = ""
for item in legend_items:
if "border" in item:
@@ -48,6 +75,7 @@ class HTMLTemplateHandler:
return legend_items_html
def generate_final_html(self, network_body, legend_items_html, title="Flow Plot"):
"""Combine all components into final HTML document with network visualization."""
html_template = self.read_template()
logo_svg_base64 = self.encode_logo()

View File

@@ -1,3 +1,4 @@
def get_legend_items(colors):
return [
{"label": "Start Method", "color": colors["start"]},

View File

@@ -0,0 +1,135 @@
"""
Path utilities for secure file operations in CrewAI flow module.
This module provides utilities for secure path handling to prevent directory
traversal attacks and ensure paths remain within allowed boundaries.
"""
import os
from pathlib import Path
from typing import List, Union
def safe_path_join(*parts: str, root: Union[str, Path, None] = None) -> str:
"""
Safely join path components and ensure the result is within allowed boundaries.
Parameters
----------
*parts : str
Variable number of path components to join.
root : Union[str, Path, None], optional
Root directory to use as base. If None, uses current working directory.
Returns
-------
str
String representation of the resolved path.
Raises
------
ValueError
If the resulting path would be outside the root directory
or if any path component is invalid.
"""
if not parts:
raise ValueError("No path components provided")
try:
# Convert all parts to strings and clean them
clean_parts = [str(part).strip() for part in parts if part]
if not clean_parts:
raise ValueError("No valid path components provided")
# Establish root directory
root_path = Path(root).resolve() if root else Path.cwd()
# Join and resolve the full path
full_path = Path(root_path, *clean_parts).resolve()
# Check if the resolved path is within root
if not str(full_path).startswith(str(root_path)):
raise ValueError(
f"Invalid path: Potential directory traversal. Path must be within {root_path}"
)
return str(full_path)
except Exception as e:
if isinstance(e, ValueError):
raise
raise ValueError(f"Invalid path components: {str(e)}")
def validate_path_exists(path: Union[str, Path], file_type: str = "file") -> str:
"""
Validate that a path exists and is of the expected type.
Parameters
----------
path : Union[str, Path]
Path to validate.
file_type : str, optional
Expected type ('file' or 'directory'), by default 'file'.
Returns
-------
str
Validated path as string.
Raises
------
ValueError
If path doesn't exist or is not of expected type.
"""
try:
path_obj = Path(path).resolve()
if not path_obj.exists():
raise ValueError(f"Path does not exist: {path}")
if file_type == "file" and not path_obj.is_file():
raise ValueError(f"Path is not a file: {path}")
elif file_type == "directory" and not path_obj.is_dir():
raise ValueError(f"Path is not a directory: {path}")
return str(path_obj)
except Exception as e:
if isinstance(e, ValueError):
raise
raise ValueError(f"Invalid path: {str(e)}")
def list_files(directory: Union[str, Path], pattern: str = "*") -> List[str]:
"""
Safely list files in a directory matching a pattern.
Parameters
----------
directory : Union[str, Path]
Directory to search in.
pattern : str, optional
Glob pattern to match files against, by default "*".
Returns
-------
List[str]
List of matching file paths.
Raises
------
ValueError
If directory is invalid or inaccessible.
"""
try:
dir_path = Path(directory).resolve()
if not dir_path.is_dir():
raise ValueError(f"Not a directory: {directory}")
return [str(p) for p in dir_path.glob(pattern) if p.is_file()]
except Exception as e:
if isinstance(e, ValueError):
raise
raise ValueError(f"Error listing files: {str(e)}")

View File

@@ -0,0 +1,18 @@
"""
CrewAI Flow Persistence.
This module provides interfaces and implementations for persisting flow states.
"""
from typing import Any, Dict, TypeVar, Union
from pydantic import BaseModel
from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.persistence.decorators import persist
from crewai.flow.persistence.sqlite import SQLiteFlowPersistence
__all__ = ["FlowPersistence", "persist", "SQLiteFlowPersistence"]
StateType = TypeVar('StateType', bound=Union[Dict[str, Any], BaseModel])
DictStateType = Dict[str, Any]

View File

@@ -0,0 +1,53 @@
"""Base class for flow state persistence."""
import abc
from typing import Any, Dict, Optional, Union
from pydantic import BaseModel
class FlowPersistence(abc.ABC):
"""Abstract base class for flow state persistence.
This class defines the interface that all persistence implementations must follow.
It supports both structured (Pydantic BaseModel) and unstructured (dict) states.
"""
@abc.abstractmethod
def init_db(self) -> None:
"""Initialize the persistence backend.
This method should handle any necessary setup, such as:
- Creating tables
- Establishing connections
- Setting up indexes
"""
pass
@abc.abstractmethod
def save_state(
self,
flow_uuid: str,
method_name: str,
state_data: Union[Dict[str, Any], BaseModel]
) -> None:
"""Persist the flow state after method completion.
Args:
flow_uuid: Unique identifier for the flow instance
method_name: Name of the method that just completed
state_data: Current state data (either dict or Pydantic model)
"""
pass
@abc.abstractmethod
def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
"""Load the most recent state for a given flow UUID.
Args:
flow_uuid: Unique identifier for the flow instance
Returns:
The most recent state as a dictionary, or None if no state exists
"""
pass

View File

@@ -0,0 +1,254 @@
"""
Decorators for flow state persistence.
Example:
```python
from crewai.flow.flow import Flow, start
from crewai.flow.persistence import persist, SQLiteFlowPersistence
class MyFlow(Flow):
@start()
@persist(SQLiteFlowPersistence())
def sync_method(self):
# Synchronous method implementation
pass
@start()
@persist(SQLiteFlowPersistence())
async def async_method(self):
# Asynchronous method implementation
await some_async_operation()
```
"""
import asyncio
import functools
import logging
from typing import (
Any,
Callable,
Optional,
Type,
TypeVar,
Union,
cast,
)
from pydantic import BaseModel
from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.persistence.sqlite import SQLiteFlowPersistence
from crewai.utilities.printer import Printer
logger = logging.getLogger(__name__)
T = TypeVar("T")
# Constants for log messages
LOG_MESSAGES = {
"save_state": "Saving flow state to memory for ID: {}",
"save_error": "Failed to persist state for method {}: {}",
"state_missing": "Flow instance has no state",
"id_missing": "Flow state must have an 'id' field for persistence"
}
class PersistenceDecorator:
"""Class to handle flow state persistence with consistent logging."""
_printer = Printer() # Class-level printer instance
@classmethod
def persist_state(cls, flow_instance: Any, method_name: str, persistence_instance: FlowPersistence, verbose: bool = False) -> None:
"""Persist flow state with proper error handling and logging.
This method handles the persistence of flow state data, including proper
error handling and colored console output for status updates.
Args:
flow_instance: The flow instance whose state to persist
method_name: Name of the method that triggered persistence
persistence_instance: The persistence backend to use
verbose: Whether to log persistence operations
Raises:
ValueError: If flow has no state or state lacks an ID
RuntimeError: If state persistence fails
AttributeError: If flow instance lacks required state attributes
"""
try:
state = getattr(flow_instance, 'state', None)
if state is None:
raise ValueError("Flow instance has no state")
flow_uuid: Optional[str] = None
if isinstance(state, dict):
flow_uuid = state.get('id')
elif isinstance(state, BaseModel):
flow_uuid = getattr(state, 'id', None)
if not flow_uuid:
raise ValueError("Flow state must have an 'id' field for persistence")
# Log state saving only if verbose is True
if verbose:
cls._printer.print(LOG_MESSAGES["save_state"].format(flow_uuid), color="cyan")
logger.info(LOG_MESSAGES["save_state"].format(flow_uuid))
try:
persistence_instance.save_state(
flow_uuid=flow_uuid,
method_name=method_name,
state_data=state,
)
except Exception as e:
error_msg = LOG_MESSAGES["save_error"].format(method_name, str(e))
cls._printer.print(error_msg, color="red")
logger.error(error_msg)
raise RuntimeError(f"State persistence failed: {str(e)}") from e
except AttributeError:
error_msg = LOG_MESSAGES["state_missing"]
cls._printer.print(error_msg, color="red")
logger.error(error_msg)
raise ValueError(error_msg)
except (TypeError, ValueError) as e:
error_msg = LOG_MESSAGES["id_missing"]
cls._printer.print(error_msg, color="red")
logger.error(error_msg)
raise ValueError(error_msg) from e
def persist(persistence: Optional[FlowPersistence] = None, verbose: bool = False):
"""Decorator to persist flow state.
This decorator can be applied at either the class level or method level.
When applied at the class level, it automatically persists all flow method
states. When applied at the method level, it persists only that method's
state.
Args:
persistence: Optional FlowPersistence implementation to use.
If not provided, uses SQLiteFlowPersistence.
verbose: Whether to log persistence operations. Defaults to False.
Returns:
A decorator that can be applied to either a class or method
Raises:
ValueError: If the flow state doesn't have an 'id' field
RuntimeError: If state persistence fails
Example:
@persist(verbose=True) # Class-level persistence with logging
class MyFlow(Flow[MyState]):
@start()
def begin(self):
pass
"""
def decorator(target: Union[Type, Callable[..., T]]) -> Union[Type, Callable[..., T]]:
"""Decorator that handles both class and method decoration."""
actual_persistence = persistence or SQLiteFlowPersistence()
if isinstance(target, type):
# Class decoration
original_init = getattr(target, "__init__")
@functools.wraps(original_init)
def new_init(self: Any, *args: Any, **kwargs: Any) -> None:
if 'persistence' not in kwargs:
kwargs['persistence'] = actual_persistence
original_init(self, *args, **kwargs)
setattr(target, "__init__", new_init)
# Store original methods to preserve their decorators
original_methods = {}
for name, method in target.__dict__.items():
if callable(method) and (
hasattr(method, "__is_start_method__") or
hasattr(method, "__trigger_methods__") or
hasattr(method, "__condition_type__") or
hasattr(method, "__is_flow_method__") or
hasattr(method, "__is_router__")
):
original_methods[name] = method
# Create wrapped versions of the methods that include persistence
for name, method in original_methods.items():
if asyncio.iscoroutinefunction(method):
# Create a closure to capture the current name and method
def create_async_wrapper(method_name: str, original_method: Callable):
@functools.wraps(original_method)
async def method_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
result = await original_method(self, *args, **kwargs)
PersistenceDecorator.persist_state(self, method_name, actual_persistence, verbose)
return result
return method_wrapper
wrapped = create_async_wrapper(name, method)
# Preserve all original decorators and attributes
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
if hasattr(method, attr):
setattr(wrapped, attr, getattr(method, attr))
setattr(wrapped, "__is_flow_method__", True)
# Update the class with the wrapped method
setattr(target, name, wrapped)
else:
# Create a closure to capture the current name and method
def create_sync_wrapper(method_name: str, original_method: Callable):
@functools.wraps(original_method)
def method_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
result = original_method(self, *args, **kwargs)
PersistenceDecorator.persist_state(self, method_name, actual_persistence, verbose)
return result
return method_wrapper
wrapped = create_sync_wrapper(name, method)
# Preserve all original decorators and attributes
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
if hasattr(method, attr):
setattr(wrapped, attr, getattr(method, attr))
setattr(wrapped, "__is_flow_method__", True)
# Update the class with the wrapped method
setattr(target, name, wrapped)
return target
else:
# Method decoration
method = target
setattr(method, "__is_flow_method__", True)
if asyncio.iscoroutinefunction(method):
@functools.wraps(method)
async def method_async_wrapper(flow_instance: Any, *args: Any, **kwargs: Any) -> T:
method_coro = method(flow_instance, *args, **kwargs)
if asyncio.iscoroutine(method_coro):
result = await method_coro
else:
result = method_coro
PersistenceDecorator.persist_state(flow_instance, method.__name__, actual_persistence, verbose)
return result
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
if hasattr(method, attr):
setattr(method_async_wrapper, attr, getattr(method, attr))
setattr(method_async_wrapper, "__is_flow_method__", True)
return cast(Callable[..., T], method_async_wrapper)
else:
@functools.wraps(method)
def method_sync_wrapper(flow_instance: Any, *args: Any, **kwargs: Any) -> T:
result = method(flow_instance, *args, **kwargs)
PersistenceDecorator.persist_state(flow_instance, method.__name__, actual_persistence, verbose)
return result
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
if hasattr(method, attr):
setattr(method_sync_wrapper, attr, getattr(method, attr))
setattr(method_sync_wrapper, "__is_flow_method__", True)
return cast(Callable[..., T], method_sync_wrapper)
return decorator

View File

@@ -0,0 +1,134 @@
"""
SQLite-based implementation of flow state persistence.
"""
import json
import sqlite3
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, Optional, Union
from pydantic import BaseModel
from crewai.flow.persistence.base import FlowPersistence
class SQLiteFlowPersistence(FlowPersistence):
"""SQLite-based implementation of flow state persistence.
This class provides a simple, file-based persistence implementation using SQLite.
It's suitable for development and testing, or for production use cases with
moderate performance requirements.
"""
db_path: str # Type annotation for instance variable
def __init__(self, db_path: Optional[str] = None):
"""Initialize SQLite persistence.
Args:
db_path: Path to the SQLite database file. If not provided, uses
db_storage_path() from utilities.paths.
Raises:
ValueError: If db_path is invalid
"""
from crewai.utilities.paths import db_storage_path
# Get path from argument or default location
path = db_path or str(Path(db_storage_path()) / "flow_states.db")
if not path:
raise ValueError("Database path must be provided")
self.db_path = path # Now mypy knows this is str
self.init_db()
def init_db(self) -> None:
"""Create the necessary tables if they don't exist."""
with sqlite3.connect(self.db_path) as conn:
conn.execute(
"""
CREATE TABLE IF NOT EXISTS flow_states (
id INTEGER PRIMARY KEY AUTOINCREMENT,
flow_uuid TEXT NOT NULL,
method_name TEXT NOT NULL,
timestamp DATETIME NOT NULL,
state_json TEXT NOT NULL
)
"""
)
# Add index for faster UUID lookups
conn.execute(
"""
CREATE INDEX IF NOT EXISTS idx_flow_states_uuid
ON flow_states(flow_uuid)
"""
)
def save_state(
self,
flow_uuid: str,
method_name: str,
state_data: Union[Dict[str, Any], BaseModel],
) -> None:
"""Save the current flow state to SQLite.
Args:
flow_uuid: Unique identifier for the flow instance
method_name: Name of the method that just completed
state_data: Current state data (either dict or Pydantic model)
"""
# Convert state_data to dict, handling both Pydantic and dict cases
if isinstance(state_data, BaseModel):
state_dict = dict(state_data) # Use dict() for better type compatibility
elif isinstance(state_data, dict):
state_dict = state_data
else:
raise ValueError(
f"state_data must be either a Pydantic BaseModel or dict, got {type(state_data)}"
)
with sqlite3.connect(self.db_path) as conn:
conn.execute(
"""
INSERT INTO flow_states (
flow_uuid,
method_name,
timestamp,
state_json
) VALUES (?, ?, ?, ?)
""",
(
flow_uuid,
method_name,
datetime.now(timezone.utc).isoformat(),
json.dumps(state_dict),
),
)
def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
"""Load the most recent state for a given flow UUID.
Args:
flow_uuid: Unique identifier for the flow instance
Returns:
The most recent state as a dictionary, or None if no state exists
"""
with sqlite3.connect(self.db_path) as conn:
cursor = conn.execute(
"""
SELECT state_json
FROM flow_states
WHERE flow_uuid = ?
ORDER BY id DESC
LIMIT 1
""",
(flow_uuid,),
)
row = cursor.fetchone()
if row:
return json.loads(row[0])
return None
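# Round-trip sketch for the two methods above (the database path is illustrative):
persistence = SQLiteFlowPersistence("/tmp/flow_states.db")
persistence.save_state(
    flow_uuid="abc-123",
    method_name="begin",
    state_data={"id": "abc-123", "counter": 1},
)
print(persistence.load_state("abc-123"))  # {'id': 'abc-123', 'counter': 1}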

View File

@@ -1,9 +1,26 @@
"""
Utility functions for flow visualization and dependency analysis.
This module provides core functionality for analyzing and manipulating flow structures,
including node level calculation, ancestor tracking, and return value analysis.
Functions in this module are primarily used by the visualization system to create
accurate and informative flow diagrams.
Example
-------
>>> flow = Flow()
>>> node_levels = calculate_node_levels(flow)
>>> ancestors = build_ancestor_dict(flow)
"""
import ast
import inspect
import textwrap
from collections import defaultdict, deque
from typing import Any, Deque, Dict, List, Optional, Set, Union
def get_possible_return_constants(function):
def get_possible_return_constants(function: Any) -> Optional[List[str]]:
try:
source = inspect.getsource(function)
except OSError:
@@ -77,11 +94,34 @@ def get_possible_return_constants(function):
return list(return_values) if return_values else None
def calculate_node_levels(flow):
levels = {}
queue = []
visited = set()
pending_and_listeners = {}
def calculate_node_levels(flow: Any) -> Dict[str, int]:
"""
Calculate the hierarchical level of each node in the flow.
Performs a breadth-first traversal of the flow graph to assign levels
to nodes, starting with start methods at level 0.
Parameters
----------
flow : Any
The flow instance containing methods, listeners, and router configurations.
Returns
-------
Dict[str, int]
Dictionary mapping method names to their hierarchical levels.
Notes
-----
- Start methods are assigned level 0
- Each subsequent connected node is assigned level = parent_level + 1
- Handles both OR and AND conditions for listeners
- Processes router paths separately
"""
levels: Dict[str, int] = {}
queue: Deque[str] = deque()
visited: Set[str] = set()
pending_and_listeners: Dict[str, Set[str]] = {}
# Make all start methods at level 0
for method_name, method in flow._methods.items():
@@ -89,28 +129,35 @@ def calculate_node_levels(flow):
levels[method_name] = 0
queue.append(method_name)
# Precompute listener dependencies
or_listeners = defaultdict(list)
and_listeners = defaultdict(set)
for listener_name, (condition_type, trigger_methods) in flow._listeners.items():
if condition_type == "OR":
for method in trigger_methods:
or_listeners[method].append(listener_name)
elif condition_type == "AND":
and_listeners[listener_name] = set(trigger_methods)
# Breadth-first traversal to assign levels
while queue:
current = queue.pop(0)
current = queue.popleft()
current_level = levels[current]
visited.add(current)
for listener_name, (condition_type, trigger_methods) in flow._listeners.items():
if condition_type == "OR":
if current in trigger_methods:
if (
listener_name not in levels
or levels[listener_name] > current_level + 1
):
levels[listener_name] = current_level + 1
if listener_name not in visited:
queue.append(listener_name)
elif condition_type == "AND":
for listener_name in or_listeners[current]:
if listener_name not in levels or levels[listener_name] > current_level + 1:
levels[listener_name] = current_level + 1
if listener_name not in visited:
queue.append(listener_name)
for listener_name, required_methods in and_listeners.items():
if current in required_methods:
if listener_name not in pending_and_listeners:
pending_and_listeners[listener_name] = set()
if current in trigger_methods:
pending_and_listeners[listener_name].add(current)
if set(trigger_methods) == pending_and_listeners[listener_name]:
pending_and_listeners[listener_name].add(current)
if required_methods == pending_and_listeners[listener_name]:
if (
listener_name not in levels
or levels[listener_name] > current_level + 1
@@ -120,27 +167,25 @@ def calculate_node_levels(flow):
queue.append(listener_name)
# Handle router connections
if current in flow._routers:
router_method_name = current
paths = flow._router_paths.get(router_method_name, [])
for path in paths:
for listener_name, (
condition_type,
trigger_methods,
) in flow._listeners.items():
if path in trigger_methods:
if (
listener_name not in levels
or levels[listener_name] > current_level + 1
):
levels[listener_name] = current_level + 1
if listener_name not in visited:
queue.append(listener_name)
process_router_paths(flow, current, current_level, levels, queue)
return levels
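# The OR-listener case reduces to ordinary BFS leveling; a toy version over a
# plain adjacency dict (the AND case additionally needs the pending-set
# bookkeeping shown above):
from collections import deque

def toy_levels(starts, edges):
    levels = {s: 0 for s in starts}
    queue = deque(starts)
    while queue:
        node = queue.popleft()
        for child in edges.get(node, []):
            # Assign level = parent_level + 1, keeping the smallest level seen.
            if child not in levels or levels[child] > levels[node] + 1:
                levels[child] = levels[node] + 1
                queue.append(child)
    return levels

print(toy_levels(["begin"], {"begin": ["fetch"], "fetch": ["summarize"]}))
# {'begin': 0, 'fetch': 1, 'summarize': 2}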
def count_outgoing_edges(flow):
def count_outgoing_edges(flow: Any) -> Dict[str, int]:
"""
Count the number of outgoing edges for each method in the flow.
Parameters
----------
flow : Any
The flow instance to analyze.
Returns
-------
Dict[str, int]
Dictionary mapping method names to their outgoing edge count.
"""
counts = {}
for method_name in flow._methods:
counts[method_name] = 0
@@ -152,16 +197,50 @@ def count_outgoing_edges(flow):
return counts
def build_ancestor_dict(flow):
ancestors = {node: set() for node in flow._methods}
visited = set()
def build_ancestor_dict(flow: Any) -> Dict[str, Set[str]]:
"""
Build a dictionary mapping each node to its ancestor nodes.
Parameters
----------
flow : Any
The flow instance to analyze.
Returns
-------
Dict[str, Set[str]]
Dictionary mapping each node to a set of its ancestor nodes.
"""
ancestors: Dict[str, Set[str]] = {node: set() for node in flow._methods}
visited: Set[str] = set()
for node in flow._methods:
if node not in visited:
dfs_ancestors(node, ancestors, visited, flow)
return ancestors
def dfs_ancestors(node, ancestors, visited, flow):
def dfs_ancestors(
node: str, ancestors: Dict[str, Set[str]], visited: Set[str], flow: Any
) -> None:
"""
Perform depth-first search to build ancestor relationships.
Parameters
----------
node : str
Current node being processed.
ancestors : Dict[str, Set[str]]
Dictionary tracking ancestor relationships.
visited : Set[str]
Set of already visited nodes.
flow : Any
The flow instance being analyzed.
Notes
-----
This function modifies the ancestors dictionary in-place to build
the complete ancestor graph.
"""
if node in visited:
return
visited.add(node)
@@ -185,12 +264,50 @@ def dfs_ancestors(node, ancestors, visited, flow):
dfs_ancestors(listener_name, ancestors, visited, flow)
def is_ancestor(node, ancestor_candidate, ancestors):
def is_ancestor(
node: str, ancestor_candidate: str, ancestors: Dict[str, Set[str]]
) -> bool:
"""
Check if one node is an ancestor of another.
Parameters
----------
node : str
The node to check ancestors for.
ancestor_candidate : str
The potential ancestor node.
ancestors : Dict[str, Set[str]]
Dictionary containing ancestor relationships.
Returns
-------
bool
True if ancestor_candidate is an ancestor of node, False otherwise.
"""
return ancestor_candidate in ancestors.get(node, set())
def build_parent_children_dict(flow):
parent_children = {}
def build_parent_children_dict(flow: Any) -> Dict[str, List[str]]:
"""
Build a dictionary mapping parent nodes to their children.
Parameters
----------
flow : Any
The flow instance to analyze.
Returns
-------
Dict[str, List[str]]
Dictionary mapping parent method names to lists of their child method names.
Notes
-----
- Maps listeners to their trigger methods
- Maps router methods to their paths and listeners
- Children lists are sorted for consistent ordering
"""
parent_children: Dict[str, List[str]] = {}
# Map listeners to their trigger methods
for listener_name, (_, trigger_methods) in flow._listeners.items():
@@ -214,7 +331,46 @@ def build_parent_children_dict(flow):
return parent_children
def get_child_index(parent, child, parent_children):
def get_child_index(
parent: str, child: str, parent_children: Dict[str, List[str]]
) -> int:
"""
Get the index of a child node in its parent's sorted children list.
Parameters
----------
parent : str
The parent node name.
child : str
The child node name to find the index for.
parent_children : Dict[str, List[str]]
Dictionary mapping parents to their children lists.
Returns
-------
int
Zero-based index of the child in its parent's sorted children list.
"""
children = parent_children.get(parent, [])
children.sort()
return children.index(child)
def process_router_paths(flow, current, current_level, levels, queue):
"""
Handle the router connections for the current node.
"""
if current in flow._routers:
paths = flow._router_paths.get(current, [])
for path in paths:
for listener_name, (
condition_type,
trigger_methods,
) in flow._listeners.items():
if path in trigger_methods:
if (
listener_name not in levels
or levels[listener_name] > current_level + 1
):
levels[listener_name] = current_level + 1
queue.append(listener_name)

View File

@@ -1,5 +1,23 @@
"""
Utilities for creating visual representations of flow structures.
This module provides functions for generating network visualizations of flows,
including node placement, edge creation, and visual styling. It handles the
conversion of flow structures into visual network graphs with appropriate
styling and layout.
Example
-------
>>> flow = Flow()
>>> net = Network(directed=True)
>>> node_positions = compute_positions(flow, node_levels)
>>> add_nodes_to_network(net, flow, node_positions, node_styles)
>>> add_edges(net, flow, node_positions, colors)
"""
import ast
import inspect
from typing import Any, Dict, List, Optional, Tuple, Union
from .utils import (
build_ancestor_dict,
@@ -9,8 +27,25 @@ from .utils import (
)
def method_calls_crew(method):
"""Check if the method calls `.crew()`."""
def method_calls_crew(method: Any) -> bool:
"""
Check if the method contains a call to `.crew()`.
Parameters
----------
method : Any
The method to analyze for crew() calls.
Returns
-------
bool
True if the method calls .crew(), False otherwise.
Notes
-----
Uses AST analysis to detect method calls, specifically looking for
attribute access of 'crew'.
"""
try:
source = inspect.getsource(method)
source = inspect.cleandoc(source)
@@ -20,6 +55,7 @@ def method_calls_crew(method):
return False
class CrewCallVisitor(ast.NodeVisitor):
"""AST visitor to detect .crew() method calls."""
def __init__(self):
self.found = False
@@ -34,7 +70,34 @@ def method_calls_crew(method):
return visitor.found
def add_nodes_to_network(net, flow, node_positions, node_styles):
def add_nodes_to_network(
net: Any,
flow: Any,
node_positions: Dict[str, Tuple[float, float]],
node_styles: Dict[str, Dict[str, Any]]
) -> None:
"""
Add nodes to the network visualization with appropriate styling.
Parameters
----------
net : Any
The pyvis Network instance to add nodes to.
flow : Any
The flow instance containing method information.
node_positions : Dict[str, Tuple[float, float]]
Dictionary mapping node names to their (x, y) positions.
node_styles : Dict[str, Dict[str, Any]]
Dictionary containing style configurations for different node types.
Notes
-----
Node types include:
- Start methods
- Router methods
- Crew methods
- Regular methods
"""
def human_friendly_label(method_name):
return method_name.replace("_", " ").title()
@@ -73,9 +136,33 @@ def add_nodes_to_network(net, flow, node_positions, node_styles):
)
def compute_positions(flow, node_levels, y_spacing=150, x_spacing=150):
level_nodes = {}
node_positions = {}
def compute_positions(
flow: Any,
node_levels: Dict[str, int],
y_spacing: float = 150,
x_spacing: float = 150
) -> Dict[str, Tuple[float, float]]:
"""
Compute the (x, y) positions for each node in the flow graph.
Parameters
----------
flow : Any
The flow instance to compute positions for.
node_levels : Dict[str, int]
Dictionary mapping node names to their hierarchical levels.
y_spacing : float, optional
Vertical spacing between levels, by default 150.
x_spacing : float, optional
Horizontal spacing between nodes, by default 150.
Returns
-------
Dict[str, Tuple[float, float]]
Dictionary mapping node names to their (x, y) coordinates.
"""
level_nodes: Dict[int, List[str]] = {}
node_positions: Dict[str, Tuple[float, float]] = {}
for method_name, level in node_levels.items():
level_nodes.setdefault(level, []).append(method_name)
@@ -90,7 +177,33 @@ def compute_positions(flow, node_levels, y_spacing=150, x_spacing=150):
return node_positions
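# For intuition, each level's nodes sit on a horizontal row centered around
# x = 0. The hunk above truncates the body, so this toy computation assumes
# the usual centered-grid layout rather than quoting it:
level_nodes = {0: ["begin"], 1: ["fetch", "parse"]}
x_spacing, y_spacing = 150, 150
positions = {}
for level, nodes in level_nodes.items():
    for i, name in enumerate(nodes):
        # Shift each row left by half its width so it is centered.
        x = i * x_spacing - (len(nodes) - 1) * x_spacing / 2
        positions[name] = (x, level * y_spacing)
print(positions)
# {'begin': (0.0, 0), 'fetch': (-75.0, 150), 'parse': (75.0, 150)}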
def add_edges(net, flow, node_positions, colors):
def add_edges(
net: Any,
flow: Any,
node_positions: Dict[str, Tuple[float, float]],
colors: Dict[str, str]
) -> None:
edge_smooth: Dict[str, Union[str, float]] = {"type": "continuous"} # Default value
"""
Add edges to the network visualization with appropriate styling.
Parameters
----------
net : Any
The pyvis Network instance to add edges to.
flow : Any
The flow instance containing edge information.
node_positions : Dict[str, Tuple[float, float]]
Dictionary mapping node names to their positions.
colors : Dict[str, str]
Dictionary mapping edge types to their colors.
Notes
-----
- Handles both normal listener edges and router edges
- Applies appropriate styling (color, dashes) based on edge type
- Adds curvature to edges when needed (cycles or multiple children)
"""
ancestors = build_ancestor_dict(flow)
parent_children = build_parent_children_dict(flow)
@@ -126,7 +239,7 @@ def add_edges(net, flow, node_positions, colors):
else:
edge_smooth = {"type": "cubicBezier"}
else:
edge_smooth = False
edge_smooth.update({"type": "continuous"})
edge_style = {
"color": edge_color,
@@ -189,7 +302,7 @@ def add_edges(net, flow, node_positions, colors):
else:
edge_smooth = {"type": "cubicBezier"}
else:
edge_smooth = False
edge_smooth.update({"type": "continuous"})
edge_style = {
"color": colors["router_edge"],

View File

@@ -14,21 +14,21 @@ class Knowledge(BaseModel):
Knowledge is a collection of sources and setup for the vector store to save and query relevant context.
Args:
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
embedder_config: Optional[Dict[str, Any]] = None
storage: Optional[KnowledgeStorage] = Field(default=None)
embedder: Optional[Dict[str, Any]] = None
"""
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
model_config = ConfigDict(arbitrary_types_allowed=True)
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
embedder_config: Optional[Dict[str, Any]] = None
storage: Optional[KnowledgeStorage] = Field(default=None)
embedder: Optional[Dict[str, Any]] = None
collection_name: Optional[str] = None
def __init__(
self,
collection_name: str,
sources: List[BaseKnowledgeSource],
embedder_config: Optional[Dict[str, Any]] = None,
embedder: Optional[Dict[str, Any]] = None,
storage: Optional[KnowledgeStorage] = None,
**data,
):
@@ -37,19 +37,22 @@ class Knowledge(BaseModel):
self.storage = storage
else:
self.storage = KnowledgeStorage(
embedder_config=embedder_config, collection_name=collection_name
embedder=embedder, collection_name=collection_name
)
self.sources = sources
self.storage.initialize_knowledge_storage()
for source in sources:
source.storage = self.storage
source.add()
self._add_sources()
def query(self, query: List[str], limit: int = 3) -> List[Dict[str, Any]]:
"""
Query across all knowledge sources to find the most relevant information.
Returns the top_k most relevant chunks.
Raises:
ValueError: If storage is not initialized.
"""
if self.storage is None:
raise ValueError("Storage is not initialized.")
results = self.storage.search(
query,
@@ -58,6 +61,15 @@ class Knowledge(BaseModel):
return results
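# Hedged usage sketch for the query path above. The source text and query are
# illustrative; StringKnowledgeSource is one of the built-in source types.
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

source = StringKnowledgeSource(content="PTO requests need two weeks' notice.")
knowledge = Knowledge(collection_name="hr_docs", sources=[source])
results = knowledge.query(["vacation policy"], limit=3)  # top-3 relevant chunks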
def _add_sources(self):
for source in self.sources:
source.storage = self.storage
source.add()
try:
for source in self.sources:
source.storage = self.storage
source.add()
except Exception as e:
raise e
def reset(self) -> None:
if self.storage:
self.storage.reset()
else:
raise ValueError("Storage is not initialized.")

View File

@@ -22,13 +22,20 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
default_factory=list, description="The path to the file"
)
content: Dict[Path, str] = Field(init=False, default_factory=dict)
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
storage: Optional[KnowledgeStorage] = Field(default=None)
safe_file_paths: List[Path] = Field(default_factory=list)
@field_validator("file_path", "file_paths", mode="before")
def validate_file_path(cls, v, values):
def validate_file_path(cls, v, info):
"""Validate that at least one of file_path or file_paths is provided."""
if v is None and ("file_path" not in values or values.get("file_path") is None):
# Single check if both are None, O(1) instead of nested conditions
if (
v is None
and info.data.get(
"file_path" if info.field_name == "file_paths" else "file_paths"
)
is None
):
raise ValueError("Either file_path or file_paths must be provided")
return v
@@ -62,7 +69,10 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
def _save_documents(self):
"""Save the documents to the storage."""
self.storage.save(self.chunks)
if self.storage:
self.storage.save(self.chunks)
else:
raise ValueError("No storage found to save documents.")
def convert_to_path(self, path: Union[Path, str]) -> Path:
"""Convert a path to a Path object."""

View File

@@ -16,7 +16,7 @@ class BaseKnowledgeSource(BaseModel, ABC):
chunk_embeddings: List[np.ndarray] = Field(default_factory=list)
model_config = ConfigDict(arbitrary_types_allowed=True)
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
storage: Optional[KnowledgeStorage] = Field(default=None)
metadata: Dict[str, Any] = Field(default_factory=dict) # Currently unused
collection_name: Optional[str] = Field(default=None)
@@ -46,4 +46,7 @@ class BaseKnowledgeSource(BaseModel, ABC):
Save the documents to the storage.
This method should be called after the chunks and embeddings are generated.
"""
self.storage.save(self.chunks)
if self.storage:
self.storage.save(self.chunks)
else:
raise ValueError("No storage found to save documents.")

View File

@@ -2,11 +2,17 @@ from pathlib import Path
from typing import Iterator, List, Optional, Union
from urllib.parse import urlparse
from docling.datamodel.base_models import InputFormat
from docling.document_converter import DocumentConverter
from docling.exceptions import ConversionError
from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
from docling_core.types.doc.document import DoclingDocument
try:
from docling.datamodel.base_models import InputFormat
from docling.document_converter import DocumentConverter
from docling.exceptions import ConversionError
from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
from docling_core.types.doc.document import DoclingDocument
DOCLING_AVAILABLE = True
except ImportError:
DOCLING_AVAILABLE = False
from pydantic import Field
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
@@ -19,14 +25,22 @@ class CrewDoclingSource(BaseKnowledgeSource):
This automatically supports PDF, DOCX, TXT, XLSX, image, and HTML files without any additional dependencies, following the docling package as the source of truth.
"""
def __init__(self, *args, **kwargs):
if not DOCLING_AVAILABLE:
raise ImportError(
"The docling package is required to use CrewDoclingSource. "
"Please install it using: uv add docling"
)
super().__init__(*args, **kwargs)
_logger: Logger = Logger(verbose=True)
file_path: Optional[List[Union[Path, str]]] = Field(default=None)
file_paths: List[Union[Path, str]] = Field(default_factory=list)
chunks: List[str] = Field(default_factory=list)
safe_file_paths: List[Union[Path, str]] = Field(default_factory=list)
content: List[DoclingDocument] = Field(default_factory=list)
document_converter: DocumentConverter = Field(
content: List["DoclingDocument"] = Field(default_factory=list)
document_converter: "DocumentConverter" = Field(
default_factory=lambda: DocumentConverter(
allowed_formats=[
InputFormat.MD,
@@ -52,7 +66,7 @@ class CrewDoclingSource(BaseKnowledgeSource):
self.safe_file_paths = self.validate_content()
self.content = self._load_content()
def _load_content(self) -> List[DoclingDocument]:
def _load_content(self) -> List["DoclingDocument"]:
try:
return self._convert_source_to_docling_documents()
except ConversionError as e:
@@ -74,11 +88,11 @@ class CrewDoclingSource(BaseKnowledgeSource):
self.chunks.extend(list(new_chunks_iterable))
self._save_documents()
def _convert_source_to_docling_documents(self) -> List[DoclingDocument]:
def _convert_source_to_docling_documents(self) -> List["DoclingDocument"]:
conv_results_iter = self.document_converter.convert_all(self.safe_file_paths)
return [result.document for result in conv_results_iter]
def _chunk_doc(self, doc: DoclingDocument) -> Iterator[str]:
def _chunk_doc(self, doc: "DoclingDocument") -> Iterator[str]:
chunker = HierarchicalChunker()
for chunk in chunker.chunk(doc):
yield chunk.text
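The try/except import guard makes docling a soft dependency: the module imports cleanly without it, and the failure is deferred to instantiation. A generic sketch of the pattern (class name and message illustrative):
try:
    import docling  # noqa: F401
    DOCLING_AVAILABLE = True
except ImportError:
    DOCLING_AVAILABLE = False
class RequiresDocling:
    def __init__(self) -> None:
        if not DOCLING_AVAILABLE:
            raise ImportError(
                "The docling package is required to use RequiresDocling. "
                "Please install it using: uv add docling"
            )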

View File

@@ -1,28 +1,138 @@
from pathlib import Path
from typing import Dict, List
from typing import Dict, Iterator, List, Optional, Union
from urllib.parse import urlparse
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource
from pydantic import Field, field_validator
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
from crewai.utilities.logger import Logger
class ExcelKnowledgeSource(BaseFileKnowledgeSource):
class ExcelKnowledgeSource(BaseKnowledgeSource):
"""A knowledge source that stores and queries Excel file content using embeddings."""
def load_content(self) -> Dict[Path, str]:
"""Load and preprocess Excel file content."""
pd = self._import_dependencies()
# override content to be a dict of file paths to sheet names to csv content
_logger: Logger = Logger(verbose=True)
file_path: Optional[Union[Path, List[Path], str, List[str]]] = Field(
default=None,
description="[Deprecated] The path to the file. Use file_paths instead.",
)
file_paths: Optional[Union[Path, List[Path], str, List[str]]] = Field(
default_factory=list, description="The path to the file"
)
chunks: List[str] = Field(default_factory=list)
content: Dict[Path, Dict[str, str]] = Field(default_factory=dict)
safe_file_paths: List[Path] = Field(default_factory=list)
@field_validator("file_path", "file_paths", mode="before")
def validate_file_path(cls, v, info):
"""Validate that at least one of file_path or file_paths is provided."""
# Single guard covering both fields, rather than nested conditionals
if (
v is None
and info.data.get(
"file_path" if info.field_name == "file_paths" else "file_paths"
)
is None
):
raise ValueError("Either file_path or file_paths must be provided")
return v
def _process_file_paths(self) -> List[Path]:
"""Convert file_path to a list of Path objects."""
if hasattr(self, "file_path") and self.file_path is not None:
self._logger.log(
"warning",
"The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.",
color="yellow",
)
self.file_paths = self.file_path
if self.file_paths is None:
raise ValueError("Your source must be provided with a file_paths: []")
# Convert single path to list
path_list: List[Union[Path, str]] = (
[self.file_paths]
if isinstance(self.file_paths, (str, Path))
else list(self.file_paths)
if isinstance(self.file_paths, list)
else []
)
if not path_list:
raise ValueError(
"file_path/file_paths must be a Path, str, or a list of these types"
)
return [self.convert_to_path(path) for path in path_list]
def validate_content(self):
"""Validate the paths."""
for path in self.safe_file_paths:
if not path.exists():
self._logger.log(
"error",
f"File not found: {path}. Try adding sources to the knowledge directory. If it's inside the knowledge directory, use the relative path.",
color="red",
)
raise FileNotFoundError(f"File not found: {path}")
if not path.is_file():
self._logger.log(
"error",
f"Path is not a file: {path}",
color="red",
)
def model_post_init(self, _) -> None:
if self.file_path:
self._logger.log(
"warning",
"The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.",
color="yellow",
)
self.file_paths = self.file_path
self.safe_file_paths = self._process_file_paths()
self.validate_content()
self.content = self._load_content()
def _load_content(self) -> Dict[Path, Dict[str, str]]:
"""Load and preprocess Excel file content from multiple sheets.
Each sheet's content is converted to CSV format and stored.
Returns:
Dict[Path, Dict[str, str]]: A mapping of file paths to their respective sheet contents.
Raises:
ImportError: If required dependencies are missing.
FileNotFoundError: If the specified Excel file cannot be opened.
"""
pd = self._import_dependencies()
content_dict = {}
for file_path in self.safe_file_paths:
file_path = self.convert_to_path(file_path)
df = pd.read_excel(file_path)
content = df.to_csv(index=False)
content_dict[file_path] = content
with pd.ExcelFile(file_path) as xl:
sheet_dict = {
str(sheet_name): str(
pd.read_excel(xl, sheet_name).to_csv(index=False)
)
for sheet_name in xl.sheet_names
}
content_dict[file_path] = sheet_dict
return content_dict
def convert_to_path(self, path: Union[Path, str]) -> Path:
"""Convert a path to a Path object."""
return Path(KNOWLEDGE_DIRECTORY + "/" + path) if isinstance(path, str) else path
def _import_dependencies(self):
"""Dynamically import dependencies."""
try:
import openpyxl # noqa
import pandas as pd
return pd
@@ -38,10 +148,14 @@ class ExcelKnowledgeSource(BaseFileKnowledgeSource):
and save the embeddings.
"""
# Convert dictionary values to a single string if content is a dictionary
if isinstance(self.content, dict):
content_str = "\n".join(str(value) for value in self.content.values())
else:
content_str = str(self.content)
# Updated to account for .xlsx workbooks with multiple tabs/sheets
content_str = ""
for value in self.content.values():
if isinstance(value, dict):
for sheet_value in value.values():
content_str += str(sheet_value) + "\n"
else:
content_str += str(value) + "\n"
new_chunks = self._chunk_text(content_str)
self.chunks.extend(new_chunks)
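A standalone sketch of the multi-sheet read that _load_content() now performs, assuming pandas with openpyxl installed; the function name is illustrative:
from pathlib import Path
from typing import Dict
import pandas as pd  # openpyxl is needed for .xlsx support
def read_workbook(path: Path) -> Dict[str, str]:
    """Return {sheet_name: csv_text} for every sheet, mirroring _load_content()."""
    with pd.ExcelFile(path) as xl:
        return {
            str(sheet): pd.read_excel(xl, sheet).to_csv(index=False)
            for sheet in xl.sheet_names
        }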

View File

@@ -14,6 +14,7 @@ from chromadb.config import Settings
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
from crewai.utilities import EmbeddingConfigurator
from crewai.utilities.chromadb import sanitize_collection_name
from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
from crewai.utilities.logger import Logger
from crewai.utilities.paths import db_storage_path
@@ -48,11 +49,11 @@ class KnowledgeStorage(BaseKnowledgeStorage):
def __init__(
self,
embedder_config: Optional[Dict[str, Any]] = None,
embedder: Optional[Dict[str, Any]] = None,
collection_name: Optional[str] = None,
):
self.collection_name = collection_name
self._set_embedder_config(embedder_config)
self._set_embedder_config(embedder)
def search(
self,
@@ -76,7 +77,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
"context": fetched["documents"][0][i], # type: ignore
"score": fetched["distances"][0][i], # type: ignore
}
if result["score"] >= score_threshold: # type: ignore
if result["score"] >= score_threshold:
results.append(result)
return results
else:
@@ -99,7 +100,8 @@ class KnowledgeStorage(BaseKnowledgeStorage):
)
if self.app:
self.collection = self.app.get_or_create_collection(
name=collection_name, embedding_function=self.embedder_config
name=sanitize_collection_name(collection_name),
embedding_function=self.embedder,
)
else:
raise Exception("Vector Database Client not initialized")
@@ -187,17 +189,15 @@ class KnowledgeStorage(BaseKnowledgeStorage):
api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
)
def _set_embedder_config(
self, embedder_config: Optional[Dict[str, Any]] = None
) -> None:
def _set_embedder_config(self, embedder: Optional[Dict[str, Any]] = None) -> None:
"""Set the embedding configuration for the knowledge storage.
Args:
embedder_config (Optional[Dict[str, Any]]): Configuration dictionary for the embedder.
If None or empty, defaults to the default embedding function.
"""
self.embedder_config = (
EmbeddingConfigurator().configure_embedder(embedder_config)
if embedder_config
self.embedder = (
EmbeddingConfigurator().configure_embedder(embedder)
if embedder
else self._create_default_embedding_function()
)
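The constructor keyword changes from embedder_config to embedder, so existing callers need updating. A sketch of the new call shape; the provider dict layout is assumed from EmbeddingConfigurator conventions:
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
storage = KnowledgeStorage(
    embedder={"provider": "openai", "config": {"model": "text-embedding-3-small"}},
    collection_name="Product Knowledge!",  # sanitized before reaching Chroma
)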

518
src/crewai/lite_agent.py Normal file
View File

@@ -0,0 +1,518 @@
import asyncio
import json
import re
import uuid
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Type, Union, cast
from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.agents.cache import CacheHandler
from crewai.agents.parser import (
AgentAction,
AgentFinish,
OutputParserException,
)
from crewai.llm import LLM
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.utilities import I18N
from crewai.utilities.agent_utils import (
enforce_rpm_limit,
format_message_for_llm,
get_llm_response,
get_tool_names,
handle_agent_action_core,
handle_context_length,
handle_max_iterations_exceeded,
handle_output_parser_exception,
handle_unknown_error,
has_reached_max_iterations,
is_context_length_exceeded,
parse_tools,
process_llm_response,
render_text_description_and_args,
show_agent_logs,
)
from crewai.utilities.converter import convert_to_model, generate_model_description
from crewai.utilities.events.agent_events import (
LiteAgentExecutionCompletedEvent,
LiteAgentExecutionErrorEvent,
LiteAgentExecutionStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMCallType,
)
from crewai.utilities.events.tool_usage_events import (
ToolUsageErrorEvent,
ToolUsageFinishedEvent,
ToolUsageStartedEvent,
)
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.printer import Printer
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.tool_utils import execute_tool_and_check_finality
class LiteAgentOutput(BaseModel):
"""Class that represents the result of a LiteAgent execution."""
model_config = {"arbitrary_types_allowed": True}
raw: str = Field(description="Raw output of the agent", default="")
pydantic: Optional[BaseModel] = Field(
description="Pydantic output of the agent", default=None
)
agent_role: str = Field(description="Role of the agent that produced this output")
usage_metrics: Optional[Dict[str, Any]] = Field(
description="Token usage metrics for this execution", default=None
)
def to_dict(self) -> Dict[str, Any]:
"""Convert pydantic_output to a dictionary."""
if self.pydantic:
return self.pydantic.model_dump()
return {}
def __str__(self) -> str:
"""String representation of the output."""
if self.pydantic:
return str(self.pydantic)
return self.raw
class LiteAgent(BaseModel):
"""
A lightweight agent that can process messages and use tools.
This agent is simpler than the full Agent class, focusing on direct execution
rather than task delegation. It's designed to be used for simple interactions
where a full crew is not needed.
Attributes:
role: The role of the agent.
goal: The objective of the agent.
backstory: The backstory of the agent.
llm: The language model that will run the agent.
tools: Tools at the agent's disposal.
verbose: Whether the agent execution should be in verbose mode.
max_iterations: Maximum number of iterations for tool usage.
max_execution_time: Maximum execution time in seconds.
response_format: Optional Pydantic model for structured output.
"""
model_config = {"arbitrary_types_allowed": True}
# Core Agent Properties
role: str = Field(description="Role of the agent")
goal: str = Field(description="Goal of the agent")
backstory: str = Field(description="Backstory of the agent")
llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
default=None, description="Language model that will run the agent"
)
tools: List[BaseTool] = Field(
default_factory=list, description="Tools at agent's disposal"
)
# Execution Control Properties
max_iterations: int = Field(
default=15, description="Maximum number of iterations for tool usage"
)
max_execution_time: Optional[int] = Field(
default=None, description="Maximum execution time in seconds"
)
respect_context_window: bool = Field(
default=True,
description="Whether to respect the context window of the LLM",
)
use_stop_words: bool = Field(
default=True,
description="Whether to use stop words to prevent the LLM from using tools",
)
request_within_rpm_limit: Optional[Callable[[], bool]] = Field(
default=None,
description="Callback to check if the request is within the RPM limit",
)
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
# Output and Formatting Properties
response_format: Optional[Type[BaseModel]] = Field(
default=None, description="Pydantic model for structured output"
)
verbose: bool = Field(
default=False, description="Whether to print execution details"
)
callbacks: List[Callable] = Field(
default=[], description="Callbacks to be used for the agent"
)
# State and Results
tools_results: List[Dict[str, Any]] = Field(
default=[], description="Results of the tools used by the agent."
)
# Private Attributes
_parsed_tools: List[CrewStructuredTool] = PrivateAttr(default_factory=list)
_token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
_cache_handler: CacheHandler = PrivateAttr(default_factory=CacheHandler)
_key: str = PrivateAttr(default_factory=lambda: str(uuid.uuid4()))
_messages: List[Dict[str, str]] = PrivateAttr(default_factory=list)
_iterations: int = PrivateAttr(default=0)
_printer: Printer = PrivateAttr(default_factory=Printer)
@model_validator(mode="after")
def setup_llm(self):
"""Set up the LLM and other components after initialization."""
self.llm = create_llm(self.llm)
if not isinstance(self.llm, LLM):
raise ValueError("Unable to create LLM instance")
# Initialize callbacks
token_callback = TokenCalcHandler(token_cost_process=self._token_process)
self._callbacks = [token_callback]
return self
@model_validator(mode="after")
def parse_tools(self):
"""Parse the tools and convert them to CrewStructuredTool instances."""
self._parsed_tools = parse_tools(self.tools)
return self
@property
def key(self) -> str:
"""Get the unique key for this agent instance."""
return self._key
@property
def _original_role(self) -> str:
"""Return the original role for compatibility with tool interfaces."""
return self.role
def kickoff(self, messages: Union[str, List[Dict[str, str]]]) -> LiteAgentOutput:
"""
Execute the agent with the given messages.
Args:
messages: Either a string query or a list of message dictionaries.
If a string is provided, it will be converted to a user message.
If a list is provided, each dict should have 'role' and 'content' keys.
Returns:
LiteAgentOutput: The result of the agent execution.
"""
# Create agent info for event emission
agent_info = {
"role": self.role,
"goal": self.goal,
"backstory": self.backstory,
"tools": self._parsed_tools,
"verbose": self.verbose,
}
try:
# Reset state for this run
self._iterations = 0
self.tools_results = []
# Format messages for the LLM
self._messages = self._format_messages(messages)
# Emit event for agent execution start
crewai_event_bus.emit(
self,
event=LiteAgentExecutionStartedEvent(
agent_info=agent_info,
tools=self._parsed_tools,
messages=messages,
),
)
# Execute the agent using invoke loop
agent_finish = self._invoke_loop()
formatted_result: Optional[BaseModel] = None
if self.response_format:
try:
# Cast to BaseModel to ensure type safety
result = self.response_format.model_validate_json(
agent_finish.output
)
if isinstance(result, BaseModel):
formatted_result = result
except Exception as e:
self._printer.print(
content=f"Failed to parse output into response format: {str(e)}",
color="yellow",
)
# Calculate token usage metrics
usage_metrics = self._token_process.get_summary()
# Create output
output = LiteAgentOutput(
raw=agent_finish.output,
pydantic=formatted_result,
agent_role=self.role,
usage_metrics=usage_metrics.model_dump() if usage_metrics else None,
)
# Emit completion event
crewai_event_bus.emit(
self,
event=LiteAgentExecutionCompletedEvent(
agent_info=agent_info,
output=agent_finish.output,
),
)
return output
except Exception as e:
self._printer.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
handle_unknown_error(self._printer, e)
# Emit error event
crewai_event_bus.emit(
self,
event=LiteAgentExecutionErrorEvent(
agent_info=agent_info,
error=str(e),
),
)
raise e
async def kickoff_async(
self, messages: Union[str, List[Dict[str, str]]]
) -> LiteAgentOutput:
"""
Execute the agent asynchronously with the given messages.
Args:
messages: Either a string query or a list of message dictionaries.
If a string is provided, it will be converted to a user message.
If a list is provided, each dict should have 'role' and 'content' keys.
Returns:
LiteAgentOutput: The result of the agent execution.
"""
return await asyncio.to_thread(self.kickoff, messages)
def _get_default_system_prompt(self) -> str:
"""Get the default system prompt for the agent."""
base_prompt = ""
if self._parsed_tools:
# Use the prompt template for agents with tools
base_prompt = self.i18n.slice("lite_agent_system_prompt_with_tools").format(
role=self.role,
backstory=self.backstory,
goal=self.goal,
tools=render_text_description_and_args(self._parsed_tools),
tool_names=get_tool_names(self._parsed_tools),
)
else:
# Use the prompt template for agents without tools
base_prompt = self.i18n.slice(
"lite_agent_system_prompt_without_tools"
).format(
role=self.role,
backstory=self.backstory,
goal=self.goal,
)
# Add response format instructions if specified
if self.response_format:
schema = generate_model_description(self.response_format)
base_prompt += self.i18n.slice("lite_agent_response_format").format(
response_format=schema
)
return base_prompt
def _format_messages(
self, messages: Union[str, List[Dict[str, str]]]
) -> List[Dict[str, str]]:
"""Format messages for the LLM."""
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
system_prompt = self._get_default_system_prompt()
# Add system message at the beginning
formatted_messages = [{"role": "system", "content": system_prompt}]
# Add the rest of the messages
formatted_messages.extend(messages)
return formatted_messages
def _invoke_loop(self) -> AgentFinish:
"""
Run the agent's thought process until it reaches a conclusion or max iterations.
Returns:
AgentFinish: The final result of the agent execution.
"""
# Execute the agent loop
formatted_answer = None
while not isinstance(formatted_answer, AgentFinish):
try:
if has_reached_max_iterations(self._iterations, self.max_iterations):
formatted_answer = handle_max_iterations_exceeded(
formatted_answer,
printer=self._printer,
i18n=self.i18n,
messages=self._messages,
llm=cast(LLM, self.llm),
callbacks=self._callbacks,
)
enforce_rpm_limit(self.request_within_rpm_limit)
# Emit LLM call started event
crewai_event_bus.emit(
self,
event=LLMCallStartedEvent(
messages=self._messages,
tools=None,
callbacks=self._callbacks,
),
)
try:
answer = get_llm_response(
llm=cast(LLM, self.llm),
messages=self._messages,
callbacks=self._callbacks,
printer=self._printer,
)
# Emit LLM call completed event
crewai_event_bus.emit(
self,
event=LLMCallCompletedEvent(
response=answer,
call_type=LLMCallType.LLM_CALL,
),
)
except Exception as e:
# Emit LLM call failed event
crewai_event_bus.emit(
self,
event=LLMCallFailedEvent(error=str(e)),
)
raise e
formatted_answer = process_llm_response(answer, self.use_stop_words)
if isinstance(formatted_answer, AgentAction):
# Emit tool usage started event
crewai_event_bus.emit(
self,
event=ToolUsageStartedEvent(
agent_key=self.key,
agent_role=self.role,
tool_name=formatted_answer.tool,
tool_args=formatted_answer.tool_input,
tool_class=formatted_answer.tool,
),
)
try:
tool_result = execute_tool_and_check_finality(
agent_action=formatted_answer,
tools=self._parsed_tools,
i18n=self.i18n,
agent_key=self.key,
agent_role=self.role,
)
# Emit tool usage finished event
crewai_event_bus.emit(
self,
event=ToolUsageFinishedEvent(
agent_key=self.key,
agent_role=self.role,
tool_name=formatted_answer.tool,
tool_args=formatted_answer.tool_input,
tool_class=formatted_answer.tool,
started_at=datetime.now(),
finished_at=datetime.now(),
output=tool_result.result,
),
)
except Exception as e:
# Emit tool usage error event
crewai_event_bus.emit(
self,
event=ToolUsageErrorEvent(
agent_key=self.key,
agent_role=self.role,
tool_name=formatted_answer.tool,
tool_args=formatted_answer.tool_input,
tool_class=formatted_answer.tool,
error=str(e),
),
)
raise e
formatted_answer = handle_agent_action_core(
formatted_answer=formatted_answer,
tool_result=tool_result,
show_logs=self._show_logs,
)
self._append_message(formatted_answer.text, role="assistant")
except OutputParserException as e:
formatted_answer = handle_output_parser_exception(
e=e,
messages=self._messages,
iterations=self._iterations,
log_error_after=3,
printer=self._printer,
)
except Exception as e:
if e.__class__.__module__.startswith("litellm"):
# Do not retry on litellm errors
raise e
if is_context_length_exceeded(e):
handle_context_length(
respect_context_window=self.respect_context_window,
printer=self._printer,
messages=self._messages,
llm=cast(LLM, self.llm),
callbacks=self._callbacks,
i18n=self.i18n,
)
continue
else:
handle_unknown_error(self._printer, e)
raise e
finally:
self._iterations += 1
assert isinstance(formatted_answer, AgentFinish)
self._show_logs(formatted_answer)
return formatted_answer
def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
"""Show logs for the agent's execution."""
show_agent_logs(
printer=self._printer,
agent_role=self.role,
formatted_answer=formatted_answer,
verbose=self.verbose,
)
def _append_message(self, text: str, role: str = "assistant") -> None:
"""Append a message to the message list with the given role."""
self._messages.append(format_message_for_llm(text, role=role))
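A minimal kickoff sketch based only on the fields and signatures defined in this file; the model string and prompt are illustrative:
from crewai.lite_agent import LiteAgent
agent = LiteAgent(
    role="Researcher",
    goal="Answer questions concisely",
    backstory="A minimal agent used for direct Q&A without a full crew.",
    llm="gpt-4o-mini",  # any value accepted by create_llm()
    verbose=True,
)
result = agent.kickoff("Summarize what CrewAI does in one sentence.")
print(result.raw)
print(result.usage_metrics)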

File diff suppressed because it is too large

View File

@@ -0,0 +1,91 @@
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional, Union
class BaseLLM(ABC):
"""Abstract base class for LLM implementations.
This class defines the interface that all LLM implementations must follow.
Users can extend this class to create custom LLM implementations that don't
rely on litellm's authentication mechanism.
Custom LLM implementations should handle error cases gracefully, including
timeouts, authentication failures, and malformed responses. They should also
implement proper validation for input parameters and provide clear error
messages when things go wrong.
Attributes:
stop (list): A list of stop sequences that the LLM should use to stop generation.
This is used by the CrewAgentExecutor and other components.
"""
model: str
temperature: Optional[float] = None
stop: Optional[List[str]] = None
def __init__(
self,
model: str,
temperature: Optional[float] = None,
):
"""Initialize the BaseLLM with default attributes.
This constructor sets default values for attributes that are expected
by the CrewAgentExecutor and other components.
All custom LLM implementations should call super().__init__() to ensure
that these default attributes are properly initialized.
"""
self.model = model
self.temperature = temperature
self.stop = []
@abstractmethod
def call(
self,
messages: Union[str, List[Dict[str, str]]],
tools: Optional[List[dict]] = None,
callbacks: Optional[List[Any]] = None,
available_functions: Optional[Dict[str, Any]] = None,
) -> Union[str, Any]:
"""Call the LLM with the given messages.
Args:
messages: Input messages for the LLM.
Can be a string or list of message dictionaries.
If string, it will be converted to a single user message.
If list, each dict must have 'role' and 'content' keys.
tools: Optional list of tool schemas for function calling.
Each tool should define its name, description, and parameters.
callbacks: Optional list of callback functions to be executed
during and after the LLM call.
available_functions: Optional dict mapping function names to callables
that can be invoked by the LLM.
Returns:
Either a text response from the LLM (str) or
the result of a tool function call (Any).
Raises:
ValueError: If the messages format is invalid.
TimeoutError: If the LLM request times out.
RuntimeError: If the LLM request fails for other reasons.
"""
pass
def supports_stop_words(self) -> bool:
"""Check if the LLM supports stop words.
Returns:
bool: True if the LLM supports stop words, False otherwise.
"""
return True # Default implementation assumes support for stop words
def get_context_window_size(self) -> int:
"""Get the context window size for the LLM.
Returns:
int: The number of tokens/characters the model can handle.
"""
# Default implementation - subclasses should override with model-specific values
return 4096
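A toy subclass showing the minimum surface a custom implementation must cover; it echoes its input instead of calling a provider, so it is a sketch rather than a usable model:
from typing import Any, Dict, List, Optional, Union
from crewai.llms.base_llm import BaseLLM
class EchoLLM(BaseLLM):
    """Returns the last user message verbatim; no network calls."""
    def call(
        self,
        messages: Union[str, List[Dict[str, str]]],
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
    ) -> Union[str, Any]:
        if isinstance(messages, str):
            return messages
        return messages[-1]["content"]
llm = EchoLLM(model="echo-1", temperature=0.0)
print(llm.call("ping"))  # -> "ping"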

38
src/crewai/llms/third_party/ai_suite.py vendored Normal file
View File

@@ -0,0 +1,38 @@
from typing import Any, Dict, List, Optional, Union
import aisuite as ai
from crewai.llms.base_llm import BaseLLM
class AISuiteLLM(BaseLLM):
def __init__(self, model: str, temperature: Optional[float] = None, **kwargs):
super().__init__(model, temperature, **kwargs)
self.client = ai.Client()
def call(
self,
messages: Union[str, List[Dict[str, str]]],
tools: Optional[List[dict]] = None,
callbacks: Optional[List[Any]] = None,
available_functions: Optional[Dict[str, Any]] = None,
) -> Union[str, Any]:
completion_params = self._prepare_completion_params(messages, tools)
response = self.client.chat.completions.create(**completion_params)
return response.choices[0].message.content
def _prepare_completion_params(
self,
messages: Union[str, List[Dict[str, str]]],
tools: Optional[List[dict]] = None,
) -> Dict[str, Any]:
return {
"model": self.model,
"messages": messages,
"temperature": self.temperature,
"tools": tools,
}
def supports_function_calling(self) -> bool:
return False
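A wiring sketch for the aisuite-backed LLM; the provider:model string follows aisuite's convention and the agent fields are illustrative (aisuite must be installed separately):
from crewai import Agent
from crewai.llms.third_party.ai_suite import AISuiteLLM
llm = AISuiteLLM(model="openai:gpt-4o", temperature=0.2)
agent = Agent(
    role="Writer",
    goal="Draft short product copy",
    backstory="A copywriter agent.",
    llm=llm,
)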

View File

@@ -2,5 +2,12 @@ from .entity.entity_memory import EntityMemory
from .long_term.long_term_memory import LongTermMemory
from .short_term.short_term_memory import ShortTermMemory
from .user.user_memory import UserMemory
from .external.external_memory import ExternalMemory
__all__ = ["UserMemory", "EntityMemory", "LongTermMemory", "ShortTermMemory"]
__all__ = [
"UserMemory",
"EntityMemory",
"LongTermMemory",
"ShortTermMemory",
"ExternalMemory",
]

View File

@@ -1,6 +1,12 @@
from typing import Any, Dict, Optional
from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory, UserMemory
from crewai.memory import (
EntityMemory,
ExternalMemory,
LongTermMemory,
ShortTermMemory,
UserMemory,
)
class ContextualMemory:
@@ -11,6 +17,7 @@ class ContextualMemory:
ltm: LongTermMemory,
em: EntityMemory,
um: UserMemory,
exm: ExternalMemory,
):
if memory_config is not None:
self.memory_provider = memory_config.get("provider")
@@ -20,6 +27,7 @@ class ContextualMemory:
self.ltm = ltm
self.em = em
self.um = um
self.exm = exm
def build_context_for_task(self, task, context) -> str:
"""
@@ -35,6 +43,7 @@ class ContextualMemory:
context.append(self._fetch_ltm_context(task.description))
context.append(self._fetch_stm_context(query))
context.append(self._fetch_entity_context(query))
context.append(self._fetch_external_context(query))
if self.memory_provider == "mem0":
context.append(self._fetch_user_context(query))
return "\n".join(filter(None, context))
@@ -94,6 +103,10 @@ class ContextualMemory:
Returns:
str: Formatted user memories as bullet points, or an empty string if none found.
"""
if self.um is None:
return ""
user_memories = self.um.search(query)
if not user_memories:
return ""
@@ -102,3 +115,24 @@ class ContextualMemory:
f"- {result['memory']}" for result in user_memories
)
return f"User memories/preferences:\n{formatted_memories}"
def _fetch_external_context(self, query: str) -> str:
"""
Fetches and formats relevant information from External Memory.
Args:
query (str): The search query to find relevant information.
Returns:
str: Formatted information as bullet points, or an empty string if none found.
"""
if self.exm is None:
return ""
external_memories = self.exm.search(query)
if not external_memories:
return ""
formatted_memories = "\n".join(
f"- {result['memory']}" for result in external_memories
)
return f"External memories:\n{formatted_memories}"

View File

@@ -1,3 +1,7 @@
from typing import Optional
from pydantic import PrivateAttr
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.rag_storage import RAGStorage
@@ -10,13 +14,15 @@ class EntityMemory(Memory):
Inherits from the Memory class.
"""
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if hasattr(crew, "memory_config") and crew.memory_config is not None:
self.memory_provider = crew.memory_config.get("provider")
else:
self.memory_provider = None
_memory_provider: Optional[str] = PrivateAttr()
if self.memory_provider == "mem0":
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
memory_provider = crew.memory_config.get("provider")
else:
memory_provider = None
if memory_provider == "mem0":
try:
from crewai.memory.storage.mem0_storage import Mem0Storage
except ImportError:
@@ -36,11 +42,13 @@ class EntityMemory(Memory):
path=path,
)
)
super().__init__(storage)
super().__init__(storage=storage)
self._memory_provider = memory_provider
def save(self, item: EntityMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
"""Saves an entity item into the SQLite storage."""
if self.memory_provider == "mem0":
if self._memory_provider == "mem0":
data = f"""
Remember details about the following entity:
Name: {item.name}

View File

View File

@@ -0,0 +1,61 @@
from typing import TYPE_CHECKING, Any, Dict, Optional, Self
from crewai.memory.external.external_memory_item import ExternalMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.interface import Storage
if TYPE_CHECKING:
from crewai.memory.storage.mem0_storage import Mem0Storage
class ExternalMemory(Memory):
def __init__(self, storage: Optional[Storage] = None, **data: Any):
super().__init__(storage=storage, **data)
@staticmethod
def _configure_mem0(crew: Any, config: Dict[str, Any]) -> "Mem0Storage":
from crewai.memory.storage.mem0_storage import Mem0Storage
return Mem0Storage(type="external", crew=crew, config=config)
@staticmethod
def external_supported_storages() -> Dict[str, Any]:
return {
"mem0": ExternalMemory._configure_mem0,
}
@staticmethod
def create_storage(crew: Any, embedder_config: Optional[Dict[str, Any]]) -> Storage:
if not embedder_config:
raise ValueError("embedder_config is required")
if "provider" not in embedder_config:
raise ValueError("embedder_config must include a 'provider' key")
provider = embedder_config["provider"]
supported_storages = ExternalMemory.external_supported_storages()
if provider not in supported_storages:
raise ValueError(f"Provider {provider} not supported")
return supported_storages[provider](crew, embedder_config.get("config", {}))
def save(
self,
value: Any,
metadata: Optional[Dict[str, Any]] = None,
agent: Optional[str] = None,
) -> None:
"""Saves a value into the external storage."""
item = ExternalMemoryItem(value=value, metadata=metadata, agent=agent)
super().save(value=item.value, metadata=item.metadata, agent=item.agent)
def reset(self) -> None:
self.storage.reset()
def set_crew(self, crew: Any) -> Self:
super().set_crew(crew)
if not self.storage:
self.storage = self.create_storage(crew, self.embedder_config)
return self
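A sketch of wiring external memory through the factory above; the provider and config keys follow the code shown, the user_id value is illustrative, and the mem0 package must be installed:
from crewai.memory.external.external_memory import ExternalMemory
storage = ExternalMemory.create_storage(
    crew=None,
    embedder_config={"provider": "mem0", "config": {"user_id": "john"}},
)
external = ExternalMemory(storage=storage)
external.save("Prefers concise answers", metadata={"topic": "style"}, agent="Writer")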

View File

@@ -0,0 +1,13 @@
from typing import Any, Dict, Optional
class ExternalMemoryItem:
def __init__(
self,
value: Any,
metadata: Optional[Dict[str, Any]] = None,
agent: Optional[str] = None,
):
self.value = value
self.metadata = metadata
self.agent = agent

View File

@@ -17,7 +17,7 @@ class LongTermMemory(Memory):
def __init__(self, storage=None, path=None):
if not storage:
storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
super().__init__(storage)
super().__init__(storage=storage)
def save(self, item: LongTermMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
metadata = item.metadata

View File

@@ -1,15 +1,20 @@
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List, Optional, Self
from crewai.memory.storage.rag_storage import RAGStorage
from pydantic import BaseModel
class Memory:
class Memory(BaseModel):
"""
Base class for memory, now supporting agent tags and generic metadata.
"""
def __init__(self, storage: RAGStorage):
self.storage = storage
embedder_config: Optional[Dict[str, Any]] = None
crew: Optional[Any] = None
storage: Any
def __init__(self, storage: Any, **data: Any):
super().__init__(storage=storage, **data)
def save(
self,
@@ -32,3 +37,7 @@ class Memory:
return self.storage.search(
query=query, limit=limit, score_threshold=score_threshold
)
def set_crew(self, crew: Any) -> Self:
self.crew = crew
return self
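Since Memory is now a pydantic model with storage as a declared field, any object matching the Storage interface can be injected. A self-contained sketch with a toy in-memory backend (names illustrative):
from crewai.memory.memory import Memory
from crewai.memory.storage.interface import Storage
class ListStorage(Storage):
    """Toy storage used only to exercise the new Memory API."""
    def __init__(self):
        self.items = []
    def save(self, value, metadata=None, agent=None):
        self.items.append({"memory": value, "metadata": metadata or {}})
    def search(self, query, limit=3, score_threshold=0.35):
        return [item for item in self.items if query in item["memory"]][:limit]
    def reset(self):
        self.items.clear()
memory = Memory(storage=ListStorage())
memory.save("User prefers concise answers", metadata={"topic": "style"})
print(memory.search("concise"))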

View File

@@ -1,5 +1,7 @@
from typing import Any, Dict, Optional
from pydantic import PrivateAttr
from crewai.memory.memory import Memory
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
from crewai.memory.storage.rag_storage import RAGStorage
@@ -14,13 +16,15 @@ class ShortTermMemory(Memory):
MemoryItem instances.
"""
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if hasattr(crew, "memory_config") and crew.memory_config is not None:
self.memory_provider = crew.memory_config.get("provider")
else:
self.memory_provider = None
_memory_provider: Optional[str] = PrivateAttr()
if self.memory_provider == "mem0":
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
memory_provider = crew.memory_config.get("provider")
else:
memory_provider = None
if memory_provider == "mem0":
try:
from crewai.memory.storage.mem0_storage import Mem0Storage
except ImportError:
@@ -39,7 +43,8 @@ class ShortTermMemory(Memory):
path=path,
)
)
super().__init__(storage)
super().__init__(storage=storage)
self._memory_provider = memory_provider
def save(
self,
@@ -48,7 +53,7 @@ class ShortTermMemory(Memory):
agent: Optional[str] = None,
) -> None:
item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
if self.memory_provider == "mem0":
if self._memory_provider == "mem0":
item.data = f"Remember the following insights from Agent run: {item.data}"
super().save(value=item.data, metadata=item.metadata, agent=item.agent)

View File

@@ -13,7 +13,7 @@ class BaseRAGStorage(ABC):
self,
type: str,
allow_reset: bool = True,
embedder_config: Optional[Any] = None,
embedder_config: Optional[Dict[str, Any]] = None,
crew: Any = None,
):
self.type = type

View File

@@ -1,12 +1,17 @@
import json
import logging
import sqlite3
from pathlib import Path
from typing import Any, Dict, List, Optional
from crewai.task import Task
from crewai.utilities import Printer
from crewai.utilities.crew_json_encoder import CrewJSONEncoder
from crewai.utilities.errors import DatabaseError, DatabaseOperationError
from crewai.utilities.paths import db_storage_path
logger = logging.getLogger(__name__)
class KickoffTaskOutputsSQLiteStorage:
"""
@@ -14,15 +19,24 @@ class KickoffTaskOutputsSQLiteStorage:
"""
def __init__(
self, db_path: str = f"{db_storage_path()}/latest_kickoff_task_outputs.db"
self, db_path: Optional[str] = None
) -> None:
if db_path is None:
# Get the parent directory of the default db path and create our db file there
db_path = str(Path(db_storage_path()) / "latest_kickoff_task_outputs.db")
self.db_path = db_path
self._printer: Printer = Printer()
self._initialize_db()
def _initialize_db(self):
"""
Initializes the SQLite database and creates LTM table
def _initialize_db(self) -> None:
"""Initialize the SQLite database and create the latest_kickoff_task_outputs table.
This method sets up the database schema for storing task outputs. It creates
a table with columns for task_id, expected_output, output (as JSON),
task_index, inputs (as JSON), was_replayed flag, and timestamp.
Raises:
DatabaseOperationError: If database initialization fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path) as conn:
@@ -43,10 +57,9 @@ class KickoffTaskOutputsSQLiteStorage:
conn.commit()
except sqlite3.Error as e:
self._printer.print(
content=f"SAVING KICKOFF TASK OUTPUTS ERROR: An error occurred during database initialization: {e}",
color="red",
)
error_msg = DatabaseError.format_error(DatabaseError.INIT_ERROR, e)
logger.error(error_msg)
raise DatabaseOperationError(error_msg, e)
def add(
self,
@@ -55,9 +68,22 @@ class KickoffTaskOutputsSQLiteStorage:
task_index: int,
was_replayed: bool = False,
inputs: Dict[str, Any] = {},
):
) -> None:
"""Add a new task output record to the database.
Args:
task: The Task object containing task details.
output: Dictionary containing the task's output data.
task_index: Integer index of the task in the sequence.
was_replayed: Boolean indicating if this was a replay execution.
inputs: Dictionary of input parameters used for the task.
Raises:
DatabaseOperationError: If saving the task output fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path) as conn:
conn.execute("BEGIN TRANSACTION")
cursor = conn.cursor()
cursor.execute(
"""
@@ -76,21 +102,31 @@ class KickoffTaskOutputsSQLiteStorage:
)
conn.commit()
except sqlite3.Error as e:
self._printer.print(
content=f"SAVING KICKOFF TASK OUTPUTS ERROR: An error occurred during database initialization: {e}",
color="red",
)
error_msg = DatabaseError.format_error(DatabaseError.SAVE_ERROR, e)
logger.error(error_msg)
raise DatabaseOperationError(error_msg, e)
def update(
self,
task_index: int,
**kwargs,
):
"""
Updates an existing row in the latest_kickoff_task_outputs table based on task_index.
**kwargs: Any,
) -> None:
"""Update an existing task output record in the database.
Updates fields of a task output record identified by task_index. The fields
to update are provided as keyword arguments.
Args:
task_index: Integer index of the task to update.
**kwargs: Arbitrary keyword arguments representing fields to update.
Values that are dictionaries will be JSON encoded.
Raises:
DatabaseOperationError: If updating the task output fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path) as conn:
conn.execute("BEGIN TRANSACTION")
cursor = conn.cursor()
fields = []
@@ -110,14 +146,23 @@ class KickoffTaskOutputsSQLiteStorage:
conn.commit()
if cursor.rowcount == 0:
self._printer.print(
f"No row found with task_index {task_index}. No update performed.",
color="red",
)
logger.warning(f"No row found with task_index {task_index}. No update performed.")
except sqlite3.Error as e:
self._printer.print(f"UPDATE KICKOFF TASK OUTPUTS ERROR: {e}", color="red")
error_msg = DatabaseError.format_error(DatabaseError.UPDATE_ERROR, e)
logger.error(error_msg)
raise DatabaseOperationError(error_msg, e)
def load(self) -> Optional[List[Dict[str, Any]]]:
def load(self) -> List[Dict[str, Any]]:
"""Load all task output records from the database.
Returns:
List of dictionaries containing task output records, ordered by task_index.
Each dictionary contains: task_id, expected_output, output, task_index,
inputs, was_replayed, and timestamp.
Raises:
DatabaseOperationError: If loading task outputs fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path) as conn:
cursor = conn.cursor()
@@ -144,23 +189,26 @@ class KickoffTaskOutputsSQLiteStorage:
return results
except sqlite3.Error as e:
self._printer.print(
content=f"LOADING KICKOFF TASK OUTPUTS ERROR: An error occurred while querying kickoff task outputs: {e}",
color="red",
)
return None
error_msg = DatabaseError.format_error(DatabaseError.LOAD_ERROR, e)
logger.error(error_msg)
raise DatabaseOperationError(error_msg, e)
def delete_all(self):
"""
Deletes all rows from the latest_kickoff_task_outputs table.
def delete_all(self) -> None:
"""Delete all task output records from the database.
This method removes all records from the latest_kickoff_task_outputs table.
Use with caution as this operation cannot be undone.
Raises:
DatabaseOperationError: If deleting task outputs fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path) as conn:
conn.execute("BEGIN TRANSACTION")
cursor = conn.cursor()
cursor.execute("DELETE FROM latest_kickoff_task_outputs")
conn.commit()
except sqlite3.Error as e:
self._printer.print(
content=f"ERROR: Failed to delete all kickoff task outputs: {e}",
color="red",
)
error_msg = DatabaseError.format_error(DatabaseError.DELETE_ERROR, e)
logger.error(error_msg)
raise DatabaseOperationError(error_msg, e)
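Since the storage layer now raises DatabaseOperationError instead of printing and returning None, callers can handle persistence failures explicitly. A sketch, assuming the module path follows the repo layout:
import logging
from crewai.memory.storage.kickoff_task_outputs_storage import (
    KickoffTaskOutputsSQLiteStorage,
)
from crewai.utilities.errors import DatabaseOperationError
storage = KickoffTaskOutputsSQLiteStorage()  # defaults to a db under db_storage_path()
try:
    rows = storage.load()
except DatabaseOperationError as exc:
    logging.getLogger(__name__).error("Task-output store unavailable: %s", exc)
    rows = []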

View File

@@ -1,5 +1,6 @@
import json
import sqlite3
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from crewai.utilities import Printer
@@ -12,10 +13,15 @@ class LTMSQLiteStorage:
"""
def __init__(
self, db_path: str = f"{db_storage_path()}/long_term_memory_storage.db"
self, db_path: Optional[str] = None
) -> None:
if db_path is None:
# Get the parent directory of the default db path and create our db file there
db_path = str(Path(db_storage_path()) / "long_term_memory_storage.db")
self.db_path = db_path
self._printer: Printer = Printer()
# Ensure parent directory exists
Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
self._initialize_db()
def _initialize_db(self):

View File

@@ -1,7 +1,7 @@
import os
from typing import Any, Dict, List
from mem0 import MemoryClient
from mem0 import Memory, MemoryClient
from crewai.memory.storage.interface import Storage
@@ -11,15 +11,20 @@ class Mem0Storage(Storage):
Extends Storage to handle embedding and searching across entities using Mem0.
"""
def __init__(self, type, crew=None):
def __init__(self, type, crew=None, config=None):
super().__init__()
if type not in ["user", "short_term", "long_term", "entities"]:
raise ValueError("Invalid type for Mem0Storage. Must be 'user' or 'agent'.")
supported_types = ["user", "short_term", "long_term", "entities", "external"]
if type not in supported_types:
raise ValueError(
f"Invalid type '{type}' for Mem0Storage. Must be one of: "
+ ", ".join(supported_types)
)
self.memory_type = type
self.crew = crew
self.memory_config = crew.memory_config
self.config = config or {}
# TODO: memory_config will be removed in the future; the config will be passed as a parameter instead
self.memory_config = self.config or getattr(crew, "memory_config", {}) or {}
# A user ID is required for the "user" memory type, since it uniquely identifies the user.
user_id = self._get_user_id()
@@ -27,10 +32,25 @@ class Mem0Storage(Storage):
raise ValueError("User ID is required for user memory type")
# API key in memory config overrides the environment variable
mem0_api_key = self.memory_config.get("config", {}).get("api_key") or os.getenv(
"MEM0_API_KEY"
)
self.memory = MemoryClient(api_key=mem0_api_key)
config = self._get_config()
mem0_api_key = config.get("api_key") or os.getenv("MEM0_API_KEY")
mem0_org_id = config.get("org_id")
mem0_project_id = config.get("project_id")
mem0_local_config = config.get("local_mem0_config")
# Initialize MemoryClient or Memory based on the presence of the mem0_api_key
if mem0_api_key:
if mem0_org_id and mem0_project_id:
self.memory = MemoryClient(
api_key=mem0_api_key, org_id=mem0_org_id, project_id=mem0_project_id
)
else:
self.memory = MemoryClient(api_key=mem0_api_key)
else:
if mem0_local_config and len(mem0_local_config):
self.memory = Memory.from_config(mem0_local_config)
else:
self.memory = Memory()
def _sanitize_role(self, role: str) -> str:
"""
@@ -41,26 +61,34 @@ class Mem0Storage(Storage):
def save(self, value: Any, metadata: Dict[str, Any]) -> None:
user_id = self._get_user_id()
agent_name = self._get_agent_name()
if self.memory_type == "user":
self.memory.add(value, user_id=user_id, metadata={**metadata})
elif self.memory_type == "short_term":
agent_name = self._get_agent_name()
self.memory.add(
value, agent_id=agent_name, metadata={"type": "short_term", **metadata}
)
params = None
if self.memory_type == "short_term":
params = {
"agent_id": agent_name,
"infer": False,
"metadata": {"type": "short_term", **metadata},
}
elif self.memory_type == "long_term":
agent_name = self._get_agent_name()
self.memory.add(
value,
agent_id=agent_name,
infer=False,
metadata={"type": "long_term", **metadata},
)
params = {
"agent_id": agent_name,
"infer": False,
"metadata": {"type": "long_term", **metadata},
}
elif self.memory_type == "entities":
entity_name = None
self.memory.add(
value, user_id=entity_name, metadata={"type": "entity", **metadata}
)
params = {
"agent_id": agent_name,
"infer": False,
"metadata": {"type": "entity", **metadata},
}
elif self.memory_type == "external":
params = {
"user_id": user_id,
"agent_id": agent_name,
"metadata": {"type": "external", **metadata},
}
if params:
self.memory.add(value, **params | {"output_format": "v1.1"})
def search(
self,
@@ -69,37 +97,43 @@ class Mem0Storage(Storage):
score_threshold: float = 0.35,
) -> List[Any]:
params = {"query": query, "limit": limit}
if self.memory_type == "user":
user_id = self._get_user_id()
if user_id := self._get_user_id():
params["user_id"] = user_id
elif self.memory_type == "short_term":
agent_name = self._get_agent_name()
agent_name = self._get_agent_name()
if self.memory_type == "short_term":
params["agent_id"] = agent_name
params["metadata"] = {"type": "short_term"}
elif self.memory_type == "long_term":
agent_name = self._get_agent_name()
params["agent_id"] = agent_name
params["metadata"] = {"type": "long_term"}
elif self.memory_type == "entities":
agent_name = self._get_agent_name()
params["agent_id"] = agent_name
params["metadata"] = {"type": "entity"}
elif self.memory_type == "external":
params["agent_id"] = agent_name
params["metadata"] = {"type": "external"}
# Discard the filters for now since we create the filters
# automatically when the crew is created.
results = self.memory.search(**params)
return [r for r in results if r["score"] >= score_threshold]
def _get_user_id(self):
if self.memory_type == "user":
if hasattr(self, "memory_config") and self.memory_config is not None:
return self.memory_config.get("config", {}).get("user_id")
else:
return None
return None
def _get_user_id(self) -> str:
return self._get_config().get("user_id", "")
def _get_agent_name(self):
agents = self.crew.agents if self.crew else []
def _get_agent_name(self) -> str:
if not self.crew:
return ""
agents = self.crew.agents
agents = [self._sanitize_role(agent.role) for agent in agents]
agents = "_".join(agents)
return agents
def _get_config(self) -> Dict[str, Any]:
return self.config or getattr(self, "memory_config", {}).get("config", {}) or {}
def reset(self):
if self.memory:
self.memory.reset()
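Illustrative config shapes accepted by the new constructor, derived from the keys read above; all values are placeholders, and the local vector_store layout follows mem0's config format:
from crewai.memory.storage.mem0_storage import Mem0Storage
# Hosted mem0: api_key (or the MEM0_API_KEY env var), optionally org-scoped.
hosted = Mem0Storage(
    type="external",
    config={
        "user_id": "john",
        "api_key": "m0-...",
        "org_id": "my-org",
        "project_id": "my-project",
    },
)
# Local mem0: no api_key; local_mem0_config is handed to Memory.from_config().
local = Mem0Storage(
    type="short_term",
    config={"local_mem0_config": {"vector_store": {"provider": "chroma"}}},
)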

View File

@@ -1,3 +1,4 @@
import warnings
from typing import Any, Dict, Optional
from crewai.memory.memory import Memory
@@ -12,6 +13,12 @@ class UserMemory(Memory):
"""
def __init__(self, crew=None):
warnings.warn(
"UserMemory is deprecated and will be removed in a future version. "
"Please use ExternalMemory instead.",
DeprecationWarning,
stacklevel=2,
)
try:
from crewai.memory.storage.mem0_storage import Mem0Storage
except ImportError:
@@ -43,3 +50,9 @@ class UserMemory(Memory):
score_threshold=score_threshold,
)
return results
def reset(self) -> None:
try:
self.storage.reset()
except Exception as e:
raise Exception(f"An error occurred while resetting the user memory: {e}")

View File

@@ -4,18 +4,23 @@ from typing import Callable
from crewai import Crew
from crewai.project.utils import memoize
"""Decorators for defining crew components and their behaviors."""
def before_kickoff(func):
"""Marks a method to execute before crew kickoff."""
func.is_before_kickoff = True
return func
def after_kickoff(func):
"""Marks a method to execute after crew kickoff."""
func.is_after_kickoff = True
return func
def task(func):
"""Marks a method as a crew task."""
func.is_task = True
@wraps(func)
@@ -29,43 +34,51 @@ def task(func):
def agent(func):
"""Marks a method as a crew agent."""
func.is_agent = True
func = memoize(func)
return func
def llm(func):
"""Marks a method as an LLM provider."""
func.is_llm = True
func = memoize(func)
return func
def output_json(cls):
"""Marks a class as JSON output format."""
cls.is_output_json = True
return cls
def output_pydantic(cls):
"""Marks a class as Pydantic output format."""
cls.is_output_pydantic = True
return cls
def tool(func):
"""Marks a method as a crew tool."""
func.is_tool = True
return memoize(func)
def callback(func):
"""Marks a method as a crew callback."""
func.is_callback = True
return memoize(func)
def cache_handler(func):
"""Marks a method as a cache handler."""
func.is_cache_handler = True
return memoize(func)
def crew(func) -> Callable[..., Crew]:
"""Marks a method as the main crew execution point."""
@wraps(func)
def wrapper(self, *args, **kwargs) -> Crew:
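A sketch of the new lifecycle hooks on a CrewBase class; agents and tasks are omitted for brevity, and the hook bodies are illustrative. Each hook receives and should return its argument, possibly modified:
from crewai.project import CrewBase, after_kickoff, before_kickoff
@CrewBase
class ResearchCrew:
    """Hooks run around Crew.kickoff()."""
    @before_kickoff
    def prepare_inputs(self, inputs):
        inputs["run_date"] = "2025-04-09"
        return inputs
    @after_kickoff
    def log_output(self, result):
        print(f"Crew finished: {result.raw[:80]}")
        return result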

View File

@@ -1,4 +1,5 @@
import inspect
import logging
from pathlib import Path
from typing import Any, Callable, Dict, TypeVar, cast
@@ -7,10 +8,16 @@ from dotenv import load_dotenv
load_dotenv()
logging.basicConfig(level=logging.WARNING)
T = TypeVar("T", bound=type)
"""Base decorator for creating crew classes with configuration and function management."""
def CrewBase(cls: T) -> T:
"""Wraps a class with crew functionality and configuration management."""
class WrappedClass(cls): # type: ignore
is_crew_class: bool = True # type: ignore
@@ -24,16 +31,9 @@ def CrewBase(cls: T) -> T:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
agents_config_path = self.base_directory / self.original_agents_config_path
tasks_config_path = self.base_directory / self.original_tasks_config_path
self.agents_config = self.load_yaml(agents_config_path)
self.tasks_config = self.load_yaml(tasks_config_path)
self.load_configurations()
self.map_all_agent_variables()
self.map_all_task_variables()
# Preserve all decorated functions
self._original_functions = {
name: method
@@ -49,7 +49,6 @@ def CrewBase(cls: T) -> T:
]
)
}
# Store specific function types
self._original_tasks = self._filter_functions(
self._original_functions, "is_task"
@@ -67,6 +66,44 @@ def CrewBase(cls: T) -> T:
self._original_functions, "is_kickoff"
)
def load_configurations(self):
"""Load agent and task configurations from YAML files."""
if isinstance(self.original_agents_config_path, str):
agents_config_path = (
self.base_directory / self.original_agents_config_path
)
try:
self.agents_config = self.load_yaml(agents_config_path)
except FileNotFoundError:
logging.warning(
f"Agent config file not found at {agents_config_path}. "
"Proceeding with empty agent configurations."
)
self.agents_config = {}
else:
logging.warning(
"No agent configuration path provided. Proceeding with empty agent configurations."
)
self.agents_config = {}
if isinstance(self.original_tasks_config_path, str):
tasks_config_path = (
self.base_directory / self.original_tasks_config_path
)
try:
self.tasks_config = self.load_yaml(tasks_config_path)
except FileNotFoundError:
logging.warning(
f"Task config file not found at {tasks_config_path}. "
"Proceeding with empty task configurations."
)
self.tasks_config = {}
else:
logging.warning(
"No task configuration path provided. Proceeding with empty task configurations."
)
self.tasks_config = {}
@staticmethod
def load_yaml(config_path: Path):
try:
@@ -100,13 +137,11 @@ def CrewBase(cls: T) -> T:
all_functions, "is_cache_handler"
)
callbacks = self._filter_functions(all_functions, "is_callback")
agents = self._filter_functions(all_functions, "is_agent")
for agent_name, agent_info in self.agents_config.items():
self._map_agent_variables(
agent_name,
agent_info,
agents,
llms,
tool_functions,
cache_handler_functions,
@@ -117,7 +152,6 @@ def CrewBase(cls: T) -> T:
self,
agent_name: str,
agent_info: Dict[str, Any],
agents: Dict[str, Callable],
llms: Dict[str, Callable],
tool_functions: Dict[str, Callable],
cache_handler_functions: Dict[str, Callable],
@@ -135,9 +169,10 @@ def CrewBase(cls: T) -> T:
]
if function_calling_llm := agent_info.get("function_calling_llm"):
self.agents_config[agent_name]["function_calling_llm"] = agents[
function_calling_llm
]()
try:
self.agents_config[agent_name]["function_calling_llm"] = llms[function_calling_llm]()
except KeyError:
self.agents_config[agent_name]["function_calling_llm"] = function_calling_llm
if step_callback := agent_info.get("step_callback"):
self.agents_config[agent_name]["step_callback"] = callbacks[
@@ -216,5 +251,5 @@ def CrewBase(cls: T) -> T:
# Include base class (qual)name in the wrapper class (qual)name.
WrappedClass.__name__ = CrewBase.__name__ + "(" + cls.__name__ + ")"
WrappedClass.__qualname__ = CrewBase.__qualname__ + "(" + cls.__name__ + ")"
return cast(T, WrappedClass)

View File

@@ -0,0 +1,13 @@
"""
CrewAI security module.
This module provides security-related functionality for CrewAI, including:
- Fingerprinting for component identity and tracking
- Security configuration for controlling access and permissions
- Future: authentication, scoping, and delegation mechanisms
"""
from crewai.security.fingerprint import Fingerprint
from crewai.security.security_config import SecurityConfig
__all__ = ["Fingerprint", "SecurityConfig"]

View File

@@ -0,0 +1,170 @@
"""
Fingerprint Module
This module provides functionality for generating and validating unique identifiers
for CrewAI agents. These identifiers are used for tracking, auditing, and security.
"""
import uuid
from datetime import datetime
from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, field_validator
class Fingerprint(BaseModel):
"""
A class for generating and managing unique identifiers for agents.
Each agent has dual identifiers:
- Human-readable ID: For debugging and reference (derived from role if not specified)
- Fingerprint UUID: Unique runtime identifier for tracking and auditing
Attributes:
uuid_str (str): String representation of the UUID for this fingerprint, auto-generated
created_at (datetime): When this fingerprint was created, auto-generated
metadata (Dict[str, Any]): Additional metadata associated with this fingerprint
"""
uuid_str: str = Field(default_factory=lambda: str(uuid.uuid4()), description="String representation of the UUID")
created_at: datetime = Field(default_factory=datetime.now, description="When this fingerprint was created")
metadata: Dict[str, Any] = Field(default_factory=dict, description="Additional metadata for this fingerprint")
model_config = ConfigDict(arbitrary_types_allowed=True)
@field_validator('metadata')
@classmethod
def validate_metadata(cls, v):
"""Validate that metadata is a dictionary with string keys and valid values."""
if not isinstance(v, dict):
raise ValueError("Metadata must be a dictionary")
# Validate that all keys are strings
for key, value in v.items():
if not isinstance(key, str):
raise ValueError(f"Metadata keys must be strings, got {type(key)}")
# Validate nested dictionaries (prevent deeply nested structures)
if isinstance(value, dict):
# Check for nested dictionaries (limit depth to 1)
for nested_key, nested_value in value.items():
if not isinstance(nested_key, str):
raise ValueError(f"Nested metadata keys must be strings, got {type(nested_key)}")
if isinstance(nested_value, dict):
raise ValueError("Metadata can only be nested one level deep")
# Check for maximum metadata size (prevent DoS)
if len(str(v)) > 10000: # Limit metadata size to 10KB
raise ValueError("Metadata size exceeds maximum allowed (10KB)")
return v
def __init__(self, **data):
"""Initialize a Fingerprint with auto-generated uuid_str and created_at."""
# Remove uuid_str and created_at from data to ensure they're auto-generated
if 'uuid_str' in data:
data.pop('uuid_str')
if 'created_at' in data:
data.pop('created_at')
# Call the parent constructor with the modified data
super().__init__(**data)
@property
def uuid(self) -> uuid.UUID:
"""Get the UUID object for this fingerprint."""
return uuid.UUID(self.uuid_str)
@classmethod
def _generate_uuid(cls, seed: str) -> str:
"""
Generate a deterministic UUID based on a seed string.
Args:
seed (str): The seed string to use for UUID generation
Returns:
str: A string representation of the UUID consistently generated from the seed
"""
if not isinstance(seed, str):
raise ValueError("Seed must be a string")
if not seed.strip():
raise ValueError("Seed cannot be empty or whitespace")
# Create a deterministic UUID using v5 (SHA-1)
# Custom namespace for CrewAI to enhance security
# Using a unique namespace specific to CrewAI to reduce collision risks
CREW_AI_NAMESPACE = uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')
return str(uuid.uuid5(CREW_AI_NAMESPACE, seed))
@classmethod
def generate(cls, seed: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None) -> 'Fingerprint':
"""
Factory method to create a new Fingerprint.
Args:
seed (Optional[str]): A string to use as seed for the UUID generation.
If None, a random UUID is generated.
metadata (Optional[Dict[str, Any]]): Additional metadata to store with the fingerprint.
Returns:
Fingerprint: A new Fingerprint instance
"""
fingerprint = cls(metadata=metadata or {})
if seed:
# For seed-based generation, we need to manually set the uuid_str after creation
object.__setattr__(fingerprint, 'uuid_str', cls._generate_uuid(seed))
return fingerprint
def __str__(self) -> str:
"""String representation of the fingerprint (the UUID)."""
return self.uuid_str
def __eq__(self, other) -> bool:
"""Compare fingerprints by their UUID."""
if isinstance(other, Fingerprint):
return self.uuid_str == other.uuid_str
return False
def __hash__(self) -> int:
"""Hash of the fingerprint (based on UUID)."""
return hash(self.uuid_str)
def to_dict(self) -> Dict[str, Any]:
"""
Convert the fingerprint to a dictionary representation.
Returns:
Dict[str, Any]: Dictionary representation of the fingerprint
"""
return {
"uuid_str": self.uuid_str,
"created_at": self.created_at.isoformat(),
"metadata": self.metadata
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> 'Fingerprint':
"""
Create a Fingerprint from a dictionary representation.
Args:
data (Dict[str, Any]): Dictionary representation of a fingerprint
Returns:
Fingerprint: A new Fingerprint instance
"""
if not data:
return cls()
fingerprint = cls(metadata=data.get("metadata", {}))
# For consistency with existing stored fingerprints, we need to manually set these
if "uuid_str" in data:
object.__setattr__(fingerprint, 'uuid_str', data["uuid_str"])
if "created_at" in data and isinstance(data["created_at"], str):
object.__setattr__(fingerprint, 'created_at', datetime.fromisoformat(data["created_at"]))
return fingerprint
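A minimal usage sketch of the Fingerprint API introduced above (editor's illustration, not part of the diff): deterministic seeding, one-level metadata nesting, and round-tripping through the dict form.

from crewai.security.fingerprint import Fingerprint

# Random fingerprint; uuid_str and created_at are auto-generated
fp = Fingerprint.generate(metadata={"team": "search", "flags": {"beta": True}})

# Deterministic: the same seed always maps to the same uuid5
assert Fingerprint.generate(seed="billing-agent") == Fingerprint.generate(seed="billing-agent")

# Round-trip through the dict representation used for persistence
restored = Fingerprint.from_dict(fp.to_dict())
assert restored.uuid_str == fp.uuid_str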

View File

@@ -0,0 +1,116 @@
"""
Security Configuration Module
This module provides configuration for CrewAI security features, including:
- Authentication settings
- Scoping rules
- Fingerprinting
The SecurityConfig class is the primary interface for managing security settings
in CrewAI applications.
"""
from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
from crewai.security.fingerprint import Fingerprint
class SecurityConfig(BaseModel):
"""
Configuration for CrewAI security features.
This class manages security settings for CrewAI agents, including:
- Authentication credentials *TODO*
- Identity information (agent fingerprints)
- Scoping rules *TODO*
- Impersonation/delegation tokens *TODO*
Attributes:
version (str): Version of the security configuration
fingerprint (Fingerprint): The unique fingerprint automatically generated for the component
"""
model_config = ConfigDict(
arbitrary_types_allowed=True
# Note: Cannot use frozen=True as existing tests modify the fingerprint property
)
version: str = Field(
default="1.0.0",
description="Version of the security configuration"
)
fingerprint: Fingerprint = Field(
default_factory=Fingerprint,
description="Unique identifier for the component"
)
def is_compatible(self, min_version: str) -> bool:
"""
Check if this security configuration is compatible with the minimum required version.
Args:
min_version (str): Minimum required version in semver format (e.g., "1.0.0")
Returns:
bool: True if this configuration is compatible, False otherwise
"""
# Simple version comparison (can be enhanced with packaging.version if needed)
current = [int(x) for x in self.version.split(".")]
minimum = [int(x) for x in min_version.split(".")]
# Compare major, minor, patch versions
for c, m in zip(current, minimum):
if c > m:
return True
if c < m:
return False
return True
@model_validator(mode='before')
@classmethod
def validate_fingerprint(cls, values):
"""Ensure fingerprint is properly initialized."""
if isinstance(values, dict):
# Handle case where fingerprint is not provided or is None
if 'fingerprint' not in values or values['fingerprint'] is None:
values['fingerprint'] = Fingerprint()
# Handle case where fingerprint is a string (seed)
elif isinstance(values['fingerprint'], str):
if not values['fingerprint'].strip():
raise ValueError("Fingerprint seed cannot be empty")
values['fingerprint'] = Fingerprint.generate(seed=values['fingerprint'])
return values
def to_dict(self) -> Dict[str, Any]:
"""
Convert the security config to a dictionary.
Returns:
Dict[str, Any]: Dictionary representation of the security config
"""
result = {
"fingerprint": self.fingerprint.to_dict()
}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> 'SecurityConfig':
"""
Create a SecurityConfig from a dictionary.
Args:
data (Dict[str, Any]): Dictionary representation of a security config
Returns:
SecurityConfig: A new SecurityConfig instance
"""
# Make a copy to avoid modifying the original
data_copy = data.copy()
fingerprint_data = data_copy.pop("fingerprint", None)
fingerprint = Fingerprint.from_dict(fingerprint_data) if fingerprint_data else Fingerprint()
return cls(fingerprint=fingerprint)
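A short sketch of the SecurityConfig behavior defined above: the seed-string coercion and the version gate follow directly from the model validator and is_compatible.

from crewai.security.security_config import SecurityConfig

# A bare string is treated as a fingerprint seed by the model validator
cfg = SecurityConfig(fingerprint="payments-crew")
print(cfg.fingerprint.uuid_str)      # deterministic uuid5 derived from the seed

# Version gate: the default "1.0.0" meets a 1.0.0 minimum but not 1.1.0
assert cfg.is_compatible("1.0.0")
assert not cfg.is_compatible("1.1.0")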

View File

@@ -2,6 +2,7 @@ import datetime
import inspect
import json
import logging
import re
import threading
import uuid
from concurrent.futures import Future
@@ -19,9 +20,10 @@ from typing import (
Tuple,
Type,
Union,
get_args,
get_origin,
)
from opentelemetry.trace import Span
from pydantic import (
UUID4,
BaseModel,
@@ -33,14 +35,22 @@ from pydantic import (
from pydantic_core import PydanticCustomError
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.security import Fingerprint, SecurityConfig
from crewai.tasks.guardrail_result import GuardrailResult
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry.telemetry import Telemetry
from crewai.tools.base_tool import BaseTool
from crewai.utilities.config import process_config
from crewai.utilities.converter import Converter, convert_to_model
from crewai.utilities.events import (
TaskCompletedEvent,
TaskFailedEvent,
TaskStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.i18n import I18N
from crewai.utilities.printer import Printer
from crewai.utilities.string_utils import interpolate_only
class Task(BaseModel):
@@ -59,6 +69,7 @@ class Task(BaseModel):
output_file: File path for storing task output.
output_json: Pydantic model for structuring JSON output.
output_pydantic: Pydantic model for task output.
security_config: Security configuration including fingerprinting.
tools: List of tools/resources limited for task execution.
"""
@@ -111,6 +122,10 @@ class Task(BaseModel):
default_factory=list,
description="Tools the agent is limited to use for this task.",
)
security_config: SecurityConfig = Field(
default_factory=SecurityConfig,
description="Security configuration for the task.",
)
id: UUID4 = Field(
default_factory=uuid.uuid4,
frozen=True,
@@ -127,60 +142,79 @@ class Task(BaseModel):
processed_by_agents: Set[str] = Field(default_factory=set)
guardrail: Optional[Callable[[TaskOutput], Tuple[bool, Any]]] = Field(
default=None,
description="Function to validate task output before proceeding to next task"
description="Function to validate task output before proceeding to next task",
)
max_retries: int = Field(
default=3,
description="Maximum number of retries when guardrail fails"
default=3, description="Maximum number of retries when guardrail fails"
)
retry_count: int = Field(
default=0,
description="Current number of retries"
retry_count: int = Field(default=0, description="Current number of retries")
start_time: Optional[datetime.datetime] = Field(
default=None, description="Start time of the task execution"
)
end_time: Optional[datetime.datetime] = Field(
default=None, description="End time of the task execution"
)
@field_validator("guardrail")
@classmethod
def validate_guardrail_function(cls, v: Optional[Callable]) -> Optional[Callable]:
"""Validate that the guardrail function has the correct signature and behavior.
While type hints provide static checking, this validator ensures runtime safety by:
1. Verifying the function accepts exactly one required parameter (the TaskOutput)
2. Checking return type annotations match Tuple[bool, Any] if present
3. Providing clear, immediate error messages for debugging
This runtime validation is crucial because:
- Type hints are optional and can be ignored at runtime
- Function signatures need immediate validation before task execution
- Clear error messages help users debug guardrail implementation issues
Args:
v: The guardrail function to validate
Returns:
The validated guardrail function
Raises:
ValueError: If the function signature is invalid or return annotation
doesn't match Tuple[bool, Any]
"""
if v is not None:
sig = inspect.signature(v)
if len(sig.parameters) != 1:
positional_args = [
param
for param in sig.parameters.values()
if param.default is inspect.Parameter.empty
]
if len(positional_args) != 1:
raise ValueError("Guardrail function must accept exactly one parameter")
# Check return annotation if present, but don't require it
return_annotation = sig.return_annotation
if return_annotation != inspect.Signature.empty:
if not (return_annotation == Tuple[bool, Any] or str(return_annotation) == 'Tuple[bool, Any]'):
raise ValueError("If return type is annotated, it must be Tuple[bool, Any]")
return_annotation_args = get_args(return_annotation)
if not (
get_origin(return_annotation) is tuple
and len(return_annotation_args) == 2
and return_annotation_args[0] is bool
and (
return_annotation_args[1] is Any
or return_annotation_args[1] is str
or return_annotation_args[1] is TaskOutput
or return_annotation_args[1] == Union[str, TaskOutput]
)
):
raise ValueError(
"If return type is annotated, it must be Tuple[bool, Any]"
)
return v
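For reference, a guardrail that satisfies this validator: exactly one required parameter, and a return annotation whose origin is tuple with (bool, Any). A sketch, not taken from the commit:

import json
from typing import Any, Tuple

def require_json(output: TaskOutput) -> Tuple[bool, Any]:
    """One required parameter in, (success, payload-or-error) out."""
    try:
        json.loads(output.raw)
        return True, output
    except json.JSONDecodeError as exc:
        return False, f"Output must be valid JSON: {exc}"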
_telemetry: Telemetry = PrivateAttr(default_factory=Telemetry)
_execution_span: Optional[Span] = PrivateAttr(default=None)
_original_description: Optional[str] = PrivateAttr(default=None)
_original_expected_output: Optional[str] = PrivateAttr(default=None)
_original_output_file: Optional[str] = PrivateAttr(default=None)
_thread: Optional[threading.Thread] = PrivateAttr(default=None)
_execution_time: Optional[float] = PrivateAttr(default=None)
@model_validator(mode="before")
@classmethod
@@ -205,16 +239,54 @@ class Task(BaseModel):
"may_not_set_field", "This field is not to be set by the user.", {}
)
def _set_start_execution_time(self) -> float:
return datetime.datetime.now().timestamp()
def _set_end_execution_time(self, start_time: float) -> None:
self._execution_time = datetime.datetime.now().timestamp() - start_time
@field_validator("output_file")
@classmethod
def output_file_validation(cls, value: str) -> str:
"""Validate the output file path by removing the / from the beginning of the path."""
def output_file_validation(cls, value: Optional[str]) -> Optional[str]:
"""Validate the output file path.
Args:
value: The output file path to validate. Can be None or a string.
If the path contains template variables (e.g. {var}), leading slashes are preserved.
For regular paths, leading slashes are stripped.
Returns:
The validated and potentially modified path, or None if no path was provided.
Raises:
ValueError: If the path contains invalid characters, path traversal attempts,
or other security concerns.
"""
if value is None:
return None
# Basic security checks
if ".." in value:
raise ValueError(
"Path traversal attempts are not allowed in output_file paths"
)
# Check for shell expansion first
if value.startswith("~") or value.startswith("$"):
raise ValueError(
"Shell expansion characters are not allowed in output_file paths"
)
# Then check other shell special characters
if any(char in value for char in ["|", ">", "<", "&", ";"]):
raise ValueError(
"Shell special characters are not allowed in output_file paths"
)
# Don't strip leading slash if it's a template path with variables
if "{" in value or "}" in value:
# Validate template variable format
template_vars = [part.split("}")[0] for part in value.split("{")[1:]]
for var in template_vars:
if not var.isidentifier():
raise ValueError(f"Invalid template variable name: {var}")
return value
# Strip leading slash for regular paths
if value.startswith("/"):
return value[1:]
return value
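The rules above in action (illustrative; only description, expected_output, and output_file are assumed required; pydantic surfaces the validator's ValueError):

from crewai import Task

ok = Task(
    description="Summarize {topic}",
    expected_output="A short summary",
    output_file="/reports/{topic}.md",  # template path: leading slash preserved
)

try:
    Task(
        description="Summarize",
        expected_output="A short summary",
        output_file="../escape.md",  # ".." fails the traversal check
    )
except ValueError as err:
    print(err)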
@@ -263,6 +335,12 @@ class Task(BaseModel):
return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()
@property
def execution_duration(self) -> float | None:
if not self.start_time or not self.end_time:
return None
return (self.end_time - self.start_time).total_seconds()
def execute_async(
self,
agent: BaseAgent | None = None,
@@ -296,88 +374,102 @@ class Task(BaseModel):
tools: Optional[List[Any]],
) -> TaskOutput:
"""Run the core execution logic of the task."""
agent = agent or self.agent
self.agent = agent
if not agent:
raise Exception(
f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
try:
agent = agent or self.agent
self.agent = agent
if not agent:
raise Exception(
f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
)
self.start_time = datetime.datetime.now()
self.prompt_context = context
tools = tools or self.tools or []
self.processed_by_agents.add(agent.role)
crewai_event_bus.emit(self, TaskStartedEvent(context=context, task=self))
result = agent.execute_task(
task=self,
context=context,
tools=tools,
)
start_time = self._set_start_execution_time()
self._execution_span = self._telemetry.task_started(crew=agent.crew, task=self)
pydantic_output, json_output = self._export_output(result)
task_output = TaskOutput(
name=self.name,
description=self.description,
expected_output=self.expected_output,
raw=result,
pydantic=pydantic_output,
json_dict=json_output,
agent=agent.role,
output_format=self._get_output_format(),
)
self.prompt_context = context
tools = tools or self.tools or []
if self.guardrail:
guardrail_result = GuardrailResult.from_tuple(
self.guardrail(task_output)
)
if not guardrail_result.success:
if self.retry_count >= self.max_retries:
raise Exception(
f"Task failed guardrail validation after {self.max_retries} retries. "
f"Last error: {guardrail_result.error}"
)
self.processed_by_agents.add(agent.role)
self.retry_count += 1
context = self.i18n.errors("validation_error").format(
guardrail_result_error=guardrail_result.error,
task_output=task_output.raw,
)
printer = Printer()
printer.print(
content=f"Guardrail blocked, retrying, due to: {guardrail_result.error}\n",
color="yellow",
)
return self._execute_core(agent, context, tools)
result = agent.execute_task(
task=self,
context=context,
tools=tools,
)
pydantic_output, json_output = self._export_output(result)
task_output = TaskOutput(
name=self.name,
description=self.description,
expected_output=self.expected_output,
raw=result,
pydantic=pydantic_output,
json_dict=json_output,
agent=agent.role,
output_format=self._get_output_format(),
)
if self.guardrail:
guardrail_result = GuardrailResult.from_tuple(self.guardrail(task_output))
if not guardrail_result.success:
if self.retry_count >= self.max_retries:
if guardrail_result.result is None:
raise Exception(
f"Task failed guardrail validation after {self.max_retries} retries. "
f"Last error: {guardrail_result.error}"
"Task guardrail returned None as result. This is not allowed."
)
self.retry_count += 1
context = (
f"### Previous attempt failed validation: {guardrail_result.error}\n\n\n"
f"### Previous result:\n{task_output.raw}\n\n\n"
"Try again, making sure to address the validation error."
if isinstance(guardrail_result.result, str):
task_output.raw = guardrail_result.result
pydantic_output, json_output = self._export_output(
guardrail_result.result
)
task_output.pydantic = pydantic_output
task_output.json_dict = json_output
elif isinstance(guardrail_result.result, TaskOutput):
task_output = guardrail_result.result
self.output = task_output
self.end_time = datetime.datetime.now()
if self.callback:
self.callback(self.output)
crew = self.agent.crew # type: ignore[union-attr]
if crew and crew.task_callback and crew.task_callback != self.callback:
crew.task_callback(self.output)
if self.output_file:
content = (
json_output
if json_output
else (
pydantic_output.model_dump_json() if pydantic_output else result
)
)
return self._execute_core(agent, context, tools)
if guardrail_result.result is None:
raise Exception(
"Task guardrail returned None as result. This is not allowed."
)
if isinstance(guardrail_result.result, str):
task_output.raw = guardrail_result.result
pydantic_output, json_output = self._export_output(guardrail_result.result)
task_output.pydantic = pydantic_output
task_output.json_dict = json_output
elif isinstance(guardrail_result.result, TaskOutput):
task_output = guardrail_result.result
self.output = task_output
self._set_end_execution_time(start_time)
if self.callback:
self.callback(self.output)
if self._execution_span:
self._telemetry.task_ended(self._execution_span, self, agent.crew)
self._execution_span = None
if self.output_file:
content = (
json_output
if json_output
else pydantic_output.model_dump_json() if pydantic_output else result
)
self._save_file(content)
return task_output
self._save_file(content)
crewai_event_bus.emit(self, TaskCompletedEvent(output=task_output, task=self))
return task_output
except Exception as e:
self.end_time = datetime.datetime.now()
crewai_event_bus.emit(self, TaskFailedEvent(error=str(e), task=self))
raise e # Re-raise the exception after emitting the event
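The TaskStartedEvent / TaskCompletedEvent / TaskFailedEvent emissions added above can be observed through the event bus; a listener sketch, with the (source, event) handler signature assumed from the bus's emit convention:

from crewai.utilities.events import TaskCompletedEvent, TaskFailedEvent
from crewai.utilities.events.crewai_event_bus import crewai_event_bus

@crewai_event_bus.on(TaskCompletedEvent)
def on_done(source, event):
    print(f"completed: {event.task.description}")

@crewai_event_bus.on(TaskFailedEvent)
def on_fail(source, event):
    print(f"failed: {event.error}")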
def prompt(self) -> str:
"""Prompt the task.
@@ -393,27 +485,79 @@ class Task(BaseModel):
tasks_slices = [self.description, output]
return "\n".join(tasks_slices)
def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
"""Interpolate inputs into the task description and expected output."""
def interpolate_inputs_and_add_conversation_history(
self, inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]]
) -> None:
"""Interpolate inputs into the task description, expected output, and output file path.
Add conversation history if present.
Args:
inputs: Dictionary mapping template variables to their values.
Supported value types are strings, integers, floats, dicts, and lists.
Raises:
ValueError: If a required template variable is missing from inputs.
"""
if self._original_description is None:
self._original_description = self.description
if self._original_expected_output is None:
self._original_expected_output = self.expected_output
if self.output_file is not None and self._original_output_file is None:
self._original_output_file = self.output_file
if inputs:
self.description = self._original_description.format(**inputs)
self.expected_output = self.interpolate_only(
if not inputs:
return
try:
self.description = interpolate_only(
input_string=self._original_description, inputs=inputs
)
except KeyError as e:
raise ValueError(
f"Missing required template variable '{e.args[0]}' in description"
) from e
except ValueError as e:
raise ValueError(f"Error interpolating description: {str(e)}") from e
try:
self.expected_output = interpolate_only(
input_string=self._original_expected_output, inputs=inputs
)
except (KeyError, ValueError) as e:
raise ValueError(f"Error interpolating expected_output: {str(e)}") from e
def interpolate_only(self, input_string: str, inputs: Dict[str, Any]) -> str:
"""Interpolate placeholders (e.g., {key}) in a string while leaving JSON untouched."""
escaped_string = input_string.replace("{", "{{").replace("}", "}}")
if self.output_file is not None:
try:
self.output_file = interpolate_only(
input_string=self._original_output_file, inputs=inputs
)
except (KeyError, ValueError) as e:
raise ValueError(
f"Error interpolating output_file path: {str(e)}"
) from e
for key in inputs.keys():
escaped_string = escaped_string.replace(f"{{{{{key}}}}}", f"{{{key}}}")
if "crew_chat_messages" in inputs and inputs["crew_chat_messages"]:
conversation_instruction = self.i18n.slice(
"conversation_history_instruction"
)
return escaped_string.format(**inputs)
crew_chat_messages_json = str(inputs["crew_chat_messages"])
try:
crew_chat_messages = json.loads(crew_chat_messages_json)
except json.JSONDecodeError as e:
print("An error occurred while parsing crew chat messages:", e)
raise
conversation_history = "\n".join(
f"{msg['role'].capitalize()}: {msg['content']}"
for msg in crew_chat_messages
if isinstance(msg, dict) and "role" in msg and "content" in msg
)
self.description += (
f"\n\n{conversation_instruction}\n\n{conversation_history}"
)
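interpolate_only, which the rewritten method delegates to, substitutes only the supplied keys and leaves other braces (for example literal JSON) untouched; a usage sketch:

from crewai.utilities.string_utils import interpolate_only

text = interpolate_only(
    input_string="Research {topic} and cite {n} sources",
    inputs={"topic": "vector databases", "n": 3},
)
print(text)  # Research vector databases and cite 3 sources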
def increment_tools_errors(self) -> None:
"""Increment the tools errors counter."""
@@ -428,7 +572,15 @@ class Task(BaseModel):
def copy(
self, agents: List["BaseAgent"], task_mapping: Dict[str, "Task"]
) -> "Task":
"""Create a deep copy of the Task."""
"""Creates a deep copy of the Task while preserving its original class type.
Args:
agents: List of agents available for the task.
task_mapping: Dictionary mapping task IDs to Task instances.
Returns:
A copy of the task with the same class type as the original.
"""
exclude = {
"id",
"agent",
@@ -451,7 +603,7 @@ class Task(BaseModel):
cloned_agent = get_agent_by_role(self.agent.role) if self.agent else None
cloned_tools = copy(self.tools) if self.tools else []
copied_task = Task(
copied_task = self.__class__(
**copied_data,
context=cloned_context,
agent=cloned_agent,
@@ -494,19 +646,32 @@ class Task(BaseModel):
return OutputFormat.PYDANTIC
return OutputFormat.RAW
def _save_file(self, result: Any) -> None:
def _save_file(self, result: Union[Dict, str, Any]) -> None:
"""Save task output to a file.
Note:
For cross-platform file writing, especially on Windows, consider using FileWriterTool
from the crewai_tools package:
pip install 'crewai[tools]'
from crewai_tools import FileWriterTool
Args:
result: The result to save to the file. Can be a dict or any stringifiable object.
Raises:
ValueError: If output_file is not set
RuntimeError: If there is an error writing to the file
RuntimeError: If there is an error writing to the file. For cross-platform
compatibility, especially on Windows, use FileWriterTool from crewai_tools
package.
"""
if self.output_file is None:
raise ValueError("output_file is not set.")
FILEWRITER_RECOMMENDATION = (
"For cross-platform file writing, especially on Windows, "
"use FileWriterTool from crewai_tools package."
)
try:
resolved_path = Path(self.output_file).expanduser().resolve()
directory = resolved_path.parent
@@ -517,12 +682,26 @@ class Task(BaseModel):
with resolved_path.open("w", encoding="utf-8") as file:
if isinstance(result, dict):
import json
json.dump(result, file, ensure_ascii=False, indent=2)
else:
file.write(str(result))
except (OSError, IOError) as e:
raise RuntimeError(f"Failed to save output file: {e}")
raise RuntimeError(
"\n".join(
[f"Failed to save output file: {e}", FILEWRITER_RECOMMENDATION]
)
)
return None
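Per the docstring's recommendation, cross-platform writes can go through FileWriterTool from crewai_tools; the argument names below are assumptions based on that tool's common usage, not part of this diff:

# pip install 'crewai[tools]'
from crewai_tools import FileWriterTool

writer = FileWriterTool()
writer.run(filename="report.md", directory="output", content="# Findings\n")  # args assumed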
def __repr__(self):
return f"Task(description={self.description}, expected_output={self.expected_output})"
@property
def fingerprint(self) -> Fingerprint:
"""Get the fingerprint of the task.
Returns:
Fingerprint: The fingerprint of the task
"""
return self.security_config.fingerprint
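Net effect of the security additions to Task: every instance carries a SecurityConfig by default and exposes its fingerprint. A quick sketch:

from crewai import Task

task = Task(description="Audit access logs", expected_output="A findings report")
print(task.fingerprint.uuid_str)      # auto-generated UUID4 string
print(task.security_config.version)   # "1.0.0"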

View File

@@ -112,6 +112,23 @@ class Telemetry:
self._add_attribute(span, "crew_memory", crew.memory)
self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
self._add_attribute(span, "crew_number_of_agents", len(crew.agents))
# Add fingerprint data
if hasattr(crew, "fingerprint") and crew.fingerprint:
self._add_attribute(span, "crew_fingerprint", crew.fingerprint.uuid_str)
self._add_attribute(
span,
"crew_fingerprint_created_at",
crew.fingerprint.created_at.isoformat(),
)
# Add fingerprint metadata if it exists
if hasattr(crew.fingerprint, "metadata") and crew.fingerprint.metadata:
self._add_attribute(
span,
"crew_fingerprint_metadata",
json.dumps(crew.fingerprint.metadata),
)
if crew.share_crew:
self._add_attribute(
span,
@@ -129,17 +146,43 @@ class Telemetry:
"max_rpm": agent.max_rpm,
"i18n": agent.i18n.prompt_file,
"function_calling_llm": (
agent.function_calling_llm.model
if agent.function_calling_llm
getattr(
getattr(agent, "function_calling_llm", None),
"model",
"",
)
if getattr(agent, "function_calling_llm", None)
else ""
),
"llm": agent.llm.model,
"delegation_enabled?": agent.allow_delegation,
"allow_code_execution?": agent.allow_code_execution,
"max_retry_limit": agent.max_retry_limit,
"allow_code_execution?": getattr(
agent, "allow_code_execution", False
),
"max_retry_limit": getattr(agent, "max_retry_limit", 3),
"tools_names": [
tool.name.casefold() for tool in agent.tools or []
],
# Add agent fingerprint data if sharing crew details
"fingerprint": (
getattr(
getattr(agent, "fingerprint", None),
"uuid_str",
None,
)
),
"fingerprint_created_at": (
created_at.isoformat()
if (
created_at := getattr(
getattr(agent, "fingerprint", None),
"created_at",
None,
)
)
is not None
else None
),
}
for agent in crew.agents
]
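The nested getattr-with-defaults pattern used above keeps telemetry safe for agent-like objects that predate these attributes; in isolation:

class MinimalAgent:
    """Stand-in for a third-party agent with none of the new attributes."""

agent = MinimalAgent()
fp = getattr(getattr(agent, "fingerprint", None), "uuid_str", None)          # None, no AttributeError
model = getattr(getattr(agent, "function_calling_llm", None), "model", "")   # ""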
@@ -169,6 +212,17 @@ class Telemetry:
"tools_names": [
tool.name.casefold() for tool in task.tools or []
],
# Add task fingerprint data if sharing crew details
"fingerprint": (
task.fingerprint.uuid_str
if hasattr(task, "fingerprint") and task.fingerprint
else None
),
"fingerprint_created_at": (
task.fingerprint.created_at.isoformat()
if hasattr(task, "fingerprint") and task.fingerprint
else None
),
}
for task in crew.tasks
]
@@ -196,14 +250,20 @@ class Telemetry:
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"function_calling_llm": (
agent.function_calling_llm.model
if agent.function_calling_llm
getattr(
getattr(agent, "function_calling_llm", None),
"model",
"",
)
if getattr(agent, "function_calling_llm", None)
else ""
),
"llm": agent.llm.model,
"delegation_enabled?": agent.allow_delegation,
"allow_code_execution?": agent.allow_code_execution,
"max_retry_limit": agent.max_retry_limit,
"allow_code_execution?": getattr(
agent, "allow_code_execution", False
),
"max_retry_limit": getattr(agent, "max_retry_limit", 3),
"tools_names": [
tool.name.casefold() for tool in agent.tools or []
],
@@ -252,6 +312,39 @@ class Telemetry:
self._add_attribute(created_span, "task_key", task.key)
self._add_attribute(created_span, "task_id", str(task.id))
# Add fingerprint data
if hasattr(crew, "fingerprint") and crew.fingerprint:
self._add_attribute(
created_span, "crew_fingerprint", crew.fingerprint.uuid_str
)
if hasattr(task, "fingerprint") and task.fingerprint:
self._add_attribute(
created_span, "task_fingerprint", task.fingerprint.uuid_str
)
self._add_attribute(
created_span,
"task_fingerprint_created_at",
task.fingerprint.created_at.isoformat(),
)
# Add fingerprint metadata if it exists
if hasattr(task.fingerprint, "metadata") and task.fingerprint.metadata:
self._add_attribute(
created_span,
"task_fingerprint_metadata",
json.dumps(task.fingerprint.metadata),
)
# Add agent fingerprint if task has an assigned agent
if hasattr(task, "agent") and task.agent:
agent_fingerprint = getattr(
getattr(task.agent, "fingerprint", None), "uuid_str", None
)
if agent_fingerprint:
self._add_attribute(
created_span, "agent_fingerprint", agent_fingerprint
)
if crew.share_crew:
self._add_attribute(
created_span, "formatted_description", task.description
@@ -270,6 +363,21 @@ class Telemetry:
self._add_attribute(span, "task_key", task.key)
self._add_attribute(span, "task_id", str(task.id))
# Add fingerprint data to execution span
if hasattr(crew, "fingerprint") and crew.fingerprint:
self._add_attribute(span, "crew_fingerprint", crew.fingerprint.uuid_str)
if hasattr(task, "fingerprint") and task.fingerprint:
self._add_attribute(span, "task_fingerprint", task.fingerprint.uuid_str)
# Add agent fingerprint if task has an assigned agent
if hasattr(task, "agent") and task.agent:
agent_fingerprint = getattr(
getattr(task.agent, "fingerprint", None), "uuid_str", None
)
if agent_fingerprint:
self._add_attribute(span, "agent_fingerprint", agent_fingerprint)
if crew.share_crew:
self._add_attribute(span, "formatted_description", task.description)
self._add_attribute(
@@ -281,9 +389,22 @@ class Telemetry:
return self._safe_telemetry_operation(operation)
def task_ended(self, span: Span, task: Task, crew: Crew):
"""Records task execution in a crew."""
"""Records the completion of a task execution in a crew.
Args:
span (Span): The OpenTelemetry span tracking the task execution
task (Task): The task that was completed
crew (Crew): The crew context in which the task was executed
Note:
If share_crew is enabled, this will also record the task output
"""
def operation():
# Ensure fingerprint data is present on completion span
if hasattr(task, "fingerprint") and task.fingerprint:
self._add_attribute(span, "task_fingerprint", task.fingerprint.uuid_str)
if crew.share_crew:
self._add_attribute(
span,
@@ -297,7 +418,13 @@ class Telemetry:
self._safe_telemetry_operation(operation)
def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int):
"""Records the repeated usage 'error' of a tool by an agent."""
"""Records when a tool is used repeatedly, which might indicate an issue.
Args:
llm (Any): The language model being used
tool_name (str): Name of the tool being repeatedly used
attempts (int): Number of attempts made with this tool
"""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
@@ -316,8 +443,15 @@ class Telemetry:
self._safe_telemetry_operation(operation)
def tool_usage(self, llm: Any, tool_name: str, attempts: int):
"""Records the usage of a tool by an agent."""
def tool_usage(self, llm: Any, tool_name: str, attempts: int, agent: Any = None):
"""Records the usage of a tool by an agent.
Args:
llm (Any): The language model being used
tool_name (str): Name of the tool being used
attempts (int): Number of attempts made with this tool
agent (Any, optional): The agent using the tool
"""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
@@ -331,13 +465,30 @@ class Telemetry:
self._add_attribute(span, "attempts", attempts)
if llm:
self._add_attribute(span, "llm", llm.model)
# Add agent fingerprint data if available
if agent and hasattr(agent, "fingerprint") and agent.fingerprint:
self._add_attribute(
span, "agent_fingerprint", agent.fingerprint.uuid_str
)
if hasattr(agent, "role"):
self._add_attribute(span, "agent_role", agent.role)
span.set_status(Status(StatusCode.OK))
span.end()
self._safe_telemetry_operation(operation)
def tool_usage_error(self, llm: Any):
"""Records the usage of a tool by an agent."""
def tool_usage_error(
self, llm: Any, agent: Any = None, tool_name: Optional[str] = None
):
"""Records when a tool usage results in an error.
Args:
llm (Any): The language model being used when the error occurred
agent (Any, optional): The agent using the tool
tool_name (str, optional): Name of the tool that caused the error
"""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
@@ -349,6 +500,18 @@ class Telemetry:
)
if llm:
self._add_attribute(span, "llm", llm.model)
if tool_name:
self._add_attribute(span, "tool_name", tool_name)
# Add agent fingerprint data if available
if agent and hasattr(agent, "fingerprint") and agent.fingerprint:
self._add_attribute(
span, "agent_fingerprint", agent.fingerprint.uuid_str
)
if hasattr(agent, "role"):
self._add_attribute(span, "agent_role", agent.role)
span.set_status(Status(StatusCode.OK))
span.end()
@@ -357,6 +520,15 @@ class Telemetry:
def individual_test_result_span(
self, crew: Crew, quality: float, exec_time: int, model_name: str
):
"""Records individual test results for a crew execution.
Args:
crew (Crew): The crew being tested
quality (float): Quality score of the execution
exec_time (int): Execution time in seconds
model_name (str): Name of the model used
"""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Crew Individual Test Result")
@@ -383,6 +555,15 @@ class Telemetry:
inputs: dict[str, Any] | None,
model_name: str,
):
"""Records the execution of a test suite for a crew.
Args:
crew (Crew): The crew being tested
iterations (int): Number of test iterations
inputs (dict[str, Any] | None): Input parameters for the test
model_name (str): Name of the model used in testing
"""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Crew Test Execution")
@@ -408,6 +589,8 @@ class Telemetry:
self._safe_telemetry_operation(operation)
def deploy_signup_error_span(self):
"""Records when an error occurs during the deployment signup process."""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Deploy Signup Error")
@@ -417,6 +600,12 @@ class Telemetry:
self._safe_telemetry_operation(operation)
def start_deployment_span(self, uuid: Optional[str] = None):
"""Records the start of a deployment process.
Args:
uuid (Optional[str]): Unique identifier for the deployment
"""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Start Deployment")
@@ -428,6 +617,8 @@ class Telemetry:
self._safe_telemetry_operation(operation)
def create_crew_deployment_span(self):
"""Records the creation of a new crew deployment."""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Create Crew Deployment")
@@ -437,6 +628,13 @@ class Telemetry:
self._safe_telemetry_operation(operation)
def get_crew_logs_span(self, uuid: Optional[str], log_type: str = "deployment"):
"""Records the retrieval of crew logs.
Args:
uuid (Optional[str]): Unique identifier for the crew
log_type (str, optional): Type of logs being retrieved. Defaults to "deployment".
"""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Get Crew Logs")
@@ -449,6 +647,12 @@ class Telemetry:
self._safe_telemetry_operation(operation)
def remove_crew_span(self, uuid: Optional[str] = None):
"""Records the removal of a crew.
Args:
uuid (Optional[str]): Unique identifier for the crew being removed
"""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Remove Crew")
@@ -574,6 +778,12 @@ class Telemetry:
self._safe_telemetry_operation(operation)
def flow_creation_span(self, flow_name: str):
"""Records the creation of a new flow.
Args:
flow_name (str): Name of the flow being created
"""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Flow Creation")
@@ -584,6 +794,13 @@ class Telemetry:
self._safe_telemetry_operation(operation)
def flow_plotting_span(self, flow_name: str, node_names: list[str]):
"""Records flow visualization/plotting activity.
Args:
flow_name (str): Name of the flow being plotted
node_names (list[str]): List of node names in the flow
"""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Flow Plotting")
@@ -595,6 +812,13 @@ class Telemetry:
self._safe_telemetry_operation(operation)
def flow_execution_span(self, flow_name: str, node_names: list[str]):
"""Records the execution of a flow.
Args:
flow_name (str): Name of the flow being executed
node_names (list[str]): List of nodes being executed in the flow
"""
def operation():
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Flow Execution")

View File

@@ -7,11 +7,11 @@ from crewai.utilities import I18N
i18n = I18N()
class AddImageToolSchema(BaseModel):
image_url: str = Field(..., description="The URL or path of the image to add")
action: Optional[str] = Field(
default=None,
description="Optional context or question about the image"
default=None, description="Optional context or question about the image"
)
@@ -36,10 +36,7 @@ class AddImageTool(BaseTool):
"image_url": {
"url": image_url,
},
}
},
]
return {
"role": "user",
"content": content
}
return {"role": "user", "content": content}

View File

@@ -1,4 +1,5 @@
from typing import Optional, Union
import logging
from typing import Optional
from pydantic import Field
@@ -7,6 +8,8 @@ from crewai.task import Task
from crewai.tools.base_tool import BaseTool
from crewai.utilities import I18N
logger = logging.getLogger(__name__)
class BaseAgentTool(BaseTool):
"""Base class for agent-related tools"""
@@ -16,6 +19,25 @@ class BaseAgentTool(BaseTool):
default_factory=I18N, description="Internationalization settings"
)
def sanitize_agent_name(self, name: str) -> str:
"""
Sanitize agent role name by normalizing whitespace and setting to lowercase.
Converts all whitespace (including newlines) to single spaces and removes quotes.
Args:
name (str): The agent role name to sanitize
Returns:
str: The sanitized agent role name, with whitespace normalized,
converted to lowercase, and quotes removed
"""
if not name:
return ""
# Normalize all whitespace (including newlines) to single spaces
normalized = " ".join(name.split())
# Remove quotes and convert to lowercase
return normalized.replace('"', "").casefold()
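The same normalization shown standalone (mirrors the method above):

def sanitize_agent_name(name: str) -> str:
    if not name:
        return ""
    return " ".join(name.split()).replace('"', "").casefold()

assert sanitize_agent_name('Senior\n"Data"   Researcher') == "senior data researcher"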
def _get_coworker(self, coworker: Optional[str], **kwargs) -> Optional[str]:
coworker = coworker or kwargs.get("co_worker") or kwargs.get("coworker")
if coworker:
@@ -25,11 +47,27 @@ class BaseAgentTool(BaseTool):
return coworker
def _execute(
self, agent_name: Union[str, None], task: str, context: Union[str, None]
self,
agent_name: Optional[str],
task: str,
context: Optional[str] = None
) -> str:
"""
Execute delegation to an agent with case-insensitive and whitespace-tolerant matching.
Args:
agent_name: Name/role of the agent to delegate to (case-insensitive)
task: The specific question or task to delegate
context: Optional additional context for the task execution
Returns:
str: The execution result from the delegated agent or an error message
if the agent cannot be found
"""
try:
if agent_name is None:
agent_name = ""
logger.debug("No agent name provided, using empty string")
# It is important to remove the quotes from the agent name.
# The reason we have to do this is because less-powerful LLMs
@@ -38,31 +76,49 @@ class BaseAgentTool(BaseTool):
# {"task": "....", "coworker": "....
# when it should look like this:
# {"task": "....", "coworker": "...."}
agent_name = agent_name.casefold().replace('"', "").replace("\n", "")
sanitized_name = self.sanitize_agent_name(agent_name)
logger.debug(f"Sanitized agent name from '{agent_name}' to '{sanitized_name}'")
available_agents = [agent.role for agent in self.agents]
logger.debug(f"Available agents: {available_agents}")
agent = [ # type: ignore # Incompatible types in assignment (expression has type "list[BaseAgent]", variable has type "str | None")
available_agent
for available_agent in self.agents
if available_agent.role.casefold().replace("\n", "") == agent_name
if self.sanitize_agent_name(available_agent.role) == sanitized_name
]
except Exception as _:
logger.debug(f"Found {len(agent)} matching agents for role '{sanitized_name}'")
except (AttributeError, ValueError) as e:
# Handle specific exceptions that might occur during role name processing
return self.i18n.errors("agent_tool_unexisting_coworker").format(
coworkers="\n".join(
[f"- {agent.role.casefold()}" for agent in self.agents]
)
[f"- {self.sanitize_agent_name(agent.role)}" for agent in self.agents]
),
error=str(e)
)
if not agent:
# No matching agent found after sanitization
return self.i18n.errors("agent_tool_unexisting_coworker").format(
coworkers="\n".join(
[f"- {agent.role.casefold()}" for agent in self.agents]
)
[f"- {self.sanitize_agent_name(agent.role)}" for agent in self.agents]
),
error=f"No agent found with role '{sanitized_name}'"
)
agent = agent[0]
task_with_assigned_agent = Task( # type: ignore # Incompatible types in assignment (expression has type "Task", variable has type "str")
description=task,
agent=agent,
expected_output=agent.i18n.slice("manager_request"),
i18n=agent.i18n,
)
return agent.execute_task(task_with_assigned_agent, context)
try:
task_with_assigned_agent = Task(
description=task,
agent=agent,
expected_output=agent.i18n.slice("manager_request"),
i18n=agent.i18n,
)
logger.debug(f"Created task for agent '{self.sanitize_agent_name(agent.role)}': {task}")
return agent.execute_task(task_with_assigned_agent, context)
except Exception as e:
# Handle task creation or execution errors
return self.i18n.errors("agent_tool_execution_error").format(
agent_role=self.sanitize_agent_name(agent.role),
error=str(e)
)

View File

@@ -1,8 +1,15 @@
import warnings
from abc import ABC, abstractmethod
from inspect import signature
from typing import Any, Callable, Type, get_args, get_origin
from pydantic import BaseModel, ConfigDict, Field, create_model, validator
from pydantic import (
BaseModel,
ConfigDict,
Field,
create_model,
field_validator,
)
from pydantic import BaseModel as PydanticBaseModel
from crewai.tools.structured_tool import CrewStructuredTool
@@ -12,13 +19,15 @@ class BaseTool(BaseModel, ABC):
class _ArgsSchemaPlaceholder(PydanticBaseModel):
pass
model_config = ConfigDict()
model_config = ConfigDict(arbitrary_types_allowed=True)
name: str
"""The unique name of the tool that clearly communicates its purpose."""
description: str
"""Used to tell the model how/when/why to use the tool."""
args_schema: Type[PydanticBaseModel] = Field(default_factory=_ArgsSchemaPlaceholder)
args_schema: Type[PydanticBaseModel] = Field(
default_factory=_ArgsSchemaPlaceholder, validate_default=True
)
"""The schema for the arguments that the tool accepts."""
description_updated: bool = False
"""Flag to check if the description has been updated."""
@@ -27,7 +36,8 @@ class BaseTool(BaseModel, ABC):
result_as_answer: bool = False
"""Flag to check if the tool should be the final agent answer."""
@validator("args_schema", always=True, pre=True)
@field_validator("args_schema", mode="before")
@classmethod
def _default_args_schema(
cls, v: Type[PydanticBaseModel]
) -> Type[PydanticBaseModel]:

View File

@@ -0,0 +1,9 @@
from dataclasses import dataclass
@dataclass
class ToolResult:
"""Result of tool execution."""
result: str
result_as_answer: bool = False
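Usage of the new dataclass is direct; the import path below is an assumption, since the diff omits the new file's name:

from crewai.tools.tool_types import ToolResult  # path assumed

res = ToolResult(result="42", result_as_answer=True)
if res.result_as_answer:
    final_answer = res.result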

View File

@@ -1,25 +1,47 @@
import ast
import datetime
import json
import time
from dataclasses import dataclass
from difflib import SequenceMatcher
from json import JSONDecodeError
from textwrap import dedent
from typing import Any, List, Union
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
import json5
from json_repair import repair_json
import crewai.utilities.events as events
from crewai.agents.tools_handler import ToolsHandler
from crewai.task import Task
from crewai.telemetry import Telemetry
from crewai.tools import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
from crewai.tools.tool_usage_events import ToolUsageError, ToolUsageFinished
from crewai.utilities import I18N, Converter, ConverterError, Printer
from crewai.utilities import I18N, Converter, Printer
from crewai.utilities.agent_utils import (
get_tool_names,
render_text_description_and_args,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.tool_usage_events import (
ToolSelectionErrorEvent,
ToolUsageErrorEvent,
ToolUsageFinishedEvent,
ToolValidateInputErrorEvent,
)
try:
import agentops # type: ignore
except ImportError:
agentops = None
OPENAI_BIGGER_MODELS = ["gpt-4", "gpt-4o", "o1-preview", "o1-mini", "o1", "o3", "o3-mini"]
if TYPE_CHECKING:
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.lite_agent import LiteAgent
OPENAI_BIGGER_MODELS = [
"gpt-4",
"gpt-4o",
"o1-preview",
"o1-mini",
"o1",
"o3",
"o3-mini",
]
class ToolUsageErrorException(Exception):
@@ -46,31 +68,29 @@ class ToolUsage:
def __init__(
self,
tools_handler: ToolsHandler,
tools: List[BaseTool],
original_tools: List[Any],
tools_description: str,
tools_names: str,
task: Task,
tools_handler: Optional[ToolsHandler],
tools: List[CrewStructuredTool],
task: Optional[Task],
function_calling_llm: Any,
agent: Any,
action: Any,
agent: Optional[Union["BaseAgent", "LiteAgent"]] = None,
action: Any = None,
fingerprint_context: Optional[Dict[str, str]] = None,
) -> None:
self._i18n: I18N = agent.i18n
self._i18n: I18N = agent.i18n if agent else I18N()
self._printer: Printer = Printer()
self._telemetry: Telemetry = Telemetry()
self._run_attempts: int = 1
self._max_parsing_attempts: int = 3
self._remember_format_after_usages: int = 3
self.agent = agent
self.tools_description = tools_description
self.tools_names = tools_names
self.tools_description = render_text_description_and_args(tools)
self.tools_names = get_tool_names(tools)
self.tools_handler = tools_handler
self.original_tools = original_tools
self.tools = tools
self.task = task
self.action = action
self.function_calling_llm = function_calling_llm
self.fingerprint_context = fingerprint_context or {}
# Set the maximum parsing attempts for bigger models
if (
@@ -80,7 +100,7 @@ class ToolUsage:
self._max_parsing_attempts = 2
self._remember_format_after_usages = 4
def parse(self, tool_string: str):
def parse_tool_calling(self, tool_string: str):
"""Parse the tool string and return the tool calling."""
return self._tool_calling(tool_string)
@@ -89,42 +109,46 @@ class ToolUsage:
) -> str:
if isinstance(calling, ToolUsageErrorException):
error = calling.message
if self.agent.verbose:
if self.agent and self.agent.verbose:
self._printer.print(content=f"\n\n{error}\n", color="red")
self.task.increment_tools_errors()
if self.task:
self.task.increment_tools_errors()
return error
# BUG? The code below seems to be unreachable
try:
tool = self._select_tool(calling.tool_name)
except Exception as e:
error = getattr(e, "message", str(e))
self.task.increment_tools_errors()
if self.agent.verbose:
if self.task:
self.task.increment_tools_errors()
if self.agent and self.agent.verbose:
self._printer.print(content=f"\n\n{error}\n", color="red")
return error
if isinstance(tool, CrewStructuredTool) and tool.name == self._i18n.tools("add_image")["name"]: # type: ignore
if (
isinstance(tool, CrewStructuredTool)
and tool.name == self._i18n.tools("add_image")["name"] # type: ignore
):
try:
result = self._use(tool_string=tool_string, tool=tool, calling=calling)
return result
except Exception as e:
error = getattr(e, "message", str(e))
self.task.increment_tools_errors()
if self.agent.verbose:
if self.task:
self.task.increment_tools_errors()
if self.agent and self.agent.verbose:
self._printer.print(content=f"\n\n{error}\n", color="red")
return error
return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}" # type: ignore # BUG?: "_use" of "ToolUsage" does not return a value (it only ever returns None)
return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"
def _use(
self,
tool_string: str,
tool: Any,
tool: CrewStructuredTool,
calling: Union[ToolCalling, InstructorToolCalling],
) -> str: # TODO: Fix this return type
tool_event = agentops.ToolEvent(name=calling.tool_name) if agentops else None # type: ignore
) -> str:
if self._check_tool_repeated_usage(calling=calling): # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
try:
result = self._i18n.errors("task_repeated_usage").format(
@@ -139,24 +163,29 @@ class ToolUsage:
return result # type: ignore # Fix the return type of this function
except Exception:
self.task.increment_tools_errors()
if self.task:
self.task.increment_tools_errors()
started_at = time.time()
from_cache = False
result = None # type: ignore
result = None # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
# check if cache is available
if self.tools_handler.cache:
result = self.tools_handler.cache.read( # type: ignore # Incompatible types in assignment (expression has type "str | None", variable has type "str")
if self.tools_handler and self.tools_handler.cache:
result = self.tools_handler.cache.read(
tool=calling.tool_name, input=calling.arguments
)
) # type: ignore
from_cache = result is not None
original_tool = next(
(ot for ot in self.original_tools if ot.name == tool.name), None
available_tool = next(
(
available_tool
for available_tool in self.tools
if available_tool.name == tool.name
),
None,
)
if result is None: #! finecwg: if not result --> if result is None
if result is None:
try:
if calling.tool_name in [
"Delegate work to coworker",
@@ -165,22 +194,31 @@ class ToolUsage:
coworker = (
calling.arguments.get("coworker") if calling.arguments else None
)
self.task.increment_delegations(coworker)
if self.task:
self.task.increment_delegations(coworker)
if calling.arguments:
try:
acceptable_args = tool.args_schema.schema()["properties"].keys() # type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "schema"
acceptable_args = tool.args_schema.model_json_schema()[
"properties"
].keys() # type: ignore
arguments = {
k: v
for k, v in calling.arguments.items()
if k in acceptable_args
}
# Add fingerprint metadata if available
arguments = self._add_fingerprint_metadata(arguments)
result = tool.invoke(input=arguments)
except Exception:
arguments = calling.arguments
# Add fingerprint metadata if available
arguments = self._add_fingerprint_metadata(arguments)
result = tool.invoke(input=arguments)
else:
result = tool.invoke(input={})
# Add fingerprint metadata even to empty arguments
arguments = self._add_fingerprint_metadata({})
result = tool.invoke(input=arguments)
except Exception as e:
self.on_tool_error(tool=tool, tool_calling=calling, e=e)
self._run_attempts += 1
@@ -190,38 +228,33 @@ class ToolUsage:
error=e, tool=tool.name, tool_inputs=tool.description
)
error = ToolUsageErrorException(
f'\n{error_message}.\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
).message
self.task.increment_tools_errors()
if self.agent.verbose:
if self.task:
self.task.increment_tools_errors()
if self.agent and self.agent.verbose:
self._printer.print(
content=f"\n\n{error_message}\n", color="red"
)
return error # type: ignore # No return value expected
self.task.increment_tools_errors()
if agentops:
agentops.record(
agentops.ErrorEvent(exception=e, trigger_event=tool_event)
)
if self.task:
self.task.increment_tools_errors()
return self.use(calling=calling, tool_string=tool_string) # type: ignore # No return value expected
if self.tools_handler:
should_cache = True
if (
hasattr(original_tool, "cache_function")
and original_tool.cache_function # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
hasattr(available_tool, "cache_function")
and available_tool.cache_function # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
):
should_cache = original_tool.cache_function( # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
should_cache = available_tool.cache_function( # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
calling.arguments, result
)
self.tools_handler.on_tool_use(
calling=calling, output=result, should_cache=should_cache
)
if agentops:
agentops.record(tool_event)
self._telemetry.tool_usage(
llm=self.function_calling_llm,
tool_name=tool.name,
@@ -239,44 +272,50 @@ class ToolUsage:
tool_calling=calling,
from_cache=from_cache,
started_at=started_at,
result=result,
)
if (
hasattr(original_tool, "result_as_answer")
and original_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
hasattr(available_tool, "result_as_answer")
and available_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
):
result_as_answer = original_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer"
data["result_as_answer"] = result_as_answer
result_as_answer = available_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer"
data["result_as_answer"] = result_as_answer # type: ignore
self.agent.tools_results.append(data)
if self.agent and hasattr(self.agent, "tools_results"):
self.agent.tools_results.append(data)
return result # type: ignore # No return value expected
def _format_result(self, result: Any) -> None:
self.task.used_tools += 1
if self._should_remember_format(): # type: ignore # "_should_remember_format" of "ToolUsage" does not return a value (it only ever returns None)
result = self._remember_format(result=result) # type: ignore # "_remember_format" of "ToolUsage" does not return a value (it only ever returns None)
return result
def _should_remember_format(self) -> bool:
return self.task.used_tools % self._remember_format_after_usages == 0
def _format_result(self, result: Any) -> str:
if self.task:
self.task.used_tools += 1
if self._should_remember_format():
result = self._remember_format(result=result)
return str(result)
def _remember_format(self, result: str) -> None:
def _should_remember_format(self) -> bool:
if self.task:
return self.task.used_tools % self._remember_format_after_usages == 0
return False
def _remember_format(self, result: str) -> str:
result = str(result)
result += "\n\n" + self._i18n.slice("tools").format(
tools=self.tools_description, tool_names=self.tools_names
)
return result # type: ignore # No return value expected
return result
def _check_tool_repeated_usage(
self, calling: Union[ToolCalling, InstructorToolCalling]
) -> None:
) -> bool:
if not self.tools_handler:
return False # type: ignore # No return value expected
return False
if last_tool_usage := self.tools_handler.last_used_tool:
return (calling.tool_name == last_tool_usage.tool_name) and ( # type: ignore # No return value expected
return (calling.tool_name == last_tool_usage.tool_name) and (
calling.arguments == last_tool_usage.arguments
)
return False
def _select_tool(self, tool_name: str) -> Any:
order_tools = sorted(
@@ -295,15 +334,35 @@ class ToolUsage:
> 0.85
):
return tool
self.task.increment_tools_errors()
if self.task:
self.task.increment_tools_errors()
tool_selection_data: Dict[str, Any] = {
"agent_key": getattr(self.agent, "key", None) if self.agent else None,
"agent_role": getattr(self.agent, "role", None) if self.agent else None,
"tool_name": tool_name,
"tool_args": {},
"tool_class": self.tools_description,
}
if tool_name and tool_name != "":
raise Exception(
f"Action '{tool_name}' don't exist, these are the only available Actions:\n{self.tools_description}"
error = f"Action '{tool_name}' don't exist, these are the only available Actions:\n{self.tools_description}"
crewai_event_bus.emit(
self,
ToolSelectionErrorEvent(
**tool_selection_data,
error=error,
),
)
raise Exception(error)
else:
raise Exception(
f"I forgot the Action name, these are the only available Actions: {self.tools_description}"
error = f"I forgot the Action name, these are the only available Actions: {self.tools_description}"
crewai_event_bus.emit(
self,
ToolSelectionErrorEvent(
**tool_selection_data,
error=error,
),
)
raise Exception(error)
def _render(self) -> str:
"""Render the tool name and description in plain text."""
@@ -312,7 +371,9 @@ class ToolUsage:
descriptions.append(tool.description)
return "\n--\n".join(descriptions)
def _function_calling(self, tool_string: str):
def _function_calling(
self, tool_string: str
) -> Union[ToolCalling, InstructorToolCalling]:
model = (
InstructorToolCalling
if self.function_calling_llm.supports_function_calling()
@@ -334,48 +395,43 @@ class ToolUsage:
max_attempts=1,
)
tool_object = converter.to_pydantic()
calling = ToolCalling(
tool_name=tool_object["tool_name"],
arguments=tool_object["arguments"],
log=tool_string, # type: ignore
)
if not isinstance(tool_object, (ToolCalling, InstructorToolCalling)):
raise ToolUsageErrorException("Failed to parse tool calling")
if isinstance(calling, ConverterError):
raise calling
return tool_object
return calling
def _original_tool_calling(self, tool_string: str, raise_error: bool = False):
def _original_tool_calling(
self, tool_string: str, raise_error: bool = False
) -> Union[ToolCalling, InstructorToolCalling, ToolUsageErrorException]:
tool_name = self.action.tool
tool = self._select_tool(tool_name)
try:
tool_input = self._validate_tool_input(self.action.tool_input)
arguments = ast.literal_eval(tool_input)
arguments = self._validate_tool_input(self.action.tool_input)
except Exception:
if raise_error:
raise
else:
return ToolUsageErrorException( # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
f'{self._i18n.errors("tool_arguments_error")}'
return ToolUsageErrorException(
f"{self._i18n.errors('tool_arguments_error')}"
)
if not isinstance(arguments, dict):
if raise_error:
raise
else:
return ToolUsageErrorException( # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
f'{self._i18n.errors("tool_arguments_error")}'
return ToolUsageErrorException(
f"{self._i18n.errors('tool_arguments_error')}"
)
return ToolCalling(
tool_name=tool.name,
arguments=arguments,
log=tool_string, # type: ignore
)
def _tool_calling(
self, tool_string: str
) -> Union[ToolCalling, InstructorToolCalling]:
) -> Union[ToolCalling, InstructorToolCalling, ToolUsageErrorException]:
try:
try:
return self._original_tool_calling(tool_string, raise_error=True)
@@ -388,74 +444,104 @@ class ToolUsage:
self._run_attempts += 1
if self._run_attempts > self._max_parsing_attempts:
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
self.task.increment_tools_errors()
if self.agent.verbose:
if self.task:
self.task.increment_tools_errors()
if self.agent and self.agent.verbose:
self._printer.print(content=f"\n\n{e}\n", color="red")
return ToolUsageErrorException( # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
f'{self._i18n.errors("tool_usage_error").format(error=e)}\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
f"{self._i18n.errors('tool_usage_error').format(error=e)}\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
)
return self._tool_calling(tool_string)
def _validate_tool_input(self, tool_input: str) -> str:
def _validate_tool_input(self, tool_input: Optional[str]) -> Dict[str, Any]:
if tool_input is None:
return {}
if not isinstance(tool_input, str) or not tool_input.strip():
raise Exception(
"Tool input must be a valid dictionary in JSON or Python literal format"
)
# Attempt 1: Parse as JSON
try:
ast.literal_eval(tool_input)
return tool_input
except Exception:
# Clean and ensure the string is properly enclosed in braces
tool_input = tool_input.strip()
if not tool_input.startswith("{"):
tool_input = "{" + tool_input
if not tool_input.endswith("}"):
tool_input += "}"
arguments = json.loads(tool_input)
if isinstance(arguments, dict):
return arguments
except (JSONDecodeError, TypeError):
pass # Continue to the next parsing attempt
# Manually split the input into key-value pairs
entries = tool_input.strip("{} ").split(",")
formatted_entries = []
# Attempt 2: Parse as Python literal
try:
arguments = ast.literal_eval(tool_input)
if isinstance(arguments, dict):
return arguments
except (ValueError, SyntaxError):
repaired_input = repair_json(tool_input)
pass # Continue to the next parsing attempt
for entry in entries:
if ":" not in entry:
continue # Skip malformed entries
key, value = entry.split(":", 1)
# Attempt 3: Parse as JSON5
try:
arguments = json5.loads(tool_input)
if isinstance(arguments, dict):
return arguments
except (JSONDecodeError, ValueError, TypeError):
pass # Continue to the next parsing attempt
# Remove extraneous white spaces and quotes, replace single quotes
key = key.strip().strip('"').replace("'", '"')
value = value.strip()
# Attempt 4: Repair JSON
try:
repaired_input = str(repair_json(tool_input, skip_json_loads=True))
self._printer.print(
content=f"Repaired JSON: {repaired_input}", color="blue"
)
arguments = json.loads(repaired_input)
if isinstance(arguments, dict):
return arguments
except Exception as e:
error = f"Failed to repair JSON: {e}"
self._printer.print(content=error, color="red")
# Handle replacement of single quotes at the start and end of the value string
if value.startswith("'") and value.endswith("'"):
value = value[1:-1] # Remove single quotes
value = (
'"' + value.replace('"', '\\"') + '"'
) # Re-encapsulate with double quotes
elif value.isdigit(): # Check if value is a digit, hence integer
value = value
elif value.lower() in [
"true",
"false",
]: # Check for boolean and null values
value = value.lower().capitalize()
elif value.lower() == "null":
value = "None"
else:
# Assume the value is a string and needs quotes
value = '"' + value.replace('"', '\\"') + '"'
error_message = (
"Tool input must be a valid dictionary in JSON or Python literal format"
)
self._emit_validate_input_error(error_message)
# If all parsing attempts fail, raise an error
raise Exception(error_message)
# Rebuild the entry with proper quoting
formatted_entry = f'"{key}": {value}'
formatted_entries.append(formatted_entry)
def _emit_validate_input_error(self, final_error: str):
tool_selection_data = {
"agent_key": getattr(self.agent, "key", None) if self.agent else None,
"agent_role": getattr(self.agent, "role", None) if self.agent else None,
"tool_name": self.action.tool,
"tool_args": str(self.action.tool_input),
"tool_class": self.__class__.__name__,
"agent": self.agent, # Adding agent for fingerprint extraction
}
# Reconstruct the JSON string
new_json_string = "{" + ", ".join(formatted_entries) + "}"
return new_json_string
# Include fingerprint context if available
if self.fingerprint_context:
tool_selection_data.update(self.fingerprint_context)
def on_tool_error(self, tool: Any, tool_calling: ToolCalling, e: Exception) -> None:
event_data = self._prepare_event_data(tool, tool_calling)
events.emit(
source=self, event=ToolUsageError(**{**event_data, "error": str(e)})
crewai_event_bus.emit(
self,
ToolValidateInputErrorEvent(**tool_selection_data, error=final_error),
)
def on_tool_error(
self,
tool: Any,
tool_calling: Union[ToolCalling, InstructorToolCalling],
e: Exception,
) -> None:
event_data = self._prepare_event_data(tool, tool_calling)
crewai_event_bus.emit(self, ToolUsageErrorEvent(**{**event_data, "error": e}))
def on_tool_use_finished(
self, tool: Any, tool_calling: ToolCalling, from_cache: bool, started_at: float
self,
tool: Any,
tool_calling: Union[ToolCalling, InstructorToolCalling],
from_cache: bool,
started_at: float,
result: Any,
) -> None:
finished_at = time.time()
event_data = self._prepare_event_data(tool, tool_calling)
@@ -464,17 +550,75 @@ class ToolUsage:
"started_at": datetime.datetime.fromtimestamp(started_at),
"finished_at": datetime.datetime.fromtimestamp(finished_at),
"from_cache": from_cache,
"output": result,
}
)
events.emit(source=self, event=ToolUsageFinished(**event_data))
crewai_event_bus.emit(self, ToolUsageFinishedEvent(**event_data))
def _prepare_event_data(self, tool: Any, tool_calling: ToolCalling) -> dict:
return {
"agent_key": self.agent.key,
"agent_role": (self.agent._original_role or self.agent.role),
def _prepare_event_data(
self, tool: Any, tool_calling: Union[ToolCalling, InstructorToolCalling]
) -> dict:
event_data = {
"run_attempts": self._run_attempts,
"delegations": self.task.delegations,
"delegations": self.task.delegations if self.task else 0,
"tool_name": tool.name,
"tool_args": tool_calling.arguments,
"tool_class": tool.__class__.__name__,
"agent_key": (
getattr(self.agent, "key", "unknown") if self.agent else "unknown"
),
"agent_role": (
getattr(self.agent, "_original_role", None)
or getattr(self.agent, "role", "unknown")
if self.agent
else "unknown"
),
}
# Include fingerprint context if available
if self.fingerprint_context:
event_data.update(self.fingerprint_context)
return event_data
def _add_fingerprint_metadata(self, arguments: dict) -> dict:
"""Add fingerprint metadata to tool arguments if available.
Args:
arguments: The original tool arguments
Returns:
Updated arguments dictionary with fingerprint metadata
"""
# Create a shallow copy to avoid modifying the original
arguments = arguments.copy()
# Add security metadata under a designated key
if "security_context" not in arguments:
arguments["security_context"] = {}
security_context = arguments["security_context"]
# Add agent fingerprint if available
if self.agent and hasattr(self.agent, "security_config"):
security_config = getattr(self.agent, "security_config", None)
if security_config and hasattr(security_config, "fingerprint"):
try:
security_context["agent_fingerprint"] = (
security_config.fingerprint.to_dict()
)
except AttributeError:
pass
# Add task fingerprint if available
if self.task and hasattr(self.task, "security_config"):
security_config = getattr(self.task, "security_config", None)
if security_config and hasattr(security_config, "fingerprint"):
try:
security_context["task_fingerprint"] = (
security_config.fingerprint.to_dict()
)
except AttributeError:
pass
return arguments
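A standalone sketch of the same four-stage cascade used by _validate_tool_input above, assuming the json5 and json_repair packages this module already relies on (the function name and the sample inputs are illustrative):

import ast
import json
from json import JSONDecodeError

import json5
from json_repair import repair_json


def parse_tool_input(tool_input: str) -> dict:
    # Attempt 1: strict JSON.
    try:
        parsed = json.loads(tool_input)
        if isinstance(parsed, dict):
            return parsed
    except (JSONDecodeError, TypeError):
        pass
    # Attempt 2: Python literal (tolerates single quotes, True/None, ...).
    try:
        parsed = ast.literal_eval(tool_input)
        if isinstance(parsed, dict):
            return parsed
    except (ValueError, SyntaxError):
        pass
    # Attempt 3: JSON5 (tolerates trailing commas, unquoted keys, ...).
    try:
        parsed = json5.loads(tool_input)
        if isinstance(parsed, dict):
            return parsed
    except (JSONDecodeError, ValueError, TypeError):
        pass
    # Attempt 4: mechanical repair of almost-JSON, then strict JSON again.
    parsed = json.loads(str(repair_json(tool_input, skip_json_loads=True)))
    if isinstance(parsed, dict):
        return parsed
    raise ValueError("Tool input must be a valid dictionary")


parse_tool_input('{"query": "weather", "limit": 5}')  # strict JSON
parse_tool_input("{'query': 'weather', 'limit': 5}")  # Python literal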

View File

@@ -1,24 +0,0 @@
from datetime import datetime
from typing import Any, Dict
from pydantic import BaseModel
class ToolUsageEvent(BaseModel):
agent_key: str
agent_role: str
tool_name: str
tool_args: Dict[str, Any]
tool_class: str
run_attempts: int | None = None
delegations: int | None = None
class ToolUsageFinished(ToolUsageEvent):
started_at: datetime
finished_at: datetime
from_cache: bool = False
class ToolUsageError(ToolUsageEvent):
error: str

View File

@@ -9,13 +9,13 @@
"task": "\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:",
"memory": "\n\n# Useful context: \n{memory}",
"role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
"tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple python dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
"no_tools": "\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
"final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
"tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
"no_tools": "\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. When responding, I must use the following format:\n\n```\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n```\nThis Thought/Action/Action Input/Result can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
"final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\n```\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n```",
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nHere is the expected format I must follow:\n\n```\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n This Thought/Action/Action Input/Result process can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
"task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
"expected_output": "\nThis is the expect criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
"expected_output": "\nThis is the expected criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
"human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
"getting_input": "This is the agent's final answer: {final_answer}\n\n",
"summarizer_system_message": "You are a helpful assistant that summarizes text.",
@@ -23,24 +23,30 @@
"summary": "This is a summary of our conversation so far:\n{merged_summary}",
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
"human_feedback_classification": "Determine if the following feedback indicates that the user is satisfied or if further changes are needed. Respond with 'True' if further changes are needed, or 'False' if the user is satisfied. **Important** Do not include any additional commentary outside of your 'True' or 'False' response.\n\nFeedback: \"{feedback}\""
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
"feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary.",
"lite_agent_system_prompt_with_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
"lite_agent_system_prompt_without_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"lite_agent_response_format": "\nIMPORTANT: Your final answer MUST contain all the information requested in the following format: {response_format}\n\nIMPORTANT: Ensure the final output does not include any code block markers like ```json or ```python."
},
"errors": {
"force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",
"force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",
"force_final_answer": "Now it's time you MUST give your absolute best final answer. You'll ignore all previous instructions, stop using any tools, and just return your absolute BEST Final answer.",
"agent_tool_unexisting_coworker": "\nError executing tool. coworker mentioned not found, it must be one of the following options:\n{coworkers}\n",
"task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",
"tool_usage_error": "I encountered an error: {error}",
"tool_arguments_error": "Error: the Action Input is not a valid key, value dictionary.",
"wrong_tool_name": "You tried to use the tool {tool}, but it doesn't exist. You must use one of the following tools, use one at time: {tools}.",
"tool_usage_exception": "I encountered an error while trying to use the tool. This was the error: {error}.\n Tool {tool} accepts these inputs: {tool_inputs}"
"tool_usage_exception": "I encountered an error while trying to use the tool. This was the error: {error}.\n Tool {tool} accepts these inputs: {tool_inputs}",
"agent_tool_execution_error": "Error executing task with agent '{agent_role}'. Error: {error}",
"validation_error": "### Previous attempt failed validation: {guardrail_result_error}\n\n\n### Previous result:\n{task_output}\n\n\nTry again, making sure to address the validation error."
},
"tools": {
"delegate_work": "Delegate a specific task to one of the following coworkers: {coworkers}\nThe input to this tool should be the coworker, the task you want them to do, and ALL necessary context to execute the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",
"ask_question": "Ask a specific question to one of the following coworkers: {coworkers}\nThe input to this tool should be the coworker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolute everything you know, don't reference things but instead explain them.",
"delegate_work": "Delegate a specific task to one of the following coworkers: {coworkers}\nThe input to this tool should be the coworker, the task you want them to do, and ALL necessary context to execute the task, they know nothing about the task, so share absolutely everything you know, don't reference things but instead explain them.",
"ask_question": "Ask a specific question to one of the following coworkers: {coworkers}\nThe input to this tool should be the coworker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolutely everything you know, don't reference things but instead explain them.",
"add_image": {
"name": "Add image to content",
"description": "See image to understand it's content, you can optionally ask a question about the image",
"description": "See image to understand its content, you can optionally ask a question about the image",
"default_action": "Please provide a detailed description of this image, including all visual elements, context, and any notable details you can observe."
}
}
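These templates are consumed through the I18N helper used throughout this diff (self._i18n.slice(...), self._i18n.errors(...)); a minimal sketch of rendering one slice, with illustrative tool metadata:

from crewai.utilities import I18N

i18n = I18N()
prompt = i18n.slice("tools").format(
    tools='search: Searches the web, args: {"query": {"type": "string"}}',
    tool_names="search",
)
print(prompt)  # the ReAct-style instructions with the tool list substituted in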

View File

@@ -0,0 +1,40 @@
from typing import List
from pydantic import BaseModel, Field
class ChatInputField(BaseModel):
"""
Represents a single required input for the crew, with a name and short description.
Example:
{
"name": "topic",
"description": "The topic to focus on for the conversation"
}
"""
name: str = Field(..., description="The name of the input field")
description: str = Field(..., description="A short description of the input field")
class ChatInputs(BaseModel):
"""
Holds a high-level crew_description plus a list of ChatInputFields.
Example:
{
"crew_name": "topic-based-qa",
"crew_description": "Use this crew for topic-based Q&A",
"inputs": [
{"name": "topic", "description": "The topic to focus on"},
{"name": "username", "description": "Name of the user"},
]
}
"""
crew_name: str = Field(..., description="The name of the crew")
crew_description: str = Field(
..., description="A description of the crew's purpose"
)
inputs: List[ChatInputField] = Field(
default_factory=list, description="A list of input fields for the crew"
)
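A quick construction sketch for these models (field values are illustrative):

chat_inputs = ChatInputs(
    crew_name="topic-based-qa",
    crew_description="Use this crew for topic-based Q&A",
    inputs=[
        ChatInputField(name="topic", description="The topic to focus on"),
        ChatInputField(name="username", description="Name of the user"),
    ],
)
print(chat_inputs.model_dump_json(indent=2))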

View File

@@ -0,0 +1,431 @@
import json
import re
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
from crewai.agents.parser import (
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
AgentAction,
AgentFinish,
CrewAgentParser,
OutputParserException,
)
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.tools import BaseTool as CrewAITool
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.tools.tool_types import ToolResult
from crewai.utilities import I18N, Printer
from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException,
)
def parse_tools(tools: List[BaseTool]) -> List[CrewStructuredTool]:
"""Parse tools to be used for the task."""
tools_list = []
for tool in tools:
if isinstance(tool, CrewAITool):
tools_list.append(tool.to_structured_tool())
else:
raise ValueError("Tool is not a CrewStructuredTool or BaseTool")
return tools_list
def get_tool_names(tools: Sequence[Union[CrewStructuredTool, BaseTool]]) -> str:
"""Get the names of the tools."""
return ", ".join([t.name for t in tools])
def render_text_description_and_args(
tools: Sequence[Union[CrewStructuredTool, BaseTool]],
) -> str:
"""Render the tool name, description, and args in plain text.
search: This tool is used for search, args: {"query": {"type": "string"}}
calculator: This tool is used for math, \
args: {"expression": {"type": "string"}}
"""
tool_strings = []
for tool in tools:
tool_strings.append(tool.description)
return "\n".join(tool_strings)
def has_reached_max_iterations(iterations: int, max_iterations: int) -> bool:
"""Check if the maximum number of iterations has been reached."""
return iterations >= max_iterations
def handle_max_iterations_exceeded(
formatted_answer: Union[AgentAction, AgentFinish, None],
printer: Printer,
i18n: I18N,
messages: List[Dict[str, str]],
llm: Union[LLM, BaseLLM],
callbacks: List[Any],
) -> Union[AgentAction, AgentFinish]:
"""
Handles the case when the maximum number of iterations is exceeded.
Performs one more LLM call to get the final answer.
Parameters:
    formatted_answer: The last formatted answer from the agent.
    printer: Printer used to surface the forced-answer notice.
    i18n: I18N instance providing the force_final_answer message.
    messages: Conversation history the forcing prompt is appended to.
    llm: LLM used for the final call.
    callbacks: Callbacks passed through to that call.
Returns:
    The final formatted answer after exceeding max iterations.
"""
printer.print(
content="Maximum iterations reached. Requesting final answer.",
color="yellow",
)
if formatted_answer and hasattr(formatted_answer, "text"):
assistant_message = (
formatted_answer.text + f'\n{i18n.errors("force_final_answer")}'
)
else:
assistant_message = i18n.errors("force_final_answer")
messages.append(format_message_for_llm(assistant_message, role="assistant"))
# Perform one more LLM call to get the final answer
answer = llm.call(
messages,
callbacks=callbacks,
)
if answer is None or answer == "":
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError("Invalid response from LLM call - None or empty.")
formatted_answer = format_answer(answer)
# Return the formatted answer, regardless of its type
return formatted_answer
def format_message_for_llm(prompt: str, role: str = "user") -> Dict[str, str]:
prompt = prompt.rstrip()
return {"role": role, "content": prompt}
def format_answer(answer: str) -> Union[AgentAction, AgentFinish]:
"""Format a response from the LLM into an AgentAction or AgentFinish."""
try:
return CrewAgentParser.parse_text(answer)
except Exception:
# If parsing fails, return a default AgentFinish
return AgentFinish(
thought="Failed to parse LLM response",
output=answer,
text=answer,
)
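Because of that fallback, format_answer always returns one of the two parser outcomes rather than raising; a sketch of both paths (the answer strings are made up, and the comments show only the rough shape of the results):

action = format_answer(
    'Thought: I should search\n'
    'Action: search\n'
    'Action Input: {"query": "crewai"}'
)
# roughly AgentAction(tool="search", tool_input='{"query": "crewai"}', ...)

finish = format_answer("Free-form reply that matches no known format")
# AgentFinish(thought="Failed to parse LLM response", output=..., text=...)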
def enforce_rpm_limit(
request_within_rpm_limit: Optional[Callable[[], bool]] = None,
) -> None:
"""Enforce the requests per minute (RPM) limit if applicable."""
if request_within_rpm_limit:
request_within_rpm_limit()
def get_llm_response(
llm: Union[LLM, BaseLLM],
messages: List[Dict[str, str]],
callbacks: List[Any],
printer: Printer,
) -> str:
"""Call the LLM and return the response, handling any invalid responses."""
try:
answer = llm.call(
messages,
callbacks=callbacks,
)
except Exception as e:
printer.print(
content=f"Error during LLM call: {e}",
color="red",
)
raise e
if not answer:
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError("Invalid response from LLM call - None or empty.")
return answer
def process_llm_response(
answer: str, use_stop_words: bool
) -> Union[AgentAction, AgentFinish]:
"""Process the LLM response and format it into an AgentAction or AgentFinish."""
if not use_stop_words:
try:
# Preliminary parsing to check for errors.
format_answer(answer)
except OutputParserException as e:
if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
answer = answer.split("Observation:")[0].strip()
return format_answer(answer)
def handle_agent_action_core(
formatted_answer: AgentAction,
tool_result: ToolResult,
messages: Optional[List[Dict[str, str]]] = None,
step_callback: Optional[Callable] = None,
show_logs: Optional[Callable] = None,
) -> Union[AgentAction, AgentFinish]:
"""Core logic for handling agent actions and tool results.
Args:
formatted_answer: The agent's action
tool_result: The result of executing the tool
messages: Optional list of messages to append results to
step_callback: Optional callback to execute after processing
show_logs: Optional function to show logs
Returns:
Either an AgentAction or AgentFinish
"""
if step_callback:
step_callback(tool_result)
formatted_answer.text += f"\nObservation: {tool_result.result}"
formatted_answer.result = tool_result.result
if tool_result.result_as_answer:
return AgentFinish(
thought="",
output=tool_result.result,
text=formatted_answer.text,
)
if show_logs:
show_logs(formatted_answer)
if messages is not None:
messages.append({"role": "assistant", "content": tool_result.result})
return formatted_answer
def handle_unknown_error(printer: Any, exception: Exception) -> None:
"""Handle unknown errors by informing the user.
Args:
printer: Printer instance for output
exception: The exception that occurred
"""
printer.print(
content="An unknown error occurred. Please check the details below.",
color="red",
)
printer.print(
content=f"Error details: {exception}",
color="red",
)
def handle_output_parser_exception(
e: OutputParserException,
messages: List[Dict[str, str]],
iterations: int,
log_error_after: int = 3,
printer: Optional[Any] = None,
) -> AgentAction:
"""Handle OutputParserException by updating messages and formatted_answer.
Args:
e: The OutputParserException that occurred
messages: List of messages to append to
iterations: Current iteration count
log_error_after: Number of iterations after which to log errors
printer: Optional printer instance for logging
Returns:
AgentAction: A formatted answer with the error
"""
messages.append({"role": "user", "content": e.error})
formatted_answer = AgentAction(
text=e.error,
tool="",
tool_input="",
thought="",
)
if iterations > log_error_after and printer:
printer.print(
content=f"Error parsing LLM output, agent will retry: {e.error}",
color="red",
)
return formatted_answer
def is_context_length_exceeded(exception: Exception) -> bool:
"""Check if the exception is due to context length exceeding.
Args:
exception: The exception to check
Returns:
bool: True if the exception is due to context length exceeding
"""
return LLMContextLengthExceededException(str(exception))._is_context_limit_error(
str(exception)
)
def handle_context_length(
respect_context_window: bool,
printer: Any,
messages: List[Dict[str, str]],
llm: Any,
callbacks: List[Any],
i18n: Any,
) -> None:
"""Handle context length exceeded by either summarizing or raising an error.
Args:
respect_context_window: Whether to respect context window
printer: Printer instance for output
messages: List of messages to summarize
llm: LLM instance for summarization
callbacks: List of callbacks for LLM
i18n: I18N instance for messages
"""
if respect_context_window:
printer.print(
content="Context length exceeded. Summarizing content to fit the model context window.",
color="yellow",
)
summarize_messages(messages, llm, callbacks, i18n)
else:
printer.print(
content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
color="red",
)
raise SystemExit(
"Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
)
def summarize_messages(
messages: List[Dict[str, str]],
llm: Any,
callbacks: List[Any],
i18n: Any,
) -> None:
"""Summarize messages to fit within context window.
Args:
messages: List of messages to summarize
llm: LLM instance for summarization
callbacks: List of callbacks for LLM
i18n: I18N instance for messages
"""
messages_groups = []
for message in messages:
content = message["content"]
cut_size = llm.get_context_window_size()
for i in range(0, len(content), cut_size):
messages_groups.append({"content": content[i : i + cut_size]})
summarized_contents = []
for group in messages_groups:
summary = llm.call(
[
format_message_for_llm(
i18n.slice("summarizer_system_message"), role="system"
),
format_message_for_llm(
i18n.slice("summarize_instruction").format(group=group["content"]),
),
],
callbacks=callbacks,
)
summarized_contents.append({"content": str(summary)})
merged_summary = " ".join(content["content"] for content in summarized_contents)
messages.clear()
messages.append(
format_message_for_llm(
i18n.slice("summary").format(merged_summary=merged_summary)
)
)
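The summarizer first splits each message's content into context-window-sized chunks before summarizing each one; the chunking step in isolation (sizes are illustrative):

content = "x" * 10_000
cut_size = 4_096  # stand-in for llm.get_context_window_size()
groups = [
    {"content": content[i : i + cut_size]}
    for i in range(0, len(content), cut_size)
]
print([len(g["content"]) for g in groups])  # [4096, 4096, 1808]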
def show_agent_logs(
printer: Printer,
agent_role: str,
formatted_answer: Optional[Union[AgentAction, AgentFinish]] = None,
task_description: Optional[str] = None,
verbose: bool = False,
) -> None:
"""Show agent logs for both start and execution states.
Args:
printer: Printer instance for output
agent_role: Role of the agent
formatted_answer: Optional AgentAction or AgentFinish for execution logs
task_description: Optional task description for start logs
verbose: Whether to show verbose output
"""
if not verbose:
return
agent_role = agent_role.split("\n")[0]
if formatted_answer is None:
# Start logs
printer.print(
content=f"\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
)
if task_description:
printer.print(
content=f"\033[95m## Task:\033[00m \033[92m{task_description}\033[00m"
)
else:
# Execution logs
printer.print(
content=f"\n\n\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
)
if isinstance(formatted_answer, AgentAction):
thought = re.sub(r"\n+", "\n", formatted_answer.thought)
formatted_json = json.dumps(
formatted_answer.tool_input,
indent=2,
ensure_ascii=False,
)
if thought and thought != "":
printer.print(
content=f"\033[95m## Thought:\033[00m \033[92m{thought}\033[00m"
)
printer.print(
content=f"\033[95m## Using tool:\033[00m \033[92m{formatted_answer.tool}\033[00m"
)
printer.print(
content=f"\033[95m## Tool Input:\033[00m \033[92m\n{formatted_json}\033[00m"
)
printer.print(
content=f"\033[95m## Tool Output:\033[00m \033[92m\n{formatted_answer.result}\033[00m"
)
elif isinstance(formatted_answer, AgentFinish):
printer.print(
content=f"\033[95m## Final Answer:\033[00m \033[92m\n{formatted_answer.output}\033[00m\n\n"
)

View File

@@ -0,0 +1,62 @@
import re
from typing import Optional
MIN_COLLECTION_LENGTH = 3
MAX_COLLECTION_LENGTH = 63
DEFAULT_COLLECTION = "default_collection"
# Compiled regex patterns for better performance
INVALID_CHARS_PATTERN = re.compile(r"[^a-zA-Z0-9_-]")
IPV4_PATTERN = re.compile(r"^(\d{1,3}\.){3}\d{1,3}$")
def is_ipv4_pattern(name: str) -> bool:
"""
Check if a string matches an IPv4 address pattern.
Args:
name: The string to check
Returns:
True if the string matches an IPv4 pattern, False otherwise
"""
return bool(IPV4_PATTERN.match(name))
def sanitize_collection_name(name: Optional[str]) -> str:
"""
Sanitize a collection name to meet ChromaDB requirements:
1. 3-63 characters long
2. Starts and ends with alphanumeric character
3. Contains only alphanumeric characters, underscores, or hyphens
4. No consecutive periods
5. Not a valid IPv4 address
Args:
name: The original collection name to sanitize
Returns:
A sanitized collection name that meets ChromaDB requirements
"""
if not name:
return DEFAULT_COLLECTION
if is_ipv4_pattern(name):
name = f"ip_{name}"
sanitized = INVALID_CHARS_PATTERN.sub("_", name)
if not sanitized[0].isalnum():
sanitized = "a" + sanitized
if not sanitized[-1].isalnum():
sanitized = sanitized[:-1] + "z"
if len(sanitized) < MIN_COLLECTION_LENGTH:
sanitized = sanitized + "x" * (MIN_COLLECTION_LENGTH - len(sanitized))
if len(sanitized) > MAX_COLLECTION_LENGTH:
sanitized = sanitized[:MAX_COLLECTION_LENGTH]
if not sanitized[-1].isalnum():
sanitized = sanitized[:-1] + "z"
return sanitized
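A few illustrative inputs and what the rules above produce:

print(sanitize_collection_name(None))              # 'default_collection'
print(sanitize_collection_name("my collection!"))  # 'my_collectionz'
print(sanitize_collection_name("ab"))              # 'abx'
print(sanitize_collection_name("192.168.0.1"))     # 'ip_192_168_0_1'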

View File

@@ -4,3 +4,4 @@ DEFAULT_SCORE_THRESHOLD = 0.35
KNOWLEDGE_DIRECTORY = "knowledge"
MAX_LLM_RETRY = 3
MAX_FILE_NAME_LENGTH = 255
EMITTER_COLOR = "bold_blue"

View File

@@ -20,23 +20,52 @@ class ConverterError(Exception):
class Converter(OutputConverter):
"""Class that converts text into either pydantic or json."""
def to_pydantic(self, current_attempt=1):
def to_pydantic(self, current_attempt=1) -> BaseModel:
"""Convert text to pydantic."""
try:
if self.llm.supports_function_calling():
return self._create_instructor().to_pydantic()
result = self._create_instructor().to_pydantic()
else:
return self.llm.call(
response = self.llm.call(
[
{"role": "system", "content": self.instructions},
{"role": "user", "content": self.text},
]
)
try:
# Try to directly validate the response JSON
result = self.model.model_validate_json(response)
except ValidationError:
# If direct validation fails, attempt to extract valid JSON
result = handle_partial_json(response, self.model, False, None)
# Ensure result is a BaseModel instance
if not isinstance(result, BaseModel):
if isinstance(result, dict):
result = self.model.parse_obj(result)
elif isinstance(result, str):
try:
parsed = json.loads(result)
result = self.model.parse_obj(parsed)
except Exception as parse_err:
raise ConverterError(
f"Failed to convert partial JSON result into Pydantic: {parse_err}"
)
else:
raise ConverterError(
"handle_partial_json returned an unexpected type."
)
return result
except ValidationError as e:
if current_attempt < self.max_attempts:
return self.to_pydantic(current_attempt + 1)
raise ConverterError(
f"Failed to convert text into a Pydantic model due to validation error: {e}"
)
except Exception as e:
if current_attempt < self.max_attempts:
return self.to_pydantic(current_attempt + 1)
return ConverterError(
f"Failed to convert text into a pydantic model due to the following error: {e}"
raise ConverterError(
f"Failed to convert text into a Pydantic model due to error: {e}"
)
def to_json(self, current_attempt=1):
@@ -66,7 +95,6 @@ class Converter(OutputConverter):
llm=self.llm,
model=self.model,
content=self.text,
instructions=self.instructions,
)
return inst
@@ -187,10 +215,19 @@ def convert_with_instructions(
def get_conversion_instructions(model: Type[BaseModel], llm: Any) -> str:
instructions = "I'm gonna convert this raw text into valid JSON."
instructions = "Please convert the following text into valid JSON."
if llm.supports_function_calling():
model_schema = PydanticSchemaParser(model=model).get_schema()
instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
instructions += (
f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
f"The JSON must follow this schema exactly:\n```json\n{model_schema}\n```"
)
else:
model_description = generate_model_description(model)
instructions += (
f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
f"The JSON must follow this format exactly:\n{model_description}"
)
return instructions
@@ -230,9 +267,13 @@ def generate_model_description(model: Type[BaseModel]) -> str:
origin = get_origin(field_type)
args = get_args(field_type)
if origin is Union and type(None) in args:
if origin is Union or (origin is None and len(args) > 0):
# Handle both Union and the new '|' syntax
non_none_args = [arg for arg in args if arg is not type(None)]
return f"Optional[{describe_field(non_none_args[0])}]"
if len(non_none_args) == 1:
return f"Optional[{describe_field(non_none_args[0])}]"
else:
return f"Optional[Union[{', '.join(describe_field(arg) for arg in non_none_args)}]]"
elif origin is list:
return f"List[{describe_field(args[0])}]"
elif origin is dict:
@@ -241,11 +282,14 @@ def generate_model_description(model: Type[BaseModel]) -> str:
return f"Dict[{key_type}, {value_type}]"
elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
return generate_model_description(field_type)
else:
elif hasattr(field_type, "__name__"):
return field_type.__name__
else:
return str(field_type)
fields = model.__annotations__
fields = model.model_fields
field_descriptions = [
f'"{name}": {describe_field(type_)}' for name, type_ in fields.items()
f'"{name}": {describe_field(field.annotation)}'
for name, field in fields.items()
]
return "{\n " + ",\n ".join(field_descriptions) + "\n}"

View File

@@ -1,3 +1,5 @@
"""JSON encoder for handling CrewAI specific types."""
import json
from datetime import date, datetime
from decimal import Decimal
@@ -8,6 +10,7 @@ from pydantic import BaseModel
class CrewJSONEncoder(json.JSONEncoder):
"""Custom JSON encoder for CrewAI objects and special types."""
def default(self, obj):
if isinstance(obj, BaseModel):
return self._handle_pydantic_model(obj)

View File

@@ -6,9 +6,10 @@ from pydantic import BaseModel, ValidationError
from crewai.agents.parser import OutputParserException
"""Parser for converting text outputs into Pydantic models."""
class CrewPydanticOutputParser:
"""Parses the text into pydantic models"""
"""Parses text outputs into specified Pydantic models."""
pydantic_object: Type[BaseModel]

View File

@@ -1,5 +1,5 @@
import os
from typing import Any, Dict, cast
from typing import Any, Dict, Optional, cast
from chromadb import Documents, EmbeddingFunction, Embeddings
from chromadb.api.types import validate_embedding_function
@@ -14,14 +14,16 @@ class EmbeddingConfigurator:
"vertexai": self._configure_vertexai,
"google": self._configure_google,
"cohere": self._configure_cohere,
"voyageai": self._configure_voyageai,
"bedrock": self._configure_bedrock,
"huggingface": self._configure_huggingface,
"watson": self._configure_watson,
"custom": self._configure_custom,
}
def configure_embedder(
self,
embedder_config: Dict[str, Any] | None = None,
embedder_config: Optional[Dict[str, Any]] = None,
) -> EmbeddingFunction:
"""Configures and returns an embedding function based on the provided config."""
if embedder_config is None:
@@ -29,21 +31,19 @@ class EmbeddingConfigurator:
provider = embedder_config.get("provider")
config = embedder_config.get("config", {})
model_name = config.get("model")
if isinstance(provider, EmbeddingFunction):
try:
validate_embedding_function(provider)
return provider
except Exception as e:
raise ValueError(f"Invalid custom embedding function: {str(e)}")
model_name = config.get("model") if provider != "custom" else None
if provider not in self.embedding_functions:
raise Exception(
f"Unsupported embedding provider: {provider}, supported providers: {list(self.embedding_functions.keys())}"
)
return self.embedding_functions[provider](config, model_name)
embedding_function = self.embedding_functions[provider]
return (
embedding_function(config)
if provider == "custom"
else embedding_function(config, model_name)
)
@staticmethod
def _create_default_embedding_function():
@@ -64,6 +64,13 @@ class EmbeddingConfigurator:
return OpenAIEmbeddingFunction(
api_key=config.get("api_key") or os.getenv("OPENAI_API_KEY"),
model_name=model_name,
api_base=config.get("api_base", None),
api_type=config.get("api_type", None),
api_version=config.get("api_version", None),
default_headers=config.get("default_headers", None),
dimensions=config.get("dimensions", None),
deployment_id=config.get("deployment_id", None),
organization_id=config.get("organization_id", None),
)
@staticmethod
@@ -78,6 +85,10 @@ class EmbeddingConfigurator:
api_type=config.get("api_type", "azure"),
api_version=config.get("api_version"),
model_name=model_name,
default_headers=config.get("default_headers"),
dimensions=config.get("dimensions"),
deployment_id=config.get("deployment_id"),
organization_id=config.get("organization_id"),
)
@staticmethod
@@ -100,6 +111,8 @@ class EmbeddingConfigurator:
return GoogleVertexEmbeddingFunction(
model_name=model_name,
api_key=config.get("api_key"),
project_id=config.get("project_id"),
region=config.get("region"),
)
@staticmethod
@@ -111,6 +124,7 @@ class EmbeddingConfigurator:
return GoogleGenerativeAiEmbeddingFunction(
model_name=model_name,
api_key=config.get("api_key"),
task_type=config.get("task_type"),
)
@staticmethod
@@ -124,15 +138,28 @@ class EmbeddingConfigurator:
api_key=config.get("api_key"),
)
@staticmethod
def _configure_voyageai(config, model_name):
from chromadb.utils.embedding_functions.voyageai_embedding_function import (
VoyageAIEmbeddingFunction,
)
return VoyageAIEmbeddingFunction(
model_name=model_name,
api_key=config.get("api_key"),
)
@staticmethod
def _configure_bedrock(config, model_name):
from chromadb.utils.embedding_functions.amazon_bedrock_embedding_function import (
AmazonBedrockEmbeddingFunction,
)
return AmazonBedrockEmbeddingFunction(
session=config.get("session"),
)
# Allow custom model_name override with backwards compatibility
kwargs = {"session": config.get("session")}
if model_name is not None:
kwargs["model_name"] = model_name
return AmazonBedrockEmbeddingFunction(**kwargs)
@staticmethod
def _configure_huggingface(config, model_name):
@@ -182,3 +209,28 @@ class EmbeddingConfigurator:
raise e
return WatsonEmbeddingFunction()
@staticmethod
def _configure_custom(config):
custom_embedder = config.get("embedder")
if isinstance(custom_embedder, EmbeddingFunction):
try:
validate_embedding_function(custom_embedder)
return custom_embedder
except Exception as e:
raise ValueError(f"Invalid custom embedding function: {str(e)}")
elif callable(custom_embedder):
try:
instance = custom_embedder()
if isinstance(instance, EmbeddingFunction):
validate_embedding_function(instance)
return instance
raise ValueError(
"Custom embedder does not create an EmbeddingFunction instance"
)
except Exception as e:
raise ValueError(f"Error instantiating custom embedder: {str(e)}")
else:
raise ValueError(
"Custom embedder must be an instance of `EmbeddingFunction` or a callable that creates one"
)
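With the new "custom" provider, an embedder can be supplied directly in the config; a sketch assuming chromadb's EmbeddingFunction protocol (MyEmbedder and its fixed-size vectors are illustrative stubs):

from chromadb import Documents, EmbeddingFunction, Embeddings


class MyEmbedder(EmbeddingFunction):
    def __call__(self, input: Documents) -> Embeddings:
        # Return one stub vector per input document.
        return [[0.0, 0.0, 0.0] for _ in input]


embedder_config = {
    "provider": "custom",
    "config": {"embedder": MyEmbedder()},
}
embedding_fn = EmbeddingConfigurator().configure_embedder(embedder_config)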

View File

@@ -0,0 +1,39 @@
"""Error message definitions for CrewAI database operations."""
from typing import Optional
class DatabaseOperationError(Exception):
"""Base exception class for database operation errors."""
def __init__(self, message: str, original_error: Optional[Exception] = None):
"""Initialize the database operation error.
Args:
message: The error message to display
original_error: The original exception that caused this error, if any
"""
super().__init__(message)
self.original_error = original_error
class DatabaseError:
"""Standardized error message templates for database operations."""
INIT_ERROR: str = "Database initialization error: {}"
SAVE_ERROR: str = "Error saving task outputs: {}"
UPDATE_ERROR: str = "Error updating task outputs: {}"
LOAD_ERROR: str = "Error loading task outputs: {}"
DELETE_ERROR: str = "Error deleting task outputs: {}"
@classmethod
def format_error(cls, template: str, error: Exception) -> str:
"""Format an error message with the given template and error.
Args:
template: The error message template to use
error: The exception to format into the template
Returns:
The formatted error message
"""
return template.format(str(error))
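Intended usage of these templates when wrapping a low-level failure (the sqlite error is illustrative):

import sqlite3

try:
    raise sqlite3.OperationalError("database is locked")
except Exception as e:
    message = DatabaseError.format_error(DatabaseError.SAVE_ERROR, e)
    raise DatabaseOperationError(message, original_error=e)
# DatabaseOperationError: Error saving task outputs: database is locked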

View File

@@ -1,11 +1,12 @@
from collections import defaultdict
from pydantic import BaseModel, Field
from pydantic import BaseModel, Field, InstanceOf
from rich.box import HEAVY_EDGE
from rich.console import Console
from rich.table import Table
from crewai.agent import Agent
from crewai.llm import BaseLLM
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry import Telemetry
@@ -23,7 +24,7 @@ class CrewEvaluator:
Attributes:
crew (Crew): The crew of agents to evaluate.
openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
eval_llm (BaseLLM): Language model instance to use for evaluations
tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
iteration (int): The current iteration of the evaluation.
"""
@@ -32,9 +33,9 @@ class CrewEvaluator:
run_execution_times: defaultdict = defaultdict(list)
iteration: int = 0
def __init__(self, crew, openai_model_name: str):
def __init__(self, crew, eval_llm: InstanceOf[BaseLLM]):
self.crew = crew
self.openai_model_name = openai_model_name
self.llm = eval_llm
self._telemetry = Telemetry()
self._setup_for_evaluating()
@@ -51,7 +52,7 @@ class CrewEvaluator:
),
backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
verbose=False,
llm=self.openai_model_name,
llm=self.llm,
)
def _evaluation_task(
@@ -180,12 +181,12 @@ class CrewEvaluator:
self._test_result_span = self._telemetry.individual_test_result_span(
self.crew,
evaluation_result.pydantic.quality,
current_task._execution_time,
self.openai_model_name,
current_task.execution_duration,
self.llm.model,
)
self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
self.run_execution_times[self.iteration].append(
current_task._execution_time
current_task.execution_duration
)
else:
raise ValueError("Evaluation result is not in the expected format")

View File

@@ -3,19 +3,9 @@ from typing import List
from pydantic import BaseModel, Field
from crewai.utilities import Converter
from crewai.utilities.events import TaskEvaluationEvent, crewai_event_bus
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
agentops = None
try:
from agentops import track_agent # type: ignore
except ImportError:
def track_agent(name):
def noop(f):
return f
return noop
class Entity(BaseModel):
name: str = Field(description="The name of the entity.")
@@ -48,12 +38,15 @@ class TrainingTaskEvaluation(BaseModel):
)
@track_agent(name="Task Evaluator")
class TaskEvaluator:
def __init__(self, original_agent):
self.llm = original_agent.llm
self.original_agent = original_agent
def evaluate(self, task, output) -> TaskEvaluation:
crewai_event_bus.emit(
self, TaskEvaluationEvent(evaluation_type="task_evaluation", task=task)
)
evaluation_query = (
f"Assess the quality of the task completed based on the description, expected output, and actual results.\n\n"
f"Task Description:\n{task.description}\n\n"
@@ -90,15 +83,39 @@ class TaskEvaluator:
- training_data (dict): The training data to be evaluated.
- agent_id (str): The ID of the agent.
"""
crewai_event_bus.emit(
self, TaskEvaluationEvent(evaluation_type="training_data_evaluation")
)
output_training_data = training_data[agent_id]
final_aggregated_data = ""
for _, data in output_training_data.items():
for iteration, data in output_training_data.items():
improved_output = data.get("improved_output")
initial_output = data.get("initial_output")
human_feedback = data.get("human_feedback")
if not all([improved_output, initial_output, human_feedback]):
missing_fields = [
field
for field in ["improved_output", "initial_output", "human_feedback"]
if not data.get(field)
]
error_msg = (
f"Critical training data error: Missing fields ({', '.join(missing_fields)}) "
f"for agent {agent_id} in iteration {iteration}.\n"
"This indicates a broken training process. "
"Cannot proceed with evaluation.\n"
"Please check your training implementation."
)
raise ValueError(error_msg)
final_aggregated_data += (
f"Initial Output:\n{data['initial_output']}\n\n"
f"Human Feedback:\n{data['human_feedback']}\n\n"
f"Improved Output:\n{data['improved_output']}\n\n"
f"Iteration: {iteration}\n"
f"Initial Output:\n{initial_output}\n\n"
f"Human Feedback:\n{human_feedback}\n\n"
f"Improved Output:\n{improved_output}\n\n"
"------------------------------------------------\n\n"
)
evaluation_query = (

View File

@@ -1,44 +0,0 @@
from functools import wraps
from typing import Any, Callable, Dict, Generic, List, Type, TypeVar
from pydantic import BaseModel
T = TypeVar("T")
EVT = TypeVar("EVT", bound=BaseModel)
class Emitter(Generic[T, EVT]):
_listeners: Dict[Type[EVT], List[Callable]] = {}
def on(self, event_type: Type[EVT]):
def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
self._listeners.setdefault(event_type, []).append(wrapper)
return wrapper
return decorator
def emit(self, source: T, event: EVT) -> None:
event_type = type(event)
for func in self._listeners.get(event_type, []):
func(source, event)
default_emitter = Emitter[Any, BaseModel]()
def emit(source: Any, event: BaseModel, raise_on_error: bool = False) -> None:
try:
default_emitter.emit(source, event)
except Exception as e:
if raise_on_error:
raise e
else:
print(f"Error emitting event: {e}")
def on(event_type: Type[BaseModel]) -> Callable:
return default_emitter.on(event_type)

View File

@@ -0,0 +1,52 @@
from .crew_events import (
CrewKickoffStartedEvent,
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewTrainStartedEvent,
CrewTrainCompletedEvent,
CrewTrainFailedEvent,
CrewTestStartedEvent,
CrewTestCompletedEvent,
CrewTestFailedEvent,
)
from .agent_events import (
AgentExecutionStartedEvent,
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
)
from .task_events import (
TaskStartedEvent,
TaskCompletedEvent,
TaskFailedEvent,
TaskEvaluationEvent,
)
from .flow_events import (
FlowCreatedEvent,
FlowStartedEvent,
FlowFinishedEvent,
FlowPlotEvent,
MethodExecutionStartedEvent,
MethodExecutionFinishedEvent,
MethodExecutionFailedEvent,
)
from .crewai_event_bus import CrewAIEventsBus, crewai_event_bus
from .tool_usage_events import (
ToolUsageFinishedEvent,
ToolUsageErrorEvent,
ToolUsageStartedEvent,
ToolExecutionErrorEvent,
ToolSelectionErrorEvent,
ToolUsageEvent,
ToolValidateInputErrorEvent,
)
from .llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMCallType,
LLMStreamChunkEvent,
)
# events
from .event_listener import EventListener
from .third_party.agentops_listener import agentops_listener
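A minimal subscription sketch against the re-exported event bus (the handler body is illustrative):

from crewai.utilities.events import CrewKickoffStartedEvent, crewai_event_bus


@crewai_event_bus.on(CrewKickoffStartedEvent)
def on_kickoff(source, event):
    print(f"Crew {event.crew_name!r} started with inputs: {event.inputs}")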

View File

@@ -0,0 +1,104 @@
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from .base_events import BaseEvent
if TYPE_CHECKING:
from crewai.agents.agent_builder.base_agent import BaseAgent
class AgentExecutionStartedEvent(BaseEvent):
"""Event emitted when an agent starts executing a task"""
agent: BaseAgent
task: Any
tools: Optional[Sequence[Union[BaseTool, CrewStructuredTool]]]
task_prompt: str
type: str = "agent_execution_started"
model_config = {"arbitrary_types_allowed": True}
def __init__(self, **data):
super().__init__(**data)
# Set fingerprint data from the agent
if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
self.source_fingerprint = self.agent.fingerprint.uuid_str
self.source_type = "agent"
if (
hasattr(self.agent.fingerprint, "metadata")
and self.agent.fingerprint.metadata
):
self.fingerprint_metadata = self.agent.fingerprint.metadata
class AgentExecutionCompletedEvent(BaseEvent):
"""Event emitted when an agent completes executing a task"""
agent: BaseAgent
task: Any
output: str
type: str = "agent_execution_completed"
def __init__(self, **data):
super().__init__(**data)
# Set fingerprint data from the agent
if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
self.source_fingerprint = self.agent.fingerprint.uuid_str
self.source_type = "agent"
if (
hasattr(self.agent.fingerprint, "metadata")
and self.agent.fingerprint.metadata
):
self.fingerprint_metadata = self.agent.fingerprint.metadata
class AgentExecutionErrorEvent(BaseEvent):
"""Event emitted when an agent encounters an error during execution"""
agent: BaseAgent
task: Any
error: str
type: str = "agent_execution_error"
def __init__(self, **data):
super().__init__(**data)
# Set fingerprint data from the agent
if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
self.source_fingerprint = self.agent.fingerprint.uuid_str
self.source_type = "agent"
if (
hasattr(self.agent.fingerprint, "metadata")
and self.agent.fingerprint.metadata
):
self.fingerprint_metadata = self.agent.fingerprint.metadata
# New event classes for LiteAgent
class LiteAgentExecutionStartedEvent(BaseEvent):
"""Event emitted when a LiteAgent starts executing"""
agent_info: Dict[str, Any]
tools: Optional[Sequence[Union[BaseTool, CrewStructuredTool]]]
messages: Union[str, List[Dict[str, str]]]
type: str = "lite_agent_execution_started"
model_config = {"arbitrary_types_allowed": True}
class LiteAgentExecutionCompletedEvent(BaseEvent):
"""Event emitted when a LiteAgent completes execution"""
agent_info: Dict[str, Any]
output: str
type: str = "lite_agent_execution_completed"
class LiteAgentExecutionErrorEvent(BaseEvent):
"""Event emitted when a LiteAgent encounters an error during execution"""
agent_info: Dict[str, Any]
error: str
type: str = "lite_agent_execution_error"

View File

@@ -0,0 +1,16 @@
from abc import ABC, abstractmethod
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus, crewai_event_bus
class BaseEventListener(ABC):
verbose: bool = False
def __init__(self):
super().__init__()
self.setup_listeners(crewai_event_bus)
@abstractmethod
def setup_listeners(self, crewai_event_bus: CrewAIEventsBus):
pass
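
Because `__init__` calls `setup_listeners` with the global bus, a subclass only has to register its handlers; instantiating it once is enough to wire them up. A minimal sketch:

from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent

class MyListener(BaseEventListener):
    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_kickoff(source, event: CrewKickoffStartedEvent):
            print(f"crew '{event.crew_name}' kicked off")

listener = MyListener()  # registers on_kickoff on the global bus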

View File

@@ -0,0 +1,28 @@
from datetime import datetime
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field
from crewai.utilities.serialization import to_serializable
class BaseEvent(BaseModel):
"""Base class for all events"""
timestamp: datetime = Field(default_factory=datetime.now)
type: str
source_fingerprint: Optional[str] = None # UUID string of the source entity
source_type: Optional[str] = None # "agent", "task", "crew"
fingerprint_metadata: Optional[Dict[str, Any]] = None # Any relevant metadata
def to_json(self, exclude: set[str] | None = None):
"""
Converts the event to a JSON-serializable dictionary.
Args:
exclude (set[str], optional): Set of keys to exclude from the result. Defaults to None.
Returns:
dict: A JSON-serializable dictionary.
"""
return to_serializable(self, exclude=exclude)
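
Any new event is a small pydantic subclass; `to_json` serializes it and `exclude` trims keys from the result. A sketch with a hypothetical `CacheHitEvent` (not part of this diff):

from crewai.utilities.events.base_events import BaseEvent

class CacheHitEvent(BaseEvent):  # hypothetical event, for illustration only
    key: str
    hit: bool
    type: str = "cache_hit"

e = CacheHitEvent(key="users:42", hit=True)
print(e.to_json(exclude={"fingerprint_metadata"}))  # serialized without the excluded key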

View File

@@ -0,0 +1,102 @@
from typing import TYPE_CHECKING, Any, Dict, Optional, Union
from crewai.utilities.events.base_events import BaseEvent
if TYPE_CHECKING:
from crewai.crew import Crew
else:
Crew = Any
class CrewBaseEvent(BaseEvent):
"""Base class for crew events with fingerprint handling"""
crew_name: Optional[str]
crew: Optional[Crew] = None
def __init__(self, **data):
super().__init__(**data)
self.set_crew_fingerprint()
def set_crew_fingerprint(self) -> None:
if self.crew and hasattr(self.crew, "fingerprint") and self.crew.fingerprint:
self.source_fingerprint = self.crew.fingerprint.uuid_str
self.source_type = "crew"
if (
hasattr(self.crew.fingerprint, "metadata")
and self.crew.fingerprint.metadata
):
self.fingerprint_metadata = self.crew.fingerprint.metadata
def to_json(self, exclude: set[str] | None = None):
if exclude is None:
exclude = set()
exclude.add("crew")
return super().to_json(exclude=exclude)
class CrewKickoffStartedEvent(CrewBaseEvent):
"""Event emitted when a crew starts execution"""
inputs: Optional[Dict[str, Any]]
type: str = "crew_kickoff_started"
class CrewKickoffCompletedEvent(CrewBaseEvent):
"""Event emitted when a crew completes execution"""
output: Any
type: str = "crew_kickoff_completed"
class CrewKickoffFailedEvent(CrewBaseEvent):
"""Event emitted when a crew fails to complete execution"""
error: str
type: str = "crew_kickoff_failed"
class CrewTrainStartedEvent(CrewBaseEvent):
"""Event emitted when a crew starts training"""
n_iterations: int
filename: str
inputs: Optional[Dict[str, Any]]
type: str = "crew_train_started"
class CrewTrainCompletedEvent(CrewBaseEvent):
"""Event emitted when a crew completes training"""
n_iterations: int
filename: str
type: str = "crew_train_completed"
class CrewTrainFailedEvent(CrewBaseEvent):
"""Event emitted when a crew fails to complete training"""
error: str
type: str = "crew_train_failed"
class CrewTestStartedEvent(CrewBaseEvent):
"""Event emitted when a crew starts testing"""
n_iterations: int
eval_llm: Optional[Union[str, Any]]
inputs: Optional[Dict[str, Any]]
type: str = "crew_test_started"
class CrewTestCompletedEvent(CrewBaseEvent):
"""Event emitted when a crew completes testing"""
type: str = "crew_test_completed"
class CrewTestFailedEvent(CrewBaseEvent):
"""Event emitted when a crew fails to complete testing"""
error: str
type: str = "crew_test_failed"

View File

@@ -0,0 +1,110 @@
import threading
from contextlib import contextmanager
from typing import Any, Callable, Dict, List, Type, TypeVar, cast
from blinker import Signal
from crewai.utilities.events.base_events import BaseEvent
from crewai.utilities.events.event_types import EventTypes
EventT = TypeVar("EventT", bound=BaseEvent)
class CrewAIEventsBus:
"""
A singleton event bus that uses blinker signals for event handling.
Allows both internal (Flow/Crew) and external event handling.
"""
_instance = None
_lock = threading.Lock()
def __new__(cls):
if cls._instance is None:
with cls._lock:
if cls._instance is None: # prevent race condition
cls._instance = super(CrewAIEventsBus, cls).__new__(cls)
cls._instance._initialize()
return cls._instance
def _initialize(self) -> None:
"""Initialize the event bus internal state"""
self._signal = Signal("crewai_event_bus")
self._handlers: Dict[Type[BaseEvent], List[Callable]] = {}
def on(
self, event_type: Type[EventT]
) -> Callable[[Callable[[Any, EventT], None]], Callable[[Any, EventT], None]]:
"""
Decorator to register an event handler for a specific event type.
Usage:
@crewai_event_bus.on(AgentExecutionCompletedEvent)
def on_agent_execution_completed(
source: Any, event: AgentExecutionCompletedEvent
):
print(f"👍 Agent '{event.agent}' completed task")
print(f" Output: {event.output}")
"""
def decorator(
handler: Callable[[Any, EventT], None],
) -> Callable[[Any, EventT], None]:
if event_type not in self._handlers:
self._handlers[event_type] = []
self._handlers[event_type].append(
cast(Callable[[Any, EventT], None], handler)
)
return handler
return decorator
def emit(self, source: Any, event: BaseEvent) -> None:
"""
Emit an event to all registered handlers
Args:
source: The object emitting the event
event: The event instance to emit
"""
for event_type, handlers in self._handlers.items():
if isinstance(event, event_type):
for handler in handlers:
handler(source, event)
self._signal.send(source, event=event)
def register_handler(
self, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
) -> None:
"""Register an event handler for a specific event type"""
if event_type not in self._handlers:
self._handlers[event_type] = []
self._handlers[event_type].append(
cast(Callable[[Any, EventTypes], None], handler)
)
@contextmanager
def scoped_handlers(self):
"""
Context manager for temporary event handling scope.
Useful for testing or temporary event handling.
Usage:
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffStarted)
def temp_handler(source, event):
print("Temporary handler")
# Do stuff...
# Handlers are cleared after the context
"""
previous_handlers = self._handlers.copy()
self._handlers.clear()
try:
yield
finally:
self._handlers = previous_handlers
# Global instance
crewai_event_bus = CrewAIEventsBus()
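
The `scoped_handlers` context manager is the intended seam for tests: inside the block only the temporary handlers fire, and the previous registry is restored on exit. A sketch:

from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent

received = []
with crewai_event_bus.scoped_handlers():
    @crewai_event_bus.on(CrewKickoffStartedEvent)
    def capture(source, event):
        received.append(event)

    crewai_event_bus.emit(None, CrewKickoffStartedEvent(crew_name="c", inputs=None))

assert len(received) == 1  # handler fired; it is unregistered once the block exits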

View File

@@ -0,0 +1,346 @@
from io import StringIO
from typing import Any, Dict
from pydantic import Field, PrivateAttr
from crewai.task import Task
from crewai.telemetry.telemetry import Telemetry
from crewai.utilities import Logger
from crewai.utilities.constants import EMITTER_COLOR
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMStreamChunkEvent,
)
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
from .agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionStartedEvent,
LiteAgentExecutionCompletedEvent,
LiteAgentExecutionErrorEvent,
LiteAgentExecutionStartedEvent,
)
from .crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewKickoffStartedEvent,
CrewTestCompletedEvent,
CrewTestFailedEvent,
CrewTestStartedEvent,
CrewTrainCompletedEvent,
CrewTrainFailedEvent,
CrewTrainStartedEvent,
)
from .flow_events import (
FlowCreatedEvent,
FlowFinishedEvent,
FlowStartedEvent,
MethodExecutionFailedEvent,
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
)
from .task_events import TaskCompletedEvent, TaskFailedEvent, TaskStartedEvent
from .tool_usage_events import (
ToolUsageErrorEvent,
ToolUsageFinishedEvent,
ToolUsageStartedEvent,
)
class EventListener(BaseEventListener):
_instance = None
_telemetry: Telemetry = PrivateAttr(default_factory=lambda: Telemetry())
logger = Logger(verbose=True, default_color=EMITTER_COLOR)
execution_spans: Dict[Task, Any] = Field(default_factory=dict)
next_chunk = 0
text_stream = StringIO()
def __new__(cls):
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance._initialized = False
return cls._instance
def __init__(self):
if not hasattr(self, "_initialized") or not self._initialized:
super().__init__()
self._telemetry = Telemetry()
self._telemetry.set_tracer()
self.execution_spans = {}
self._initialized = True
self.formatter = ConsoleFormatter(verbose=True)
# ----------- CREW EVENTS -----------
def setup_listeners(self, crewai_event_bus):
@crewai_event_bus.on(CrewKickoffStartedEvent)
def on_crew_started(source, event: CrewKickoffStartedEvent):
self.formatter.create_crew_tree(event.crew_name or "Crew", source.id)
self._telemetry.crew_execution_span(source, event.inputs)
@crewai_event_bus.on(CrewKickoffCompletedEvent)
def on_crew_completed(source, event: CrewKickoffCompletedEvent):
# Handle telemetry
final_string_output = event.output.raw
self._telemetry.end_crew(source, final_string_output)
self.formatter.update_crew_tree(
self.formatter.current_crew_tree,
event.crew_name or "Crew",
source.id,
"completed",
)
@crewai_event_bus.on(CrewKickoffFailedEvent)
def on_crew_failed(source, event: CrewKickoffFailedEvent):
self.formatter.update_crew_tree(
self.formatter.current_crew_tree,
event.crew_name or "Crew",
source.id,
"failed",
)
@crewai_event_bus.on(CrewTrainStartedEvent)
def on_crew_train_started(source, event: CrewTrainStartedEvent):
self.formatter.handle_crew_train_started(
event.crew_name or "Crew", str(event.timestamp)
)
@crewai_event_bus.on(CrewTrainCompletedEvent)
def on_crew_train_completed(source, event: CrewTrainCompletedEvent):
self.formatter.handle_crew_train_completed(
event.crew_name or "Crew", str(event.timestamp)
)
@crewai_event_bus.on(CrewTrainFailedEvent)
def on_crew_train_failed(source, event: CrewTrainFailedEvent):
self.formatter.handle_crew_train_failed(event.crew_name or "Crew")
# ----------- TASK EVENTS -----------
@crewai_event_bus.on(TaskStartedEvent)
def on_task_started(source, event: TaskStartedEvent):
span = self._telemetry.task_started(crew=source.agent.crew, task=source)
self.execution_spans[source] = span
self.formatter.create_task_branch(
self.formatter.current_crew_tree, source.id
)
@crewai_event_bus.on(TaskCompletedEvent)
def on_task_completed(source, event: TaskCompletedEvent):
# Handle telemetry
span = self.execution_spans.get(source)
if span:
self._telemetry.task_ended(span, source, source.agent.crew)
self.execution_spans[source] = None
self.formatter.update_task_status(
self.formatter.current_crew_tree,
source.id,
source.agent.role,
"completed",
)
@crewai_event_bus.on(TaskFailedEvent)
def on_task_failed(source, event: TaskFailedEvent):
span = self.execution_spans.get(source)
if span:
if source.agent and source.agent.crew:
self._telemetry.task_ended(span, source, source.agent.crew)
self.execution_spans[source] = None
self.formatter.update_task_status(
self.formatter.current_crew_tree,
source.id,
source.agent.role,
"failed",
)
# ----------- AGENT EVENTS -----------
@crewai_event_bus.on(AgentExecutionStartedEvent)
def on_agent_execution_started(source, event: AgentExecutionStartedEvent):
self.formatter.create_agent_branch(
self.formatter.current_task_branch,
event.agent.role,
self.formatter.current_crew_tree,
)
@crewai_event_bus.on(AgentExecutionCompletedEvent)
def on_agent_execution_completed(source, event: AgentExecutionCompletedEvent):
self.formatter.update_agent_status(
self.formatter.current_agent_branch,
event.agent.role,
self.formatter.current_crew_tree,
)
# ----------- LITE AGENT EVENTS -----------
@crewai_event_bus.on(LiteAgentExecutionStartedEvent)
def on_lite_agent_execution_started(
source, event: LiteAgentExecutionStartedEvent
):
"""Handle LiteAgent execution started event."""
self.formatter.handle_lite_agent_execution(
event.agent_info["role"], status="started", **event.agent_info
)
@crewai_event_bus.on(LiteAgentExecutionCompletedEvent)
def on_lite_agent_execution_completed(
source, event: LiteAgentExecutionCompletedEvent
):
"""Handle LiteAgent execution completed event."""
self.formatter.handle_lite_agent_execution(
event.agent_info["role"], status="completed", **event.agent_info
)
@crewai_event_bus.on(LiteAgentExecutionErrorEvent)
def on_lite_agent_execution_error(source, event: LiteAgentExecutionErrorEvent):
"""Handle LiteAgent execution error event."""
self.formatter.handle_lite_agent_execution(
event.agent_info["role"],
status="failed",
error=event.error,
**event.agent_info,
)
# ----------- FLOW EVENTS -----------
@crewai_event_bus.on(FlowCreatedEvent)
def on_flow_created(source, event: FlowCreatedEvent):
self._telemetry.flow_creation_span(event.flow_name)
self.formatter.create_flow_tree(event.flow_name, str(source.flow_id))
@crewai_event_bus.on(FlowStartedEvent)
def on_flow_started(source, event: FlowStartedEvent):
self._telemetry.flow_execution_span(
event.flow_name, list(source._methods.keys())
)
self.formatter.start_flow(event.flow_name, str(source.flow_id))
@crewai_event_bus.on(FlowFinishedEvent)
def on_flow_finished(source, event: FlowFinishedEvent):
self.formatter.update_flow_status(
self.formatter.current_flow_tree, event.flow_name, source.flow_id
)
@crewai_event_bus.on(MethodExecutionStartedEvent)
def on_method_execution_started(source, event: MethodExecutionStartedEvent):
self.formatter.update_method_status(
self.formatter.current_method_branch,
self.formatter.current_flow_tree,
event.method_name,
"running",
)
@crewai_event_bus.on(MethodExecutionFinishedEvent)
def on_method_execution_finished(source, event: MethodExecutionFinishedEvent):
self.formatter.update_method_status(
self.formatter.current_method_branch,
self.formatter.current_flow_tree,
event.method_name,
"completed",
)
@crewai_event_bus.on(MethodExecutionFailedEvent)
def on_method_execution_failed(source, event: MethodExecutionFailedEvent):
self.formatter.update_method_status(
self.formatter.current_method_branch,
self.formatter.current_flow_tree,
event.method_name,
"failed",
)
# ----------- TOOL USAGE EVENTS -----------
@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
self.formatter.handle_tool_usage_started(
self.formatter.current_agent_branch,
event.tool_name,
self.formatter.current_crew_tree,
)
@crewai_event_bus.on(ToolUsageFinishedEvent)
def on_tool_usage_finished(source, event: ToolUsageFinishedEvent):
self.formatter.handle_tool_usage_finished(
self.formatter.current_tool_branch,
event.tool_name,
self.formatter.current_crew_tree,
)
@crewai_event_bus.on(ToolUsageErrorEvent)
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
self.formatter.handle_tool_usage_error(
self.formatter.current_tool_branch,
event.tool_name,
event.error,
self.formatter.current_crew_tree,
)
# ----------- LLM EVENTS -----------
@crewai_event_bus.on(LLMCallStartedEvent)
def on_llm_call_started(source, event: LLMCallStartedEvent):
self.formatter.handle_llm_call_started(
self.formatter.current_agent_branch,
self.formatter.current_crew_tree,
)
@crewai_event_bus.on(LLMCallCompletedEvent)
def on_llm_call_completed(source, event: LLMCallCompletedEvent):
self.formatter.handle_llm_call_completed(
self.formatter.current_tool_branch,
self.formatter.current_agent_branch,
self.formatter.current_crew_tree,
)
@crewai_event_bus.on(LLMCallFailedEvent)
def on_llm_call_failed(source, event: LLMCallFailedEvent):
self.formatter.handle_llm_call_failed(
self.formatter.current_tool_branch,
event.error,
self.formatter.current_crew_tree,
)
@crewai_event_bus.on(LLMStreamChunkEvent)
def on_llm_stream_chunk(source, event: LLMStreamChunkEvent):
self.text_stream.write(event.chunk)
self.text_stream.seek(self.next_chunk)
# Read from the in-memory stream
content = self.text_stream.read()
print(content, end="", flush=True)
self.next_chunk = self.text_stream.tell()
@crewai_event_bus.on(CrewTestStartedEvent)
def on_crew_test_started(source, event: CrewTestStartedEvent):
cloned_crew = source.copy()
self._telemetry.test_execution_span(
cloned_crew,
event.n_iterations,
event.inputs,
event.eval_llm or "",
)
self.formatter.handle_crew_test_started(
event.crew_name or "Crew", source.id, event.n_iterations
)
@crewai_event_bus.on(CrewTestCompletedEvent)
def on_crew_test_completed(source, event: CrewTestCompletedEvent):
self.formatter.handle_crew_test_completed(
self.formatter.current_flow_tree,
event.crew_name or "Crew",
)
@crewai_event_bus.on(CrewTestFailedEvent)
def on_crew_test_failed(source, event: CrewTestFailedEvent):
self.formatter.handle_crew_test_failed(event.crew_name or "Crew")
event_listener = EventListener()
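
The `on_llm_stream_chunk` handler prints only the not-yet-seen tail of the stream by keeping a read cursor into a `StringIO`. The same pattern in isolation:

from io import StringIO

stream, cursor = StringIO(), 0
for chunk in ["Hel", "lo ", "world"]:
    stream.write(chunk)        # append the new chunk
    stream.seek(cursor)        # jump back to the last read position
    print(stream.read(), end="", flush=True)  # emit only the new text
    cursor = stream.tell()     # remember where the next read starts
print()  # prints "Hello world", incrementally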

View File

@@ -0,0 +1,71 @@
from typing import Union
from .agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentExecutionStartedEvent,
)
from .crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewKickoffStartedEvent,
CrewTestCompletedEvent,
CrewTestFailedEvent,
CrewTestStartedEvent,
CrewTrainCompletedEvent,
CrewTrainFailedEvent,
CrewTrainStartedEvent,
)
from .flow_events import (
FlowFinishedEvent,
FlowStartedEvent,
MethodExecutionFailedEvent,
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
)
from .llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMStreamChunkEvent,
)
from .task_events import (
TaskCompletedEvent,
TaskFailedEvent,
TaskStartedEvent,
)
from .tool_usage_events import (
ToolUsageErrorEvent,
ToolUsageFinishedEvent,
ToolUsageStartedEvent,
)
EventTypes = Union[
CrewKickoffStartedEvent,
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewTestStartedEvent,
CrewTestCompletedEvent,
CrewTestFailedEvent,
CrewTrainStartedEvent,
CrewTrainCompletedEvent,
CrewTrainFailedEvent,
AgentExecutionStartedEvent,
AgentExecutionCompletedEvent,
TaskStartedEvent,
TaskCompletedEvent,
TaskFailedEvent,
FlowStartedEvent,
FlowFinishedEvent,
MethodExecutionStartedEvent,
MethodExecutionFinishedEvent,
MethodExecutionFailedEvent,
AgentExecutionErrorEvent,
ToolUsageFinishedEvent,
ToolUsageErrorEvent,
ToolUsageStartedEvent,
LLMCallStartedEvent,
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMStreamChunkEvent,
]
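
`EventTypes` gives `register_handler` a closed union to type against, which is the non-decorator way to subscribe. A sketch:

from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.task_events import TaskStartedEvent

def on_task_started(source, event: TaskStartedEvent) -> None:
    print("task started")

crewai_event_bus.register_handler(TaskStartedEvent, on_task_started)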

View File

@@ -0,0 +1,73 @@
from typing import Any, Dict, Optional, Union
from pydantic import BaseModel, ConfigDict
from .base_events import BaseEvent
class FlowEvent(BaseEvent):
"""Base class for all flow events"""
type: str
flow_name: str
class FlowStartedEvent(FlowEvent):
"""Event emitted when a flow starts execution"""
flow_name: str
inputs: Optional[Dict[str, Any]] = None
type: str = "flow_started"
class FlowCreatedEvent(FlowEvent):
"""Event emitted when a flow is created"""
flow_name: str
type: str = "flow_created"
class MethodExecutionStartedEvent(FlowEvent):
"""Event emitted when a flow method starts execution"""
flow_name: str
method_name: str
state: Union[Dict[str, Any], BaseModel]
params: Optional[Dict[str, Any]] = None
type: str = "method_execution_started"
class MethodExecutionFinishedEvent(FlowEvent):
"""Event emitted when a flow method completes execution"""
flow_name: str
method_name: str
result: Any = None
state: Union[Dict[str, Any], BaseModel]
type: str = "method_execution_finished"
class MethodExecutionFailedEvent(FlowEvent):
"""Event emitted when a flow method fails execution"""
flow_name: str
method_name: str
error: Exception
type: str = "method_execution_failed"
model_config = ConfigDict(arbitrary_types_allowed=True)
class FlowFinishedEvent(FlowEvent):
"""Event emitted when a flow completes execution"""
flow_name: str
result: Optional[Any] = None
type: str = "flow_finished"
class FlowPlotEvent(FlowEvent):
"""Event emitted when a flow plot is created"""
flow_name: str
type: str = "flow_plot"

View File

@@ -0,0 +1,48 @@
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from crewai.utilities.events.base_events import BaseEvent
class LLMCallType(Enum):
"""Type of LLM call being made"""
TOOL_CALL = "tool_call"
LLM_CALL = "llm_call"
class LLMCallStartedEvent(BaseEvent):
"""Event emitted when a LLM call starts
Attributes:
messages: Content can be either a string or a list of dictionaries that support
multimodal content (text, images, etc.)
"""
type: str = "llm_call_started"
messages: Union[str, List[Dict[str, Any]]]
tools: Optional[List[dict]] = None
callbacks: Optional[List[Any]] = None
available_functions: Optional[Dict[str, Any]] = None
class LLMCallCompletedEvent(BaseEvent):
"""Event emitted when a LLM call completes"""
type: str = "llm_call_completed"
response: Any
call_type: LLMCallType
class LLMCallFailedEvent(BaseEvent):
"""Event emitted when a LLM call fails"""
error: str
type: str = "llm_call_failed"
class LLMStreamChunkEvent(BaseEvent):
"""Event emitted when a streaming chunk is received"""
type: str = "llm_stream_chunk"
chunk: str
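
`LLMCallType` distinguishes plain completions from tool-call rounds on the completed event. A construction sketch with an illustrative response:

from crewai.utilities.events.llm_events import LLMCallCompletedEvent, LLMCallType

event = LLMCallCompletedEvent(response="Paris", call_type=LLMCallType.LLM_CALL)
print(event.call_type.value)  # "llm_call"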
