Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-02 12:48:30 +00:00)
Compare commits (28 commits)

- 0ee438c39d
- cbb9965bf7
- 4951d30dd9
- 7426969736
- d879be8b66
- 24b84a4b68
- 8e571ea8a7
- 2cfc4d37b8
- f4abc41235
- de5d3c3ad1
- c062826779
- 9491fe8334
- 6f2ea013a7
- 39e8792ae5
- 2f682e1564
- d4aa676195
- 578fa8c2e4
- 6f5af2b27c
- 8ee3cf4874
- f2d3fd0c0f
- f28e78c5ba
- 81bd81e5f5
- 1b00cc71ef
- 45d0c9912c
- 1f1ab14b07
- 1a70f1698e
- 8883fb656b
- 79d65e55a1
.github/workflows/codeql.yml (vendored, new file, 102 lines)
@@ -0,0 +1,102 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Advanced"

on:
  push:
    branches: [ "main" ]
    paths-ignore:
      - "src/crewai/cli/templates/**"
  pull_request:
    branches: [ "main" ]
    paths-ignore:
      - "src/crewai/cli/templates/**"

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for possible analysis time improvements.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    permissions:
      # required for all workflows
      security-events: write

      # required to fetch internal or private CodeQL packs
      packages: read

      # only required for workflows in private repositories
      actions: read
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - language: actions
            build-mode: none
          - language: python
            build-mode: none
        # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift'
        # Use `c-cpp` to analyze code written in C, C++ or both
        # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
        # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
        # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
        # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
        # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
        # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Add any setup steps before running the `github/codeql-action/init` action.
      # This includes steps like installing compilers or runtimes (`actions/setup-node`
      # or others). This is typically only required for manual builds.
      # - name: Setup runtime (example)
      #   uses: actions/setup-example@v1

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.

          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # If the analyze step fails for one of the languages you are analyzing with
      # "We were unable to automatically build your code", modify the matrix above
      # to set the build mode to "manual" for that language. Then modify this step
      # to build your code.
      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
      - if: matrix.build-mode == 'manual'
        shell: bash
        run: |
          echo 'If you are using a "manual" build mode for one or more of the' \
            'languages you are analyzing, replace this with the commands to build' \
            'your code, for example:'
          echo '  make bootstrap'
          echo '  make release'
          exit 1

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
          category: "/language:${{matrix.language}}"
.github/workflows/tests.yml (vendored, 29 lines changed)
@@ -22,6 +22,8 @@ jobs:
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
+        with:
+          fetch-depth: 0  # Fetch all history for proper diff

       - name: Restore global uv cache
         id: cache-restore
@@ -45,14 +47,41 @@ jobs:
       - name: Install the project
         run: uv sync --all-groups --all-extras

+      - name: Restore test durations
+        uses: actions/cache/restore@v4
+        with:
+          path: .test_durations_py*
+          key: test-durations-py${{ matrix.python-version }}
+
       - name: Run tests (group ${{ matrix.group }} of 8)
         run: |
+          PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
+          DURATION_FILE=".test_durations_py${PYTHON_VERSION_SAFE}"
+
+          # Temporarily always skip cached durations to fix test splitting
+          # When durations don't match, pytest-split runs duplicate tests instead of splitting
+          echo "Using even test splitting (duration cache disabled until fix merged)"
+          DURATIONS_ARG=""
+
+          # Original logic (disabled temporarily):
+          # if [ ! -f "$DURATION_FILE" ]; then
+          #   echo "No cached durations found, tests will be split evenly"
+          #   DURATIONS_ARG=""
+          # elif git diff origin/${{ github.base_ref }}...HEAD --name-only 2>/dev/null | grep -q "^tests/.*\.py$"; then
+          #   echo "Test files have changed, skipping cached durations to avoid mismatches"
+          #   DURATIONS_ARG=""
+          # else
+          #   echo "No test changes detected, using cached test durations for optimal splitting"
+          #   DURATIONS_ARG="--durations-path=${DURATION_FILE}"
+          # fi
+
           uv run pytest \
             --block-network \
             --timeout=30 \
             -vv \
             --splits 8 \
             --group ${{ matrix.group }} \
+            $DURATIONS_ARG \
             --durations=10 \
             -n auto \
             --maxfail=3
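The `--splits 8` / `--group ${{ matrix.group }}` flags above come from pytest-split, which partitions the suite across the eight matrix jobs; with no durations file it falls back to an even split by test count. As a rough, hypothetical sketch of why cached durations matter (this shows the general longest-processing-time load-balancing idea, not pytest-split's actual code):

```python
# Hypothetical sketch of duration-aware test splitting (the idea behind
# pytest-split's --durations-path), not the plugin's real implementation.
import heapq


def split_tests(durations: dict[str, float], splits: int) -> list[list[str]]:
    """Greedily assign each test to the group with the smallest total runtime."""
    heap = [(0.0, i) for i in range(splits)]  # (accumulated seconds, group index)
    groups: list[list[str]] = [[] for _ in range(splits)]
    # Scheduling the longest tests first (LPT heuristic) balances group runtimes.
    for test_id, seconds in sorted(durations.items(), key=lambda kv: -kv[1]):
        total, idx = heapq.heappop(heap)
        groups[idx].append(test_id)
        heapq.heappush(heap, (total + seconds, idx))
    return groups


example = {
    "tests/test_a.py::test_slow": 30.0,
    "tests/test_b.py::test_fast": 1.0,
    "tests/test_c.py::test_mid": 10.0,
    "tests/test_d.py::test_mid2": 9.0,
}
print(split_tests(example, 2))  # two groups with roughly equal total runtime
```

When cached durations no longer match the collected tests, this balancing breaks down, which is exactly the mismatch the temporary even-split workaround in the diff avoids.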
.github/workflows/update-test-durations.yml (vendored, new file, 71 lines)
@@ -0,0 +1,71 @@
name: Update Test Durations

on:
  push:
    branches:
      - main
    paths:
      - 'tests/**/*.py'
  workflow_dispatch:

permissions:
  contents: read

jobs:
  update-durations:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.10', '3.11', '3.12', '3.13']
    env:
      OPENAI_API_KEY: fake-api-key
      PYTHONUNBUFFERED: 1

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Restore global uv cache
        id: cache-restore
        uses: actions/cache/restore@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
          restore-keys: |
            uv-main-py${{ matrix.python-version }}-

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
          python-version: ${{ matrix.python-version }}
          enable-cache: false

      - name: Install the project
        run: uv sync --all-groups --all-extras

      - name: Run all tests and store durations
        run: |
          PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
          uv run pytest --store-durations --durations-path=.test_durations_py${PYTHON_VERSION_SAFE} -n auto
        continue-on-error: true

      - name: Save durations to cache
        if: always()
        uses: actions/cache/save@v4
        with:
          path: .test_durations_py*
          key: test-durations-py${{ matrix.python-version }}

      - name: Save uv caches
        if: steps.cache-restore.outputs.cache-hit != 'true'
        uses: actions/cache/save@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
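The cached `.test_durations_py*` files are pytest-split's durations store which, as far as the plugin's documented format goes, is a JSON object mapping test node IDs to measured runtimes in seconds. A small sketch for inspecting one locally (the file name follows this workflow's per-Python-version convention):

```python
# Sketch: inspect a pytest-split durations file (assumed JSON: node id -> seconds).
import json
from pathlib import Path

path = Path(".test_durations_py3_12")  # naming convention from the workflow above
durations: dict[str, float] = json.loads(path.read_text())

# The slowest tests dominate how evenly the suite can be balanced across groups.
for node_id, seconds in sorted(durations.items(), key=lambda kv: -kv[1])[:5]:
    print(f"{seconds:8.2f}s  {node_id}")
```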
RagTool documentation (docs page):

@@ -9,7 +9,7 @@ mode: "wide"

 ## Description

-The `RagTool` is designed to answer questions by leveraging the power of Retrieval-Augmented Generation (RAG) through EmbedChain.
+The `RagTool` is designed to answer questions by leveraging the power of Retrieval-Augmented Generation (RAG) through CrewAI's native RAG system.
 It provides a dynamic knowledge base that can be queried to retrieve relevant information from various data sources.
 This tool is particularly useful for applications that require access to a vast array of information and need to provide contextually relevant answers.

@@ -76,8 +76,8 @@ The `RagTool` can be used with a wide variety of data sources, including:

 The `RagTool` accepts the following parameters:

 - **summarize**: Optional. Whether to summarize the retrieved content. Default is `False`.
-- **adapter**: Optional. A custom adapter for the knowledge base. If not provided, an EmbedchainAdapter will be used.
-- **config**: Optional. Configuration for the underlying EmbedChain App.
+- **adapter**: Optional. A custom adapter for the knowledge base. If not provided, a CrewAIRagAdapter will be used.
+- **config**: Optional. Configuration for the underlying CrewAI RAG system.

 ## Adding Content

@@ -130,44 +130,23 @@ from crewai_tools import RagTool

 # Create a RAG tool with custom configuration
 config = {
-    "app": {
-        "name": "custom_app",
-    },
-    "llm": {
-        "provider": "openai",
-        "config": {
-            "model": "gpt-4",
-        }
-    },
+    "vectordb": {
+        "provider": "qdrant",
+        "config": {
+            "collection_name": "my-collection"
+        }
+    },
     "embedding_model": {
         "provider": "openai",
         "config": {
-            "model": "text-embedding-ada-002"
+            "model": "text-embedding-3-small"
         }
     },
-    "vectordb": {
-        "provider": "elasticsearch",
-        "config": {
-            "collection_name": "my-collection",
-            "cloud_id": "deployment-name:xxxx",
-            "api_key": "your-key",
-            "verify_certs": False
-        }
-    },
-    "chunker": {
-        "chunk_size": 400,
-        "chunk_overlap": 100,
-        "length_function": "len",
-        "min_chunk_size": 0
-    }
 }

 rag_tool = RagTool(config=config, summarize=True)
 ```

-The internal RAG tool utilizes the Embedchain adapter, allowing you to pass any configuration options that are supported by Embedchain.
-You can refer to the [Embedchain documentation](https://docs.embedchain.ai/components/introduction) for details.
-Make sure to review the configuration options available in the .yaml file.

 ## Conclusion
 The `RagTool` provides a powerful way to create and query knowledge bases from various data sources. By leveraging Retrieval-Augmented Generation, it enables agents to access and retrieve relevant information efficiently, enhancing their ability to provide accurate and contextually appropriate responses.
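To complement the configuration diff above, a minimal usage sketch of the documented API; `add()` and `run()` follow the RagTool docs, but exact behavior depends on the installed crewai-tools version and requires embedding/LLM credentials to be configured:

```python
# Minimal RagTool usage sketch per the documentation above; assumes
# crewai-tools is installed and an embeddings provider is configured.
from crewai_tools import RagTool

rag_tool = RagTool()                      # defaults to the CrewAI RAG adapter
rag_tool.add("https://docs.crewai.com")   # index a data source
print(rag_tool.run("What is CrewAI?"))    # query the knowledge base
```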
pyproject.toml:

@@ -48,7 +48,7 @@ Documentation = "https://docs.crewai.com"
 Repository = "https://github.com/crewAIInc/crewAI"

 [project.optional-dependencies]
-tools = ["crewai-tools~=0.71.0"]
+tools = ["crewai-tools~=0.73.0"]
 embeddings = [
     "tiktoken~=0.8.0"
 ]
@@ -131,13 +131,15 @@ select = [
     "I001", # sort imports
     "I002", # remove unused imports
 ]
-ignore = ["E501"] # ignore line too long
+ignore = ["E501"] # ignore line too long globally

 [tool.ruff.lint.per-file-ignores]
-"tests/**/*.py" = ["S101"] # Allow assert statements in tests
+"tests/**/*.py" = ["S101", "RET504"] # Allow assert statements and unnecessary assignments before return in tests

 [tool.mypy]
-exclude = ["src/crewai/cli/templates", "tests"]
+exclude = ["src/crewai/cli/templates", "tests/"]
 plugins = ["pydantic.mypy"]


+[tool.bandit]
+exclude_dirs = ["src/crewai/cli/templates"]
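For reference, the two ruff rules now ignored under `tests/**` flag patterns like the following (illustrative snippet, not from the repository):

```python
# Illustrative only: what ruff's S101 and RET504 would flag outside tests/.
def build_total() -> int:
    total = 2 + 3
    return total  # RET504: assignment immediately followed by return


def test_build_total() -> None:
    assert build_total() == 5  # S101: bare assert, idiomatic in pytest tests
```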
src/crewai/__init__.py:

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:

 _suppress_pydantic_deprecation_warnings()

-__version__ = "0.186.1"
+__version__ = "0.193.2"
 _telemetry_submitted = False
src/crewai/agents/__init__.py:

@@ -1,5 +1,12 @@
 from crewai.agents.cache.cache_handler import CacheHandler
-from crewai.agents.parser import parse, AgentAction, AgentFinish, OutputParserException
+from crewai.agents.parser import AgentAction, AgentFinish, OutputParserError, parse
 from crewai.agents.tools_handler import ToolsHandler

-__all__ = ["CacheHandler", "parse", "AgentAction", "AgentFinish", "OutputParserException", "ToolsHandler"]
+__all__ = [
+    "AgentAction",
+    "AgentFinish",
+    "CacheHandler",
+    "OutputParserError",
+    "ToolsHandler",
+    "parse",
+]
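One practical consequence of this re-export change: downstream code importing the old name must rename it. A migration sketch:

```python
# Migration for the rename above.
# Before (old export):
#     from crewai.agents import OutputParserException
# After (new export):
from crewai.agents import AgentAction, AgentFinish, OutputParserError, parse

# Any `except OutputParserException:` handler becomes:
#     except OutputParserError:
```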
src/crewai/agents/agent_adapters/base_agent_adapter.py:

@@ -1,7 +1,7 @@
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional
+from typing import Any

-from pydantic import PrivateAttr
+from pydantic import ConfigDict, PrivateAttr

 from crewai.agent import BaseAgent
 from crewai.tools import BaseTool
@@ -16,22 +16,21 @@ class BaseAgentAdapter(BaseAgent, ABC):
     """

     adapted_structured_output: bool = False
-    _agent_config: Optional[Dict[str, Any]] = PrivateAttr(default=None)
+    _agent_config: dict[str, Any] | None = PrivateAttr(default=None)

-    model_config = {"arbitrary_types_allowed": True}
+    model_config = ConfigDict(arbitrary_types_allowed=True)

-    def __init__(self, agent_config: Optional[Dict[str, Any]] = None, **kwargs: Any):
+    def __init__(self, agent_config: dict[str, Any] | None = None, **kwargs: Any):
         super().__init__(adapted_agent=True, **kwargs)
         self._agent_config = agent_config

     @abstractmethod
-    def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
+    def configure_tools(self, tools: list[BaseTool] | None = None) -> None:
         """Configure and adapt tools for the specific agent implementation.

         Args:
             tools: Optional list of BaseTool instances to be configured
         """
-        pass

     def configure_structured_output(self, structured_output: Any) -> None:
         """Configure the structured output for the specific agent implementation.
@@ -39,4 +38,3 @@ class BaseAgentAdapter(BaseAgent, ABC):
         Args:
             structured_output: The structured output to be configured
         """
-        pass
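To make the abstract surface concrete, a hypothetical subclass sketch (the class name is invented; only `configure_tools` from the interface above is implemented, and a real subclass must also satisfy the remaining `BaseAgent` abstract methods before it can be instantiated):

```python
# Hypothetical sketch of implementing the BaseAgentAdapter interface above.
from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
from crewai.tools import BaseTool


class PrintingAgentAdapter(BaseAgentAdapter):  # invented name, for illustration
    def configure_tools(self, tools: list[BaseTool] | None = None) -> None:
        # A real adapter would convert these into the wrapped framework's
        # native tool format; this sketch only inspects their names.
        print([tool.name for tool in tools or []])
```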
src/crewai/agents/agent_adapters/base_tool_adapter.py:

@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import Any, List, Optional
+from typing import Any

 from crewai.tools.base_tool import BaseTool

@@ -12,23 +12,22 @@ class BaseToolAdapter(ABC):
     different frameworks and platforms.
     """

-    original_tools: List[BaseTool]
-    converted_tools: List[Any]
+    original_tools: list[BaseTool]
+    converted_tools: list[Any]

-    def __init__(self, tools: Optional[List[BaseTool]] = None):
+    def __init__(self, tools: list[BaseTool] | None = None):
         self.original_tools = tools or []
         self.converted_tools = []

     @abstractmethod
-    def configure_tools(self, tools: List[BaseTool]) -> None:
+    def configure_tools(self, tools: list[BaseTool]) -> None:
         """Configure and convert tools for the specific implementation.

         Args:
             tools: List of BaseTool instances to be configured and converted
         """
-        pass

-    def tools(self) -> List[Any]:
+    def tools(self) -> list[Any]:
         """Return all converted tools."""
         return self.converted_tools
LangGraph agent adapter (src/crewai/agents/agent_adapters/langgraph/):

@@ -1,47 +1,56 @@
-from typing import Any, Dict, List, Optional
+"""LangGraph agent adapter for CrewAI integration.
+
+This module contains the LangGraphAgentAdapter class that integrates LangGraph ReAct agents
+with CrewAI's agent system. Provides memory persistence, tool integration, and structured
+output functionality.
+"""

-from pydantic import Field, PrivateAttr
+from collections.abc import Callable
+from typing import Any, cast
+
+from pydantic import ConfigDict, Field, PrivateAttr

 from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
 from crewai.agents.agent_adapters.langgraph.langgraph_tool_adapter import (
     LangGraphToolAdapter,
 )
+from crewai.agents.agent_adapters.langgraph.protocols import (
+    LangGraphCheckPointMemoryModule,
+    LangGraphPrebuiltModule,
+)
 from crewai.agents.agent_adapters.langgraph.structured_output_converter import (
     LangGraphConverterAdapter,
 )
 from crewai.agents.agent_builder.base_agent import BaseAgent
-from crewai.tools.agent_tools.agent_tools import AgentTools
-from crewai.tools.base_tool import BaseTool
-from crewai.utilities import Logger
-from crewai.utilities.converter import Converter
 from crewai.events.event_bus import crewai_event_bus
 from crewai.events.types.agent_events import (
     AgentExecutionCompletedEvent,
     AgentExecutionErrorEvent,
     AgentExecutionStartedEvent,
 )
-
-try:
-    from langgraph.checkpoint.memory import MemorySaver
-    from langgraph.prebuilt import create_react_agent
-
-    LANGGRAPH_AVAILABLE = True
-except ImportError:
-    LANGGRAPH_AVAILABLE = False
+from crewai.tools.agent_tools.agent_tools import AgentTools
+from crewai.tools.base_tool import BaseTool
+from crewai.utilities import Logger
+from crewai.utilities.converter import Converter
+from crewai.utilities.import_utils import require


 class LangGraphAgentAdapter(BaseAgentAdapter):
-    """Adapter for LangGraph agents to work with CrewAI."""
+    """Adapter for LangGraph agents to work with CrewAI.

-    model_config = {"arbitrary_types_allowed": True}
+    This adapter integrates LangGraph's ReAct agents with CrewAI's agent system,
+    providing memory persistence, tool integration, and structured output support.
+    """

-    _logger: Logger = PrivateAttr(default_factory=lambda: Logger())
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    _logger: Logger = PrivateAttr(default_factory=Logger)
     _tool_adapter: LangGraphToolAdapter = PrivateAttr()
     _graph: Any = PrivateAttr(default=None)
     _memory: Any = PrivateAttr(default=None)
     _max_iterations: int = PrivateAttr(default=10)
     function_calling_llm: Any = Field(default=None)
-    step_callback: Any = Field(default=None)
+    step_callback: Callable[..., Any] | None = Field(default=None)

     model: str = Field(default="gpt-4o")
     verbose: bool = Field(default=False)
@@ -51,17 +60,24 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
         role: str,
         goal: str,
         backstory: str,
-        tools: Optional[List[BaseTool]] = None,
+        tools: list[BaseTool] | None = None,
         llm: Any = None,
         max_iterations: int = 10,
-        agent_config: Optional[Dict[str, Any]] = None,
+        agent_config: dict[str, Any] | None = None,
         **kwargs,
-    ):
-        """Initialize the LangGraph agent adapter."""
-        if not LANGGRAPH_AVAILABLE:
-            raise ImportError(
-                "LangGraph Agent Dependencies are not installed. Please install it using `uv add langchain-core langgraph`"
-            )
+    ) -> None:
+        """Initialize the LangGraph agent adapter.
+
+        Args:
+            role: The role description for the agent.
+            goal: The primary goal the agent should achieve.
+            backstory: Background information about the agent.
+            tools: Optional list of tools available to the agent.
+            llm: Language model to use, defaults to gpt-4o.
+            max_iterations: Maximum number of iterations for task execution.
+            agent_config: Additional configuration for the LangGraph agent.
+            **kwargs: Additional arguments passed to the base adapter.
+        """
         super().__init__(
             role=role,
             goal=goal,
@@ -72,46 +88,65 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
             **kwargs,
         )
         self._tool_adapter = LangGraphToolAdapter(tools=tools)
-        self._converter_adapter = LangGraphConverterAdapter(self)
+        self._converter_adapter: LangGraphConverterAdapter = LangGraphConverterAdapter(
+            self
+        )
         self._max_iterations = max_iterations
         self._setup_graph()

     def _setup_graph(self) -> None:
-        """Set up the LangGraph workflow graph."""
-        try:
-            self._memory = MemorySaver()
-
-            converted_tools: List[Any] = self._tool_adapter.tools()
-            if self._agent_config:
-                self._graph = create_react_agent(
-                    model=self.llm,
-                    tools=converted_tools,
-                    checkpointer=self._memory,
-                    debug=self.verbose,
-                    **self._agent_config,
-                )
-            else:
-                self._graph = create_react_agent(
-                    model=self.llm,
-                    tools=converted_tools or [],
-                    checkpointer=self._memory,
-                    debug=self.verbose,
-                )
-        except ImportError as e:
-            self._logger.log(
-                "error", f"Failed to import LangGraph dependencies: {str(e)}"
-            )
-            raise
-        except Exception as e:
-            self._logger.log("error", f"Error setting up LangGraph agent: {str(e)}")
-            raise
+        """Set up the LangGraph workflow graph.
+
+        Initializes the memory saver and creates a ReAct agent with the configured
+        tools, memory checkpointer, and debug settings.
+        """
+        memory_saver: type[Any] = cast(
+            LangGraphCheckPointMemoryModule,
+            require(
+                "langgraph.checkpoint.memory",
+                purpose="LangGraph core functionality",
+            ),
+        ).MemorySaver
+        create_react_agent: Callable[..., Any] = cast(
+            LangGraphPrebuiltModule,
+            require(
+                "langgraph.prebuilt",
+                purpose="LangGraph core functionality",
+            ),
+        ).create_react_agent
+
+        self._memory = memory_saver()
+
+        converted_tools: list[Any] = self._tool_adapter.tools()
+        if self._agent_config:
+            self._graph = create_react_agent(
+                model=self.llm,
+                tools=converted_tools,
+                checkpointer=self._memory,
+                debug=self.verbose,
+                **self._agent_config,
+            )
+        else:
+            self._graph = create_react_agent(
+                model=self.llm,
+                tools=converted_tools or [],
+                checkpointer=self._memory,
+                debug=self.verbose,
+            )

     def _build_system_prompt(self) -> str:
-        """Build a system prompt for the LangGraph agent."""
+        """Build a system prompt for the LangGraph agent.
+
+        Creates a prompt that includes the agent's role, goal, and backstory,
+        then enhances it through the converter adapter for structured output.
+
+        Returns:
+            The complete system prompt string.
+        """
         base_prompt = f"""
 You are {self.role}.

 Your goal is: {self.goal}

 Your backstory: {self.backstory}
@@ -123,10 +158,25 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
     def execute_task(
         self,
         task: Any,
-        context: Optional[str] = None,
-        tools: Optional[List[BaseTool]] = None,
+        context: str | None = None,
+        tools: list[BaseTool] | None = None,
     ) -> str:
-        """Execute a task using the LangGraph workflow."""
+        """Execute a task using the LangGraph workflow.
+
+        Configures the agent, processes the task through the LangGraph workflow,
+        and handles event emission for execution tracking.
+
+        Args:
+            task: The task object to execute.
+            context: Optional context information for the task.
+            tools: Optional additional tools for this specific execution.
+
+        Returns:
+            The final answer from the task execution.
+
+        Raises:
+            Exception: If task execution fails.
+        """
         self.create_agent_executor(tools)

         self.configure_structured_output(task)
@@ -151,9 +201,11 @@ class LangGraphAgentAdapter(BaseAgentAdapter):

             session_id = f"task_{id(task)}"

-            config = {"configurable": {"thread_id": session_id}}
+            config: dict[str, dict[str, str]] = {
+                "configurable": {"thread_id": session_id}
+            }

-            result = self._graph.invoke(
+            result: dict[str, Any] = self._graph.invoke(
                 {
                     "messages": [
                         ("system", self._build_system_prompt()),
@@ -163,10 +215,10 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
                 config,
             )

-            messages = result.get("messages", [])
-            last_message = messages[-1] if messages else None
+            messages: list[Any] = result.get("messages", [])
+            last_message: Any = messages[-1] if messages else None

-            final_answer = ""
+            final_answer: str = ""
             if isinstance(last_message, dict):
                 final_answer = last_message.get("content", "")
             elif hasattr(last_message, "content"):
@@ -186,7 +238,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
             return final_answer

         except Exception as e:
-            self._logger.log("error", f"Error executing LangGraph task: {str(e)}")
+            self._logger.log("error", f"Error executing LangGraph task: {e!s}")
             crewai_event_bus.emit(
                 self,
                 event=AgentExecutionErrorEvent(
@@ -197,29 +249,67 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
             )
             raise

-    def create_agent_executor(self, tools: Optional[List[BaseTool]] = None) -> None:
-        """Configure the LangGraph agent for execution."""
+    def create_agent_executor(self, tools: list[BaseTool] | None = None) -> None:
+        """Configure the LangGraph agent for execution.
+
+        Args:
+            tools: Optional tools to configure for the agent.
+        """
         self.configure_tools(tools)

-    def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
-        """Configure tools for the LangGraph agent."""
+    def configure_tools(self, tools: list[BaseTool] | None = None) -> None:
+        """Configure tools for the LangGraph agent.
+
+        Merges additional tools with existing ones and updates the graph's
+        available tools through the tool adapter.
+
+        Args:
+            tools: Optional additional tools to configure.
+        """
         if tools:
-            all_tools = list(self.tools or []) + list(tools or [])
+            all_tools: list[BaseTool] = list(self.tools or []) + list(tools or [])
             self._tool_adapter.configure_tools(all_tools)
-            available_tools = self._tool_adapter.tools()
+            available_tools: list[Any] = self._tool_adapter.tools()
             self._graph.tools = available_tools

-    def get_delegation_tools(self, agents: List[BaseAgent]) -> List[BaseTool]:
-        """Implement delegation tools support for LangGraph."""
-        agent_tools = AgentTools(agents=agents)
+    def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
+        """Implement delegation tools support for LangGraph.
+
+        Creates delegation tools that allow this agent to delegate tasks to other agents.
+
+        Args:
+            agents: List of agents available for delegation.
+
+        Returns:
+            List of delegation tools.
+        """
+        agent_tools: AgentTools = AgentTools(agents=agents)
         return agent_tools.tools()

+    @staticmethod
     def get_output_converter(
-        self, llm: Any, text: str, model: Any, instructions: str
-    ) -> Any:
-        """Convert output format if needed."""
+        llm: Any, text: str, model: Any, instructions: str
+    ) -> Converter:
+        """Convert output format if needed.
+
+        Args:
+            llm: Language model instance.
+            text: Text to convert.
+            model: Model configuration.
+            instructions: Conversion instructions.
+
+        Returns:
+            Converter instance for output transformation.
+        """
         return Converter(llm=llm, text=text, model=model, instructions=instructions)

-    def configure_structured_output(self, task) -> None:
-        """Configure the structured output for LangGraph."""
+    def configure_structured_output(self, task: Any) -> None:
+        """Configure the structured output for LangGraph.
+
+        Uses the converter adapter to set up structured output formatting
+        based on the task requirements.
+
+        Args:
+            task: Task object containing output requirements.
+        """
         self._converter_adapter.configure_structured_output(task)
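A hedged usage sketch of the adapter as refactored above; the module path in the import is assumed, and the optional `langgraph` / `langchain-core` dependencies must be installed:

```python
# Hedged usage sketch; the import path is assumed, and langgraph must be installed.
from crewai.agents.agent_adapters.langgraph.langgraph_adapter import (
    LangGraphAgentAdapter,
)

adapter = LangGraphAgentAdapter(
    role="Research Analyst",
    goal="Summarize new papers",
    backstory="A meticulous reader of ML literature.",
    llm="gpt-4o",
    max_iterations=5,
)
# adapter.execute_task(task) routes the task through the LangGraph ReAct graph,
# with conversation state checkpointed by MemorySaver under a per-task thread id.
```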
src/crewai/agents/agent_adapters/langgraph/langgraph_tool_adapter.py:

@@ -1,38 +1,72 @@
+"""LangGraph tool adapter for CrewAI tool integration.
+
+This module contains the LangGraphToolAdapter class that converts CrewAI tools
+to LangGraph-compatible format using langchain_core.tools.
+"""
+
 import inspect
-from typing import Any, List, Optional
+from collections.abc import Awaitable
+from typing import Any

 from crewai.agents.agent_adapters.base_tool_adapter import BaseToolAdapter
 from crewai.tools.base_tool import BaseTool


 class LangGraphToolAdapter(BaseToolAdapter):
-    """Adapts CrewAI tools to LangGraph agent tool compatible format"""
+    """Adapts CrewAI tools to LangGraph agent tool compatible format.
+
+    Converts CrewAI BaseTool instances to langchain_core.tools format
+    that can be used by LangGraph agents.
+    """

-    def __init__(self, tools: Optional[List[BaseTool]] = None):
-        self.original_tools = tools or []
-        self.converted_tools = []
+    def __init__(self, tools: list[BaseTool] | None = None) -> None:
+        """Initialize the tool adapter.
+
+        Args:
+            tools: Optional list of CrewAI tools to adapt.
+        """
+        super().__init__()
+        self.original_tools: list[BaseTool] = tools or []
+        self.converted_tools: list[Any] = []

-    def configure_tools(self, tools: List[BaseTool]) -> None:
-        """
-        Configure and convert CrewAI tools to LangGraph-compatible format.
-        LangGraph expects tools in langchain_core.tools format.
-        """
-        from langchain_core.tools import BaseTool, StructuredTool
+    def configure_tools(self, tools: list[BaseTool]) -> None:
+        """Configure and convert CrewAI tools to LangGraph-compatible format.
+
+        LangGraph expects tools in langchain_core.tools format. This method
+        converts CrewAI BaseTool instances to StructuredTool instances.
+
+        Args:
+            tools: List of CrewAI tools to convert.
+        """
+        from langchain_core.tools import BaseTool as LangChainBaseTool
+        from langchain_core.tools import StructuredTool

-        converted_tools = []
+        converted_tools: list[Any] = []
         if self.original_tools:
-            all_tools = tools + self.original_tools
+            all_tools: list[BaseTool] = tools + self.original_tools
         else:
             all_tools = tools
         for tool in all_tools:
-            if isinstance(tool, BaseTool):
+            if isinstance(tool, LangChainBaseTool):
                 converted_tools.append(tool)
                 continue

-            sanitized_name = self.sanitize_tool_name(tool.name)
+            sanitized_name: str = self.sanitize_tool_name(tool.name)

-            async def tool_wrapper(*args, tool=tool, **kwargs):
-                output = None
+            async def tool_wrapper(
+                *args: Any, tool: BaseTool = tool, **kwargs: Any
+            ) -> Any:
+                """Wrapper function to adapt CrewAI tool calls to LangGraph format.
+
+                Args:
+                    *args: Positional arguments for the tool.
+                    tool: The CrewAI tool to wrap.
+                    **kwargs: Keyword arguments for the tool.
+
+                Returns:
+                    The result from the tool execution.
+                """
+                output: Any | Awaitable[Any]
                 if len(args) > 0 and isinstance(args[0], str):
                     output = tool.run(args[0])
                 elif "input" in kwargs:
@@ -41,12 +75,12 @@ class LangGraphToolAdapter(BaseToolAdapter):
                     output = tool.run(**kwargs)

                 if inspect.isawaitable(output):
-                    result = await output
+                    result: Any = await output
                 else:
                     result = output
                 return result

-            converted_tool = StructuredTool(
+            converted_tool: StructuredTool = StructuredTool(
                 name=sanitized_name,
                 description=tool.description,
                 func=tool_wrapper,
@@ -57,5 +91,10 @@ class LangGraphToolAdapter(BaseToolAdapter):

         self.converted_tools = converted_tools

-    def tools(self) -> List[Any]:
+    def tools(self) -> list[Any]:
+        """Get the list of converted tools.
+
+        Returns:
+            List of LangGraph-compatible tools.
+        """
         return self.converted_tools or []
src/crewai/agents/agent_adapters/langgraph/protocols.py (new file, 55 lines)
@@ -0,0 +1,55 @@
"""Type protocols for LangGraph modules."""

from typing import Any, Protocol, runtime_checkable


@runtime_checkable
class LangGraphMemorySaver(Protocol):
    """Protocol for LangGraph MemorySaver.

    Defines the interface for LangGraph's memory persistence mechanism.
    """

    def __init__(self) -> None:
        """Initialize the memory saver."""
        ...


@runtime_checkable
class LangGraphCheckPointMemoryModule(Protocol):
    """Protocol for LangGraph checkpoint memory module.

    Defines the interface for modules containing memory checkpoint functionality.
    """

    MemorySaver: type[LangGraphMemorySaver]


@runtime_checkable
class LangGraphPrebuiltModule(Protocol):
    """Protocol for LangGraph prebuilt module.

    Defines the interface for modules containing prebuilt agent factories.
    """

    def create_react_agent(
        self,
        model: Any,
        tools: list[Any],
        checkpointer: Any,
        debug: bool = False,
        **kwargs: Any,
    ) -> Any:
        """Create a ReAct agent with the given configuration.

        Args:
            model: The language model to use for the agent.
            tools: List of tools available to the agent.
            checkpointer: Memory checkpointer for state persistence.
            debug: Whether to enable debug mode.
            **kwargs: Additional configuration options.

        Returns:
            The configured ReAct agent instance.
        """
        ...
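These protocols exist so that `require(...)` results can be `cast` to a typed interface without importing LangGraph at module import time. The real `crewai.utilities.import_utils.require` is not shown in this diff; a hypothetical minimal version, for intuition only:

```python
# Hypothetical sketch of a require() helper as used above; the actual
# crewai.utilities.import_utils.require may differ.
import importlib
from types import ModuleType


def require(name: str, *, purpose: str) -> ModuleType:
    """Import `name`, raising an ImportError that names the missing feature."""
    try:
        return importlib.import_module(name)
    except ImportError as e:
        raise ImportError(
            f"{name!r} is required for {purpose}; install the optional dependency."
        ) from e
```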
src/crewai/agents/agent_adapters/langgraph/structured_output_converter.py:

@@ -1,21 +1,45 @@
+"""LangGraph structured output converter for CrewAI task integration.
+
+This module contains the LangGraphConverterAdapter class that handles structured
+output conversion for LangGraph agents, supporting JSON and Pydantic model formats.
+"""
+
 import json
+import re
+from typing import Any, Literal

 from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
 from crewai.utilities.converter import generate_model_description


 class LangGraphConverterAdapter(BaseConverterAdapter):
-    """Adapter for handling structured output conversion in LangGraph agents"""
+    """Adapter for handling structured output conversion in LangGraph agents.
+
+    Converts task output requirements into system prompt modifications and
+    post-processing logic to ensure agents return properly structured outputs.
+    """

-    def __init__(self, agent_adapter):
-        """Initialize the converter adapter with a reference to the agent adapter"""
-        self.agent_adapter = agent_adapter
-        self._output_format = None
-        self._schema = None
-        self._system_prompt_appendix = None
+    def __init__(self, agent_adapter: Any) -> None:
+        """Initialize the converter adapter with a reference to the agent adapter.
+
+        Args:
+            agent_adapter: The LangGraph agent adapter instance.
+        """
+        super().__init__(agent_adapter=agent_adapter)
+        self.agent_adapter: Any = agent_adapter
+        self._output_format: Literal["json", "pydantic"] | None = None
+        self._schema: str | None = None
+        self._system_prompt_appendix: str | None = None

-    def configure_structured_output(self, task) -> None:
-        """Configure the structured output for LangGraph."""
+    def configure_structured_output(self, task: Any) -> None:
+        """Configure the structured output for LangGraph.
+
+        Analyzes the task's output requirements and sets up the necessary
+        formatting and validation logic.
+
+        Args:
+            task: The task object containing output format specifications.
+        """
         if not (task.output_json or task.output_pydantic):
             self._output_format = None
             self._schema = None
@@ -32,7 +56,14 @@ class LangGraphConverterAdapter(BaseConverterAdapter):
         self._system_prompt_appendix = self._generate_system_prompt_appendix()

     def _generate_system_prompt_appendix(self) -> str:
-        """Generate an appendix for the system prompt to enforce structured output"""
+        """Generate an appendix for the system prompt to enforce structured output.
+
+        Creates instructions that are appended to the system prompt to guide
+        the agent in producing properly formatted output.
+
+        Returns:
+            System prompt appendix string, or empty string if no structured output.
+        """
         if not self._output_format or not self._schema:
             return ""
@@ -41,19 +72,36 @@ Important: Your final answer MUST be provided in the following structured format:

 {self._schema}

-DO NOT include any markdown code blocks, backticks, or other formatting around your response.
+DO NOT include any markdown code blocks, backticks, or other formatting around your response.
 The output should be raw JSON that exactly matches the specified schema.
 """

     def enhance_system_prompt(self, original_prompt: str) -> str:
-        """Add structured output instructions to the system prompt if needed"""
+        """Add structured output instructions to the system prompt if needed.
+
+        Args:
+            original_prompt: The base system prompt.
+
+        Returns:
+            Enhanced system prompt with structured output instructions.
+        """
         if not self._system_prompt_appendix:
             return original_prompt

         return f"{original_prompt}\n{self._system_prompt_appendix}"

     def post_process_result(self, result: str) -> str:
-        """Post-process the result to ensure it matches the expected format"""
+        """Post-process the result to ensure it matches the expected format.
+
+        Attempts to extract and validate JSON content from agent responses,
+        handling cases where JSON may be wrapped in markdown or other formatting.
+
+        Args:
+            result: The raw result string from the agent.
+
+        Returns:
+            Processed result string, ideally in valid JSON format.
+        """
         if not self._output_format:
             return result
@@ -65,16 +113,16 @@ The output should be raw JSON that exactly matches the specified schema.
                 return result
             except json.JSONDecodeError:
                 # Try to extract JSON from the text
-                import re
-
-                json_match = re.search(r"(\{.*\})", result, re.DOTALL)
+                json_match: re.Match[str] | None = re.search(
+                    r"(\{.*})", result, re.DOTALL
+                )
                 if json_match:
                     try:
-                        extracted = json_match.group(1)
+                        extracted: str = json_match.group(1)
                         # Validate it's proper JSON
                         json.loads(extracted)
                         return extracted
-                    except:
+                    except json.JSONDecodeError:
                         pass

         return result
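The `post_process_result` flow above reduces to: accept valid JSON as-is, otherwise regex-extract the first `{...}` span and validate it. The same logic as a standalone, runnable sketch:

```python
# Standalone restatement of the JSON-salvage logic in post_process_result above.
import json
import re


def salvage_json(result: str) -> str:
    try:
        json.loads(result)
        return result  # already valid JSON
    except json.JSONDecodeError:
        match = re.search(r"(\{.*})", result, re.DOTALL)
        if match:
            try:
                extracted = match.group(1)
                json.loads(extracted)  # validate before returning
                return extracted
            except json.JSONDecodeError:
                pass
    return result


print(salvage_json('Sure!\n```json\n{"answer": 42}\n```'))  # -> {"answer": 42}
```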
OpenAI agents adapter (src/crewai/agents/agent_adapters/openai_agents/):

@@ -1,78 +1,99 @@
-from typing import Any, List, Optional
+"""OpenAI agents adapter for CrewAI integration.
+
+This module contains the OpenAIAgentAdapter class that integrates OpenAI Assistants
+with CrewAI's agent system, providing tool integration and structured output support.
+"""

-from pydantic import Field, PrivateAttr
+from typing import Any, cast
+
+from pydantic import ConfigDict, Field, PrivateAttr
+from typing_extensions import Unpack

 from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
+from crewai.agents.agent_adapters.openai_agents.openai_agent_tool_adapter import (
+    OpenAIAgentToolAdapter,
+)
+from crewai.agents.agent_adapters.openai_agents.protocols import (
+    AgentKwargs,
+    OpenAIAgentsModule,
+)
+from crewai.agents.agent_adapters.openai_agents.protocols import (
+    OpenAIAgent as OpenAIAgentProtocol,
+)
 from crewai.agents.agent_adapters.openai_agents.structured_output_converter import (
     OpenAIConverterAdapter,
 )
 from crewai.agents.agent_builder.base_agent import BaseAgent
-from crewai.tools import BaseTool
-from crewai.tools.agent_tools.agent_tools import AgentTools
-from crewai.utilities import Logger
 from crewai.events.event_bus import crewai_event_bus
 from crewai.events.types.agent_events import (
     AgentExecutionCompletedEvent,
     AgentExecutionErrorEvent,
     AgentExecutionStartedEvent,
 )
+from crewai.tools import BaseTool
+from crewai.tools.agent_tools.agent_tools import AgentTools
+from crewai.utilities import Logger
+from crewai.utilities.import_utils import require

-try:
-    from agents import Agent as OpenAIAgent  # type: ignore
-    from agents import Runner, enable_verbose_stdout_logging  # type: ignore
-
-    from .openai_agent_tool_adapter import OpenAIAgentToolAdapter
-
-    OPENAI_AVAILABLE = True
-except ImportError:
-    OPENAI_AVAILABLE = False
+openai_agents_module = cast(
+    OpenAIAgentsModule,
+    require(
+        "agents",
+        purpose="OpenAI agents functionality",
+    ),
+)
+OpenAIAgent = openai_agents_module.Agent
+Runner = openai_agents_module.Runner
+enable_verbose_stdout_logging = openai_agents_module.enable_verbose_stdout_logging


 class OpenAIAgentAdapter(BaseAgentAdapter):
-    """Adapter for OpenAI Assistants"""
+    """Adapter for OpenAI Assistants.

-    model_config = {"arbitrary_types_allowed": True}
+    Integrates OpenAI Assistants API with CrewAI's agent system, providing
+    tool configuration, structured output handling, and task execution.
+    """

-    _openai_agent: "OpenAIAgent" = PrivateAttr()
-    _logger: Logger = PrivateAttr(default_factory=lambda: Logger())
-    _active_thread: Optional[str] = PrivateAttr(default=None)
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    _openai_agent: OpenAIAgentProtocol = PrivateAttr()
+    _logger: Logger = PrivateAttr(default_factory=Logger)
+    _active_thread: str | None = PrivateAttr(default=None)
     function_calling_llm: Any = Field(default=None)
     step_callback: Any = Field(default=None)
-    _tool_adapter: "OpenAIAgentToolAdapter" = PrivateAttr()
+    _tool_adapter: OpenAIAgentToolAdapter = PrivateAttr()
     _converter_adapter: OpenAIConverterAdapter = PrivateAttr()

     def __init__(
         self,
-        model: str = "gpt-4o-mini",
-        tools: Optional[List[BaseTool]] = None,
-        agent_config: Optional[dict] = None,
-        **kwargs,
-    ):
-        if not OPENAI_AVAILABLE:
-            raise ImportError(
-                "OpenAI Agent Dependencies are not installed. Please install it using `uv add openai-agents`"
-            )
-        else:
-            role = kwargs.pop("role", None)
-            goal = kwargs.pop("goal", None)
-            backstory = kwargs.pop("backstory", None)
-            super().__init__(
-                role=role,
-                goal=goal,
-                backstory=backstory,
-                tools=tools,
-                agent_config=agent_config,
-                **kwargs,
-            )
-            self._tool_adapter = OpenAIAgentToolAdapter(tools=tools)
-            self.llm = model
-            self._converter_adapter = OpenAIConverterAdapter(self)
+        **kwargs: Unpack[AgentKwargs],
+    ) -> None:
+        """Initialize the OpenAI agent adapter.
+
+        Args:
+            **kwargs: All initialization arguments including role, goal, backstory,
+                model, tools, and agent_config.
+
+        Raises:
+            ImportError: If OpenAI agent dependencies are not installed.
+        """
+        self.llm = kwargs.pop("model", "gpt-4o-mini")
+        super().__init__(**kwargs)
+        self._tool_adapter = OpenAIAgentToolAdapter(tools=kwargs.get("tools"))
+        self._converter_adapter = OpenAIConverterAdapter(agent_adapter=self)

     def _build_system_prompt(self) -> str:
-        """Build a system prompt for the OpenAI agent."""
+        """Build a system prompt for the OpenAI agent.
+
+        Creates a prompt containing the agent's role, goal, and backstory,
+        then enhances it with structured output instructions if needed.
+
+        Returns:
+            The complete system prompt string.
+        """
         base_prompt = f"""
 You are {self.role}.

 Your goal is: {self.goal}

 Your backstory: {self.backstory}
@@ -84,10 +105,25 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
     def execute_task(
         self,
         task: Any,
-        context: Optional[str] = None,
-        tools: Optional[List[BaseTool]] = None,
+        context: str | None = None,
+        tools: list[BaseTool] | None = None,
     ) -> str:
-        """Execute a task using the OpenAI Assistant"""
+        """Execute a task using the OpenAI Assistant.
+
+        Configures the assistant, processes the task, and handles event emission
+        for execution tracking.
+
+        Args:
+            task: The task object to execute.
+            context: Optional context information for the task.
+            tools: Optional additional tools for this execution.
+
+        Returns:
+            The final answer from the task execution.
+
+        Raises:
+            Exception: If task execution fails.
+        """
         self._converter_adapter.configure_structured_output(task)
         self.create_agent_executor(tools)
@@ -95,7 +131,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
             enable_verbose_stdout_logging()

         try:
-            task_prompt = task.prompt()
+            task_prompt: str = task.prompt()
             if context:
                 task_prompt = self.i18n.slice("task_with_context").format(
                     task=task_prompt, context=context
@@ -109,8 +145,8 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
                     task=task,
                 ),
             )
-            result = self.agent_executor.run_sync(self._openai_agent, task_prompt)
-            final_answer = self.handle_execution_result(result)
+            result: Any = self.agent_executor.run_sync(self._openai_agent, task_prompt)
+            final_answer: str = self.handle_execution_result(result)
             crewai_event_bus.emit(
                 self,
                 event=AgentExecutionCompletedEvent(
@@ -120,7 +156,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
             return final_answer

         except Exception as e:
-            self._logger.log("error", f"Error executing OpenAI task: {str(e)}")
+            self._logger.log("error", f"Error executing OpenAI task: {e!s}")
             crewai_event_bus.emit(
                 self,
                 event=AgentExecutionErrorEvent(
@@ -131,15 +167,22 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
             )
             raise

-    def create_agent_executor(self, tools: Optional[List[BaseTool]] = None) -> None:
-        """
-        Configure the OpenAI agent for execution.
-        While OpenAI handles execution differently through Runner,
-        we can use this method to set up tools and configurations.
-        """
-        all_tools = list(self.tools or []) + list(tools or [])
-
-        instructions = self._build_system_prompt()
+    def create_agent_executor(self, tools: list[BaseTool] | None = None) -> None:
+        """Configure the OpenAI agent for execution.
+
+        While OpenAI handles execution differently through Runner,
+        this method sets up tools and agent configuration.
+
+        Args:
+            tools: Optional tools to configure for the agent.
+
+        Notes:
+            TODO: Properly type agent_executor in BaseAgent to avoid type issues
+            when assigning Runner class to this attribute.
+        """
+        all_tools: list[BaseTool] = list(self.tools or []) + list(tools or [])
+
+        instructions: str = self._build_system_prompt()
         self._openai_agent = OpenAIAgent(
             name=self.role,
             instructions=instructions,
@@ -152,27 +195,48 @@ class OpenAIAgentAdapter(BaseAgentAdapter):

         self.agent_executor = Runner

-    def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
-        """Configure tools for the OpenAI Assistant"""
+    def configure_tools(self, tools: list[BaseTool] | None = None) -> None:
+        """Configure tools for the OpenAI Assistant.
+
+        Args:
+            tools: Optional tools to configure for the assistant.
+        """
         if tools:
             self._tool_adapter.configure_tools(tools)
             if self._tool_adapter.converted_tools:
                 self._openai_agent.tools = self._tool_adapter.converted_tools

     def handle_execution_result(self, result: Any) -> str:
-        """Process OpenAI Assistant execution result converting any structured output to a string"""
+        """Process OpenAI Assistant execution result.
+
+        Converts any structured output to a string through the converter adapter.
+
+        Args:
+            result: The execution result from the OpenAI assistant.
+
+        Returns:
+            Processed result as a string.
+        """
         return self._converter_adapter.post_process_result(result.final_output)

-    def get_delegation_tools(self, agents: List[BaseAgent]) -> List[BaseTool]:
-        """Implement delegation tools support"""
-        agent_tools = AgentTools(agents=agents)
-        tools = agent_tools.tools()
-        return tools
+    def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
+        """Implement delegation tools support.
+
+        Creates delegation tools that allow this agent to delegate tasks to other agents.
+
+        Args:
+            agents: List of agents available for delegation.
+
+        Returns:
+            List of delegation tools.
+        """
+        agent_tools: AgentTools = AgentTools(agents=agents)
+        return agent_tools.tools()

-    def configure_structured_output(self, task) -> None:
-        """Configure the structured output for the specific agent implementation.
-
-        Args:
-            structured_output: The structured output to be configured
-        """
+    def configure_structured_output(self, task: Any) -> None:
+        """Configure the structured output for the specific agent implementation.
+
+        Args:
+            task: The task object containing output format specifications.
+        """
         self._converter_adapter.configure_structured_output(task)
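A hedged usage sketch matching the kwargs-based constructor above; the import path is assumed, and the optional `openai-agents` dependency must be installed:

```python
# Hedged usage sketch; import path assumed, `openai-agents` must be installed.
from crewai.agents.agent_adapters.openai_agents.openai_adapter import (
    OpenAIAgentAdapter,
)

adapter = OpenAIAgentAdapter(
    role="Support Agent",
    goal="Answer product questions accurately",
    backstory="Knows the product documentation inside out.",
    model="gpt-4o-mini",
)
# adapter.execute_task(task) builds the system prompt from role/goal/backstory
# and runs it through the Agents SDK Runner.
```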
@@ -1,57 +1,125 @@
-import inspect
-from typing import Any, List, Optional
+"""OpenAI agent tool adapter for CrewAI tool integration.
 
-from agents import FunctionTool, Tool
+This module contains the OpenAIAgentToolAdapter class that converts CrewAI tools
+to OpenAI Assistant-compatible format using the agents library.
+"""
+
+import inspect
+import json
+import re
+from collections.abc import Awaitable
+from typing import Any, cast
 
 from crewai.agents.agent_adapters.base_tool_adapter import BaseToolAdapter
+from crewai.agents.agent_adapters.openai_agents.protocols import (
+    OpenAIFunctionTool,
+    OpenAITool,
+)
 from crewai.tools import BaseTool
+from crewai.utilities.import_utils import require
+
+agents_module = cast(
+    Any,
+    require(
+        "agents",
+        purpose="OpenAI agents functionality",
+    ),
+)
+FunctionTool = agents_module.FunctionTool
+Tool = agents_module.Tool
 
 
 class OpenAIAgentToolAdapter(BaseToolAdapter):
-    """Adapter for OpenAI Assistant tools"""
+    """Adapter for OpenAI Assistant tools.
+
+    Converts CrewAI BaseTool instances to OpenAI Assistant FunctionTool format
+    that can be used by OpenAI agents.
+    """
 
-    def __init__(self, tools: Optional[List[BaseTool]] = None):
-        self.original_tools = tools or []
+    def __init__(self, tools: list[BaseTool] | None = None) -> None:
+        """Initialize the tool adapter.
+
+        Args:
+            tools: Optional list of CrewAI tools to adapt.
+        """
+        super().__init__()
+        self.original_tools: list[BaseTool] = tools or []
+        self.converted_tools: list[OpenAITool] = []
 
-    def configure_tools(self, tools: List[BaseTool]) -> None:
-        """Configure tools for the OpenAI Assistant"""
+    def configure_tools(self, tools: list[BaseTool]) -> None:
+        """Configure tools for the OpenAI Assistant.
+
+        Merges provided tools with original tools and converts them to
+        OpenAI Assistant format.
+
+        Args:
+            tools: List of CrewAI tools to configure.
+        """
         if self.original_tools:
-            all_tools = tools + self.original_tools
+            all_tools: list[BaseTool] = tools + self.original_tools
         else:
             all_tools = tools
         if all_tools:
            self.converted_tools = self._convert_tools_to_openai_format(all_tools)
 
+    @staticmethod
     def _convert_tools_to_openai_format(
-        self, tools: Optional[List[BaseTool]]
-    ) -> List[Tool]:
-        """Convert CrewAI tools to OpenAI Assistant tool format"""
+        tools: list[BaseTool] | None,
+    ) -> list[OpenAITool]:
+        """Convert CrewAI tools to OpenAI Assistant tool format.
+
+        Args:
+            tools: List of CrewAI tools to convert.
+
+        Returns:
+            List of OpenAI Assistant FunctionTool instances.
+        """
         if not tools:
             return []
 
         def sanitize_tool_name(name: str) -> str:
-            """Convert tool name to match OpenAI's required pattern"""
-            import re
+            """Convert tool name to match OpenAI's required pattern.
 
-            sanitized = re.sub(r"[^a-zA-Z0-9_-]", "_", name).lower()
-            return sanitized
+            Args:
+                name: Original tool name.
+
+            Returns:
+                Sanitized tool name matching OpenAI requirements.
+            """
+            return re.sub(r"[^a-zA-Z0-9_-]", "_", name).lower()
 
-        def create_tool_wrapper(tool: BaseTool):
-            """Create a wrapper function that handles the OpenAI function tool interface"""
+        def create_tool_wrapper(tool: BaseTool) -> Any:
+            """Create a wrapper function that handles the OpenAI function tool interface.
+
+            Args:
+                tool: The CrewAI tool to wrap.
+
+            Returns:
+                Async wrapper function for OpenAI agent integration.
+            """
 
             async def wrapper(context_wrapper: Any, arguments: Any) -> Any:
+                """Wrapper function to adapt CrewAI tool calls to OpenAI format.
+
+                Args:
+                    context_wrapper: OpenAI context wrapper.
+                    arguments: Tool arguments from OpenAI.
+
+                Returns:
+                    Tool execution result.
+                """
                 # Get the parameter name from the schema
-                param_name = list(
-                    tool.args_schema.model_json_schema()["properties"].keys()
-                )[0]
+                param_name: str = next(
+                    iter(tool.args_schema.model_json_schema()["properties"].keys())
+                )
 
                 # Handle different argument types
+                args_dict: dict[str, Any]
                 if isinstance(arguments, dict):
                     args_dict = arguments
                 elif isinstance(arguments, str):
                     try:
-                        import json
-
                         args_dict = json.loads(arguments)
                     except json.JSONDecodeError:
                         args_dict = {param_name: arguments}
@@ -59,11 +127,11 @@ class OpenAIAgentToolAdapter(BaseToolAdapter):
                     args_dict = {param_name: str(arguments)}
 
                 # Run the tool with the processed arguments
-                output = tool._run(**args_dict)
+                output: Any | Awaitable[Any] = tool._run(**args_dict)
 
                 # Await if the tool returned a coroutine
                 if inspect.isawaitable(output):
-                    result = await output
+                    result: Any = await output
                 else:
                     result = output
 
@@ -74,17 +142,20 @@ class OpenAIAgentToolAdapter(BaseToolAdapter):
 
             return wrapper
 
-        openai_tools = []
+        openai_tools: list[OpenAITool] = []
        for tool in tools:
-            schema = tool.args_schema.model_json_schema()
+            schema: dict[str, Any] = tool.args_schema.model_json_schema()
 
             schema.update({"additionalProperties": False, "type": "object"})
 
-            openai_tool = FunctionTool(
-                name=sanitize_tool_name(tool.name),
-                description=tool.description,
-                params_json_schema=schema,
-                on_invoke_tool=create_tool_wrapper(tool),
+            openai_tool: OpenAIFunctionTool = cast(
+                OpenAIFunctionTool,
+                FunctionTool(
+                    name=sanitize_tool_name(tool.name),
+                    description=tool.description,
+                    params_json_schema=schema,
+                    on_invoke_tool=create_tool_wrapper(tool),
+                ),
            )
            openai_tools.append(openai_tool)
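To make the conversion concrete, here is a minimal, self-contained sketch of the same pattern the adapter uses: sanitize the tool name to OpenAI's allowed character set, derive a JSON schema from the tool's pydantic args model, and wrap the synchronous `_run` in an async callable that tolerates dict or string arguments. The `FakeTool` class and the final prints are illustrative stand-ins, not part of the crewAI or `agents` APIs.

```python
import asyncio
import inspect
import json
import re
from typing import Any

from pydantic import BaseModel


class SearchArgs(BaseModel):
    query: str


class FakeTool:
    """Illustrative stand-in for a CrewAI BaseTool."""

    name = "Web Search!"
    description = "Search the web."
    args_schema = SearchArgs

    def _run(self, query: str) -> str:
        return f"results for {query}"


def sanitize_tool_name(name: str) -> str:
    # OpenAI function names must match ^[a-zA-Z0-9_-]+$
    return re.sub(r"[^a-zA-Z0-9_-]", "_", name).lower()


def make_async_wrapper(tool: FakeTool):
    async def wrapper(_context: Any, arguments: Any) -> Any:
        # The first schema property is treated as the default parameter name
        param_name = next(iter(tool.args_schema.model_json_schema()["properties"]))
        if isinstance(arguments, dict):
            args = arguments
        elif isinstance(arguments, str):
            try:
                args = json.loads(arguments)
            except json.JSONDecodeError:
                args = {param_name: arguments}
        else:
            args = {param_name: str(arguments)}
        output = tool._run(**args)
        return await output if inspect.isawaitable(output) else output

    return wrapper


tool = FakeTool()
print(sanitize_tool_name(tool.name))  # web_search_
print(asyncio.run(make_async_wrapper(tool)(None, '{"query": "crewai"}')))
```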
74  src/crewai/agents/agent_adapters/openai_agents/protocols.py  (new file)
@@ -0,0 +1,74 @@
+"""Type protocols for OpenAI agents modules."""
+
+from collections.abc import Callable
+from typing import Any, Protocol, TypedDict, runtime_checkable
+
+from crewai.tools.base_tool import BaseTool
+
+
+class AgentKwargs(TypedDict, total=False):
+    """Typed dict for agent initialization kwargs."""
+
+    role: str
+    goal: str
+    backstory: str
+    model: str
+    tools: list[BaseTool] | None
+    agent_config: dict[str, Any] | None
+
+
+@runtime_checkable
+class OpenAIAgent(Protocol):
+    """Protocol for OpenAI Agent."""
+
+    def __init__(
+        self,
+        name: str,
+        instructions: str,
+        model: str,
+        **kwargs: Any,
+    ) -> None:
+        """Initialize the OpenAI agent."""
+        ...
+
+    tools: list[Any]
+    output_type: Any
+
+
+@runtime_checkable
+class OpenAIRunner(Protocol):
+    """Protocol for OpenAI Runner."""
+
+    @classmethod
+    def run_sync(cls, agent: OpenAIAgent, message: str) -> Any:
+        """Run agent synchronously with a message."""
+        ...
+
+
+@runtime_checkable
+class OpenAIAgentsModule(Protocol):
+    """Protocol for OpenAI agents module."""
+
+    Agent: type[OpenAIAgent]
+    Runner: type[OpenAIRunner]
+    enable_verbose_stdout_logging: Callable[[], None]
+
+
+@runtime_checkable
+class OpenAITool(Protocol):
+    """Protocol for OpenAI Tool."""
+
+
+@runtime_checkable
+class OpenAIFunctionTool(Protocol):
+    """Protocol for OpenAI FunctionTool."""
+
+    def __init__(
+        self,
+        name: str,
+        description: str,
+        params_json_schema: dict[str, Any],
+        on_invoke_tool: Any,
+    ) -> None:
+        """Initialize the function tool."""
+        ...
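A quick illustration of why these protocols are `runtime_checkable`: structural typing lets the adapter validate objects from a dynamically imported module without importing their concrete classes. A minimal sketch, where `SomeVendorTool` is a hypothetical third-party class:

```python
from typing import Any, Protocol, runtime_checkable


@runtime_checkable
class FunctionToolLike(Protocol):
    name: str
    description: str
    params_json_schema: dict[str, Any]


class SomeVendorTool:  # hypothetical class, unknown at type-check time
    name = "lookup"
    description = "Look things up."
    params_json_schema: dict[str, Any] = {"type": "object"}


# isinstance() matches by structure (attribute presence), not inheritance
print(isinstance(SomeVendorTool(), FunctionToolLike))  # True
```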
@@ -1,5 +1,12 @@
+"""OpenAI structured output converter for CrewAI task integration.
+
+This module contains the OpenAIConverterAdapter class that handles structured
+output conversion for OpenAI agents, supporting JSON and Pydantic model formats.
+"""
+
 import json
 import re
+from typing import Any, Literal
 
 from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
 from crewai.utilities.converter import generate_model_description
@@ -7,8 +14,7 @@ from crewai.utilities.i18n import I18N
 
 
 class OpenAIConverterAdapter(BaseConverterAdapter):
-    """
-    Adapter for handling structured output conversion in OpenAI agents.
+    """Adapter for handling structured output conversion in OpenAI agents.
 
     This adapter enhances the OpenAI agent to handle structured output formats
     and post-processes the results when needed.
@@ -19,19 +25,23 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
     _output_model: The Pydantic model for the output
     """
 
-    def __init__(self, agent_adapter):
-        """Initialize the converter adapter with a reference to the agent adapter"""
-        self.agent_adapter = agent_adapter
-        self._output_format = None
-        self._schema = None
-        self._output_model = None
-
-    def configure_structured_output(self, task) -> None:
-        """
-        Configure the structured output for OpenAI agent based on task requirements.
+    def __init__(self, agent_adapter: Any) -> None:
+        """Initialize the converter adapter with a reference to the agent adapter.
 
         Args:
-            task: The task containing output format requirements
+            agent_adapter: The OpenAI agent adapter instance.
         """
+        super().__init__(agent_adapter=agent_adapter)
+        self.agent_adapter: Any = agent_adapter
+        self._output_format: Literal["json", "pydantic"] | None = None
+        self._schema: str | None = None
+        self._output_model: Any = None
+
+    def configure_structured_output(self, task: Any) -> None:
+        """Configure the structured output for OpenAI agent based on task requirements.
+
+        Args:
+            task: The task containing output format requirements.
+        """
         # Reset configuration
         self._output_format = None
@@ -55,19 +65,18 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
             self._output_model = task.output_pydantic
 
     def enhance_system_prompt(self, base_prompt: str) -> str:
-        """
-        Enhance the base system prompt with structured output requirements if needed.
+        """Enhance the base system prompt with structured output requirements if needed.
 
         Args:
-            base_prompt: The original system prompt
+            base_prompt: The original system prompt.
 
         Returns:
-            Enhanced system prompt with output format instructions if needed
+            Enhanced system prompt with output format instructions if needed.
         """
         if not self._output_format:
             return base_prompt
 
-        output_schema = (
+        output_schema: str = (
             I18N()
             .slice("formatted_task_instructions")
             .format(output_format=self._schema)
@@ -76,16 +85,15 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
         return f"{base_prompt}\n\n{output_schema}"
 
     def post_process_result(self, result: str) -> str:
-        """
-        Post-process the result to ensure it matches the expected format.
+        """Post-process the result to ensure it matches the expected format.
 
         This method attempts to extract valid JSON from the result if necessary.
 
         Args:
-            result: The raw result from the agent
+            result: The raw result from the agent.
 
         Returns:
-            Processed result conforming to the expected output format
+            Processed result conforming to the expected output format.
         """
         if not self._output_format:
             return result
@@ -97,26 +105,30 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
                 return result
             except json.JSONDecodeError:
                 # Try to extract JSON from markdown code blocks
-                code_block_pattern = r"```(?:json)?\s*([\s\S]*?)```"
-                code_blocks = re.findall(code_block_pattern, result)
+                code_block_pattern: str = r"```(?:json)?\s*([\s\S]*?)```"
+                code_blocks: list[str] = re.findall(code_block_pattern, result)
 
                 for block in code_blocks:
+                    stripped_block = block.strip()
                     try:
-                        json.loads(block.strip())
-                        return block.strip()
+                        json.loads(stripped_block)
+                        return stripped_block
                     except json.JSONDecodeError:
-                        continue
+                        pass
 
                 # Try to extract any JSON-like structure
-                json_pattern = r"(\{[\s\S]*\})"
-                json_matches = re.findall(json_pattern, result, re.DOTALL)
+                json_pattern: str = r"(\{[\s\S]*\})"
+                json_matches: list[str] = re.findall(json_pattern, result, re.DOTALL)
 
                 for match in json_matches:
+                    is_valid = True
                     try:
                         json.loads(match)
-                        return match
                     except json.JSONDecodeError:
-                        continue
+                        is_valid = False
+
+                    if is_valid:
+                        return match
 
                 # If all extraction attempts fail, return the original
                 return str(result)
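The post-processing fallback chain above is easy to verify in isolation. Here is a minimal sketch of the same extraction logic, runnable with only the standard library:

```python
import json
import re


def extract_json(result: str) -> str:
    """Return the first parseable JSON found in `result`, else the original text."""
    try:
        json.loads(result)
        return result  # already valid JSON
    except json.JSONDecodeError:
        pass

    # 1) JSON inside markdown code fences
    for block in re.findall(r"```(?:json)?\s*([\s\S]*?)```", result):
        stripped = block.strip()
        try:
            json.loads(stripped)
            return stripped
        except json.JSONDecodeError:
            pass

    # 2) Any brace-delimited structure
    for match in re.findall(r"(\{[\s\S]*\})", result, re.DOTALL):
        try:
            json.loads(match)
            return match
        except json.JSONDecodeError:
            pass

    return result


print(extract_json('Here you go:\n```json\n{"a": 1}\n```'))  # {"a": 1}
```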
@@ -1,8 +1,9 @@
 import uuid
 from abc import ABC, abstractmethod
+from collections.abc import Callable
 from copy import copy as shallow_copy
 from hashlib import md5
-from typing import Any, Callable, Dict, List, Optional, TypeVar
+from typing import Any, TypeVar
 
 from pydantic import (
     UUID4,
@@ -25,7 +26,6 @@ from crewai.security.security_config import SecurityConfig
 from crewai.tools.base_tool import BaseTool, Tool
 from crewai.utilities import I18N, Logger, RPMController
 from crewai.utilities.config import process_config
-from crewai.utilities.converter import Converter
 from crewai.utilities.string_utils import interpolate_only
 
 T = TypeVar("T", bound="BaseAgent")
@@ -81,17 +81,17 @@ class BaseAgent(ABC, BaseModel):
 
     __hash__ = object.__hash__  # type: ignore
     _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))
-    _rpm_controller: Optional[RPMController] = PrivateAttr(default=None)
+    _rpm_controller: RPMController | None = PrivateAttr(default=None)
     _request_within_rpm_limit: Any = PrivateAttr(default=None)
-    _original_role: Optional[str] = PrivateAttr(default=None)
-    _original_goal: Optional[str] = PrivateAttr(default=None)
-    _original_backstory: Optional[str] = PrivateAttr(default=None)
+    _original_role: str | None = PrivateAttr(default=None)
+    _original_goal: str | None = PrivateAttr(default=None)
+    _original_backstory: str | None = PrivateAttr(default=None)
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
     role: str = Field(description="Role of the agent")
     goal: str = Field(description="Objective of the agent")
     backstory: str = Field(description="Backstory of the agent")
-    config: Optional[Dict[str, Any]] = Field(
+    config: dict[str, Any] | None = Field(
         description="Configuration for the agent", default=None, exclude=True
     )
     cache: bool = Field(
@@ -100,7 +100,7 @@ class BaseAgent(ABC, BaseModel):
     verbose: bool = Field(
         default=False, description="Verbose mode for the Agent Execution"
     )
-    max_rpm: Optional[int] = Field(
+    max_rpm: int | None = Field(
         default=None,
         description="Maximum number of requests per minute for the agent execution to be respected.",
     )
@@ -108,7 +108,7 @@ class BaseAgent(ABC, BaseModel):
         default=False,
         description="Enable agent to delegate and ask questions among each other.",
     )
-    tools: Optional[List[BaseTool]] = Field(
+    tools: list[BaseTool] | None = Field(
         default_factory=list, description="Tools at agents' disposal"
     )
     max_iter: int = Field(
@@ -122,27 +122,27 @@ class BaseAgent(ABC, BaseModel):
     )
     crew: Any = Field(default=None, description="Crew to which the agent belongs.")
     i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
-    cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
+    cache_handler: InstanceOf[CacheHandler] | None = Field(
         default=None, description="An instance of the CacheHandler class."
     )
     tools_handler: InstanceOf[ToolsHandler] = Field(
         default_factory=ToolsHandler,
         description="An instance of the ToolsHandler class.",
     )
-    tools_results: List[Dict[str, Any]] = Field(
+    tools_results: list[dict[str, Any]] = Field(
         default=[], description="Results of the tools used by the agent."
     )
-    max_tokens: Optional[int] = Field(
+    max_tokens: int | None = Field(
         default=None, description="Maximum number of tokens for the agent's execution."
     )
-    knowledge: Optional[Knowledge] = Field(
+    knowledge: Knowledge | None = Field(
         default=None, description="Knowledge for the agent."
     )
-    knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
+    knowledge_sources: list[BaseKnowledgeSource] | None = Field(
         default=None,
         description="Knowledge sources for the agent.",
     )
-    knowledge_storage: Optional[Any] = Field(
+    knowledge_storage: Any | None = Field(
         default=None,
         description="Custom knowledge storage for the agent.",
     )
@@ -150,13 +150,13 @@ class BaseAgent(ABC, BaseModel):
         default_factory=SecurityConfig,
         description="Security configuration for the agent, including fingerprinting.",
     )
-    callbacks: List[Callable] = Field(
+    callbacks: list[Callable] = Field(
         default=[], description="Callbacks to be used for the agent"
     )
     adapted_agent: bool = Field(
         default=False, description="Whether the agent is adapted"
     )
-    knowledge_config: Optional[KnowledgeConfig] = Field(
+    knowledge_config: KnowledgeConfig | None = Field(
         default=None,
         description="Knowledge configuration for the agent such as limits and threshold",
     )
@@ -168,7 +168,7 @@ class BaseAgent(ABC, BaseModel):
 
     @field_validator("tools")
     @classmethod
-    def validate_tools(cls, tools: List[Any]) -> List[BaseTool]:
+    def validate_tools(cls, tools: list[Any]) -> list[BaseTool]:
         """Validate and process the tools provided to the agent.
 
         This method ensures that each tool is either an instance of BaseTool
@@ -221,7 +221,7 @@ class BaseAgent(ABC, BaseModel):
 
     @field_validator("id", mode="before")
     @classmethod
-    def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
+    def _deny_user_set_id(cls, v: UUID4 | None) -> None:
         if v:
             raise PydanticCustomError(
                 "may_not_set_field", "This field is not to be set by the user.", {}
@@ -252,8 +252,8 @@ class BaseAgent(ABC, BaseModel):
     def execute_task(
         self,
         task: Any,
-        context: Optional[str] = None,
-        tools: Optional[List[BaseTool]] = None,
+        context: str | None = None,
+        tools: list[BaseTool] | None = None,
     ) -> str:
         pass
 
@@ -262,9 +262,8 @@ class BaseAgent(ABC, BaseModel):
         pass
 
     @abstractmethod
-    def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]:
+    def get_delegation_tools(self, agents: list["BaseAgent"]) -> list[BaseTool]:
         """Set the task tools that init BaseAgenTools class."""
-        pass
 
     def copy(self: T) -> T:  # type: ignore # Signature of "copy" incompatible with supertype "BaseModel"
         """Create a deep copy of the Agent."""
@@ -309,7 +308,7 @@ class BaseAgent(ABC, BaseModel):
 
         copied_data = self.model_dump(exclude=exclude)
         copied_data = {k: v for k, v in copied_data.items() if v is not None}
-        copied_agent = type(self)(
+        return type(self)(
             **copied_data,
             llm=existing_llm,
             tools=self.tools,
@@ -318,9 +317,7 @@ class BaseAgent(ABC, BaseModel):
             knowledge_storage=copied_knowledge_storage,
         )
 
-        return copied_agent
-
-    def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
+    def interpolate_inputs(self, inputs: dict[str, Any]) -> None:
         """Interpolate inputs into the agent description and backstory."""
         if self._original_role is None:
             self._original_role = self.role
@@ -362,5 +359,5 @@ class BaseAgent(ABC, BaseModel):
         self._rpm_controller = rpm_controller
         self.create_agent_executor()
 
-    def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
+    def set_knowledge(self, crew_embedder: dict[str, Any] | None = None):
         pass
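The bulk of this change is mechanical: `Optional[X]`, `List[X]`, and `Dict[K, V]` become `X | None`, `list[X]`, and `dict[K, V]` (PEP 604 and PEP 585), which pydantic v2 handles identically. Note this syntax requires Python 3.10+, matching the `requires-python` bound in the templates below. A minimal sketch with an invented model name:

```python
from pydantic import BaseModel, Field


class AgentConfig(BaseModel):
    # previously: max_rpm: Optional[int] = Field(default=None)
    max_rpm: int | None = Field(default=None)
    # previously: tools: Optional[List[str]] = Field(default_factory=list)
    tools: list[str] | None = Field(default_factory=list)


print(AgentConfig().model_dump())  # {'max_rpm': None, 'tools': []}
```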
@@ -1,13 +1,13 @@
 import time
-from typing import TYPE_CHECKING, Dict, List
+from typing import TYPE_CHECKING
 
+from crewai.events.event_listener import event_listener
 from crewai.memory.entity.entity_memory_item import EntityMemoryItem
 from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
 from crewai.utilities import I18N
 from crewai.utilities.converter import ConverterError
 from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
 from crewai.utilities.printer import Printer
-from crewai.events.event_listener import event_listener
 
 if TYPE_CHECKING:
     from crewai.agents.agent_builder.base_agent import BaseAgent
@@ -21,7 +21,7 @@ class CrewAgentExecutorMixin:
     task: "Task"
     iterations: int
     max_iter: int
-    messages: List[Dict[str, str]]
+    messages: list[dict[str, str]]
     _i18n: I18N
     _printer: Printer = Printer()
 
@@ -46,7 +46,6 @@ class CrewAgentExecutorMixin:
             )
         except Exception as e:
             print(f"Failed to add to short term memory: {e}")
-            pass
 
     def _create_external_memory(self, output) -> None:
         """Create and save a external-term memory item if conditions are met."""
@@ -67,7 +66,6 @@ class CrewAgentExecutorMixin:
             )
         except Exception as e:
             print(f"Failed to add to external memory: {e}")
-            pass
 
     def _create_long_term_memory(self, output) -> None:
         """Create and save long-term and entity memory items based on evaluation."""
@@ -113,10 +111,8 @@ class CrewAgentExecutorMixin:
                 self.crew._entity_memory.save(entity_memories)
             except AttributeError as e:
                 print(f"Missing attributes for long term memory: {e}")
-                pass
             except Exception as e:
                 print(f"Failed to add to long term memory: {e}")
-                pass
         elif (
             self.crew
             and self.crew._long_term_memory
@@ -12,7 +12,7 @@ from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecu
 from crewai.agents.parser import (
     AgentAction,
     AgentFinish,
-    OutputParserException,
+    OutputParserError,
 )
 from crewai.agents.tools_handler import ToolsHandler
 from crewai.events.event_bus import crewai_event_bus
@@ -228,7 +228,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     self._invoke_step_callback(formatted_answer)
                     self._append_message(formatted_answer.text)
 
-            except OutputParserException as e:
+            except OutputParserError as e:  # noqa: PERF203
                 formatted_answer = handle_output_parser_exception(
                     e=e,
                     messages=self.messages,
@@ -251,17 +251,20 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                         i18n=self._i18n,
                     )
                     continue
-                else:
-                    handle_unknown_error(self._printer, e)
-                    raise e
+                handle_unknown_error(self._printer, e)
+                raise e
             finally:
                 self.iterations += 1
 
         # During the invoke loop, formatted_answer alternates between AgentAction
         # (when the agent is using tools) and eventually becomes AgentFinish
-        # (when the agent reaches a final answer). This assertion confirms we've
+        # (when the agent reaches a final answer). This check confirms we've
         # reached a final answer and helps type checking understand this transition.
-        assert isinstance(formatted_answer, AgentFinish)
+        if not isinstance(formatted_answer, AgentFinish):
+            raise RuntimeError(
+                "Agent execution ended without reaching a final answer. "
+                f"Got {type(formatted_answer).__name__} instead of AgentFinish."
+            )
         self._show_logs(formatted_answer)
         return formatted_answer
 
@@ -324,9 +327,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             self.agent,
             AgentLogsStartedEvent(
                 agent_role=self.agent.role,
-                task_description=(
-                    getattr(self.task, "description") if self.task else "Not Found"
-                ),
+                task_description=(self.task.description if self.task else "Not Found"),
                 verbose=self.agent.verbose
                 or (hasattr(self, "crew") and getattr(self.crew, "verbose", False)),
             ),
@@ -415,8 +416,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         """
         prompt = prompt.replace("{input}", inputs["input"])
         prompt = prompt.replace("{tool_names}", inputs["tool_names"])
-        prompt = prompt.replace("{tools}", inputs["tools"])
-        return prompt
+        return prompt.replace("{tools}", inputs["tools"])
 
     def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
         """Process human feedback.
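The switch from `assert isinstance(...)` to an explicit `RuntimeError` matters because asserts vanish when Python runs with `-O`, while an `if`/`raise` survives and still narrows the type for static checkers. A minimal sketch of the pattern (the classes here are illustrative stand-ins):

```python
class AgentFinish:
    output = "done"


class AgentAction:
    tool = "search"


def finish(answer: object) -> AgentFinish:
    # Unlike `assert isinstance(answer, AgentFinish)`, this check is not
    # stripped under `python -O`, and mypy narrows `answer` after it.
    if not isinstance(answer, AgentFinish):
        raise RuntimeError(f"Got {type(answer).__name__} instead of AgentFinish.")
    return answer


print(finish(AgentFinish()).output)  # done
# finish(AgentAction())  # raises RuntimeError, even with optimizations on
```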
@@ -7,12 +7,12 @@ AgentAction or AgentFinish objects.
 
 from dataclasses import dataclass
 
-from json_repair import repair_json
+from json_repair import repair_json  # type: ignore[import-untyped]
 
 from crewai.agents.constants import (
+    ACTION_INPUT_ONLY_REGEX,
     ACTION_INPUT_REGEX,
     ACTION_REGEX,
-    ACTION_INPUT_ONLY_REGEX,
     FINAL_ANSWER_ACTION,
     MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
     MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
@@ -43,7 +43,7 @@ class AgentFinish:
     text: str
 
 
-class OutputParserException(Exception):
+class OutputParserError(Exception):
     """Exception raised when output parsing fails.
 
     Attributes:
@@ -51,7 +51,7 @@ class OutputParserError(Exception):
     """
 
     def __init__(self, error: str) -> None:
-        """Initialize OutputParserException.
+        """Initialize OutputParserError.
 
         Args:
             error: The error message.
@@ -87,7 +87,7 @@ def parse(text: str) -> AgentAction | AgentFinish:
         AgentAction or AgentFinish based on the content.
 
     Raises:
-        OutputParserException: If the text format is invalid.
+        OutputParserError: If the text format is invalid.
     """
     thought = _extract_thought(text)
     includes_answer = FINAL_ANSWER_ACTION in text
@@ -104,7 +104,7 @@ def parse(text: str) -> AgentAction | AgentFinish:
             final_answer = final_answer[:-3].rstrip()
         return AgentFinish(thought=thought, output=final_answer, text=text)
 
-    elif action_match:
+    if action_match:
         action = action_match.group(1)
         clean_action = _clean_action(action)
 
@@ -118,19 +118,18 @@ def parse(text: str) -> AgentAction | AgentFinish:
         )
 
     if not ACTION_REGEX.search(text):
-        raise OutputParserException(
+        raise OutputParserError(
            f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{_I18N.slice('final_answer_format')}",
         )
-    elif not ACTION_INPUT_ONLY_REGEX.search(text):
-        raise OutputParserException(
+    if not ACTION_INPUT_ONLY_REGEX.search(text):
+        raise OutputParserError(
             MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
         )
-    else:
-        err_format = _I18N.slice("format_without_tools")
-        error = f"{err_format}"
-        raise OutputParserException(
-            error,
-        )
+    err_format = _I18N.slice("format_without_tools")
+    error = f"{err_format}"
+    raise OutputParserError(
+        error,
+    )
 
 
 def _extract_thought(text: str) -> str:
@@ -149,8 +148,7 @@ def _extract_thought(text: str) -> str:
         return ""
     thought = text[:thought_index].strip()
     # Remove any triple backticks from the thought string
-    thought = thought.replace("```", "").strip()
-    return thought
+    return thought.replace("```", "").strip()
 
 
 def _clean_action(text: str) -> str:
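For intuition on what `parse` distinguishes, here is a stripped-down sketch of the ReAct-style classification it implements; the marker string and regex below are simplified stand-ins for the constants imported from `crewai.agents.constants`, not their real values:

```python
import re

FINAL_ANSWER_ACTION = "Final Answer:"            # stand-in for the real constant
ACTION_REGEX = re.compile(r"Action\s*:\s*(.*)")  # simplified stand-in


def classify(text: str) -> tuple[str, str]:
    """Return ("finish", answer) or ("action", tool name)."""
    if FINAL_ANSWER_ACTION in text:
        return "finish", text.split(FINAL_ANSWER_ACTION)[-1].strip()
    match = ACTION_REGEX.search(text)
    if match:
        return "action", match.group(1).strip()
    raise ValueError("neither a final answer nor an action")  # OutputParserError in crewAI


print(classify("Thought: done\nFinal Answer: 42"))      # ('finish', '42')
print(classify("Thought: search\nAction: web_search"))  # ('action', 'web_search')
```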
@@ -1,8 +1,10 @@
 """Tools handler for managing tool execution and caching."""
 
+import json
+
+from crewai.agents.cache.cache_handler import CacheHandler
 from crewai.tools.cache_tools.cache_tools import CacheTools
 from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
-from crewai.agents.cache.cache_handler import CacheHandler
 
 
 class ToolsHandler:
@@ -37,8 +39,16 @@ class ToolsHandler:
         """
         self.last_used_tool = calling
         if self.cache and should_cache and calling.tool_name != CacheTools().name:
+            # Convert arguments to string for cache
+            input_str = ""
+            if calling.arguments:
+                if isinstance(calling.arguments, dict):
+                    input_str = json.dumps(calling.arguments)
+                else:
+                    input_str = str(calling.arguments)
+
             self.cache.add(
                 tool=calling.tool_name,
-                input=calling.arguments,
+                input=input_str,
                 output=output,
             )
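The motivation for the `input_str` conversion is that cache keys should be stable strings, while `calling.arguments` may be a dict. A sketch of the same normalization; note that `sort_keys=True` is an extra hardening idea shown here for illustration, not something the patch itself adds:

```python
import json


def cache_key(arguments: object) -> str:
    """Normalize tool arguments into a string cache key."""
    if not arguments:
        return ""
    if isinstance(arguments, dict):
        # json.dumps gives a deterministic string; sort_keys also makes
        # it insensitive to dict insertion order.
        return json.dumps(arguments, sort_keys=True)
    return str(arguments)


print(cache_key({"b": 2, "a": 1}))  # {"a": 1, "b": 2}
print(cache_key("plain text"))      # plain text
```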
0  src/crewai/cli/authentication/providers/__init__.py  (new file)
@@ -1,5 +1,6 @@
 from crewai.cli.authentication.providers.base_provider import BaseProvider
 
+
 class Auth0Provider(BaseProvider):
     def get_authorize_url(self) -> str:
         return f"https://{self._get_domain()}/oauth/device/code"
@@ -14,13 +15,20 @@ class Auth0Provider(BaseProvider):
         return f"https://{self._get_domain()}/"
 
     def get_audience(self) -> str:
-        assert self.settings.audience is not None, "Audience is required"
+        if self.settings.audience is None:
+            raise ValueError(
+                "Audience is required. Please set it in the configuration."
+            )
         return self.settings.audience
 
     def get_client_id(self) -> str:
-        assert self.settings.client_id is not None, "Client ID is required"
+        if self.settings.client_id is None:
+            raise ValueError(
+                "Client ID is required. Please set it in the configuration."
+            )
         return self.settings.client_id
 
     def _get_domain(self) -> str:
-        assert self.settings.domain is not None, "Domain is required"
+        if self.settings.domain is None:
+            raise ValueError("Domain is required. Please set it in the configuration.")
         return self.settings.domain
@@ -1,30 +1,26 @@
 from abc import ABC, abstractmethod
 
 from crewai.cli.authentication.main import Oauth2Settings
 
 
 class BaseProvider(ABC):
     def __init__(self, settings: Oauth2Settings):
         self.settings = settings
 
     @abstractmethod
-    def get_authorize_url(self) -> str:
-        ...
+    def get_authorize_url(self) -> str: ...
 
     @abstractmethod
-    def get_token_url(self) -> str:
-        ...
+    def get_token_url(self) -> str: ...
 
     @abstractmethod
-    def get_jwks_url(self) -> str:
-        ...
+    def get_jwks_url(self) -> str: ...
 
     @abstractmethod
-    def get_issuer(self) -> str:
-        ...
+    def get_issuer(self) -> str: ...
 
     @abstractmethod
-    def get_audience(self) -> str:
-        ...
+    def get_audience(self) -> str: ...
 
     @abstractmethod
-    def get_client_id(self) -> str:
-        ...
+    def get_client_id(self) -> str: ...
@@ -1,5 +1,6 @@
 from crewai.cli.authentication.providers.base_provider import BaseProvider
 
+
 class OktaProvider(BaseProvider):
     def get_authorize_url(self) -> str:
         return f"https://{self.settings.domain}/oauth2/default/v1/device/authorize"
@@ -14,9 +15,15 @@ class OktaProvider(BaseProvider):
         return f"https://{self.settings.domain}/oauth2/default"
 
     def get_audience(self) -> str:
-        assert self.settings.audience is not None
+        if self.settings.audience is None:
+            raise ValueError(
+                "Audience is required. Please set it in the configuration."
+            )
         return self.settings.audience
 
     def get_client_id(self) -> str:
-        assert self.settings.client_id is not None
+        if self.settings.client_id is None:
+            raise ValueError(
+                "Client ID is required. Please set it in the configuration."
+            )
         return self.settings.client_id
@@ -1,5 +1,6 @@
 from crewai.cli.authentication.providers.base_provider import BaseProvider
 
+
 class WorkosProvider(BaseProvider):
     def get_authorize_url(self) -> str:
         return f"https://{self._get_domain()}/oauth2/device_authorization"
@@ -17,9 +18,13 @@ class WorkosProvider(BaseProvider):
         return self.settings.audience or ""
 
     def get_client_id(self) -> str:
-        assert self.settings.client_id is not None, "Client ID is required"
+        if self.settings.client_id is None:
+            raise ValueError(
+                "Client ID is required. Please set it in the configuration."
+            )
         return self.settings.client_id
 
     def _get_domain(self) -> str:
-        assert self.settings.domain is not None, "Domain is required"
+        if self.settings.domain is None:
+            raise ValueError("Domain is required. Please set it in the configuration.")
         return self.settings.domain
@@ -17,8 +17,6 @@ def validate_jwt_token(
         missing required claims).
     """
 
-    decoded_token = None
-
     try:
         jwk_client = PyJWKClient(jwks_url)
         signing_key = jwk_client.get_signing_key_from_jwt(jwt_token)
@@ -26,7 +24,7 @@ def validate_jwt_token(
         _unverified_decoded_token = jwt.decode(
             jwt_token, options={"verify_signature": False}
         )
-        decoded_token = jwt.decode(
+        return jwt.decode(
             jwt_token,
             signing_key.key,
             algorithms=["RS256"],
@@ -40,23 +38,22 @@ def validate_jwt_token(
                 "require": ["exp", "iat", "iss", "aud", "sub"],
             },
         )
-        return decoded_token
 
-    except jwt.ExpiredSignatureError:
-        raise Exception("Token has expired.")
-    except jwt.InvalidAudienceError:
+    except jwt.ExpiredSignatureError as e:
+        raise Exception("Token has expired.") from e
+    except jwt.InvalidAudienceError as e:
         actual_audience = _unverified_decoded_token.get("aud", "[no audience found]")
         raise Exception(
             f"Invalid token audience. Got: '{actual_audience}'. Expected: '{audience}'"
-        )
-    except jwt.InvalidIssuerError:
+        ) from e
+    except jwt.InvalidIssuerError as e:
         actual_issuer = _unverified_decoded_token.get("iss", "[no issuer found]")
         raise Exception(
             f"Invalid token issuer. Got: '{actual_issuer}'. Expected: '{issuer}'"
-        )
+        ) from e
     except jwt.MissingRequiredClaimError as e:
-        raise Exception(f"Token is missing required claims: {str(e)}")
+        raise Exception(f"Token is missing required claims: {e!s}") from e
     except jwt.exceptions.PyJWKClientError as e:
-        raise Exception(f"JWKS or key processing error: {str(e)}")
+        raise Exception(f"JWKS or key processing error: {e!s}") from e
     except jwt.InvalidTokenError as e:
-        raise Exception(f"Invalid token: {str(e)}")
+        raise Exception(f"Invalid token: {e!s}") from e
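The `raise ... from e` changes preserve the original error as `__cause__`, which matters when these broad `Exception` wrappers are later caught and logged. A minimal sketch of the chaining pattern using PyJWT's expiry error; the token and key here are throwaway values, so this only illustrates the control flow:

```python
import time

import jwt  # PyJWT


def decode_or_explain(token: str, key: str) -> dict:
    try:
        return jwt.decode(token, key, algorithms=["HS256"])
    except jwt.ExpiredSignatureError as e:
        # `from e` keeps the PyJWT error as __cause__ instead of discarding it
        raise Exception("Token has expired.") from e


expired = jwt.encode(
    {"sub": "abc", "exp": int(time.time()) - 10}, "secret", algorithm="HS256"
)
try:
    decode_or_explain(expired, "secret")
except Exception as err:
    print(err, "| caused by:", type(err.__cause__).__name__)
    # Token has expired. | caused by: ExpiredSignatureError
```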
@@ -1,13 +1,13 @@
 from importlib.metadata import version as get_version
-from typing import Optional
 
 import click
-from crewai.cli.config import Settings
-from crewai.cli.settings.main import SettingsCommand
 
 from crewai.cli.add_crew_to_flow import add_crew_to_flow
+from crewai.cli.config import Settings
 from crewai.cli.create_crew import create_crew
 from crewai.cli.create_flow import create_flow
 from crewai.cli.crew_chat import run_chat
+from crewai.cli.settings.main import SettingsCommand
 from crewai.memory.storage.kickoff_task_outputs_storage import (
     KickoffTaskOutputsSQLiteStorage,
 )
@@ -237,13 +237,11 @@ def login():
 @crewai.group()
 def deploy():
     """Deploy the Crew CLI group."""
-    pass
 
 
 @crewai.group()
 def tool():
     """Tool Repository related commands."""
-    pass
 
 
 @deploy.command(name="create")
@@ -263,7 +261,7 @@ def deploy_list():
 
 @deploy.command(name="push")
 @click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
-def deploy_push(uuid: Optional[str]):
+def deploy_push(uuid: str | None):
     """Deploy the Crew."""
     deploy_cmd = DeployCommand()
     deploy_cmd.deploy(uuid=uuid)
@@ -271,7 +269,7 @@ def deploy_push(uuid: str | None):
 
 @deploy.command(name="status")
 @click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
-def deply_status(uuid: Optional[str]):
+def deply_status(uuid: str | None):
     """Get the status of a deployment."""
     deploy_cmd = DeployCommand()
     deploy_cmd.get_crew_status(uuid=uuid)
@@ -279,7 +277,7 @@ def deply_status(uuid: str | None):
 
 @deploy.command(name="logs")
 @click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
-def deploy_logs(uuid: Optional[str]):
+def deploy_logs(uuid: str | None):
     """Get the logs of a deployment."""
     deploy_cmd = DeployCommand()
     deploy_cmd.get_crew_logs(uuid=uuid)
@@ -287,7 +285,7 @@ def deploy_logs(uuid: str | None):
 
 @deploy.command(name="remove")
 @click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
-def deploy_remove(uuid: Optional[str]):
+def deploy_remove(uuid: str | None):
     """Remove a deployment."""
     deploy_cmd = DeployCommand()
     deploy_cmd.remove_crew(uuid=uuid)
@@ -327,7 +325,6 @@ def tool_publish(is_public: bool, force: bool):
 @crewai.group()
 def flow():
     """Flow related commands."""
-    pass
 
 
 @flow.command(name="kickoff")
@@ -359,7 +356,7 @@ def chat():
     and using the Chat LLM to generate responses.
     """
     click.secho(
-        "\nStarting a conversation with the Crew\n" "Type 'exit' or Ctrl+C to quit.\n",
+        "\nStarting a conversation with the Crew\nType 'exit' or Ctrl+C to quit.\n",
     )
 
     run_chat()
@@ -368,7 +365,6 @@ def chat():
 @crewai.group(invoke_without_command=True)
 def org():
     """Organization management commands."""
-    pass
 
 
 @org.command("list")
@@ -396,7 +392,6 @@ def current():
 @crewai.group()
 def enterprise():
     """Enterprise Configuration commands."""
-    pass
 
 
 @enterprise.command("configure")
@@ -410,7 +405,6 @@ def enterprise_configure(enterprise_url: str):
 @crewai.group()
 def config():
     """CLI Configuration commands."""
-    pass
 
 
 @config.command("list")
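The CLI changes are mostly dropping redundant `pass` statements after docstrings and moving to `str | None` annotations. For reference, a minimal self-contained sketch of the same click group/option shape; the command names here are invented:

```python
import click


@click.group()
def mycli():
    """Toy CLI group; a docstring alone is a valid function body, no `pass` needed."""


@mycli.command(name="status")
@click.option("-u", "--uuid", type=str, help="UUID parameter")
def status(uuid: str | None):
    """Print which deployment would be inspected."""
    click.echo(f"status for: {uuid or 'all deployments'}")


if __name__ == "__main__":
    mycli()
```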
@@ -1,15 +1,14 @@
 import json
 from pathlib import Path
-from typing import Optional
 
 from pydantic import BaseModel, Field
 
 from crewai.cli.constants import (
-    DEFAULT_CREWAI_ENTERPRISE_URL,
-    CREWAI_ENTERPRISE_DEFAULT_OAUTH2_PROVIDER,
     CREWAI_ENTERPRISE_DEFAULT_OAUTH2_AUDIENCE,
     CREWAI_ENTERPRISE_DEFAULT_OAUTH2_CLIENT_ID,
     CREWAI_ENTERPRISE_DEFAULT_OAUTH2_DOMAIN,
+    CREWAI_ENTERPRISE_DEFAULT_OAUTH2_PROVIDER,
+    DEFAULT_CREWAI_ENTERPRISE_URL,
 )
 from crewai.cli.shared.token_manager import TokenManager
@@ -56,20 +55,20 @@ HIDDEN_SETTINGS_KEYS = [
 
 
 class Settings(BaseModel):
-    enterprise_base_url: Optional[str] = Field(
+    enterprise_base_url: str | None = Field(
         default=DEFAULT_CLI_SETTINGS["enterprise_base_url"],
         description="Base URL of the CrewAI Enterprise instance",
     )
-    tool_repository_username: Optional[str] = Field(
+    tool_repository_username: str | None = Field(
         None, description="Username for interacting with the Tool Repository"
     )
-    tool_repository_password: Optional[str] = Field(
+    tool_repository_password: str | None = Field(
         None, description="Password for interacting with the Tool Repository"
     )
-    org_name: Optional[str] = Field(
+    org_name: str | None = Field(
         None, description="Name of the currently active organization"
     )
-    org_uuid: Optional[str] = Field(
+    org_uuid: str | None = Field(
         None, description="UUID of the currently active organization"
     )
     config_path: Path = Field(default=DEFAULT_CONFIG_PATH, frozen=True, exclude=True)
@@ -79,7 +78,7 @@ class Settings(BaseModel):
         default=DEFAULT_CLI_SETTINGS["oauth2_provider"],
     )
 
-    oauth2_audience: Optional[str] = Field(
+    oauth2_audience: str | None = Field(
         description="OAuth2 audience value, typically used to identify the target API or resource.",
         default=DEFAULT_CLI_SETTINGS["oauth2_audience"],
     )
@@ -16,48 +16,72 @@ from crewai.cli.utils import copy_template, load_env_vars, write_env_file
 def create_folder_structure(name, parent_folder=None):
     import keyword
     import re
 
-    name = name.rstrip('/')
-
+    name = name.rstrip("/")
 
     if not name.strip():
         raise ValueError("Project name cannot be empty or contain only whitespace")
 
     folder_name = name.replace(" ", "_").replace("-", "_").lower()
-    folder_name = re.sub(r'[^a-zA-Z0-9_]', '', folder_name)
+    folder_name = re.sub(r"[^a-zA-Z0-9_]", "", folder_name)
 
     # Check if the name starts with invalid characters or is primarily invalid
-    if re.match(r'^[^a-zA-Z0-9_-]+', name):
-        raise ValueError(f"Project name '{name}' contains no valid characters for a Python module name")
+    if re.match(r"^[^a-zA-Z0-9_-]+", name):
+        raise ValueError(
+            f"Project name '{name}' contains no valid characters for a Python module name"
+        )
 
     if not folder_name:
-        raise ValueError(f"Project name '{name}' contains no valid characters for a Python module name")
-
+        raise ValueError(
+            f"Project name '{name}' contains no valid characters for a Python module name"
+        )
 
     if folder_name[0].isdigit():
-        raise ValueError(f"Project name '{name}' would generate folder name '{folder_name}' which cannot start with a digit (invalid Python module name)")
-
+        raise ValueError(
+            f"Project name '{name}' would generate folder name '{folder_name}' which cannot start with a digit (invalid Python module name)"
+        )
 
     if keyword.iskeyword(folder_name):
-        raise ValueError(f"Project name '{name}' would generate folder name '{folder_name}' which is a reserved Python keyword")
-
+        raise ValueError(
+            f"Project name '{name}' would generate folder name '{folder_name}' which is a reserved Python keyword"
+        )
 
     if not folder_name.isidentifier():
-        raise ValueError(f"Project name '{name}' would generate invalid Python module name '{folder_name}'")
-
+        raise ValueError(
+            f"Project name '{name}' would generate invalid Python module name '{folder_name}'"
+        )
 
     class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "")
-    class_name = re.sub(r'[^a-zA-Z0-9_]', '', class_name)
-
+    class_name = re.sub(r"[^a-zA-Z0-9_]", "", class_name)
 
     if not class_name:
-        raise ValueError(f"Project name '{name}' contains no valid characters for a Python class name")
-
+        raise ValueError(
+            f"Project name '{name}' contains no valid characters for a Python class name"
+        )
 
     if class_name[0].isdigit():
-        raise ValueError(f"Project name '{name}' would generate class name '{class_name}' which cannot start with a digit")
-
+        raise ValueError(
+            f"Project name '{name}' would generate class name '{class_name}' which cannot start with a digit"
+        )
 
     # Check if the original name (before title casing) is a keyword
-    original_name_clean = re.sub(r'[^a-zA-Z0-9_]', '', name.replace("_", "").replace("-", "").lower())
-    if keyword.iskeyword(original_name_clean) or keyword.iskeyword(class_name) or class_name in ('True', 'False', 'None'):
-        raise ValueError(f"Project name '{name}' would generate class name '{class_name}' which is a reserved Python keyword")
-
+    original_name_clean = re.sub(
+        r"[^a-zA-Z0-9_]", "", name.replace("_", "").replace("-", "").lower()
+    )
+    if (
+        keyword.iskeyword(original_name_clean)
+        or keyword.iskeyword(class_name)
+        or class_name in ("True", "False", "None")
+    ):
+        raise ValueError(
+            f"Project name '{name}' would generate class name '{class_name}' which is a reserved Python keyword"
+        )
 
     if not class_name.isidentifier():
-        raise ValueError(f"Project name '{name}' would generate invalid Python class name '{class_name}'")
+        raise ValueError(
+            f"Project name '{name}' would generate invalid Python class name '{class_name}'"
+        )
 
     if parent_folder:
         folder_path = Path(parent_folder) / folder_name
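The validation chain above reduces to a small, testable core: normalize the name, then reject anything that is not a valid, non-keyword Python identifier. A condensed sketch of that core (the helper name is invented):

```python
import keyword
import re


def to_module_name(name: str) -> str:
    folder = re.sub(
        r"[^a-zA-Z0-9_]", "", name.replace(" ", "_").replace("-", "_").lower()
    )
    if not folder:
        raise ValueError(f"{name!r} contains no valid characters")
    if folder[0].isdigit():
        raise ValueError(f"{folder!r} cannot start with a digit")
    if keyword.iskeyword(folder) or not folder.isidentifier():
        raise ValueError(f"{folder!r} is not a usable Python module name")
    return folder


print(to_module_name("My Crew-Project"))  # my_crew_project
# to_module_name("2fast")  -> ValueError: '2fast' cannot start with a digit
# to_module_name("class")  -> ValueError: 'class' is not a usable Python module name
```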
@@ -172,7 +196,7 @@ def create_crew(name, provider=None, skip_provider=False, parent_folder=None):
     )
 
     # Check if the selected provider has predefined models
-    if selected_provider in MODELS and MODELS[selected_provider]:
+    if MODELS.get(selected_provider):
         while True:
             selected_model = select_model(selected_provider, provider_models)
             if selected_model is None:  # User typed 'q'
@@ -5,7 +5,7 @@ import sys
 import threading
 import time
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Set, Tuple
+from typing import Any
 
 import click
 import tomli
@@ -116,7 +116,7 @@ def show_loading(event: threading.Event):
     print()
 
 
-def initialize_chat_llm(crew: Crew) -> Optional[LLM | BaseLLM]:
+def initialize_chat_llm(crew: Crew) -> LLM | BaseLLM | None:
     """Initializes the chat LLM and handles exceptions."""
     try:
         return create_llm(crew.chat_llm)
@@ -157,7 +157,7 @@ def build_system_message(crew_chat_inputs: ChatInputs) -> str:
     )
 
 
-def create_tool_function(crew: Crew, messages: List[Dict[str, str]]) -> Any:
+def create_tool_function(crew: Crew, messages: list[dict[str, str]]) -> Any:
     """Creates a wrapper function for running the crew tool with messages."""
 
     def run_crew_tool_with_messages(**kwargs):
@@ -193,7 +193,7 @@ def chat_loop(chat_llm, messages, crew_tool_schema, available_functions):
                 user_input, chat_llm, messages, crew_tool_schema, available_functions
             )
 
-        except KeyboardInterrupt:
+        except KeyboardInterrupt:  # noqa: PERF203
             click.echo("\nExiting chat. Goodbye!")
             break
         except Exception as e:
@@ -221,9 +221,9 @@ def get_user_input() -> str:
 def handle_user_input(
     user_input: str,
     chat_llm: LLM,
-    messages: List[Dict[str, str]],
-    crew_tool_schema: Dict[str, Any],
-    available_functions: Dict[str, Any],
+    messages: list[dict[str, str]],
+    crew_tool_schema: dict[str, Any],
+    available_functions: dict[str, Any],
 ) -> None:
     if user_input.strip().lower() == "exit":
         click.echo("Exiting chat. Goodbye!")
@@ -281,7 +281,7 @@ def generate_crew_tool_schema(crew_inputs: ChatInputs) -> dict:
     }
 
 
-def run_crew_tool(crew: Crew, messages: List[Dict[str, str]], **kwargs):
+def run_crew_tool(crew: Crew, messages: list[dict[str, str]], **kwargs):
     """
     Runs the crew using crew.kickoff(inputs=kwargs) and returns the output.
 
@@ -304,9 +304,8 @@ def run_crew_tool(crew: Crew, messages: list[dict[str, str]], **kwargs):
         crew_output = crew.kickoff(inputs=kwargs)
 
         # Convert CrewOutput to a string to send back to the user
-        result = str(crew_output)
-
-        return result
+        return str(crew_output)
     except Exception as e:
         # Exit the chat and show the error message
         click.secho("An error occurred while running the crew:", fg="red")
@@ -314,7 +313,7 @@ def run_crew_tool(crew: Crew, messages: list[dict[str, str]], **kwargs):
         sys.exit(1)
 
 
-def load_crew_and_name() -> Tuple[Crew, str]:
+def load_crew_and_name() -> tuple[Crew, str]:
     """
     Loads the crew by importing the crew class from the user's project.
 
@@ -351,15 +350,17 @@ def load_crew_and_name() -> tuple[Crew, str]:
     try:
         crew_module = __import__(crew_module_name, fromlist=[crew_class_name])
     except ImportError as e:
-        raise ImportError(f"Failed to import crew module {crew_module_name}: {e}")
+        raise ImportError(
+            f"Failed to import crew module {crew_module_name}: {e}"
+        ) from e
 
     # Get the crew class from the module
     try:
         crew_class = getattr(crew_module, crew_class_name)
-    except AttributeError:
+    except AttributeError as e:
         raise AttributeError(
             f"Crew class {crew_class_name} not found in module {crew_module_name}"
-        )
+        ) from e
 
     # Instantiate the crew
     crew_instance = crew_class().crew()
@@ -395,7 +396,7 @@ def generate_crew_chat_inputs(crew: Crew, crew_name: str, chat_llm) -> ChatInput
     )
 
 
-def fetch_required_inputs(crew: Crew) -> Set[str]:
+def fetch_required_inputs(crew: Crew) -> set[str]:
     """
     Extracts placeholders from the crew's tasks and agents.
 
@@ -405,8 +406,8 @@ def fetch_required_inputs(crew: Crew) -> set[str]:
     Returns:
         Set[str]: A set of placeholder names.
     """
-    placeholder_pattern = re.compile(r"\{(.+?)\}")
-    required_inputs: Set[str] = set()
+    placeholder_pattern = re.compile(r"\{(.+?)}")
+    required_inputs: set[str] = set()
 
     # Scan tasks
     for task in crew.tasks:
@@ -435,7 +436,7 @@ def generate_input_description_with_ai(input_name: str, crew: Crew, chat_llm) ->
     """
     # Gather context from tasks and agents where the input is used
     context_texts = []
-    placeholder_pattern = re.compile(r"\{(.+?)\}")
+    placeholder_pattern = re.compile(r"\{(.+?)}")
 
     for task in crew.tasks:
         if (
@@ -479,9 +480,7 @@ def generate_input_description_with_ai(input_name: str, crew: Crew, chat_llm) ->
         f"{context}"
     )
     response = chat_llm.call(messages=[{"role": "user", "content": prompt}])
-    description = response.strip()
-
-    return description
+    return response.strip()
 
 
 def generate_crew_description_with_ai(crew: Crew, chat_llm) -> str:
@@ -497,7 +496,7 @@ def generate_crew_description_with_ai(crew: Crew, chat_llm) -> str:
     """
     # Gather context from tasks and agents
     context_texts = []
-    placeholder_pattern = re.compile(r"\{(.+?)\}")
+    placeholder_pattern = re.compile(r"\{(.+?)}")
 
     for task in crew.tasks:
         # Replace placeholders with input names
@@ -531,6 +530,4 @@ def generate_crew_description_with_ai(crew: Crew, chat_llm) -> str:
         f"{context}"
     )
     response = chat_llm.call(messages=[{"role": "user", "content": prompt}])
-    crew_description = response.strip()
-
-    return crew_description
+    return response.strip()
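One subtle change above: the placeholder regex `r"\{(.+?)\}"` became `r"\{(.+?)}"`. The two are equivalent, since a closing brace needs no escaping outside a quantifier context, and the unescaped form silences lint warnings. The extraction itself is a one-liner:

```python
import re

placeholder_pattern = re.compile(r"\{(.+?)}")

description = "Research {topic} and summarize it for {audience}."
print(set(placeholder_pattern.findall(description)))  # {'topic', 'audience'}
```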
@@ -14,11 +14,15 @@ class Repository:
 
         self.fetch()
 
-    def is_git_installed(self) -> bool:
+    @staticmethod
+    def is_git_installed() -> bool:
         """Check if Git is installed and available in the system."""
         try:
             subprocess.run(
-                ["git", "--version"], capture_output=True, check=True, text=True
+                ["git", "--version"],  # noqa: S607
+                capture_output=True,
+                check=True,
+                text=True,
             )
             return True
         except (subprocess.CalledProcessError, FileNotFoundError):
@@ -26,22 +30,26 @@ class Repository:
 
     def fetch(self) -> None:
         """Fetch latest updates from the remote."""
-        subprocess.run(["git", "fetch"], cwd=self.path, check=True)
+        subprocess.run(["git", "fetch"], cwd=self.path, check=True)  # noqa: S607
 
     def status(self) -> str:
         """Get the git status in porcelain format."""
         return subprocess.check_output(
-            ["git", "status", "--branch", "--porcelain"],
+            ["git", "status", "--branch", "--porcelain"],  # noqa: S607
             cwd=self.path,
             encoding="utf-8",
         ).strip()
 
-    @lru_cache(maxsize=None)
+    @lru_cache(maxsize=None)  # noqa: B019
     def is_git_repo(self) -> bool:
-        """Check if the current directory is a git repository."""
+        """Check if the current directory is a git repository.
+
+        Notes:
+            - TODO: This method is cached to avoid redundant checks, but using lru_cache on methods can lead to memory leaks
+        """
         try:
             subprocess.check_output(
-                ["git", "rev-parse", "--is-inside-work-tree"],
+                ["git", "rev-parse", "--is-inside-work-tree"],  # noqa: S607
                 cwd=self.path,
                 encoding="utf-8",
             )
@@ -64,14 +72,13 @@ class Repository:
         """Return True if the Git repository is fully synced with the remote, False otherwise."""
         if self.has_uncommitted_changes() or self.is_ahead_or_behind():
             return False
-        else:
-            return True
+        return True
 
     def origin_url(self) -> str | None:
         """Get the Git repository's remote URL."""
         try:
             result = subprocess.run(
-                ["git", "remote", "get-url", "origin"],
+                ["git", "remote", "get-url", "origin"],  # noqa: S607
                 cwd=self.path,
                 capture_output=True,
                 text=True,
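The `# noqa: B019` and the TODO above flag a real trade-off: `functools.lru_cache` on an instance method keys the cache on `self`, so every instance stays strongly referenced by the cache for the life of the process. A sketch demonstrating the retained reference:

```python
import gc
import weakref
from functools import lru_cache


class Repo:
    @lru_cache(maxsize=None)  # the cache holds a strong reference to `self`
    def is_git_repo(self) -> bool:
        return True


repo = Repo()
repo.is_git_repo()        # populates the cache with (repo,) as the key
ref = weakref.ref(repo)
del repo
gc.collect()
print(ref() is not None)  # True: the cached entry keeps the instance alive
```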
@@ -12,8 +12,8 @@ def install_crew(proxy_options: list[str]) -> None:
     Install the crew by running the UV command to lock and install.
     """
     try:
-        command = ["uv", "sync"] + proxy_options
-        subprocess.run(command, check=True, capture_output=False, text=True)
+        command = ["uv", "sync", *proxy_options]
+        subprocess.run(command, check=True, capture_output=False, text=True)  # noqa: S603
 
     except subprocess.CalledProcessError as e:
         click.echo(f"An error occurred while running the crew: {e}", err=True)
@@ -1,11 +1,10 @@
-from typing import List, Optional
 from urllib.parse import urljoin
 
 import requests
 
 from crewai.cli.config import Settings
-from crewai.cli.version import get_crewai_version
 from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL
+from crewai.cli.version import get_crewai_version
 
 
 class PlusAPI:
@@ -56,9 +55,9 @@ class PlusAPI:
         handle: str,
         is_public: bool,
         version: str,
-        description: Optional[str],
+        description: str | None,
         encoded_file: str,
-        available_exports: Optional[List[str]] = None,
+        available_exports: list[str] | None = None,
     ):
         params = {
             "handle": handle,
@@ -1,10 +1,10 @@
-import os
-import certifi
 import json
+import os
 import time
 from collections import defaultdict
 from pathlib import Path
 
+import certifi
 import click
 import requests
 
@@ -25,7 +25,7 @@ def select_choice(prompt_message, choices):
 
     provider_models = get_provider_data()
     if not provider_models:
-        return
+        return None
     click.secho(prompt_message, fg="cyan")
     for idx, choice in enumerate(choices, start=1):
         click.secho(f"{idx}. {choice}", fg="cyan")
@@ -67,7 +67,7 @@ def select_provider(provider_models):
     all_providers = sorted(set(predefined_providers + list(provider_models.keys())))
 
     provider = select_choice(
-        "Select a provider to set up:", predefined_providers + ["other"]
+        "Select a provider to set up:", [*predefined_providers, "other"]
     )
     if provider is None:  # User typed 'q'
         return None
@@ -102,10 +102,9 @@ def select_model(provider, provider_models):
         click.secho(f"No models available for provider '{provider}'.", fg="red")
         return None
 
-    selected_model = select_choice(
+    return select_choice(
         f"Select a model to use for {provider.capitalize()}:", available_models
     )
-    return selected_model
 
 
 def load_provider_data(cache_file, cache_expiry):
@@ -165,7 +164,7 @@ def fetch_provider_data(cache_file):
     Returns:
     - dict or None: The fetched provider data or None if the operation fails.
     """
-    ssl_config = os.environ['SSL_CERT_FILE'] = certifi.where()
+    ssl_config = os.environ["SSL_CERT_FILE"] = certifi.where()
 
     try:
         response = requests.get(JSON_URL, stream=True, timeout=60, verify=ssl_config)
@@ -1,6 +1,5 @@
 import subprocess
 from enum import Enum
-from typing import List, Optional
 
 import click
 from packaging import version
@@ -57,7 +56,7 @@ def execute_command(crew_type: CrewType) -> None:
     command = ["uv", "run", "kickoff" if crew_type == CrewType.FLOW else "run_crew"]
 
     try:
-        subprocess.run(command, capture_output=False, text=True, check=True)
+        subprocess.run(command, capture_output=False, text=True, check=True)  # noqa: S603
 
     except subprocess.CalledProcessError as e:
         handle_error(e, crew_type)
@@ -3,7 +3,7 @@ import os
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from cryptography.fernet import Fernet
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ class TokenManager:
|
||||
encrypted_data = self.fernet.encrypt(json.dumps(data).encode())
|
||||
self.save_secure_file(self.file_path, encrypted_data)
|
||||
|
||||
def get_token(self) -> Optional[str]:
|
||||
def get_token(self) -> str | None:
|
||||
"""
|
||||
Get the access token if it is valid and not expired.
|
||||
|
||||
@@ -113,7 +113,7 @@ class TokenManager:
|
||||
# Set appropriate permissions (read/write for owner only)
|
||||
os.chmod(file_path, 0o600)
|
||||
|
||||
def read_secure_file(self, filename: str) -> Optional[bytes]:
|
||||
def read_secure_file(self, filename: str) -> bytes | None:
|
||||
"""
|
||||
Read the content of a secure file.
|
||||
|
||||
|
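For context on the `Fernet` round-trip that `TokenManager` builds on, a minimal self-contained sketch (the key handling below is illustrative only; the real class derives and stores its own key material):

import json

from cryptography.fernet import Fernet

key = Fernet.generate_key()  # illustrative; TokenManager manages its own key
fernet = Fernet(key)

# Encrypt a JSON payload, then decrypt and parse it back.
encrypted = fernet.encrypt(json.dumps({"access_token": "placeholder"}).encode())
restored = json.loads(fernet.decrypt(encrypted).decode())
assert restored["access_token"] == "placeholder"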
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.186.1,<1.0.0"
"crewai[tools]>=0.193.2,<1.0.0"
]

[project.scripts]

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.186.1,<1.0.0",
"crewai[tools]>=0.193.2,<1.0.0",
]

[project.scripts]

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.186.1"
"crewai[tools]>=0.193.2"
]

[tool.crewai]
@@ -5,7 +5,7 @@ import sys
from functools import reduce
from inspect import getmro, isclass, isfunction, ismethod
from pathlib import Path
from typing import Any, Dict, List, get_type_hints
from typing import Any, get_type_hints

import click
import tomli
@@ -41,8 +41,7 @@ def copy_template(src, dst, name, class_name, folder_name):
def read_toml(file_path: str = "pyproject.toml"):
"""Read the content of a TOML file and return it as a dictionary."""
with open(file_path, "rb") as f:
toml_dict = tomli.load(f)
return toml_dict
return tomli.load(f)


def parse_toml(content):
@@ -77,7 +76,7 @@ def get_project_description(


def _get_project_attribute(
pyproject_path: str, keys: List[str], require: bool
pyproject_path: str, keys: list[str], require: bool
) -> Any | None:
"""Get an attribute from the pyproject.toml file."""
attribute = None
@@ -96,16 +95,20 @@ def _get_project_attribute(
except FileNotFoundError:
console.print(f"Error: {pyproject_path} not found.", style="bold red")
except KeyError:
console.print(f"Error: {pyproject_path} is not a valid pyproject.toml file.", style="bold red")
except tomllib.TOMLDecodeError if sys.version_info >= (3, 11) else Exception as e: # type: ignore
console.print(
f"Error: {pyproject_path} is not a valid TOML file."
if sys.version_info >= (3, 11)
else f"Error reading the pyproject.toml file: {e}",
f"Error: {pyproject_path} is not a valid pyproject.toml file.",
style="bold red",
)
except Exception as e:
console.print(f"Error reading the pyproject.toml file: {e}", style="bold red")
# Handle TOML decode errors for Python 3.11+
if sys.version_info >= (3, 11) and isinstance(e, tomllib.TOMLDecodeError): # type: ignore
console.print(
f"Error: {pyproject_path} is not a valid TOML file.", style="bold red"
)
else:
console.print(
f"Error reading the pyproject.toml file: {e}", style="bold red"
)

if require and not attribute:
console.print(
@@ -117,7 +120,7 @@ def _get_project_attribute(
return attribute


def _get_nested_value(data: Dict[str, Any], keys: List[str]) -> Any:
def _get_nested_value(data: dict[str, Any], keys: list[str]) -> Any:
return reduce(dict.__getitem__, keys, data)
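`_get_nested_value` is a one-liner worth unpacking: `reduce(dict.__getitem__, keys, data)` applies successive subscripts along the key path, so a missing key raises `KeyError`, which `_get_project_attribute` catches above. For example:

from functools import reduce

data = {"project": {"name": "my_crew"}}
assert reduce(dict.__getitem__, ["project", "name"], data) == "my_crew"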
@@ -296,7 +299,10 @@ def get_crews(crew_path: str = "crew.py", require: bool = False) -> list[Crew]:
try:
crew_instances.extend(fetch_crews(module_attr))
except Exception as e:
console.print(f"Error processing attribute {attr_name}: {e}", style="bold red")
console.print(
f"Error processing attribute {attr_name}: {e}",
style="bold red",
)
continue

# If we found crew instances, break out of the loop
@@ -304,12 +310,15 @@ def get_crews(crew_path: str = "crew.py", require: bool = False) -> list[Crew]:
break

except Exception as exec_error:
console.print(f"Error executing module: {exec_error}", style="bold red")
console.print(
f"Error executing module: {exec_error}",
style="bold red",
)

except (ImportError, AttributeError) as e:
if require:
console.print(
f"Error importing crew from {crew_path}: {str(e)}",
f"Error importing crew from {crew_path}: {e!s}",
style="bold red",
)
continue
@@ -325,9 +334,9 @@ def get_crews(crew_path: str = "crew.py", require: bool = False) -> list[Crew]:
except Exception as e:
if require:
console.print(
f"Unexpected error while loading crew: {str(e)}", style="bold red"
f"Unexpected error while loading crew: {e!s}", style="bold red"
)
raise SystemExit
raise SystemExit from e
return crew_instances


@@ -348,8 +357,7 @@ def get_crew_instance(module_attr) -> Crew | None:

if isinstance(module_attr, Crew):
return module_attr
else:
return None
return None


def fetch_crews(module_attr) -> list[Crew]:
@@ -402,11 +410,11 @@ def extract_available_exports(dir_path: str = "src"):
return available_exports

except Exception as e:
console.print(f"[red]Error: Could not extract tool classes: {str(e)}[/red]")
console.print(f"[red]Error: Could not extract tool classes: {e!s}[/red]")
console.print(
"Please ensure your project contains valid tools (classes inheriting from BaseTool or functions with @tool decorator)."
)
raise SystemExit(1)
raise SystemExit(1) from e


def _load_tools_from_init(init_file: Path) -> list[dict[str, Any]]:
@@ -440,8 +448,8 @@ def _load_tools_from_init(init_file: Path) -> list[dict[str, Any]]:
]

except Exception as e:
console.print(f"[red]Warning: Could not load {init_file}: {str(e)}[/red]")
raise SystemExit(1)
console.print(f"[red]Warning: Could not load {init_file}: {e!s}[/red]")
raise SystemExit(1) from e

finally:
sys.modules.pop("temp_module", None)
25 src/crewai/context.py Normal file
@@ -0,0 +1,25 @@
import os
import contextvars
from typing import Optional
from contextlib import contextmanager

_platform_integration_token: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar(
"platform_integration_token", default=None
)

def set_platform_integration_token(integration_token: str) -> None:
_platform_integration_token.set(integration_token)

def get_platform_integration_token() -> Optional[str]:
token = _platform_integration_token.get()
if token is None:
token = os.getenv("CREWAI_PLATFORM_INTEGRATION_TOKEN")
return token

@contextmanager
def platform_context(integration_token: str):
token = _platform_integration_token.set(integration_token)
try:
yield
finally:
_platform_integration_token.reset(token)
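A short usage sketch for the new `src/crewai/context.py` module: the token can be scoped with the context manager, set process-wide, or fall back to the `CREWAI_PLATFORM_INTEGRATION_TOKEN` environment variable (the token value below is a placeholder):

from crewai.context import get_platform_integration_token, platform_context

with platform_context("example-token"):
    assert get_platform_integration_token() == "example-token"

# Outside the block the ContextVar is reset, so lookup falls back to the
# CREWAI_PLATFORM_INTEGRATION_TOKEN environment variable (or None).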
@@ -3,26 +3,17 @@ import json
import re
import uuid
import warnings
from collections.abc import Callable
from concurrent.futures import Future
from copy import copy as shallow_copy
from hashlib import md5
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Set,
Tuple,
Union,
cast,
)

from opentelemetry import baggage
from opentelemetry.context import attach, detach

from crewai.utilities.crew.models import CrewContext

from pydantic import (
UUID4,
BaseModel,
@@ -39,26 +30,15 @@ from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.cache import CacheHandler
from crewai.crews.crew_output import CrewOutput
from crewai.flow.flow_trackable import FlowTrackable
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.llm import LLM, BaseLLM
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.process import Process
from crewai.security import Fingerprint, SecurityConfig
from crewai.task import Task
from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.tools.base_tool import BaseTool, Tool
from crewai.types.usage_metrics import UsageMetrics
from crewai.utilities import I18N, FileHandler, Logger, RPMController
from crewai.utilities.constants import NOT_SPECIFIED, TRAINING_DATA_FILE
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.events.event_bus import crewai_event_bus
from crewai.events.event_listener import EventListener
from crewai.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)
from crewai.events.listeners.tracing.utils import (
is_tracing_enabled,
should_auto_collect_first_time_traces,
)
from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
@@ -70,16 +50,28 @@ from crewai.events.types.crew_events import (
CrewTrainFailedEvent,
CrewTrainStartedEvent,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.event_listener import EventListener
from crewai.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)


from crewai.events.listeners.tracing.utils import (
is_tracing_enabled,
)
from crewai.flow.flow_trackable import FlowTrackable
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.llm import LLM, BaseLLM
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.process import Process
from crewai.rag.types import SearchResult
from crewai.security import Fingerprint, SecurityConfig
from crewai.task import Task
from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.tools.base_tool import BaseTool, Tool
from crewai.types.usage_metrics import UsageMetrics
from crewai.utilities import I18N, FileHandler, Logger, RPMController
from crewai.utilities.constants import NOT_SPECIFIED, TRAINING_DATA_FILE
from crewai.utilities.crew.models import CrewContext
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.formatter import (
aggregate_raw_outputs_from_task_outputs,
aggregate_raw_outputs_from_tasks,
@@ -94,28 +86,40 @@ warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")

class Crew(FlowTrackable, BaseModel):
"""
Represents a group of agents, defining how they should collaborate and the tasks they should perform.
Represents a group of agents, defining how they should collaborate and the
tasks they should perform.

Attributes:
tasks: List of tasks assigned to the crew.
agents: List of agents part of this crew.
tasks: list of tasks assigned to the crew.
agents: list of agents part of this crew.
manager_llm: The language model that will run manager agent.
manager_agent: Custom agent that will be used as manager.
memory: Whether the crew should use memory to store memories of it's execution.
cache: Whether the crew should use a cache to store the results of the tools execution.
function_calling_llm: The language model that will run the tool calling for all the agents.
process: The process flow that the crew will follow (e.g., sequential, hierarchical).
memory: Whether the crew should use memory to store memories of it's
execution.
cache: Whether the crew should use a cache to store the results of the
tools execution.
function_calling_llm: The language model that will run the tool calling
for all the agents.
process: The process flow that the crew will follow (e.g., sequential,
hierarchical).
verbose: Indicates the verbosity level for logging during execution.
config: Configuration settings for the crew.
max_rpm: Maximum number of requests per minute for the crew execution to be respected.
max_rpm: Maximum number of requests per minute for the crew execution to
be respected.
prompt_file: Path to the prompt json file to be used for the crew.
id: A unique identifier for the crew instance.
task_callback: Callback to be executed after each task for every agents execution.
step_callback: Callback to be executed after each step for every agents execution.
share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models.
task_callback: Callback to be executed after each task for every agents
execution.
step_callback: Callback to be executed after each step for every agents
execution.
share_crew: Whether you want to share the complete crew information and
execution with crewAI to make the library better, and allow us to
train models.
planning: Plan the crew execution and add the plan to the crew.
chat_llm: The language model used for orchestrating chat interactions with the crew.
security_config: Security configuration for the crew, including fingerprinting.
chat_llm: The language model used for orchestrating chat interactions
with the crew.
security_config: Security configuration for the crew, including
fingerprinting.
"""

__hash__ = object.__hash__ # type: ignore
@@ -124,13 +128,13 @@ class Crew(FlowTrackable, BaseModel):
_logger: Logger = PrivateAttr()
_file_handler: FileHandler = PrivateAttr()
_cache_handler: InstanceOf[CacheHandler] = PrivateAttr(default=CacheHandler())
_short_term_memory: Optional[InstanceOf[ShortTermMemory]] = PrivateAttr()
_long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
_entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr()
_external_memory: Optional[InstanceOf[ExternalMemory]] = PrivateAttr()
_train: Optional[bool] = PrivateAttr(default=False)
_train_iteration: Optional[int] = PrivateAttr()
_inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)
_short_term_memory: InstanceOf[ShortTermMemory] | None = PrivateAttr()
_long_term_memory: InstanceOf[LongTermMemory] | None = PrivateAttr()
_entity_memory: InstanceOf[EntityMemory] | None = PrivateAttr()
_external_memory: InstanceOf[ExternalMemory] | None = PrivateAttr()
_train: bool | None = PrivateAttr(default=False)
_train_iteration: int | None = PrivateAttr()
_inputs: dict[str, Any] | None = PrivateAttr(default=None)
_logging_color: str = PrivateAttr(
default="bold_purple",
)
@@ -138,107 +142,121 @@ class Crew(FlowTrackable, BaseModel):
default_factory=TaskOutputStorageHandler
)
name: Optional[str] = Field(default="crew")
|
||||
name: str | None = Field(default="crew")
|
||||
cache: bool = Field(default=True)
|
||||
tasks: List[Task] = Field(default_factory=list)
|
||||
agents: List[BaseAgent] = Field(default_factory=list)
|
||||
tasks: list[Task] = Field(default_factory=list)
|
||||
agents: list[BaseAgent] = Field(default_factory=list)
|
||||
process: Process = Field(default=Process.sequential)
|
||||
verbose: bool = Field(default=False)
|
||||
memory: bool = Field(
|
||||
default=False,
|
||||
description="Whether the crew should use memory to store memories of it's execution",
|
||||
description="If crew should use memory to store memories of it's execution",
|
||||
)
|
||||
short_term_memory: Optional[InstanceOf[ShortTermMemory]] = Field(
|
||||
short_term_memory: InstanceOf[ShortTermMemory] | None = Field(
|
||||
default=None,
|
||||
description="An Instance of the ShortTermMemory to be used by the Crew",
|
||||
)
|
||||
long_term_memory: Optional[InstanceOf[LongTermMemory]] = Field(
|
||||
long_term_memory: InstanceOf[LongTermMemory] | None = Field(
|
||||
default=None,
|
||||
description="An Instance of the LongTermMemory to be used by the Crew",
|
||||
)
|
||||
entity_memory: Optional[InstanceOf[EntityMemory]] = Field(
|
||||
entity_memory: InstanceOf[EntityMemory] | None = Field(
|
||||
default=None,
|
||||
description="An Instance of the EntityMemory to be used by the Crew",
|
||||
)
|
||||
external_memory: Optional[InstanceOf[ExternalMemory]] = Field(
|
||||
external_memory: InstanceOf[ExternalMemory] | None = Field(
|
||||
default=None,
|
||||
description="An Instance of the ExternalMemory to be used by the Crew",
|
||||
)
|
||||
embedder: Optional[dict] = Field(
|
||||
embedder: dict | None = Field(
|
||||
default=None,
|
||||
description="Configuration for the embedder to be used for the crew.",
|
||||
)
|
||||
usage_metrics: Optional[UsageMetrics] = Field(
|
||||
usage_metrics: UsageMetrics | None = Field(
|
||||
default=None,
|
||||
description="Metrics for the LLM usage during all tasks execution.",
|
||||
)
|
||||
manager_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
|
||||
manager_llm: str | InstanceOf[BaseLLM] | Any | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
manager_agent: Optional[BaseAgent] = Field(
|
||||
manager_agent: BaseAgent | None = Field(
|
||||
description="Custom agent that will be used as manager.", default=None
|
||||
)
|
||||
function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
|
||||
function_calling_llm: str | InstanceOf[LLM] | Any | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
|
||||
config: Json | dict[str, Any] | None = Field(default=None)
|
||||
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
|
||||
share_crew: Optional[bool] = Field(default=False)
|
||||
step_callback: Optional[Any] = Field(
|
||||
share_crew: bool | None = Field(default=False)
|
||||
step_callback: Any | None = Field(
|
||||
default=None,
|
||||
description="Callback to be executed after each step for all agents execution.",
|
||||
)
|
||||
task_callback: Optional[Any] = Field(
|
||||
task_callback: Any | None = Field(
|
||||
default=None,
|
||||
description="Callback to be executed after each task for all agents execution.",
|
||||
)
|
||||
before_kickoff_callbacks: List[
|
||||
Callable[[Optional[Dict[str, Any]]], Optional[Dict[str, Any]]]
|
||||
before_kickoff_callbacks: list[
|
||||
Callable[[dict[str, Any] | None], dict[str, Any] | None]
|
||||
] = Field(
|
||||
default_factory=list,
|
||||
description="List of callbacks to be executed before crew kickoff. It may be used to adjust inputs before the crew is executed.",
|
||||
description=(
|
||||
"List of callbacks to be executed before crew kickoff. "
|
||||
"It may be used to adjust inputs before the crew is executed."
|
||||
),
|
||||
)
|
||||
after_kickoff_callbacks: List[Callable[[CrewOutput], CrewOutput]] = Field(
|
||||
after_kickoff_callbacks: list[Callable[[CrewOutput], CrewOutput]] = Field(
|
||||
default_factory=list,
|
||||
description="List of callbacks to be executed after crew kickoff. It may be used to adjust the output of the crew.",
|
||||
description=(
|
||||
"List of callbacks to be executed after crew kickoff. "
|
||||
"It may be used to adjust the output of the crew."
|
||||
),
|
||||
)
|
||||
max_rpm: Optional[int] = Field(
|
||||
max_rpm: int | None = Field(
|
||||
default=None,
|
||||
description="Maximum number of requests per minute for the crew execution to be respected.",
|
||||
description=(
|
||||
"Maximum number of requests per minute for the crew execution "
|
||||
"to be respected."
|
||||
),
|
||||
)
|
||||
prompt_file: Optional[str] = Field(
|
||||
prompt_file: str | None = Field(
|
||||
default=None,
|
||||
description="Path to the prompt json file to be used for the crew.",
|
||||
)
|
||||
output_log_file: Optional[Union[bool, str]] = Field(
|
||||
output_log_file: bool | str | None = Field(
|
||||
default=None,
|
||||
description="Path to the log file to be saved",
|
||||
)
|
||||
planning: Optional[bool] = Field(
|
||||
planning: bool | None = Field(
|
||||
default=False,
|
||||
description="Plan the crew execution and add the plan to the crew.",
|
||||
)
|
||||
planning_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
|
||||
planning_llm: str | InstanceOf[BaseLLM] | Any | None = Field(
|
||||
default=None,
|
||||
description="Language model that will run the AgentPlanner if planning is True.",
|
||||
description=(
|
||||
"Language model that will run the AgentPlanner if planning is True."
|
||||
),
|
||||
)
|
||||
task_execution_output_json_files: Optional[List[str]] = Field(
|
||||
task_execution_output_json_files: list[str] | None = Field(
|
||||
default=None,
|
||||
description="List of file paths for task execution JSON files.",
|
||||
description="list of file paths for task execution JSON files.",
|
||||
)
|
||||
execution_logs: List[Dict[str, Any]] = Field(
|
||||
execution_logs: list[dict[str, Any]] = Field(
|
||||
default=[],
|
||||
description="List of execution logs for tasks",
|
||||
description="list of execution logs for tasks",
|
||||
)
|
||||
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
|
||||
knowledge_sources: list[BaseKnowledgeSource] | None = Field(
|
||||
default=None,
|
||||
description="Knowledge sources for the crew. Add knowledge sources to the knowledge object.",
|
||||
description=(
|
||||
"Knowledge sources for the crew. Add knowledge sources to the "
|
||||
"knowledge object."
|
||||
),
|
||||
)
|
||||
chat_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
|
||||
chat_llm: str | InstanceOf[BaseLLM] | Any | None = Field(
|
||||
default=None,
|
||||
description="LLM used to handle chatting with the crew.",
|
||||
)
|
||||
knowledge: Optional[Knowledge] = Field(
|
||||
knowledge: Knowledge | None = Field(
|
||||
default=None,
|
||||
description="Knowledge for the crew.",
|
||||
)
|
||||
@@ -246,18 +264,18 @@ class Crew(FlowTrackable, BaseModel):
|
||||
default_factory=SecurityConfig,
|
||||
description="Security configuration for the crew, including fingerprinting.",
|
||||
)
|
||||
token_usage: Optional[UsageMetrics] = Field(
|
||||
token_usage: UsageMetrics | None = Field(
|
||||
default=None,
|
||||
description="Metrics for the LLM usage during all tasks execution.",
|
||||
)
|
||||
tracing: Optional[bool] = Field(
|
||||
tracing: bool | None = Field(
|
||||
default=False,
|
||||
description="Whether to enable tracing for the crew.",
|
||||
)
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
|
||||
def _deny_user_set_id(cls, v: UUID4 | None) -> None:
|
||||
"""Prevent manual setting of the 'id' field by users."""
|
||||
if v:
|
||||
raise PydanticCustomError(
|
||||
@@ -266,9 +284,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
|
||||
@field_validator("config", mode="before")
|
||||
@classmethod
|
||||
def check_config_type(
|
||||
cls, v: Union[Json, Dict[str, Any]]
|
||||
) -> Union[Json, Dict[str, Any]]:
|
||||
def check_config_type(cls, v: Json | dict[str, Any]) -> Json | dict[str, Any]:
|
||||
"""Validates that the config is a valid type.
|
||||
Args:
|
||||
v: The config to be validated.
|
||||
@@ -281,12 +297,16 @@ class Crew(FlowTrackable, BaseModel):
|
||||
|
||||
@model_validator(mode="after")
|
||||
def set_private_attrs(self) -> "Crew":
|
||||
"""Set private attributes."""
|
||||
"""set private attributes."""
|
||||
|
||||
self._cache_handler = CacheHandler()
|
||||
event_listener = EventListener()
|
||||
|
||||
if is_tracing_enabled() or self.tracing:
|
||||
if (
|
||||
is_tracing_enabled()
|
||||
or self.tracing
|
||||
or should_auto_collect_first_time_traces()
|
||||
):
|
||||
trace_listener = TraceCollectionListener()
|
||||
trace_listener.setup_listeners(crewai_event_bus)
|
||||
event_listener.verbose = self.verbose
|
||||
@@ -314,7 +334,8 @@ class Crew(FlowTrackable, BaseModel):
|
||||
def create_crew_memory(self) -> "Crew":
|
||||
"""Initialize private memory attributes."""
|
||||
self._external_memory = (
|
||||
# External memory doesn’t support a default value since it was designed to be managed entirely externally
|
||||
# External memory does not support a default value since it was
|
||||
# designed to be managed entirely externally
|
||||
self.external_memory.set_crew(self) if self.external_memory else None
|
||||
)
|
||||
|
||||
@@ -355,7 +376,10 @@ class Crew(FlowTrackable, BaseModel):
|
||||
if not self.manager_llm and not self.manager_agent:
|
||||
raise PydanticCustomError(
|
||||
"missing_manager_llm_or_manager_agent",
|
||||
"Attribute `manager_llm` or `manager_agent` is required when using hierarchical process.",
|
||||
(
|
||||
"Attribute `manager_llm` or `manager_agent` is required "
|
||||
"when using hierarchical process."
|
||||
),
|
||||
{},
|
||||
)
|
||||
|
||||
@@ -398,7 +422,10 @@ class Crew(FlowTrackable, BaseModel):
|
||||
if task.agent is None:
|
||||
raise PydanticCustomError(
|
||||
"missing_agent_in_task",
|
||||
f"Sequential process error: Agent is missing in the task with the following description: {task.description}", # type: ignore # Argument of type "str" cannot be assigned to parameter "message_template" of type "LiteralString"
|
||||
(
|
||||
f"Sequential process error: Agent is missing in the task "
|
||||
f"with the following description: {task.description}"
|
||||
), # type: ignore # Dynamic string in error message
|
||||
{},
|
||||
)
|
||||
|
||||
@@ -459,7 +486,10 @@ class Crew(FlowTrackable, BaseModel):
|
||||
if task.async_execution and isinstance(task, ConditionalTask):
|
||||
raise PydanticCustomError(
|
||||
"invalid_async_conditional_task",
|
||||
f"Conditional Task: {task.description} , cannot be executed asynchronously.", # type: ignore # Argument of type "str" cannot be assigned to parameter "message_template" of type "LiteralString"
|
||||
(
|
||||
f"Conditional Task: {task.description}, "
|
||||
f"cannot be executed asynchronously."
|
||||
),
|
||||
{},
|
||||
)
|
||||
return self
|
||||
@@ -478,7 +508,9 @@ class Crew(FlowTrackable, BaseModel):
|
||||
for j in range(i - 1, -1, -1):
|
||||
if self.tasks[j] == context_task:
|
||||
raise ValueError(
|
||||
f"Task '{task.description}' is asynchronous and cannot include other sequential asynchronous tasks in its context."
|
||||
f"Task '{task.description}' is asynchronous and "
|
||||
f"cannot include other sequential asynchronous "
|
||||
f"tasks in its context."
|
||||
)
|
||||
if not self.tasks[j].async_execution:
|
||||
break
|
||||
@@ -496,13 +528,15 @@ class Crew(FlowTrackable, BaseModel):
|
||||
continue # Skip context tasks not in the main tasks list
|
||||
if task_indices[id(context_task)] > task_indices[id(task)]:
|
||||
raise ValueError(
|
||||
f"Task '{task.description}' has a context dependency on a future task '{context_task.description}', which is not allowed."
|
||||
f"Task '{task.description}' has a context dependency "
|
||||
f"on a future task '{context_task.description}', "
|
||||
f"which is not allowed."
|
||||
)
|
||||
return self
|
||||
|
||||
@property
|
||||
def key(self) -> str:
|
||||
source: List[str] = [agent.key for agent in self.agents] + [
|
||||
source: list[str] = [agent.key for agent in self.agents] + [
|
||||
task.key for task in self.tasks
|
||||
]
|
||||
return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()
|
||||
@@ -518,9 +552,9 @@ class Crew(FlowTrackable, BaseModel):
|
||||
return self.security_config.fingerprint
|
||||
|
def _setup_from_config(self):
assert self.config is not None, "Config should not be None."

"""Initializes agents and tasks from the provided config."""
if self.config is None:
raise ValueError("Config should not be None.")
if not self.config.get("agents") or not self.config.get("tasks"):
raise PydanticCustomError(
"missing_keys_in_config", "Config should have 'agents' and 'tasks'.", {}
@@ -530,7 +564,7 @@ class Crew(FlowTrackable, BaseModel):
self.agents = [Agent(**agent) for agent in self.config["agents"]]
self.tasks = [self._create_task(task) for task in self.config["tasks"]]

def _create_task(self, task_config: Dict[str, Any]) -> Task:
def _create_task(self, task_config: dict[str, Any]) -> Task:
"""Creates a task instance from its configuration.

Args:
@@ -559,7 +593,7 @@ class Crew(FlowTrackable, BaseModel):
CrewTrainingHandler(filename).initialize_file()

def train(
self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = None
self, n_iterations: int, filename: str, inputs: dict[str, Any] | None = None
) -> None:
"""Trains the crew for a given number of iterations."""
inputs = inputs or {}
@@ -611,7 +645,7 @@ class Crew(FlowTrackable, BaseModel):

def kickoff(
self,
inputs: Optional[Dict[str, Any]] = None,
inputs: dict[str, Any] | None = None,
) -> CrewOutput:
ctx = baggage.set_baggage(
"crew_context", CrewContext(id=str(self.id), key=self.key)
@@ -682,9 +716,9 @@ class Crew(FlowTrackable, BaseModel):
finally:
detach(token)

def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List[CrewOutput]:
"""Executes the Crew's workflow for each input in the list and aggregates results."""
results: List[CrewOutput] = []
def kickoff_for_each(self, inputs: list[dict[str, Any]]) -> list[CrewOutput]:
"""Executes the Crew's workflow for each input and aggregates results."""
results: list[CrewOutput] = []

# Initialize the parent crew's usage metrics
total_usage_metrics = UsageMetrics()
@@ -703,14 +737,12 @@ class Crew(FlowTrackable, BaseModel):
self._task_output_handler.reset()
return results

async def kickoff_async(
self, inputs: Optional[Dict[str, Any]] = None
) -> CrewOutput:
async def kickoff_async(self, inputs: dict[str, Any] | None = None) -> CrewOutput:
"""Asynchronous kickoff method to start the crew execution."""
inputs = inputs or {}
return await asyncio.to_thread(self.kickoff, inputs)

async def kickoff_for_each_async(self, inputs: List[Dict]) -> List[CrewOutput]:
async def kickoff_for_each_async(self, inputs: list[dict]) -> list[CrewOutput]:
crew_copies = [self.copy() for _ in inputs]

async def run_crew(crew, input_data):
@@ -739,7 +771,9 @@ class Crew(FlowTrackable, BaseModel):
tasks=self.tasks, planning_agent_llm=self.planning_llm
)._handle_crew_planning()

for task, step_plan in zip(self.tasks, result.list_of_plans_per_task):
for task, step_plan in zip(
self.tasks, result.list_of_plans_per_task, strict=False
):
task.description += step_plan.plan

def _store_execution_log(
@@ -776,7 +810,7 @@ class Crew(FlowTrackable, BaseModel):
return self._execute_tasks(self.tasks)

def _run_hierarchical_process(self) -> CrewOutput:
"""Creates and assigns a manager agent to make sure the crew completes the tasks."""
"""Creates and assigns a manager agent to complete the tasks."""
self._create_manager_agent()
return self._execute_tasks(self.tasks)

@@ -807,23 +841,24 @@ class Crew(FlowTrackable, BaseModel):

def _execute_tasks(
self,
tasks: List[Task],
start_index: Optional[int] = 0,
tasks: list[Task],
start_index: int | None = 0,
was_replayed: bool = False,
) -> CrewOutput:
"""Executes tasks sequentially and returns the final output.

Args:
tasks (List[Task]): List of tasks to execute
manager (Optional[BaseAgent], optional): Manager agent to use for delegation. Defaults to None.
manager (Optional[BaseAgent], optional): Manager agent to use for
delegation. Defaults to None.

Returns:
CrewOutput: Final output of the crew
"""

task_outputs: List[TaskOutput] = []
futures: List[Tuple[Task, Future[TaskOutput], int]] = []
last_sync_output: Optional[TaskOutput] = None
task_outputs: list[TaskOutput] = []
futures: list[tuple[Task, Future[TaskOutput], int]] = []
last_sync_output: TaskOutput | None = None

for task_index, task in enumerate(tasks):
if start_index is not None and task_index < start_index:
@@ -838,7 +873,9 @@ class Crew(FlowTrackable, BaseModel):
agent_to_use = self._get_agent_to_use(task)
if agent_to_use is None:
raise ValueError(
f"No agent available for task: {task.description}. Ensure that either the task has an assigned agent or a manager agent is provided."
f"No agent available for task: {task.description}. "
f"Ensure that either the task has an assigned agent "
f"or a manager agent is provided."
)

# Determine which tools to use - task tools take precedence over agent tools
@@ -847,7 +884,7 @@ class Crew(FlowTrackable, BaseModel):
tools_for_task = self._prepare_tools(
agent_to_use,
task,
cast(Union[List[Tool], List[BaseTool]], tools_for_task),
cast(list[Tool] | list[BaseTool], tools_for_task),
)

self._log_task_start(task, agent_to_use.role)
@@ -867,7 +904,7 @@ class Crew(FlowTrackable, BaseModel):
future = task.execute_async(
agent=agent_to_use,
context=context,
tools=cast(List[BaseTool], tools_for_task),
tools=cast(list[BaseTool], tools_for_task),
)
futures.append((task, future, task_index))
else:
@@ -879,7 +916,7 @@ class Crew(FlowTrackable, BaseModel):
task_output = task.execute_sync(
agent=agent_to_use,
context=context,
tools=cast(List[BaseTool], tools_for_task),
tools=cast(list[BaseTool], tools_for_task),
)
task_outputs.append(task_output)
self._process_task_result(task, task_output)
@@ -893,11 +930,11 @@ class Crew(FlowTrackable, BaseModel):
def _handle_conditional_task(
self,
task: ConditionalTask,
task_outputs: List[TaskOutput],
futures: List[Tuple[Task, Future[TaskOutput], int]],
task_outputs: list[TaskOutput],
futures: list[tuple[Task, Future[TaskOutput], int]],
task_index: int,
was_replayed: bool,
) -> Optional[TaskOutput]:
) -> TaskOutput | None:
if futures:
task_outputs = self._process_async_tasks(futures, was_replayed)
futures.clear()
@@ -917,8 +954,8 @@ class Crew(FlowTrackable, BaseModel):
return None

def _prepare_tools(
self, agent: BaseAgent, task: Task, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
self, agent: BaseAgent, task: Task, tools: list[Tool] | list[BaseTool]
) -> list[BaseTool]:
# Add delegation tools if agent allows delegation
if hasattr(agent, "allow_delegation") and getattr(
agent, "allow_delegation", False
@@ -947,22 +984,22 @@ class Crew(FlowTrackable, BaseModel):
):
tools = self._add_multimodal_tools(agent, tools)

# Return a List[BaseTool] which is compatible with both Task.execute_sync and Task.execute_async
return cast(List[BaseTool], tools)
# Return a List[BaseTool] compatible with Task.execute_sync and execute_async
return cast(list[BaseTool], tools)
def _get_agent_to_use(self, task: Task) -> Optional[BaseAgent]:
def _get_agent_to_use(self, task: Task) -> BaseAgent | None:
if self.process == Process.hierarchical:
return self.manager_agent
return task.agent

def _merge_tools(
self,
existing_tools: Union[List[Tool], List[BaseTool]],
new_tools: Union[List[Tool], List[BaseTool]],
) -> List[BaseTool]:
"""Merge new tools into existing tools list, avoiding duplicates by tool name."""
existing_tools: list[Tool] | list[BaseTool],
new_tools: list[Tool] | list[BaseTool],
) -> list[BaseTool]:
"""Merge new tools into existing tools list, avoiding duplicates."""
if not new_tools:
return cast(List[BaseTool], existing_tools)
return cast(list[BaseTool], existing_tools)

# Create mapping of tool names to new tools
new_tool_map = {tool.name: tool for tool in new_tools}
@@ -973,41 +1010,41 @@ class Crew(FlowTrackable, BaseModel):
# Add all new tools
tools.extend(new_tools)

return cast(List[BaseTool], tools)
return cast(list[BaseTool], tools)

def _inject_delegation_tools(
self,
tools: Union[List[Tool], List[BaseTool]],
tools: list[Tool] | list[BaseTool],
task_agent: BaseAgent,
agents: List[BaseAgent],
) -> List[BaseTool]:
agents: list[BaseAgent],
) -> list[BaseTool]:
if hasattr(task_agent, "get_delegation_tools"):
delegation_tools = task_agent.get_delegation_tools(agents)
# Cast delegation_tools to the expected type for _merge_tools
return self._merge_tools(tools, cast(List[BaseTool], delegation_tools))
return cast(List[BaseTool], tools)
return self._merge_tools(tools, cast(list[BaseTool], delegation_tools))
return cast(list[BaseTool], tools)

def _add_multimodal_tools(
self, agent: BaseAgent, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
self, agent: BaseAgent, tools: list[Tool] | list[BaseTool]
) -> list[BaseTool]:
if hasattr(agent, "get_multimodal_tools"):
multimodal_tools = agent.get_multimodal_tools()
# Cast multimodal_tools to the expected type for _merge_tools
return self._merge_tools(tools, cast(List[BaseTool], multimodal_tools))
return cast(List[BaseTool], tools)
return self._merge_tools(tools, cast(list[BaseTool], multimodal_tools))
return cast(list[BaseTool], tools)

def _add_code_execution_tools(
self, agent: BaseAgent, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
self, agent: BaseAgent, tools: list[Tool] | list[BaseTool]
) -> list[BaseTool]:
if hasattr(agent, "get_code_execution_tools"):
code_tools = agent.get_code_execution_tools()
# Cast code_tools to the expected type for _merge_tools
return self._merge_tools(tools, cast(List[BaseTool], code_tools))
return cast(List[BaseTool], tools)
return self._merge_tools(tools, cast(list[BaseTool], code_tools))
return cast(list[BaseTool], tools)

def _add_delegation_tools(
self, task: Task, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
self, task: Task, tools: list[Tool] | list[BaseTool]
) -> list[BaseTool]:
agents_for_delegation = [agent for agent in self.agents if agent != task.agent]
if len(self.agents) > 1 and len(agents_for_delegation) > 0 and task.agent:
if not tools:
@@ -1015,7 +1052,7 @@ class Crew(FlowTrackable, BaseModel):
tools = self._inject_delegation_tools(
tools, task.agent, agents_for_delegation
)
return cast(List[BaseTool], tools)
return cast(list[BaseTool], tools)

def _log_task_start(self, task: Task, role: str = "None"):
if self.output_log_file:
@@ -1024,8 +1061,8 @@ class Crew(FlowTrackable, BaseModel):
)

def _update_manager_tools(
self, task: Task, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
self, task: Task, tools: list[Tool] | list[BaseTool]
) -> list[BaseTool]:
if self.manager_agent:
if task.agent:
tools = self._inject_delegation_tools(tools, task.agent, [task.agent])
@@ -1033,18 +1070,17 @@ class Crew(FlowTrackable, BaseModel):
tools = self._inject_delegation_tools(
tools, self.manager_agent, self.agents
)
return cast(List[BaseTool], tools)
return cast(list[BaseTool], tools)

def _get_context(self, task: Task, task_outputs: List[TaskOutput]) -> str:
def _get_context(self, task: Task, task_outputs: list[TaskOutput]) -> str:
if not task.context:
return ""

context = (
return (
aggregate_raw_outputs_from_task_outputs(task_outputs)
if task.context is NOT_SPECIFIED
else aggregate_raw_outputs_from_tasks(task.context)
)
return context

def _process_task_result(self, task: Task, output: TaskOutput) -> None:
role = task.agent.role if task.agent is not None else "None"
@@ -1057,7 +1093,7 @@ class Crew(FlowTrackable, BaseModel):
output=output.raw,
)

def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
def _create_crew_output(self, task_outputs: list[TaskOutput]) -> CrewOutput:
if not task_outputs:
raise ValueError("No task outputs available to create crew output.")

@@ -1088,10 +1124,10 @@ class Crew(FlowTrackable, BaseModel):

def _process_async_tasks(
self,
futures: List[Tuple[Task, Future[TaskOutput], int]],
futures: list[tuple[Task, Future[TaskOutput], int]],
was_replayed: bool = False,
) -> List[TaskOutput]:
task_outputs: List[TaskOutput] = []
) -> list[TaskOutput]:
task_outputs: list[TaskOutput] = []
for future_task, future, task_index in futures:
task_output = future.result()
task_outputs.append(task_output)
@@ -1101,9 +1137,7 @@ class Crew(FlowTrackable, BaseModel):
)
return task_outputs

def _find_task_index(
self, task_id: str, stored_outputs: List[Any]
) -> Optional[int]:
def _find_task_index(self, task_id: str, stored_outputs: list[Any]) -> int | None:
return next(
(
index
@@ -1113,9 +1147,8 @@ class Crew(FlowTrackable, BaseModel):
None,
)

def replay(
self, task_id: str, inputs: Optional[Dict[str, Any]] = None
) -> CrewOutput:
def replay(self, task_id: str, inputs: dict[str, Any] | None = None) -> CrewOutput:
"""Replay the crew execution from a specific task."""
stored_outputs = self._task_output_handler.load()
if not stored_outputs:
raise ValueError(f"Task with id {task_id} not found in the crew's tasks.")
@@ -1151,19 +1184,19 @@ class Crew(FlowTrackable, BaseModel):
self.tasks[i].output = task_output

self._logging_color = "bold_blue"
result = self._execute_tasks(self.tasks, start_index, True)
return result
return self._execute_tasks(self.tasks, start_index, True)

def query_knowledge(
self, query: List[str], results_limit: int = 3, score_threshold: float = 0.35
) -> Union[List[Dict[str, Any]], None]:
self, query: list[str], results_limit: int = 3, score_threshold: float = 0.35
) -> list[SearchResult] | None:
"""Query the crew's knowledge base for relevant information."""
if self.knowledge:
return self.knowledge.query(
query, results_limit=results_limit, score_threshold=score_threshold
)
return None

def fetch_inputs(self) -> Set[str]:
def fetch_inputs(self) -> set[str]:
"""
Gathers placeholders (e.g., {something}) referenced in tasks or agents.
Scans each task's 'description' + 'expected_output', and each agent's
@@ -1172,11 +1205,11 @@ class Crew(FlowTrackable, BaseModel):
Returns a set of all discovered placeholder names.
"""
placeholder_pattern = re.compile(r"\{(.+?)\}")
required_inputs: Set[str] = set()
required_inputs: set[str] = set()

# Scan tasks for inputs
for task in self.tasks:
# description and expected_output might contain e.g. {topic}, {user_name}, etc.
# description and expected_output might contain e.g. {topic}, {user_name}
text = f"{task.description or ''} {task.expected_output or ''}"
required_inputs.update(placeholder_pattern.findall(text))
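The non-greedy pattern in `fetch_inputs` extracts every `{placeholder}` name from task and agent text; a quick illustration:

import re

placeholder_pattern = re.compile(r"\{(.+?)\}")
text = "Research {topic} and report to {user_name}."
assert placeholder_pattern.findall(text) == ["topic", "user_name"]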
@@ -1230,7 +1263,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
cloned_tasks.append(cloned_task)
|
||||
task_mapping[task.key] = cloned_task
|
||||
|
||||
for cloned_task, original_task in zip(cloned_tasks, self.tasks):
|
||||
for cloned_task, original_task in zip(cloned_tasks, self.tasks, strict=False):
|
||||
if isinstance(original_task.context, list):
|
||||
cloned_context = [
|
||||
task_mapping[context_task.key]
|
||||
@@ -1256,7 +1289,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
copied_data.pop("agents", None)
|
||||
copied_data.pop("tasks", None)
|
||||
|
||||
copied_crew = Crew(
|
||||
return Crew(
|
||||
**copied_data,
|
||||
agents=cloned_agents,
|
||||
tasks=cloned_tasks,
|
||||
@@ -1266,15 +1299,13 @@ class Crew(FlowTrackable, BaseModel):
|
||||
manager_llm=manager_llm,
|
||||
)
|
||||
|
||||
return copied_crew
|
||||
|
||||
def _set_tasks_callbacks(self) -> None:
|
||||
"""Sets callback for every task suing task_callback"""
|
||||
for task in self.tasks:
|
||||
if not task.callback:
|
||||
task.callback = self.task_callback
|
||||
|
||||
def _interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
|
||||
def _interpolate_inputs(self, inputs: dict[str, Any]) -> None:
|
||||
"""Interpolates the inputs in the tasks and agents."""
|
||||
[
|
||||
task.interpolate_inputs_and_add_conversation_history(
|
||||
@@ -1307,10 +1338,13 @@ class Crew(FlowTrackable, BaseModel):
|
||||
def test(
|
||||
self,
|
||||
n_iterations: int,
|
||||
eval_llm: Union[str, InstanceOf[BaseLLM]],
|
||||
inputs: Optional[Dict[str, Any]] = None,
|
||||
eval_llm: str | InstanceOf[BaseLLM],
|
||||
inputs: dict[str, Any] | None = None,
|
||||
) -> None:
|
||||
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
|
||||
"""Test and evaluate the Crew with the given inputs for n iterations.
|
||||
|
||||
Uses concurrent.futures for concurrent execution.
|
||||
"""
|
||||
try:
|
||||
# Create LLM instance and ensure it's of type LLM for CrewEvaluator
|
||||
llm_instance = create_llm(eval_llm)
|
||||
@@ -1350,7 +1384,11 @@ class Crew(FlowTrackable, BaseModel):
|
||||
raise
|
||||
|
||||
def __repr__(self):
|
||||
return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
|
||||
return (
|
||||
f"Crew(id={self.id}, process={self.process}, "
|
||||
f"number_of_agents={len(self.agents)}, "
|
||||
f"number_of_tasks={len(self.tasks)})"
|
||||
)
|
||||
|
||||
def reset_memories(self, command_type: str) -> None:
|
||||
"""Reset specific or all memories for the crew.
|
||||
@@ -1364,7 +1402,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
ValueError: If an invalid command type is provided.
|
||||
RuntimeError: If memory reset operation fails.
|
||||
"""
|
||||
VALID_TYPES = frozenset(
|
||||
valid_types = frozenset(
|
||||
[
|
||||
"long",
|
||||
"short",
|
||||
@@ -1377,9 +1415,10 @@ class Crew(FlowTrackable, BaseModel):
|
||||
]
|
||||
)
|
||||
|
||||
if command_type not in VALID_TYPES:
|
||||
if command_type not in valid_types:
|
||||
raise ValueError(
|
||||
f"Invalid command type. Must be one of: {', '.join(sorted(VALID_TYPES))}"
|
||||
f"Invalid command type. Must be one of: "
|
||||
f"{', '.join(sorted(valid_types))}"
|
||||
)
|
||||
|
||||
try:
|
||||
@@ -1389,7 +1428,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
self._reset_specific_memory(command_type)
|
||||
|
||||
except Exception as e:
|
||||
error_msg = f"Failed to reset {command_type} memory: {str(e)}"
|
||||
error_msg = f"Failed to reset {command_type} memory: {e!s}"
|
||||
self._logger.log("error", error_msg)
|
||||
raise RuntimeError(error_msg) from e
|
||||
|
||||
@@ -1397,7 +1436,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
"""Reset all available memory systems."""
|
||||
memory_systems = self._get_memory_systems()
|
||||
|
||||
for memory_type, config in memory_systems.items():
|
||||
for config in memory_systems.values():
|
||||
if (system := config.get("system")) is not None:
|
||||
name = config.get("name")
|
||||
try:
|
||||
@@ -1405,11 +1444,13 @@ class Crew(FlowTrackable, BaseModel):
|
||||
reset_fn(system)
|
||||
self._logger.log(
|
||||
"info",
|
||||
f"[Crew ({self.name if self.name else self.id})] {name} memory has been reset",
|
||||
f"[Crew ({self.name if self.name else self.id})] "
|
||||
f"{name} memory has been reset",
|
||||
)
|
||||
except Exception as e:
|
||||
raise RuntimeError(
|
||||
f"[Crew ({self.name if self.name else self.id})] Failed to reset {name} memory: {str(e)}"
|
||||
f"[Crew ({self.name if self.name else self.id})] "
|
||||
f"Failed to reset {name} memory: {e!s}"
|
||||
) from e
|
||||
|
||||
def _reset_specific_memory(self, memory_type: str) -> None:
|
||||
@@ -1434,18 +1475,21 @@ class Crew(FlowTrackable, BaseModel):
|
||||
reset_fn(system)
|
||||
self._logger.log(
|
||||
"info",
|
||||
f"[Crew ({self.name if self.name else self.id})] {name} memory has been reset",
|
||||
f"[Crew ({self.name if self.name else self.id})] "
|
||||
f"{name} memory has been reset",
|
||||
)
|
||||
except Exception as e:
|
||||
raise RuntimeError(
|
||||
f"[Crew ({self.name if self.name else self.id})] Failed to reset {name} memory: {str(e)}"
|
||||
f"[Crew ({self.name if self.name else self.id})] "
|
||||
f"Failed to reset {name} memory: {e!s}"
|
||||
) from e
|
||||
|
||||
def _get_memory_systems(self):
|
||||
"""Get all available memory systems with their configuration.
|
||||
|
||||
Returns:
|
||||
Dict containing all memory systems with their reset functions and display names.
|
||||
Dict containing all memory systems with their reset functions and
|
||||
display names.
|
||||
"""
|
||||
|
||||
def default_reset(memory):
|
||||
@@ -1506,7 +1550,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
},
|
||||
}
|
||||
|
||||
def reset_knowledge(self, knowledges: List[Knowledge]) -> None:
|
||||
def reset_knowledge(self, knowledges: list[Knowledge]) -> None:
|
||||
"""Reset crew and agent knowledge storage."""
|
||||
for ks in knowledges:
|
||||
ks.reset()
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import json
|
||||
from typing import Any, Dict, Optional
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
@@ -12,19 +12,21 @@ class CrewOutput(BaseModel):
|
||||
"""Class that represents the result of a crew."""
|
||||
|
||||
raw: str = Field(description="Raw output of crew", default="")
|
||||
pydantic: Optional[BaseModel] = Field(
|
||||
pydantic: BaseModel | None = Field(
|
||||
description="Pydantic output of Crew", default=None
|
||||
)
|
||||
json_dict: Optional[Dict[str, Any]] = Field(
|
||||
json_dict: dict[str, Any] | None = Field(
|
||||
description="JSON dict output of Crew", default=None
|
||||
)
|
||||
tasks_output: list[TaskOutput] = Field(
|
||||
description="Output of each task", default=[]
|
||||
)
|
||||
token_usage: UsageMetrics = Field(description="Processed token summary", default={})
|
||||
token_usage: UsageMetrics = Field(
|
||||
description="Processed token summary", default_factory=UsageMetrics
|
||||
)
|
||||
|
||||
@property
|
||||
def json(self) -> Optional[str]:
|
||||
def json(self) -> str | None: # type: ignore[override]
|
||||
if self.tasks_output[-1].output_format != OutputFormat.JSON:
|
||||
raise ValueError(
|
||||
"No JSON output found in the final task. Please make sure to set the output_json property in the final task in your crew."
|
||||
@@ -32,7 +34,7 @@ class CrewOutput(BaseModel):
|
||||
|
||||
return json.dumps(self.json_dict)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
def to_dict(self) -> dict[str, Any]:
|
||||
"""Convert json_output and pydantic_output to a dictionary."""
|
||||
output_dict = {}
|
||||
if self.json_dict:
|
||||
@@ -44,10 +46,9 @@ class CrewOutput(BaseModel):
|
||||
def __getitem__(self, key):
|
||||
if self.pydantic and hasattr(self.pydantic, key):
|
||||
return getattr(self.pydantic, key)
|
||||
elif self.json_dict and key in self.json_dict:
|
||||
if self.json_dict and key in self.json_dict:
|
||||
return self.json_dict[key]
|
||||
else:
|
||||
raise KeyError(f"Key '{key}' not found in CrewOutput.")
|
||||
raise KeyError(f"Key '{key}' not found in CrewOutput.")
|
||||
|
||||
def __str__(self):
|
||||
if self.pydantic:
|
||||
|
||||
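One fix in the `CrewOutput` hunk deserves a note: `token_usage` previously declared `default={}`, a plain dict that is not a valid `UsageMetrics`, while `default_factory=UsageMetrics` builds a fresh, correctly typed instance per object. The general Pydantic pattern, sketched with hypothetical model names:

from pydantic import BaseModel, Field

class UsageStats(BaseModel):  # hypothetical stand-in for UsageMetrics
    total_tokens: int = 0

class Output(BaseModel):
    # default_factory constructs a new UsageStats per instance,
    # instead of sharing (or coercing) a single literal default.
    token_usage: UsageStats = Field(default_factory=UsageStats)

assert Output().token_usage.total_tokens == 0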
@@ -9,48 +9,158 @@ This module provides the event infrastructure that allows users to:

 from crewai.events.base_event_listener import BaseEventListener
 from crewai.events.event_bus import crewai_event_bus

+from crewai.events.types.agent_events import (
+    AgentEvaluationCompletedEvent,
+    AgentEvaluationFailedEvent,
+    AgentEvaluationStartedEvent,
+    AgentExecutionCompletedEvent,
+    AgentExecutionErrorEvent,
+    AgentExecutionStartedEvent,
+    LiteAgentExecutionCompletedEvent,
+    LiteAgentExecutionErrorEvent,
+    LiteAgentExecutionStartedEvent,
+)
+from crewai.events.types.crew_events import (
+    CrewKickoffCompletedEvent,
+    CrewKickoffFailedEvent,
+    CrewKickoffStartedEvent,
+    CrewTestCompletedEvent,
+    CrewTestFailedEvent,
+    CrewTestResultEvent,
+    CrewTestStartedEvent,
+    CrewTrainCompletedEvent,
+    CrewTrainFailedEvent,
+    CrewTrainStartedEvent,
+)
+from crewai.events.types.flow_events import (
+    FlowCreatedEvent,
+    FlowEvent,
+    FlowFinishedEvent,
+    FlowPlotEvent,
+    FlowStartedEvent,
+    MethodExecutionFailedEvent,
+    MethodExecutionFinishedEvent,
+    MethodExecutionStartedEvent,
+)
+from crewai.events.types.knowledge_events import (
+    KnowledgeQueryCompletedEvent,
+    KnowledgeQueryFailedEvent,
+    KnowledgeQueryStartedEvent,
+    KnowledgeRetrievalCompletedEvent,
+    KnowledgeRetrievalStartedEvent,
+    KnowledgeSearchQueryFailedEvent,
+)
+from crewai.events.types.llm_events import (
+    LLMCallCompletedEvent,
+    LLMCallFailedEvent,
+    LLMCallStartedEvent,
+    LLMStreamChunkEvent,
+)
+from crewai.events.types.llm_guardrail_events import (
+    LLMGuardrailCompletedEvent,
+    LLMGuardrailStartedEvent,
+)
+from crewai.events.types.logging_events import (
+    AgentLogsExecutionEvent,
+    AgentLogsStartedEvent,
+)
+from crewai.events.types.memory_events import (
+    MemoryQueryCompletedEvent,
-    MemorySaveCompletedEvent,
-    MemorySaveStartedEvent,
+    MemoryQueryFailedEvent,
+    MemoryQueryStartedEvent,
+    MemoryRetrievalCompletedEvent,
+    MemoryRetrievalStartedEvent,
+    MemorySaveCompletedEvent,
+    MemorySaveFailedEvent,
-    MemoryQueryFailedEvent,
+    MemorySaveStartedEvent,
+)

-from crewai.events.types.knowledge_events import (
-    KnowledgeRetrievalStartedEvent,
-    KnowledgeRetrievalCompletedEvent,
+from crewai.events.types.reasoning_events import (
+    AgentReasoningCompletedEvent,
+    AgentReasoningFailedEvent,
+    AgentReasoningStartedEvent,
+    ReasoningEvent,
+)

-from crewai.events.types.crew_events import (
-    CrewKickoffStartedEvent,
-    CrewKickoffCompletedEvent,
+from crewai.events.types.task_events import (
+    TaskCompletedEvent,
+    TaskEvaluationEvent,
+    TaskFailedEvent,
+    TaskStartedEvent,
+)
-from crewai.events.types.agent_events import (
-    AgentExecutionCompletedEvent,
-)

-from crewai.events.types.llm_events import (
-    LLMStreamChunkEvent,
+from crewai.events.types.tool_usage_events import (
+    ToolExecutionErrorEvent,
+    ToolSelectionErrorEvent,
+    ToolUsageErrorEvent,
+    ToolUsageEvent,
+    ToolUsageFinishedEvent,
+    ToolUsageStartedEvent,
+    ToolValidateInputErrorEvent,
+)

 __all__ = [
+    "AgentEvaluationCompletedEvent",
+    "AgentEvaluationFailedEvent",
+    "AgentEvaluationStartedEvent",
+    "AgentExecutionCompletedEvent",
+    "AgentExecutionErrorEvent",
+    "AgentExecutionStartedEvent",
+    "AgentLogsExecutionEvent",
+    "AgentLogsStartedEvent",
+    "AgentReasoningCompletedEvent",
+    "AgentReasoningFailedEvent",
+    "AgentReasoningStartedEvent",
+    "BaseEventListener",
-    "crewai_event_bus",
+    "CrewKickoffCompletedEvent",
+    "CrewKickoffFailedEvent",
+    "CrewKickoffStartedEvent",
+    "CrewTestCompletedEvent",
+    "CrewTestFailedEvent",
+    "CrewTestResultEvent",
+    "CrewTestStartedEvent",
+    "CrewTrainCompletedEvent",
+    "CrewTrainFailedEvent",
+    "CrewTrainStartedEvent",
+    "FlowCreatedEvent",
+    "FlowEvent",
+    "FlowFinishedEvent",
+    "FlowPlotEvent",
+    "FlowStartedEvent",
+    "KnowledgeQueryCompletedEvent",
+    "KnowledgeQueryFailedEvent",
+    "KnowledgeQueryStartedEvent",
+    "KnowledgeRetrievalCompletedEvent",
+    "KnowledgeRetrievalStartedEvent",
+    "KnowledgeSearchQueryFailedEvent",
+    "LLMCallCompletedEvent",
+    "LLMCallFailedEvent",
+    "LLMCallStartedEvent",
+    "LLMGuardrailCompletedEvent",
+    "LLMGuardrailStartedEvent",
+    "LLMStreamChunkEvent",
+    "LiteAgentExecutionCompletedEvent",
+    "LiteAgentExecutionErrorEvent",
+    "LiteAgentExecutionStartedEvent",
+    "MemoryQueryCompletedEvent",
-    "MemorySaveCompletedEvent",
-    "MemorySaveStartedEvent",
+    "MemoryQueryFailedEvent",
+    "MemoryQueryStartedEvent",
+    "MemoryRetrievalCompletedEvent",
+    "MemoryRetrievalStartedEvent",
+    "MemorySaveCompletedEvent",
+    "MemorySaveFailedEvent",
-    "MemoryQueryFailedEvent",
-    "KnowledgeRetrievalStartedEvent",
-    "KnowledgeRetrievalCompletedEvent",
-    "CrewKickoffStartedEvent",
-    "CrewKickoffCompletedEvent",
-    "AgentExecutionCompletedEvent",
-    "LLMStreamChunkEvent",
-]
+    "MemorySaveStartedEvent",
+    "MethodExecutionFailedEvent",
+    "MethodExecutionFinishedEvent",
+    "MethodExecutionStartedEvent",
+    "ReasoningEvent",
+    "TaskCompletedEvent",
+    "TaskEvaluationEvent",
+    "TaskFailedEvent",
+    "TaskStartedEvent",
+    "ToolExecutionErrorEvent",
+    "ToolSelectionErrorEvent",
+    "ToolUsageErrorEvent",
+    "ToolUsageEvent",
+    "ToolUsageFinishedEvent",
+    "ToolUsageStartedEvent",
+    "ToolValidateInputErrorEvent",
+    "crewai_event_bus",
+]
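With the imports and __all__ sorted, every event type is re-exported from one flat public surface. A hedged sketch of typical consumer code against that surface (the handler body is illustrative):

    from crewai.events import TaskCompletedEvent, crewai_event_bus

    # The decorator form comes from the event bus's on() method shown later
    # in this diff; handlers receive the emitting source and the typed event.
    @crewai_event_bus.on(TaskCompletedEvent)
    def log_task(source, event):
        print(f"received event: {event.type}")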
@@ -1,5 +1,6 @@
 from datetime import datetime, timezone
-from typing import Any, Dict, Optional
+from typing import Any

 from pydantic import BaseModel, Field

 from crewai.utilities.serialization import to_serializable

@@ -10,11 +11,11 @@ class BaseEvent(BaseModel):

     timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
     type: str
-    source_fingerprint: Optional[str] = None  # UUID string of the source entity
-    source_type: Optional[str] = (
+    source_fingerprint: str | None = None  # UUID string of the source entity
+    source_type: str | None = (
         None  # "agent", "task", "crew", "memory", "entity_memory", "short_term_memory", "long_term_memory", "external_memory"
     )
-    fingerprint_metadata: Optional[Dict[str, Any]] = None  # Any relevant metadata
+    fingerprint_metadata: dict[str, Any] | None = None  # Any relevant metadata

     def to_json(self, exclude: set[str] | None = None):
         """

@@ -28,13 +29,13 @@ class BaseEvent(BaseModel):
         """
         return to_serializable(self, exclude=exclude)

-    def _set_task_params(self, data: Dict[str, Any]):
+    def _set_task_params(self, data: dict[str, Any]):
         if "from_task" in data and (task := data["from_task"]):
             self.task_id = task.id
             self.task_name = task.name or task.description
             self.from_task = None

-    def _set_agent_params(self, data: Dict[str, Any]):
+    def _set_agent_params(self, data: dict[str, Any]):
         task = data.get("from_task", None)
         agent = task.agent if task else data.get("from_agent", None)
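BaseEvent carries the optional fingerprint fields above plus a UTC timestamp supplied by default_factory, and to_json delegates to to_serializable. A hedged sketch of a custom event built on the same base (the subclass and its fields are invented for illustration):

    from crewai.events.base_events import BaseEvent

    class DeployStartedEvent(BaseEvent):  # hypothetical subclass
        type: str = "deploy_started"
        service: str = "api"

    evt = DeployStartedEvent(source_type="crew")
    # exclude works the same way as on to_serializable
    payload = evt.to_json(exclude={"fingerprint_metadata"})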
@@ -1,8 +1,9 @@
 from __future__ import annotations

 import threading
+from collections.abc import Callable
 from contextlib import contextmanager
-from typing import Any, Callable, Dict, List, Type, TypeVar, cast
+from typing import Any, TypeVar, cast

 from blinker import Signal

@@ -25,17 +26,17 @@ class CrewAIEventsBus:
         if cls._instance is None:
             with cls._lock:
                 if cls._instance is None:  # prevent race condition
-                    cls._instance = super(CrewAIEventsBus, cls).__new__(cls)
+                    cls._instance = super().__new__(cls)
                     cls._instance._initialize()
         return cls._instance

     def _initialize(self) -> None:
         """Initialize the event bus internal state"""
         self._signal = Signal("crewai_event_bus")
-        self._handlers: Dict[Type[BaseEvent], List[Callable]] = {}
+        self._handlers: dict[type[BaseEvent], list[Callable]] = {}

     def on(
-        self, event_type: Type[EventT]
+        self, event_type: type[EventT]
     ) -> Callable[[Callable[[Any, EventT], None]], Callable[[Any, EventT], None]]:
         """
         Decorator to register an event handler for a specific event type.

@@ -61,6 +62,18 @@ class CrewAIEventsBus:

         return decorator

+    @staticmethod
+    def _call_handler(
+        handler: Callable, source: Any, event: BaseEvent, event_type: type
+    ) -> None:
+        """Call a single handler with error handling."""
+        try:
+            handler(source, event)
+        except Exception as e:
+            print(
+                f"[EventBus Error] Handler '{handler.__name__}' failed for event '{event_type.__name__}': {e}"
+            )
+
     def emit(self, source: Any, event: BaseEvent) -> None:
         """
         Emit an event to all registered handlers

@@ -72,17 +85,12 @@
         for event_type, handlers in self._handlers.items():
             if isinstance(event, event_type):
                 for handler in handlers:
-                    try:
-                        handler(source, event)
-                    except Exception as e:
-                        print(
-                            f"[EventBus Error] Handler '{handler.__name__}' failed for event '{event_type.__name__}': {e}"
-                        )
+                    self._call_handler(handler, source, event, event_type)

         self._signal.send(source, event=event)

     def register_handler(
-        self, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
+        self, event_type: type[EventTypes], handler: Callable[[Any, EventTypes], None]
     ) -> None:
         """Register an event handler for a specific event type"""
         if event_type not in self._handlers:
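The __new__ above is a double-checked locking singleton: the fast path skips the lock once the instance exists, and the re-check inside the lock closes the race between two threads that both observed None. The same pattern in a minimal standalone sketch:

    import threading

    class Singleton:
        _instance = None
        _lock = threading.Lock()

        def __new__(cls):
            if cls._instance is None:          # fast path, no lock taken
                with cls._lock:
                    if cls._instance is None:  # re-check under the lock
                        cls._instance = super().__new__(cls)
            return cls._instance

    assert Singleton() is Singleton()

Extracting _call_handler also means the per-handler try/except lives in one place instead of being duplicated inside emit's nested loops.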
@@ -1,15 +1,30 @@
 from __future__ import annotations

 from io import StringIO
-from typing import Any, Dict
+from typing import Any

 from pydantic import Field, PrivateAttr

-from crewai.llm import LLM
-from crewai.task import Task
-from crewai.telemetry.telemetry import Telemetry
-from crewai.utilities import Logger
-from crewai.utilities.constants import EMITTER_COLOR
-
 from crewai.events.base_event_listener import BaseEventListener
+from crewai.events.types.agent_events import (
+    AgentExecutionCompletedEvent,
+    AgentExecutionStartedEvent,
+    LiteAgentExecutionCompletedEvent,
+    LiteAgentExecutionErrorEvent,
+    LiteAgentExecutionStartedEvent,
+)
+from crewai.events.types.crew_events import (
+    CrewKickoffCompletedEvent,
+    CrewKickoffFailedEvent,
+    CrewKickoffStartedEvent,
+    CrewTestCompletedEvent,
+    CrewTestFailedEvent,
+    CrewTestResultEvent,
+    CrewTestStartedEvent,
+    CrewTrainCompletedEvent,
+    CrewTrainFailedEvent,
+    CrewTrainStartedEvent,
+)
 from crewai.events.types.knowledge_events import (
     KnowledgeQueryCompletedEvent,
     KnowledgeQueryFailedEvent,

@@ -25,34 +40,21 @@ from crewai.events.types.llm_events import (
     LLMStreamChunkEvent,
 )
-from crewai.events.types.llm_guardrail_events import (
-    LLMGuardrailStartedEvent,
-    LLMGuardrailCompletedEvent,
-)
-from crewai.events.utils.console_formatter import ConsoleFormatter
-
-from crewai.events.types.agent_events import (
-    AgentExecutionCompletedEvent,
-    AgentExecutionStartedEvent,
-    LiteAgentExecutionCompletedEvent,
-    LiteAgentExecutionErrorEvent,
-    LiteAgentExecutionStartedEvent,
-)
+from crewai.events.types.llm_guardrail_events import (
+    LLMGuardrailCompletedEvent,
+    LLMGuardrailStartedEvent,
+)
 from crewai.events.types.logging_events import (
-    AgentLogsStartedEvent,
     AgentLogsExecutionEvent,
+    AgentLogsStartedEvent,
 )
-from crewai.events.types.crew_events import (
-    CrewKickoffCompletedEvent,
-    CrewKickoffFailedEvent,
-    CrewKickoffStartedEvent,
-    CrewTestCompletedEvent,
-    CrewTestFailedEvent,
-    CrewTestResultEvent,
-    CrewTestStartedEvent,
-    CrewTrainCompletedEvent,
-    CrewTrainFailedEvent,
-    CrewTrainStartedEvent,
-)
+from crewai.events.utils.console_formatter import ConsoleFormatter
+from crewai.llm import LLM
+from crewai.task import Task
+from crewai.telemetry.telemetry import Telemetry
+from crewai.utilities import Logger
+from crewai.utilities.constants import EMITTER_COLOR

-from .listeners.memory_listener import MemoryListener
 from .types.flow_events import (
     FlowCreatedEvent,
     FlowFinishedEvent,

@@ -61,26 +63,24 @@ from .types.flow_events import (
     MethodExecutionFinishedEvent,
     MethodExecutionStartedEvent,
 )
+from .types.reasoning_events import (
+    AgentReasoningCompletedEvent,
+    AgentReasoningFailedEvent,
+    AgentReasoningStartedEvent,
+)
 from .types.task_events import TaskCompletedEvent, TaskFailedEvent, TaskStartedEvent
 from .types.tool_usage_events import (
     ToolUsageErrorEvent,
     ToolUsageFinishedEvent,
     ToolUsageStartedEvent,
 )
-from .types.reasoning_events import (
-    AgentReasoningStartedEvent,
-    AgentReasoningCompletedEvent,
-    AgentReasoningFailedEvent,
-)
-
+
 from .listeners.memory_listener import MemoryListener


 class EventListener(BaseEventListener):
     _instance = None
     _telemetry: Telemetry = PrivateAttr(default_factory=lambda: Telemetry())
     logger = Logger(verbose=True, default_color=EMITTER_COLOR)
-    execution_spans: Dict[Task, Any] = Field(default_factory=dict)
+    execution_spans: dict[Task, Any] = Field(default_factory=dict)
     next_chunk = 0
     text_stream = StringIO()
     knowledge_retrieval_in_progress = False
@@ -1,11 +1,10 @@
-from typing import Union
-
 from crewai.events.types.agent_events import (
     AgentExecutionCompletedEvent,
     AgentExecutionErrorEvent,
     AgentExecutionStartedEvent,
     LiteAgentExecutionCompletedEvent,
 )
 from .types.crew_events import (
     CrewKickoffCompletedEvent,
     CrewKickoffFailedEvent,

@@ -24,6 +23,14 @@ from .types.flow_events import (
     MethodExecutionFinishedEvent,
     MethodExecutionStartedEvent,
 )
+from .types.knowledge_events import (
+    KnowledgeQueryCompletedEvent,
+    KnowledgeQueryFailedEvent,
+    KnowledgeQueryStartedEvent,
+    KnowledgeRetrievalCompletedEvent,
+    KnowledgeRetrievalStartedEvent,
+    KnowledgeSearchQueryFailedEvent,
+)
 from .types.llm_events import (
     LLMCallCompletedEvent,
     LLMCallFailedEvent,

@@ -34,6 +41,21 @@ from .types.llm_guardrail_events import (
     LLMGuardrailCompletedEvent,
     LLMGuardrailStartedEvent,
 )
+from .types.memory_events import (
+    MemoryQueryCompletedEvent,
+    MemoryQueryFailedEvent,
+    MemoryQueryStartedEvent,
+    MemoryRetrievalCompletedEvent,
+    MemoryRetrievalStartedEvent,
+    MemorySaveCompletedEvent,
+    MemorySaveFailedEvent,
+    MemorySaveStartedEvent,
+)
+from .types.reasoning_events import (
+    AgentReasoningCompletedEvent,
+    AgentReasoningFailedEvent,
+    AgentReasoningStartedEvent,
+)
 from .types.task_events import (
     TaskCompletedEvent,
     TaskFailedEvent,

@@ -44,77 +66,53 @@ from .types.tool_usage_events import (
     ToolUsageFinishedEvent,
     ToolUsageStartedEvent,
 )
-from .types.reasoning_events import (
-    AgentReasoningStartedEvent,
-    AgentReasoningCompletedEvent,
-    AgentReasoningFailedEvent,
-)
-from .types.knowledge_events import (
-    KnowledgeRetrievalStartedEvent,
-    KnowledgeRetrievalCompletedEvent,
-    KnowledgeQueryStartedEvent,
-    KnowledgeQueryCompletedEvent,
-    KnowledgeQueryFailedEvent,
-    KnowledgeSearchQueryFailedEvent,
-)
-
-from .types.memory_events import (
-    MemorySaveStartedEvent,
-    MemorySaveCompletedEvent,
-    MemorySaveFailedEvent,
-    MemoryQueryStartedEvent,
-    MemoryQueryCompletedEvent,
-    MemoryQueryFailedEvent,
-    MemoryRetrievalStartedEvent,
-    MemoryRetrievalCompletedEvent,
-)
+EventTypes = (
+    CrewKickoffStartedEvent
+    | CrewKickoffCompletedEvent
+    | CrewKickoffFailedEvent
+    | CrewTestStartedEvent
+    | CrewTestCompletedEvent
+    | CrewTestFailedEvent
+    | CrewTrainStartedEvent
+    | CrewTrainCompletedEvent
+    | CrewTrainFailedEvent
+    | AgentExecutionStartedEvent
+    | AgentExecutionCompletedEvent
+    | LiteAgentExecutionCompletedEvent
+    | TaskStartedEvent
+    | TaskCompletedEvent
+    | TaskFailedEvent
+    | FlowStartedEvent
+    | FlowFinishedEvent
+    | MethodExecutionStartedEvent
+    | MethodExecutionFinishedEvent
+    | MethodExecutionFailedEvent
+    | AgentExecutionErrorEvent
+    | ToolUsageFinishedEvent
+    | ToolUsageErrorEvent
+    | ToolUsageStartedEvent
+    | LLMCallStartedEvent
+    | LLMCallCompletedEvent
+    | LLMCallFailedEvent
+    | LLMStreamChunkEvent
+    | LLMGuardrailStartedEvent
+    | LLMGuardrailCompletedEvent
+    | AgentReasoningStartedEvent
+    | AgentReasoningCompletedEvent
+    | AgentReasoningFailedEvent
+    | KnowledgeRetrievalStartedEvent
+    | KnowledgeRetrievalCompletedEvent
+    | KnowledgeQueryStartedEvent
+    | KnowledgeQueryCompletedEvent
+    | KnowledgeQueryFailedEvent
+    | KnowledgeSearchQueryFailedEvent
+    | MemorySaveStartedEvent
+    | MemorySaveCompletedEvent
+    | MemorySaveFailedEvent
+    | MemoryQueryStartedEvent
+    | MemoryQueryCompletedEvent
+    | MemoryQueryFailedEvent
+    | MemoryRetrievalStartedEvent
+    | MemoryRetrievalCompletedEvent
+)

-EventTypes = Union[
-    CrewKickoffStartedEvent,
-    CrewKickoffCompletedEvent,
-    CrewKickoffFailedEvent,
-    CrewTestStartedEvent,
-    CrewTestCompletedEvent,
-    CrewTestFailedEvent,
-    CrewTrainStartedEvent,
-    CrewTrainCompletedEvent,
-    CrewTrainFailedEvent,
-    AgentExecutionStartedEvent,
-    AgentExecutionCompletedEvent,
-    LiteAgentExecutionCompletedEvent,
-    TaskStartedEvent,
-    TaskCompletedEvent,
-    TaskFailedEvent,
-    FlowStartedEvent,
-    FlowFinishedEvent,
-    MethodExecutionStartedEvent,
-    MethodExecutionFinishedEvent,
-    MethodExecutionFailedEvent,
-    AgentExecutionErrorEvent,
-    ToolUsageFinishedEvent,
-    ToolUsageErrorEvent,
-    ToolUsageStartedEvent,
-    LLMCallStartedEvent,
-    LLMCallCompletedEvent,
-    LLMCallFailedEvent,
-    LLMStreamChunkEvent,
-    LLMGuardrailStartedEvent,
-    LLMGuardrailCompletedEvent,
-    AgentReasoningStartedEvent,
-    AgentReasoningCompletedEvent,
-    AgentReasoningFailedEvent,
-    KnowledgeRetrievalStartedEvent,
-    KnowledgeRetrievalCompletedEvent,
-    KnowledgeQueryStartedEvent,
-    KnowledgeQueryCompletedEvent,
-    KnowledgeQueryFailedEvent,
-    KnowledgeSearchQueryFailedEvent,
-    MemorySaveStartedEvent,
-    MemorySaveCompletedEvent,
-    MemorySaveFailedEvent,
-    MemoryQueryStartedEvent,
-    MemoryQueryCompletedEvent,
-    MemoryQueryFailedEvent,
-    MemoryRetrievalStartedEvent,
-    MemoryRetrievalCompletedEvent,
-]
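The EventTypes alias migration replaces typing.Union with the PEP 604 | operator. Both spell the same union; the | form needs no typing import and matches the annotation style used elsewhere in this diff. A small illustration of the equivalence (Python 3.10+):

    from typing import Union

    class A: ...
    class B: ...

    OldStyle = Union[A, B]
    NewStyle = A | B                   # PEP 604

    assert OldStyle == NewStyle        # the two aliases compare equal
    assert isinstance(A(), NewStyle)   # | unions also work with isinstance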
@@ -2,4 +2,4 @@

 This module contains various event listener implementations
 for handling memory, tracing, and other event-driven functionality.
-"""
+"""
@@ -1,12 +1,12 @@
 from crewai.events.base_event_listener import BaseEventListener
 from crewai.events.types.memory_events import (
     MemoryQueryCompletedEvent,
     MemoryQueryFailedEvent,
     MemoryRetrievalCompletedEvent,
     MemoryRetrievalStartedEvent,
-    MemoryQueryFailedEvent,
-    MemoryQueryCompletedEvent,
-    MemorySaveStartedEvent,
     MemorySaveCompletedEvent,
     MemorySaveFailedEvent,
     MemorySaveStartedEvent,
 )
src/crewai/events/listeners/tracing/first_time_trace_handler.py (new file, 229 lines)
@@ -0,0 +1,229 @@
import logging
import uuid
import webbrowser
from pathlib import Path

from rich.console import Console
from rich.panel import Panel

from crewai.events.listeners.tracing.trace_batch_manager import TraceBatchManager
from crewai.events.listeners.tracing.utils import (
    mark_first_execution_completed,
    prompt_user_for_trace_viewing,
    should_auto_collect_first_time_traces,
)

logger = logging.getLogger(__name__)


def _update_or_create_env_file():
    """Update or create .env file with CREWAI_TRACING_ENABLED=true."""
    env_path = Path(".env")
    env_content = ""
    variable_name = "CREWAI_TRACING_ENABLED"
    variable_value = "true"

    # Read existing content if file exists
    if env_path.exists():
        with open(env_path, "r") as f:
            env_content = f.read()

    # Check if CREWAI_TRACING_ENABLED is already set
    lines = env_content.splitlines()
    variable_exists = False
    updated_lines = []

    for line in lines:
        if line.strip().startswith(f"{variable_name}="):
            # Update existing variable
            updated_lines.append(f"{variable_name}={variable_value}")
            variable_exists = True
        else:
            updated_lines.append(line)

    # Add variable if it doesn't exist
    if not variable_exists:
        if updated_lines and not updated_lines[-1].strip():
            # If last line is empty, replace it
            updated_lines[-1] = f"{variable_name}={variable_value}"
        else:
            # Add new line and then the variable
            updated_lines.append(f"{variable_name}={variable_value}")

    # Write updated content
    with open(env_path, "w") as f:
        f.write("\n".join(updated_lines))
        if updated_lines:  # Add final newline if there's content
            f.write("\n")
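Note that the helper is idempotent: a second run rewrites the existing CREWAI_TRACING_ENABLED line instead of appending a duplicate. A quick, hypothetical way to check that behavior from a scratch directory:

    from pathlib import Path

    # standalone sanity check, not part of the module itself
    Path(".env").write_text("OPENAI_API_KEY=sk-test\nCREWAI_TRACING_ENABLED=false\n")
    _update_or_create_env_file()
    _update_or_create_env_file()  # second run must not duplicate the line
    assert Path(".env").read_text().count("CREWAI_TRACING_ENABLED") == 1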

class FirstTimeTraceHandler:
    """Handles the first-time user trace collection and display flow."""

    def __init__(self):
        self.is_first_time: bool = False
        self.collected_events: bool = False
        self.trace_batch_id: str | None = None
        self.ephemeral_url: str | None = None
        self.batch_manager: TraceBatchManager | None = None

    def initialize_for_first_time_user(self) -> bool:
        """Check if this is first time and initialize collection."""
        self.is_first_time = should_auto_collect_first_time_traces()
        return self.is_first_time

    def set_batch_manager(self, batch_manager: TraceBatchManager):
        """Set reference to batch manager for sending events."""
        self.batch_manager = batch_manager

    def mark_events_collected(self):
        """Mark that events have been collected during execution."""
        self.collected_events = True

    def handle_execution_completion(self):
        """Handle the first-time trace completion flow."""
        if not self.is_first_time or not self.collected_events:
            return

        try:
            user_wants_traces = prompt_user_for_trace_viewing(timeout_seconds=20)

            if user_wants_traces:
                self._initialize_backend_and_send_events()

                # Enable tracing for future runs by updating .env file
                try:
                    _update_or_create_env_file()
                except Exception:  # noqa: S110
                    pass

                if self.ephemeral_url:
                    self._display_ephemeral_trace_link()

            mark_first_execution_completed()

        except Exception as e:
            self._gracefully_fail(f"Error in trace handling: {e}")
            mark_first_execution_completed()

    def _initialize_backend_and_send_events(self):
        """Initialize backend batch and send collected events."""
        if not self.batch_manager:
            return

        try:
            if not self.batch_manager.backend_initialized:
                original_metadata = (
                    self.batch_manager.current_batch.execution_metadata
                    if self.batch_manager.current_batch
                    else {}
                )

                user_context = {
                    "privacy_level": "standard",
                    "user_id": "first_time_user",
                    "session_id": str(uuid.uuid4()),
                    "trace_id": self.batch_manager.trace_batch_id,
                }

                execution_metadata = {
                    "execution_type": original_metadata.get("execution_type", "crew"),
                    "crew_name": original_metadata.get(
                        "crew_name", "First Time Execution"
                    ),
                    "flow_name": original_metadata.get("flow_name"),
                    "agent_count": original_metadata.get("agent_count", 1),
                    "task_count": original_metadata.get("task_count", 1),
                    "crewai_version": original_metadata.get("crewai_version"),
                }

                self.batch_manager._initialize_backend_batch(
                    user_context=user_context,
                    execution_metadata=execution_metadata,
                    use_ephemeral=True,
                )
                self.batch_manager.backend_initialized = True

            if self.batch_manager.event_buffer:
                self.batch_manager._send_events_to_backend()

            self.batch_manager.finalize_batch()
            self.ephemeral_url = self.batch_manager.ephemeral_trace_url

            if not self.ephemeral_url:
                self._show_local_trace_message()

        except Exception as e:
            self._gracefully_fail(f"Backend initialization failed: {e}")

    def _display_ephemeral_trace_link(self):
        """Display the ephemeral trace link to the user and automatically open browser."""
        console = Console()

        try:
            webbrowser.open(self.ephemeral_url)
        except Exception:  # noqa: S110
            pass

        panel_content = f"""
🎉 Your First CrewAI Execution Trace is Ready!

View your execution details here:
{self.ephemeral_url}

This trace shows:
• Agent decisions and interactions
• Task execution timeline
• Tool usage and results
• LLM calls and responses

✅ Tracing has been enabled for future runs! (CREWAI_TRACING_ENABLED=true added to .env)
You can also add tracing=True to your Crew(tracing=True) / Flow(tracing=True) for more control.

📝 Note: This link will expire in 24 hours.
""".strip()

        panel = Panel(
            panel_content,
            title="🔍 Execution Trace Generated",
            border_style="bright_green",
            padding=(1, 2),
        )

        console.print("\n")
        console.print(panel)
        console.print()

    def _gracefully_fail(self, error_message: str):
        """Handle errors gracefully without disrupting user experience."""
        console = Console()
        console.print(f"[yellow]Note: {error_message}[/yellow]")

        logger.debug(f"First-time trace error: {error_message}")

    def _show_local_trace_message(self):
        """Show message when traces were collected locally but couldn't be uploaded."""
        console = Console()

        panel_content = f"""
📊 Your execution traces were collected locally!

Unfortunately, we couldn't upload them to the server right now, but here's what we captured:
• {len(self.batch_manager.event_buffer)} trace events
• Execution duration: {self.batch_manager.calculate_duration("execution")}ms
• Batch ID: {self.batch_manager.trace_batch_id}

Tracing has been enabled for future runs! (CREWAI_TRACING_ENABLED=true added to .env)
The traces include agent decisions, task execution, and tool usage.
""".strip()

        panel = Panel(
            panel_content,
            title="🔍 Local Traces Collected",
            border_style="yellow",
            padding=(1, 2),
        )

        console.print("\n")
        console.print(panel)
        console.print()
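Taken together, the handler is driven by the trace listener in four steps. A hedged sketch of that wiring, using only the methods defined above (the surrounding execution is elided):

    handler = FirstTimeTraceHandler()
    if handler.initialize_for_first_time_user():       # first run only
        handler.set_batch_manager(TraceBatchManager())
        # ... crew executes; events accumulate in the batch manager ...
        handler.mark_events_collected()
        handler.handle_execution_completion()          # prompt, upload, show link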
@@ -1,18 +1,18 @@
 import uuid
-from datetime import datetime, timezone
-from typing import Dict, List, Any, Optional
 from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from logging import getLogger
+from typing import Any

-from crewai.utilities.constants import CREWAI_BASE_URL
-from crewai.cli.authentication.token import AuthError, get_auth_token
-
-from crewai.cli.version import get_crewai_version
-from crewai.cli.plus_api import PlusAPI
 from rich.console import Console
 from rich.panel import Panel

+from crewai.cli.authentication.token import AuthError, get_auth_token
+from crewai.cli.plus_api import PlusAPI
+from crewai.cli.version import get_crewai_version
 from crewai.events.listeners.tracing.types import TraceEvent
-from logging import getLogger
+from crewai.events.listeners.tracing.utils import should_auto_collect_first_time_traces
+from crewai.utilities.constants import CREWAI_BASE_URL

 logger = getLogger(__name__)

@@ -23,11 +23,11 @@ class TraceBatch:

     version: str = field(default_factory=get_crewai_version)
     batch_id: str = field(default_factory=lambda: str(uuid.uuid4()))
-    user_context: Dict[str, str] = field(default_factory=dict)
-    execution_metadata: Dict[str, Any] = field(default_factory=dict)
-    events: List[TraceEvent] = field(default_factory=list)
+    user_context: dict[str, str] = field(default_factory=dict)
+    execution_metadata: dict[str, Any] = field(default_factory=dict)
+    events: list[TraceEvent] = field(default_factory=list)

-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return {
             "version": self.version,
             "batch_id": self.batch_id,

@@ -40,26 +40,28 @@ class TraceBatch:
 class TraceBatchManager:
     """Single responsibility: Manage batches and event buffering"""

-    is_current_batch_ephemeral: bool = False
-    trace_batch_id: Optional[str] = None
-    current_batch: Optional[TraceBatch] = None
-    event_buffer: List[TraceEvent] = []
-    execution_start_times: Dict[str, datetime] = {}
-    batch_owner_type: Optional[str] = None
-    batch_owner_id: Optional[str] = None
-
     def __init__(self):
+        self.is_current_batch_ephemeral: bool = False
+        self.trace_batch_id: str | None = None
+        self.current_batch: TraceBatch | None = None
+        self.event_buffer: list[TraceEvent] = []
+        self.execution_start_times: dict[str, datetime] = {}
+        self.batch_owner_type: str | None = None
+        self.batch_owner_id: str | None = None
+        self.backend_initialized: bool = False
+        self.ephemeral_trace_url: str | None = None
         try:
             self.plus_api = PlusAPI(
                 api_key=get_auth_token(),
             )
         except AuthError:
             self.plus_api = PlusAPI(api_key="")
-        self.ephemeral_trace_url = None

     def initialize_batch(
         self,
-        user_context: Dict[str, str],
-        execution_metadata: Dict[str, Any],
+        user_context: dict[str, str],
+        execution_metadata: dict[str, Any],
         use_ephemeral: bool = False,
     ) -> TraceBatch:
         """Initialize a new trace batch"""

@@ -70,14 +72,21 @@ class TraceBatchManager:
         self.is_current_batch_ephemeral = use_ephemeral

         self.record_start_time("execution")
-        self._initialize_backend_batch(user_context, execution_metadata, use_ephemeral)
+
+        if should_auto_collect_first_time_traces():
+            self.trace_batch_id = self.current_batch.batch_id
+        else:
+            self._initialize_backend_batch(
+                user_context, execution_metadata, use_ephemeral
+            )
+            self.backend_initialized = True

         return self.current_batch

     def _initialize_backend_batch(
         self,
-        user_context: Dict[str, str],
-        execution_metadata: Dict[str, Any],
+        user_context: dict[str, str],
+        execution_metadata: dict[str, Any],
         use_ephemeral: bool = False,
     ):
         """Send batch initialization to backend"""

@@ -129,13 +138,6 @@ class TraceBatchManager:
                     if not use_ephemeral
                     else response_data["ephemeral_trace_id"]
                 )
-                console = Console()
-                panel = Panel(
-                    f"✅ Trace batch initialized with session ID: {self.trace_batch_id}",
-                    title="Trace Batch Initialization",
-                    border_style="green",
-                )
-                console.print(panel)
             else:
                 logger.warning(
                     f"Trace batch initialization returned status {response.status_code}. Continuing without tracing."

@@ -143,7 +145,7 @@ class TraceBatchManager:

         except Exception as e:
             logger.warning(
-                f"Error initializing trace batch: {str(e)}. Continuing without tracing."
+                f"Error initializing trace batch: {e}. Continuing without tracing."
             )

     def add_event(self, trace_event: TraceEvent):

@@ -154,7 +156,6 @@ class TraceBatchManager:
         """Send buffered events to backend with graceful failure handling"""
         if not self.plus_api or not self.trace_batch_id or not self.event_buffer:
             return 500
-
         try:
             payload = {
                 "events": [event.to_dict() for event in self.event_buffer],

@@ -178,19 +179,19 @@ class TraceBatchManager:
             if response.status_code in [200, 201]:
                 self.event_buffer.clear()
                 return 200
-            else:
-                logger.warning(
-                    f"Failed to send events: {response.status_code}. Events will be lost."
-                )
-                return 500
+            logger.warning(
+                f"Failed to send events: {response.status_code}. Events will be lost."
+            )
+            return 500

         except Exception as e:
             logger.warning(
-                f"Error sending events to backend: {str(e)}. Events will be lost."
+                f"Error sending events to backend: {e}. Events will be lost."
             )
             return 500

-    def finalize_batch(self) -> Optional[TraceBatch]:
+    def finalize_batch(self) -> TraceBatch | None:
         """Finalize batch and return it for sending"""
         if not self.current_batch:
             return None

@@ -246,12 +247,27 @@ class TraceBatchManager:
                     if not self.is_current_batch_ephemeral and access_code is None
                     else f"{CREWAI_BASE_URL}/crewai_plus/ephemeral_trace_batches/{self.trace_batch_id}?access_code={access_code}"
                 )

+                if self.is_current_batch_ephemeral:
+                    self.ephemeral_trace_url = return_link
+
+                # Create a properly formatted message with URL on its own line
+                message_parts = [
+                    f"✅ Trace batch finalized with session ID: {self.trace_batch_id}",
+                    "",
+                    f"🔗 View here: {return_link}",
+                ]
+
+                if access_code:
+                    message_parts.append(f"🔑 Access Code: {access_code}")
+
                 panel = Panel(
-                    f"✅ Trace batch finalized with session ID: {self.trace_batch_id}. View here: {return_link} {f', Access Code: {access_code}' if access_code else ''}",
+                    "\n".join(message_parts),
                     title="Trace Batch Finalization",
                     border_style="green",
                 )
-                console.print(panel)
+                if not should_auto_collect_first_time_traces():
+                    console.print(panel)

             else:
                 logger.error(

@@ -259,8 +275,8 @@ class TraceBatchManager:
             )

         except Exception as e:
-            logger.error(f"❌ Error finalizing trace batch: {str(e)}")
-            # TODO: send error to app
+            logger.error(f"❌ Error finalizing trace batch: {e}")
+            # TODO: send error to app marking as failed

     def _cleanup_batch_data(self):
         """Clean up batch data after successful finalization to free memory"""

@@ -277,7 +293,7 @@ class TraceBatchManager:
             self.batch_sequence = 0

         except Exception as e:
-            logger.error(f"Warning: Error during cleanup: {str(e)}")
+            logger.error(f"Warning: Error during cleanup: {e}")

     def has_events(self) -> bool:
         """Check if there are events in the buffer"""

@@ -306,7 +322,7 @@ class TraceBatchManager:
             return duration_ms
         return 0

-    def get_trace_id(self) -> Optional[str]:
+    def get_trace_id(self) -> str | None:
         """Get current trace ID"""
         if self.current_batch:
             return self.current_batch.user_context.get("trace_id")
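Moving event_buffer and its siblings from class attributes into __init__ fixes a classic Python pitfall: a mutable default declared at class level is shared by every instance. A minimal sketch of the difference, with invented names:

    class Shared:
        buffer: list[str] = []            # one list shared by all instances

    class PerInstance:
        def __init__(self) -> None:
            self.buffer: list[str] = []   # fresh list per instance

    a, b = Shared(), Shared()
    a.buffer.append("event")
    assert b.buffer == ["event"]          # state leaked across instances

    c, d = PerInstance(), PerInstance()
    c.buffer.append("event")
    assert d.buffer == []                 # isolated, as intended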
@@ -1,28 +1,59 @@
 import os
 import uuid
+from typing import Any, ClassVar

-from typing import Dict, Any, Optional
-
 from crewai.cli.authentication.token import AuthError, get_auth_token
 from crewai.cli.version import get_crewai_version
 from crewai.events.base_event_listener import BaseEventListener
-from crewai.events.types.agent_events import (
-    AgentExecutionCompletedEvent,
-    AgentExecutionStartedEvent,
-    LiteAgentExecutionStartedEvent,
-    LiteAgentExecutionCompletedEvent,
-    LiteAgentExecutionErrorEvent,
-    AgentExecutionErrorEvent,
-)
+from crewai.events.listeners.tracing.first_time_trace_handler import (
+    FirstTimeTraceHandler,
+)
+from crewai.events.listeners.tracing.types import TraceEvent
-from crewai.events.types.reasoning_events import (
-    AgentReasoningStartedEvent,
-    AgentReasoningCompletedEvent,
-    AgentReasoningFailedEvent,
-)
+from crewai.events.listeners.tracing.utils import safe_serialize_to_dict
+from crewai.events.types.agent_events import (
+    AgentExecutionCompletedEvent,
+    AgentExecutionErrorEvent,
+    AgentExecutionStartedEvent,
+    LiteAgentExecutionCompletedEvent,
+    LiteAgentExecutionErrorEvent,
+    LiteAgentExecutionStartedEvent,
+)
+from crewai.events.types.crew_events import (
+    CrewKickoffCompletedEvent,
+    CrewKickoffFailedEvent,
+    CrewKickoffStartedEvent,
+)
+from crewai.events.types.flow_events import (
+    FlowCreatedEvent,
+    FlowFinishedEvent,
+    FlowPlotEvent,
+    FlowStartedEvent,
+    MethodExecutionFailedEvent,
+    MethodExecutionFinishedEvent,
+    MethodExecutionStartedEvent,
+)
+from crewai.events.types.llm_events import (
+    LLMCallCompletedEvent,
+    LLMCallFailedEvent,
+    LLMCallStartedEvent,
+)
+from crewai.events.types.llm_guardrail_events import (
+    LLMGuardrailCompletedEvent,
+    LLMGuardrailStartedEvent,
+)
+from crewai.events.types.memory_events import (
+    MemoryQueryCompletedEvent,
+    MemoryQueryFailedEvent,
+    MemoryQueryStartedEvent,
+    MemorySaveCompletedEvent,
+    MemorySaveFailedEvent,
+    MemorySaveStartedEvent,
+)
+from crewai.events.types.reasoning_events import (
+    AgentReasoningCompletedEvent,
+    AgentReasoningFailedEvent,
+    AgentReasoningStartedEvent,
+)
 from crewai.events.types.task_events import (
     TaskCompletedEvent,
     TaskFailedEvent,

@@ -33,49 +64,16 @@ from crewai.events.types.tool_usage_events import (
     ToolUsageFinishedEvent,
     ToolUsageStartedEvent,
 )
-from crewai.events.types.llm_events import (
-    LLMCallCompletedEvent,
-    LLMCallFailedEvent,
-    LLMCallStartedEvent,
-)
-
-from crewai.events.types.flow_events import (
-    FlowCreatedEvent,
-    FlowStartedEvent,
-    FlowFinishedEvent,
-    MethodExecutionStartedEvent,
-    MethodExecutionFinishedEvent,
-    MethodExecutionFailedEvent,
-    FlowPlotEvent,
-)
-from crewai.events.types.llm_guardrail_events import (
-    LLMGuardrailStartedEvent,
-    LLMGuardrailCompletedEvent,
-)
 from crewai.utilities.serialization import to_serializable

-
 from .trace_batch_manager import TraceBatchManager

-from crewai.events.types.memory_events import (
-    MemoryQueryStartedEvent,
-    MemoryQueryCompletedEvent,
-    MemoryQueryFailedEvent,
-    MemorySaveStartedEvent,
-    MemorySaveCompletedEvent,
-    MemorySaveFailedEvent,
-)
-
-from crewai.cli.authentication.token import AuthError, get_auth_token
-from crewai.cli.version import get_crewai_version
-

 class TraceCollectionListener(BaseEventListener):
     """
     Trace collection listener that orchestrates trace collection
     """

-    complex_events = [
+    complex_events: ClassVar[list[str]] = [
         "task_started",
         "task_completed",
         "llm_call_started",

@@ -88,14 +86,14 @@ class TraceCollectionListener(BaseEventListener):
     _initialized = False
     _listeners_setup = False

-    def __new__(cls, batch_manager=None):
+    def __new__(cls, batch_manager: TraceBatchManager | None = None):
         if cls._instance is None:
             cls._instance = super().__new__(cls)
         return cls._instance

     def __init__(
         self,
-        batch_manager: Optional[TraceBatchManager] = None,
+        batch_manager: TraceBatchManager | None = None,
     ):
         if self._initialized:
             return

@@ -103,16 +101,19 @@ class TraceCollectionListener(BaseEventListener):
         super().__init__()
         self.batch_manager = batch_manager or TraceBatchManager()
         self._initialized = True
+        self.first_time_handler = FirstTimeTraceHandler()
+
+        if self.first_time_handler.initialize_for_first_time_user():
+            self.first_time_handler.set_batch_manager(self.batch_manager)

     def _check_authenticated(self) -> bool:
         """Check if tracing should be enabled"""
         try:
-            res = bool(get_auth_token())
-            return res
+            return bool(get_auth_token())
         except AuthError:
             return False

-    def _get_user_context(self) -> Dict[str, str]:
+    def _get_user_context(self) -> dict[str, str]:
         """Extract user context for tracing"""
         return {
             "user_id": os.getenv("CREWAI_USER_ID", "anonymous"),

@@ -161,8 +162,14 @@ class TraceCollectionListener(BaseEventListener):
         @event_bus.on(FlowFinishedEvent)
         def on_flow_finished(source, event):
             self._handle_trace_event("flow_finished", source, event)

             if self.batch_manager.batch_owner_type == "flow":
-                self.batch_manager.finalize_batch()
+                if self.first_time_handler.is_first_time:
+                    self.first_time_handler.mark_events_collected()
+                    self.first_time_handler.handle_execution_completion()
+                else:
+                    # Normal flow finalization
+                    self.batch_manager.finalize_batch()

         @event_bus.on(FlowPlotEvent)
         def on_flow_plot(source, event):

@@ -181,12 +188,20 @@ class TraceCollectionListener(BaseEventListener):
         def on_crew_completed(source, event):
             self._handle_trace_event("crew_kickoff_completed", source, event)
             if self.batch_manager.batch_owner_type == "crew":
-                self.batch_manager.finalize_batch()
+                if self.first_time_handler.is_first_time:
+                    self.first_time_handler.mark_events_collected()
+                    self.first_time_handler.handle_execution_completion()
+                else:
+                    self.batch_manager.finalize_batch()

         @event_bus.on(CrewKickoffFailedEvent)
         def on_crew_failed(source, event):
             self._handle_trace_event("crew_kickoff_failed", source, event)
-            self.batch_manager.finalize_batch()
+            if self.first_time_handler.is_first_time:
+                self.first_time_handler.mark_events_collected()
+                self.first_time_handler.handle_execution_completion()
+            else:
+                self.batch_manager.finalize_batch()

         @event_bus.on(TaskStartedEvent)
         def on_task_started(source, event):

@@ -325,17 +340,19 @@ class TraceCollectionListener(BaseEventListener):
             self._initialize_batch(user_context, execution_metadata)

     def _initialize_batch(
-        self, user_context: Dict[str, str], execution_metadata: Dict[str, Any]
+        self, user_context: dict[str, str], execution_metadata: dict[str, Any]
     ):
-        """Initialize trace batch if ephemeral"""
-        if not self._check_authenticated():
-            self.batch_manager.initialize_batch(
-                user_context, execution_metadata, use_ephemeral=True
-            )
-        else:
-            self.batch_manager.initialize_batch(
-                user_context, execution_metadata, use_ephemeral=False
-            )
+        """Initialize trace batch - auto-enable ephemeral for first-time users."""
+
+        if self.first_time_handler.is_first_time:
+            return self.batch_manager.initialize_batch(
+                user_context, execution_metadata, use_ephemeral=True
+            )
+
+        use_ephemeral = not self._check_authenticated()
+        return self.batch_manager.initialize_batch(
+            user_context, execution_metadata, use_ephemeral=use_ephemeral
+        )

     def _handle_trace_event(self, event_type: str, source: Any, event: Any):
         """Generic handler for context end events"""

@@ -371,11 +388,11 @@ class TraceCollectionListener(BaseEventListener):

     def _build_event_data(
         self, event_type: str, event: Any, source: Any
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Build event data"""
         if event_type not in self.complex_events:
-            return self._safe_serialize_to_dict(event)
-        elif event_type == "task_started":
+            return safe_serialize_to_dict(event)
+        if event_type == "task_started":
             return {
                 "task_description": event.task.description,
                 "expected_output": event.task.expected_output,

@@ -384,7 +401,7 @@ class TraceCollectionListener(BaseEventListener):
                 "agent_role": source.agent.role,
                 "task_id": str(event.task.id),
             }
-        elif event_type == "task_completed":
+        if event_type == "task_completed":
             return {
                 "task_description": event.task.description if event.task else None,
                 "task_name": event.task.name or event.task.description

@@ -397,63 +414,31 @@ class TraceCollectionListener(BaseEventListener):
                 else None,
                 "agent_role": event.output.agent if event.output else None,
             }
-        elif event_type == "agent_execution_started":
+        if event_type == "agent_execution_started":
             return {
                 "agent_role": event.agent.role,
                 "agent_goal": event.agent.goal,
                 "agent_backstory": event.agent.backstory,
             }
-        elif event_type == "agent_execution_completed":
+        if event_type == "agent_execution_completed":
             return {
                 "agent_role": event.agent.role,
                 "agent_goal": event.agent.goal,
                 "agent_backstory": event.agent.backstory,
             }
-        elif event_type == "llm_call_started":
-            event_data = self._safe_serialize_to_dict(event)
+        if event_type == "llm_call_started":
+            event_data = safe_serialize_to_dict(event)
             event_data["task_name"] = (
                 event.task_name or event.task_description
                 if hasattr(event, "task_name") and event.task_name
                 else None
            )
             return event_data
-        elif event_type == "llm_call_completed":
-            return self._safe_serialize_to_dict(event)
-        else:
-            return {
-                "event_type": event_type,
-                "event": self._safe_serialize_to_dict(event),
-                "source": source,
-            }
+        if event_type == "llm_call_completed":
+            return safe_serialize_to_dict(event)

-    # TODO: move to utils
-    def _safe_serialize_to_dict(
-        self, obj, exclude: set[str] | None = None
-    ) -> Dict[str, Any]:
-        """Safely serialize an object to a dictionary for event data."""
-        try:
-            serialized = to_serializable(obj, exclude)
-            if isinstance(serialized, dict):
-                return serialized
-            else:
-                return {"serialized_data": serialized}
-        except Exception as e:
-            return {"serialization_error": str(e), "object_type": type(obj).__name__}
-
-    # TODO: move to utils
-    def _truncate_messages(self, messages, max_content_length=500, max_messages=5):
-        """Truncate message content and limit number of messages"""
-        if not messages or not isinstance(messages, list):
-            return messages
-
-        # Limit number of messages
-        limited_messages = messages[:max_messages]
-
-        # Truncate each message content
-        for msg in limited_messages:
-            if isinstance(msg, dict) and "content" in msg:
-                content = msg["content"]
-                if len(content) > max_content_length:
-                    msg["content"] = content[:max_content_length] + "..."
-
-        return limited_messages
+        return {
+            "event_type": event_type,
+            "event": safe_serialize_to_dict(event),
+            "source": source,
+        }
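The complex_events list gains a ClassVar annotation, which tells type checkers that it is class-level configuration rather than a per-instance field. A small sketch of the distinction:

    from typing import ClassVar

    class Listener:
        # intentionally shared across instances; ClassVar documents that intent
        complex_events: ClassVar[list[str]] = ["task_started", "task_completed"]

        def is_complex(self, event_type: str) -> bool:
            return event_type in self.complex_events

The elif chains also collapse to early returns: each branch already returns, so the else ladder adds nesting without changing behavior.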
@@ -1,7 +1,7 @@
-from dataclasses import dataclass, field, asdict
-from datetime import datetime, timezone
-from typing import Dict, Any
 import uuid
+from dataclasses import asdict, dataclass, field
+from datetime import datetime, timezone
+from typing import Any


 @dataclass

@@ -13,7 +13,7 @@ class TraceEvent:
         default_factory=lambda: datetime.now(timezone.utc).isoformat()
     )
     type: str = ""
-    event_data: Dict[str, Any] = field(default_factory=dict)
+    event_data: dict[str, Any] = field(default_factory=dict)

-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return asdict(self)
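TraceEvent is a plain dataclass, so asdict yields a JSON-ready payload. A hedged usage sketch, assuming the remaining fields keep their defaults (the timestamp is filled in by its default_factory):

    evt = TraceEvent(type="llm_call_started", event_data={"model": "gpt-4o"})
    payload = evt.to_dict()
    assert payload["type"] == "llm_call_started"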
@@ -1,17 +1,25 @@
|
||||
import getpass
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
import uuid
|
||||
import hashlib
|
||||
import subprocess
|
||||
import getpass
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
import re
|
||||
import json
|
||||
import subprocess
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import click
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.text import Text
|
||||
|
||||
from crewai.utilities.paths import db_storage_path
|
||||
from crewai.utilities.serialization import to_serializable
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def is_tracing_enabled() -> bool:
|
||||
@@ -43,49 +51,167 @@ def _get_machine_id() -> str:
|
||||
|
||||
try:
|
||||
mac = ":".join(
|
||||
["{:02x}".format((uuid.getnode() >> b) & 0xFF) for b in range(0, 12, 2)][
|
||||
::-1
|
||||
]
|
||||
[f"{(uuid.getnode() >> b) & 0xFF:02x}" for b in range(0, 12, 2)][::-1]
|
||||
)
|
||||
parts.append(mac)
|
||||
except Exception:
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
sysname = platform.system()
|
||||
parts.append(sysname)
|
||||
try:
|
||||
sysname = platform.system()
|
||||
parts.append(sysname)
|
||||
except Exception:
|
||||
sysname = "unknown"
|
||||
parts.append(sysname)
|
||||
|
||||
try:
|
||||
if sysname == "Darwin":
|
||||
res = subprocess.run(
|
||||
["system_profiler", "SPHardwareDataType"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=2,
|
||||
)
|
||||
m = re.search(r"Hardware UUID:\s*([A-Fa-f0-9\-]+)", res.stdout)
|
||||
if m:
|
||||
parts.append(m.group(1))
|
||||
elif sysname == "Linux":
|
||||
try:
|
||||
parts.append(Path("/etc/machine-id").read_text().strip())
|
||||
except Exception:
|
||||
parts.append(Path("/sys/class/dmi/id/product_uuid").read_text().strip())
|
||||
res = subprocess.run(
|
||||
["/usr/sbin/system_profiler", "SPHardwareDataType"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=2,
|
||||
)
|
||||
m = re.search(r"Hardware UUID:\s*([A-Fa-f0-9\-]+)", res.stdout)
|
||||
if m:
|
||||
parts.append(m.group(1))
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
elif sysname == "Linux":
|
||||
linux_id = _get_linux_machine_id()
|
||||
if linux_id:
|
||||
parts.append(linux_id)
|
||||
|
||||
elif sysname == "Windows":
|
||||
res = subprocess.run(
|
||||
["wmic", "csproduct", "get", "UUID"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=2,
|
||||
)
|
||||
lines = [line.strip() for line in res.stdout.splitlines() if line.strip()]
|
||||
if len(lines) >= 2:
|
||||
parts.append(lines[1])
|
||||
except Exception:
|
||||
try:
|
||||
res = subprocess.run(
|
||||
[
|
||||
"C:\\Windows\\System32\\wbem\\wmic.exe",
|
||||
"csproduct",
|
||||
"get",
|
||||
"UUID",
|
||||
],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=2,
|
||||
)
|
||||
lines = [
|
||||
line.strip() for line in res.stdout.splitlines() if line.strip()
|
||||
]
|
||||
if len(lines) >= 2:
|
||||
parts.append(lines[1])
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
else:
|
||||
generic_id = _get_generic_system_id()
|
||||
if generic_id:
|
||||
parts.append(generic_id)
|
||||
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
if len(parts) <= 1:
|
||||
try:
|
||||
import socket
|
||||
|
||||
parts.append(socket.gethostname())
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
try:
|
||||
parts.append(getpass.getuser())
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
try:
|
||||
parts.append(platform.machine())
|
||||
parts.append(platform.processor())
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
if not parts:
|
||||
parts.append("unknown-system")
|
||||
parts.append(str(uuid.uuid4()))
|
||||
|
||||
return hashlib.sha256("".join(parts).encode()).hexdigest()
|
||||
|
||||
|
||||
def _get_linux_machine_id() -> str | None:
|
||||
linux_id_sources = [
|
||||
"/etc/machine-id",
|
||||
"/sys/class/dmi/id/product_uuid",
|
||||
"/proc/sys/kernel/random/boot_id",
|
||||
"/sys/class/dmi/id/board_serial",
|
||||
"/sys/class/dmi/id/chassis_serial",
|
||||
]
|
||||
|
||||
for source in linux_id_sources:
|
||||
try:
|
||||
path = Path(source)
|
||||
if path.exists() and path.is_file():
|
||||
content = path.read_text().strip()
|
||||
if content and content.lower() not in [
|
||||
"unknown",
|
||||
"to be filled by o.e.m.",
|
||||
"",
|
||||
]:
|
||||
return content
|
||||
except Exception: # noqa: S112, PERF203
|
||||
continue
|
||||
|
||||
try:
|
||||
import socket
|
||||
|
||||
hostname = socket.gethostname()
|
||||
arch = platform.machine()
|
||||
if hostname and arch:
|
||||
return f"{hostname}-{arch}"
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _get_generic_system_id() -> str | None:
|
||||
try:
|
||||
parts = []
|
||||
|
||||
try:
|
||||
import socket
|
||||
|
||||
hostname = socket.gethostname()
|
||||
if hostname:
|
||||
parts.append(hostname)
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
try:
|
||||
parts.append(platform.machine())
|
||||
parts.append(platform.processor())
|
||||
parts.append(platform.architecture()[0])
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
try:
|
||||
container_id = os.environ.get(
|
||||
"HOSTNAME", os.environ.get("CONTAINER_ID", "")
|
||||
)
|
||||
if container_id:
|
||||
parts.append(container_id)
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
if parts:
|
||||
return "-".join(filter(None, parts))
|
||||
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _user_data_file() -> Path:
|
||||
base = Path(db_storage_path())
|
||||
base.mkdir(parents=True, exist_ok=True)
|
||||
@@ -97,8 +223,8 @@ def _load_user_data() -> dict:
|
||||
if p.exists():
|
||||
try:
|
||||
return json.loads(p.read_text())
|
||||
except Exception:
|
||||
pass
|
||||
except (json.JSONDecodeError, OSError, PermissionError) as e:
|
||||
logger.warning(f"Failed to load user data: {e}")
|
||||
return {}
|
||||
|
||||
|
||||
@@ -106,8 +232,8 @@ def _save_user_data(data: dict) -> None:
|
||||
try:
|
||||
p = _user_data_file()
|
||||
p.write_text(json.dumps(data, indent=2))
|
||||
except Exception:
|
||||
pass
|
||||
except (OSError, PermissionError) as e:
|
||||
logger.warning(f"Failed to save user data: {e}")
|
||||
|
||||
|
||||
def get_user_id() -> str:
|
||||
@@ -151,3 +277,103 @@ def mark_first_execution_done() -> None:
|
||||
}
|
||||
)
|
||||
_save_user_data(data)
|
||||
|
||||
|
||||
def safe_serialize_to_dict(obj, exclude: set[str] | None = None) -> dict[str, Any]:
|
||||
"""Safely serialize an object to a dictionary for event data."""
|
||||
try:
|
||||
serialized = to_serializable(obj, exclude)
|
||||
if isinstance(serialized, dict):
|
||||
return serialized
|
||||
return {"serialized_data": serialized}
|
||||
except Exception as e:
|
||||
return {"serialization_error": str(e), "object_type": type(obj).__name__}
|
||||
|
||||
|
||||
def truncate_messages(messages, max_content_length=500, max_messages=5):
|
||||
"""Truncate message content and limit number of messages"""
|
||||
if not messages or not isinstance(messages, list):
|
||||
return messages
|
||||
|
||||
limited_messages = messages[:max_messages]
|
||||
|
||||
for msg in limited_messages:
|
||||
if isinstance(msg, dict) and "content" in msg:
|
||||
content = msg["content"]
|
||||
if len(content) > max_content_length:
|
||||
msg["content"] = content[:max_content_length] + "..."
|
||||
|
||||
return limited_messages


def should_auto_collect_first_time_traces() -> bool:
    """True if we should auto-collect traces for a first-time user."""
    if _is_test_environment():
        return False
    return is_first_execution()


def prompt_user_for_trace_viewing(timeout_seconds: int = 20) -> bool:
    """
    Prompt the user, with a timeout, to ask whether they want to view their traces.
    Returns True if the user wants to see traces, False otherwise.
    """
    if _is_test_environment():
        return False

    try:
        import threading

        console = Console()

        content = Text()
        content.append("🔍 ", style="cyan bold")
        content.append(
            "Detailed execution traces are available!\n\n", style="cyan bold"
        )
        content.append("View insights including:\n", style="white")
        content.append(" • Agent decision-making process\n", style="bright_blue")
        content.append(" • Task execution flow and timing\n", style="bright_blue")
        content.append(" • Tool usage details", style="bright_blue")

        panel = Panel(
            content,
            title="[bold cyan]Execution Traces[/bold cyan]",
            border_style="cyan",
            padding=(1, 2),
        )
        console.print("\n")
        console.print(panel)

        prompt_text = click.style(
            f"Would you like to view your execution traces? [y/N] ({timeout_seconds}s timeout): ",
            fg="white",
            bold=True,
        )
        click.echo(prompt_text, nl=False)

        result = [False]

        def get_input():
            try:
                response = input().strip().lower()
                result[0] = response in ["y", "yes"]
            except (EOFError, KeyboardInterrupt):
                result[0] = False

        input_thread = threading.Thread(target=get_input, daemon=True)
        input_thread.start()
        input_thread.join(timeout=timeout_seconds)

        if input_thread.is_alive():
            return False

        return result[0]

    except Exception:
        return False


def mark_first_execution_completed() -> None:
    """Mark first execution as completed (called after trace prompt)."""
    mark_first_execution_done()
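
The prompt above relies on a daemon input thread joined with a timeout; a self-contained sketch of that pattern (names here are illustrative):

import threading

def input_with_timeout(prompt: str, timeout: float) -> str | None:
    box: list[str | None] = [None]

    def reader() -> None:
        try:
            box[0] = input(prompt)
        except EOFError:
            box[0] = None

    t = threading.Thread(target=reader, daemon=True)  # daemon thread never blocks exit
    t.start()
    t.join(timeout=timeout)
    return None if t.is_alive() else box[0]  # None means no answer arrived in time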

@@ -2,4 +2,4 @@

This module contains all event types used throughout the CrewAI system
for monitoring and extending agent, crew, task, and tool execution.
"""
"""

@@ -2,14 +2,15 @@

from __future__ import annotations

from typing import Any, Dict, List, Optional, Sequence, Union
from collections.abc import Sequence
from typing import Any

from pydantic import model_validator
from pydantic import ConfigDict, model_validator

from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.events.base_events import BaseEvent
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.events.base_events import BaseEvent


class AgentExecutionStartedEvent(BaseEvent):
@@ -17,11 +18,11 @@ class AgentExecutionStartedEvent(BaseEvent):

    agent: BaseAgent
    task: Any
    tools: Optional[Sequence[Union[BaseTool, CrewStructuredTool]]]
    tools: Sequence[BaseTool | CrewStructuredTool] | None
    task_prompt: str
    type: str = "agent_execution_started"

    model_config = {"arbitrary_types_allowed": True}
    model_config = ConfigDict(arbitrary_types_allowed=True)

    @model_validator(mode="after")
    def set_fingerprint_data(self):
@@ -45,7 +46,7 @@ class AgentExecutionCompletedEvent(BaseEvent):
    output: str
    type: str = "agent_execution_completed"

    model_config = {"arbitrary_types_allowed": True}
    model_config = ConfigDict(arbitrary_types_allowed=True)

    @model_validator(mode="after")
    def set_fingerprint_data(self):
@@ -69,7 +70,7 @@ class AgentExecutionErrorEvent(BaseEvent):
    error: str
    type: str = "agent_execution_error"

    model_config = {"arbitrary_types_allowed": True}
    model_config = ConfigDict(arbitrary_types_allowed=True)

    @model_validator(mode="after")
    def set_fingerprint_data(self):
@@ -89,18 +90,18 @@ class AgentExecutionErrorEvent(BaseEvent):
class LiteAgentExecutionStartedEvent(BaseEvent):
    """Event emitted when a LiteAgent starts executing"""

    agent_info: Dict[str, Any]
    tools: Optional[Sequence[Union[BaseTool, CrewStructuredTool]]]
    messages: Union[str, List[Dict[str, str]]]
    agent_info: dict[str, Any]
    tools: Sequence[BaseTool | CrewStructuredTool] | None
    messages: str | list[dict[str, str]]
    type: str = "lite_agent_execution_started"

    model_config = {"arbitrary_types_allowed": True}
    model_config = ConfigDict(arbitrary_types_allowed=True)


class LiteAgentExecutionCompletedEvent(BaseEvent):
    """Event emitted when a LiteAgent completes execution"""

    agent_info: Dict[str, Any]
    agent_info: dict[str, Any]
    output: str
    type: str = "lite_agent_execution_completed"

@@ -108,7 +109,7 @@ class LiteAgentExecutionCompletedEvent(BaseEvent):
class LiteAgentExecutionErrorEvent(BaseEvent):
    """Event emitted when a LiteAgent encounters an error during execution"""

    agent_info: Dict[str, Any]
    agent_info: dict[str, Any]
    error: str
    type: str = "lite_agent_execution_error"


@@ -1,4 +1,4 @@
from typing import TYPE_CHECKING, Any, Dict, Optional, Union
from typing import TYPE_CHECKING, Any

from crewai.events.base_events import BaseEvent

@@ -11,8 +11,8 @@ else:
class CrewBaseEvent(BaseEvent):
    """Base class for crew events with fingerprint handling"""

    crew_name: Optional[str]
    crew: Optional[Crew] = None
    crew_name: str | None
    crew: Crew | None = None

    def __init__(self, **data):
        super().__init__(**data)
@@ -38,7 +38,7 @@ class CrewBaseEvent(BaseEvent):
class CrewKickoffStartedEvent(CrewBaseEvent):
    """Event emitted when a crew starts execution"""

    inputs: Optional[Dict[str, Any]]
    inputs: dict[str, Any] | None
    type: str = "crew_kickoff_started"


@@ -62,7 +62,7 @@ class CrewTrainStartedEvent(CrewBaseEvent):

    n_iterations: int
    filename: str
    inputs: Optional[Dict[str, Any]]
    inputs: dict[str, Any] | None
    type: str = "crew_train_started"


@@ -85,8 +85,8 @@ class CrewTestStartedEvent(CrewBaseEvent):
    """Event emitted when a crew starts testing"""

    n_iterations: int
    eval_llm: Optional[Union[str, Any]]
    inputs: Optional[Dict[str, Any]]
    eval_llm: str | Any | None
    inputs: dict[str, Any] | None
    type: str = "crew_test_started"



@@ -1,4 +1,4 @@
from typing import Any, Dict, Optional, Union
from typing import Any

from pydantic import BaseModel, ConfigDict

@@ -16,7 +16,7 @@ class FlowStartedEvent(FlowEvent):
    """Event emitted when a flow starts execution"""

    flow_name: str
    inputs: Optional[Dict[str, Any]] = None
    inputs: dict[str, Any] | None = None
    type: str = "flow_started"


@@ -32,8 +32,8 @@ class MethodExecutionStartedEvent(FlowEvent):

    flow_name: str
    method_name: str
    state: Union[Dict[str, Any], BaseModel]
    params: Optional[Dict[str, Any]] = None
    state: dict[str, Any] | BaseModel
    params: dict[str, Any] | None = None
    type: str = "method_execution_started"


@@ -43,7 +43,7 @@ class MethodExecutionFinishedEvent(FlowEvent):
    flow_name: str
    method_name: str
    result: Any = None
    state: Union[Dict[str, Any], BaseModel]
    state: dict[str, Any] | BaseModel
    type: str = "method_execution_finished"


@@ -62,7 +62,7 @@ class FlowFinishedEvent(FlowEvent):
    """Event emitted when a flow completes execution"""

    flow_name: str
    result: Optional[Any] = None
    result: Any | None = None
    type: str = "flow_finished"



@@ -1,6 +1,5 @@
from crewai.events.base_events import BaseEvent

from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.events.base_events import BaseEvent


class KnowledgeRetrievalStartedEvent(BaseEvent):

@@ -1,5 +1,5 @@
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from typing import Any

from pydantic import BaseModel

@@ -7,14 +7,14 @@ from crewai.events.base_events import BaseEvent


class LLMEventBase(BaseEvent):
    task_name: Optional[str] = None
    task_id: Optional[str] = None
    task_name: str | None = None
    task_id: str | None = None

    agent_id: Optional[str] = None
    agent_role: Optional[str] = None
    agent_id: str | None = None
    agent_role: str | None = None

    from_task: Optional[Any] = None
    from_agent: Optional[Any] = None
    from_task: Any | None = None
    from_agent: Any | None = None

    def __init__(self, **data):
        super().__init__(**data)
@@ -38,11 +38,11 @@ class LLMCallStartedEvent(LLMEventBase):
    """

    type: str = "llm_call_started"
    model: Optional[str] = None
    messages: Optional[Union[str, List[Dict[str, Any]]]] = None
    tools: Optional[List[dict[str, Any]]] = None
    callbacks: Optional[List[Any]] = None
    available_functions: Optional[Dict[str, Any]] = None
    model: str | None = None
    messages: str | list[dict[str, Any]] | None = None
    tools: list[dict[str, Any]] | None = None
    callbacks: list[Any] | None = None
    available_functions: dict[str, Any] | None = None


class LLMCallCompletedEvent(LLMEventBase):
@@ -52,7 +52,7 @@ class LLMCallCompletedEvent(LLMEventBase):
    messages: str | list[dict[str, Any]] | None = None
    response: Any
    call_type: LLMCallType
    model: Optional[str] = None
    model: str | None = None


class LLMCallFailedEvent(LLMEventBase):
@@ -64,13 +64,13 @@ class LLMCallFailedEvent(LLMEventBase):

class FunctionCall(BaseModel):
    arguments: str
    name: Optional[str] = None
    name: str | None = None


class ToolCall(BaseModel):
    id: Optional[str] = None
    id: str | None = None
    function: FunctionCall
    type: Optional[str] = None
    type: str | None = None
    index: int


@@ -79,4 +79,4 @@ class LLMStreamChunkEvent(LLMEventBase):

    type: str = "llm_stream_chunk"
    chunk: str
    tool_call: Optional[ToolCall] = None
    tool_call: ToolCall | None = None
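
Given the FunctionCall and ToolCall models above, a streamed tool-call chunk could be assembled like this (field values illustrative):

call = ToolCall(
    id="call_0",
    function=FunctionCall(name="search", arguments='{"query": "crewai"}'),
    type="function",
    index=0,
)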

@@ -1,5 +1,6 @@
from collections.abc import Callable
from inspect import getsource
from typing import Any, Callable, Optional, Union
from typing import Any

from crewai.events.base_events import BaseEvent

@@ -13,12 +14,12 @@ class LLMGuardrailStartedEvent(BaseEvent):
    """

    type: str = "llm_guardrail_started"
    guardrail: Union[str, Callable]
    guardrail: str | Callable
    retry_count: int

    def __init__(self, **data):
        from crewai.tasks.llm_guardrail import LLMGuardrail
        from crewai.tasks.hallucination_guardrail import HallucinationGuardrail
        from crewai.tasks.llm_guardrail import LLMGuardrail

        super().__init__(**data)

@@ -41,5 +42,5 @@ class LLMGuardrailCompletedEvent(BaseEvent):
    type: str = "llm_guardrail_completed"
    success: bool
    result: Any
    error: Optional[str] = None
    error: str | None = None
    retry_count: int

@@ -1,6 +1,8 @@
"""Agent logging events that don't reference BaseAgent to avoid circular imports."""

from typing import Any, Optional
from typing import Any

from pydantic import ConfigDict

from crewai.events.base_events import BaseEvent

@@ -9,7 +11,7 @@ class AgentLogsStartedEvent(BaseEvent):
    """Event emitted when agent logs should be shown at start"""

    agent_role: str
    task_description: Optional[str] = None
    task_description: str | None = None
    verbose: bool = False
    type: str = "agent_logs_started"

@@ -22,4 +24,4 @@ class AgentLogsExecutionEvent(BaseEvent):
    verbose: bool = False
    type: str = "agent_logs_execution"

    model_config = {"arbitrary_types_allowed": True}
    model_config = ConfigDict(arbitrary_types_allowed=True)
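
The model_config change repeated across these events swaps a plain dict for pydantic's ConfigDict; both are valid in Pydantic v2, but the typed form is checkable. A minimal before/after:

from pydantic import BaseModel, ConfigDict

class Before(BaseModel):
    model_config = {"arbitrary_types_allowed": True}  # untyped dict

class After(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)  # typed, checker-friendly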

@@ -1,4 +1,4 @@
from typing import Any, Dict, Optional
from typing import Any

from crewai.events.base_events import BaseEvent

@@ -7,12 +7,12 @@ class MemoryBaseEvent(BaseEvent):
    """Base event for memory operations"""

    type: str
    task_id: Optional[str] = None
    task_name: Optional[str] = None
    from_task: Optional[Any] = None
    from_agent: Optional[Any] = None
    agent_role: Optional[str] = None
    agent_id: Optional[str] = None
    task_id: str | None = None
    task_name: str | None = None
    from_task: Any | None = None
    from_agent: Any | None = None
    agent_role: str | None = None
    agent_id: str | None = None

    def __init__(self, **data):
        super().__init__(**data)
@@ -26,7 +26,7 @@ class MemoryQueryStartedEvent(MemoryBaseEvent):
    type: str = "memory_query_started"
    query: str
    limit: int
    score_threshold: Optional[float] = None
    score_threshold: float | None = None


class MemoryQueryCompletedEvent(MemoryBaseEvent):
@@ -36,7 +36,7 @@ class MemoryQueryCompletedEvent(MemoryBaseEvent):
    query: str
    results: Any
    limit: int
    score_threshold: Optional[float] = None
    score_threshold: float | None = None
    query_time_ms: float


@@ -46,7 +46,7 @@ class MemoryQueryFailedEvent(MemoryBaseEvent):
    type: str = "memory_query_failed"
    query: str
    limit: int
    score_threshold: Optional[float] = None
    score_threshold: float | None = None
    error: str


@@ -54,9 +54,9 @@ class MemorySaveStartedEvent(MemoryBaseEvent):
    """Event emitted when a memory save operation is started"""

    type: str = "memory_save_started"
    value: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None
    agent_role: Optional[str] = None
    value: str | None = None
    metadata: dict[str, Any] | None = None
    agent_role: str | None = None


class MemorySaveCompletedEvent(MemoryBaseEvent):
@@ -64,8 +64,8 @@ class MemorySaveCompletedEvent(MemoryBaseEvent):

    type: str = "memory_save_completed"
    value: str
    metadata: Optional[Dict[str, Any]] = None
    agent_role: Optional[str] = None
    metadata: dict[str, Any] | None = None
    agent_role: str | None = None
    save_time_ms: float


@@ -73,9 +73,9 @@ class MemorySaveFailedEvent(MemoryBaseEvent):
    """Event emitted when a memory save operation fails"""

    type: str = "memory_save_failed"
    value: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None
    agent_role: Optional[str] = None
    value: str | None = None
    metadata: dict[str, Any] | None = None
    agent_role: str | None = None
    error: str


@@ -83,13 +83,13 @@ class MemoryRetrievalStartedEvent(MemoryBaseEvent):
    """Event emitted when memory retrieval for a task prompt starts"""

    type: str = "memory_retrieval_started"
    task_id: Optional[str] = None
    task_id: str | None = None


class MemoryRetrievalCompletedEvent(MemoryBaseEvent):
    """Event emitted when memory retrieval for a task prompt completes successfully"""

    type: str = "memory_retrieval_completed"
    task_id: Optional[str] = None
    task_id: str | None = None
    memory_content: str
    retrieval_time_ms: float

@@ -1,5 +1,6 @@
from typing import Any

from crewai.events.base_events import BaseEvent
from typing import Any, Optional


class ReasoningEvent(BaseEvent):
@@ -9,10 +10,10 @@ class ReasoningEvent(BaseEvent):
    attempt: int = 1
    agent_role: str
    task_id: str
    task_name: Optional[str] = None
    from_task: Optional[Any] = None
    agent_id: Optional[str] = None
    from_agent: Optional[Any] = None
    task_name: str | None = None
    from_task: Any | None = None
    agent_id: str | None = None
    from_agent: Any | None = None

    def __init__(self, **data):
        super().__init__(**data)

@@ -1,15 +1,15 @@
from typing import Any, Optional
from typing import Any

from crewai.tasks.task_output import TaskOutput
from crewai.events.base_events import BaseEvent
from crewai.tasks.task_output import TaskOutput


class TaskStartedEvent(BaseEvent):
    """Event emitted when a task starts"""

    type: str = "task_started"
    context: Optional[str]
    task: Optional[Any] = None
    context: str | None
    task: Any | None = None

    def __init__(self, **data):
        super().__init__(**data)
@@ -29,7 +29,7 @@ class TaskCompletedEvent(BaseEvent):

    output: TaskOutput
    type: str = "task_completed"
    task: Optional[Any] = None
    task: Any | None = None

    def __init__(self, **data):
        super().__init__(**data)
@@ -49,7 +49,7 @@ class TaskFailedEvent(BaseEvent):

    error: str
    type: str = "task_failed"
    task: Optional[Any] = None
    task: Any | None = None

    def __init__(self, **data):
        super().__init__(**data)
@@ -69,7 +69,7 @@ class TaskEvaluationEvent(BaseEvent):

    type: str = "task_evaluation"
    evaluation_type: str
    task: Optional[Any] = None
    task: Any | None = None

    def __init__(self, **data):
        super().__init__(**data)

@@ -1,5 +1,8 @@
from collections.abc import Callable
from datetime import datetime
from typing import Any, Callable, Dict, Optional
from typing import Any

from pydantic import ConfigDict

from crewai.events.base_events import BaseEvent

@@ -7,21 +10,21 @@ from crewai.events.base_events import BaseEvent
class ToolUsageEvent(BaseEvent):
    """Base event for tool usage tracking"""

    agent_key: Optional[str] = None
    agent_role: Optional[str] = None
    agent_id: Optional[str] = None
    agent_key: str | None = None
    agent_role: str | None = None
    agent_id: str | None = None
    tool_name: str
    tool_args: Dict[str, Any] | str
    tool_class: Optional[str] = None
    tool_args: dict[str, Any] | str
    tool_class: str | None = None
    run_attempts: int | None = None
    delegations: int | None = None
    agent: Optional[Any] = None
    task_name: Optional[str] = None
    task_id: Optional[str] = None
    from_task: Optional[Any] = None
    from_agent: Optional[Any] = None
    agent: Any | None = None
    task_name: str | None = None
    task_id: str | None = None
    from_task: Any | None = None
    from_agent: Any | None = None

    model_config = {"arbitrary_types_allowed": True}
    model_config = ConfigDict(arbitrary_types_allowed=True)

    def __init__(self, **data):
        super().__init__(**data)
@@ -81,9 +84,9 @@ class ToolExecutionErrorEvent(BaseEvent):
    error: Any
    type: str = "tool_execution_error"
    tool_name: str
    tool_args: Dict[str, Any]
    tool_args: dict[str, Any]
    tool_class: Callable
    agent: Optional[Any] = None
    agent: Any | None = None

    def __init__(self, **data):
        super().__init__(**data)

@@ -1,25 +1,25 @@
from typing import Any, Dict, Optional
from typing import Any, ClassVar

from rich.console import Console
from rich.live import Live
from rich.panel import Panel
from rich.syntax import Syntax
from rich.text import Text
from rich.tree import Tree
from rich.live import Live
from rich.syntax import Syntax


class ConsoleFormatter:
    current_crew_tree: Optional[Tree] = None
    current_task_branch: Optional[Tree] = None
    current_agent_branch: Optional[Tree] = None
    current_tool_branch: Optional[Tree] = None
    current_flow_tree: Optional[Tree] = None
    current_method_branch: Optional[Tree] = None
    current_lite_agent_branch: Optional[Tree] = None
    tool_usage_counts: Dict[str, int] = {}
    current_reasoning_branch: Optional[Tree] = None  # Track reasoning status
    current_crew_tree: Tree | None = None
    current_task_branch: Tree | None = None
    current_agent_branch: Tree | None = None
    current_tool_branch: Tree | None = None
    current_flow_tree: Tree | None = None
    current_method_branch: Tree | None = None
    current_lite_agent_branch: Tree | None = None
    tool_usage_counts: ClassVar[dict[str, int]] = {}
    current_reasoning_branch: Tree | None = None  # Track reasoning status
    _live_paused: bool = False
    current_llm_tool_tree: Optional[Tree] = None
    current_llm_tool_tree: Tree | None = None

    def __init__(self, verbose: bool = False):
        self.console = Console(width=None)
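
Note the one attribute that gains ClassVar: tool_usage_counts is a mutable dict defined on the class, so every ConsoleFormatter instance shares it, and the annotation makes that explicit. A distilled illustration:

from typing import ClassVar

class Counter:
    counts: ClassVar[dict[str, int]] = {}  # one dict shared by all instances

a, b = Counter(), Counter()
a.counts["tool"] = 1
assert b.counts["tool"] == 1  # b observes a's write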
@@ -29,7 +29,7 @@ class ConsoleFormatter:
        # instance so the previous render is replaced instead of writing a new one.
        # Once any non-Tree renderable is printed we stop the Live session so the
        # final Tree persists on the terminal.
        self._live: Optional[Live] = None
        self._live: Live | None = None

    def create_panel(self, content: Text, title: str, style: str = "blue") -> Panel:
        """Create a standardized panel with consistent styling."""
@@ -45,7 +45,7 @@ class ConsoleFormatter:
        title: str,
        name: str,
        status_style: str = "blue",
        tool_args: Dict[str, Any] | str = "",
        tool_args: dict[str, Any] | str = "",
        **fields,
    ) -> Text:
        """Create standardized status content with consistent formatting."""
@@ -70,7 +70,7 @@ class ConsoleFormatter:
        prefix: str,
        name: str,
        style: str = "blue",
        status: Optional[str] = None,
        status: str | None = None,
    ) -> None:
        """Update tree label with consistent formatting."""
        label = Text()
@@ -115,7 +115,7 @@ class ConsoleFormatter:
            self._live.update(tree, refresh=True)
            return  # Nothing else to do

        # Case 2: blank line while a live session is running – ignore so we
        # Case 2: blank line while a live session is running - ignore so we
        # don't break the in-place rendering behaviour
        if len(args) == 0 and self._live:
            return
@@ -156,7 +156,7 @@ class ConsoleFormatter:

    def update_crew_tree(
        self,
        tree: Optional[Tree],
        tree: Tree | None,
        crew_name: str,
        source_id: str,
        status: str = "completed",
@@ -196,7 +196,7 @@ class ConsoleFormatter:

        self.print_panel(content, title, style)

    def create_crew_tree(self, crew_name: str, source_id: str) -> Optional[Tree]:
    def create_crew_tree(self, crew_name: str, source_id: str) -> Tree | None:
        """Create and initialize a new crew tree with initial status."""
        if not self.verbose:
            return None
@@ -220,8 +220,8 @@ class ConsoleFormatter:
        return tree

    def create_task_branch(
        self, crew_tree: Optional[Tree], task_id: str, task_name: Optional[str] = None
    ) -> Optional[Tree]:
        self, crew_tree: Tree | None, task_id: str, task_name: str | None = None
    ) -> Tree | None:
        """Create and initialize a task branch."""
        if not self.verbose:
            return None
@@ -255,11 +255,11 @@ class ConsoleFormatter:

    def update_task_status(
        self,
        crew_tree: Optional[Tree],
        crew_tree: Tree | None,
        task_id: str,
        agent_role: str,
        status: str = "completed",
        task_name: Optional[str] = None,
        task_name: str | None = None,
    ) -> None:
        """Update task status in the tree."""
        if not self.verbose or crew_tree is None:
@@ -306,8 +306,8 @@ class ConsoleFormatter:
        self.print_panel(content, panel_title, style)

    def create_agent_branch(
        self, task_branch: Optional[Tree], agent_role: str, crew_tree: Optional[Tree]
    ) -> Optional[Tree]:
        self, task_branch: Tree | None, agent_role: str, crew_tree: Tree | None
    ) -> Tree | None:
        """Create and initialize an agent branch."""
        if not self.verbose or not task_branch or not crew_tree:
            return None
@@ -325,9 +325,9 @@ class ConsoleFormatter:

    def update_agent_status(
        self,
        agent_branch: Optional[Tree],
        agent_branch: Tree | None,
        agent_role: str,
        crew_tree: Optional[Tree],
        crew_tree: Tree | None,
        status: str = "completed",
    ) -> None:
        """Update agent status in the tree."""
@@ -336,7 +336,7 @@ class ConsoleFormatter:
        # altering the tree. Keeping it a no-op avoids duplicate status lines.
        return

    def create_flow_tree(self, flow_name: str, flow_id: str) -> Optional[Tree]:
    def create_flow_tree(self, flow_name: str, flow_id: str) -> Tree | None:
        """Create and initialize a flow tree."""
        content = self.create_status_content(
            "Starting Flow Execution", flow_name, "blue", ID=flow_id
@@ -356,7 +356,7 @@ class ConsoleFormatter:

        return flow_tree

    def start_flow(self, flow_name: str, flow_id: str) -> Optional[Tree]:
    def start_flow(self, flow_name: str, flow_id: str) -> Tree | None:
        """Initialize a flow execution tree."""
        flow_tree = Tree("")
        flow_label = Text()
@@ -376,7 +376,7 @@ class ConsoleFormatter:

    def update_flow_status(
        self,
        flow_tree: Optional[Tree],
        flow_tree: Tree | None,
        flow_name: str,
        flow_id: str,
        status: str = "completed",
@@ -423,11 +423,11 @@ class ConsoleFormatter:

    def update_method_status(
        self,
        method_branch: Optional[Tree],
        flow_tree: Optional[Tree],
        method_branch: Tree | None,
        flow_tree: Tree | None,
        method_name: str,
        status: str = "running",
    ) -> Optional[Tree]:
    ) -> Tree | None:
        """Update method status in the flow tree."""
        if not flow_tree:
            return None
@@ -480,7 +480,7 @@ class ConsoleFormatter:
    def handle_llm_tool_usage_started(
        self,
        tool_name: str,
        tool_args: Dict[str, Any] | str,
        tool_args: dict[str, Any] | str,
    ):
        # Create status content for the tool usage
        content = self.create_status_content(
@@ -520,11 +520,11 @@ class ConsoleFormatter:

    def handle_tool_usage_started(
        self,
        agent_branch: Optional[Tree],
        agent_branch: Tree | None,
        tool_name: str,
        crew_tree: Optional[Tree],
        tool_args: Dict[str, Any] | str = "",
    ) -> Optional[Tree]:
        crew_tree: Tree | None,
        tool_args: dict[str, Any] | str = "",
    ) -> Tree | None:
        """Handle tool usage started event."""
        if not self.verbose:
            return None
@@ -569,9 +569,9 @@ class ConsoleFormatter:

    def handle_tool_usage_finished(
        self,
        tool_branch: Optional[Tree],
        tool_branch: Tree | None,
        tool_name: str,
        crew_tree: Optional[Tree],
        crew_tree: Tree | None,
    ) -> None:
        """Handle tool usage finished event."""
        if not self.verbose or tool_branch is None:
@@ -600,10 +600,10 @@ class ConsoleFormatter:

    def handle_tool_usage_error(
        self,
        tool_branch: Optional[Tree],
        tool_branch: Tree | None,
        tool_name: str,
        error: str,
        crew_tree: Optional[Tree],
        crew_tree: Tree | None,
    ) -> None:
        """Handle tool usage error event."""
        if not self.verbose:
@@ -631,9 +631,9 @@ class ConsoleFormatter:

    def handle_llm_call_started(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
    ) -> Optional[Tree]:
        agent_branch: Tree | None,
        crew_tree: Tree | None,
    ) -> Tree | None:
        """Handle LLM call started event."""
        if not self.verbose:
            return None
@@ -672,9 +672,9 @@ class ConsoleFormatter:

    def handle_llm_call_completed(
        self,
        tool_branch: Optional[Tree],
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
        tool_branch: Tree | None,
        agent_branch: Tree | None,
        crew_tree: Tree | None,
    ) -> None:
        """Handle LLM call completed event."""
        if not self.verbose:
@@ -736,7 +736,7 @@ class ConsoleFormatter:
        self.print()

    def handle_llm_call_failed(
        self, tool_branch: Optional[Tree], error: str, crew_tree: Optional[Tree]
        self, tool_branch: Tree | None, error: str, crew_tree: Tree | None
    ) -> None:
        """Handle LLM call failed event."""
        if not self.verbose:
@@ -789,7 +789,7 @@ class ConsoleFormatter:

    def handle_crew_test_started(
        self, crew_name: str, source_id: str, n_iterations: int
    ) -> Optional[Tree]:
    ) -> Tree | None:
        """Handle crew test started event."""
        if not self.verbose:
            return None
@@ -823,7 +823,7 @@ class ConsoleFormatter:
        return test_tree

    def handle_crew_test_completed(
        self, flow_tree: Optional[Tree], crew_name: str
        self, flow_tree: Tree | None, crew_name: str
    ) -> None:
        """Handle crew test completed event."""
        if not self.verbose:
@@ -913,7 +913,7 @@ class ConsoleFormatter:
        self.print_panel(failure_content, "Test Failure", "red")
        self.print()

    def create_lite_agent_branch(self, lite_agent_role: str) -> Optional[Tree]:
    def create_lite_agent_branch(self, lite_agent_role: str) -> Tree | None:
        """Create and initialize a lite agent branch."""
        if not self.verbose:
            return None
@@ -935,10 +935,10 @@ class ConsoleFormatter:

    def update_lite_agent_status(
        self,
        lite_agent_branch: Optional[Tree],
        lite_agent_branch: Tree | None,
        lite_agent_role: str,
        status: str = "completed",
        **fields: Dict[str, Any],
        **fields: dict[str, Any],
    ) -> None:
        """Update lite agent status in the tree."""
        if not self.verbose or lite_agent_branch is None:
@@ -981,7 +981,7 @@ class ConsoleFormatter:
        lite_agent_role: str,
        status: str = "started",
        error: Any = None,
        **fields: Dict[str, Any],
        **fields: dict[str, Any],
    ) -> None:
        """Handle lite agent execution events with consistent formatting."""
        if not self.verbose:
@@ -1006,9 +1006,9 @@ class ConsoleFormatter:

    def handle_knowledge_retrieval_started(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
    ) -> Optional[Tree]:
        agent_branch: Tree | None,
        crew_tree: Tree | None,
    ) -> Tree | None:
        """Handle knowledge retrieval started event."""
        if not self.verbose:
            return None
@@ -1034,13 +1034,13 @@ class ConsoleFormatter:

    def handle_knowledge_retrieval_completed(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
        agent_branch: Tree | None,
        crew_tree: Tree | None,
        retrieved_knowledge: Any,
    ) -> None:
        """Handle knowledge retrieval completed event."""
        if not self.verbose:
            return None
            return

        branch_to_use = self.current_lite_agent_branch or agent_branch
        tree_to_use = branch_to_use or crew_tree
@@ -1062,7 +1062,7 @@ class ConsoleFormatter:
            )
            self.print(knowledge_panel)
            self.print()
            return None
            return

        knowledge_branch_found = False
        for child in branch_to_use.children:
@@ -1111,18 +1111,18 @@ class ConsoleFormatter:

    def handle_knowledge_query_started(
        self,
        agent_branch: Optional[Tree],
        agent_branch: Tree | None,
        task_prompt: str,
        crew_tree: Optional[Tree],
        crew_tree: Tree | None,
    ) -> None:
        """Handle knowledge query generated event."""
        if not self.verbose:
            return None
            return

        branch_to_use = self.current_lite_agent_branch or agent_branch
        tree_to_use = branch_to_use or crew_tree
        if branch_to_use is None or tree_to_use is None:
            return None
            return

        query_branch = branch_to_use.add("")
        self.update_tree_label(
@@ -1134,9 +1134,9 @@ class ConsoleFormatter:

    def handle_knowledge_query_failed(
        self,
        agent_branch: Optional[Tree],
        agent_branch: Tree | None,
        error: str,
        crew_tree: Optional[Tree],
        crew_tree: Tree | None,
    ) -> None:
        """Handle knowledge query failed event."""
        if not self.verbose:
@@ -1159,18 +1159,18 @@ class ConsoleFormatter:

    def handle_knowledge_query_completed(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
        agent_branch: Tree | None,
        crew_tree: Tree | None,
    ) -> None:
        """Handle knowledge query completed event."""
        if not self.verbose:
            return None
            return

        branch_to_use = self.current_lite_agent_branch or agent_branch
        tree_to_use = branch_to_use or crew_tree

        if branch_to_use is None or tree_to_use is None:
            return None
            return

        query_branch = branch_to_use.add("")
        self.update_tree_label(query_branch, "✅", "Knowledge Query Completed", "green")
@@ -1180,9 +1180,9 @@ class ConsoleFormatter:

    def handle_knowledge_search_query_failed(
        self,
        agent_branch: Optional[Tree],
        agent_branch: Tree | None,
        error: str,
        crew_tree: Optional[Tree],
        crew_tree: Tree | None,
    ) -> None:
        """Handle knowledge search query failed event."""
        if not self.verbose:
@@ -1207,10 +1207,10 @@ class ConsoleFormatter:

    def handle_reasoning_started(
        self,
        agent_branch: Optional[Tree],
        agent_branch: Tree | None,
        attempt: int,
        crew_tree: Optional[Tree],
    ) -> Optional[Tree]:
        crew_tree: Tree | None,
    ) -> Tree | None:
        """Handle agent reasoning started (or refinement) event."""
        if not self.verbose:
            return None
@@ -1249,7 +1249,7 @@ class ConsoleFormatter:
        self,
        plan: str,
        ready: bool,
        crew_tree: Optional[Tree],
        crew_tree: Tree | None,
    ) -> None:
        """Handle agent reasoning completed event."""
        if not self.verbose:
@@ -1292,7 +1292,7 @@ class ConsoleFormatter:
    def handle_reasoning_failed(
        self,
        error: str,
        crew_tree: Optional[Tree],
        crew_tree: Tree | None,
    ) -> None:
        """Handle agent reasoning failure event."""
        if not self.verbose:
@@ -1329,7 +1329,7 @@ class ConsoleFormatter:
    def handle_agent_logs_started(
        self,
        agent_role: str,
        task_description: Optional[str] = None,
        task_description: str | None = None,
        verbose: bool = False,
    ) -> None:
        """Handle agent logs started event."""
@@ -1367,10 +1367,11 @@ class ConsoleFormatter:
        if not verbose:
            return

        from crewai.agents.parser import AgentAction, AgentFinish
        import json
        import re

        from crewai.agents.parser import AgentAction, AgentFinish

        agent_role = agent_role.partition("\n")[0]

        if isinstance(formatted_answer, AgentAction):
@@ -1473,9 +1474,9 @@ class ConsoleFormatter:

    def handle_memory_retrieval_started(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
    ) -> Optional[Tree]:
        agent_branch: Tree | None,
        crew_tree: Tree | None,
    ) -> Tree | None:
        if not self.verbose:
            return None

@@ -1497,13 +1498,13 @@ class ConsoleFormatter:

    def handle_memory_retrieval_completed(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
        agent_branch: Tree | None,
        crew_tree: Tree | None,
        memory_content: str,
        retrieval_time_ms: float,
    ) -> None:
        if not self.verbose:
            return None
            return

        branch_to_use = self.current_lite_agent_branch or agent_branch
        tree_to_use = branch_to_use or crew_tree
@@ -1528,7 +1529,7 @@ class ConsoleFormatter:

        if branch_to_use is None or tree_to_use is None:
            add_panel()
            return None
            return

        memory_branch_found = False
        for child in branch_to_use.children:
@@ -1565,13 +1566,13 @@ class ConsoleFormatter:

    def handle_memory_query_completed(
        self,
        agent_branch: Optional[Tree],
        agent_branch: Tree | None,
        source_type: str,
        query_time_ms: float,
        crew_tree: Optional[Tree],
        crew_tree: Tree | None,
    ) -> None:
        if not self.verbose:
            return None
            return

        branch_to_use = self.current_lite_agent_branch or agent_branch
        tree_to_use = branch_to_use or crew_tree
@@ -1580,15 +1581,15 @@ class ConsoleFormatter:
            branch_to_use = tree_to_use

        if branch_to_use is None:
            return None
            return

        memory_type = source_type.replace("_", " ").title()

        for child in branch_to_use.children:
            if "Memory Retrieval" in str(child.label):
                for child in child.children:
                    sources_branch = child
                    if "Sources Used" in str(child.label):
                for inner_child in child.children:
                    sources_branch = inner_child
                    if "Sources Used" in str(inner_child.label):
                        sources_branch.add(f"✅ {memory_type} ({query_time_ms:.2f}ms)")
                        break
                else:
@@ -1598,13 +1599,13 @@ class ConsoleFormatter:

    def handle_memory_query_failed(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
        agent_branch: Tree | None,
        crew_tree: Tree | None,
        error: str,
        source_type: str,
    ) -> None:
        if not self.verbose:
            return None
            return

        branch_to_use = self.current_lite_agent_branch or agent_branch
        tree_to_use = branch_to_use or crew_tree
@@ -1613,15 +1614,15 @@ class ConsoleFormatter:
            branch_to_use = tree_to_use

        if branch_to_use is None:
            return None
            return

        memory_type = source_type.replace("_", " ").title()

        for child in branch_to_use.children:
            if "Memory Retrieval" in str(child.label):
                for child in child.children:
                    sources_branch = child
                    if "Sources Used" in str(child.label):
                for inner_child in child.children:
                    sources_branch = inner_child
                    if "Sources Used" in str(inner_child.label):
                        sources_branch.add(f"❌ {memory_type} - Error: {error}")
                        break
                else:
@@ -1630,16 +1631,16 @@ class ConsoleFormatter:
            break

    def handle_memory_save_started(
        self, agent_branch: Optional[Tree], crew_tree: Optional[Tree]
        self, agent_branch: Tree | None, crew_tree: Tree | None
    ) -> None:
        if not self.verbose:
            return None
            return

        branch_to_use = agent_branch or self.current_lite_agent_branch
        tree_to_use = branch_to_use or crew_tree

        if tree_to_use is None:
            return None
            return

        for child in tree_to_use.children:
            if "Memory Update" in str(child.label):
@@ -1655,19 +1656,19 @@ class ConsoleFormatter:

    def handle_memory_save_completed(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
        agent_branch: Tree | None,
        crew_tree: Tree | None,
        save_time_ms: float,
        source_type: str,
    ) -> None:
        if not self.verbose:
            return None
            return

        branch_to_use = agent_branch or self.current_lite_agent_branch
        tree_to_use = branch_to_use or crew_tree

        if tree_to_use is None:
            return None
            return

        memory_type = source_type.replace("_", " ").title()
        content = f"✅ {memory_type} Memory Saved ({save_time_ms:.2f}ms)"
@@ -1685,19 +1686,19 @@ class ConsoleFormatter:

    def handle_memory_save_failed(
        self,
        agent_branch: Optional[Tree],
        agent_branch: Tree | None,
        error: str,
        source_type: str,
        crew_tree: Optional[Tree],
        crew_tree: Tree | None,
    ) -> None:
        if not self.verbose:
            return None
            return

        branch_to_use = agent_branch or self.current_lite_agent_branch
        tree_to_use = branch_to_use or crew_tree

        if branch_to_use is None or tree_to_use is None:
            return None
            return

        memory_type = source_type.replace("_", " ").title()
        content = f"❌ {memory_type} Memory Save Failed"
@@ -1738,7 +1739,7 @@ class ConsoleFormatter:
    def handle_guardrail_completed(
        self,
        success: bool,
        error: Optional[str],
        error: str | None,
        retry_count: int,
    ) -> None:
        """Display guardrail evaluation result.

@@ -2,30 +2,22 @@ import asyncio
import copy
import inspect
import logging
from typing import (
    Any,
    Callable,
    Dict,
    Generic,
    List,
    Optional,
    Set,
    Type,
    TypeVar,
    Union,
    cast,
)
from collections.abc import Callable
from typing import Any, ClassVar, Generic, TypeVar, cast
from uuid import uuid4

from opentelemetry import baggage
from opentelemetry.context import attach, detach
from pydantic import BaseModel, Field, ValidationError

from crewai.flow.flow_visualizer import plot_flow
from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.types import FlowExecutionData
from crewai.flow.utils import get_possible_return_constants
from crewai.events.event_bus import crewai_event_bus
from crewai.events.listeners.tracing.trace_listener import (
    TraceCollectionListener,
)
from crewai.events.listeners.tracing.utils import (
    is_tracing_enabled,
    should_auto_collect_first_time_traces,
)
from crewai.events.types.flow_events import (
    FlowCreatedEvent,
    FlowFinishedEvent,
@@ -35,12 +27,10 @@ from crewai.events.types.flow_events import (
    MethodExecutionFinishedEvent,
    MethodExecutionStartedEvent,
)
from crewai.events.listeners.tracing.trace_listener import (
    TraceCollectionListener,
)
from crewai.events.listeners.tracing.utils import (
    is_tracing_enabled,
)
from crewai.flow.flow_visualizer import plot_flow
from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.types import FlowExecutionData
from crewai.flow.utils import get_possible_return_constants
from crewai.utilities.printer import Printer

logger = logging.getLogger(__name__)
@@ -55,16 +45,14 @@ class FlowState(BaseModel):
    )


# Type variables with explicit bounds
T = TypeVar(
    "T", bound=Union[Dict[str, Any], BaseModel]
)  # Generic flow state type parameter
# type variables with explicit bounds
T = TypeVar("T", bound=dict[str, Any] | BaseModel)  # Generic flow state type parameter
StateT = TypeVar(
    "StateT", bound=Union[Dict[str, Any], BaseModel]
    "StateT", bound=dict[str, Any] | BaseModel
)  # State validation type parameter
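
The bound itself moves to the union form, which works both at type-check time and at runtime on Python 3.10+. A minimal sketch of what such a bound enforces (function name illustrative):

from typing import Any, TypeVar
from pydantic import BaseModel

BoundT = TypeVar("BoundT", bound=dict[str, Any] | BaseModel)

def touch_state(state: BoundT) -> BoundT:
    # Accepts a plain dict or any BaseModel subclass; anything else fails type checking.
    return state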


def ensure_state_type(state: Any, expected_type: Type[StateT]) -> StateT:
def ensure_state_type(state: Any, expected_type: type[StateT]) -> StateT:
    """Ensure state matches expected type with proper validation.

    Args:
@@ -104,7 +92,7 @@ def ensure_state_type(state: Any, expected_type: Type[StateT]) -> StateT:
    raise TypeError(f"Invalid expected_type: {expected_type}")


def start(condition: Optional[Union[str, dict, Callable]] = None) -> Callable:
def start(condition: str | dict | Callable | None = None) -> Callable:
    """
    Marks a method as a flow's starting point.

@@ -171,7 +159,7 @@ def start(condition: Optional[Union[str, dict, Callable]] = None) -> Callable:
    return decorator


def listen(condition: Union[str, dict, Callable]) -> Callable:
def listen(condition: str | dict | Callable) -> Callable:
    """
    Creates a listener that executes when specified conditions are met.

@@ -231,7 +219,7 @@ def listen(condition: Union[str, dict, Callable]) -> Callable:
    return decorator


def router(condition: Union[str, dict, Callable]) -> Callable:
def router(condition: str | dict | Callable) -> Callable:
    """
    Creates a routing method that directs flow execution based on conditions.

@@ -297,7 +285,7 @@ def router(condition: Union[str, dict, Callable]) -> Callable:
    return decorator


def or_(*conditions: Union[str, dict, Callable]) -> dict:
def or_(*conditions: str | dict | Callable) -> dict:
    """
    Combines multiple conditions with OR logic for flow control.

@@ -343,7 +331,7 @@ def or_(*conditions: Union[str, dict, Callable]) -> dict:
    return {"type": "OR", "methods": methods}


def and_(*conditions: Union[str, dict, Callable]) -> dict:
def and_(*conditions: str | dict | Callable) -> dict:
    """
    Combines multiple conditions with AND logic for flow control.

@@ -425,10 +413,10 @@ class FlowMeta(type):
                    if possible_returns:
                        router_paths[attr_name] = possible_returns

        setattr(cls, "_start_methods", start_methods)
        setattr(cls, "_listeners", listeners)
        setattr(cls, "_routers", routers)
        setattr(cls, "_router_paths", router_paths)
        cls._start_methods = start_methods
        cls._listeners = listeners
        cls._routers = routers
        cls._router_paths = router_paths

        return cls

@@ -436,29 +424,29 @@ class FlowMeta(type):
class Flow(Generic[T], metaclass=FlowMeta):
    """Base class for all flows.

    Type parameter T must be either Dict[str, Any] or a subclass of BaseModel."""
    type parameter T must be either dict[str, Any] or a subclass of BaseModel."""

    _printer = Printer()

    _start_methods: List[str] = []
    _listeners: Dict[str, tuple[str, List[str]]] = {}
    _routers: Set[str] = set()
    _router_paths: Dict[str, List[str]] = {}
    initial_state: Union[Type[T], T, None] = None
    name: Optional[str] = None
    tracing: Optional[bool] = False
    _start_methods: ClassVar[list[str]] = []
    _listeners: ClassVar[dict[str, tuple[str, list[str]]]] = {}
    _routers: ClassVar[set[str]] = set()
    _router_paths: ClassVar[dict[str, list[str]]] = {}
    initial_state: type[T] | T | None = None
    name: str | None = None
    tracing: bool | None = False

    def __class_getitem__(cls: Type["Flow"], item: Type[T]) -> Type["Flow"]:
    def __class_getitem__(cls: type["Flow"], item: type[T]) -> type["Flow"]:
        class _FlowGeneric(cls):  # type: ignore
            _initial_state_T = item  # type: ignore
            _initial_state_t = item  # type: ignore

        _FlowGeneric.__name__ = f"{cls.__name__}[{item.__name__}]"
        return _FlowGeneric

    def __init__(
        self,
        persistence: Optional[FlowPersistence] = None,
        tracing: Optional[bool] = False,
        persistence: FlowPersistence | None = None,
        tracing: bool | None = False,
        **kwargs: Any,
    ) -> None:
        """Initialize a new Flow instance.
@@ -468,18 +456,22 @@ class Flow(Generic[T], metaclass=FlowMeta):
            **kwargs: Additional state values to initialize or override
        """
        # Initialize basic instance attributes
        self._methods: Dict[str, Callable] = {}
        self._method_execution_counts: Dict[str, int] = {}
        self._pending_and_listeners: Dict[str, Set[str]] = {}
        self._method_outputs: List[Any] = []  # List to store all method outputs
        self._completed_methods: Set[str] = set()  # Track completed methods for reload
        self._persistence: Optional[FlowPersistence] = persistence
        self._methods: dict[str, Callable] = {}
        self._method_execution_counts: dict[str, int] = {}
        self._pending_and_listeners: dict[str, set[str]] = {}
        self._method_outputs: list[Any] = []  # list to store all method outputs
        self._completed_methods: set[str] = set()  # Track completed methods for reload
        self._persistence: FlowPersistence | None = persistence
        self._is_execution_resuming: bool = False

        # Initialize state with initial values
        self._state = self._create_initial_state()
        self.tracing = tracing
        if is_tracing_enabled() or self.tracing:
        if (
            is_tracing_enabled()
            or self.tracing
            or should_auto_collect_first_time_traces()
        ):
            trace_listener = TraceCollectionListener()
            trace_listener.setup_listeners(crewai_event_bus)
        # Apply any additional kwargs
@@ -521,25 +513,25 @@ class Flow(Generic[T], metaclass=FlowMeta):
            TypeError: If state is neither BaseModel nor dictionary
        """
        # Handle case where initial_state is None but we have a type parameter
        if self.initial_state is None and hasattr(self, "_initial_state_T"):
            state_type = getattr(self, "_initial_state_T")
        if self.initial_state is None and hasattr(self, "_initial_state_t"):
            state_type = self._initial_state_t
            if isinstance(state_type, type):
                if issubclass(state_type, FlowState):
                    # Create instance without id, then set it
                    instance = state_type()
                    if not hasattr(instance, "id"):
                        setattr(instance, "id", str(uuid4()))
                        instance.id = str(uuid4())
                    return cast(T, instance)
                elif issubclass(state_type, BaseModel):
                if issubclass(state_type, BaseModel):
                    # Create a new type that includes the ID field
                    class StateWithId(state_type, FlowState):  # type: ignore
                        pass

                    instance = StateWithId()
                    if not hasattr(instance, "id"):
                        setattr(instance, "id", str(uuid4()))
                        instance.id = str(uuid4())
                    return cast(T, instance)
                elif state_type is dict:
                if state_type is dict:
                    return cast(T, {"id": str(uuid4())})

        # Handle case where no initial state is provided
@@ -550,13 +542,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
        if isinstance(self.initial_state, type):
            if issubclass(self.initial_state, FlowState):
                return cast(T, self.initial_state())  # Uses model defaults
            elif issubclass(self.initial_state, BaseModel):
            if issubclass(self.initial_state, BaseModel):
                # Validate that the model has an id field
                model_fields = getattr(self.initial_state, "model_fields", None)
                if not model_fields or "id" not in model_fields:
                    raise ValueError("Flow state model must have an 'id' field")
                return cast(T, self.initial_state())  # Uses model defaults
            elif self.initial_state is dict:
            if self.initial_state is dict:
                return cast(T, {"id": str(uuid4())})

        # Handle dictionary instance case
@@ -600,7 +592,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        return self._state

    @property
    def method_outputs(self) -> List[Any]:
    def method_outputs(self) -> list[Any]:
        """Returns the list of all outputs from executed methods."""
        return self._method_outputs

@@ -631,13 +623,13 @@ class Flow(Generic[T], metaclass=FlowMeta):

            if isinstance(self._state, dict):
                return str(self._state.get("id", ""))
            elif isinstance(self._state, BaseModel):
            if isinstance(self._state, BaseModel):
                return str(getattr(self._state, "id", ""))
            return ""
        except (AttributeError, TypeError):
            return ""  # Safely handle any unexpected attribute access issues

    def _initialize_state(self, inputs: Dict[str, Any]) -> None:
    def _initialize_state(self, inputs: dict[str, Any]) -> None:
        """Initialize or update flow state with new inputs.

        Args:
@@ -691,7 +683,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        else:
            raise TypeError("State must be a BaseModel instance or a dictionary.")

    def _restore_state(self, stored_state: Dict[str, Any]) -> None:
    def _restore_state(self, stored_state: dict[str, Any]) -> None:
        """Restore flow state from persistence.

        Args:
@@ -735,7 +727,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
            execution_data: Flow execution data containing:
                - id: Flow execution ID
                - flow: Flow structure
                - completed_methods: List of successfully completed methods
                - completed_methods: list of successfully completed methods
                - execution_methods: All execution methods with their status
        """
        flow_id = execution_data.get("id")
@@ -771,7 +763,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        if state_to_apply:
            self._apply_state_updates(state_to_apply)

        for i, method in enumerate(sorted_methods[:-1]):
        for method in sorted_methods[:-1]:
            method_name = method.get("flow_method", {}).get("name")
            if method_name:
                self._completed_methods.add(method_name)
@@ -783,7 +775,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
            elif hasattr(self._state, field_name):
                object.__setattr__(self._state, field_name, value)

    def _apply_state_updates(self, updates: Dict[str, Any]) -> None:
    def _apply_state_updates(self, updates: dict[str, Any]) -> None:
        """Apply multiple state updates efficiently."""
        if isinstance(self._state, dict):
            self._state.update(updates)
@@ -792,7 +784,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
            if hasattr(self._state, key):
                object.__setattr__(self._state, key, value)

    def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
    def kickoff(self, inputs: dict[str, Any] | None = None) -> Any:
        """
        Start the flow execution in a synchronous context.

@@ -805,7 +797,7 @@ class Flow(Generic[T], metaclass=FlowMeta):

        return asyncio.run(run_flow())
|
||||
|
||||
async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
||||
async def kickoff_async(self, inputs: dict[str, Any] | None = None) -> Any:
|
||||
"""
|
||||
Start the flow execution asynchronously.
|
||||
|
||||
@@ -840,7 +832,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
if isinstance(self._state, dict):
|
||||
self._state["id"] = inputs["id"]
|
||||
elif isinstance(self._state, BaseModel):
|
||||
setattr(self._state, "id", inputs["id"])
|
||||
setattr(self._state, "id", inputs["id"]) # noqa: B010
|
||||
|
||||
# If persistence is enabled, attempt to restore the stored state using the provided id.
|
||||
if "id" in inputs and self._persistence is not None:
|
||||
@@ -1075,7 +1067,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
)
|
||||
|
||||
# Now execute normal listeners for all router results and the original trigger
|
||||
all_triggers = [trigger_method] + router_results
|
||||
all_triggers = [trigger_method, *router_results]
|
||||
|
||||
for current_trigger in all_triggers:
|
||||
if current_trigger: # Skip None results
|
||||
@@ -1109,7 +1101,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
|
||||
def _find_triggered_methods(
|
||||
self, trigger_method: str, router_only: bool
|
||||
) -> List[str]:
|
||||
) -> list[str]:
|
||||
"""
|
||||
Finds all methods that should be triggered based on conditions.
|
||||
|
||||
@@ -1126,7 +1118,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
|
||||
Returns
|
||||
-------
|
||||
List[str]
|
||||
list[str]
|
||||
Names of methods that should be triggered.
|
||||
|
||||
Notes
|
||||
|
||||
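The flow.py hunks above move the annotations to builtin generics and PEP 604 unions, and enforce that a structured flow state carries an `id` field. A minimal sketch of what that contract means for user code, assuming the `Flow`/`start` API from `crewai.flow.flow`; the `CounterState` model and its field values are invented for illustration:

# Sketch (not part of the diff): a BaseModel-backed flow state must declare
# an "id" field, or _create_initial_state raises ValueError.
from uuid import uuid4

from pydantic import BaseModel, Field

from crewai.flow.flow import Flow, start


class CounterState(BaseModel):
    id: str = Field(default_factory=lambda: str(uuid4()))  # required by Flow
    count: int = 0


class CounterFlow(Flow[CounterState]):
    @start()
    def bump(self) -> int:
        self.state.count += 1
        return self.state.count


# kickoff now accepts `dict[str, Any] | None`; passing an "id" lets an
# enabled persistence backend restore the stored state for that run.
result = CounterFlow().kickoff(inputs={"count": 41})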
@@ -1,10 +1,11 @@
 import os
-from typing import Any, Dict, List, Optional
+from typing import Any

 from pydantic import BaseModel, ConfigDict, Field

 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
 from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
+from crewai.rag.types import SearchResult

 os.environ["TOKENIZERS_PARALLELISM"] = "false"  # removes logging from fastembed

@@ -13,23 +14,23 @@ class Knowledge(BaseModel):
     """
     Knowledge is a collection of sources and setup for the vector store to save and query relevant context.
     Args:
-        sources: List[BaseKnowledgeSource] = Field(default_factory=list)
-        storage: Optional[KnowledgeStorage] = Field(default=None)
-        embedder: Optional[Dict[str, Any]] = None
+        sources: list[BaseKnowledgeSource] = Field(default_factory=list)
+        storage: KnowledgeStorage | None = Field(default=None)
+        embedder: dict[str, Any] | None = None
     """

-    sources: List[BaseKnowledgeSource] = Field(default_factory=list)
+    sources: list[BaseKnowledgeSource] = Field(default_factory=list)
     model_config = ConfigDict(arbitrary_types_allowed=True)
-    storage: Optional[KnowledgeStorage] = Field(default=None)
-    embedder: Optional[Dict[str, Any]] = None
-    collection_name: Optional[str] = None
+    storage: KnowledgeStorage | None = Field(default=None)
+    embedder: dict[str, Any] | None = None
+    collection_name: str | None = None

     def __init__(
         self,
         collection_name: str,
-        sources: List[BaseKnowledgeSource],
-        embedder: Optional[Dict[str, Any]] = None,
-        storage: Optional[KnowledgeStorage] = None,
+        sources: list[BaseKnowledgeSource],
+        embedder: dict[str, Any] | None = None,
+        storage: KnowledgeStorage | None = None,
         **data,
     ):
         super().__init__(**data)
@@ -40,11 +41,10 @@ class Knowledge(BaseModel):
                 embedder=embedder, collection_name=collection_name
             )
         self.sources = sources
         self.storage.initialize_knowledge_storage()

     def query(
-        self, query: List[str], results_limit: int = 3, score_threshold: float = 0.35
-    ) -> List[Dict[str, Any]]:
+        self, query: list[str], results_limit: int = 5, score_threshold: float = 0.6
+    ) -> list[SearchResult]:
         """
         Query across all knowledge sources to find the most relevant information.
         Returns the top_k most relevant chunks.
@@ -55,12 +55,11 @@ class Knowledge(BaseModel):
         if self.storage is None:
             raise ValueError("Storage is not initialized.")

-        results = self.storage.search(
+        return self.storage.search(
             query,
             limit=results_limit,
             score_threshold=score_threshold,
         )
-        return results

     def add_sources(self):
         try:
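`Knowledge.query` now defaults to five results at a 0.6 minimum score and returns `list[SearchResult]`. A usage sketch under those defaults; the collection name and text are invented, and the `"content"` key follows the rename visible later in this diff:

from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

source = StringKnowledgeSource(content="CrewAI flows keep state in a Pydantic model.")
knowledge = Knowledge(collection_name="demo", sources=[source])
knowledge.add_sources()

# Defaults are now results_limit=5, score_threshold=0.6; each hit exposes
# its text under "content" rather than the old "context" key.
for hit in knowledge.query(["how do flows keep state?"]):
    print(hit["content"])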
@@ -9,8 +9,8 @@ class KnowledgeConfig(BaseModel):
         score_threshold (float): The minimum score for a document to be considered relevant.
     """

-    results_limit: int = Field(default=3, description="The number of results to return")
+    results_limit: int = Field(default=5, description="The number of results to return")
     score_threshold: float = Field(
-        default=0.35,
+        default=0.6,
         description="The minimum score for a result to be considered relevant",
     )
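`KnowledgeConfig` mirrors the same retuned defaults; a quick check, assuming the module lives at `crewai.knowledge.knowledge_config`:

from crewai.knowledge.knowledge_config import KnowledgeConfig

config = KnowledgeConfig()
assert config.results_limit == 5       # was 3
assert config.score_threshold == 0.6   # was 0.35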
@@ -1,6 +1,5 @@
 from abc import ABC, abstractmethod
 from pathlib import Path
-from typing import Dict, List, Optional, Union

 from pydantic import Field, field_validator

@@ -14,19 +13,19 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
     """Base class for knowledge sources that load content from files."""

     _logger: Logger = Logger(verbose=True)
-    file_path: Optional[Union[Path, List[Path], str, List[str]]] = Field(
+    file_path: Path | list[Path] | str | list[str] | None = Field(
         default=None,
         description="[Deprecated] The path to the file. Use file_paths instead.",
     )
-    file_paths: Optional[Union[Path, List[Path], str, List[str]]] = Field(
+    file_paths: Path | list[Path] | str | list[str] | None = Field(
         default_factory=list, description="The path to the file"
     )
-    content: Dict[Path, str] = Field(init=False, default_factory=dict)
-    storage: Optional[KnowledgeStorage] = Field(default=None)
-    safe_file_paths: List[Path] = Field(default_factory=list)
+    content: dict[Path, str] = Field(init=False, default_factory=dict)
+    storage: KnowledgeStorage | None = Field(default=None)
+    safe_file_paths: list[Path] = Field(default_factory=list)

     @field_validator("file_path", "file_paths", mode="before")
-    def validate_file_path(cls, v, info):
+    def validate_file_path(cls, v, info):  # noqa: N805
         """Validate that at least one of file_path or file_paths is provided."""
         # Single check if both are None, O(1) instead of nested conditions
         if (
@@ -46,9 +45,8 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
         self.content = self.load_content()

     @abstractmethod
-    def load_content(self) -> Dict[Path, str]:
+    def load_content(self) -> dict[Path, str]:
         """Load and preprocess file content. Should be overridden by subclasses. Assume that the file path is relative to the project root in the knowledge directory."""
-        pass

     def validate_content(self):
         """Validate the paths."""
@@ -74,11 +72,11 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
         else:
             raise ValueError("No storage found to save documents.")

-    def convert_to_path(self, path: Union[Path, str]) -> Path:
+    def convert_to_path(self, path: Path | str) -> Path:
         """Convert a path to a Path object."""
         return Path(KNOWLEDGE_DIRECTORY + "/" + path) if isinstance(path, str) else path

-    def _process_file_paths(self) -> List[Path]:
+    def _process_file_paths(self) -> list[Path]:
         """Convert file_path to a list of Path objects."""

         if hasattr(self, "file_path") and self.file_path is not None:
@@ -93,7 +91,7 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
             raise ValueError("Your source must be provided with a file_paths: []")

         # Convert single path to list
-        path_list: List[Union[Path, str]] = (
+        path_list: list[Path | str] = (
             [self.file_paths]
             if isinstance(self.file_paths, (str, Path))
             else list(self.file_paths)
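With `load_content` as the only abstract hook on the file base class (plus `add` inherited from `BaseKnowledgeSource`), a custom source stays small. A sketch of a hypothetical Markdown source; the class name and file handling are invented:

from pathlib import Path

from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource


class MarkdownKnowledgeSource(BaseFileKnowledgeSource):
    """Reads .md files verbatim; chunking and saving come from the base classes."""

    def load_content(self) -> dict[Path, str]:
        # safe_file_paths is already validated/normalized by the base class.
        return {path: path.read_text(encoding="utf-8") for path in self.safe_file_paths}

    def add(self) -> None:
        for text in self.content.values():
            self.chunks.extend(self._chunk_text(text))
        self._save_documents()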
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional
+from typing import Any

 import numpy as np
 from pydantic import BaseModel, ConfigDict, Field
@@ -12,29 +12,27 @@ class BaseKnowledgeSource(BaseModel, ABC):

     chunk_size: int = 4000
     chunk_overlap: int = 200
-    chunks: List[str] = Field(default_factory=list)
-    chunk_embeddings: List[np.ndarray] = Field(default_factory=list)
+    chunks: list[str] = Field(default_factory=list)
+    chunk_embeddings: list[np.ndarray] = Field(default_factory=list)

     model_config = ConfigDict(arbitrary_types_allowed=True)
-    storage: Optional[KnowledgeStorage] = Field(default=None)
-    metadata: Dict[str, Any] = Field(default_factory=dict)  # Currently unused
-    collection_name: Optional[str] = Field(default=None)
+    storage: KnowledgeStorage | None = Field(default=None)
+    metadata: dict[str, Any] = Field(default_factory=dict)  # Currently unused
+    collection_name: str | None = Field(default=None)

     @abstractmethod
     def validate_content(self) -> Any:
         """Load and preprocess content from the source."""
-        pass

     @abstractmethod
     def add(self) -> None:
         """Process content, chunk it, compute embeddings, and save them."""
-        pass

-    def get_embeddings(self) -> List[np.ndarray]:
+    def get_embeddings(self) -> list[np.ndarray]:
         """Return the list of embeddings for the chunks."""
         return self.chunk_embeddings

-    def _chunk_text(self, text: str) -> List[str]:
+    def _chunk_text(self, text: str) -> list[str]:
         """Utility method to split text into chunks."""
         return [
             text[i : i + self.chunk_size]
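The `_chunk_text` implementations in this diff slice the text into fixed windows of `chunk_size`; the `range` step falls outside the shown hunks, but with a `chunk_overlap` of 200 the standard sliding window works like this (a sketch, not necessarily the library's exact loop):

def chunk_text(text: str, chunk_size: int = 4000, chunk_overlap: int = 200) -> list[str]:
    # Each chunk starts chunk_size - chunk_overlap after the previous one,
    # so consecutive chunks share chunk_overlap characters of context.
    step = chunk_size - chunk_overlap
    return [text[i : i + chunk_size] for i in range(0, len(text), step)]


assert chunk_text("abcdef", chunk_size=4, chunk_overlap=2) == ["abcd", "cdef", "ef"]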
@@ -1,13 +1,21 @@
+from collections.abc import Iterator
 from pathlib import Path
-from typing import Iterator, List, Optional, Union
 from urllib.parse import urlparse

 try:
-    from docling.datamodel.base_models import InputFormat
-    from docling.document_converter import DocumentConverter
-    from docling.exceptions import ConversionError
-    from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
-    from docling_core.types.doc.document import DoclingDocument
+    from docling.datamodel.base_models import (  # type: ignore[import-not-found]
+        InputFormat,
+    )
+    from docling.document_converter import (  # type: ignore[import-not-found]
+        DocumentConverter,
+    )
+    from docling.exceptions import ConversionError  # type: ignore[import-not-found]
+    from docling_core.transforms.chunker.hierarchical_chunker import (  # type: ignore[import-not-found]
+        HierarchicalChunker,
+    )
+    from docling_core.types.doc.document import (  # type: ignore[import-not-found]
+        DoclingDocument,
+    )

     DOCLING_AVAILABLE = True
 except ImportError:
@@ -35,11 +43,11 @@ class CrewDoclingSource(BaseKnowledgeSource):

     _logger: Logger = Logger(verbose=True)

-    file_path: Optional[List[Union[Path, str]]] = Field(default=None)
-    file_paths: List[Union[Path, str]] = Field(default_factory=list)
-    chunks: List[str] = Field(default_factory=list)
-    safe_file_paths: List[Union[Path, str]] = Field(default_factory=list)
-    content: List["DoclingDocument"] = Field(default_factory=list)
+    file_path: list[Path | str] | None = Field(default=None)
+    file_paths: list[Path | str] = Field(default_factory=list)
+    chunks: list[str] = Field(default_factory=list)
+    safe_file_paths: list[Path | str] = Field(default_factory=list)
+    content: list["DoclingDocument"] = Field(default_factory=list)
     document_converter: "DocumentConverter" = Field(
         default_factory=lambda: DocumentConverter(
             allowed_formats=[
@@ -66,7 +74,7 @@ class CrewDoclingSource(BaseKnowledgeSource):
         self.safe_file_paths = self.validate_content()
         self.content = self._load_content()

-    def _load_content(self) -> List["DoclingDocument"]:
+    def _load_content(self) -> list["DoclingDocument"]:
         try:
             return self._convert_source_to_docling_documents()
         except ConversionError as e:
@@ -88,7 +96,7 @@ class CrewDoclingSource(BaseKnowledgeSource):
         self.chunks.extend(list(new_chunks_iterable))
         self._save_documents()

-    def _convert_source_to_docling_documents(self) -> List["DoclingDocument"]:
+    def _convert_source_to_docling_documents(self) -> list["DoclingDocument"]:
         conv_results_iter = self.document_converter.convert_all(self.safe_file_paths)
         return [result.document for result in conv_results_iter]

@@ -97,8 +105,8 @@ class CrewDoclingSource(BaseKnowledgeSource):
         for chunk in chunker.chunk(doc):
             yield chunk.text

-    def validate_content(self) -> List[Union[Path, str]]:
-        processed_paths: List[Union[Path, str]] = []
+    def validate_content(self) -> list[Path | str]:
+        processed_paths: list[Path | str] = []
         for path in self.file_paths:
             if isinstance(path, str):
                 if path.startswith(("http://", "https://")):
@@ -108,7 +116,7 @@ class CrewDoclingSource(BaseKnowledgeSource):
                 else:
                     raise ValueError(f"Invalid URL format: {path}")
             except Exception as e:
-                raise ValueError(f"Invalid URL: {path}. Error: {str(e)}")
+                raise ValueError(f"Invalid URL: {path}. Error: {e!s}") from e
             else:
                 local_path = Path(KNOWLEDGE_DIRECTORY + "/" + path)
                 if local_path.exists():
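The reshaped docling imports keep the optional-dependency pattern used across these sources: import inside `try`, record an availability flag, and fail with an actionable message only when the feature is used. The generic shape, with an invented helper name:

try:
    import docling  # type: ignore[import-not-found]  # optional extra

    DOCLING_AVAILABLE = True
except ImportError:
    DOCLING_AVAILABLE = False


def require_docling() -> None:
    # Called at use time, so merely importing crewai never requires docling.
    if not DOCLING_AVAILABLE:
        raise ImportError("docling is not installed. Install it with: pip install docling")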
@@ -1,6 +1,5 @@
 import csv
 from pathlib import Path
-from typing import Dict, List

 from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource

@@ -8,7 +7,7 @@ from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledge
 class CSVKnowledgeSource(BaseFileKnowledgeSource):
     """A knowledge source that stores and queries CSV file content using embeddings."""

-    def load_content(self) -> Dict[Path, str]:
+    def load_content(self) -> dict[Path, str]:
         """Load and preprocess CSV file content."""
         content_dict = {}
         for file_path in self.safe_file_paths:
@@ -32,7 +31,7 @@ class CSVKnowledgeSource(BaseFileKnowledgeSource):
         self.chunks.extend(new_chunks)
         self._save_documents()

-    def _chunk_text(self, text: str) -> List[str]:
+    def _chunk_text(self, text: str) -> list[str]:
         """Utility method to split text into chunks."""
         return [
             text[i : i + self.chunk_size]
@@ -1,6 +1,4 @@
 from pathlib import Path
-from typing import Dict, Iterator, List, Optional, Union
-from urllib.parse import urlparse

 from pydantic import Field, field_validator

@@ -16,19 +14,19 @@ class ExcelKnowledgeSource(BaseKnowledgeSource):

     _logger: Logger = Logger(verbose=True)

-    file_path: Optional[Union[Path, List[Path], str, List[str]]] = Field(
+    file_path: Path | list[Path] | str | list[str] | None = Field(
         default=None,
         description="[Deprecated] The path to the file. Use file_paths instead.",
     )
-    file_paths: Optional[Union[Path, List[Path], str, List[str]]] = Field(
+    file_paths: Path | list[Path] | str | list[str] | None = Field(
         default_factory=list, description="The path to the file"
     )
-    chunks: List[str] = Field(default_factory=list)
-    content: Dict[Path, Dict[str, str]] = Field(default_factory=dict)
-    safe_file_paths: List[Path] = Field(default_factory=list)
+    chunks: list[str] = Field(default_factory=list)
+    content: dict[Path, dict[str, str]] = Field(default_factory=dict)
+    safe_file_paths: list[Path] = Field(default_factory=list)

     @field_validator("file_path", "file_paths", mode="before")
-    def validate_file_path(cls, v, info):
+    def validate_file_path(cls, v, info):  # noqa: N805
         """Validate that at least one of file_path or file_paths is provided."""
         # Single check if both are None, O(1) instead of nested conditions
         if (
@@ -41,7 +39,7 @@ class ExcelKnowledgeSource(BaseKnowledgeSource):
             raise ValueError("Either file_path or file_paths must be provided")
         return v

-    def _process_file_paths(self) -> List[Path]:
+    def _process_file_paths(self) -> list[Path]:
         """Convert file_path to a list of Path objects."""

         if hasattr(self, "file_path") and self.file_path is not None:
@@ -56,7 +54,7 @@ class ExcelKnowledgeSource(BaseKnowledgeSource):
             raise ValueError("Your source must be provided with a file_paths: []")

         # Convert single path to list
-        path_list: List[Union[Path, str]] = (
+        path_list: list[Path | str] = (
             [self.file_paths]
             if isinstance(self.file_paths, (str, Path))
             else list(self.file_paths)
@@ -100,7 +98,7 @@ class ExcelKnowledgeSource(BaseKnowledgeSource):
         self.validate_content()
         self.content = self._load_content()

-    def _load_content(self) -> Dict[Path, Dict[str, str]]:
+    def _load_content(self) -> dict[Path, dict[str, str]]:
         """Load and preprocess Excel file content from multiple sheets.

         Each sheet's content is converted to CSV format and stored.
@@ -126,21 +124,21 @@ class ExcelKnowledgeSource(BaseKnowledgeSource):
             content_dict[file_path] = sheet_dict
         return content_dict

-    def convert_to_path(self, path: Union[Path, str]) -> Path:
+    def convert_to_path(self, path: Path | str) -> Path:
         """Convert a path to a Path object."""
         return Path(KNOWLEDGE_DIRECTORY + "/" + path) if isinstance(path, str) else path

     def _import_dependencies(self):
         """Dynamically import dependencies."""
         try:
-            import pandas as pd
+            import pandas as pd  # type: ignore[import-untyped,import-not-found]

             return pd
         except ImportError as e:
             missing_package = str(e).split()[-1]
             raise ImportError(
                 f"{missing_package} is not installed. Please install it with: pip install {missing_package}"
-            )
+            ) from e

     def add(self) -> None:
         """
@@ -161,7 +159,7 @@ class ExcelKnowledgeSource(BaseKnowledgeSource):
         self.chunks.extend(new_chunks)
         self._save_documents()

-    def _chunk_text(self, text: str) -> List[str]:
+    def _chunk_text(self, text: str) -> list[str]:
         """Utility method to split text into chunks."""
         return [
             text[i : i + self.chunk_size]
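Several hunks here and below append `from e` when re-raising inside `except` blocks. A tiny sketch of what exception chaining buys; the function is invented:

def parse_port(raw: str) -> int:
    try:
        return int(raw)
    except ValueError as e:
        # "from e" records the original error as __cause__, so tracebacks show
        # both failures instead of an unrelated "during handling" message.
        raise RuntimeError(f"invalid port: {raw!r}") from e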
@@ -1,6 +1,6 @@
 import json
 from pathlib import Path
-from typing import Any, Dict, List
+from typing import Any

 from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource

@@ -8,9 +8,9 @@ from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledge
 class JSONKnowledgeSource(BaseFileKnowledgeSource):
     """A knowledge source that stores and queries JSON file content using embeddings."""

-    def load_content(self) -> Dict[Path, str]:
+    def load_content(self) -> dict[Path, str]:
         """Load and preprocess JSON file content."""
-        content: Dict[Path, str] = {}
+        content: dict[Path, str] = {}
         for path in self.safe_file_paths:
             path = self.convert_to_path(path)
             with open(path, "r", encoding="utf-8") as json_file:
@@ -29,7 +29,7 @@ class JSONKnowledgeSource(BaseFileKnowledgeSource):
             for item in data:
                 text += f"{indent}- {self._json_to_text(item, level + 1)}\n"
         else:
-            text += f"{str(data)}"
+            text += f"{data!s}"
         return text

     def add(self) -> None:
@@ -44,7 +44,7 @@ class JSONKnowledgeSource(BaseFileKnowledgeSource):
         self.chunks.extend(new_chunks)
         self._save_documents()

-    def _chunk_text(self, text: str) -> List[str]:
+    def _chunk_text(self, text: str) -> list[str]:
         """Utility method to split text into chunks."""
         return [
             text[i : i + self.chunk_size]
@@ -1,5 +1,4 @@
 from pathlib import Path
-from typing import Dict, List

 from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource

@@ -7,7 +6,7 @@ from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledge
 class PDFKnowledgeSource(BaseFileKnowledgeSource):
     """A knowledge source that stores and queries PDF file content using embeddings."""

-    def load_content(self) -> Dict[Path, str]:
+    def load_content(self) -> dict[Path, str]:
         """Load and preprocess PDF file content."""
         pdfplumber = self._import_pdfplumber()

@@ -30,22 +29,22 @@ class PDFKnowledgeSource(BaseFileKnowledgeSource):
             import pdfplumber

             return pdfplumber
-        except ImportError:
+        except ImportError as e:
             raise ImportError(
                 "pdfplumber is not installed. Please install it with: pip install pdfplumber"
-            )
+            ) from e

     def add(self) -> None:
         """
         Add PDF file content to the knowledge source, chunk it, compute embeddings,
         and save the embeddings.
         """
-        for _, text in self.content.items():
+        for text in self.content.values():
             new_chunks = self._chunk_text(text)
             self.chunks.extend(new_chunks)
         self._save_documents()

-    def _chunk_text(self, text: str) -> List[str]:
+    def _chunk_text(self, text: str) -> list[str]:
         """Utility method to split text into chunks."""
         return [
             text[i : i + self.chunk_size]
@@ -1,5 +1,3 @@
-from typing import List, Optional
-
 from pydantic import Field

 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
@@ -9,7 +7,7 @@ class StringKnowledgeSource(BaseKnowledgeSource):
     """A knowledge source that stores and queries plain text content using embeddings."""

     content: str = Field(...)
-    collection_name: Optional[str] = Field(default=None)
+    collection_name: str | None = Field(default=None)

     def model_post_init(self, _):
         """Post-initialization method to validate content."""
@@ -26,7 +24,7 @@ class StringKnowledgeSource(BaseKnowledgeSource):
         self.chunks.extend(new_chunks)
         self._save_documents()

-    def _chunk_text(self, text: str) -> List[str]:
+    def _chunk_text(self, text: str) -> list[str]:
         """Utility method to split text into chunks."""
         return [
             text[i : i + self.chunk_size]
@@ -1,5 +1,4 @@
 from pathlib import Path
-from typing import Dict, List

 from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource

@@ -7,7 +6,7 @@ from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledge
 class TextFileKnowledgeSource(BaseFileKnowledgeSource):
     """A knowledge source that stores and queries text file content using embeddings."""

-    def load_content(self) -> Dict[Path, str]:
+    def load_content(self) -> dict[Path, str]:
         """Load and preprocess text file content."""
         content = {}
         for path in self.safe_file_paths:
@@ -21,12 +20,12 @@ class TextFileKnowledgeSource(BaseFileKnowledgeSource):
         Add text file content to the knowledge source, chunk it, compute embeddings,
         and save the embeddings.
         """
-        for _, text in self.content.items():
+        for text in self.content.values():
             new_chunks = self._chunk_text(text)
             self.chunks.extend(new_chunks)
         self._save_documents()

-    def _chunk_text(self, text: str) -> List[str]:
+    def _chunk_text(self, text: str) -> list[str]:
         """Utility method to split text into chunks."""
         return [
             text[i : i + self.chunk_size]
@@ -1,5 +1,7 @@
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional
+from typing import Any
+
+from crewai.rag.types import SearchResult


 class BaseKnowledgeStorage(ABC):
@@ -8,22 +10,17 @@ class BaseKnowledgeStorage(ABC):
     @abstractmethod
     def search(
         self,
-        query: List[str],
-        limit: int = 3,
-        filter: Optional[dict] = None,
-        score_threshold: float = 0.35,
-    ) -> List[Dict[str, Any]]:
+        query: list[str],
+        limit: int = 5,
+        metadata_filter: dict[str, Any] | None = None,
+        score_threshold: float = 0.6,
+    ) -> list[SearchResult]:
         """Search for documents in the knowledge base."""
         pass

     @abstractmethod
-    def save(
-        self, documents: List[str], metadata: Dict[str, Any] | List[Dict[str, Any]]
-    ) -> None:
+    def save(self, documents: list[str]) -> None:
         """Save documents to the knowledge base."""
         pass

     @abstractmethod
     def reset(self) -> None:
         """Reset the knowledge base."""
         pass
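The abstract `search` drops the builtin-shadowing `filter` parameter in favor of `metadata_filter` and adopts the stricter defaults, while `save` now takes only documents. A sketch of a conforming in-memory test double; the word-overlap scoring and the exact `SearchResult` keys besides `"content"` are assumptions:

from typing import Any

from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
from crewai.rag.types import SearchResult


class FakeKnowledgeStorage(BaseKnowledgeStorage):
    """In-memory stand-in honoring the new abstract interface."""

    def __init__(self) -> None:
        self._docs: list[str] = []

    def save(self, documents: list[str]) -> None:
        self._docs.extend(documents)

    def search(
        self,
        query: list[str],
        limit: int = 5,
        metadata_filter: dict[str, Any] | None = None,
        score_threshold: float = 0.6,
    ) -> list[SearchResult]:
        # Fake relevance: a document matches if it shares any word with the query.
        words = {w.lower() for q in query for w in q.split()}
        hits = [d for d in self._docs if words & {w.lower() for w in d.split()}]
        return [{"content": d, "metadata": {}, "score": 1.0} for d in hits[:limit]]

    def reset(self) -> None:
        self._docs.clear()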
@@ -1,24 +1,17 @@
 import hashlib
 import logging
 import os
-import shutil
-from typing import Any, Dict, List, Optional, Union
-
-import chromadb
-import chromadb.errors
-from chromadb.api import ClientAPI
-from chromadb.api.types import OneOrMany
-from chromadb.config import Settings
+import traceback
+import warnings
+from typing import Any, cast

 from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
-from crewai.rag.embeddings.configurator import EmbeddingConfigurator
-from crewai.utilities.chromadb import sanitize_collection_name
-from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
+from crewai.rag.chromadb.config import ChromaDBConfig
+from crewai.rag.chromadb.types import ChromaEmbeddingFunctionWrapper
+from crewai.rag.config.utils import get_rag_client
+from crewai.rag.core.base_client import BaseClient
+from crewai.rag.embeddings.factory import get_embedding_function
+from crewai.rag.factory import create_client
+from crewai.rag.types import BaseRecord, SearchResult
 from crewai.utilities.logger import Logger
-from crewai.utilities.paths import db_storage_path
-from crewai.utilities.chromadb import create_persistent_client
 from crewai.utilities.logger_utils import suppress_logging


 class KnowledgeStorage(BaseKnowledgeStorage):
@@ -27,167 +20,105 @@ class KnowledgeStorage(BaseKnowledgeStorage):
     search efficiency.
     """

-    collection: Optional[chromadb.Collection] = None
-    collection_name: Optional[str] = "knowledge"
-    app: Optional[ClientAPI] = None

     def __init__(
         self,
-        embedder: Optional[Dict[str, Any]] = None,
-        collection_name: Optional[str] = None,
-    ):
+        embedder: dict[str, Any] | None = None,
+        collection_name: str | None = None,
+    ) -> None:
         self.collection_name = collection_name
-        self._set_embedder_config(embedder)
+        self._client: BaseClient | None = None

-    def search(
-        self,
-        query: List[str],
-        limit: int = 3,
-        filter: Optional[dict] = None,
-        score_threshold: float = 0.35,
-    ) -> List[Dict[str, Any]]:
-        with suppress_logging(
-            "chromadb.segment.impl.vector.local_persistent_hnsw", logging.ERROR
-        ):
-            if self.collection:
-                fetched = self.collection.query(
-                    query_texts=query,
-                    n_results=limit,
-                    where=filter,
-                )
-                results = []
-                for i in range(len(fetched["ids"][0])):  # type: ignore
-                    result = {
-                        "id": fetched["ids"][0][i],  # type: ignore
-                        "metadata": fetched["metadatas"][0][i],  # type: ignore
-                        "context": fetched["documents"][0][i],  # type: ignore
-                        "score": fetched["distances"][0][i],  # type: ignore
-                    }
-                    if result["score"] >= score_threshold:
-                        results.append(result)
-                return results
-            else:
-                raise Exception("Collection not initialized")

     def initialize_knowledge_storage(self):
+        # Suppress deprecation warnings from chromadb, which are not relevant to us
+        # TODO: Remove this once we upgrade chromadb to at least 1.0.8.
+        warnings.filterwarnings(
+            "ignore",
+            message=r".*'model_fields'.*is deprecated.*",
+            module=r"^chromadb(\.|$)",
+        )

-        self.app = create_persistent_client(
-            path=os.path.join(db_storage_path(), "knowledge"),
-            settings=Settings(allow_reset=True),
-        )
+        if embedder:
+            embedding_function = get_embedding_function(embedder)
+            config = ChromaDBConfig(
+                embedding_function=cast(
+                    ChromaEmbeddingFunctionWrapper, embedding_function
+                )
+            )
+            self._client = create_client(config)

+    def _get_client(self) -> BaseClient:
+        """Get the appropriate client - instance-specific or global."""
+        return self._client if self._client else get_rag_client()

+    def search(
+        self,
+        query: list[str],
+        limit: int = 5,
+        metadata_filter: dict[str, Any] | None = None,
+        score_threshold: float = 0.6,
+    ) -> list[SearchResult]:
+        try:
+            if not query:
+                raise ValueError("Query cannot be empty")

+            client = self._get_client()
+            collection_name = (
+                f"knowledge_{self.collection_name}"
+                if self.collection_name
+                else "knowledge"
+            )
-            if self.app:
-                self.collection = self.app.get_or_create_collection(
-                    name=sanitize_collection_name(collection_name),
-                    embedding_function=self.embedder,
-                )
-            else:
-                raise Exception("Vector Database Client not initialized")
-        except Exception:
-            raise Exception("Failed to create or get collection")
+            query_text = " ".join(query) if len(query) > 1 else query[0]

-    def reset(self):
-        base_path = os.path.join(db_storage_path(), KNOWLEDGE_DIRECTORY)
-        if not self.app:
-            self.app = create_persistent_client(
-                path=base_path, settings=Settings(allow_reset=True)
+            return client.search(
+                collection_name=collection_name,
+                query=query_text,
+                limit=limit,
+                metadata_filter=metadata_filter,
+                score_threshold=score_threshold,
             )

-        self.app.reset()
-        shutil.rmtree(base_path)
-        self.app = None
-        self.collection = None

-    def save(
-        self,
-        documents: List[str],
-        metadata: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
-    ):
-        if not self.collection:
-            raise Exception("Collection not initialized")

-        try:
-            # Create a dictionary to store unique documents
-            unique_docs = {}

-            # Generate IDs and create a mapping of id -> (document, metadata)
-            for idx, doc in enumerate(documents):
-                doc_id = hashlib.sha256(doc.encode("utf-8")).hexdigest()
-                doc_metadata = None
-                if metadata is not None:
-                    if isinstance(metadata, list):
-                        doc_metadata = metadata[idx]
-                    else:
-                        doc_metadata = metadata
-                unique_docs[doc_id] = (doc, doc_metadata)

-            # Prepare filtered lists for ChromaDB
-            filtered_docs = []
-            filtered_metadata = []
-            filtered_ids = []

-            # Build the filtered lists
-            for doc_id, (doc, meta) in unique_docs.items():
-                filtered_docs.append(doc)
-                filtered_metadata.append(meta)
-                filtered_ids.append(doc_id)

-            # If we have no metadata at all, set it to None
-            final_metadata: Optional[OneOrMany[chromadb.Metadata]] = (
-                None if all(m is None for m in filtered_metadata) else filtered_metadata
-            )

-            self.collection.upsert(
-                documents=filtered_docs,
-                metadatas=final_metadata,
-                ids=filtered_ids,
-            )
-        except chromadb.errors.InvalidDimensionException as e:
-            Logger(verbose=True).log(
-                "error",
-                "Embedding dimension mismatch. This usually happens when mixing different embedding models. Try resetting the collection using `crewai reset-memories -a`",
-                "red",
-            )
-            raise ValueError(
-                "Embedding dimension mismatch. Make sure you're using the same embedding model "
-                "across all operations with this collection."
-                "Try resetting the collection using `crewai reset-memories -a`"
-            ) from e
+        except Exception as e:
+            logging.error(
+                f"Error during knowledge search: {e!s}\n{traceback.format_exc()}"
+            )
+            return []

+    def reset(self) -> None:
+        try:
+            client = self._get_client()
+            collection_name = (
+                f"knowledge_{self.collection_name}"
+                if self.collection_name
+                else "knowledge"
+            )
+            client.delete_collection(collection_name=collection_name)
+        except Exception as e:
+            logging.error(
+                f"Error during knowledge reset: {e!s}\n{traceback.format_exc()}"
+            )

+    def save(self, documents: list[str]) -> None:
+        try:
+            client = self._get_client()
+            collection_name = (
+                f"knowledge_{self.collection_name}"
+                if self.collection_name
+                else "knowledge"
+            )
+            client.get_or_create_collection(collection_name=collection_name)

+            rag_documents: list[BaseRecord] = [{"content": doc} for doc in documents]

+            client.add_documents(
+                collection_name=collection_name, documents=rag_documents
+            )
+        except Exception as e:
+            if "dimension mismatch" in str(e).lower():
+                Logger(verbose=True).log(
+                    "error",
+                    "Embedding dimension mismatch. This usually happens when mixing different embedding models. Try resetting the collection using `crewai reset-memories -a`",
+                    "red",
+                )
+                raise ValueError(
+                    "Embedding dimension mismatch. Make sure you're using the same embedding model "
+                    "across all operations with this collection."
+                    "Try resetting the collection using `crewai reset-memories -a`"
+                ) from e
+            Logger(verbose=True).log("error", f"Failed to upsert documents: {e}", "red")
+            raise

-    def _create_default_embedding_function(self):
-        from chromadb.utils.embedding_functions.openai_embedding_function import (
-            OpenAIEmbeddingFunction,
-        )

-        return OpenAIEmbeddingFunction(
-            api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
-        )

-    def _set_embedder_config(self, embedder: Optional[Dict[str, Any]] = None) -> None:
-        """Set the embedding configuration for the knowledge storage.

-        Args:
-            embedder_config (Optional[Dict[str, Any]]): Configuration dictionary for the embedder.
-                If None or empty, defaults to the default embedding function.
-        """
-        self.embedder = (
-            EmbeddingConfigurator().configure_embedder(embedder)
-            if embedder
-            else self._create_default_embedding_function()
-        )
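Net effect of the rewrite: `KnowledgeStorage` no longer talks to chromadb directly but routes every call through a `BaseClient` (instance-specific when an embedder was configured, otherwise the global RAG client), and `search` logs failures and returns an empty list instead of raising. A hedged sketch of the resulting call path, using only names visible in this diff:

from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage

storage = KnowledgeStorage(collection_name="docs")
storage.initialize_knowledge_storage()

# save() now takes only documents; records are wrapped as {"content": doc}
# BaseRecord mappings before client.add_documents().
storage.save(["CrewAI supports pluggable RAG clients."])

# search() joins multi-string queries, applies the new 5/0.6 defaults, and
# returns [] on failure instead of raising.
for hit in storage.search(["pluggable RAG"]):
    print(hit["content"])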
@@ -1,12 +1,12 @@
-from typing import Any, Dict, List
+from crewai.rag.types import SearchResult


-def extract_knowledge_context(knowledge_snippets: List[Dict[str, Any]]) -> str:
+def extract_knowledge_context(knowledge_snippets: list[SearchResult]) -> str:
     """Extract knowledge from the task prompt."""
     valid_snippets = [
-        result["context"]
+        result["content"]
         for result in knowledge_snippets
-        if result and result.get("context")
+        if result and result.get("content")
     ]
     snippet = "\n".join(valid_snippets)
     return f"Additional Information: {snippet}" if valid_snippets else ""
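A quick check of the renamed key, assuming the helper lives at `crewai.utilities.knowledge_utils`; the snippet values are invented:

from crewai.utilities.knowledge_utils import extract_knowledge_context

snippets = [
    {"content": "Flows persist state by id.", "metadata": {}, "score": 0.82},
    {"content": "", "metadata": {}, "score": 0.91},  # falsy content is dropped
]
# -> "Additional Information: Flows persist state by id."
print(extract_knowledge_context(snippets))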
@@ -1,35 +1,24 @@
 import asyncio
 import inspect
 import uuid
+from collections.abc import Callable
 from typing import (
     Any,
-    Callable,
-    Dict,
-    List,
-    Optional,
-    Tuple,
-    Type,
-    Union,
     cast,
     get_args,
     get_origin,
 )

-try:
-    from typing import Self
-except ImportError:
-    from typing_extensions import Self
-
 from pydantic import (
     UUID4,
     BaseModel,
     Field,
     InstanceOf,
     PrivateAttr,
-    model_validator,
     field_validator,
+    model_validator,
 )
+from typing_extensions import Self

 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
@@ -37,14 +26,20 @@ from crewai.agents.cache import CacheHandler
 from crewai.agents.parser import (
     AgentAction,
     AgentFinish,
-    OutputParserException,
+    OutputParserError,
 )
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.agent_events import (
+    LiteAgentExecutionCompletedEvent,
+    LiteAgentExecutionErrorEvent,
+    LiteAgentExecutionStartedEvent,
+)
+from crewai.events.types.logging_events import AgentLogsExecutionEvent
 from crewai.flow.flow_trackable import FlowTrackable
 from crewai.llm import LLM, BaseLLM
 from crewai.tools.base_tool import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.utilities import I18N
+from crewai.utilities.guardrail import process_guardrail
 from crewai.utilities.agent_utils import (
     enforce_rpm_limit,
     format_message_for_llm,
@@ -62,14 +57,7 @@ from crewai.utilities.agent_utils import (
     render_text_description_and_args,
 )
 from crewai.utilities.converter import generate_model_description
-from crewai.events.types.logging_events import AgentLogsExecutionEvent
-from crewai.events.types.agent_events import (
-    LiteAgentExecutionCompletedEvent,
-    LiteAgentExecutionErrorEvent,
-    LiteAgentExecutionStartedEvent,
-)
-from crewai.events.event_bus import crewai_event_bus
-
-from crewai.utilities.guardrail import process_guardrail
 from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.printer import Printer
 from crewai.utilities.token_counter_callback import TokenCalcHandler
@@ -82,15 +70,15 @@ class LiteAgentOutput(BaseModel):
     model_config = {"arbitrary_types_allowed": True}

     raw: str = Field(description="Raw output of the agent", default="")
-    pydantic: Optional[BaseModel] = Field(
+    pydantic: BaseModel | None = Field(
         description="Pydantic output of the agent", default=None
     )
     agent_role: str = Field(description="Role of the agent that produced this output")
-    usage_metrics: Optional[Dict[str, Any]] = Field(
+    usage_metrics: dict[str, Any] | None = Field(
         description="Token usage metrics for this execution", default=None
     )

-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         """Convert pydantic_output to a dictionary."""
         if self.pydantic:
             return self.pydantic.model_dump()
@@ -130,10 +118,10 @@ class LiteAgent(FlowTrackable, BaseModel):
     role: str = Field(description="Role of the agent")
     goal: str = Field(description="Goal of the agent")
     backstory: str = Field(description="Backstory of the agent")
-    llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
+    llm: str | InstanceOf[BaseLLM] | Any | None = Field(
         default=None, description="Language model that will run the agent"
     )
-    tools: List[BaseTool] = Field(
+    tools: list[BaseTool] = Field(
         default_factory=list, description="Tools at agent's disposal"
     )

@@ -141,7 +129,7 @@ class LiteAgent(FlowTrackable, BaseModel):
     max_iterations: int = Field(
         default=15, description="Maximum number of iterations for tool usage"
     )
-    max_execution_time: Optional[int] = Field(
+    max_execution_time: int | None = Field(
         default=None, description=". Maximum execution time in seconds"
     )
     respect_context_window: bool = Field(
@@ -152,52 +140,50 @@ class LiteAgent(FlowTrackable, BaseModel):
         default=True,
         description="Whether to use stop words to prevent the LLM from using tools",
     )
-    request_within_rpm_limit: Optional[Callable[[], bool]] = Field(
+    request_within_rpm_limit: Callable[[], bool] | None = Field(
         default=None,
         description="Callback to check if the request is within the RPM limit",
    )
     i18n: I18N = Field(default=I18N(), description="Internationalization settings.")

     # Output and Formatting Properties
-    response_format: Optional[Type[BaseModel]] = Field(
+    response_format: type[BaseModel] | None = Field(
         default=None, description="Pydantic model for structured output"
     )
     verbose: bool = Field(
         default=False, description="Whether to print execution details"
     )
-    callbacks: List[Callable] = Field(
+    callbacks: list[Callable] = Field(
         default=[], description="Callbacks to be used for the agent"
     )

     # Guardrail Properties
-    guardrail: Optional[Union[Callable[[LiteAgentOutput], Tuple[bool, Any]], str]] = (
-        Field(
-            default=None,
-            description="Function or string description of a guardrail to validate agent output",
-        )
-    )
+    guardrail: Callable[[LiteAgentOutput], tuple[bool, Any]] | str | None = Field(
+        default=None,
+        description="Function or string description of a guardrail to validate agent output",
+    )
     guardrail_max_retries: int = Field(
         default=3, description="Maximum number of retries when guardrail fails"
     )

     # State and Results
-    tools_results: List[Dict[str, Any]] = Field(
+    tools_results: list[dict[str, Any]] = Field(
         default=[], description="Results of the tools used by the agent."
     )

     # Reference of Agent
-    original_agent: Optional[BaseAgent] = Field(
+    original_agent: BaseAgent | None = Field(
         default=None, description="Reference to the agent that created this LiteAgent"
     )
     # Private Attributes
-    _parsed_tools: List[CrewStructuredTool] = PrivateAttr(default_factory=list)
+    _parsed_tools: list[CrewStructuredTool] = PrivateAttr(default_factory=list)
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
     _cache_handler: CacheHandler = PrivateAttr(default_factory=CacheHandler)
     _key: str = PrivateAttr(default_factory=lambda: str(uuid.uuid4()))
-    _messages: List[Dict[str, str]] = PrivateAttr(default_factory=list)
+    _messages: list[dict[str, str]] = PrivateAttr(default_factory=list)
     _iterations: int = PrivateAttr(default=0)
     _printer: Printer = PrivateAttr(default_factory=Printer)
-    _guardrail: Optional[Callable] = PrivateAttr(default=None)
+    _guardrail: Callable | None = PrivateAttr(default=None)
     _guardrail_retry_count: int = PrivateAttr(default=0)

     @model_validator(mode="after")
@@ -241,8 +227,8 @@ class LiteAgent(FlowTrackable, BaseModel):
     @field_validator("guardrail", mode="before")
     @classmethod
     def validate_guardrail_function(
-        cls, v: Optional[Union[Callable, str]]
-    ) -> Optional[Union[Callable, str]]:
+        cls, v: Callable | str | None
+    ) -> Callable | str | None:
         """Validate that the guardrail function has the correct signature.

         If v is a callable, validate that it has the correct signature.
@@ -267,7 +253,7 @@ class LiteAgent(FlowTrackable, BaseModel):

         # Check return annotation if present
         if sig.return_annotation is not sig.empty:
-            if sig.return_annotation == Tuple[bool, Any]:
+            if sig.return_annotation == tuple[bool, Any]:
                 return v

             origin = get_origin(sig.return_annotation)
@@ -290,7 +276,7 @@ class LiteAgent(FlowTrackable, BaseModel):
         """Return the original role for compatibility with tool interfaces."""
         return self.role

-    def kickoff(self, messages: Union[str, List[Dict[str, str]]]) -> LiteAgentOutput:
+    def kickoff(self, messages: str | list[dict[str, str]]) -> LiteAgentOutput:
         """
         Execute the agent with the given messages.

@@ -338,7 +324,7 @@ class LiteAgent(FlowTrackable, BaseModel):
             )
             raise e

-    def _execute_core(self, agent_info: Dict[str, Any]) -> LiteAgentOutput:
+    def _execute_core(self, agent_info: dict[str, Any]) -> LiteAgentOutput:
         # Emit event for agent execution start
         crewai_event_bus.emit(
             self,
@@ -351,7 +337,7 @@ class LiteAgent(FlowTrackable, BaseModel):

         # Execute the agent using invoke loop
         agent_finish = self._invoke_loop()
-        formatted_result: Optional[BaseModel] = None
+        formatted_result: BaseModel | None = None
         if self.response_format:
             try:
                 # Cast to BaseModel to ensure type safety
@@ -360,7 +346,7 @@ class LiteAgent(FlowTrackable, BaseModel):
                 formatted_result = result
             except Exception as e:
                 self._printer.print(
-                    content=f"Failed to parse output into response format: {str(e)}",
+                    content=f"Failed to parse output into response format: {e!s}",
                     color="yellow",
                 )

@@ -428,7 +414,7 @@ class LiteAgent(FlowTrackable, BaseModel):
         return output

     async def kickoff_async(
-        self, messages: Union[str, List[Dict[str, str]]]
+        self, messages: str | list[dict[str, str]]
     ) -> LiteAgentOutput:
         """
         Execute the agent asynchronously with the given messages.
@@ -475,8 +461,8 @@ class LiteAgent(FlowTrackable, BaseModel):
         return base_prompt

     def _format_messages(
-        self, messages: Union[str, List[Dict[str, str]]]
-    ) -> List[Dict[str, str]]:
+        self, messages: str | list[dict[str, str]]
+    ) -> list[dict[str, str]]:
         """Format messages for the LLM."""
         if isinstance(messages, str):
             messages = [{"role": "user", "content": messages}]
@@ -548,7 +534,7 @@ class LiteAgent(FlowTrackable, BaseModel):
                 )

                 self._append_message(formatted_answer.text, role="assistant")
-            except OutputParserException as e:
+            except OutputParserError as e:  # noqa: PERF203
                 formatted_answer = handle_output_parser_exception(
                     e=e,
                     messages=self._messages,
@@ -571,18 +557,21 @@ class LiteAgent(FlowTrackable, BaseModel):
                         i18n=self.i18n,
                     )
                     continue
-                else:
-                    handle_unknown_error(self._printer, e)
-                    raise e
+                handle_unknown_error(self._printer, e)
+                raise e

             finally:
                 self._iterations += 1

-        assert isinstance(formatted_answer, AgentFinish)
+        if not isinstance(formatted_answer, AgentFinish):
+            raise RuntimeError(
+                "Agent execution ended without reaching a final answer. "
+                f"Got {type(formatted_answer).__name__} instead of AgentFinish."
+            )
         self._show_logs(formatted_answer)
         return formatted_answer

-    def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
+    def _show_logs(self, formatted_answer: AgentAction | AgentFinish):
         """Show logs for the agent's execution."""
         crewai_event_bus.emit(
             self,
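`validate_guardrail_function` now accepts the builtin `tuple[bool, Any]` annotation, and the loop's trailing `assert` becomes an explicit `RuntimeError`. A sketch of a conforming guardrail; the length rule is invented, and the import path assumes `LiteAgentOutput` is exported from `crewai.lite_agent`:

from typing import Any

from crewai.lite_agent import LiteAgentOutput


def max_500_chars(output: LiteAgentOutput) -> tuple[bool, Any]:
    # A guardrail returns (passed, payload); on failure the payload becomes
    # feedback for a retry, up to guardrail_max_retries attempts.
    if len(output.raw) <= 500:
        return True, output.raw
    return False, "Response too long; answer in under 500 characters."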
@@ -1,11 +1,11 @@
 from .entity.entity_memory import EntityMemory
+from .external.external_memory import ExternalMemory
 from .long_term.long_term_memory import LongTermMemory
 from .short_term.short_term_memory import ShortTermMemory
-from .external.external_memory import ExternalMemory

 __all__ = [
     "EntityMemory",
+    "ExternalMemory",
     "LongTermMemory",
     "ShortTermMemory",
-    "ExternalMemory",
 ]
@@ -1,4 +1,6 @@
-from typing import Optional, TYPE_CHECKING
+from __future__ import annotations
+
+from typing import TYPE_CHECKING

 from crewai.memory import (
     EntityMemory,
@@ -19,9 +21,9 @@ class ContextualMemory:
         ltm: LongTermMemory,
         em: EntityMemory,
         exm: ExternalMemory,
-        agent: Optional["Agent"] = None,
-        task: Optional["Task"] = None,
-    ):
+        agent: Agent | None = None,
+        task: Task | None = None,
+    ) -> None:
         self.stm = stm
         self.ltm = ltm
         self.em = em
@@ -42,7 +44,7 @@ class ContextualMemory:
         self.exm.agent = self.agent
         self.exm.task = self.task

-    def build_context_for_task(self, task, context) -> str:
+    def build_context_for_task(self, task: Task, context: str) -> str:
         """
         Automatically builds a minimal, highly relevant set of contextual information
         for a given task.
@@ -52,14 +54,15 @@ class ContextualMemory:
         if query == "":
             return ""

-        context = []
-        context.append(self._fetch_ltm_context(task.description))
-        context.append(self._fetch_stm_context(query))
-        context.append(self._fetch_entity_context(query))
-        context.append(self._fetch_external_context(query))
-        return "\n".join(filter(None, context))
+        context_parts = [
+            self._fetch_ltm_context(task.description),
+            self._fetch_stm_context(query),
+            self._fetch_entity_context(query),
+            self._fetch_external_context(query),
+        ]
+        return "\n".join(filter(None, context_parts))

-    def _fetch_stm_context(self, query) -> str:
+    def _fetch_stm_context(self, query: str) -> str:
         """
         Fetches recent relevant insights from STM related to the task's description and expected_output,
         formatted as bullet points.
@@ -70,11 +73,11 @@ class ContextualMemory:

         stm_results = self.stm.search(query)
         formatted_results = "\n".join(
-            [f"- {result['context']}" for result in stm_results]
+            [f"- {result['content']}" for result in stm_results]
         )
         return f"Recent Insights:\n{formatted_results}" if stm_results else ""

-    def _fetch_ltm_context(self, task) -> Optional[str]:
+    def _fetch_ltm_context(self, task: str) -> str | None:
         """
         Fetches historical data or insights from LTM that are relevant to the task's description and expected_output,
         formatted as bullet points.
@@ -90,14 +93,14 @@ class ContextualMemory:
         formatted_results = [
             suggestion
             for result in ltm_results
-            for suggestion in result["metadata"]["suggestions"]  # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
+            for suggestion in result["metadata"]["suggestions"]
         ]
         formatted_results = list(dict.fromkeys(formatted_results))
         formatted_results = "\n".join([f"- {result}" for result in formatted_results])  # type: ignore # Incompatible types in assignment (expression has type "str", variable has type "list[str]")

         return f"Historical Data:\n{formatted_results}" if ltm_results else ""

-    def _fetch_entity_context(self, query) -> str:
+    def _fetch_entity_context(self, query: str) -> str:
         """
         Fetches relevant entity information from Entity Memory related to the task's description and expected_output,
         formatted as bullet points.
@@ -107,7 +110,7 @@ class ContextualMemory:

         em_results = self.em.search(query)
         formatted_results = "\n".join(
-            [f"- {result['context']}" for result in em_results]  # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
+            [f"- {result['content']}" for result in em_results]
         )
         return f"Entities:\n{formatted_results}" if em_results else ""

@@ -128,6 +131,6 @@ class ContextualMemory:
             return ""

         formatted_memories = "\n".join(
-            f"- {result['context']}" for result in external_memories
+            f"- {result['content']}" for result in external_memories
         )
         return f"External memories:\n{formatted_memories}"
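The `build_context_for_task` refactor also stops shadowing its `context` parameter; the joined list relies on `filter(None, ...)` to drop memory sections that came back empty. In isolation:

parts = ["Historical Data:\n- prefer primary sources", "", "Entities:\n- ACME Corp"]
# filter(None, ...) removes empty sections so no blank headers reach the prompt.
assert "\n".join(filter(None, parts)) == (
    "Historical Data:\n- prefer primary sources\nEntities:\n- ACME Corp"
)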
src/crewai/memory/entity/entity_memory.py

@@ -1,20 +1,20 @@
-from typing import Any
+import time
+from typing import Any
 
 from pydantic import PrivateAttr
 
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.memory_events import (
+    MemoryQueryCompletedEvent,
+    MemoryQueryFailedEvent,
+    MemoryQueryStartedEvent,
+    MemorySaveCompletedEvent,
+    MemorySaveFailedEvent,
+    MemorySaveStartedEvent,
+)
 from crewai.memory.entity.entity_memory_item import EntityMemoryItem
 from crewai.memory.memory import Memory
 from crewai.memory.storage.rag_storage import RAGStorage
-from crewai.events.event_bus import crewai_event_bus
-from crewai.events.types.memory_events import (
-    MemoryQueryStartedEvent,
-    MemoryQueryCompletedEvent,
-    MemoryQueryFailedEvent,
-    MemorySaveStartedEvent,
-    MemorySaveCompletedEvent,
-    MemorySaveFailedEvent,
-)
 
 
 class EntityMemory(Memory):
@@ -31,10 +31,10 @@ class EntityMemory(Memory):
         if memory_provider == "mem0":
             try:
                 from crewai.memory.storage.mem0_storage import Mem0Storage
-            except ImportError:
+            except ImportError as e:
                 raise ImportError(
                     "Mem0 is not installed. Please install it with `pip install mem0ai`."
-                )
+                ) from e
             config = embedder_config.get("config") if embedder_config else None
             storage = Mem0Storage(type="short_term", crew=crew, config=config)
         else:
@@ -90,23 +90,31 @@ class EntityMemory(Memory):
         saved_count = 0
         errors = []
 
+        def save_single_item(item: EntityMemoryItem) -> tuple[bool, str | None]:
+            """Save a single item and return success status."""
+            try:
+                if self._memory_provider == "mem0":
+                    data = f"""
+                    Remember details about the following entity:
+                    Name: {item.name}
+                    Type: {item.type}
+                    Entity Description: {item.description}
+                    """
+                else:
+                    data = f"{item.name}({item.type}): {item.description}"
+
+                super(EntityMemory, self).save(data, item.metadata)
+                return True, None
+            except Exception as e:
+                return False, f"{item.name}: {e!s}"
+
         try:
             for item in items:
-                try:
-                    if self._memory_provider == "mem0":
-                        data = f"""
-                        Remember details about the following entity:
-                        Name: {item.name}
-                        Type: {item.type}
-                        Entity Description: {item.description}
-                        """
-                    else:
-                        data = f"{item.name}({item.type}): {item.description}"
-
-                    super().save(data, item.metadata)
+                success, error = save_single_item(item)
+                if success:
                     saved_count += 1
-                except Exception as e:
-                    errors.append(f"{item.name}: {str(e)}")
+                else:
+                    errors.append(error)
 
         if is_batch:
             emit_value = f"Saved {saved_count} entities"
@@ -153,8 +161,8 @@ class EntityMemory(Memory):
     def search(
         self,
         query: str,
-        limit: int = 3,
-        score_threshold: float = 0.35,
+        limit: int = 5,
+        score_threshold: float = 0.6,
     ):
         crewai_event_bus.emit(
             self,
@@ -206,4 +214,6 @@ class EntityMemory(Memory):
         try:
            self.storage.reset()
         except Exception as e:
-            raise Exception(f"An error occurred while resetting the entity memory: {e}")
+            raise Exception(
+                f"An error occurred while resetting the entity memory: {e}"
+            ) from e
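Review note: the batch-save path above now routes each entity through a nested helper that returns a (success, error) pair instead of a try/except in the loop body, so one bad item no longer aborts the batch. A standalone sketch of that pattern (illustrative names only, not the crewAI API):

```python
# Minimal sketch of the per-item save pattern used above: each item is
# handled by a helper returning (success, error) so one failure cannot
# abort the whole batch. Names are illustrative, not the crewAI API.
def save_batch(items: list[str]) -> tuple[int, list[str]]:
    def save_single(item: str) -> tuple[bool, str | None]:
        try:
            if not item:
                raise ValueError("empty item")  # stand-in for a storage error
            # ... persist `item` here ...
            return True, None
        except Exception as e:
            return False, f"{item!r}: {e!s}"

    saved, errors = 0, []
    for item in items:
        ok, err = save_single(item)
        if ok:
            saved += 1
        else:
            errors.append(err)
    return saved, errors


print(save_batch(["alice", "", "bob"]))  # (2, ["'': empty item"])
```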
src/crewai/memory/external/external_memory.py (34 changes, vendored)

@@ -1,41 +1,41 @@
-from typing import TYPE_CHECKING, Any, Dict, Optional
+import time
+from typing import TYPE_CHECKING, Any
 
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.memory_events import (
+    MemoryQueryCompletedEvent,
+    MemoryQueryFailedEvent,
+    MemoryQueryStartedEvent,
+    MemorySaveCompletedEvent,
+    MemorySaveFailedEvent,
+    MemorySaveStartedEvent,
+)
 from crewai.memory.external.external_memory_item import ExternalMemoryItem
 from crewai.memory.memory import Memory
 from crewai.memory.storage.interface import Storage
-from crewai.events.event_bus import crewai_event_bus
-from crewai.events.types.memory_events import (
-    MemoryQueryStartedEvent,
-    MemoryQueryCompletedEvent,
-    MemoryQueryFailedEvent,
-    MemorySaveStartedEvent,
-    MemorySaveCompletedEvent,
-    MemorySaveFailedEvent,
-)
 
 if TYPE_CHECKING:
     from crewai.memory.storage.mem0_storage import Mem0Storage
 
 
 class ExternalMemory(Memory):
-    def __init__(self, storage: Optional[Storage] = None, **data: Any):
+    def __init__(self, storage: Storage | None = None, **data: Any):
         super().__init__(storage=storage, **data)
 
     @staticmethod
-    def _configure_mem0(crew: Any, config: Dict[str, Any]) -> "Mem0Storage":
+    def _configure_mem0(crew: Any, config: dict[str, Any]) -> "Mem0Storage":
         from crewai.memory.storage.mem0_storage import Mem0Storage
 
         return Mem0Storage(type="external", crew=crew, config=config)
 
     @staticmethod
-    def external_supported_storages() -> Dict[str, Any]:
+    def external_supported_storages() -> dict[str, Any]:
         return {
             "mem0": ExternalMemory._configure_mem0,
         }
 
     @staticmethod
-    def create_storage(crew: Any, embedder_config: Optional[Dict[str, Any]]) -> Storage:
+    def create_storage(crew: Any, embedder_config: dict[str, Any] | None) -> Storage:
         if not embedder_config:
             raise ValueError("embedder_config is required")
@@ -52,7 +52,7 @@ class ExternalMemory(Memory):
     def save(
         self,
         value: Any,
-        metadata: Optional[Dict[str, Any]] = None,
+        metadata: dict[str, Any] | None = None,
     ) -> None:
         """Saves a value into the external storage."""
         crewai_event_bus.emit(
@@ -103,8 +103,8 @@ class ExternalMemory(Memory):
     def search(
         self,
         query: str,
-        limit: int = 3,
-        score_threshold: float = 0.35,
+        limit: int = 5,
+        score_threshold: float = 0.6,
     ):
         crewai_event_bus.emit(
             self,
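Review note: `create_storage` resolves a provider name to a storage constructor and insists on an `embedder_config`. A hedged usage sketch, assuming `mem0ai` is installed and a Mem0 API key or local config is available; the `user_id` value is made up:

```python
# Hedged usage sketch for the factory above; assumes mem0ai is installed
# and Mem0 credentials are configured. "john" is an invented user id.
from crewai.memory.external.external_memory import ExternalMemory

embedder_config = {
    "provider": "mem0",             # must be one of external_supported_storages()
    "config": {"user_id": "john"},  # forwarded to the Mem0Storage constructor
}

storage = ExternalMemory.create_storage(crew=None, embedder_config=embedder_config)
memory = ExternalMemory(storage=storage)
memory.save("User prefers concise answers", metadata={"topic": "style"})
```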
src/crewai/memory/external/external_memory_item.py

@@ -1,12 +1,12 @@
-from typing import Any, Dict, Optional
+from typing import Any
 
 
 class ExternalMemoryItem:
     def __init__(
         self,
         value: Any,
-        metadata: Optional[Dict[str, Any]] = None,
-        agent: Optional[str] = None,
+        metadata: dict[str, Any] | None = None,
+        agent: str | None = None,
     ):
         self.value = value
         self.metadata = metadata
src/crewai/memory/long_term/long_term_memory.py

@@ -1,17 +1,17 @@
-from typing import Any, Dict, List
+import time
+from typing import Any
 
-from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
-from crewai.memory.memory import Memory
 from crewai.events.event_bus import crewai_event_bus
 from crewai.events.types.memory_events import (
-    MemoryQueryStartedEvent,
     MemoryQueryCompletedEvent,
     MemoryQueryFailedEvent,
-    MemorySaveStartedEvent,
+    MemoryQueryStartedEvent,
     MemorySaveCompletedEvent,
     MemorySaveFailedEvent,
+    MemorySaveStartedEvent,
 )
+from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
+from crewai.memory.memory import Memory
 from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage
 
 
@@ -84,7 +84,7 @@ class LongTermMemory(Memory):
         self,
         task: str,
         latest_n: int = 3,
-    ) -> List[Dict[str, Any]]:  # type: ignore # signature of "search" incompatible with supertype "Memory"
+    ) -> list[dict[str, Any]]:  # type: ignore # signature of "search" incompatible with supertype "Memory"
         crewai_event_bus.emit(
             self,
             event=MemoryQueryStartedEvent(
src/crewai/memory/long_term/long_term_memory_item.py

@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any
 
 
 class LongTermMemoryItem:
@@ -8,8 +8,8 @@ class LongTermMemoryItem:
         task: str,
         expected_output: str,
         datetime: str,
-        quality: Optional[Union[int, float]] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        quality: int | float | None = None,
+        metadata: dict[str, Any] | None = None,
     ):
         self.task = task
         self.agent = agent
src/crewai/memory/memory.py

@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Optional
 
 from pydantic import BaseModel
 
@@ -12,8 +12,8 @@ class Memory(BaseModel):
     Base class for memory, now supporting agent tags and generic metadata.
     """
 
-    embedder_config: Optional[Dict[str, Any]] = None
-    crew: Optional[Any] = None
+    embedder_config: dict[str, Any] | None = None
+    crew: Any | None = None
 
     storage: Any
     _agent: Optional["Agent"] = None
@@ -45,7 +45,7 @@ class Memory(BaseModel):
     def save(
         self,
         value: Any,
-        metadata: Optional[Dict[str, Any]] = None,
+        metadata: dict[str, Any] | None = None,
     ) -> None:
         metadata = metadata or {}
 
@@ -54,9 +54,9 @@ class Memory(BaseModel):
     def search(
         self,
         query: str,
-        limit: int = 3,
-        score_threshold: float = 0.35,
-    ) -> List[Any]:
+        limit: int = 5,
+        score_threshold: float = 0.6,
+    ) -> list[Any]:
         return self.storage.search(
             query=query, limit=limit, score_threshold=score_threshold
         )
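Review note: most hunks in this diff are the same mechanical migration from `typing.Optional`/`Dict`/`List`/`Union` to the PEP 604/585 builtin spellings. The two forms are interchangeable on Python 3.10+, as this small sketch shows:

```python
# The annotation styles migrated throughout this diff are interchangeable
# on Python 3.10+; the builtin form needs no `typing` imports.
from typing import Any, Dict, Optional


def old_style(metadata: Optional[Dict[str, Any]] = None) -> None: ...
def new_style(metadata: dict[str, Any] | None = None) -> None: ...


# The `X | None` form is also usable at runtime, e.g. in isinstance checks:
print(isinstance(None, dict | None), isinstance({}, dict | None))  # True True
```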
src/crewai/memory/short_term/short_term_memory.py

@@ -1,20 +1,20 @@
-from typing import Any, Dict, Optional
+import time
+from typing import Any
 
 from pydantic import PrivateAttr
 
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.memory_events import (
+    MemoryQueryCompletedEvent,
+    MemoryQueryFailedEvent,
+    MemoryQueryStartedEvent,
+    MemorySaveCompletedEvent,
+    MemorySaveFailedEvent,
+    MemorySaveStartedEvent,
+)
 from crewai.memory.memory import Memory
 from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
 from crewai.memory.storage.rag_storage import RAGStorage
-from crewai.events.event_bus import crewai_event_bus
-from crewai.events.types.memory_events import (
-    MemoryQueryStartedEvent,
-    MemoryQueryCompletedEvent,
-    MemoryQueryFailedEvent,
-    MemorySaveStartedEvent,
-    MemorySaveCompletedEvent,
-    MemorySaveFailedEvent,
-)
 
 
 class ShortTermMemory(Memory):
@@ -26,17 +26,17 @@ class ShortTermMemory(Memory):
     MemoryItem instances.
     """
 
-    _memory_provider: Optional[str] = PrivateAttr()
+    _memory_provider: str | None = PrivateAttr()
 
     def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
         memory_provider = embedder_config.get("provider") if embedder_config else None
         if memory_provider == "mem0":
             try:
                 from crewai.memory.storage.mem0_storage import Mem0Storage
-            except ImportError:
+            except ImportError as e:
                 raise ImportError(
                     "Mem0 is not installed. Please install it with `pip install mem0ai`."
-                )
+                ) from e
             config = embedder_config.get("config") if embedder_config else None
             storage = Mem0Storage(type="short_term", crew=crew, config=config)
         else:
@@ -56,7 +56,7 @@ class ShortTermMemory(Memory):
     def save(
         self,
         value: Any,
-        metadata: Optional[Dict[str, Any]] = None,
+        metadata: dict[str, Any] | None = None,
     ) -> None:
         crewai_event_bus.emit(
             self,
@@ -112,8 +112,8 @@ class ShortTermMemory(Memory):
     def search(
         self,
         query: str,
-        limit: int = 3,
-        score_threshold: float = 0.35,
+        limit: int = 5,
+        score_threshold: float = 0.6,
     ):
         crewai_event_bus.emit(
             self,
@@ -167,4 +167,4 @@ class ShortTermMemory(Memory):
         except Exception as e:
             raise Exception(
                 f"An error occurred while resetting the short-term memory: {e}"
-            )
+            ) from e
src/crewai/memory/short_term/short_term_memory_item.py

@@ -1,12 +1,12 @@
-from typing import Any, Dict, Optional
+from typing import Any
 
 
 class ShortTermMemoryItem:
     def __init__(
         self,
         data: Any,
-        agent: Optional[str] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        agent: str | None = None,
+        metadata: dict[str, Any] | None = None,
     ):
         self.data = data
         self.agent = agent
src/crewai/memory/storage/interface.py

@@ -1,15 +1,15 @@
-from typing import Any, Dict, List
+from typing import Any
 
 
 class Storage:
     """Abstract base class defining the storage interface"""
 
-    def save(self, value: Any, metadata: Dict[str, Any]) -> None:
+    def save(self, value: Any, metadata: dict[str, Any]) -> None:
         pass
 
     def search(
         self, query: str, limit: int, score_threshold: float
-    ) -> Dict[str, Any] | List[Any]:
+    ) -> dict[str, Any] | list[Any]:
         return {}
 
     def reset(self) -> None:
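Review note: `Storage` is the contract every backend touched in this diff implements. A minimal in-memory conformer, illustrative only; the substring match below stands in for real vector-similarity search:

```python
# Illustrative in-memory Storage implementation matching the interface
# above; substring matching stands in for real vector similarity search.
from typing import Any


class InMemoryStorage:
    def __init__(self) -> None:
        self._items: list[tuple[Any, dict[str, Any]]] = []

    def save(self, value: Any, metadata: dict[str, Any]) -> None:
        self._items.append((value, metadata))

    def search(
        self, query: str, limit: int, score_threshold: float
    ) -> dict[str, Any] | list[Any]:
        hits = [v for v, _ in self._items if query.lower() in str(v).lower()]
        return hits[:limit]

    def reset(self) -> None:
        self._items.clear()


store = InMemoryStorage()
store.save("Paris is the capital of France", {"type": "fact"})
print(store.search("paris", limit=5, score_threshold=0.6))
```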
src/crewai/memory/storage/kickoff_task_outputs_storage.py

@@ -2,7 +2,7 @@ import json
 import logging
 import sqlite3
 from pathlib import Path
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 from crewai.task import Task
 from crewai.utilities import Printer
@@ -18,7 +18,7 @@ class KickoffTaskOutputsSQLiteStorage:
     An updated SQLite storage class for kickoff task outputs storage.
     """
 
-    def __init__(self, db_path: Optional[str] = None) -> None:
+    def __init__(self, db_path: str | None = None) -> None:
         if db_path is None:
             # Get the parent directory of the default db path and create our db file there
             db_path = str(Path(db_storage_path()) / "latest_kickoff_task_outputs.db")
@@ -57,15 +57,15 @@ class KickoffTaskOutputsSQLiteStorage:
         except sqlite3.Error as e:
             error_msg = DatabaseError.format_error(DatabaseError.INIT_ERROR, e)
             logger.error(error_msg)
-            raise DatabaseOperationError(error_msg, e)
+            raise DatabaseOperationError(error_msg, e) from e
 
     def add(
         self,
         task: Task,
-        output: Dict[str, Any],
+        output: dict[str, Any],
         task_index: int,
         was_replayed: bool = False,
-        inputs: Dict[str, Any] | None = None,
+        inputs: dict[str, Any] | None = None,
     ) -> None:
         """Add a new task output record to the database.
 
@@ -103,7 +103,7 @@ class KickoffTaskOutputsSQLiteStorage:
         except sqlite3.Error as e:
             error_msg = DatabaseError.format_error(DatabaseError.SAVE_ERROR, e)
             logger.error(error_msg)
-            raise DatabaseOperationError(error_msg, e)
+            raise DatabaseOperationError(error_msg, e) from e
 
     def update(
         self,
@@ -138,7 +138,7 @@ class KickoffTaskOutputsSQLiteStorage:
                 else value
             )
 
-            query = f"UPDATE latest_kickoff_task_outputs SET {', '.join(fields)} WHERE task_index = ?"  # nosec
+            query = f"UPDATE latest_kickoff_task_outputs SET {', '.join(fields)} WHERE task_index = ?"  # nosec # noqa: S608
             values.append(task_index)
 
             cursor.execute(query, tuple(values))
@@ -151,9 +151,9 @@ class KickoffTaskOutputsSQLiteStorage:
         except sqlite3.Error as e:
             error_msg = DatabaseError.format_error(DatabaseError.UPDATE_ERROR, e)
             logger.error(error_msg)
-            raise DatabaseOperationError(error_msg, e)
+            raise DatabaseOperationError(error_msg, e) from e
 
-    def load(self) -> List[Dict[str, Any]]:
+    def load(self) -> list[dict[str, Any]]:
         """Load all task output records from the database.
 
         Returns:
@@ -192,7 +192,7 @@ class KickoffTaskOutputsSQLiteStorage:
         except sqlite3.Error as e:
             error_msg = DatabaseError.format_error(DatabaseError.LOAD_ERROR, e)
             logger.error(error_msg)
-            raise DatabaseOperationError(error_msg, e)
+            raise DatabaseOperationError(error_msg, e) from e
 
     def delete_all(self) -> None:
         """Delete all task output records from the database.
@@ -212,4 +212,4 @@ class KickoffTaskOutputsSQLiteStorage:
         except sqlite3.Error as e:
             error_msg = DatabaseError.format_error(DatabaseError.DELETE_ERROR, e)
             logger.error(error_msg)
-            raise DatabaseOperationError(error_msg, e)
+            raise DatabaseOperationError(error_msg, e) from e
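Review note: the recurring `raise DatabaseOperationError(error_msg, e) from e` change is about traceback quality: chaining with `from e` keeps the original `sqlite3.Error` attached as `__cause__` instead of reporting it as an unrelated error that happened "during handling". A quick stdlib-only demonstration:

```python
# Quick illustration of why `raise ... from e` matters: the original
# sqlite3 error is preserved as __cause__ on the wrapping exception.
import sqlite3


class DatabaseOperationError(Exception):
    pass


try:
    try:
        sqlite3.connect("/nonexistent/dir/db.sqlite")  # cannot open this path
    except sqlite3.Error as e:
        raise DatabaseOperationError("SAVE failed") from e
except DatabaseOperationError as err:
    print(type(err.__cause__).__name__)  # OperationalError
```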
src/crewai/memory/storage/ltm_sqlite_storage.py

@@ -1,7 +1,7 @@
 import json
 import sqlite3
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Union
+from typing import Any
 
 from crewai.utilities import Printer
 from crewai.utilities.paths import db_storage_path
@@ -12,9 +12,7 @@ class LTMSQLiteStorage:
     An updated SQLite storage class for LTM data storage.
     """
 
-    def __init__(
-        self, db_path: Optional[str] = None
-    ) -> None:
+    def __init__(self, db_path: str | None = None) -> None:
         if db_path is None:
             # Get the parent directory of the default db path and create our db file there
             db_path = str(Path(db_storage_path()) / "long_term_memory_storage.db")
@@ -53,9 +51,9 @@ class LTMSQLiteStorage:
     def save(
         self,
         task_description: str,
-        metadata: Dict[str, Any],
+        metadata: dict[str, Any],
         datetime: str,
-        score: Union[int, float],
+        score: int | float,
     ) -> None:
         """Saves data to the LTM table with error handling."""
         try:
@@ -75,9 +73,7 @@ class LTMSQLiteStorage:
                 color="red",
             )
 
-    def load(
-        self, task_description: str, latest_n: int
-    ) -> Optional[List[Dict[str, Any]]]:
+    def load(self, task_description: str, latest_n: int) -> list[dict[str, Any]] | None:
         """Queries the LTM table by task description with error handling."""
         try:
             with sqlite3.connect(self.db_path) as conn:
@@ -89,7 +85,7 @@ class LTMSQLiteStorage:
                     WHERE task_description = ?
                     ORDER BY datetime DESC, score ASC
                     LIMIT {latest_n}
-                """,  # nosec
+                """,  # nosec # noqa: S608
                 (task_description,),
             )
             rows = cursor.fetchall()
@@ -125,4 +121,4 @@ class LTMSQLiteStorage:
                 content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
                 color="red",
             )
-        return None
+        return
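Review note: the `# noqa: S608` additions acknowledge Ruff's SQL-injection rule on these f-string queries. The suppression is defensible here because only internal fragments (known column names, an integer limit) are interpolated, while user-supplied data stays behind `?` placeholders. A sketch of that split:

```python
# Sketch of the pattern the `# nosec # noqa: S608` comments vouch for:
# only trusted fragments are interpolated; user values use placeholders.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE ltm (task_description TEXT, score REAL)")
conn.execute("INSERT INTO ltm VALUES (?, ?)", ("summarize", 0.9))

latest_n = 3  # internal integer, never user-controlled
rows = conn.execute(
    # user-supplied values would be bound via `?` parameters instead
    f"SELECT task_description, score FROM ltm LIMIT {latest_n}",  # noqa: S608
).fetchall()
print(rows)  # [('summarize', 0.9)]
```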
src/crewai/memory/storage/mem0_storage.py

@@ -1,10 +1,13 @@
-import os
-from typing import Any, Dict, List
+import re
+from collections import defaultdict
+from collections.abc import Iterable
+from typing import Any
 
-from mem0 import Memory, MemoryClient
+from mem0 import Memory, MemoryClient  # type: ignore[import-untyped,import-not-found]
 
 from crewai.memory.storage.interface import Storage
-from crewai.utilities.chromadb import sanitize_collection_name
+from crewai.rag.chromadb.utils import _sanitize_collection_name
 
 MAX_AGENT_ID_LENGTH_MEM0 = 255
@@ -13,6 +16,7 @@ class Mem0Storage(Storage):
     """
     Extends Storage to handle embedding and searching across entities using Mem0.
     """
+
     def __init__(self, type, crew=None, config=None):
         super().__init__()
@@ -28,7 +32,8 @@ class Mem0Storage(Storage):
         supported_types = {"short_term", "long_term", "entities", "external"}
         if type not in supported_types:
             raise ValueError(
-                f"Invalid type '{type}' for Mem0Storage. Must be one of: {', '.join(supported_types)}"
+                f"Invalid type '{type}' for Mem0Storage. "
+                f"Must be one of: {', '.join(supported_types)}"
             )
 
     def _extract_config_values(self):
@@ -66,7 +71,8 @@ class Mem0Storage(Storage):
         - Includes user_id and agent_id if both are present.
         - Includes user_id if only user_id is present.
         - Includes agent_id if only agent_id is present.
-        - Includes run_id if memory_type is 'short_term' and mem0_run_id is present.
+        - Includes run_id if memory_type is 'short_term' and
+          mem0_run_id is present.
         """
         filter = defaultdict(list)
 
@@ -86,21 +92,44 @@ class Mem0Storage(Storage):
 
         return filter
 
-    def save(self, value: Any, metadata: Dict[str, Any]) -> None:
+    def save(self, value: Any, metadata: dict[str, Any]) -> None:
+        def _last_content(messages: Iterable[dict[str, Any]], role: str) -> str:
+            return next(
+                (
+                    m.get("content", "")
+                    for m in reversed(list(messages))
+                    if m.get("role") == role
+                ),
+                "",
+            )
+
+        conversations = []
+        messages = metadata.pop("messages", None)
+        if messages:
+            last_user = _last_content(messages, "user")
+            last_assistant = _last_content(messages, "assistant")
+
+            if user_msg := self._get_user_message(last_user):
+                conversations.append({"role": "user", "content": user_msg})
+
+            if assistant_msg := self._get_assistant_message(last_assistant):
+                conversations.append({"role": "assistant", "content": assistant_msg})
+        else:
+            conversations.append({"role": "assistant", "content": value})
+
         user_id = self.config.get("user_id", "")
-        assistant_message = [{"role" : "assistant","content" : value}]
 
         base_metadata = {
             "short_term": "short_term",
             "long_term": "long_term",
             "entities": "entity",
-            "external": "external"
+            "external": "external",
         }
 
         # Shared base params
         params: dict[str, Any] = {
             "metadata": {"type": base_metadata[self.memory_type], **metadata},
-            "infer": self.infer
+            "infer": self.infer,
         }
 
         # MemoryClient-specific overrides
@@ -119,15 +148,17 @@ class Mem0Storage(Storage):
         if agent_id := self.config.get("agent_id", self._get_agent_name()):
             params["agent_id"] = agent_id
 
-        self.memory.add(assistant_message, **params)
+        self.memory.add(conversations, **params)
 
-    def search(self,query: str,limit: int = 3,score_threshold: float = 0.35) -> List[Any]:
+    def search(
+        self, query: str, limit: int = 5, score_threshold: float = 0.6
+    ) -> list[Any]:
         params = {
             "query": query,
             "limit": limit,
             "version": "v2",
-            "output_format": "v1.1"
-        }
+            "output_format": "v1.1",
+        }
 
         if user_id := self.config.get("user_id", ""):
             params["user_id"] = user_id
@@ -148,10 +179,10 @@ class Mem0Storage(Storage):
         # automatically when the crew is created.
 
         params["filters"] = self._create_filter_for_search()
-        params['threshold'] = score_threshold
+        params["threshold"] = score_threshold
 
         if isinstance(self.memory, Memory):
-            del params["metadata"], params["version"], params['output_format']
+            del params["metadata"], params["version"], params["output_format"]
             if params.get("run_id"):
                 del params["run_id"]
 
@@ -159,8 +190,8 @@ class Mem0Storage(Storage):
 
         # This makes it compatible for Contextual Memory to retrieve
         for result in results["results"]:
-            result["context"] = result["memory"]
+            result["content"] = result["memory"]
 
         return [r for r in results["results"]]
 
     def reset(self):
@@ -180,4 +211,19 @@ class Mem0Storage(Storage):
         agents = self.crew.agents
         agents = [self._sanitize_role(agent.role) for agent in agents]
         agents = "_".join(agents)
-        return sanitize_collection_name(name=agents, max_collection_length=MAX_AGENT_ID_LENGTH_MEM0)
+        return _sanitize_collection_name(
+            name=agents, max_collection_length=MAX_AGENT_ID_LENGTH_MEM0
+        )
+
+    def _get_assistant_message(self, text: str) -> str:
+        marker = "Final Answer:"
+        if marker in text:
+            return text.split(marker, 1)[1].strip()
+        return text
+
+    def _get_user_message(self, text: str) -> str:
+        pattern = r"User message:\s*(.*)"
+        match = re.search(pattern, text)
+        if match:
+            return match.group(1).strip()
+        return text
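Review note: the new `_get_user_message` / `_get_assistant_message` helpers strip crewAI prompt scaffolding ("User message:" prefixes and "Final Answer:" markers) before content is sent to Mem0. A standalone re-implementation to show their behavior on a typical agent transcript:

```python
# Standalone re-implementation of the two extraction helpers added above,
# showing what they pull out of a typical agent transcript.
import re


def get_assistant_message(text: str) -> str:
    marker = "Final Answer:"
    if marker in text:
        return text.split(marker, 1)[1].strip()
    return text


def get_user_message(text: str) -> str:
    match = re.search(r"User message:\s*(.*)", text)
    if match:
        return match.group(1).strip()
    return text


print(get_user_message("Context...\nUser message: What is our refund policy?"))
# -> "What is our refund policy?"
print(get_assistant_message("Thought: done\nFinal Answer: 30-day refunds."))
# -> "30-day refunds."
```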
Some files were not shown because too many files have changed in this diff.