Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-07 07:08:31 +00:00
Compare commits
4 Commits
devin/1758 ... devin/1757
| Author | SHA1 | Date |
|---|---|---|
| | d53bb141d8 | |
| | ac93c81076 | |
| | 2c59748437 | |
| | b6c2493111 | |
46 .github/workflows/build-uv-cache.yml (vendored)
@@ -1,46 +0,0 @@
name: Build uv cache

on:
  push:
    branches:
      - main
    paths:
      - "uv.lock"
      - "pyproject.toml"
  workflow_dispatch:

permissions:
  contents: read

jobs:
  build-cache:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10", "3.11", "3.12", "3.13"]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
          python-version: ${{ matrix.python-version }}
          enable-cache: false

      - name: Install dependencies and populate cache
        run: |
          echo "Building global UV cache for Python ${{ matrix.python-version }}..."
          uv sync --all-groups --all-extras --no-install-project
          echo "Cache populated successfully"

      - name: Save uv caches
        uses: actions/cache/save@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
37 .github/workflows/linter.yml (vendored)
@@ -2,9 +2,6 @@ name: Lint

on: [pull_request]

permissions:
  contents: read

jobs:
  lint:
    runs-on: ubuntu-latest
@@ -18,27 +15,19 @@ jobs:
      - name: Fetch Target Branch
        run: git fetch origin $TARGET_BRANCH --depth=1

      - name: Restore global uv cache
        id: cache-restore
        uses: actions/cache/restore@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
          restore-keys: |
            uv-main-py3.11-

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
          python-version: "3.11"
          enable-cache: false
          enable-cache: true
          cache-dependency-glob: |
            **/pyproject.toml
            **/uv.lock

      - name: Set up Python
        run: uv python install 3.11

      - name: Install dependencies
        run: uv sync --all-groups --all-extras --no-install-project
        run: uv sync --dev --no-install-project

      - name: Get Changed Python Files
        id: changed-files
@@ -56,13 +45,3 @@ jobs:
            | tr ' ' '\n' \
            | grep -v 'src/crewai/cli/templates/' \
            | xargs -I{} uv run ruff check "{}"

      - name: Save uv caches
        if: steps.cache-restore.outputs.cache-hit != 'true'
        uses: actions/cache/save@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
29 .github/workflows/security-checker.yml (vendored, new file)
@@ -0,0 +1,29 @@
name: Security Checker

on: [pull_request]

jobs:
  security-check:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          enable-cache: true
          cache-dependency-glob: |
            **/pyproject.toml
            **/uv.lock

      - name: Set up Python
        run: uv python install 3.11

      - name: Install dependencies
        run: uv sync --dev --no-install-project

      - name: Run Bandit
        run: uv run bandit -c pyproject.toml -r src/ -ll
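Note: `bandit -c pyproject.toml -r src/ -ll` reads its settings from the project's `[tool.bandit]` table and, because of `-ll`, only reports findings at medium severity or above. As a point of reference, a minimal sketch of the kind of call Bandit flags and of the per-line suppression style that also appears in the `src/crewai/__init__.py` hunk further below; the function and URL here are illustrative, not taken from the repository:

```python
import urllib.request

def fetch(url: str) -> bytes:
    # Bandit's B310 check ("Audit url open for permitted schemes") flags urlopen
    # calls like this one; the trailing `# nosec B310` comment is the explicit,
    # per-line way to acknowledge and silence that finding.
    with urllib.request.urlopen(url, timeout=2) as resp:  # nosec B310
        return resp.read()
```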
65 .github/workflows/tests.yml (vendored)
@@ -3,7 +3,7 @@ name: Run Tests
on: [pull_request]

permissions:
  contents: read
  contents: write

env:
  OPENAI_API_KEY: fake-api-key
@@ -22,76 +22,29 @@ jobs:
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0 # Fetch all history for proper diff

      - name: Restore global uv cache
        id: cache-restore
        uses: actions/cache/restore@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
          restore-keys: |
            uv-main-py${{ matrix.python-version }}-

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
          python-version: ${{ matrix.python-version }}
          enable-cache: false
          enable-cache: true
          cache-dependency-glob: |
            **/pyproject.toml
            **/uv.lock

      - name: Set up Python ${{ matrix.python-version }}
        run: uv python install ${{ matrix.python-version }}

      - name: Install the project
        run: uv sync --all-groups --all-extras

      - name: Restore test durations
        uses: actions/cache/restore@v4
        with:
          path: .test_durations_py*
          key: test-durations-py${{ matrix.python-version }}
        run: uv sync --dev --all-extras

      - name: Run tests (group ${{ matrix.group }} of 8)
        run: |
          PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
          DURATION_FILE=".test_durations_py${PYTHON_VERSION_SAFE}"

          # Temporarily always skip cached durations to fix test splitting
          # When durations don't match, pytest-split runs duplicate tests instead of splitting
          echo "Using even test splitting (duration cache disabled until fix merged)"
          DURATIONS_ARG=""

          # Original logic (disabled temporarily):
          # if [ ! -f "$DURATION_FILE" ]; then
          #   echo "No cached durations found, tests will be split evenly"
          #   DURATIONS_ARG=""
          # elif git diff origin/${{ github.base_ref }}...HEAD --name-only 2>/dev/null | grep -q "^tests/.*\.py$"; then
          #   echo "Test files have changed, skipping cached durations to avoid mismatches"
          #   DURATIONS_ARG=""
          # else
          #   echo "No test changes detected, using cached test durations for optimal splitting"
          #   DURATIONS_ARG="--durations-path=${DURATION_FILE}"
          # fi

          uv run pytest \
            --block-network \
            --timeout=30 \
            -vv \
            --splits 8 \
            --group ${{ matrix.group }} \
            $DURATIONS_ARG \
            --durations=10 \
            -n auto \
            --maxfail=3

      - name: Save uv caches
        if: steps.cache-restore.outputs.cache-hit != 'true'
        uses: actions/cache/save@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
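Note: the test job above shards the suite with pytest-split (`--splits 8 --group N`) and pytest-xdist (`-n auto`). A minimal sketch of reproducing one CI shard locally through pytest's Python entry point; it assumes pytest-split, pytest-xdist, and pytest-timeout are installed, and it deliberately passes no durations file, mirroring the `DURATIONS_ARG=""` branch in the workflow:

```python
import pytest

# Run roughly what CI would run for group 3 of 8, split evenly because no
# durations file is provided.
exit_code = pytest.main([
    "--splits", "8",      # pytest-split: total number of groups
    "--group", "3",       # pytest-split: which group this invocation runs
    "-n", "auto",         # pytest-xdist: parallel workers
    "--timeout", "30",    # pytest-timeout: per-test timeout, as in the workflow
    "--maxfail", "3",
])
raise SystemExit(exit_code)
```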
36 .github/workflows/type-checker.yml (vendored)
@@ -3,7 +3,7 @@ name: Run Type Checks
on: [pull_request]

permissions:
  contents: read
  contents: write

jobs:
  type-checker-matrix:
@@ -20,27 +20,19 @@ jobs:
        with:
          fetch-depth: 0 # Fetch all history for proper diff

      - name: Restore global uv cache
        id: cache-restore
        uses: actions/cache/restore@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
          restore-keys: |
            uv-main-py${{ matrix.python-version }}-

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
          python-version: ${{ matrix.python-version }}
          enable-cache: false
          enable-cache: true
          cache-dependency-glob: |
            **/pyproject.toml
            **/uv.lock

      - name: Set up Python ${{ matrix.python-version }}
        run: uv python install ${{ matrix.python-version }}

      - name: Install dependencies
        run: uv sync --all-groups --all-extras
        run: uv sync --dev --all-extras --no-install-project

      - name: Get changed Python files
        id: changed-files
@@ -74,16 +66,6 @@ jobs:
        if: steps.changed-files.outputs.has_changes == 'false'
        run: echo "No Python files in src/ were modified - skipping type checks"

      - name: Save uv caches
        if: steps.cache-restore.outputs.cache-hit != 'true'
        uses: actions/cache/save@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}

  # Summary job to provide single status for branch protection
  type-checker:
    name: type-checker
71 .github/workflows/update-test-durations.yml (vendored)
@@ -1,71 +0,0 @@
name: Update Test Durations

on:
  push:
    branches:
      - main
    paths:
      - 'tests/**/*.py'
  workflow_dispatch:

permissions:
  contents: read

jobs:
  update-durations:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.10', '3.11', '3.12', '3.13']
    env:
      OPENAI_API_KEY: fake-api-key
      PYTHONUNBUFFERED: 1

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Restore global uv cache
        id: cache-restore
        uses: actions/cache/restore@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
          restore-keys: |
            uv-main-py${{ matrix.python-version }}-

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
          python-version: ${{ matrix.python-version }}
          enable-cache: false

      - name: Install the project
        run: uv sync --all-groups --all-extras

      - name: Run all tests and store durations
        run: |
          PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
          uv run pytest --store-durations --durations-path=.test_durations_py${PYTHON_VERSION_SAFE} -n auto
        continue-on-error: true

      - name: Save durations to cache
        if: always()
        uses: actions/cache/save@v4
        with:
          path: .test_durations_py*
          key: test-durations-py${{ matrix.python-version }}

      - name: Save uv caches
        if: steps.cache-restore.outputs.cache-hit != 'true'
        uses: actions/cache/save@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
@@ -16,4 +16,3 @@ repos:
        entry: uv run mypy
        language: system
        types: [python]
        exclude: ^tests/
@@ -142,7 +142,7 @@ with MCPServerAdapter(server_params, "tool_name", connect_timeout=60) as mcp_too

## Using with CrewBase

To use MCPServer tools within a CrewBase class, use the `get_mcp_tools` method. Server configurations should be provided via the `mcp_server_params` attribute. You can pass either a single configuration or a list of multiple server configurations.
To use MCPServer tools within a CrewBase class, use the `mcp_tools` method. Server configurations should be provided via the mcp_server_params attribute. You can pass either a single configuration or a list of multiple server configurations.

```python
@CrewBase
@@ -175,34 +175,6 @@ class CrewWithMCP:
    # ... rest of your crew setup ...
```

### Connection Timeout Configuration

You can configure the connection timeout for MCP servers by setting the `mcp_connect_timeout` class attribute. If no timeout is specified, it defaults to 30 seconds.

```python
@CrewBase
class CrewWithMCP:
    mcp_server_params = [...]
    mcp_connect_timeout = 60  # 60 seconds timeout for all MCP connections

    @agent
    def your_agent(self):
        return Agent(config=self.agents_config["your_agent"], tools=self.get_mcp_tools())
```

```python
@CrewBase
class CrewWithDefaultTimeout:
    mcp_server_params = [...]
    # No mcp_connect_timeout specified - uses default 30 seconds

    @agent
    def your_agent(self):
        return Agent(config=self.agents_config["your_agent"], tools=self.get_mcp_tools())
```

### Filtering Tools

You can filter which tools are available to your agent by passing a list of tool names to the `get_mcp_tools` method.

```python
@@ -214,22 +186,6 @@ def another_agent(self):
    )
```

The timeout configuration applies to all MCP tool calls within the crew:

```python
@CrewBase
class CrewWithCustomTimeout:
    mcp_server_params = [...]
    mcp_connect_timeout = 90  # 90 seconds timeout for all MCP connections

    @agent
    def filtered_agent(self):
        return Agent(
            config=self.agents_config["your_agent"],
            tools=self.get_mcp_tools("tool_1", "tool_2")  # specific tools with custom timeout
        )
```

## Explore MCP Integrations

<CardGroup cols={2}>
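Note: the `@@ -142,7` hunk header above references the plain `MCPServerAdapter` context-manager usage that sits just outside this hunk's context. A hedged sketch of that pattern for orientation; the `server_params` value below is a placeholder assumption and is not taken from this diff:

```python
from crewai_tools import MCPServerAdapter

# Placeholder server configuration; a real setup would point at an actual MCP server.
# The exact shape of this dict is an assumption, not part of the diff above.
server_params = {"url": "http://localhost:8000/sse", "transport": "sse"}

# The adapter connects on entry, exposes the server's tools (optionally filtered by
# name, as in the hunk header), and disconnects on exit.
with MCPServerAdapter(server_params, "tool_name", connect_timeout=60) as mcp_tools:
    print([tool.name for tool in mcp_tools])
```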
@@ -7,8 +7,8 @@ mode: "wide"

## Overview

The [Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP) provides a standardized way for AI agents to supply context to LLMs by communicating with external services known as MCP servers.
The `crewai-tools` library extends CrewAI's capabilities, letting you seamlessly integrate tools provided by these MCP servers into your agents.
The [Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP) provides a standardized way for AI agents to supply context to LLMs by communicating with external services known as MCP servers.
The `crewai-tools` library extends CrewAI's capabilities, letting you seamlessly integrate tools provided by these MCP servers into your agents.
This gives your crews access to a vast ecosystem of functionality.

The following transport mechanisms are currently supported:
@@ -142,7 +142,7 @@ with MCPServerAdapter(server_params, "tool_name", connect_timeout=60) as mcp_too

## Using with CrewBase

To use MCPServer tools within a CrewBase class, use the `get_mcp_tools` method. Server configurations should be provided via the `mcp_server_params` attribute. You can pass either a single configuration or a list of multiple server configurations.
To use MCPServer tools within a CrewBase class, use the `mcp_tools` method. Server configurations should be provided via the mcp_server_params attribute. You can pass either a single configuration or a list of multiple server configurations.

```python
@CrewBase
@@ -175,34 +175,6 @@ class CrewWithMCP:
    # ... rest of your crew setup ...
```

### Connection Timeout Configuration

You can configure the connection timeout for MCP servers by setting the `mcp_connect_timeout` class attribute. If no timeout is specified, it defaults to 30 seconds.

```python
@CrewBase
class CrewWithMCP:
    mcp_server_params = [...]
    mcp_connect_timeout = 60  # 60 second timeout for all MCP connections

    @agent
    def your_agent(self):
        return Agent(config=self.agents_config["your_agent"], tools=self.get_mcp_tools())
```

```python
@CrewBase
class CrewWithDefaultTimeout:
    mcp_server_params = [...]
    # No mcp_connect_timeout specified - uses the default 30 seconds

    @agent
    def your_agent(self):
        return Agent(config=self.agents_config["your_agent"], tools=self.get_mcp_tools())
```

### Filtering Tools

You can filter which tools are made available to your agent by passing a list of tool names to the `get_mcp_tools` method.

```python
@@ -214,22 +186,6 @@ def another_agent(self):
    )
```

The timeout configuration applies to all MCP tool calls within the crew:

```python
@CrewBase
class CrewWithCustomTimeout:
    mcp_server_params = [...]
    mcp_connect_timeout = 90  # 90 second timeout for all MCP connections

    @agent
    def filtered_agent(self):
        return Agent(
            config=self.agents_config["your_agent"],
            tools=self.get_mcp_tools("tool_1", "tool_2")  # specific tools with the custom timeout
        )
```

## Explore MCP Integrations

<CardGroup cols={2}>
@@ -305,4 +261,4 @@ The SSE transport can be exposed to DNS rebinding attacks if it is not properly secured

### Limitations
* **Supported primitives**: Currently, `MCPServerAdapter` primarily supports adapting MCP `tools`. Other MCP primitives (such as `prompts` or `resources`) are not directly integrated as CrewAI components through this adapter at this time.
* **Output handling**: The adapter typically processes the primary text output of an MCP tool (e.g., `.content[0].text`). Complex or multimodal outputs that do not fit this pattern may require custom handling.
* **Output handling**: The adapter typically processes the primary text output of an MCP tool (e.g., `.content[0].text`). Complex or multimodal outputs that do not fit this pattern may require custom handling.
@@ -118,7 +118,7 @@ with MCPServerAdapter(server_params, connect_timeout=60) as mcp_tools:

## Using with CrewBase

To use tools from MCP servers within a CrewBase class, use the `get_mcp_tools` method. Server configurations should be provided via the `mcp_server_params` attribute. You can pass a single configuration or a list of multiple configurations.
To use tools from MCP servers within a CrewBase class, use the `mcp_tools` method. Server configurations should be provided via the mcp_server_params attribute. You can pass a single configuration or a list of multiple configurations.

```python
@CrewBase
@@ -146,65 +146,10 @@ class CrewWithMCP:

    @agent
    def your_agent(self):
        return Agent(config=self.agents_config["your_agent"], tools=self.get_mcp_tools())  # get all available tools
        return Agent(config=self.agents_config["your_agent"], tools=self.get_mcp_tools())  # you can also filter which tools are made available

    # ... rest of your crew setup ...
```

### Connection Timeout Configuration

You can configure the connection timeout for MCP servers by setting the `mcp_connect_timeout` class attribute. If no timeout is specified, the default is 30 seconds.

```python
@CrewBase
class CrewWithMCP:
    mcp_server_params = [...]
    mcp_connect_timeout = 60  # 60 second timeout for all MCP connections

    @agent
    def your_agent(self):
        return Agent(config=self.agents_config["your_agent"], tools=self.get_mcp_tools())
```

```python
@CrewBase
class CrewWithDefaultTimeout:
    mcp_server_params = [...]
    # No mcp_connect_timeout specified - uses the default of 30 seconds

    @agent
    def your_agent(self):
        return Agent(config=self.agents_config["your_agent"], tools=self.get_mcp_tools())
```

### Filtering Tools

You can filter which tools are available to your agent by passing a list of tool names to the `get_mcp_tools` method.

```python
@agent
def another_agent(self):
    return Agent(
        config=self.agents_config["your_agent"],
        tools=self.get_mcp_tools("tool_1", "tool_2")  # get specific tools
    )
```

The timeout configuration applies to all MCP tool calls within the crew:

```python
@CrewBase
class CrewWithCustomTimeout:
    mcp_server_params = [...]
    mcp_connect_timeout = 90  # 90 second timeout for all MCP connections

    @agent
    def filtered_agent(self):
        return Agent(
            config=self.agents_config["your_agent"],
            tools=self.get_mcp_tools("tool_1", "tool_2")  # specific tools with a custom timeout
        )
```
## Explore MCP Integrations

<CardGroup cols={2}>
297 examples/responsibility_tracking_example.py (new file)
@@ -0,0 +1,297 @@
"""
Example demonstrating the formal responsibility tracking system in CrewAI.

This example shows how to:
1. Set up agents with capabilities
2. Use responsibility-based task assignment
3. Monitor accountability and performance
4. Generate system insights and recommendations
"""

from crewai import Agent, Crew, Task
from crewai.responsibility.models import AgentCapability, CapabilityType, TaskRequirement
from crewai.responsibility.system import ResponsibilitySystem
from crewai.responsibility.assignment import AssignmentStrategy


def create_agents_with_capabilities():
    """Create agents with defined capabilities."""

    python_capabilities = [
        AgentCapability(
            name="Python Programming",
            capability_type=CapabilityType.TECHNICAL,
            proficiency_level=0.9,
            confidence_score=0.8,
            description="Expert in Python development and scripting",
            keywords=["python", "programming", "development", "scripting"]
        ),
        AgentCapability(
            name="Web Development",
            capability_type=CapabilityType.TECHNICAL,
            proficiency_level=0.7,
            confidence_score=0.7,
            description="Experience with web frameworks",
            keywords=["web", "flask", "django", "fastapi"]
        )
    ]

    python_agent = Agent(
        role="Python Developer",
        goal="Develop high-quality Python applications and scripts",
        backstory="Experienced Python developer with expertise in various frameworks",
        capabilities=python_capabilities
    )

    analysis_capabilities = [
        AgentCapability(
            name="Data Analysis",
            capability_type=CapabilityType.ANALYTICAL,
            proficiency_level=0.9,
            confidence_score=0.9,
            description="Expert in statistical analysis and data interpretation",
            keywords=["data", "analysis", "statistics", "pandas", "numpy"]
        ),
        AgentCapability(
            name="Machine Learning",
            capability_type=CapabilityType.ANALYTICAL,
            proficiency_level=0.8,
            confidence_score=0.7,
            description="Experience with ML algorithms and model building",
            keywords=["machine learning", "ml", "scikit-learn", "tensorflow"]
        )
    ]

    analyst_agent = Agent(
        role="Data Analyst",
        goal="Extract insights from data and build predictive models",
        backstory="Data scientist with strong statistical background",
        capabilities=analysis_capabilities
    )

    management_capabilities = [
        AgentCapability(
            name="Project Management",
            capability_type=CapabilityType.LEADERSHIP,
            proficiency_level=0.8,
            confidence_score=0.9,
            description="Experienced in managing technical projects",
            keywords=["project management", "coordination", "planning"]
        ),
        AgentCapability(
            name="Communication",
            capability_type=CapabilityType.COMMUNICATION,
            proficiency_level=0.9,
            confidence_score=0.8,
            description="Excellent communication and coordination skills",
            keywords=["communication", "coordination", "stakeholder management"]
        )
    ]

    manager_agent = Agent(
        role="Project Manager",
        goal="Coordinate team efforts and ensure project success",
        backstory="Experienced project manager with technical background",
        capabilities=management_capabilities
    )

    return [python_agent, analyst_agent, manager_agent]


def create_tasks_with_requirements():
    """Create tasks with specific capability requirements."""

    data_processing_requirements = [
        TaskRequirement(
            capability_name="Python Programming",
            capability_type=CapabilityType.TECHNICAL,
            minimum_proficiency=0.7,
            weight=1.0,
            keywords=["python", "programming"]
        ),
        TaskRequirement(
            capability_name="Data Analysis",
            capability_type=CapabilityType.ANALYTICAL,
            minimum_proficiency=0.6,
            weight=0.8,
            keywords=["data", "analysis"]
        )
    ]

    data_task = Task(
        description="Create a Python script to process and analyze customer data",
        expected_output="A Python script that processes CSV data and generates summary statistics"
    )

    web_dashboard_requirements = [
        TaskRequirement(
            capability_name="Web Development",
            capability_type=CapabilityType.TECHNICAL,
            minimum_proficiency=0.6,
            weight=1.0,
            keywords=["web", "development"]
        ),
        TaskRequirement(
            capability_name="Python Programming",
            capability_type=CapabilityType.TECHNICAL,
            minimum_proficiency=0.5,
            weight=0.7,
            keywords=["python", "programming"]
        )
    ]

    web_task = Task(
        description="Create a web dashboard to visualize data analysis results",
        expected_output="A web application with interactive charts and data visualization"
    )

    coordination_requirements = [
        TaskRequirement(
            capability_name="Project Management",
            capability_type=CapabilityType.LEADERSHIP,
            minimum_proficiency=0.7,
            weight=1.0,
            keywords=["project management", "coordination"]
        ),
        TaskRequirement(
            capability_name="Communication",
            capability_type=CapabilityType.COMMUNICATION,
            minimum_proficiency=0.8,
            weight=0.9,
            keywords=["communication", "coordination"]
        )
    ]

    coordination_task = Task(
        description="Coordinate the team efforts and ensure project milestones are met",
        expected_output="Project status report with timeline and deliverable tracking"
    )

    return [
        (data_task, data_processing_requirements),
        (web_task, web_dashboard_requirements),
        (coordination_task, coordination_requirements)
    ]


def demonstrate_responsibility_tracking():
    """Demonstrate the complete responsibility tracking workflow."""

    print("🚀 CrewAI Formal Responsibility Tracking System Demo")
    print("=" * 60)

    print("\n1. Creating agents with defined capabilities...")
    agents = create_agents_with_capabilities()

    for agent in agents:
        print(f" ✓ {agent.role}: {len(agent.capabilities)} capabilities")

    print("\n2. Setting up crew with responsibility tracking...")
    crew = Crew(
        agents=agents,
        tasks=[],
        verbose=True
    )

    responsibility_system = crew.responsibility_system
    print(f" ✓ Responsibility system enabled: {responsibility_system.enabled}")

    print("\n3. System overview:")
    overview = responsibility_system.get_system_overview()
    print(f" • Total agents: {overview['total_agents']}")
    print(f" • Capability distribution: {overview['capability_distribution']}")

    print("\n4. Creating tasks with capability requirements...")
    tasks_with_requirements = create_tasks_with_requirements()

    print("\n5. Demonstrating responsibility assignment strategies...")

    for i, (task, requirements) in enumerate(tasks_with_requirements):
        print(f"\n Task {i+1}: {task.description[:50]}...")

        for strategy in [AssignmentStrategy.GREEDY, AssignmentStrategy.BALANCED, AssignmentStrategy.OPTIMAL]:
            assignment = responsibility_system.assign_task_responsibility(
                task, requirements, strategy
            )

            if assignment:
                agent = responsibility_system._get_agent_by_id(assignment.agent_id)
                print(f" • {strategy.value}: {agent.role} (score: {assignment.responsibility_score:.3f})")
                print(f" Capabilities matched: {', '.join(assignment.capability_matches)}")

                responsibility_system.complete_task(
                    agent=agent,
                    task=task,
                    success=True,
                    completion_time=1800.0,
                    quality_score=0.85,
                    outcome_description="Task completed successfully"
                )
            else:
                print(f" • {strategy.value}: No suitable agent found")

    print("\n6. Agent status and performance:")
    for agent in agents:
        status = responsibility_system.get_agent_status(agent)
        print(f"\n {agent.role}:")
        print(f" • Current workload: {status['current_workload']}")
        if status['performance']:
            perf = status['performance']
            print(f" • Success rate: {perf['success_rate']:.2f}")
            print(f" • Quality score: {perf['quality_score']:.2f}")
            print(f" • Total tasks: {perf['total_tasks']}")

    print("\n7. Accountability tracking:")
    for agent in agents:
        report = responsibility_system.accountability.generate_accountability_report(agent=agent)
        if report['total_records'] > 0:
            print(f"\n {agent.role} accountability:")
            print(f" • Total records: {report['total_records']}")
            print(f" • Action types: {list(report['action_counts'].keys())}")
            print(f" • Recent actions: {len(report['recent_actions'])}")

    print("\n8. System recommendations:")
    recommendations = responsibility_system.generate_recommendations()
    if recommendations:
        for rec in recommendations:
            print(f" • {rec['type']}: {rec['description']} (Priority: {rec['priority']})")
    else:
        print(" • No recommendations at this time")

    print("\n9. Demonstrating task delegation:")
    if len(agents) >= 2:
        delegation_task = Task(
            description="Complex task requiring delegation",
            expected_output="Delegated task completion report"
        )

        responsibility_system.delegate_task(
            delegating_agent=agents[0],
            receiving_agent=agents[1],
            task=delegation_task,
            reason="Specialized expertise required"
        )

        print(f" ✓ Delegated task from {agents[0].role} to {agents[1].role}")

        delegation_records = responsibility_system.accountability.get_delegation_chain(delegation_task)
        print(f" • Delegation chain length: {len(delegation_records)}")

    print("\n" + "=" * 60)
    print("🎉 Responsibility tracking demonstration completed!")
    print("\nKey features demonstrated:")
    print("• Capability-based agent hierarchy")
    print("• Mathematical responsibility assignment")
    print("• Accountability logging")
    print("• Performance-based capability adjustment")


if __name__ == "__main__":
    try:
        demonstrate_responsibility_tracking()
        print("\n✅ All demonstrations completed successfully!")

    except Exception as e:
        print(f"\n❌ Error during demonstration: {str(e)}")
        import traceback
        traceback.print_exc()
@@ -48,7 +48,7 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"

[project.optional-dependencies]
tools = ["crewai-tools~=0.71.0"]
tools = ["crewai-tools~=0.69.0"]
embeddings = [
    "tiktoken~=0.8.0"
]
@@ -131,11 +131,7 @@ select = [
    "I001", # sort imports
    "I002", # remove unused imports
]
ignore = ["E501"] # ignore line too long

[tool.ruff.lint.per-file-ignores]
"tests/**/*.py" = ["S101"] # Allow assert statements in tests
"src/crewai/cli/subprocess_utils.py" = ["S602", "S603"] # Allow shell=True for Windows compatibility
ignore = ["E501", "S101"] # ignore line too long and assert statements

[tool.mypy]
exclude = ["src/crewai/cli/templates", "tests"]
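Note: the Ruff hunk above swaps the per-file ignore of `S101` (asserts allowed only under `tests/**`) for a global ignore. For context, this is the pattern S101 flags; the function below is purely illustrative and not taken from the repository:

```python
def apply_discount(price: float, pct: float) -> float:
    # S101: a bare assert is stripped when Python runs with -O, so Ruff normally
    # allows it only in test code; with the global ignore above it would no
    # longer be reported anywhere in the project.
    assert 0 <= pct <= 100, "pct must be a percentage between 0 and 100"
    return price * (1 - pct / 100)
```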
@@ -1,21 +1,6 @@
import threading
import urllib.request
import warnings
from typing import Any

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.llm_guardrail import LLMGuardrail
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry.telemetry import Telemetry


def _suppress_pydantic_deprecation_warnings() -> None:
    """Suppress Pydantic deprecation warnings using targeted monkey patch."""
@@ -35,12 +20,27 @@ def _suppress_pydantic_deprecation_warnings() -> None:
            return None
        return original_warn(message, category, stacklevel + 1, source)

    warnings.warn = filtered_warn  # type: ignore[assignment]
    setattr(warnings, "warn", filtered_warn)


_suppress_pydantic_deprecation_warnings()

__version__ = "0.186.1"
import threading
import urllib.request

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.llm_guardrail import LLMGuardrail
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry.telemetry import Telemetry

_telemetry_submitted = False


@@ -54,12 +54,13 @@ def _track_install() -> None:
    try:
        pixel_url = "https://api.scarf.sh/v2/packages/CrewAI/crewai/docs/00f2dad1-8334-4a39-934e-003b2e1146db"

        req = urllib.request.Request(pixel_url)  # noqa: S310
        req = urllib.request.Request(pixel_url)
        req.add_header("User-Agent", f"CrewAI-Python/{__version__}")

        with urllib.request.urlopen(req, timeout=2):  # noqa: S310
        with urllib.request.urlopen(req, timeout=2):  # nosec B310
            _telemetry_submitted = True
    except Exception:  # noqa: S110

    except Exception:
        pass


@@ -71,17 +72,19 @@ def _track_install_async() -> None:


_track_install_async()

__version__ = "0.177.0"
__all__ = [
    "LLM",
    "Agent",
    "BaseLLM",
    "Crew",
    "CrewOutput",
    "Flow",
    "Knowledge",
    "LLMGuardrail",
    "Process",
    "Task",
    "LLM",
    "BaseLLM",
    "Flow",
    "Knowledge",
    "TaskOutput",
    "LLMGuardrail",
    "__version__",
]
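Note: the `__init__.py` hunks above only show fragments of the `_suppress_pydantic_deprecation_warnings()` monkey patch (the wrapper's return path and the two assignment styles). A self-contained sketch of the general pattern, using `DeprecationWarning` as a stand-in for the Pydantic-specific filtering the real function performs:

```python
import warnings

def _install_warning_filter() -> None:
    """Wrap warnings.warn so one category is dropped and everything else passes through."""
    original_warn = warnings.warn

    def filtered_warn(message, category=UserWarning, stacklevel=1, source=None):
        if category is DeprecationWarning:  # stand-in for the Pydantic-specific check
            return None
        return original_warn(message, category, stacklevel + 1, source)

    warnings.warn = filtered_warn  # same assignment style as the diff above

_install_warning_filter()
warnings.warn("kept", UserWarning)             # still emitted
warnings.warn("dropped", DeprecationWarning)   # silently suppressed by the wrapper
```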
@@ -1,17 +1,10 @@
import shutil
import subprocess
import time
from collections.abc import Callable, Sequence
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Literal,
    Optional,
    Sequence,
    Tuple,
    Type,
    Union,
)

from pydantic import Field, InstanceOf, PrivateAttr, model_validator
@@ -19,12 +12,31 @@ from pydantic import Field, InstanceOf, PrivateAttr, model_validator
from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.agent_events import (
    AgentExecutionCompletedEvent,
    AgentExecutionErrorEvent,
    AgentExecutionStartedEvent,
)
from crewai.events.types.knowledge_events import (
    KnowledgeQueryCompletedEvent,
    KnowledgeQueryFailedEvent,
    KnowledgeQueryStartedEvent,
    KnowledgeRetrievalCompletedEvent,
    KnowledgeRetrievalStartedEvent,
    KnowledgeSearchQueryFailedEvent,
)
from crewai.events.types.memory_events import (
    MemoryRetrievalCompletedEvent,
    MemoryRetrievalStartedEvent,
)
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
from crewai.lite_agent import LiteAgent, LiteAgentOutput
from crewai.llm import BaseLLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.responsibility.models import AgentCapability
from crewai.security import Fingerprint
from crewai.task import Task
from crewai.tools import BaseTool
@@ -38,24 +50,6 @@ from crewai.utilities.agent_utils import (
)
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.converter import generate_model_description
from crewai.events.types.agent_events import (
    AgentExecutionCompletedEvent,
    AgentExecutionErrorEvent,
    AgentExecutionStartedEvent,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
    MemoryRetrievalStartedEvent,
    MemoryRetrievalCompletedEvent,
)
from crewai.events.types.knowledge_events import (
    KnowledgeQueryCompletedEvent,
    KnowledgeQueryFailedEvent,
    KnowledgeQueryStartedEvent,
    KnowledgeRetrievalCompletedEvent,
    KnowledgeRetrievalStartedEvent,
    KnowledgeSearchQueryFailedEvent,
)
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler
@@ -87,36 +81,36 @@ class Agent(BaseAgent):
    """

    _times_executed: int = PrivateAttr(default=0)
    max_execution_time: Optional[int] = Field(
    max_execution_time: int | None = Field(
        default=None,
        description="Maximum execution time for an agent to execute a task",
    )
    agent_ops_agent_name: str = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
    agent_ops_agent_id: str = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
    step_callback: Optional[Any] = Field(
    step_callback: Any | None = Field(
        default=None,
        description="Callback to be executed after each step of the agent execution.",
    )
    use_system_prompt: Optional[bool] = Field(
    use_system_prompt: bool | None = Field(
        default=True,
        description="Use system prompt for the agent.",
    )
    llm: Union[str, InstanceOf[BaseLLM], Any] = Field(
    llm: str | InstanceOf[BaseLLM] | Any = Field(
        description="Language model that will run the agent.", default=None
    )
    function_calling_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
    function_calling_llm: str | InstanceOf[BaseLLM] | Any | None = Field(
        description="Language model that will run the agent.", default=None
    )
    system_template: Optional[str] = Field(
    system_template: str | None = Field(
        default=None, description="System format for the agent."
    )
    prompt_template: Optional[str] = Field(
    prompt_template: str | None = Field(
        default=None, description="Prompt format for the agent."
    )
    response_template: Optional[str] = Field(
    response_template: str | None = Field(
        default=None, description="Response format for the agent."
    )
    allow_code_execution: Optional[bool] = Field(
    allow_code_execution: bool | None = Field(
        default=False, description="Enable code execution for the agent."
    )
    respect_context_window: bool = Field(
@@ -147,39 +141,44 @@ class Agent(BaseAgent):
        default=False,
        description="Whether the agent should reflect and create a plan before executing a task.",
    )
    max_reasoning_attempts: Optional[int] = Field(
    max_reasoning_attempts: int | None = Field(
        default=None,
        description="Maximum number of reasoning attempts before executing the task. If None, will try until ready.",
    )
    embedder: Optional[Dict[str, Any]] = Field(
    embedder: dict[str, Any] | None = Field(
        default=None,
        description="Embedder configuration for the agent.",
    )
    agent_knowledge_context: Optional[str] = Field(
    agent_knowledge_context: str | None = Field(
        default=None,
        description="Knowledge context for the agent.",
    )
    crew_knowledge_context: Optional[str] = Field(
    crew_knowledge_context: str | None = Field(
        default=None,
        description="Knowledge context for the crew.",
    )
    knowledge_search_query: Optional[str] = Field(
    knowledge_search_query: str | None = Field(
        default=None,
        description="Knowledge search query for the agent dynamically generated by the agent.",
    )
    from_repository: Optional[str] = Field(
    from_repository: str | None = Field(
        default=None,
        description="The Agent's role to be used from your repository.",
    )
    guardrail: Optional[Union[Callable[[Any], Tuple[bool, Any]], str]] = Field(
    guardrail: Callable[[Any], tuple[bool, Any]] | str | None = Field(
        default=None,
        description="Function or string description of a guardrail to validate agent output",
    )
    guardrail_max_retries: int = Field(
        default=3, description="Maximum number of retries when guardrail fails"
    )
    capabilities: list[AgentCapability] | None = Field(
        default_factory=list,
        description="List of agent capabilities for responsibility tracking"
    )

    @model_validator(mode="before")
    @classmethod
    def validate_from_repository(cls, v):
        if v is not None and (from_repository := v.get("from_repository")):
            return load_agent_from_repository(from_repository) | v
@@ -188,6 +187,7 @@ class Agent(BaseAgent):
    @model_validator(mode="after")
    def post_init_setup(self):
        self.agent_ops_agent_name = self.role
        self._responsibility_system = None

        self.llm = create_llm(self.llm)
        if self.function_calling_llm and not isinstance(
@@ -208,7 +208,31 @@ class Agent(BaseAgent):
            self.cache_handler = CacheHandler()
            self.set_cache_handler(self.cache_handler)

    def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
    def set_responsibility_system(self, responsibility_system) -> None:
        """Set the responsibility tracking system for this agent."""
        self._responsibility_system = responsibility_system

        if self.capabilities:
            self._responsibility_system.register_agent(self, self.capabilities)

    def add_capability(self, capability: AgentCapability) -> None:
        """Add a capability to this agent."""
        if self.capabilities is None:
            self.capabilities = []
        self.capabilities.append(capability)

        if self._responsibility_system:
            self._responsibility_system.hierarchy.add_agent(self, self.capabilities)

    def get_capabilities(self) -> list[AgentCapability]:
        """Get all capabilities for this agent."""
        return self.capabilities or []

    def get_responsibility_system(self):
        """Get the responsibility tracking system for this agent."""
        return self._responsibility_system

    def set_knowledge(self, crew_embedder: dict[str, Any] | None = None):
        try:
            if self.embedder is None and crew_embedder:
                self.embedder = crew_embedder
@@ -224,7 +248,7 @@ class Agent(BaseAgent):
                )
                self.knowledge.add_sources()
        except (TypeError, ValueError) as e:
            raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")
            raise ValueError(f"Invalid Knowledge Configuration: {e!s}") from e

    def _is_any_available_memory(self) -> bool:
        """Check if any memory is available."""
@@ -244,8 +268,8 @@ class Agent(BaseAgent):
    def execute_task(
        self,
        task: Task,
        context: Optional[str] = None,
        tools: Optional[List[BaseTool]] = None,
        context: str | None = None,
        tools: list[BaseTool] | None = None,
    ) -> str:
        """Execute a task with the agent.

@@ -279,10 +303,10 @@ class Agent(BaseAgent):
            except Exception as e:
                if hasattr(self, "_logger"):
                    self._logger.log(
                        "error", f"Error during reasoning process: {str(e)}"
                        "error", f"Error during reasoning process: {e!s}"
                    )
                else:
                    print(f"Error during reasoning process: {str(e)}")
                    print(f"Error during reasoning process: {e!s}")

        self._inject_date_to_task(task)

@@ -525,14 +549,14 @@ class Agent(BaseAgent):

        try:
            return future.result(timeout=timeout)
        except concurrent.futures.TimeoutError:
        except concurrent.futures.TimeoutError as e:
            future.cancel()
            raise TimeoutError(
                f"Task '{task.description}' execution timed out after {timeout} seconds. Consider increasing max_execution_time or optimizing the task."
            )
            ) from e
        except Exception as e:
            future.cancel()
            raise RuntimeError(f"Task execution failed: {str(e)}")
            raise RuntimeError(f"Task execution failed: {e!s}") from e

    def _execute_without_timeout(self, task_prompt: str, task: Task) -> str:
        """Execute a task without a timeout.
@@ -554,14 +578,14 @@ class Agent(BaseAgent):
        )["output"]

    def create_agent_executor(
        self, tools: Optional[List[BaseTool]] = None, task=None
        self, tools: list[BaseTool] | None = None, task=None
    ) -> None:
        """Create an agent executor for the agent.

        Returns:
            An instance of the CrewAgentExecutor class.
        """
        raw_tools: List[BaseTool] = tools or self.tools or []
        raw_tools: list[BaseTool] = tools or self.tools or []
        parsed_tools = parse_tools(raw_tools)

        prompt = Prompts(
@@ -603,10 +627,9 @@ class Agent(BaseAgent):
            callbacks=[TokenCalcHandler(self._token_process)],
        )

    def get_delegation_tools(self, agents: List[BaseAgent]):
    def get_delegation_tools(self, agents: list[BaseAgent]):
        agent_tools = AgentTools(agents=agents)
        tools = agent_tools.tools()
        return tools
        return agent_tools.tools()

    def get_multimodal_tools(self) -> Sequence[BaseTool]:
        from crewai.tools.agent_tools.add_image_tool import AddImageTool
@@ -654,7 +677,7 @@ class Agent(BaseAgent):
            )
        return task_prompt

    def _render_text_description(self, tools: List[Any]) -> str:
    def _render_text_description(self, tools: list[Any]) -> str:
        """Render the tool name and description in plain text.

        Output will be in the format of:
@@ -664,15 +687,13 @@ class Agent(BaseAgent):
        search: This tool is used for search
        calculator: This tool is used for math
        """
        description = "\n".join(
        return "\n".join(
            [
                f"Tool name: {tool.name}\nTool description:\n{tool.description}"
                for tool in tools
            ]
        )

        return description

    def _inject_date_to_task(self, task):
        """Inject the current date into the task description if inject_date is enabled."""
        if self.inject_date:
@@ -700,9 +721,9 @@ class Agent(BaseAgent):
                task.description += f"\n\nCurrent Date: {current_date}"
            except Exception as e:
                if hasattr(self, "_logger"):
                    self._logger.log("warning", f"Failed to inject date: {str(e)}")
                    self._logger.log("warning", f"Failed to inject date: {e!s}")
                else:
                    print(f"Warning: Failed to inject date: {str(e)}")
                    print(f"Warning: Failed to inject date: {e!s}")

    def _validate_docker_installation(self) -> None:
        """Check if Docker is installed and running."""
@@ -713,15 +734,15 @@ class Agent(BaseAgent):

        try:
            subprocess.run(
                ["docker", "info"],
                ["/usr/bin/docker", "info"],
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
        except subprocess.CalledProcessError:
        except subprocess.CalledProcessError as e:
            raise RuntimeError(
                f"Docker is not running. Please start Docker to use code execution with agent: {self.role}"
            )
            ) from e

    def __repr__(self):
        return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
@@ -796,8 +817,8 @@ class Agent(BaseAgent):

    def kickoff(
        self,
        messages: Union[str, List[Dict[str, str]]],
        response_format: Optional[Type[Any]] = None,
        messages: str | list[dict[str, str]],
        response_format: type[Any] | None = None,
    ) -> LiteAgentOutput:
        """
        Execute the agent with the given messages using a LiteAgent instance.
@@ -836,8 +857,8 @@ class Agent(BaseAgent):

    async def kickoff_async(
        self,
        messages: Union[str, List[Dict[str, str]]],
        response_format: Optional[Type[Any]] = None,
        messages: str | list[dict[str, str]],
        response_format: type[Any] | None = None,
    ) -> LiteAgentOutput:
        """
        Execute the agent asynchronously with the given messages using a LiteAgent instance.
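Note: most of the agent changes above are mechanical annotation updates: `Optional[X]` becomes `X | None`, `Union[...]` becomes `|` unions, and `List`/`Dict`/`Tuple`/`Type` become the builtin generics, with exception chaining (`raise ... from e`) and `{e!s}` formatting added along the way. A small standalone sketch of the before/after field style; the model and field names here are illustrative, not from the repository, and it assumes Python 3.10+ with Pydantic v2:

```python
from pydantic import BaseModel, Field

class ExampleSettings(BaseModel):
    # old style: max_execution_time: Optional[int] = Field(default=None, ...)
    max_execution_time: int | None = Field(
        default=None, description="Maximum execution time in seconds"
    )
    # old style: embedder: Optional[Dict[str, Any]] = Field(default=None)
    embedder: dict[str, object] | None = Field(default=None)
    # old style: tags: Union[str, List[str]]
    tags: str | list[str] = Field(default="untagged")

print(ExampleSettings().max_execution_time)  # prints: None
```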
@@ -1,56 +1,47 @@
"""LangGraph agent adapter for CrewAI integration.
from typing import Any, Dict, List, Optional

This module contains the LangGraphAgentAdapter class that integrates LangGraph ReAct agents
with CrewAI's agent system. Provides memory persistence, tool integration, and structured
output functionality.
"""

from collections.abc import Callable
from typing import Any, cast

from pydantic import ConfigDict, Field, PrivateAttr
from pydantic import Field, PrivateAttr

from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
from crewai.agents.agent_adapters.langgraph.langgraph_tool_adapter import (
    LangGraphToolAdapter,
)
from crewai.agents.agent_adapters.langgraph.protocols import (
    LangGraphCheckPointMemoryModule,
    LangGraphPrebuiltModule,
)
from crewai.agents.agent_adapters.langgraph.structured_output_converter import (
    LangGraphConverterAdapter,
)
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.tools.base_tool import BaseTool
from crewai.utilities import Logger
from crewai.utilities.converter import Converter
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.agent_events import (
    AgentExecutionCompletedEvent,
    AgentExecutionErrorEvent,
    AgentExecutionStartedEvent,
)
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.tools.base_tool import BaseTool
from crewai.utilities import Logger
from crewai.utilities.converter import Converter
from crewai.utilities.import_utils import require

try:
    from langgraph.checkpoint.memory import MemorySaver
    from langgraph.prebuilt import create_react_agent

    LANGGRAPH_AVAILABLE = True
except ImportError:
    LANGGRAPH_AVAILABLE = False


class LangGraphAgentAdapter(BaseAgentAdapter):
    """Adapter for LangGraph agents to work with CrewAI.
    """Adapter for LangGraph agents to work with CrewAI."""

    This adapter integrates LangGraph's ReAct agents with CrewAI's agent system,
    providing memory persistence, tool integration, and structured output support.
    """
    model_config = {"arbitrary_types_allowed": True}

    model_config = ConfigDict(arbitrary_types_allowed=True)

    _logger: Logger = PrivateAttr(default_factory=Logger)
    _logger: Logger = PrivateAttr(default_factory=lambda: Logger())
    _tool_adapter: LangGraphToolAdapter = PrivateAttr()
    _graph: Any = PrivateAttr(default=None)
    _memory: Any = PrivateAttr(default=None)
    _max_iterations: int = PrivateAttr(default=10)
    function_calling_llm: Any = Field(default=None)
    step_callback: Callable[..., Any] | None = Field(default=None)
    step_callback: Any = Field(default=None)

    model: str = Field(default="gpt-4o")
    verbose: bool = Field(default=False)
@@ -60,24 +51,17 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
        role: str,
        goal: str,
        backstory: str,
        tools: list[BaseTool] | None = None,
        tools: Optional[List[BaseTool]] = None,
        llm: Any = None,
        max_iterations: int = 10,
        agent_config: dict[str, Any] | None = None,
        agent_config: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """Initialize the LangGraph agent adapter.

        Args:
            role: The role description for the agent.
            goal: The primary goal the agent should achieve.
            backstory: Background information about the agent.
            tools: Optional list of tools available to the agent.
            llm: Language model to use, defaults to gpt-4o.
            max_iterations: Maximum number of iterations for task execution.
            agent_config: Additional configuration for the LangGraph agent.
            **kwargs: Additional arguments passed to the base adapter.
        """
    ):
        """Initialize the LangGraph agent adapter."""
        if not LANGGRAPH_AVAILABLE:
            raise ImportError(
                "LangGraph Agent Dependencies are not installed. Please install it using `uv add langchain-core langgraph`"
            )
        super().__init__(
            role=role,
            goal=goal,
@@ -88,65 +72,46 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
            **kwargs,
        )
        self._tool_adapter = LangGraphToolAdapter(tools=tools)
        self._converter_adapter: LangGraphConverterAdapter = LangGraphConverterAdapter(
            self
        )
        self._converter_adapter = LangGraphConverterAdapter(self)
        self._max_iterations = max_iterations
        self._setup_graph()

    def _setup_graph(self) -> None:
        """Set up the LangGraph workflow graph.
        """Set up the LangGraph workflow graph."""
        try:
            self._memory = MemorySaver()

        Initializes the memory saver and creates a ReAct agent with the configured
        tools, memory checkpointer, and debug settings.
        """
            converted_tools: List[Any] = self._tool_adapter.tools()
            if self._agent_config:
                self._graph = create_react_agent(
                    model=self.llm,
                    tools=converted_tools,
                    checkpointer=self._memory,
                    debug=self.verbose,
                    **self._agent_config,
                )
            else:
                self._graph = create_react_agent(
                    model=self.llm,
                    tools=converted_tools or [],
                    checkpointer=self._memory,
                    debug=self.verbose,
                )

        memory_saver: type[Any] = cast(
            LangGraphCheckPointMemoryModule,
            require(
                "langgraph.checkpoint.memory",
                purpose="LangGraph core functionality",
            ),
        ).MemorySaver
        create_react_agent: Callable[..., Any] = cast(
            LangGraphPrebuiltModule,
            require(
                "langgraph.prebuilt",
                purpose="LangGraph core functionality",
            ),
        ).create_react_agent

        self._memory = memory_saver()

        converted_tools: list[Any] = self._tool_adapter.tools()
        if self._agent_config:
            self._graph = create_react_agent(
                model=self.llm,
                tools=converted_tools,
                checkpointer=self._memory,
                debug=self.verbose,
                **self._agent_config,
            )
        else:
            self._graph = create_react_agent(
                model=self.llm,
                tools=converted_tools or [],
                checkpointer=self._memory,
                debug=self.verbose,
        except ImportError as e:
            self._logger.log(
                "error", f"Failed to import LangGraph dependencies: {str(e)}"
            )
            raise
        except Exception as e:
            self._logger.log("error", f"Error setting up LangGraph agent: {str(e)}")
            raise

    def _build_system_prompt(self) -> str:
        """Build a system prompt for the LangGraph agent.

        Creates a prompt that includes the agent's role, goal, and backstory,
        then enhances it through the converter adapter for structured output.

        Returns:
            The complete system prompt string.
        """
        """Build a system prompt for the LangGraph agent."""
        base_prompt = f"""
You are {self.role}.


Your goal is: {self.goal}

Your backstory: {self.backstory}
@@ -158,25 +123,10 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
    def execute_task(
        self,
        task: Any,
        context: str | None = None,
        tools: list[BaseTool] | None = None,
        context: Optional[str] = None,
        tools: Optional[List[BaseTool]] = None,
    ) -> str:
        """Execute a task using the LangGraph workflow.

        Configures the agent, processes the task through the LangGraph workflow,
        and handles event emission for execution tracking.

        Args:
            task: The task object to execute.
            context: Optional context information for the task.
            tools: Optional additional tools for this specific execution.

        Returns:
            The final answer from the task execution.

        Raises:
            Exception: If task execution fails.
        """
        """Execute a task using the LangGraph workflow."""
        self.create_agent_executor(tools)

        self.configure_structured_output(task)
@@ -201,11 +151,9 @@ class LangGraphAgentAdapter(BaseAgentAdapter):

            session_id = f"task_{id(task)}"

            config: dict[str, dict[str, str]] = {
                "configurable": {"thread_id": session_id}
            }
            config = {"configurable": {"thread_id": session_id}}

            result: dict[str, Any] = self._graph.invoke(
            result = self._graph.invoke(
                {
                    "messages": [
                        ("system", self._build_system_prompt()),
@@ -215,10 +163,10 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
                config,
            )

            messages: list[Any] = result.get("messages", [])
            last_message: Any = messages[-1] if messages else None
            messages = result.get("messages", [])
            last_message = messages[-1] if messages else None

            final_answer: str = ""
            final_answer = ""
            if isinstance(last_message, dict):
                final_answer = last_message.get("content", "")
            elif hasattr(last_message, "content"):
@@ -238,7 +186,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
            return final_answer

        except Exception as e:
            self._logger.log("error", f"Error executing LangGraph task: {e!s}")
            self._logger.log("error", f"Error executing LangGraph task: {str(e)}")
            crewai_event_bus.emit(
                self,
                event=AgentExecutionErrorEvent(
@@ -249,67 +197,29 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
            )
            raise

    def create_agent_executor(self, tools: list[BaseTool] | None = None) -> None:
|
||||
"""Configure the LangGraph agent for execution.
|
||||
|
||||
Args:
|
||||
tools: Optional tools to configure for the agent.
|
||||
"""
|
||||
def create_agent_executor(self, tools: Optional[List[BaseTool]] = None) -> None:
|
||||
"""Configure the LangGraph agent for execution."""
|
||||
self.configure_tools(tools)
|
||||
|
||||
def configure_tools(self, tools: list[BaseTool] | None = None) -> None:
|
||||
"""Configure tools for the LangGraph agent.
|
||||
|
||||
Merges additional tools with existing ones and updates the graph's
|
||||
available tools through the tool adapter.
|
||||
|
||||
Args:
|
||||
tools: Optional additional tools to configure.
|
||||
"""
|
||||
def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
|
||||
"""Configure tools for the LangGraph agent."""
|
||||
if tools:
|
||||
all_tools: list[BaseTool] = list(self.tools or []) + list(tools or [])
|
||||
all_tools = list(self.tools or []) + list(tools or [])
|
||||
self._tool_adapter.configure_tools(all_tools)
|
||||
available_tools: list[Any] = self._tool_adapter.tools()
|
||||
available_tools = self._tool_adapter.tools()
|
||||
self._graph.tools = available_tools
|
||||
|
||||
def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
|
||||
"""Implement delegation tools support for LangGraph.
|
||||
|
||||
Creates delegation tools that allow this agent to delegate tasks to other agents.
|
||||
|
||||
Args:
|
||||
agents: List of agents available for delegation.
|
||||
|
||||
Returns:
|
||||
List of delegation tools.
|
||||
"""
|
||||
agent_tools: AgentTools = AgentTools(agents=agents)
|
||||
def get_delegation_tools(self, agents: List[BaseAgent]) -> List[BaseTool]:
|
||||
"""Implement delegation tools support for LangGraph."""
|
||||
agent_tools = AgentTools(agents=agents)
|
||||
return agent_tools.tools()
|
||||
|
||||
@staticmethod
|
||||
def get_output_converter(
|
||||
llm: Any, text: str, model: Any, instructions: str
|
||||
) -> Converter:
|
||||
"""Convert output format if needed.
|
||||
|
||||
Args:
|
||||
llm: Language model instance.
|
||||
text: Text to convert.
|
||||
model: Model configuration.
|
||||
instructions: Conversion instructions.
|
||||
|
||||
Returns:
|
||||
Converter instance for output transformation.
|
||||
"""
|
||||
self, llm: Any, text: str, model: Any, instructions: str
|
||||
) -> Any:
|
||||
"""Convert output format if needed."""
|
||||
return Converter(llm=llm, text=text, model=model, instructions=instructions)
|
||||
|
||||
def configure_structured_output(self, task: Any) -> None:
|
||||
"""Configure the structured output for LangGraph.
|
||||
|
||||
Uses the converter adapter to set up structured output formatting
|
||||
based on the task requirements.
|
||||
|
||||
Args:
|
||||
task: Task object containing output requirements.
|
||||
"""
|
||||
def configure_structured_output(self, task) -> None:
|
||||
"""Configure the structured output for LangGraph."""
|
||||
self._converter_adapter.configure_structured_output(task)
|
||||
|
||||
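For orientation, the adapter defined above is driven like any other CrewAI agent. The following is an illustrative sketch only: the import path and argument values are assumptions, not taken from this diff, and it requires `langgraph` and `langchain-core` to be installed.

# Illustrative sketch, not part of the diff; import path is assumed.
from crewai.agents.agent_adapters.langgraph.langgraph_adapter import LangGraphAgentAdapter

adapter = LangGraphAgentAdapter(
    role="Research Assistant",
    goal="Summarize the provided context accurately",
    backstory="An analyst who favors short, sourced answers.",
    llm="gpt-4o",          # one side of the diff shows gpt-4o as the default
    max_iterations=5,
)
# `task` would be a CrewAI Task instance; execute_task() runs the ReAct graph
# built in _setup_graph() and returns the final answer as a string.
# answer = adapter.execute_task(task, context="optional extra context")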
@@ -1,72 +1,38 @@
|
||||
"""LangGraph tool adapter for CrewAI tool integration.
|
||||
|
||||
This module contains the LangGraphToolAdapter class that converts CrewAI tools
|
||||
to LangGraph-compatible format using langchain_core.tools.
|
||||
"""
|
||||
|
||||
import inspect
|
||||
from collections.abc import Awaitable
|
||||
from typing import Any
|
||||
from typing import Any, List, Optional
|
||||
|
||||
from crewai.agents.agent_adapters.base_tool_adapter import BaseToolAdapter
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
|
||||
|
||||
class LangGraphToolAdapter(BaseToolAdapter):
|
||||
"""Adapts CrewAI tools to LangGraph agent tool compatible format.
|
||||
"""Adapts CrewAI tools to LangGraph agent tool compatible format"""
|
||||
|
||||
Converts CrewAI BaseTool instances to langchain_core.tools format
|
||||
that can be used by LangGraph agents.
|
||||
"""
|
||||
def __init__(self, tools: Optional[List[BaseTool]] = None):
|
||||
self.original_tools = tools or []
|
||||
self.converted_tools = []
|
||||
|
||||
def __init__(self, tools: list[BaseTool] | None = None) -> None:
|
||||
"""Initialize the tool adapter.
|
||||
|
||||
Args:
|
||||
tools: Optional list of CrewAI tools to adapt.
|
||||
def configure_tools(self, tools: List[BaseTool]) -> None:
|
||||
"""
|
||||
super().__init__()
|
||||
self.original_tools: list[BaseTool] = tools or []
|
||||
self.converted_tools: list[Any] = []
|
||||
|
||||
def configure_tools(self, tools: list[BaseTool]) -> None:
|
||||
"""Configure and convert CrewAI tools to LangGraph-compatible format.
|
||||
|
||||
LangGraph expects tools in langchain_core.tools format. This method
|
||||
converts CrewAI BaseTool instances to StructuredTool instances.
|
||||
|
||||
Args:
|
||||
tools: List of CrewAI tools to convert.
|
||||
Configure and convert CrewAI tools to LangGraph-compatible format.
|
||||
LangGraph expects tools in langchain_core.tools format.
|
||||
"""
|
||||
from langchain_core.tools import BaseTool as LangChainBaseTool
|
||||
from langchain_core.tools import StructuredTool
|
||||
from langchain_core.tools import BaseTool, StructuredTool
|
||||
|
||||
converted_tools: list[Any] = []
|
||||
converted_tools = []
|
||||
if self.original_tools:
|
||||
all_tools: list[BaseTool] = tools + self.original_tools
|
||||
all_tools = tools + self.original_tools
|
||||
else:
|
||||
all_tools = tools
|
||||
for tool in all_tools:
|
||||
if isinstance(tool, LangChainBaseTool):
|
||||
if isinstance(tool, BaseTool):
|
||||
converted_tools.append(tool)
|
||||
continue
|
||||
|
||||
sanitized_name: str = self.sanitize_tool_name(tool.name)
|
||||
sanitized_name = self.sanitize_tool_name(tool.name)
|
||||
|
||||
async def tool_wrapper(
|
||||
*args: Any, tool: BaseTool = tool, **kwargs: Any
|
||||
) -> Any:
|
||||
"""Wrapper function to adapt CrewAI tool calls to LangGraph format.
|
||||
|
||||
Args:
|
||||
*args: Positional arguments for the tool.
|
||||
tool: The CrewAI tool to wrap.
|
||||
**kwargs: Keyword arguments for the tool.
|
||||
|
||||
Returns:
|
||||
The result from the tool execution.
|
||||
"""
|
||||
output: Any | Awaitable[Any]
|
||||
async def tool_wrapper(*args, tool=tool, **kwargs):
|
||||
output = None
|
||||
if len(args) > 0 and isinstance(args[0], str):
|
||||
output = tool.run(args[0])
|
||||
elif "input" in kwargs:
|
||||
@@ -75,12 +41,12 @@ class LangGraphToolAdapter(BaseToolAdapter):
|
||||
output = tool.run(**kwargs)
|
||||
|
||||
if inspect.isawaitable(output):
|
||||
result: Any = await output
|
||||
result = await output
|
||||
else:
|
||||
result = output
|
||||
return result
|
||||
|
||||
converted_tool: StructuredTool = StructuredTool(
|
||||
converted_tool = StructuredTool(
|
||||
name=sanitized_name,
|
||||
description=tool.description,
|
||||
func=tool_wrapper,
|
||||
@@ -91,10 +57,5 @@ class LangGraphToolAdapter(BaseToolAdapter):
|
||||
|
||||
self.converted_tools = converted_tools
|
||||
|
||||
def tools(self) -> list[Any]:
|
||||
"""Get the list of converted tools.
|
||||
|
||||
Returns:
|
||||
List of LangGraph-compatible tools.
|
||||
"""
|
||||
def tools(self) -> List[Any]:
|
||||
return self.converted_tools or []
|
||||
|
||||
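The conversion performed by `configure_tools` boils down to wrapping each CrewAI tool's `run` in a `langchain_core` `StructuredTool`. Below is a minimal standalone sketch of that idea; the example function and names are illustrative, not from the codebase.

from langchain_core.tools import StructuredTool

def lookup_weather(city: str) -> str:
    """Return a canned weather string for a city (toy example)."""
    return f"It is sunny in {city}."

# StructuredTool infers the argument schema from the function signature;
# the adapter above does the analogous wrapping for every CrewAI BaseTool.
weather_tool = StructuredTool.from_function(
    func=lookup_weather,
    name="lookup_weather",
    description="Look up the weather for a city.",
)
print(weather_tool.invoke({"city": "Lisbon"}))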
@@ -1,55 +0,0 @@
|
||||
"""Type protocols for LangGraph modules."""
|
||||
|
||||
from typing import Any, Protocol, runtime_checkable
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class LangGraphMemorySaver(Protocol):
|
||||
"""Protocol for LangGraph MemorySaver.
|
||||
|
||||
Defines the interface for LangGraph's memory persistence mechanism.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize the memory saver."""
|
||||
...
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class LangGraphCheckPointMemoryModule(Protocol):
|
||||
"""Protocol for LangGraph checkpoint memory module.
|
||||
|
||||
Defines the interface for modules containing memory checkpoint functionality.
|
||||
"""
|
||||
|
||||
MemorySaver: type[LangGraphMemorySaver]
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class LangGraphPrebuiltModule(Protocol):
|
||||
"""Protocol for LangGraph prebuilt module.
|
||||
|
||||
Defines the interface for modules containing prebuilt agent factories.
|
||||
"""
|
||||
|
||||
def create_react_agent(
|
||||
self,
|
||||
model: Any,
|
||||
tools: list[Any],
|
||||
checkpointer: Any,
|
||||
debug: bool = False,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
"""Create a ReAct agent with the given configuration.
|
||||
|
||||
Args:
|
||||
model: The language model to use for the agent.
|
||||
tools: List of tools available to the agent.
|
||||
checkpointer: Memory checkpointer for state persistence.
|
||||
debug: Whether to enable debug mode.
|
||||
**kwargs: Additional configuration options.
|
||||
|
||||
Returns:
|
||||
The configured ReAct agent instance.
|
||||
"""
|
||||
...
|
||||
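These protocols exist so the lazily imported `langgraph` modules can be given a static type without a hard import. Here is a small self-contained sketch of the same pattern using the standard library instead of `langgraph`; the `JsonModule` protocol is illustrative only.

from importlib import import_module
from typing import Any, Protocol, cast, runtime_checkable

@runtime_checkable
class JsonModule(Protocol):
    # Structural stand-in for the module attributes we actually use.
    def dumps(self, obj: Any, **kwargs: Any) -> str: ...

# Equivalent in spirit to what require(...) does here, minus the friendlier error message.
json_mod = cast(JsonModule, import_module("json"))
print(json_mod.dumps({"ok": True}))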
@@ -1,45 +1,21 @@
|
||||
"""LangGraph structured output converter for CrewAI task integration.
|
||||
|
||||
This module contains the LangGraphConverterAdapter class that handles structured
|
||||
output conversion for LangGraph agents, supporting JSON and Pydantic model formats.
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
from typing import Any, Literal
|
||||
|
||||
from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
|
||||
from crewai.utilities.converter import generate_model_description
|
||||
|
||||
|
||||
class LangGraphConverterAdapter(BaseConverterAdapter):
|
||||
"""Adapter for handling structured output conversion in LangGraph agents.
|
||||
"""Adapter for handling structured output conversion in LangGraph agents"""
|
||||
|
||||
Converts task output requirements into system prompt modifications and
|
||||
post-processing logic to ensure agents return properly structured outputs.
|
||||
"""
|
||||
def __init__(self, agent_adapter):
|
||||
"""Initialize the converter adapter with a reference to the agent adapter"""
|
||||
self.agent_adapter = agent_adapter
|
||||
self._output_format = None
|
||||
self._schema = None
|
||||
self._system_prompt_appendix = None
|
||||
|
||||
def __init__(self, agent_adapter: Any) -> None:
|
||||
"""Initialize the converter adapter with a reference to the agent adapter.
|
||||
|
||||
Args:
|
||||
agent_adapter: The LangGraph agent adapter instance.
|
||||
"""
|
||||
super().__init__(agent_adapter=agent_adapter)
|
||||
self.agent_adapter: Any = agent_adapter
|
||||
self._output_format: Literal["json", "pydantic"] | None = None
|
||||
self._schema: str | None = None
|
||||
self._system_prompt_appendix: str | None = None
|
||||
|
||||
def configure_structured_output(self, task: Any) -> None:
|
||||
"""Configure the structured output for LangGraph.
|
||||
|
||||
Analyzes the task's output requirements and sets up the necessary
|
||||
formatting and validation logic.
|
||||
|
||||
Args:
|
||||
task: The task object containing output format specifications.
|
||||
"""
|
||||
def configure_structured_output(self, task) -> None:
|
||||
"""Configure the structured output for LangGraph."""
|
||||
if not (task.output_json or task.output_pydantic):
|
||||
self._output_format = None
|
||||
self._schema = None
|
||||
@@ -56,14 +32,7 @@ class LangGraphConverterAdapter(BaseConverterAdapter):
|
||||
self._system_prompt_appendix = self._generate_system_prompt_appendix()
|
||||
|
||||
def _generate_system_prompt_appendix(self) -> str:
|
||||
"""Generate an appendix for the system prompt to enforce structured output.
|
||||
|
||||
Creates instructions that are appended to the system prompt to guide
|
||||
the agent in producing properly formatted output.
|
||||
|
||||
Returns:
|
||||
System prompt appendix string, or empty string if no structured output.
|
||||
"""
|
||||
"""Generate an appendix for the system prompt to enforce structured output"""
|
||||
if not self._output_format or not self._schema:
|
||||
return ""
|
||||
|
||||
@@ -72,36 +41,19 @@ Important: Your final answer MUST be provided in the following structured format
|
||||
|
||||
{self._schema}
|
||||
|
||||
DO NOT include any markdown code blocks, backticks, or other formatting around your response.
|
||||
DO NOT include any markdown code blocks, backticks, or other formatting around your response.
|
||||
The output should be raw JSON that exactly matches the specified schema.
|
||||
"""
|
||||
|
||||
def enhance_system_prompt(self, original_prompt: str) -> str:
|
||||
"""Add structured output instructions to the system prompt if needed.
|
||||
|
||||
Args:
|
||||
original_prompt: The base system prompt.
|
||||
|
||||
Returns:
|
||||
Enhanced system prompt with structured output instructions.
|
||||
"""
|
||||
"""Add structured output instructions to the system prompt if needed"""
|
||||
if not self._system_prompt_appendix:
|
||||
return original_prompt
|
||||
|
||||
return f"{original_prompt}\n{self._system_prompt_appendix}"
|
||||
|
||||
def post_process_result(self, result: str) -> str:
|
||||
"""Post-process the result to ensure it matches the expected format.
|
||||
|
||||
Attempts to extract and validate JSON content from agent responses,
|
||||
handling cases where JSON may be wrapped in markdown or other formatting.
|
||||
|
||||
Args:
|
||||
result: The raw result string from the agent.
|
||||
|
||||
Returns:
|
||||
Processed result string, ideally in valid JSON format.
|
||||
"""
|
||||
"""Post-process the result to ensure it matches the expected format"""
|
||||
if not self._output_format:
|
||||
return result
|
||||
|
||||
@@ -113,16 +65,16 @@ The output should be raw JSON that exactly matches the specified schema.
|
||||
return result
|
||||
except json.JSONDecodeError:
|
||||
# Try to extract JSON from the text
|
||||
json_match: re.Match[str] | None = re.search(
|
||||
r"(\{.*})", result, re.DOTALL
|
||||
)
|
||||
import re
|
||||
|
||||
json_match = re.search(r"(\{.*\})", result, re.DOTALL)
|
||||
if json_match:
|
||||
try:
|
||||
extracted: str = json_match.group(1)
|
||||
extracted = json_match.group(1)
|
||||
# Validate it's proper JSON
|
||||
json.loads(extracted)
|
||||
return extracted
|
||||
except json.JSONDecodeError:
|
||||
except:
|
||||
pass
|
||||
|
||||
return result
|
||||
|
||||
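The `post_process_result` fallback above reduces to: return the text if it already parses as JSON, otherwise pull out the first `{...}` span and validate it. A self-contained sketch of that logic follows; the function name is illustrative.

import json
import re

def extract_json(result: str) -> str:
    try:
        json.loads(result)
        return result          # already valid JSON
    except json.JSONDecodeError:
        match = re.search(r"(\{.*})", result, re.DOTALL)
        if match:
            candidate = match.group(1)
            try:
                json.loads(candidate)   # validate before returning
                return candidate
            except json.JSONDecodeError:
                pass
    return result              # give back the original text unchanged

print(extract_json('Final answer: {"score": 7}'))  # -> {"score": 7}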
@@ -1,99 +1,78 @@
|
||||
"""OpenAI agents adapter for CrewAI integration.
|
||||
from typing import Any, List, Optional
|
||||
|
||||
This module contains the OpenAIAgentAdapter class that integrates OpenAI Assistants
|
||||
with CrewAI's agent system, providing tool integration and structured output support.
|
||||
"""
|
||||
|
||||
from typing import Any, cast
|
||||
|
||||
from pydantic import ConfigDict, Field, PrivateAttr
|
||||
from typing_extensions import Unpack
|
||||
from pydantic import Field, PrivateAttr
|
||||
|
||||
from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
|
||||
from crewai.agents.agent_adapters.openai_agents.openai_agent_tool_adapter import (
|
||||
OpenAIAgentToolAdapter,
|
||||
)
|
||||
from crewai.agents.agent_adapters.openai_agents.protocols import (
|
||||
AgentKwargs,
|
||||
OpenAIAgentsModule,
|
||||
)
|
||||
from crewai.agents.agent_adapters.openai_agents.protocols import (
|
||||
OpenAIAgent as OpenAIAgentProtocol,
|
||||
)
|
||||
from crewai.agents.agent_adapters.openai_agents.structured_output_converter import (
|
||||
OpenAIConverterAdapter,
|
||||
)
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.tools import BaseTool
|
||||
from crewai.tools.agent_tools.agent_tools import AgentTools
|
||||
from crewai.utilities import Logger
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.agent_events import (
|
||||
AgentExecutionCompletedEvent,
|
||||
AgentExecutionErrorEvent,
|
||||
AgentExecutionStartedEvent,
|
||||
)
|
||||
from crewai.tools import BaseTool
|
||||
from crewai.tools.agent_tools.agent_tools import AgentTools
|
||||
from crewai.utilities import Logger
|
||||
from crewai.utilities.import_utils import require
|
||||
|
||||
openai_agents_module = cast(
|
||||
OpenAIAgentsModule,
|
||||
require(
|
||||
"agents",
|
||||
purpose="OpenAI agents functionality",
|
||||
),
|
||||
)
|
||||
OpenAIAgent = openai_agents_module.Agent
|
||||
Runner = openai_agents_module.Runner
|
||||
enable_verbose_stdout_logging = openai_agents_module.enable_verbose_stdout_logging
|
||||
try:
|
||||
from agents import Agent as OpenAIAgent # type: ignore
|
||||
from agents import Runner, enable_verbose_stdout_logging # type: ignore
|
||||
|
||||
from .openai_agent_tool_adapter import OpenAIAgentToolAdapter
|
||||
|
||||
OPENAI_AVAILABLE = True
|
||||
except ImportError:
|
||||
OPENAI_AVAILABLE = False
|
||||
|
||||
|
||||
class OpenAIAgentAdapter(BaseAgentAdapter):
|
||||
"""Adapter for OpenAI Assistants.
|
||||
"""Adapter for OpenAI Assistants"""
|
||||
|
||||
Integrates OpenAI Assistants API with CrewAI's agent system, providing
|
||||
tool configuration, structured output handling, and task execution.
|
||||
"""
|
||||
model_config = {"arbitrary_types_allowed": True}
|
||||
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
|
||||
_openai_agent: OpenAIAgentProtocol = PrivateAttr()
|
||||
_logger: Logger = PrivateAttr(default_factory=Logger)
|
||||
_active_thread: str | None = PrivateAttr(default=None)
|
||||
_openai_agent: "OpenAIAgent" = PrivateAttr()
|
||||
_logger: Logger = PrivateAttr(default_factory=lambda: Logger())
|
||||
_active_thread: Optional[str] = PrivateAttr(default=None)
|
||||
function_calling_llm: Any = Field(default=None)
|
||||
step_callback: Any = Field(default=None)
|
||||
_tool_adapter: OpenAIAgentToolAdapter = PrivateAttr()
|
||||
_tool_adapter: "OpenAIAgentToolAdapter" = PrivateAttr()
|
||||
_converter_adapter: OpenAIConverterAdapter = PrivateAttr()
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
**kwargs: Unpack[AgentKwargs],
|
||||
) -> None:
|
||||
"""Initialize the OpenAI agent adapter.
|
||||
|
||||
Args:
|
||||
**kwargs: All initialization arguments including role, goal, backstory,
|
||||
model, tools, and agent_config.
|
||||
|
||||
Raises:
|
||||
ImportError: If OpenAI agent dependencies are not installed.
|
||||
"""
|
||||
self.llm = kwargs.pop("model", "gpt-4o-mini")
|
||||
super().__init__(**kwargs)
|
||||
self._tool_adapter = OpenAIAgentToolAdapter(tools=kwargs.get("tools"))
|
||||
self._converter_adapter = OpenAIConverterAdapter(agent_adapter=self)
|
||||
model: str = "gpt-4o-mini",
|
||||
tools: Optional[List[BaseTool]] = None,
|
||||
agent_config: Optional[dict] = None,
|
||||
**kwargs,
|
||||
):
|
||||
if not OPENAI_AVAILABLE:
|
||||
raise ImportError(
|
||||
"OpenAI Agent Dependencies are not installed. Please install it using `uv add openai-agents`"
|
||||
)
|
||||
else:
|
||||
role = kwargs.pop("role", None)
|
||||
goal = kwargs.pop("goal", None)
|
||||
backstory = kwargs.pop("backstory", None)
|
||||
super().__init__(
|
||||
role=role,
|
||||
goal=goal,
|
||||
backstory=backstory,
|
||||
tools=tools,
|
||||
agent_config=agent_config,
|
||||
**kwargs,
|
||||
)
|
||||
self._tool_adapter = OpenAIAgentToolAdapter(tools=tools)
|
||||
self.llm = model
|
||||
self._converter_adapter = OpenAIConverterAdapter(self)
|
||||
|
||||
def _build_system_prompt(self) -> str:
|
||||
"""Build a system prompt for the OpenAI agent.
|
||||
|
||||
Creates a prompt containing the agent's role, goal, and backstory,
|
||||
then enhances it with structured output instructions if needed.
|
||||
|
||||
Returns:
|
||||
The complete system prompt string.
|
||||
"""
|
||||
"""Build a system prompt for the OpenAI agent."""
|
||||
base_prompt = f"""
|
||||
You are {self.role}.
|
||||
|
||||
|
||||
Your goal is: {self.goal}
|
||||
|
||||
Your backstory: {self.backstory}
|
||||
@@ -105,25 +84,10 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
|
||||
def execute_task(
|
||||
self,
|
||||
task: Any,
|
||||
context: str | None = None,
|
||||
tools: list[BaseTool] | None = None,
|
||||
context: Optional[str] = None,
|
||||
tools: Optional[List[BaseTool]] = None,
|
||||
) -> str:
|
||||
"""Execute a task using the OpenAI Assistant.
|
||||
|
||||
Configures the assistant, processes the task, and handles event emission
|
||||
for execution tracking.
|
||||
|
||||
Args:
|
||||
task: The task object to execute.
|
||||
context: Optional context information for the task.
|
||||
tools: Optional additional tools for this execution.
|
||||
|
||||
Returns:
|
||||
The final answer from the task execution.
|
||||
|
||||
Raises:
|
||||
Exception: If task execution fails.
|
||||
"""
|
||||
"""Execute a task using the OpenAI Assistant"""
|
||||
self._converter_adapter.configure_structured_output(task)
|
||||
self.create_agent_executor(tools)
|
||||
|
||||
@@ -131,7 +95,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
|
||||
enable_verbose_stdout_logging()
|
||||
|
||||
try:
|
||||
task_prompt: str = task.prompt()
|
||||
task_prompt = task.prompt()
|
||||
if context:
|
||||
task_prompt = self.i18n.slice("task_with_context").format(
|
||||
task=task_prompt, context=context
|
||||
@@ -145,8 +109,8 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
|
||||
task=task,
|
||||
),
|
||||
)
|
||||
result: Any = self.agent_executor.run_sync(self._openai_agent, task_prompt)
|
||||
final_answer: str = self.handle_execution_result(result)
|
||||
result = self.agent_executor.run_sync(self._openai_agent, task_prompt)
|
||||
final_answer = self.handle_execution_result(result)
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=AgentExecutionCompletedEvent(
|
||||
@@ -156,7 +120,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
|
||||
return final_answer
|
||||
|
||||
except Exception as e:
|
||||
self._logger.log("error", f"Error executing OpenAI task: {e!s}")
|
||||
self._logger.log("error", f"Error executing OpenAI task: {str(e)}")
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=AgentExecutionErrorEvent(
|
||||
@@ -167,22 +131,15 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
|
||||
)
|
||||
raise
|
||||
|
||||
def create_agent_executor(self, tools: list[BaseTool] | None = None) -> None:
|
||||
"""Configure the OpenAI agent for execution.
|
||||
|
||||
While OpenAI handles execution differently through Runner,
|
||||
this method sets up tools and agent configuration.
|
||||
|
||||
Args:
|
||||
tools: Optional tools to configure for the agent.
|
||||
|
||||
Notes:
|
||||
TODO: Properly type agent_executor in BaseAgent to avoid type issues
|
||||
when assigning Runner class to this attribute.
|
||||
def create_agent_executor(self, tools: Optional[List[BaseTool]] = None) -> None:
|
||||
"""
|
||||
all_tools: list[BaseTool] = list(self.tools or []) + list(tools or [])
|
||||
Configure the OpenAI agent for execution.
|
||||
While OpenAI handles execution differently through Runner,
|
||||
we can use this method to set up tools and configurations.
|
||||
"""
|
||||
all_tools = list(self.tools or []) + list(tools or [])
|
||||
|
||||
instructions: str = self._build_system_prompt()
|
||||
instructions = self._build_system_prompt()
|
||||
self._openai_agent = OpenAIAgent(
|
||||
name=self.role,
|
||||
instructions=instructions,
|
||||
@@ -195,48 +152,27 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
|
||||
|
||||
self.agent_executor = Runner
|
||||
|
||||
def configure_tools(self, tools: list[BaseTool] | None = None) -> None:
|
||||
"""Configure tools for the OpenAI Assistant.
|
||||
|
||||
Args:
|
||||
tools: Optional tools to configure for the assistant.
|
||||
"""
|
||||
def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
|
||||
"""Configure tools for the OpenAI Assistant"""
|
||||
if tools:
|
||||
self._tool_adapter.configure_tools(tools)
|
||||
if self._tool_adapter.converted_tools:
|
||||
self._openai_agent.tools = self._tool_adapter.converted_tools
|
||||
|
||||
def handle_execution_result(self, result: Any) -> str:
|
||||
"""Process OpenAI Assistant execution result.
|
||||
|
||||
Converts any structured output to a string through the converter adapter.
|
||||
|
||||
Args:
|
||||
result: The execution result from the OpenAI assistant.
|
||||
|
||||
Returns:
|
||||
Processed result as a string.
|
||||
"""
|
||||
"""Process OpenAI Assistant execution result converting any structured output to a string"""
|
||||
return self._converter_adapter.post_process_result(result.final_output)
|
||||
|
||||
def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
|
||||
"""Implement delegation tools support.
|
||||
def get_delegation_tools(self, agents: List[BaseAgent]) -> List[BaseTool]:
|
||||
"""Implement delegation tools support"""
|
||||
agent_tools = AgentTools(agents=agents)
|
||||
tools = agent_tools.tools()
|
||||
return tools
|
||||
|
||||
Creates delegation tools that allow this agent to delegate tasks to other agents.
|
||||
|
||||
Args:
|
||||
agents: List of agents available for delegation.
|
||||
|
||||
Returns:
|
||||
List of delegation tools.
|
||||
"""
|
||||
agent_tools: AgentTools = AgentTools(agents=agents)
|
||||
return agent_tools.tools()
|
||||
|
||||
def configure_structured_output(self, task: Any) -> None:
|
||||
def configure_structured_output(self, task) -> None:
|
||||
"""Configure the structured output for the specific agent implementation.
|
||||
|
||||
Args:
|
||||
task: The task object containing output format specifications.
|
||||
structured_output: The structured output to be configured
|
||||
"""
|
||||
self._converter_adapter.configure_structured_output(task)
|
||||
|
||||
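As a usage sketch, both variants of the constructor above accept the familiar role/goal/backstory fields plus a model name; in one variant they travel through `**kwargs` typed as `AgentKwargs`, in the other they are explicit parameters. The import path below is an assumption, and `openai-agents` must be installed.

# Illustrative only; import path assumed, argument values invented.
from crewai.agents.agent_adapters.openai_agents.openai_adapter import OpenAIAgentAdapter

adapter = OpenAIAgentAdapter(
    role="Support Agent",
    goal="Answer customer questions accurately",
    backstory="A concise, polite assistant.",
    model="gpt-4o-mini",   # default shown in the diff
)
# answer = adapter.execute_task(task)  # task is a CrewAI Task instance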
@@ -1,125 +1,57 @@
|
||||
"""OpenAI agent tool adapter for CrewAI tool integration.
|
||||
|
||||
This module contains the OpenAIAgentToolAdapter class that converts CrewAI tools
|
||||
to OpenAI Assistant-compatible format using the agents library.
|
||||
"""
|
||||
|
||||
import inspect
|
||||
import json
|
||||
import re
|
||||
from collections.abc import Awaitable
|
||||
from typing import Any, cast
|
||||
from typing import Any, List, Optional
|
||||
|
||||
from agents import FunctionTool, Tool
|
||||
|
||||
from crewai.agents.agent_adapters.base_tool_adapter import BaseToolAdapter
|
||||
from crewai.agents.agent_adapters.openai_agents.protocols import (
|
||||
OpenAIFunctionTool,
|
||||
OpenAITool,
|
||||
)
|
||||
from crewai.tools import BaseTool
|
||||
from crewai.utilities.import_utils import require
|
||||
|
||||
agents_module = cast(
|
||||
Any,
|
||||
require(
|
||||
"agents",
|
||||
purpose="OpenAI agents functionality",
|
||||
),
|
||||
)
|
||||
FunctionTool = agents_module.FunctionTool
|
||||
Tool = agents_module.Tool
|
||||
|
||||
|
||||
class OpenAIAgentToolAdapter(BaseToolAdapter):
|
||||
"""Adapter for OpenAI Assistant tools.
|
||||
"""Adapter for OpenAI Assistant tools"""
|
||||
|
||||
Converts CrewAI BaseTool instances to OpenAI Assistant FunctionTool format
|
||||
that can be used by OpenAI agents.
|
||||
"""
|
||||
def __init__(self, tools: Optional[List[BaseTool]] = None):
|
||||
self.original_tools = tools or []
|
||||
|
||||
def __init__(self, tools: list[BaseTool] | None = None) -> None:
|
||||
"""Initialize the tool adapter.
|
||||
|
||||
Args:
|
||||
tools: Optional list of CrewAI tools to adapt.
|
||||
"""
|
||||
super().__init__()
|
||||
self.original_tools: list[BaseTool] = tools or []
|
||||
self.converted_tools: list[OpenAITool] = []
|
||||
|
||||
def configure_tools(self, tools: list[BaseTool]) -> None:
|
||||
"""Configure tools for the OpenAI Assistant.
|
||||
|
||||
Merges provided tools with original tools and converts them to
|
||||
OpenAI Assistant format.
|
||||
|
||||
Args:
|
||||
tools: List of CrewAI tools to configure.
|
||||
"""
|
||||
def configure_tools(self, tools: List[BaseTool]) -> None:
|
||||
"""Configure tools for the OpenAI Assistant"""
|
||||
if self.original_tools:
|
||||
all_tools: list[BaseTool] = tools + self.original_tools
|
||||
all_tools = tools + self.original_tools
|
||||
else:
|
||||
all_tools = tools
|
||||
if all_tools:
|
||||
self.converted_tools = self._convert_tools_to_openai_format(all_tools)
|
||||
|
||||
@staticmethod
|
||||
def _convert_tools_to_openai_format(
|
||||
tools: list[BaseTool] | None,
|
||||
) -> list[OpenAITool]:
|
||||
"""Convert CrewAI tools to OpenAI Assistant tool format.
|
||||
|
||||
Args:
|
||||
tools: List of CrewAI tools to convert.
|
||||
|
||||
Returns:
|
||||
List of OpenAI Assistant FunctionTool instances.
|
||||
"""
|
||||
self, tools: Optional[List[BaseTool]]
|
||||
) -> List[Tool]:
|
||||
"""Convert CrewAI tools to OpenAI Assistant tool format"""
|
||||
if not tools:
|
||||
return []
|
||||
|
||||
def sanitize_tool_name(name: str) -> str:
|
||||
"""Convert tool name to match OpenAI's required pattern.
|
||||
"""Convert tool name to match OpenAI's required pattern"""
|
||||
import re
|
||||
|
||||
Args:
|
||||
name: Original tool name.
|
||||
sanitized = re.sub(r"[^a-zA-Z0-9_-]", "_", name).lower()
|
||||
return sanitized
|
||||
|
||||
Returns:
|
||||
Sanitized tool name matching OpenAI requirements.
|
||||
"""
|
||||
|
||||
return re.sub(r"[^a-zA-Z0-9_-]", "_", name).lower()
|
||||
|
||||
def create_tool_wrapper(tool: BaseTool) -> Any:
|
||||
"""Create a wrapper function that handles the OpenAI function tool interface.
|
||||
|
||||
Args:
|
||||
tool: The CrewAI tool to wrap.
|
||||
|
||||
Returns:
|
||||
Async wrapper function for OpenAI agent integration.
|
||||
"""
|
||||
def create_tool_wrapper(tool: BaseTool):
|
||||
"""Create a wrapper function that handles the OpenAI function tool interface"""
|
||||
|
||||
async def wrapper(context_wrapper: Any, arguments: Any) -> Any:
|
||||
"""Wrapper function to adapt CrewAI tool calls to OpenAI format.
|
||||
|
||||
Args:
|
||||
context_wrapper: OpenAI context wrapper.
|
||||
arguments: Tool arguments from OpenAI.
|
||||
|
||||
Returns:
|
||||
Tool execution result.
|
||||
"""
|
||||
# Get the parameter name from the schema
|
||||
param_name: str = next(
|
||||
iter(tool.args_schema.model_json_schema()["properties"].keys())
|
||||
)
|
||||
param_name = list(
|
||||
tool.args_schema.model_json_schema()["properties"].keys()
|
||||
)[0]
|
||||
|
||||
# Handle different argument types
|
||||
args_dict: dict[str, Any]
|
||||
if isinstance(arguments, dict):
|
||||
args_dict = arguments
|
||||
elif isinstance(arguments, str):
|
||||
try:
|
||||
import json
|
||||
|
||||
args_dict = json.loads(arguments)
|
||||
except json.JSONDecodeError:
|
||||
args_dict = {param_name: arguments}
|
||||
@@ -127,11 +59,11 @@ class OpenAIAgentToolAdapter(BaseToolAdapter):
|
||||
args_dict = {param_name: str(arguments)}
|
||||
|
||||
# Run the tool with the processed arguments
|
||||
output: Any | Awaitable[Any] = tool._run(**args_dict)
|
||||
output = tool._run(**args_dict)
|
||||
|
||||
# Await if the tool returned a coroutine
|
||||
if inspect.isawaitable(output):
|
||||
result: Any = await output
|
||||
result = await output
|
||||
else:
|
||||
result = output
|
||||
|
||||
@@ -142,20 +74,17 @@ class OpenAIAgentToolAdapter(BaseToolAdapter):
|
||||
|
||||
return wrapper
|
||||
|
||||
openai_tools: list[OpenAITool] = []
|
||||
openai_tools = []
|
||||
for tool in tools:
|
||||
schema: dict[str, Any] = tool.args_schema.model_json_schema()
|
||||
schema = tool.args_schema.model_json_schema()
|
||||
|
||||
schema.update({"additionalProperties": False, "type": "object"})
|
||||
|
||||
openai_tool: OpenAIFunctionTool = cast(
|
||||
OpenAIFunctionTool,
|
||||
FunctionTool(
|
||||
name=sanitize_tool_name(tool.name),
|
||||
description=tool.description,
|
||||
params_json_schema=schema,
|
||||
on_invoke_tool=create_tool_wrapper(tool),
|
||||
),
|
||||
openai_tool = FunctionTool(
|
||||
name=sanitize_tool_name(tool.name),
|
||||
description=tool.description,
|
||||
params_json_schema=schema,
|
||||
on_invoke_tool=create_tool_wrapper(tool),
|
||||
)
|
||||
openai_tools.append(openai_tool)
|
||||
|
||||
|
||||
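The wrapper created by `create_tool_wrapper` mostly deals with the fact that tool arguments may arrive as a dict, a JSON string, or a bare value. Below is a dependency-free sketch of that normalization step; `normalize_args` is an illustrative name, not part of the codebase.

import json
from typing import Any

def normalize_args(arguments: Any, param_name: str) -> dict[str, Any]:
    """Coerce whatever OpenAI passes into kwargs for the wrapped tool."""
    if isinstance(arguments, dict):
        return arguments
    if isinstance(arguments, str):
        try:
            parsed = json.loads(arguments)
            return parsed if isinstance(parsed, dict) else {param_name: parsed}
        except json.JSONDecodeError:
            return {param_name: arguments}
    return {param_name: str(arguments)}

print(normalize_args('{"query": "weather in Lisbon"}', "query"))
print(normalize_args("weather in Lisbon", "query"))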
@@ -1,74 +0,0 @@
|
||||
"""Type protocols for OpenAI agents modules."""
|
||||
|
||||
from collections.abc import Callable
|
||||
from typing import Any, Protocol, TypedDict, runtime_checkable
|
||||
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
|
||||
|
||||
class AgentKwargs(TypedDict, total=False):
|
||||
"""Typed dict for agent initialization kwargs."""
|
||||
|
||||
role: str
|
||||
goal: str
|
||||
backstory: str
|
||||
model: str
|
||||
tools: list[BaseTool] | None
|
||||
agent_config: dict[str, Any] | None
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class OpenAIAgent(Protocol):
|
||||
"""Protocol for OpenAI Agent."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
instructions: str,
|
||||
model: str,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize the OpenAI agent."""
|
||||
...
|
||||
|
||||
tools: list[Any]
|
||||
output_type: Any
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class OpenAIRunner(Protocol):
|
||||
"""Protocol for OpenAI Runner."""
|
||||
|
||||
@classmethod
|
||||
def run_sync(cls, agent: OpenAIAgent, message: str) -> Any:
|
||||
"""Run agent synchronously with a message."""
|
||||
...
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class OpenAIAgentsModule(Protocol):
|
||||
"""Protocol for OpenAI agents module."""
|
||||
|
||||
Agent: type[OpenAIAgent]
|
||||
Runner: type[OpenAIRunner]
|
||||
enable_verbose_stdout_logging: Callable[[], None]
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class OpenAITool(Protocol):
|
||||
"""Protocol for OpenAI Tool."""
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class OpenAIFunctionTool(Protocol):
|
||||
"""Protocol for OpenAI FunctionTool."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
description: str,
|
||||
params_json_schema: dict[str, Any],
|
||||
on_invoke_tool: Any,
|
||||
) -> None:
|
||||
"""Initialize the function tool."""
|
||||
...
|
||||
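`AgentKwargs` plus `Unpack` is what lets the adapter accept `**kwargs` while keeping them type-checked. A tiny self-contained example of the same pattern, with invented names:

from typing import TypedDict
from typing_extensions import Unpack

class GreeterKwargs(TypedDict, total=False):
    name: str
    excited: bool

def greet(**kwargs: Unpack[GreeterKwargs]) -> str:
    # Type checkers now know exactly which keyword arguments are allowed.
    name = kwargs.get("name", "world")
    suffix = "!" if kwargs.get("excited", False) else "."
    return f"Hello, {name}{suffix}"

print(greet(name="CrewAI", excited=True))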
@@ -1,12 +1,5 @@
|
||||
"""OpenAI structured output converter for CrewAI task integration.
|
||||
|
||||
This module contains the OpenAIConverterAdapter class that handles structured
|
||||
output conversion for OpenAI agents, supporting JSON and Pydantic model formats.
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
from typing import Any, Literal
|
||||
|
||||
from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
|
||||
from crewai.utilities.converter import generate_model_description
|
||||
@@ -14,7 +7,8 @@ from crewai.utilities.i18n import I18N
|
||||
|
||||
|
||||
class OpenAIConverterAdapter(BaseConverterAdapter):
|
||||
"""Adapter for handling structured output conversion in OpenAI agents.
|
||||
"""
|
||||
Adapter for handling structured output conversion in OpenAI agents.
|
||||
|
||||
This adapter enhances the OpenAI agent to handle structured output formats
|
||||
and post-processes the results when needed.
|
||||
@@ -25,23 +19,19 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
|
||||
_output_model: The Pydantic model for the output
|
||||
"""
|
||||
|
||||
def __init__(self, agent_adapter: Any) -> None:
|
||||
"""Initialize the converter adapter with a reference to the agent adapter.
|
||||
def __init__(self, agent_adapter):
|
||||
"""Initialize the converter adapter with a reference to the agent adapter"""
|
||||
self.agent_adapter = agent_adapter
|
||||
self._output_format = None
|
||||
self._schema = None
|
||||
self._output_model = None
|
||||
|
||||
Args:
|
||||
agent_adapter: The OpenAI agent adapter instance.
|
||||
def configure_structured_output(self, task) -> None:
|
||||
"""
|
||||
super().__init__(agent_adapter=agent_adapter)
|
||||
self.agent_adapter: Any = agent_adapter
|
||||
self._output_format: Literal["json", "pydantic"] | None = None
|
||||
self._schema: str | None = None
|
||||
self._output_model: Any = None
|
||||
|
||||
def configure_structured_output(self, task: Any) -> None:
|
||||
"""Configure the structured output for OpenAI agent based on task requirements.
|
||||
Configure the structured output for OpenAI agent based on task requirements.
|
||||
|
||||
Args:
|
||||
task: The task containing output format requirements.
|
||||
task: The task containing output format requirements
|
||||
"""
|
||||
# Reset configuration
|
||||
self._output_format = None
|
||||
@@ -65,18 +55,19 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
|
||||
self._output_model = task.output_pydantic
|
||||
|
||||
def enhance_system_prompt(self, base_prompt: str) -> str:
|
||||
"""Enhance the base system prompt with structured output requirements if needed.
|
||||
"""
|
||||
Enhance the base system prompt with structured output requirements if needed.
|
||||
|
||||
Args:
|
||||
base_prompt: The original system prompt.
|
||||
base_prompt: The original system prompt
|
||||
|
||||
Returns:
|
||||
Enhanced system prompt with output format instructions if needed.
|
||||
Enhanced system prompt with output format instructions if needed
|
||||
"""
|
||||
if not self._output_format:
|
||||
return base_prompt
|
||||
|
||||
output_schema: str = (
|
||||
output_schema = (
|
||||
I18N()
|
||||
.slice("formatted_task_instructions")
|
||||
.format(output_format=self._schema)
|
||||
@@ -85,15 +76,16 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
|
||||
return f"{base_prompt}\n\n{output_schema}"
|
||||
|
||||
def post_process_result(self, result: str) -> str:
|
||||
"""Post-process the result to ensure it matches the expected format.
|
||||
"""
|
||||
Post-process the result to ensure it matches the expected format.
|
||||
|
||||
This method attempts to extract valid JSON from the result if necessary.
|
||||
|
||||
Args:
|
||||
result: The raw result from the agent.
|
||||
result: The raw result from the agent
|
||||
|
||||
Returns:
|
||||
Processed result conforming to the expected output format.
|
||||
Processed result conforming to the expected output format
|
||||
"""
|
||||
if not self._output_format:
|
||||
return result
|
||||
@@ -105,30 +97,26 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
|
||||
return result
|
||||
except json.JSONDecodeError:
|
||||
# Try to extract JSON from markdown code blocks
|
||||
code_block_pattern: str = r"```(?:json)?\s*([\s\S]*?)```"
|
||||
code_blocks: list[str] = re.findall(code_block_pattern, result)
|
||||
code_block_pattern = r"```(?:json)?\s*([\s\S]*?)```"
|
||||
code_blocks = re.findall(code_block_pattern, result)
|
||||
|
||||
for block in code_blocks:
|
||||
stripped_block = block.strip()
|
||||
try:
|
||||
json.loads(stripped_block)
|
||||
return stripped_block
|
||||
json.loads(block.strip())
|
||||
return block.strip()
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
continue
|
||||
|
||||
# Try to extract any JSON-like structure
|
||||
json_pattern: str = r"(\{[\s\S]*\})"
|
||||
json_matches: list[str] = re.findall(json_pattern, result, re.DOTALL)
|
||||
json_pattern = r"(\{[\s\S]*\})"
|
||||
json_matches = re.findall(json_pattern, result, re.DOTALL)
|
||||
|
||||
for match in json_matches:
|
||||
is_valid = True
|
||||
try:
|
||||
json.loads(match)
|
||||
except json.JSONDecodeError:
|
||||
is_valid = False
|
||||
|
||||
if is_valid:
|
||||
return match
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
|
||||
# If all extraction attempts fail, return the original
|
||||
return str(result)
|
||||
|
||||
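The recovery chain in `post_process_result` above is: accept valid JSON as-is, then look inside markdown-fenced code blocks, then grab any brace-delimited span, and finally fall back to the original text. A compact sketch of that chain, with `recover_json` as an illustrative name:

import json
import re

def recover_json(result: str) -> str:
    try:
        json.loads(result)
        return result
    except json.JSONDecodeError:
        pass
    # 1) fenced code blocks such as ```json ... ```
    for block in re.findall(r"```(?:json)?\s*([\s\S]*?)```", result):
        candidate = block.strip()
        try:
            json.loads(candidate)
            return candidate
        except json.JSONDecodeError:
            continue
    # 2) any brace-delimited span
    for match in re.findall(r"(\{[\s\S]*\})", result, re.DOTALL):
        try:
            json.loads(match)
            return match
        except json.JSONDecodeError:
            continue
    return result

print(recover_json('```json\n{"status": "ok"}\n```'))  # -> {"status": "ok"}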
@@ -1,8 +1,9 @@
|
||||
import uuid
|
||||
from abc import ABC, abstractmethod
|
||||
from collections.abc import Callable
|
||||
from copy import copy as shallow_copy
|
||||
from hashlib import md5
|
||||
from typing import Any, Callable, Dict, List, Optional, TypeVar
|
||||
from typing import Any, TypeVar
|
||||
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
@@ -25,7 +26,6 @@ from crewai.security.security_config import SecurityConfig
|
||||
from crewai.tools.base_tool import BaseTool, Tool
|
||||
from crewai.utilities import I18N, Logger, RPMController
|
||||
from crewai.utilities.config import process_config
|
||||
from crewai.utilities.converter import Converter
|
||||
from crewai.utilities.string_utils import interpolate_only
|
||||
|
||||
T = TypeVar("T", bound="BaseAgent")
|
||||
@@ -81,17 +81,17 @@ class BaseAgent(ABC, BaseModel):
|
||||
|
||||
__hash__ = object.__hash__ # type: ignore
|
||||
_logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))
|
||||
_rpm_controller: Optional[RPMController] = PrivateAttr(default=None)
|
||||
_rpm_controller: RPMController | None = PrivateAttr(default=None)
|
||||
_request_within_rpm_limit: Any = PrivateAttr(default=None)
|
||||
_original_role: Optional[str] = PrivateAttr(default=None)
|
||||
_original_goal: Optional[str] = PrivateAttr(default=None)
|
||||
_original_backstory: Optional[str] = PrivateAttr(default=None)
|
||||
_original_role: str | None = PrivateAttr(default=None)
|
||||
_original_goal: str | None = PrivateAttr(default=None)
|
||||
_original_backstory: str | None = PrivateAttr(default=None)
|
||||
_token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
|
||||
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
|
||||
role: str = Field(description="Role of the agent")
|
||||
goal: str = Field(description="Objective of the agent")
|
||||
backstory: str = Field(description="Backstory of the agent")
|
||||
config: Optional[Dict[str, Any]] = Field(
|
||||
config: dict[str, Any] | None = Field(
|
||||
description="Configuration for the agent", default=None, exclude=True
|
||||
)
|
||||
cache: bool = Field(
|
||||
@@ -100,7 +100,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
verbose: bool = Field(
|
||||
default=False, description="Verbose mode for the Agent Execution"
|
||||
)
|
||||
max_rpm: Optional[int] = Field(
|
||||
max_rpm: int | None = Field(
|
||||
default=None,
|
||||
description="Maximum number of requests per minute for the agent execution to be respected.",
|
||||
)
|
||||
@@ -108,7 +108,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
default=False,
|
||||
description="Enable agent to delegate and ask questions among each other.",
|
||||
)
|
||||
tools: Optional[List[BaseTool]] = Field(
|
||||
tools: list[BaseTool] | None = Field(
|
||||
default_factory=list, description="Tools at agents' disposal"
|
||||
)
|
||||
max_iter: int = Field(
|
||||
@@ -122,27 +122,27 @@ class BaseAgent(ABC, BaseModel):
|
||||
)
|
||||
crew: Any = Field(default=None, description="Crew to which the agent belongs.")
|
||||
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
|
||||
cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
|
||||
cache_handler: InstanceOf[CacheHandler] | None = Field(
|
||||
default=None, description="An instance of the CacheHandler class."
|
||||
)
|
||||
tools_handler: InstanceOf[ToolsHandler] = Field(
|
||||
default_factory=ToolsHandler,
|
||||
description="An instance of the ToolsHandler class.",
|
||||
)
|
||||
tools_results: List[Dict[str, Any]] = Field(
|
||||
tools_results: list[dict[str, Any]] = Field(
|
||||
default=[], description="Results of the tools used by the agent."
|
||||
)
|
||||
max_tokens: Optional[int] = Field(
|
||||
max_tokens: int | None = Field(
|
||||
default=None, description="Maximum number of tokens for the agent's execution."
|
||||
)
|
||||
knowledge: Optional[Knowledge] = Field(
|
||||
knowledge: Knowledge | None = Field(
|
||||
default=None, description="Knowledge for the agent."
|
||||
)
|
||||
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
|
||||
knowledge_sources: list[BaseKnowledgeSource] | None = Field(
|
||||
default=None,
|
||||
description="Knowledge sources for the agent.",
|
||||
)
|
||||
knowledge_storage: Optional[Any] = Field(
|
||||
knowledge_storage: Any | None = Field(
|
||||
default=None,
|
||||
description="Custom knowledge storage for the agent.",
|
||||
)
|
||||
@@ -150,13 +150,13 @@ class BaseAgent(ABC, BaseModel):
|
||||
default_factory=SecurityConfig,
|
||||
description="Security configuration for the agent, including fingerprinting.",
|
||||
)
|
||||
callbacks: List[Callable] = Field(
|
||||
callbacks: list[Callable] = Field(
|
||||
default=[], description="Callbacks to be used for the agent"
|
||||
)
|
||||
adapted_agent: bool = Field(
|
||||
default=False, description="Whether the agent is adapted"
|
||||
)
|
||||
knowledge_config: Optional[KnowledgeConfig] = Field(
|
||||
knowledge_config: KnowledgeConfig | None = Field(
|
||||
default=None,
|
||||
description="Knowledge configuration for the agent such as limits and threshold",
|
||||
)
|
||||
@@ -168,7 +168,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
|
||||
@field_validator("tools")
|
||||
@classmethod
|
||||
def validate_tools(cls, tools: List[Any]) -> List[BaseTool]:
|
||||
def validate_tools(cls, tools: list[Any]) -> list[BaseTool]:
|
||||
"""Validate and process the tools provided to the agent.
|
||||
|
||||
This method ensures that each tool is either an instance of BaseTool
|
||||
@@ -221,7 +221,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
|
||||
def _deny_user_set_id(cls, v: UUID4 | None) -> None:
|
||||
if v:
|
||||
raise PydanticCustomError(
|
||||
"may_not_set_field", "This field is not to be set by the user.", {}
|
||||
@@ -252,8 +252,8 @@ class BaseAgent(ABC, BaseModel):
|
||||
def execute_task(
|
||||
self,
|
||||
task: Any,
|
||||
context: Optional[str] = None,
|
||||
tools: Optional[List[BaseTool]] = None,
|
||||
context: str | None = None,
|
||||
tools: list[BaseTool] | None = None,
|
||||
) -> str:
|
||||
pass
|
||||
|
||||
@@ -262,9 +262,8 @@ class BaseAgent(ABC, BaseModel):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]:
|
||||
def get_delegation_tools(self, agents: list["BaseAgent"]) -> list[BaseTool]:
|
||||
"""Set the task tools that init BaseAgenTools class."""
|
||||
pass
|
||||
|
||||
def copy(self: T) -> T: # type: ignore # Signature of "copy" incompatible with supertype "BaseModel"
|
||||
"""Create a deep copy of the Agent."""
|
||||
@@ -309,7 +308,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
|
||||
copied_data = self.model_dump(exclude=exclude)
|
||||
copied_data = {k: v for k, v in copied_data.items() if v is not None}
|
||||
copied_agent = type(self)(
|
||||
return type(self)(
|
||||
**copied_data,
|
||||
llm=existing_llm,
|
||||
tools=self.tools,
|
||||
@@ -318,9 +317,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
knowledge_storage=copied_knowledge_storage,
|
||||
)
|
||||
|
||||
return copied_agent
|
||||
|
||||
def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
|
||||
def interpolate_inputs(self, inputs: dict[str, Any]) -> None:
|
||||
"""Interpolate inputs into the agent description and backstory."""
|
||||
if self._original_role is None:
|
||||
self._original_role = self.role
|
||||
@@ -362,5 +359,8 @@ class BaseAgent(ABC, BaseModel):
|
||||
self._rpm_controller = rpm_controller
|
||||
self.create_agent_executor()
|
||||
|
||||
def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
|
||||
def set_knowledge(self, crew_embedder: dict[str, Any] | None = None):
|
||||
pass
|
||||
|
||||
def set_responsibility_system(self, responsibility_system: Any) -> None:
|
||||
"""Set the responsibility system for the agent."""
|
||||
|
||||
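The BaseAgent hunks above differ mainly in annotation style: `typing.Optional`, `List`, and `Dict` on one side, PEP 604 unions and builtin generics on the other. Pydantic treats both spellings identically. A small sketch of the two styles, using an invented model:

from pydantic import BaseModel, Field

class ExampleConfig(BaseModel):
    # Old style would be: tags: Optional[List[str]] = Field(default=None)
    tags: list[str] | None = Field(default=None, description="Optional tags")
    max_rpm: int | None = Field(default=None, description="Requests per minute cap")

print(ExampleConfig(tags=["a", "b"]).model_dump())
# {'tags': ['a', 'b'], 'max_rpm': None}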
@@ -1,25 +1,8 @@
|
||||
"""Token usage tracking utilities.
|
||||
|
||||
This module provides utilities for tracking token consumption and request
|
||||
metrics during agent execution.
|
||||
"""
|
||||
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
|
||||
|
||||
class TokenProcess:
|
||||
"""Track token usage during agent processing.
|
||||
|
||||
Attributes:
|
||||
total_tokens: Total number of tokens used.
|
||||
prompt_tokens: Number of tokens used in prompts.
|
||||
cached_prompt_tokens: Number of cached prompt tokens used.
|
||||
completion_tokens: Number of tokens used in completions.
|
||||
successful_requests: Number of successful requests made.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize token tracking with zero values."""
|
||||
self.total_tokens: int = 0
|
||||
self.prompt_tokens: int = 0
|
||||
self.cached_prompt_tokens: int = 0
|
||||
@@ -27,45 +10,20 @@ class TokenProcess:
|
||||
self.successful_requests: int = 0
|
||||
|
||||
def sum_prompt_tokens(self, tokens: int) -> None:
|
||||
"""Add prompt tokens to the running totals.
|
||||
|
||||
Args:
|
||||
tokens: Number of prompt tokens to add.
|
||||
"""
|
||||
self.prompt_tokens += tokens
|
||||
self.total_tokens += tokens
|
||||
|
||||
def sum_completion_tokens(self, tokens: int) -> None:
|
||||
"""Add completion tokens to the running totals.
|
||||
|
||||
Args:
|
||||
tokens: Number of completion tokens to add.
|
||||
"""
|
||||
self.completion_tokens += tokens
|
||||
self.total_tokens += tokens
|
||||
|
||||
def sum_cached_prompt_tokens(self, tokens: int) -> None:
|
||||
"""Add cached prompt tokens to the running total.
|
||||
|
||||
Args:
|
||||
tokens: Number of cached prompt tokens to add.
|
||||
"""
|
||||
self.cached_prompt_tokens += tokens
|
||||
|
||||
def sum_successful_requests(self, requests: int) -> None:
|
||||
"""Add successful requests to the running total.
|
||||
|
||||
Args:
|
||||
requests: Number of successful requests to add.
|
||||
"""
|
||||
self.successful_requests += requests
|
||||
|
||||
def get_summary(self) -> UsageMetrics:
|
||||
"""Get a summary of all tracked metrics.
|
||||
|
||||
Returns:
|
||||
UsageMetrics object with current totals.
|
||||
"""
|
||||
return UsageMetrics(
|
||||
total_tokens=self.total_tokens,
|
||||
prompt_tokens=self.prompt_tokens,
|
||||
|
||||
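For reference, `TokenProcess` is driven by calling the `sum_*` helpers as usage accumulates and reading totals back with `get_summary()`. A short sketch follows; the import path is an assumption.

# Import path assumed for illustration.
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess

tracker = TokenProcess()
tracker.sum_prompt_tokens(120)        # adds to prompt_tokens and total_tokens
tracker.sum_completion_tokens(45)     # adds to completion_tokens and total_tokens
tracker.sum_successful_requests(1)
print(tracker.get_summary())          # UsageMetrics(total_tokens=165, ...)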
@@ -2,8 +2,6 @@ import subprocess

import click

from crewai.cli.subprocess_utils import run_command


def evaluate_crew(n_iterations: int, model: str) -> None:
    """
@@ -19,7 +17,7 @@ def evaluate_crew(n_iterations: int, model: str) -> None:
    if n_iterations <= 0:
        raise ValueError("The number of iterations must be a positive integer.")

    result = run_command(command, capture_output=False, text=True, check=True)
    result = subprocess.run(command, capture_output=False, text=True, check=True)

    if result.stderr:
        click.echo(result.stderr, err=True)
@@ -1,8 +1,6 @@
import subprocess
from functools import lru_cache

from crewai.cli.subprocess_utils import run_command


class Repository:
    def __init__(self, path="."):
@@ -19,7 +17,7 @@ class Repository:
    def is_git_installed(self) -> bool:
        """Check if Git is installed and available in the system."""
        try:
            run_command(
            subprocess.run(
                ["git", "--version"], capture_output=True, check=True, text=True
            )
            return True
@@ -28,29 +26,24 @@

    def fetch(self) -> None:
        """Fetch latest updates from the remote."""
        run_command(["git", "fetch"], cwd=self.path, check=True)
        subprocess.run(["git", "fetch"], cwd=self.path, check=True)

    def status(self) -> str:
        """Get the git status in porcelain format."""
        result = run_command(
        return subprocess.check_output(
            ["git", "status", "--branch", "--porcelain"],
            cwd=self.path,
            capture_output=True,
            text=True,
            check=True,
        )
        return result.stdout.strip()
            encoding="utf-8",
        ).strip()

    @lru_cache(maxsize=None)
    def is_git_repo(self) -> bool:
        """Check if the current directory is a git repository."""
        try:
            run_command(
            subprocess.check_output(
                ["git", "rev-parse", "--is-inside-work-tree"],
                cwd=self.path,
                capture_output=True,
                text=True,
                check=True,
                encoding="utf-8",
            )
            return True
        except subprocess.CalledProcessError:
@@ -77,7 +70,7 @@
    def origin_url(self) -> str | None:
        """Get the Git repository's remote URL."""
        try:
            result = run_command(
            result = subprocess.run(
                ["git", "remote", "get-url", "origin"],
                cwd=self.path,
                capture_output=True,
@@ -2,8 +2,6 @@ import subprocess

import click

from crewai.cli.subprocess_utils import run_command


# Be mindful about changing this.
# on some environments we don't use this command but instead uv sync directly
@@ -14,8 +12,8 @@ def install_crew(proxy_options: list[str]) -> None:
    Install the crew by running the UV command to lock and install.
    """
    try:
        command = ["uv", "sync", *proxy_options]
        run_command(command, check=True, capture_output=False, text=True)
        command = ["uv", "sync"] + proxy_options
        subprocess.run(command, check=True, capture_output=False, text=True)

    except subprocess.CalledProcessError as e:
        click.echo(f"An error occurred while running the crew: {e}", err=True)
@@ -2,8 +2,6 @@ import subprocess

import click

from crewai.cli.subprocess_utils import run_command


def kickoff_flow() -> None:
    """
@@ -12,7 +10,7 @@ def kickoff_flow() -> None:
    command = ["uv", "run", "kickoff"]

    try:
        result = run_command(command, capture_output=False, text=True, check=True)
        result = subprocess.run(command, capture_output=False, text=True, check=True)

        if result.stderr:
            click.echo(result.stderr, err=True)
@@ -2,8 +2,6 @@ import subprocess
|
||||
|
||||
import click
|
||||
|
||||
from crewai.cli.subprocess_utils import run_command
|
||||
|
||||
|
||||
def plot_flow() -> None:
|
||||
"""
|
||||
@@ -12,7 +10,7 @@ def plot_flow() -> None:
|
||||
command = ["uv", "run", "plot"]
|
||||
|
||||
try:
|
||||
result = run_command(command, capture_output=False, text=True, check=True)
|
||||
result = subprocess.run(command, capture_output=False, text=True, check=True)
|
||||
|
||||
if result.stderr:
|
||||
click.echo(result.stderr, err=True)
|
||||
|
||||
@@ -2,8 +2,6 @@ import subprocess
|
||||
|
||||
import click
|
||||
|
||||
from crewai.cli.subprocess_utils import run_command
|
||||
|
||||
|
||||
def replay_task_command(task_id: str) -> None:
|
||||
"""
|
||||
@@ -15,7 +13,7 @@ def replay_task_command(task_id: str) -> None:
|
||||
command = ["uv", "run", "replay", task_id]
|
||||
|
||||
try:
|
||||
result = run_command(command, capture_output=False, text=True, check=True)
|
||||
result = subprocess.run(command, capture_output=False, text=True, check=True)
|
||||
if result.stderr:
|
||||
click.echo(result.stderr, err=True)
|
||||
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
import subprocess
|
||||
from enum import Enum
|
||||
from typing import List, Optional
|
||||
|
||||
import click
|
||||
from packaging import version
|
||||
|
||||
from crewai.cli.utils import read_toml
|
||||
from crewai.cli.version import get_crewai_version
|
||||
from crewai.cli.subprocess_utils import run_command
|
||||
|
||||
|
||||
class CrewType(Enum):
|
||||
@@ -57,7 +57,7 @@ def execute_command(crew_type: CrewType) -> None:
|
||||
command = ["uv", "run", "kickoff" if crew_type == CrewType.FLOW else "run_crew"]
|
||||
|
||||
try:
|
||||
run_command(command, capture_output=False, text=True, check=True)
|
||||
subprocess.run(command, capture_output=False, text=True, check=True)
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
handle_error(e, crew_type)
|
||||
|
||||
@@ -1,60 +0,0 @@
|
||||
import platform
|
||||
import subprocess
|
||||
from typing import Any
|
||||
|
||||
|
||||
def run_command(
|
||||
command: list[str],
|
||||
capture_output: bool = False,
|
||||
text: bool = True,
|
||||
check: bool = True,
|
||||
cwd: str | None = None,
|
||||
env: dict[str, str] | None = None,
|
||||
**kwargs: Any
|
||||
) -> subprocess.CompletedProcess:
|
||||
"""
|
||||
Cross-platform subprocess execution with Windows compatibility.
|
||||
|
||||
On Windows, uses shell=True to avoid permission issues with restrictive
|
||||
security policies. On other platforms, uses the standard approach.
|
||||
|
||||
Args:
|
||||
command: List of command arguments
|
||||
capture_output: Whether to capture stdout/stderr
|
||||
text: Whether to use text mode
|
||||
check: Whether to raise CalledProcessError on non-zero exit
|
||||
cwd: Working directory
|
||||
env: Environment variables
|
||||
**kwargs: Additional subprocess.run arguments
|
||||
|
||||
Returns:
|
||||
CompletedProcess instance
|
||||
|
||||
Raises:
|
||||
subprocess.CalledProcessError: If check=True and command fails
|
||||
"""
|
||||
if platform.system() == "Windows":
|
||||
if isinstance(command, list):
|
||||
command_str = subprocess.list2cmdline(command)
|
||||
else:
|
||||
command_str = command
|
||||
|
||||
return subprocess.run(
|
||||
command_str,
|
||||
shell=True,
|
||||
capture_output=capture_output,
|
||||
text=text,
|
||||
check=check,
|
||||
cwd=cwd,
|
||||
env=env,
|
||||
**kwargs
|
||||
)
|
||||
return subprocess.run(
|
||||
command,
|
||||
capture_output=capture_output,
|
||||
text=text,
|
||||
check=check,
|
||||
cwd=cwd,
|
||||
env=env,
|
||||
**kwargs
|
||||
)
|
||||
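The helper keeps the subprocess.run-style signature, so call sites read the same on every platform; a minimal usage sketch:

    from crewai.cli.subprocess_utils import run_command

    # Capture the current branch name; check=True raises CalledProcessError on failure.
    result = run_command(["git", "rev-parse", "--abbrev-ref", "HEAD"], capture_output=True)
    print(result.stdout.strip())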
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.186.1,<1.0.0"
|
||||
"crewai[tools]>=0.177.0,<1.0.0"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.186.1,<1.0.0",
|
||||
"crewai[tools]>=0.177.0,<1.0.0",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.186.1"
|
||||
"crewai[tools]>=0.177.0"
|
||||
]
|
||||
|
||||
[tool.crewai]
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import base64
|
||||
import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
@@ -10,7 +11,6 @@ from rich.console import Console
|
||||
from crewai.cli import git
|
||||
from crewai.cli.command import BaseCommand, PlusAPIMixin
|
||||
from crewai.cli.config import Settings
|
||||
from crewai.cli.subprocess_utils import run_command
|
||||
from crewai.cli.utils import (
|
||||
extract_available_exports,
|
||||
get_project_description,
|
||||
@@ -56,7 +56,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
os.chdir(project_root)
|
||||
try:
|
||||
self.login()
|
||||
run_command(["git", "init"], check=True)
|
||||
subprocess.run(["git", "init"], check=True)
|
||||
console.print(
|
||||
f"[green]Created custom tool [bold]{folder_name}[/bold]. Run [bold]cd {project_root}[/bold] to start working.[/green]"
|
||||
)
|
||||
@@ -94,7 +94,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
self._print_current_organization()
|
||||
|
||||
with tempfile.TemporaryDirectory() as temp_build_dir:
|
||||
run_command(
|
||||
subprocess.run(
|
||||
["uv", "build", "--sdist", "--out-dir", temp_build_dir],
|
||||
check=True,
|
||||
capture_output=False,
|
||||
@@ -196,7 +196,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
else:
|
||||
add_package_command.extend(["--index", index, tool_handle])
|
||||
|
||||
add_package_result = run_command(
|
||||
add_package_result = subprocess.run(
|
||||
add_package_command,
|
||||
capture_output=False,
|
||||
env=self._build_env_with_credentials(repository_handle),
|
||||
|
||||
@@ -2,8 +2,6 @@ import subprocess
|
||||
|
||||
import click
|
||||
|
||||
from crewai.cli.subprocess_utils import run_command
|
||||
|
||||
|
||||
def train_crew(n_iterations: int, filename: str) -> None:
|
||||
"""
|
||||
@@ -21,7 +19,7 @@ def train_crew(n_iterations: int, filename: str) -> None:
|
||||
if not filename.endswith(".pkl"):
|
||||
raise ValueError("The filename must not end with .pkl")
|
||||
|
||||
result = run_command(command, capture_output=False, text=True, check=True)
|
||||
result = subprocess.run(command, capture_output=False, text=True, check=True)
|
||||
|
||||
if result.stderr:
|
||||
click.echo(result.stderr, err=True)
|
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
import os
|
||||
import contextvars
|
||||
from typing import Optional
|
||||
from contextlib import contextmanager
|
||||
|
||||
_platform_integration_token: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar(
|
||||
"platform_integration_token", default=None
|
||||
)
|
||||
|
||||
def set_platform_integration_token(integration_token: str) -> None:
|
||||
_platform_integration_token.set(integration_token)
|
||||
|
||||
def get_platform_integration_token() -> Optional[str]:
|
||||
token = _platform_integration_token.get()
|
||||
if token is None:
|
||||
token = os.getenv("CREWAI_PLATFORM_INTEGRATION_TOKEN")
|
||||
return token
|
||||
|
||||
@contextmanager
|
||||
def platform_context(integration_token: str):
|
||||
token = _platform_integration_token.set(integration_token)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
_platform_integration_token.reset(token)
|
||||
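A minimal sketch of how the context-variable helpers above are meant to be used (the module path is not shown in this hunk, so the import below is an assumption):

    from crewai.utilities.platform_context import (  # assumed module path
        get_platform_integration_token,
        platform_context,
    )

    with platform_context("my-integration-token"):
        assert get_platform_integration_token() == "my-integration-token"
    # Outside the block, the value falls back to the CREWAI_PLATFORM_INTEGRATION_TOKEN env var, if set.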
@@ -3,26 +3,17 @@ import json
|
||||
import re
|
||||
import uuid
|
||||
import warnings
|
||||
from collections.abc import Callable
|
||||
from concurrent.futures import Future
|
||||
from copy import copy as shallow_copy
|
||||
from hashlib import md5
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
List,
|
||||
Optional,
|
||||
Set,
|
||||
Tuple,
|
||||
Union,
|
||||
cast,
|
||||
)
|
||||
|
||||
from opentelemetry import baggage
|
||||
from opentelemetry.context import attach, detach
|
||||
|
||||
from crewai.utilities.crew.models import CrewContext
|
||||
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
BaseModel,
|
||||
@@ -39,6 +30,25 @@ from crewai.agent import Agent
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.agents.cache import CacheHandler
|
||||
from crewai.crews.crew_output import CrewOutput
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.event_listener import EventListener
|
||||
from crewai.events.listeners.tracing.trace_listener import (
|
||||
TraceCollectionListener,
|
||||
)
|
||||
from crewai.events.listeners.tracing.utils import (
|
||||
is_tracing_enabled,
|
||||
)
|
||||
from crewai.events.types.crew_events import (
|
||||
CrewKickoffCompletedEvent,
|
||||
CrewKickoffFailedEvent,
|
||||
CrewKickoffStartedEvent,
|
||||
CrewTestCompletedEvent,
|
||||
CrewTestFailedEvent,
|
||||
CrewTestStartedEvent,
|
||||
CrewTrainCompletedEvent,
|
||||
CrewTrainFailedEvent,
|
||||
CrewTrainStartedEvent,
|
||||
)
|
||||
from crewai.flow.flow_trackable import FlowTrackable
|
||||
from crewai.knowledge.knowledge import Knowledge
|
||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
@@ -57,29 +67,9 @@ from crewai.tools.base_tool import BaseTool, Tool
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
from crewai.utilities import I18N, FileHandler, Logger, RPMController
|
||||
from crewai.utilities.constants import NOT_SPECIFIED, TRAINING_DATA_FILE
|
||||
from crewai.utilities.crew.models import CrewContext
|
||||
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
|
||||
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
|
||||
from crewai.events.types.crew_events import (
|
||||
CrewKickoffCompletedEvent,
|
||||
CrewKickoffFailedEvent,
|
||||
CrewKickoffStartedEvent,
|
||||
CrewTestCompletedEvent,
|
||||
CrewTestFailedEvent,
|
||||
CrewTestStartedEvent,
|
||||
CrewTrainCompletedEvent,
|
||||
CrewTrainFailedEvent,
|
||||
CrewTrainStartedEvent,
|
||||
)
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.event_listener import EventListener
|
||||
from crewai.events.listeners.tracing.trace_listener import (
|
||||
TraceCollectionListener,
|
||||
)
|
||||
|
||||
|
||||
from crewai.events.listeners.tracing.utils import (
|
||||
is_tracing_enabled,
|
||||
)
|
||||
from crewai.utilities.formatter import (
|
||||
aggregate_raw_outputs_from_task_outputs,
|
||||
aggregate_raw_outputs_from_tasks,
|
||||
@@ -124,13 +114,13 @@ class Crew(FlowTrackable, BaseModel):
|
||||
_logger: Logger = PrivateAttr()
|
||||
_file_handler: FileHandler = PrivateAttr()
|
||||
_cache_handler: InstanceOf[CacheHandler] = PrivateAttr(default=CacheHandler())
|
||||
_short_term_memory: Optional[InstanceOf[ShortTermMemory]] = PrivateAttr()
|
||||
_long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
|
||||
_entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr()
|
||||
_external_memory: Optional[InstanceOf[ExternalMemory]] = PrivateAttr()
|
||||
_train: Optional[bool] = PrivateAttr(default=False)
|
||||
_train_iteration: Optional[int] = PrivateAttr()
|
||||
_inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)
|
||||
_short_term_memory: InstanceOf[ShortTermMemory] | None = PrivateAttr()
|
||||
_long_term_memory: InstanceOf[LongTermMemory] | None = PrivateAttr()
|
||||
_entity_memory: InstanceOf[EntityMemory] | None = PrivateAttr()
|
||||
_external_memory: InstanceOf[ExternalMemory] | None = PrivateAttr()
|
||||
_train: bool | None = PrivateAttr(default=False)
|
||||
_train_iteration: int | None = PrivateAttr()
|
||||
_inputs: dict[str, Any] | None = PrivateAttr(default=None)
|
||||
_logging_color: str = PrivateAttr(
|
||||
default="bold_purple",
|
||||
)
|
||||
@@ -138,107 +128,107 @@ class Crew(FlowTrackable, BaseModel):
|
||||
default_factory=TaskOutputStorageHandler
|
||||
)
|
||||
|
||||
name: Optional[str] = Field(default="crew")
|
||||
name: str | None = Field(default="crew")
|
||||
cache: bool = Field(default=True)
|
||||
tasks: List[Task] = Field(default_factory=list)
|
||||
agents: List[BaseAgent] = Field(default_factory=list)
|
||||
tasks: list[Task] = Field(default_factory=list)
|
||||
agents: list[BaseAgent] = Field(default_factory=list)
|
||||
process: Process = Field(default=Process.sequential)
|
||||
verbose: bool = Field(default=False)
|
||||
memory: bool = Field(
|
||||
default=False,
|
||||
description="Whether the crew should use memory to store memories of it's execution",
|
||||
)
|
||||
short_term_memory: Optional[InstanceOf[ShortTermMemory]] = Field(
|
||||
short_term_memory: InstanceOf[ShortTermMemory] | None = Field(
|
||||
default=None,
|
||||
description="An Instance of the ShortTermMemory to be used by the Crew",
|
||||
)
|
||||
long_term_memory: Optional[InstanceOf[LongTermMemory]] = Field(
|
||||
long_term_memory: InstanceOf[LongTermMemory] | None = Field(
|
||||
default=None,
|
||||
description="An Instance of the LongTermMemory to be used by the Crew",
|
||||
)
|
||||
entity_memory: Optional[InstanceOf[EntityMemory]] = Field(
|
||||
entity_memory: InstanceOf[EntityMemory] | None = Field(
|
||||
default=None,
|
||||
description="An Instance of the EntityMemory to be used by the Crew",
|
||||
)
|
||||
external_memory: Optional[InstanceOf[ExternalMemory]] = Field(
|
||||
external_memory: InstanceOf[ExternalMemory] | None = Field(
|
||||
default=None,
|
||||
description="An Instance of the ExternalMemory to be used by the Crew",
|
||||
)
|
||||
embedder: Optional[dict] = Field(
|
||||
embedder: dict | None = Field(
|
||||
default=None,
|
||||
description="Configuration for the embedder to be used for the crew.",
|
||||
)
|
||||
usage_metrics: Optional[UsageMetrics] = Field(
|
||||
usage_metrics: UsageMetrics | None = Field(
|
||||
default=None,
|
||||
description="Metrics for the LLM usage during all tasks execution.",
|
||||
)
|
||||
manager_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
|
||||
manager_llm: str | InstanceOf[BaseLLM] | Any | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
manager_agent: Optional[BaseAgent] = Field(
|
||||
manager_agent: BaseAgent | None = Field(
|
||||
description="Custom agent that will be used as manager.", default=None
|
||||
)
|
||||
function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
|
||||
function_calling_llm: str | InstanceOf[LLM] | Any | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
|
||||
config: Json | dict[str, Any] | None = Field(default=None)
|
||||
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
|
||||
share_crew: Optional[bool] = Field(default=False)
|
||||
step_callback: Optional[Any] = Field(
|
||||
share_crew: bool | None = Field(default=False)
|
||||
step_callback: Any | None = Field(
|
||||
default=None,
|
||||
description="Callback to be executed after each step for all agents execution.",
|
||||
)
|
||||
task_callback: Optional[Any] = Field(
|
||||
task_callback: Any | None = Field(
|
||||
default=None,
|
||||
description="Callback to be executed after each task for all agents execution.",
|
||||
)
|
||||
before_kickoff_callbacks: List[
|
||||
Callable[[Optional[Dict[str, Any]]], Optional[Dict[str, Any]]]
|
||||
before_kickoff_callbacks: list[
|
||||
Callable[[dict[str, Any] | None], dict[str, Any] | None]
|
||||
] = Field(
|
||||
default_factory=list,
|
||||
description="List of callbacks to be executed before crew kickoff. It may be used to adjust inputs before the crew is executed.",
|
||||
)
|
||||
after_kickoff_callbacks: List[Callable[[CrewOutput], CrewOutput]] = Field(
|
||||
after_kickoff_callbacks: list[Callable[[CrewOutput], CrewOutput]] = Field(
|
||||
default_factory=list,
|
||||
description="List of callbacks to be executed after crew kickoff. It may be used to adjust the output of the crew.",
|
||||
)
|
||||
max_rpm: Optional[int] = Field(
|
||||
max_rpm: int | None = Field(
|
||||
default=None,
|
||||
description="Maximum number of requests per minute for the crew execution to be respected.",
|
||||
)
|
||||
prompt_file: Optional[str] = Field(
|
||||
prompt_file: str | None = Field(
|
||||
default=None,
|
||||
description="Path to the prompt json file to be used for the crew.",
|
||||
)
|
||||
output_log_file: Optional[Union[bool, str]] = Field(
|
||||
output_log_file: bool | str | None = Field(
|
||||
default=None,
|
||||
description="Path to the log file to be saved",
|
||||
)
|
||||
planning: Optional[bool] = Field(
|
||||
planning: bool | None = Field(
|
||||
default=False,
|
||||
description="Plan the crew execution and add the plan to the crew.",
|
||||
)
|
||||
planning_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
|
||||
planning_llm: str | InstanceOf[BaseLLM] | Any | None = Field(
|
||||
default=None,
|
||||
description="Language model that will run the AgentPlanner if planning is True.",
|
||||
)
|
||||
task_execution_output_json_files: Optional[List[str]] = Field(
|
||||
task_execution_output_json_files: list[str] | None = Field(
|
||||
default=None,
|
||||
description="List of file paths for task execution JSON files.",
|
||||
)
|
||||
execution_logs: List[Dict[str, Any]] = Field(
|
||||
execution_logs: list[dict[str, Any]] = Field(
|
||||
default=[],
|
||||
description="List of execution logs for tasks",
|
||||
)
|
||||
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
|
||||
knowledge_sources: list[BaseKnowledgeSource] | None = Field(
|
||||
default=None,
|
||||
description="Knowledge sources for the crew. Add knowledge sources to the knowledge object.",
|
||||
)
|
||||
chat_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
|
||||
chat_llm: str | InstanceOf[BaseLLM] | Any | None = Field(
|
||||
default=None,
|
||||
description="LLM used to handle chatting with the crew.",
|
||||
)
|
||||
knowledge: Optional[Knowledge] = Field(
|
||||
knowledge: Knowledge | None = Field(
|
||||
default=None,
|
||||
description="Knowledge for the crew.",
|
||||
)
|
||||
@@ -246,18 +236,18 @@ class Crew(FlowTrackable, BaseModel):
|
||||
default_factory=SecurityConfig,
|
||||
description="Security configuration for the crew, including fingerprinting.",
|
||||
)
|
||||
token_usage: Optional[UsageMetrics] = Field(
|
||||
token_usage: UsageMetrics | None = Field(
|
||||
default=None,
|
||||
description="Metrics for the LLM usage during all tasks execution.",
|
||||
)
|
||||
tracing: Optional[bool] = Field(
|
||||
tracing: bool | None = Field(
|
||||
default=False,
|
||||
description="Whether to enable tracing for the crew.",
|
||||
)
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
|
||||
def _deny_user_set_id(cls, v: UUID4 | None) -> None:
|
||||
"""Prevent manual setting of the 'id' field by users."""
|
||||
if v:
|
||||
raise PydanticCustomError(
|
||||
@@ -267,8 +257,8 @@ class Crew(FlowTrackable, BaseModel):
|
||||
@field_validator("config", mode="before")
|
||||
@classmethod
|
||||
def check_config_type(
|
||||
cls, v: Union[Json, Dict[str, Any]]
|
||||
) -> Union[Json, Dict[str, Any]]:
|
||||
cls, v: Json | dict[str, Any]
|
||||
) -> Json | dict[str, Any]:
|
||||
"""Validates that the config is a valid type.
|
||||
Args:
|
||||
v: The config to be validated.
|
||||
@@ -298,8 +288,17 @@ class Crew(FlowTrackable, BaseModel):
|
||||
if self.function_calling_llm and not isinstance(self.function_calling_llm, LLM):
|
||||
self.function_calling_llm = create_llm(self.function_calling_llm)
|
||||
|
||||
# Initialize responsibility system
|
||||
from crewai.responsibility.system import ResponsibilitySystem
|
||||
self._responsibility_system = ResponsibilitySystem()
|
||||
|
||||
return self
|
||||
|
||||
@property
|
||||
def responsibility_system(self):
|
||||
"""Get the responsibility tracking system for this crew."""
|
||||
return getattr(self, '_responsibility_system', None)
|
||||
|
||||
def _initialize_default_memories(self):
|
||||
self._long_term_memory = self._long_term_memory or LongTermMemory()
|
||||
self._short_term_memory = self._short_term_memory or ShortTermMemory(
|
||||
@@ -314,7 +313,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
def create_crew_memory(self) -> "Crew":
|
||||
"""Initialize private memory attributes."""
|
||||
self._external_memory = (
|
||||
# External memory doesn’t support a default value since it was designed to be managed entirely externally
|
||||
# External memory doesn't support a default value since it was designed to be managed entirely externally
|
||||
self.external_memory.set_crew(self) if self.external_memory else None
|
||||
)
|
||||
|
||||
@@ -389,6 +388,8 @@ class Crew(FlowTrackable, BaseModel):
|
||||
agent.set_cache_handler(self._cache_handler)
|
||||
if self.max_rpm:
|
||||
agent.set_rpm_controller(self._rpm_controller)
|
||||
if self.responsibility_system:
|
||||
agent.set_responsibility_system(self.responsibility_system)
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
@@ -502,7 +503,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
|
||||
@property
|
||||
def key(self) -> str:
|
||||
source: List[str] = [agent.key for agent in self.agents] + [
|
||||
source: list[str] = [agent.key for agent in self.agents] + [
|
||||
task.key for task in self.tasks
|
||||
]
|
||||
return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()
|
||||
@@ -530,7 +531,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
self.agents = [Agent(**agent) for agent in self.config["agents"]]
|
||||
self.tasks = [self._create_task(task) for task in self.config["tasks"]]
|
||||
|
||||
def _create_task(self, task_config: Dict[str, Any]) -> Task:
|
||||
def _create_task(self, task_config: dict[str, Any]) -> Task:
|
||||
"""Creates a task instance from its configuration.
|
||||
|
||||
Args:
|
||||
@@ -559,7 +560,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
CrewTrainingHandler(filename).initialize_file()
|
||||
|
||||
def train(
|
||||
self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = None
|
||||
self, n_iterations: int, filename: str, inputs: dict[str, Any] | None = None
|
||||
) -> None:
|
||||
"""Trains the crew for a given number of iterations."""
|
||||
inputs = inputs or {}
|
||||
@@ -611,7 +612,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
|
||||
def kickoff(
|
||||
self,
|
||||
inputs: Optional[Dict[str, Any]] = None,
|
||||
inputs: dict[str, Any] | None = None,
|
||||
) -> CrewOutput:
|
||||
ctx = baggage.set_baggage(
|
||||
"crew_context", CrewContext(id=str(self.id), key=self.key)
|
||||
@@ -682,9 +683,9 @@ class Crew(FlowTrackable, BaseModel):
|
||||
finally:
|
||||
detach(token)
|
||||
|
||||
def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List[CrewOutput]:
|
||||
def kickoff_for_each(self, inputs: list[dict[str, Any]]) -> list[CrewOutput]:
|
||||
"""Executes the Crew's workflow for each input in the list and aggregates results."""
|
||||
results: List[CrewOutput] = []
|
||||
results: list[CrewOutput] = []
|
||||
|
||||
# Initialize the parent crew's usage metrics
|
||||
total_usage_metrics = UsageMetrics()
|
||||
@@ -704,13 +705,13 @@ class Crew(FlowTrackable, BaseModel):
|
||||
return results
|
||||
|
||||
async def kickoff_async(
|
||||
self, inputs: Optional[Dict[str, Any]] = None
|
||||
self, inputs: dict[str, Any] | None = None
|
||||
) -> CrewOutput:
|
||||
"""Asynchronous kickoff method to start the crew execution."""
|
||||
inputs = inputs or {}
|
||||
return await asyncio.to_thread(self.kickoff, inputs)
|
||||
|
||||
async def kickoff_for_each_async(self, inputs: List[Dict]) -> List[CrewOutput]:
|
||||
async def kickoff_for_each_async(self, inputs: list[dict]) -> list[CrewOutput]:
|
||||
crew_copies = [self.copy() for _ in inputs]
|
||||
|
||||
async def run_crew(crew, input_data):
|
||||
@@ -739,7 +740,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
tasks=self.tasks, planning_agent_llm=self.planning_llm
|
||||
)._handle_crew_planning()
|
||||
|
||||
for task, step_plan in zip(self.tasks, result.list_of_plans_per_task):
|
||||
for task, step_plan in zip(self.tasks, result.list_of_plans_per_task, strict=False):
|
||||
task.description += step_plan.plan
|
||||
|
||||
def _store_execution_log(
|
||||
@@ -778,6 +779,10 @@ class Crew(FlowTrackable, BaseModel):
|
||||
def _run_hierarchical_process(self) -> CrewOutput:
|
||||
"""Creates and assigns a manager agent to make sure the crew completes the tasks."""
|
||||
self._create_manager_agent()
|
||||
|
||||
if self.manager_agent and self.responsibility_system:
|
||||
self.manager_agent.set_responsibility_system(self.responsibility_system)
|
||||
|
||||
return self._execute_tasks(self.tasks)
|
||||
|
||||
def _create_manager_agent(self):
|
||||
@@ -807,8 +812,8 @@ class Crew(FlowTrackable, BaseModel):
|
||||
|
||||
def _execute_tasks(
|
||||
self,
|
||||
tasks: List[Task],
|
||||
start_index: Optional[int] = 0,
|
||||
tasks: list[Task],
|
||||
start_index: int | None = 0,
|
||||
was_replayed: bool = False,
|
||||
) -> CrewOutput:
|
||||
"""Executes tasks sequentially and returns the final output.
|
||||
@@ -821,9 +826,9 @@ class Crew(FlowTrackable, BaseModel):
|
||||
CrewOutput: Final output of the crew
|
||||
"""
|
||||
|
||||
task_outputs: List[TaskOutput] = []
|
||||
futures: List[Tuple[Task, Future[TaskOutput], int]] = []
|
||||
last_sync_output: Optional[TaskOutput] = None
|
||||
task_outputs: list[TaskOutput] = []
|
||||
futures: list[tuple[Task, Future[TaskOutput], int]] = []
|
||||
last_sync_output: TaskOutput | None = None
|
||||
|
||||
for task_index, task in enumerate(tasks):
|
||||
if start_index is not None and task_index < start_index:
|
||||
@@ -847,7 +852,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
tools_for_task = self._prepare_tools(
|
||||
agent_to_use,
|
||||
task,
|
||||
cast(Union[List[Tool], List[BaseTool]], tools_for_task),
|
||||
cast(list[Tool] | list[BaseTool], tools_for_task),
|
||||
)
|
||||
|
||||
self._log_task_start(task, agent_to_use.role)
|
||||
@@ -867,7 +872,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
future = task.execute_async(
|
||||
agent=agent_to_use,
|
||||
context=context,
|
||||
tools=cast(List[BaseTool], tools_for_task),
|
||||
tools=cast(list[BaseTool], tools_for_task),
|
||||
)
|
||||
futures.append((task, future, task_index))
|
||||
else:
|
||||
@@ -879,7 +884,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
task_output = task.execute_sync(
|
||||
agent=agent_to_use,
|
||||
context=context,
|
||||
tools=cast(List[BaseTool], tools_for_task),
|
||||
tools=cast(list[BaseTool], tools_for_task),
|
||||
)
|
||||
task_outputs.append(task_output)
|
||||
self._process_task_result(task, task_output)
|
||||
@@ -893,11 +898,11 @@ class Crew(FlowTrackable, BaseModel):
|
||||
def _handle_conditional_task(
|
||||
self,
|
||||
task: ConditionalTask,
|
||||
task_outputs: List[TaskOutput],
|
||||
futures: List[Tuple[Task, Future[TaskOutput], int]],
|
||||
task_outputs: list[TaskOutput],
|
||||
futures: list[tuple[Task, Future[TaskOutput], int]],
|
||||
task_index: int,
|
||||
was_replayed: bool,
|
||||
) -> Optional[TaskOutput]:
|
||||
) -> TaskOutput | None:
|
||||
if futures:
|
||||
task_outputs = self._process_async_tasks(futures, was_replayed)
|
||||
futures.clear()
|
||||
@@ -917,8 +922,8 @@ class Crew(FlowTrackable, BaseModel):
|
||||
return None
|
||||
|
||||
def _prepare_tools(
|
||||
self, agent: BaseAgent, task: Task, tools: Union[List[Tool], List[BaseTool]]
|
||||
) -> List[BaseTool]:
|
||||
self, agent: BaseAgent, task: Task, tools: list[Tool] | list[BaseTool]
|
||||
) -> list[BaseTool]:
|
||||
# Add delegation tools if agent allows delegation
|
||||
if hasattr(agent, "allow_delegation") and getattr(
|
||||
agent, "allow_delegation", False
|
||||
@@ -948,21 +953,21 @@ class Crew(FlowTrackable, BaseModel):
|
||||
tools = self._add_multimodal_tools(agent, tools)
|
||||
|
||||
# Return a List[BaseTool] which is compatible with both Task.execute_sync and Task.execute_async
|
||||
return cast(List[BaseTool], tools)
|
||||
return cast(list[BaseTool], tools)
|
||||
|
||||
def _get_agent_to_use(self, task: Task) -> Optional[BaseAgent]:
|
||||
def _get_agent_to_use(self, task: Task) -> BaseAgent | None:
|
||||
if self.process == Process.hierarchical:
|
||||
return self.manager_agent
|
||||
return task.agent
|
||||
|
||||
def _merge_tools(
|
||||
self,
|
||||
existing_tools: Union[List[Tool], List[BaseTool]],
|
||||
new_tools: Union[List[Tool], List[BaseTool]],
|
||||
) -> List[BaseTool]:
|
||||
existing_tools: list[Tool] | list[BaseTool],
|
||||
new_tools: list[Tool] | list[BaseTool],
|
||||
) -> list[BaseTool]:
|
||||
"""Merge new tools into existing tools list, avoiding duplicates by tool name."""
|
||||
if not new_tools:
|
||||
return cast(List[BaseTool], existing_tools)
|
||||
return cast(list[BaseTool], existing_tools)
|
||||
|
||||
# Create mapping of tool names to new tools
|
||||
new_tool_map = {tool.name: tool for tool in new_tools}
|
||||
@@ -973,41 +978,41 @@ class Crew(FlowTrackable, BaseModel):
|
||||
# Add all new tools
|
||||
tools.extend(new_tools)
|
||||
|
||||
return cast(List[BaseTool], tools)
|
||||
return cast(list[BaseTool], tools)
|
||||
|
||||
def _inject_delegation_tools(
|
||||
self,
|
||||
tools: Union[List[Tool], List[BaseTool]],
|
||||
tools: list[Tool] | list[BaseTool],
|
||||
task_agent: BaseAgent,
|
||||
agents: List[BaseAgent],
|
||||
) -> List[BaseTool]:
|
||||
agents: list[BaseAgent],
|
||||
) -> list[BaseTool]:
|
||||
if hasattr(task_agent, "get_delegation_tools"):
|
||||
delegation_tools = task_agent.get_delegation_tools(agents)
|
||||
# Cast delegation_tools to the expected type for _merge_tools
|
||||
return self._merge_tools(tools, cast(List[BaseTool], delegation_tools))
|
||||
return cast(List[BaseTool], tools)
|
||||
return self._merge_tools(tools, cast(list[BaseTool], delegation_tools))
|
||||
return cast(list[BaseTool], tools)
|
||||
|
||||
def _add_multimodal_tools(
|
||||
self, agent: BaseAgent, tools: Union[List[Tool], List[BaseTool]]
|
||||
) -> List[BaseTool]:
|
||||
self, agent: BaseAgent, tools: list[Tool] | list[BaseTool]
|
||||
) -> list[BaseTool]:
|
||||
if hasattr(agent, "get_multimodal_tools"):
|
||||
multimodal_tools = agent.get_multimodal_tools()
|
||||
# Cast multimodal_tools to the expected type for _merge_tools
|
||||
return self._merge_tools(tools, cast(List[BaseTool], multimodal_tools))
|
||||
return cast(List[BaseTool], tools)
|
||||
return self._merge_tools(tools, cast(list[BaseTool], multimodal_tools))
|
||||
return cast(list[BaseTool], tools)
|
||||
|
||||
def _add_code_execution_tools(
|
||||
self, agent: BaseAgent, tools: Union[List[Tool], List[BaseTool]]
|
||||
) -> List[BaseTool]:
|
||||
self, agent: BaseAgent, tools: list[Tool] | list[BaseTool]
|
||||
) -> list[BaseTool]:
|
||||
if hasattr(agent, "get_code_execution_tools"):
|
||||
code_tools = agent.get_code_execution_tools()
|
||||
# Cast code_tools to the expected type for _merge_tools
|
||||
return self._merge_tools(tools, cast(List[BaseTool], code_tools))
|
||||
return cast(List[BaseTool], tools)
|
||||
return self._merge_tools(tools, cast(list[BaseTool], code_tools))
|
||||
return cast(list[BaseTool], tools)
|
||||
|
||||
def _add_delegation_tools(
|
||||
self, task: Task, tools: Union[List[Tool], List[BaseTool]]
|
||||
) -> List[BaseTool]:
|
||||
self, task: Task, tools: list[Tool] | list[BaseTool]
|
||||
) -> list[BaseTool]:
|
||||
agents_for_delegation = [agent for agent in self.agents if agent != task.agent]
|
||||
if len(self.agents) > 1 and len(agents_for_delegation) > 0 and task.agent:
|
||||
if not tools:
|
||||
@@ -1015,7 +1020,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
tools = self._inject_delegation_tools(
|
||||
tools, task.agent, agents_for_delegation
|
||||
)
|
||||
return cast(List[BaseTool], tools)
|
||||
return cast(list[BaseTool], tools)
|
||||
|
||||
def _log_task_start(self, task: Task, role: str = "None"):
|
||||
if self.output_log_file:
|
||||
@@ -1024,8 +1029,8 @@ class Crew(FlowTrackable, BaseModel):
|
||||
)
|
||||
|
||||
def _update_manager_tools(
|
||||
self, task: Task, tools: Union[List[Tool], List[BaseTool]]
|
||||
) -> List[BaseTool]:
|
||||
self, task: Task, tools: list[Tool] | list[BaseTool]
|
||||
) -> list[BaseTool]:
|
||||
if self.manager_agent:
|
||||
if task.agent:
|
||||
tools = self._inject_delegation_tools(tools, task.agent, [task.agent])
|
||||
@@ -1033,18 +1038,17 @@ class Crew(FlowTrackable, BaseModel):
|
||||
tools = self._inject_delegation_tools(
|
||||
tools, self.manager_agent, self.agents
|
||||
)
|
||||
return cast(List[BaseTool], tools)
|
||||
return cast(list[BaseTool], tools)
|
||||
|
||||
def _get_context(self, task: Task, task_outputs: List[TaskOutput]) -> str:
|
||||
def _get_context(self, task: Task, task_outputs: list[TaskOutput]) -> str:
|
||||
if not task.context:
|
||||
return ""
|
||||
|
||||
context = (
|
||||
return (
|
||||
aggregate_raw_outputs_from_task_outputs(task_outputs)
|
||||
if task.context is NOT_SPECIFIED
|
||||
else aggregate_raw_outputs_from_tasks(task.context)
|
||||
)
|
||||
return context
|
||||
|
||||
def _process_task_result(self, task: Task, output: TaskOutput) -> None:
|
||||
role = task.agent.role if task.agent is not None else "None"
|
||||
@@ -1057,7 +1061,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
output=output.raw,
|
||||
)
|
||||
|
||||
def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
|
||||
def _create_crew_output(self, task_outputs: list[TaskOutput]) -> CrewOutput:
|
||||
if not task_outputs:
|
||||
raise ValueError("No task outputs available to create crew output.")
|
||||
|
||||
@@ -1088,10 +1092,10 @@ class Crew(FlowTrackable, BaseModel):
|
||||
|
||||
def _process_async_tasks(
|
||||
self,
|
||||
futures: List[Tuple[Task, Future[TaskOutput], int]],
|
||||
futures: list[tuple[Task, Future[TaskOutput], int]],
|
||||
was_replayed: bool = False,
|
||||
) -> List[TaskOutput]:
|
||||
task_outputs: List[TaskOutput] = []
|
||||
) -> list[TaskOutput]:
|
||||
task_outputs: list[TaskOutput] = []
|
||||
for future_task, future, task_index in futures:
|
||||
task_output = future.result()
|
||||
task_outputs.append(task_output)
|
||||
@@ -1102,8 +1106,8 @@ class Crew(FlowTrackable, BaseModel):
|
||||
return task_outputs
|
||||
|
||||
def _find_task_index(
|
||||
self, task_id: str, stored_outputs: List[Any]
|
||||
) -> Optional[int]:
|
||||
self, task_id: str, stored_outputs: list[Any]
|
||||
) -> int | None:
|
||||
return next(
|
||||
(
|
||||
index
|
||||
@@ -1114,7 +1118,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
)
|
||||
|
||||
def replay(
|
||||
self, task_id: str, inputs: Optional[Dict[str, Any]] = None
|
||||
self, task_id: str, inputs: dict[str, Any] | None = None
|
||||
) -> CrewOutput:
|
||||
stored_outputs = self._task_output_handler.load()
|
||||
if not stored_outputs:
|
||||
@@ -1151,19 +1155,18 @@ class Crew(FlowTrackable, BaseModel):
|
||||
self.tasks[i].output = task_output
|
||||
|
||||
self._logging_color = "bold_blue"
|
||||
result = self._execute_tasks(self.tasks, start_index, True)
|
||||
return result
|
||||
return self._execute_tasks(self.tasks, start_index, True)
|
||||
|
||||
def query_knowledge(
|
||||
self, query: List[str], results_limit: int = 3, score_threshold: float = 0.35
|
||||
) -> Union[List[Dict[str, Any]], None]:
|
||||
self, query: list[str], results_limit: int = 3, score_threshold: float = 0.35
|
||||
) -> list[dict[str, Any]] | None:
|
||||
if self.knowledge:
|
||||
return self.knowledge.query(
|
||||
query, results_limit=results_limit, score_threshold=score_threshold
|
||||
)
|
||||
return None
|
||||
|
||||
def fetch_inputs(self) -> Set[str]:
|
||||
def fetch_inputs(self) -> set[str]:
|
||||
"""
|
||||
Gathers placeholders (e.g., {something}) referenced in tasks or agents.
|
||||
Scans each task's 'description' + 'expected_output', and each agent's
|
||||
@@ -1172,7 +1175,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
Returns a set of all discovered placeholder names.
|
||||
"""
|
||||
placeholder_pattern = re.compile(r"\{(.+?)\}")
|
||||
required_inputs: Set[str] = set()
|
||||
required_inputs: set[str] = set()
|
||||
|
||||
# Scan tasks for inputs
|
||||
for task in self.tasks:
|
||||
@@ -1230,7 +1233,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
cloned_tasks.append(cloned_task)
|
||||
task_mapping[task.key] = cloned_task
|
||||
|
||||
for cloned_task, original_task in zip(cloned_tasks, self.tasks):
|
||||
for cloned_task, original_task in zip(cloned_tasks, self.tasks, strict=False):
|
||||
if isinstance(original_task.context, list):
|
||||
cloned_context = [
|
||||
task_mapping[context_task.key]
|
||||
@@ -1256,7 +1259,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
copied_data.pop("agents", None)
|
||||
copied_data.pop("tasks", None)
|
||||
|
||||
copied_crew = Crew(
|
||||
return Crew(
|
||||
**copied_data,
|
||||
agents=cloned_agents,
|
||||
tasks=cloned_tasks,
|
||||
@@ -1266,15 +1269,13 @@ class Crew(FlowTrackable, BaseModel):
|
||||
manager_llm=manager_llm,
|
||||
)
|
||||
|
||||
return copied_crew
|
||||
|
||||
def _set_tasks_callbacks(self) -> None:
|
||||
"""Sets callback for every task suing task_callback"""
|
||||
for task in self.tasks:
|
||||
if not task.callback:
|
||||
task.callback = self.task_callback
|
||||
|
||||
def _interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
|
||||
def _interpolate_inputs(self, inputs: dict[str, Any]) -> None:
|
||||
"""Interpolates the inputs in the tasks and agents."""
|
||||
[
|
||||
task.interpolate_inputs_and_add_conversation_history(
|
||||
@@ -1307,8 +1308,8 @@ class Crew(FlowTrackable, BaseModel):
|
||||
def test(
|
||||
self,
|
||||
n_iterations: int,
|
||||
eval_llm: Union[str, InstanceOf[BaseLLM]],
|
||||
inputs: Optional[Dict[str, Any]] = None,
|
||||
eval_llm: str | InstanceOf[BaseLLM],
|
||||
inputs: dict[str, Any] | None = None,
|
||||
) -> None:
|
||||
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
|
||||
try:
|
||||
@@ -1364,7 +1365,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
ValueError: If an invalid command type is provided.
|
||||
RuntimeError: If memory reset operation fails.
|
||||
"""
|
||||
VALID_TYPES = frozenset(
|
||||
valid_types = frozenset(
|
||||
[
|
||||
"long",
|
||||
"short",
|
||||
@@ -1377,9 +1378,9 @@ class Crew(FlowTrackable, BaseModel):
|
||||
]
|
||||
)
|
||||
|
||||
if command_type not in VALID_TYPES:
|
||||
if command_type not in valid_types:
|
||||
raise ValueError(
|
||||
f"Invalid command type. Must be one of: {', '.join(sorted(VALID_TYPES))}"
|
||||
f"Invalid command type. Must be one of: {', '.join(sorted(valid_types))}"
|
||||
)
|
||||
|
||||
try:
|
||||
@@ -1389,7 +1390,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
self._reset_specific_memory(command_type)
|
||||
|
||||
except Exception as e:
|
||||
error_msg = f"Failed to reset {command_type} memory: {str(e)}"
|
||||
error_msg = f"Failed to reset {command_type} memory: {e!s}"
|
||||
self._logger.log("error", error_msg)
|
||||
raise RuntimeError(error_msg) from e
|
||||
|
||||
@@ -1397,7 +1398,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
"""Reset all available memory systems."""
|
||||
memory_systems = self._get_memory_systems()
|
||||
|
||||
for memory_type, config in memory_systems.items():
|
||||
for config in memory_systems.values():
|
||||
if (system := config.get("system")) is not None:
|
||||
name = config.get("name")
|
||||
try:
|
||||
@@ -1409,7 +1410,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
)
|
||||
except Exception as e:
|
||||
raise RuntimeError(
|
||||
f"[Crew ({self.name if self.name else self.id})] Failed to reset {name} memory: {str(e)}"
|
||||
f"[Crew ({self.name if self.name else self.id})] Failed to reset {name} memory: {e!s}"
|
||||
) from e
|
||||
|
||||
def _reset_specific_memory(self, memory_type: str) -> None:
|
||||
@@ -1438,7 +1439,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
)
|
||||
except Exception as e:
|
||||
raise RuntimeError(
|
||||
f"[Crew ({self.name if self.name else self.id})] Failed to reset {name} memory: {str(e)}"
|
||||
f"[Crew ({self.name if self.name else self.id})] Failed to reset {name} memory: {e!s}"
|
||||
) from e
|
||||
|
||||
def _get_memory_systems(self):
|
||||
@@ -1506,7 +1507,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
},
|
||||
}
|
||||
|
||||
def reset_knowledge(self, knowledges: List[Knowledge]) -> None:
|
||||
def reset_knowledge(self, knowledges: list[Knowledge]) -> None:
|
||||
"""Reset crew and agent knowledge storage."""
|
||||
for ks in knowledges:
|
||||
ks.reset()
|
||||
|
||||
@@ -1,14 +1,5 @@
|
||||
"""Base LLM abstract class for CrewAI.
|
||||
|
||||
This module provides the abstract base class for all LLM implementations
|
||||
in CrewAI.
|
||||
"""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any, Final
|
||||
|
||||
DEFAULT_CONTEXT_WINDOW_SIZE: Final[int] = 4096
|
||||
DEFAULT_SUPPORTS_STOP_WORDS: Final[bool] = True
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
|
||||
class BaseLLM(ABC):
|
||||
@@ -24,38 +15,41 @@ class BaseLLM(ABC):
|
||||
messages when things go wrong.
|
||||
|
||||
Attributes:
|
||||
model: The model identifier/name.
|
||||
temperature: Optional temperature setting for response generation.
|
||||
stop: A list of stop sequences that the LLM should use to stop generation.
|
||||
stop (list): A list of stop sequences that the LLM should use to stop generation.
|
||||
This is used by the CrewAgentExecutor and other components.
|
||||
"""
|
||||
|
||||
model: str
|
||||
temperature: Optional[float] = None
|
||||
stop: Optional[List[str]] = None
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: str,
|
||||
temperature: float | None = None,
|
||||
stop: list[str] | None = None,
|
||||
) -> None:
|
||||
temperature: Optional[float] = None,
|
||||
):
|
||||
"""Initialize the BaseLLM with default attributes.
|
||||
|
||||
Args:
|
||||
model: The model identifier/name.
|
||||
temperature: Optional temperature setting for response generation.
|
||||
stop: Optional list of stop sequences for generation.
|
||||
This constructor sets default values for attributes that are expected
|
||||
by the CrewAgentExecutor and other components.
|
||||
|
||||
All custom LLM implementations should call super().__init__() to ensure
|
||||
that these default attributes are properly initialized.
|
||||
"""
|
||||
self.model = model
|
||||
self.temperature = temperature
|
||||
self.stop: list[str] = stop or []
|
||||
self.stop = []
|
||||
|
||||
@abstractmethod
|
||||
def call(
|
||||
self,
|
||||
messages: str | list[dict[str, str]],
|
||||
tools: list[dict] | None = None,
|
||||
callbacks: list[Any] | None = None,
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
) -> str | Any:
|
||||
messages: Union[str, List[Dict[str, str]]],
|
||||
tools: Optional[List[dict]] = None,
|
||||
callbacks: Optional[List[Any]] = None,
|
||||
available_functions: Optional[Dict[str, Any]] = None,
|
||||
from_task: Optional[Any] = None,
|
||||
from_agent: Optional[Any] = None,
|
||||
) -> Union[str, Any]:
|
||||
"""Call the LLM with the given messages.
|
||||
|
||||
Args:
|
||||
@@ -70,7 +64,6 @@ class BaseLLM(ABC):
|
||||
available_functions: Optional dict mapping function names to callables
|
||||
that can be invoked by the LLM.
|
||||
from_task: Optional task caller to be used for the LLM call.
|
||||
from_agent: Optional agent caller to be used for the LLM call.
|
||||
|
||||
Returns:
|
||||
Either a text response from the LLM (str) or
|
||||
@@ -81,20 +74,21 @@ class BaseLLM(ABC):
|
||||
TimeoutError: If the LLM request times out.
|
||||
RuntimeError: If the LLM request fails for other reasons.
|
||||
"""
|
||||
pass
|
||||
|
||||
def supports_stop_words(self) -> bool:
|
||||
"""Check if the LLM supports stop words.
|
||||
|
||||
Returns:
|
||||
True if the LLM supports stop words, False otherwise.
|
||||
bool: True if the LLM supports stop words, False otherwise.
|
||||
"""
|
||||
return DEFAULT_SUPPORTS_STOP_WORDS
|
||||
return True # Default implementation assumes support for stop words
|
||||
|
||||
def get_context_window_size(self) -> int:
|
||||
"""Get the context window size for the LLM.
|
||||
|
||||
Returns:
|
||||
The number of tokens/characters the model can handle.
|
||||
int: The number of tokens/characters the model can handle.
|
||||
"""
|
||||
# Default implementation - subclasses should override with model-specific values
|
||||
return DEFAULT_CONTEXT_WINDOW_SIZE
|
||||
return 4096
|
||||
|
||||
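As the docstrings above note, a custom LLM subclasses BaseLLM, calls super().__init__(), and implements call(); a toy sketch (EchoLLM is an illustrative name, not part of the library):

    from crewai.llms.base_llm import BaseLLM

    class EchoLLM(BaseLLM):
        """Illustrative only: returns the last user message unchanged."""

        def call(self, messages, tools=None, callbacks=None,
                 available_functions=None, from_task=None, from_agent=None):
            if isinstance(messages, str):
                return messages
            return messages[-1].get("content", "")

    llm = EchoLLM(model="echo", temperature=0.0)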
88
src/crewai/llms/third_party/ai_suite.py
vendored
88
src/crewai/llms/third_party/ai_suite.py
vendored
@@ -1,62 +1,24 @@
|
||||
"""AI Suite LLM integration for CrewAI.
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
This module provides integration with AI Suite for LLM capabilities.
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
import aisuite as ai # type: ignore
|
||||
import aisuite as ai
|
||||
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
|
||||
|
||||
class AISuiteLLM(BaseLLM):
|
||||
"""AI Suite LLM implementation.
|
||||
|
||||
This class provides integration with AI Suite models through the BaseLLM interface.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: str,
|
||||
temperature: float | None = None,
|
||||
stop: list[str] | None = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize the AI Suite LLM.
|
||||
|
||||
Args:
|
||||
model: The model identifier for AI Suite.
|
||||
temperature: Optional temperature setting for response generation.
|
||||
stop: Optional list of stop sequences for generation.
|
||||
**kwargs: Additional keyword arguments passed to the AI Suite client.
|
||||
"""
|
||||
super().__init__(model, temperature, stop)
|
||||
def __init__(self, model: str, temperature: Optional[float] = None, **kwargs):
|
||||
super().__init__(model, temperature, **kwargs)
|
||||
self.client = ai.Client()
|
||||
self.kwargs = kwargs
|
||||
|
||||
def call(
|
||||
self,
|
||||
messages: str | list[dict[str, str]],
|
||||
tools: list[dict] | None = None,
|
||||
callbacks: list[Any] | None = None,
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
) -> str | Any:
|
||||
"""Call the AI Suite LLM with the given messages.
|
||||
|
||||
Args:
|
||||
messages: Input messages for the LLM.
|
||||
tools: Optional list of tool schemas for function calling.
|
||||
callbacks: Optional list of callback functions.
|
||||
available_functions: Optional dict mapping function names to callables.
|
||||
from_task: Optional task caller.
|
||||
from_agent: Optional agent caller.
|
||||
|
||||
Returns:
|
||||
The text response from the LLM.
|
||||
"""
|
||||
messages: Union[str, List[Dict[str, str]]],
|
||||
tools: Optional[List[dict]] = None,
|
||||
callbacks: Optional[List[Any]] = None,
|
||||
available_functions: Optional[Dict[str, Any]] = None,
|
||||
from_task: Optional[Any] = None,
|
||||
from_agent: Optional[Any] = None,
|
||||
) -> Union[str, Any]:
|
||||
completion_params = self._prepare_completion_params(messages, tools)
|
||||
response = self.client.chat.completions.create(**completion_params)
|
||||
|
||||
@@ -64,35 +26,15 @@ class AISuiteLLM(BaseLLM):
|
||||
|
||||
def _prepare_completion_params(
|
||||
self,
|
||||
messages: str | list[dict[str, str]],
|
||||
tools: list[dict] | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Prepare parameters for the AI Suite completion call.
|
||||
|
||||
Args:
|
||||
messages: Input messages for the LLM.
|
||||
tools: Optional list of tool schemas.
|
||||
|
||||
Returns:
|
||||
Dictionary of parameters for the completion API.
|
||||
"""
|
||||
params: dict[str, Any] = {
|
||||
messages: Union[str, List[Dict[str, str]]],
|
||||
tools: Optional[List[dict]] = None,
|
||||
) -> Dict[str, Any]:
|
||||
return {
|
||||
"model": self.model,
|
||||
"messages": messages,
|
||||
"temperature": self.temperature,
|
||||
"tools": tools,
|
||||
**self.kwargs,
|
||||
}
|
||||
|
||||
if self.stop:
|
||||
params["stop"] = self.stop
|
||||
|
||||
return params
|
||||
|
||||
def supports_function_calling(self) -> bool:
|
||||
"""Check if the LLM supports function calling.
|
||||
|
||||
Returns:
|
||||
False, as AI Suite does not currently support function calling.
|
||||
"""
|
||||
return False
|
||||
|
||||
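A minimal sketch of wiring this class into an agent (the model string is illustrative; aisuite expects provider-prefixed names such as "openai:gpt-4o"):

    from crewai import Agent
    from crewai.llms.third_party.ai_suite import AISuiteLLM

    llm = AISuiteLLM(model="openai:gpt-4o", temperature=0.2)
    agent = Agent(
        role="Researcher",
        goal="Summarize recent findings",
        backstory="A careful analyst",
        llm=llm,
    )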
@@ -1,12 +1,10 @@
|
||||
import os
|
||||
import re
|
||||
from typing import Any, Dict, List
|
||||
from collections import defaultdict
|
||||
from typing import Any, Iterable
|
||||
|
||||
from mem0 import Memory, MemoryClient # type: ignore[import-untyped]
|
||||
from mem0 import Memory, MemoryClient
|
||||
from crewai.utilities.chromadb import sanitize_collection_name
|
||||
|
||||
from crewai.memory.storage.interface import Storage
|
||||
from crewai.utilities.chromadb import sanitize_collection_name
|
||||
|
||||
MAX_AGENT_ID_LENGTH_MEM0 = 255
|
||||
|
||||
@@ -88,28 +86,9 @@ class Mem0Storage(Storage):
|
||||
|
||||
return filter
|
||||
|
||||
def save(self, value: Any, metadata: dict[str, Any]) -> None:
|
||||
def _last_content(messages: Iterable[dict[str, Any]], role: str) -> str:
|
||||
return next(
|
||||
(m.get("content", "") for m in reversed(list(messages)) if m.get("role") == role),
|
||||
""
|
||||
)
|
||||
|
||||
conversations = []
|
||||
messages = metadata.pop("messages", None)
|
||||
if messages:
|
||||
last_user = _last_content(messages, "user")
|
||||
last_assistant = _last_content(messages, "assistant")
|
||||
|
||||
if user_msg := self._get_user_message(last_user):
|
||||
conversations.append({"role": "user", "content": user_msg})
|
||||
|
||||
if assistant_msg := self._get_assistant_message(last_assistant):
|
||||
conversations.append({"role": "assistant", "content": assistant_msg})
|
||||
else:
|
||||
conversations.append({"role": "assistant", "content": value})
|
||||
|
||||
def save(self, value: Any, metadata: Dict[str, Any]) -> None:
|
||||
user_id = self.config.get("user_id", "")
|
||||
assistant_message = [{"role" : "assistant","content" : value}]
|
||||
|
||||
base_metadata = {
|
||||
"short_term": "short_term",
|
||||
@@ -140,9 +119,9 @@ class Mem0Storage(Storage):
|
||||
if agent_id := self.config.get("agent_id", self._get_agent_name()):
|
||||
params["agent_id"] = agent_id
|
||||
|
||||
self.memory.add(conversations, **params)
|
||||
self.memory.add(assistant_message, **params)
|
||||
|
||||
def search(self,query: str,limit: int = 3,score_threshold: float = 0.35) -> list[Any]:
|
||||
def search(self,query: str,limit: int = 3,score_threshold: float = 0.35) -> List[Any]:
|
||||
params = {
|
||||
"query": query,
|
||||
"limit": limit,
|
||||
@@ -181,7 +160,7 @@ class Mem0Storage(Storage):
|
||||
# This makes the results compatible with Contextual Memory retrieval
|
||||
for result in results["results"]:
|
||||
result["context"] = result["memory"]
|
||||
|
||||
|
||||
return [r for r in results["results"]]
|
||||
|
||||
def reset(self):
|
||||
@@ -202,16 +181,3 @@ class Mem0Storage(Storage):
|
||||
agents = [self._sanitize_role(agent.role) for agent in agents]
|
||||
agents = "_".join(agents)
|
||||
return sanitize_collection_name(name=agents, max_collection_length=MAX_AGENT_ID_LENGTH_MEM0)
|
||||
|
||||
def _get_assistant_message(self, text: str) -> str:
|
||||
marker = "Final Answer:"
|
||||
if marker in text:
|
||||
return text.split(marker, 1)[1].strip()
|
||||
return text
|
||||
|
||||
def _get_user_message(self, text: str) -> str:
|
||||
pattern = r"User message:\s*(.*)"
|
||||
match = re.search(pattern, text)
|
||||
if match:
|
||||
return match.group(1).strip()
|
||||
return text
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
import inspect
|
||||
import logging
|
||||
from collections.abc import Callable
|
||||
from pathlib import Path
|
||||
from typing import Any, TypeVar, cast
|
||||
from typing import Any, Callable, Dict, TypeVar, cast, List
|
||||
from crewai.tools import BaseTool
|
||||
|
||||
import yaml
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from crewai.tools import BaseTool
|
||||
|
||||
load_dotenv()
|
||||
|
||||
T = TypeVar("T", bound=type)
|
||||
@@ -16,7 +14,7 @@ T = TypeVar("T", bound=type)
|
||||
"""Base decorator for creating crew classes with configuration and function management."""
|
||||
|
||||
|
||||
def CrewBase(cls: T) -> T: # noqa: N802
|
||||
def CrewBase(cls: T) -> T:
|
||||
"""Wraps a class with crew functionality and configuration management."""
|
||||
|
||||
class WrappedClass(cls): # type: ignore
|
||||
@@ -31,7 +29,6 @@ def CrewBase(cls: T) -> T: # noqa: N802
|
||||
original_tasks_config_path = getattr(cls, "tasks_config", "config/tasks.yaml")
|
||||
|
||||
mcp_server_params: Any = getattr(cls, "mcp_server_params", None)
|
||||
mcp_connect_timeout: int = getattr(cls, "mcp_connect_timeout", 30)
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
@@ -89,18 +86,15 @@ def CrewBase(cls: T) -> T: # noqa: N802
|
||||
import types
|
||||
return types.MethodType(_close_mcp_server, self)
|
||||
|
||||
def get_mcp_tools(self, *tool_names: list[str]) -> list[BaseTool]:
|
||||
def get_mcp_tools(self, *tool_names: list[str]) -> List[BaseTool]:
|
||||
if not self.mcp_server_params:
|
||||
return []
|
||||
|
||||
from crewai_tools import MCPServerAdapter # type: ignore[import-untyped]
|
||||
from crewai_tools import MCPServerAdapter
|
||||
|
||||
adapter = getattr(self, '_mcp_server_adapter', None)
|
||||
if not adapter:
|
||||
self._mcp_server_adapter = MCPServerAdapter(
|
||||
self.mcp_server_params,
|
||||
connect_timeout=self.mcp_connect_timeout
|
||||
)
|
||||
self._mcp_server_adapter = MCPServerAdapter(self.mcp_server_params)
|
||||
|
||||
return self._mcp_server_adapter.tools.filter_by_names(tool_names or None)
|
||||
|
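A minimal sketch of how a decorated class might use these MCP hooks (the server parameters are placeholders; their exact shape depends on crewai_tools.MCPServerAdapter):

    from crewai.project import CrewBase  # assumes the usual crewAI project export

    @CrewBase
    class MyCrew:
        mcp_server_params = {"url": "http://localhost:8000/sse"}  # placeholder
        mcp_connect_timeout = 30  # seconds, read by the wrapper above

    # tools = MyCrew().get_mcp_tools("search", "summarize")  # filter by name, or pass nothing for all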
||||
@@ -160,8 +154,8 @@ def CrewBase(cls: T) -> T: # noqa: N802
|
||||
}
|
||||
|
||||
def _filter_functions(
|
||||
self, functions: dict[str, Callable], attribute: str
|
||||
) -> dict[str, Callable]:
|
||||
self, functions: Dict[str, Callable], attribute: str
|
||||
) -> Dict[str, Callable]:
|
||||
return {
|
||||
name: func
|
||||
for name, func in functions.items()
|
||||
@@ -190,11 +184,11 @@ def CrewBase(cls: T) -> T: # noqa: N802
|
||||
def _map_agent_variables(
|
||||
self,
|
||||
agent_name: str,
|
||||
agent_info: dict[str, Any],
|
||||
llms: dict[str, Callable],
|
||||
tool_functions: dict[str, Callable],
|
||||
cache_handler_functions: dict[str, Callable],
|
||||
callbacks: dict[str, Callable],
|
||||
agent_info: Dict[str, Any],
|
||||
llms: Dict[str, Callable],
|
||||
tool_functions: Dict[str, Callable],
|
||||
cache_handler_functions: Dict[str, Callable],
|
||||
callbacks: Dict[str, Callable],
|
||||
) -> None:
|
||||
if llm := agent_info.get("llm"):
|
||||
try:
|
||||
@@ -251,13 +245,13 @@ def CrewBase(cls: T) -> T: # noqa: N802
|
||||
def _map_task_variables(
|
||||
self,
|
||||
task_name: str,
|
||||
task_info: dict[str, Any],
|
||||
agents: dict[str, Callable],
|
||||
tasks: dict[str, Callable],
|
||||
output_json_functions: dict[str, Callable],
|
||||
tool_functions: dict[str, Callable],
|
||||
callback_functions: dict[str, Callable],
|
||||
output_pydantic_functions: dict[str, Callable],
|
||||
task_info: Dict[str, Any],
|
||||
agents: Dict[str, Callable],
|
||||
tasks: Dict[str, Callable],
|
||||
output_json_functions: Dict[str, Callable],
|
||||
tool_functions: Dict[str, Callable],
|
||||
callback_functions: Dict[str, Callable],
|
||||
output_pydantic_functions: Dict[str, Callable],
|
||||
) -> None:
|
||||
if context_list := task_info.get("context"):
|
||||
self.tasks_config[task_name]["context"] = [
|
||||
|
||||
33
src/crewai/responsibility/__init__.py
Normal file
33
src/crewai/responsibility/__init__.py
Normal file
@@ -0,0 +1,33 @@
"""
Formal Responsibility Tracking System for CrewAI

This module provides comprehensive responsibility tracking capabilities including:
- Capability-based agent hierarchy
- Mathematical responsibility assignment
- Accountability logging
- Performance-based capability adjustment
"""

from crewai.responsibility.accountability import AccountabilityLogger
from crewai.responsibility.assignment import ResponsibilityCalculator
from crewai.responsibility.hierarchy import CapabilityHierarchy
from crewai.responsibility.models import (
    AccountabilityRecord,
    AgentCapability,
    PerformanceMetrics,
    ResponsibilityAssignment,
)
from crewai.responsibility.performance import PerformanceTracker
from crewai.responsibility.system import ResponsibilitySystem

__all__ = [
    "AccountabilityLogger",
    "AccountabilityRecord",
    "AgentCapability",
    "CapabilityHierarchy",
    "PerformanceMetrics",
    "PerformanceTracker",
    "ResponsibilityAssignment",
    "ResponsibilityCalculator",
    "ResponsibilitySystem",
]
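The exports above hint at the intended composition: ResponsibilitySystem is the facade, while the hierarchy, calculator, logger and tracker can also be wired by hand. A short sketch, assuming this branch's crewai.responsibility package is importable:

```python
from crewai.responsibility import (
    AgentCapability,
    CapabilityHierarchy,
    ResponsibilityCalculator,
    ResponsibilitySystem,
)

# The facade builds and owns its own components...
system = ResponsibilitySystem()

# ...but the pieces can also be used standalone, e.g. a hierarchy plus calculator.
hierarchy = CapabilityHierarchy()
calculator = ResponsibilityCalculator(hierarchy)

# Capabilities are plain Pydantic models with bounded proficiency/confidence.
writing = AgentCapability(
    name="technical_writing",
    capability_type="communication",
    proficiency_level=0.8,
    confidence_score=0.7,
)
```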
212
src/crewai/responsibility/accountability.py
Normal file
212
src/crewai/responsibility/accountability.py
Normal file
@@ -0,0 +1,212 @@
|
||||
"""
|
||||
Accountability logging and tracking system.
|
||||
"""
|
||||
|
||||
from collections import defaultdict
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Any
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.responsibility.models import AccountabilityRecord
|
||||
from crewai.task import Task
|
||||
|
||||
|
||||
class AccountabilityLogger:
|
||||
"""Logs and tracks agent actions for accountability."""
|
||||
|
||||
def __init__(self):
|
||||
self.records: list[AccountabilityRecord] = []
|
||||
self.agent_records: dict[str, list[AccountabilityRecord]] = defaultdict(list)
|
||||
self._setup_event_listeners()
|
||||
|
||||
def log_action(
|
||||
self,
|
||||
agent: BaseAgent,
|
||||
action_type: str,
|
||||
action_description: str,
|
||||
task: Task | None = None,
|
||||
context: dict[str, Any] | None = None
|
||||
) -> AccountabilityRecord:
|
||||
"""Log an agent action."""
|
||||
agent_id = self._get_agent_id(agent)
|
||||
task_id = str(task.id) if task else None
|
||||
|
||||
record = AccountabilityRecord(
|
||||
agent_id=agent_id,
|
||||
action_type=action_type,
|
||||
action_description=action_description,
|
||||
task_id=task_id,
|
||||
context=context or {},
|
||||
outcome=None,
|
||||
success=None
|
||||
)
|
||||
|
||||
self.records.append(record)
|
||||
self.agent_records[agent_id].append(record)
|
||||
|
||||
return record
|
||||
|
||||
def log_decision(
|
||||
self,
|
||||
agent: BaseAgent,
|
||||
decision: str,
|
||||
reasoning: str,
|
||||
task: Task | None = None,
|
||||
alternatives_considered: list[str] | None = None
|
||||
) -> AccountabilityRecord:
|
||||
"""Log an agent decision with reasoning."""
|
||||
context = {
|
||||
"reasoning": reasoning,
|
||||
"alternatives_considered": alternatives_considered or []
|
||||
}
|
||||
|
||||
return self.log_action(
|
||||
agent=agent,
|
||||
action_type="decision",
|
||||
action_description=decision,
|
||||
task=task,
|
||||
context=context
|
||||
)
|
||||
|
||||
def log_delegation(
|
||||
self,
|
||||
delegating_agent: BaseAgent,
|
||||
receiving_agent: BaseAgent,
|
||||
task: Task,
|
||||
delegation_reason: str
|
||||
) -> AccountabilityRecord:
|
||||
"""Log task delegation between agents."""
|
||||
context = {
|
||||
"receiving_agent_id": self._get_agent_id(receiving_agent),
|
||||
"receiving_agent_role": receiving_agent.role,
|
||||
"delegation_reason": delegation_reason
|
||||
}
|
||||
|
||||
return self.log_action(
|
||||
agent=delegating_agent,
|
||||
action_type="delegation",
|
||||
action_description=f"Delegated task to {receiving_agent.role}",
|
||||
task=task,
|
||||
context=context
|
||||
)
|
||||
|
||||
def log_task_completion(
|
||||
self,
|
||||
agent: BaseAgent,
|
||||
task: Task,
|
||||
success: bool,
|
||||
outcome_description: str,
|
||||
completion_time: float | None = None
|
||||
) -> AccountabilityRecord:
|
||||
"""Log task completion with outcome."""
|
||||
context = {
|
||||
"completion_time": completion_time,
|
||||
"task_description": task.description
|
||||
}
|
||||
|
||||
record = self.log_action(
|
||||
agent=agent,
|
||||
action_type="task_completion",
|
||||
action_description=f"Completed task: {task.description[:100]}...",
|
||||
task=task,
|
||||
context=context
|
||||
)
|
||||
|
||||
record.set_outcome(outcome_description, success)
|
||||
return record
|
||||
|
||||
def get_agent_records(
|
||||
self,
|
||||
agent: BaseAgent,
|
||||
action_type: str | None = None,
|
||||
since: datetime | None = None
|
||||
) -> list[AccountabilityRecord]:
|
||||
"""Get accountability records for a specific agent."""
|
||||
agent_id = self._get_agent_id(agent)
|
||||
records = self.agent_records.get(agent_id, [])
|
||||
|
||||
if action_type:
|
||||
records = [r for r in records if r.action_type == action_type]
|
||||
|
||||
if since:
|
||||
records = [r for r in records if r.timestamp >= since]
|
||||
|
||||
return records
|
||||
|
||||
def get_task_records(self, task: Task) -> list[AccountabilityRecord]:
|
||||
"""Get all accountability records related to a specific task."""
|
||||
task_id = str(task.id)
|
||||
return [r for r in self.records if r.task_id == task_id]
|
||||
|
||||
def get_delegation_chain(self, task: Task) -> list[AccountabilityRecord]:
|
||||
"""Get the delegation chain for a task."""
|
||||
task_records = self.get_task_records(task)
|
||||
delegation_records = [r for r in task_records if r.action_type == "delegation"]
|
||||
|
||||
delegation_records.sort(key=lambda r: r.timestamp)
|
||||
return delegation_records
|
||||
|
||||
def generate_accountability_report(
|
||||
self,
|
||||
agent: BaseAgent | None = None,
|
||||
time_period: timedelta | None = None
|
||||
) -> dict[str, Any]:
|
||||
"""Generate an accountability report."""
|
||||
since = datetime.utcnow() - time_period if time_period else None
|
||||
|
||||
if agent:
|
||||
records = self.get_agent_records(agent, since=since)
|
||||
agent_id = self._get_agent_id(agent)
|
||||
else:
|
||||
records = self.records
|
||||
if since:
|
||||
records = [r for r in records if r.timestamp >= since]
|
||||
agent_id = "all_agents"
|
||||
|
||||
action_counts: dict[str, int] = defaultdict(int)
|
||||
success_counts: dict[str, int] = defaultdict(int)
|
||||
failure_counts: dict[str, int] = defaultdict(int)
|
||||
|
||||
for record in records:
|
||||
action_counts[record.action_type] += 1
|
||||
if record.success is True:
|
||||
success_counts[record.action_type] += 1
|
||||
elif record.success is False:
|
||||
failure_counts[record.action_type] += 1
|
||||
|
||||
success_rates: dict[str, float | None] = {}
|
||||
for action_type in action_counts:
|
||||
total = success_counts[action_type] + failure_counts[action_type]
|
||||
if total > 0:
|
||||
success_rates[action_type] = success_counts[action_type] / total
|
||||
else:
|
||||
success_rates[action_type] = None
|
||||
|
||||
return {
|
||||
"agent_id": agent_id,
|
||||
"report_period": {
|
||||
"start": since.isoformat() if since else None,
|
||||
"end": datetime.utcnow().isoformat()
|
||||
},
|
||||
"total_records": len(records),
|
||||
"action_counts": dict(action_counts),
|
||||
"success_counts": dict(success_counts),
|
||||
"failure_counts": dict(failure_counts),
|
||||
"success_rates": success_rates,
|
||||
"recent_actions": [
|
||||
{
|
||||
"timestamp": r.timestamp.isoformat(),
|
||||
"action_type": r.action_type,
|
||||
"description": r.action_description,
|
||||
"success": r.success
|
||||
}
|
||||
for r in sorted(records, key=lambda x: x.timestamp, reverse=True)[:10]
|
||||
]
|
||||
}
|
||||
|
||||
def _setup_event_listeners(self) -> None:
|
||||
"""Set up event listeners for automatic logging."""
|
||||
|
||||
def _get_agent_id(self, agent: BaseAgent) -> str:
|
||||
"""Get a unique identifier for an agent."""
|
||||
return f"{agent.role}_{id(agent)}"
|
||||
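To make the logging flow concrete, a hedged sketch follows (it assumes this branch's package plus ordinary crewai Agent and Task objects that are never executed). Actions accumulate per agent, outcomes are set on the returned record, and the report computes per-action success rates as successes / (successes + failures):

```python
from crewai import Agent, Task
from crewai.responsibility import AccountabilityLogger

logger = AccountabilityLogger()

analyst = Agent(
    role="Analyst",
    goal="Summarize findings",
    backstory="Keeps an auditable trail of decisions.",
)
task = Task(
    description="Summarize last week's experiment results",
    expected_output="One-page summary",
    agent=analyst,
)

# Decisions carry their reasoning and the alternatives that were rejected.
logger.log_decision(
    agent=analyst,
    decision="Use the cleaned dataset only",
    reasoning="Raw data has known sensor drift",
    task=task,
    alternatives_considered=["Use raw data", "Blend both"],
)

# Completions set the outcome on the record, which feeds the success-rate stats.
logger.log_task_completion(
    agent=analyst,
    task=task,
    success=True,
    outcome_description="Summary delivered",
    completion_time=42.0,
)

report = logger.generate_accountability_report(agent=analyst)
print(report["success_rates"])  # e.g. {"decision": None, "task_completion": 1.0}
```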
257
src/crewai/responsibility/assignment.py
Normal file
257
src/crewai/responsibility/assignment.py
Normal file
@@ -0,0 +1,257 @@
|
||||
"""
|
||||
Mathematical responsibility assignment algorithms.
|
||||
"""
|
||||
|
||||
import math
|
||||
from enum import Enum
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.responsibility.hierarchy import CapabilityHierarchy
|
||||
from crewai.responsibility.models import ResponsibilityAssignment, TaskRequirement
|
||||
from crewai.task import Task
|
||||
|
||||
|
||||
class AssignmentStrategy(str, Enum):
|
||||
"""Different strategies for responsibility assignment."""
|
||||
GREEDY = "greedy" # Assign to best available agent
|
||||
BALANCED = "balanced" # Balance workload across agents
|
||||
OPTIMAL = "optimal" # Optimize for overall system performance
|
||||
|
||||
|
||||
class ResponsibilityCalculator:
|
||||
"""Calculates and assigns responsibilities using mathematical algorithms."""
|
||||
|
||||
def __init__(self, hierarchy: CapabilityHierarchy):
|
||||
self.hierarchy = hierarchy
|
||||
self.current_workloads: dict[str, int] = {} # agent_id -> current task count
|
||||
|
||||
def calculate_responsibility_assignment(
|
||||
self,
|
||||
task: Task,
|
||||
requirements: list[TaskRequirement],
|
||||
strategy: AssignmentStrategy = AssignmentStrategy.GREEDY,
|
||||
exclude_agents: list[BaseAgent] | None = None
|
||||
) -> ResponsibilityAssignment | None:
|
||||
"""Calculate the best responsibility assignment for a task."""
|
||||
exclude_agent_ids = set()
|
||||
if exclude_agents:
|
||||
exclude_agent_ids = {self.hierarchy._get_agent_id(agent) for agent in exclude_agents}
|
||||
|
||||
if strategy == AssignmentStrategy.GREEDY:
|
||||
return self._greedy_assignment(task, requirements, exclude_agent_ids)
|
||||
if strategy == AssignmentStrategy.BALANCED:
|
||||
return self._balanced_assignment(task, requirements, exclude_agent_ids)
|
||||
if strategy == AssignmentStrategy.OPTIMAL:
|
||||
return self._optimal_assignment(task, requirements, exclude_agent_ids)
|
||||
raise ValueError(f"Unknown assignment strategy: {strategy}")
|
||||
|
||||
def calculate_multi_agent_assignment(
|
||||
self,
|
||||
task: Task,
|
||||
requirements: list[TaskRequirement],
|
||||
max_agents: int = 3,
|
||||
strategy: AssignmentStrategy = AssignmentStrategy.OPTIMAL
|
||||
) -> list[ResponsibilityAssignment]:
|
||||
"""Calculate assignment for tasks requiring multiple agents."""
|
||||
assignments: list[ResponsibilityAssignment] = []
|
||||
used_agents: set[str] = set()
|
||||
|
||||
sorted_requirements = sorted(requirements, key=lambda r: r.weight, reverse=True)
|
||||
|
||||
for i, requirement in enumerate(sorted_requirements):
|
||||
if len(assignments) >= max_agents:
|
||||
break
|
||||
|
||||
single_req_assignment = self.calculate_responsibility_assignment(
|
||||
task, [requirement], strategy,
|
||||
exclude_agents=[self.hierarchy.agents[agent_id] for agent_id in used_agents]
|
||||
)
|
||||
|
||||
if single_req_assignment:
|
||||
single_req_assignment.responsibility_score *= (1.0 / (i + 1)) # Diminishing returns
|
||||
assignments.append(single_req_assignment)
|
||||
used_agents.add(single_req_assignment.agent_id)
|
||||
|
||||
return assignments
|
||||
|
||||
def update_workload(self, agent: BaseAgent, workload_change: int) -> None:
|
||||
"""Update the current workload for an agent."""
|
||||
agent_id = self.hierarchy._get_agent_id(agent)
|
||||
current = self.current_workloads.get(agent_id, 0)
|
||||
self.current_workloads[agent_id] = max(0, current + workload_change)
|
||||
|
||||
def get_workload_distribution(self) -> dict[str, int]:
|
||||
"""Get current workload distribution across all agents."""
|
||||
return self.current_workloads.copy()
|
||||
|
||||
def _greedy_assignment(
|
||||
self,
|
||||
task: Task,
|
||||
requirements: list[TaskRequirement],
|
||||
exclude_agent_ids: set
|
||||
) -> ResponsibilityAssignment | None:
|
||||
"""Assign to the agent with highest capability match score."""
|
||||
best_match = self.hierarchy.get_best_agent_for_task(requirements, exclude_agent_ids)
|
||||
|
||||
if not best_match:
|
||||
return None
|
||||
|
||||
agent, score, matches = best_match
|
||||
agent_id = self.hierarchy._get_agent_id(agent)
|
||||
|
||||
return ResponsibilityAssignment(
|
||||
agent_id=agent_id,
|
||||
task_id=str(task.id),
|
||||
responsibility_score=score,
|
||||
capability_matches=matches,
|
||||
reasoning=f"Greedy assignment: highest capability match score ({score:.3f})",
|
||||
completed_at=None,
|
||||
success=None
|
||||
)
|
||||
|
||||
def _balanced_assignment(
|
||||
self,
|
||||
task: Task,
|
||||
requirements: list[TaskRequirement],
|
||||
exclude_agent_ids: set
|
||||
) -> ResponsibilityAssignment | None:
|
||||
"""Assign considering both capability and current workload."""
|
||||
capable_agents = self.hierarchy.find_capable_agents(requirements, minimum_match_score=0.3)
|
||||
|
||||
if not capable_agents:
|
||||
return None
|
||||
|
||||
best_agent = None
|
||||
best_score = -1.0
|
||||
best_matches: list[str] = []
|
||||
|
||||
for agent, capability_score in capable_agents:
|
||||
agent_id = self.hierarchy._get_agent_id(agent)
|
||||
|
||||
if agent_id in exclude_agent_ids:
|
||||
continue
|
||||
|
||||
current_workload = self.current_workloads.get(agent_id, 0)
|
||||
workload_penalty = self._calculate_workload_penalty(current_workload)
|
||||
|
||||
combined_score = capability_score * (1.0 - workload_penalty)
|
||||
|
||||
if combined_score > best_score:
|
||||
best_score = combined_score
|
||||
best_agent = agent
|
||||
_, best_matches = self.hierarchy._calculate_detailed_capability_match(agent_id, requirements)
|
||||
|
||||
if best_agent:
|
||||
agent_id = self.hierarchy._get_agent_id(best_agent)
|
||||
return ResponsibilityAssignment(
|
||||
agent_id=agent_id,
|
||||
task_id=str(task.id),
|
||||
responsibility_score=best_score,
|
||||
capability_matches=best_matches,
|
||||
reasoning=f"Balanced assignment: capability ({capability_score:.3f}) with workload consideration",
|
||||
completed_at=None,
|
||||
success=None
|
||||
)
|
||||
|
||||
return None
|
||||
|
||||
def _optimal_assignment(
|
||||
self,
|
||||
task: Task,
|
||||
requirements: list[TaskRequirement],
|
||||
exclude_agent_ids: set
|
||||
) -> ResponsibilityAssignment | None:
|
||||
"""Assign using optimization for overall system performance."""
|
||||
capable_agents = self.hierarchy.find_capable_agents(requirements, minimum_match_score=0.2)
|
||||
|
||||
if not capable_agents:
|
||||
return None
|
||||
|
||||
best_agent = None
|
||||
best_score = -1.0
|
||||
best_matches: list[str] = []
|
||||
|
||||
for agent, capability_score in capable_agents:
|
||||
agent_id = self.hierarchy._get_agent_id(agent)
|
||||
|
||||
if agent_id in exclude_agent_ids:
|
||||
continue
|
||||
|
||||
optimization_score = self._calculate_optimization_score(
|
||||
agent_id, capability_score, requirements
|
||||
)
|
||||
|
||||
if optimization_score > best_score:
|
||||
best_score = optimization_score
|
||||
best_agent = agent
|
||||
_, best_matches = self.hierarchy._calculate_detailed_capability_match(agent_id, requirements)
|
||||
|
||||
if best_agent:
|
||||
agent_id = self.hierarchy._get_agent_id(best_agent)
|
||||
return ResponsibilityAssignment(
|
||||
agent_id=agent_id,
|
||||
task_id=str(task.id),
|
||||
responsibility_score=best_score,
|
||||
capability_matches=best_matches,
|
||||
reasoning=f"Optimal assignment: multi-factor optimization score ({best_score:.3f})",
|
||||
completed_at=None,
|
||||
success=None
|
||||
)
|
||||
|
||||
return None
|
||||
|
||||
def _calculate_workload_penalty(self, current_workload: int) -> float:
|
||||
"""Calculate penalty based on current workload."""
|
||||
if current_workload == 0:
|
||||
return 0.0
|
||||
|
||||
return min(0.8, 1.0 - math.exp(-current_workload / 3.0))
|
||||
|
||||
def _calculate_optimization_score(
|
||||
self,
|
||||
agent_id: str,
|
||||
capability_score: float,
|
||||
requirements: list[TaskRequirement]
|
||||
) -> float:
|
||||
"""Calculate multi-factor optimization score."""
|
||||
score = capability_score
|
||||
|
||||
current_workload = self.current_workloads.get(agent_id, 0)
|
||||
workload_factor = 1.0 - self._calculate_workload_penalty(current_workload)
|
||||
|
||||
agent_capabilities = self.hierarchy.agent_capabilities.get(agent_id, [])
|
||||
specialization_bonus = self._calculate_specialization_bonus(agent_capabilities, requirements)
|
||||
|
||||
reliability_factor = 1.0 # Placeholder for future performance integration
|
||||
|
||||
return (
|
||||
score * 0.5 + # 50% capability match
|
||||
workload_factor * 0.2 + # 20% workload consideration
|
||||
specialization_bonus * 0.2 + # 20% specialization bonus
|
||||
reliability_factor * 0.1 # 10% reliability
|
||||
)
|
||||
|
||||
def _calculate_specialization_bonus(
|
||||
self,
|
||||
agent_capabilities: list,
|
||||
requirements: list[TaskRequirement]
|
||||
) -> float:
|
||||
"""Calculate bonus for agents with specialized capabilities."""
|
||||
if not agent_capabilities or not requirements:
|
||||
return 0.0
|
||||
|
||||
high_proficiency_matches = 0
|
||||
total_matches = 0
|
||||
|
||||
for capability in agent_capabilities:
|
||||
for requirement in requirements:
|
||||
if self.hierarchy._capabilities_match(capability, requirement):
|
||||
total_matches += 1
|
||||
if capability.proficiency_level >= 0.8:
|
||||
high_proficiency_matches += 1
|
||||
|
||||
if total_matches == 0:
|
||||
return 0.0
|
||||
|
||||
specialization_ratio = high_proficiency_matches / total_matches
|
||||
return min(0.3, specialization_ratio * 0.3) # Max 30% bonus
|
||||
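The three strategies are easiest to compare with numbers: greedy uses the raw capability match, balanced multiplies it by (1 - penalty) with penalty = min(0.8, 1 - exp(-workload / 3)), and optimal blends capability (50%), workload (20%), specialization (20%) and a reliability placeholder (10%). A standalone check of that arithmetic, mirroring the code rather than importing it:

```python
import math


def workload_penalty(current_workload: int) -> float:
    """Same shape as ResponsibilityCalculator._calculate_workload_penalty."""
    if current_workload == 0:
        return 0.0
    return min(0.8, 1.0 - math.exp(-current_workload / 3.0))


def optimal_score(capability: float, workload: int, specialization: float,
                  reliability: float = 1.0) -> float:
    """Mirrors the 0.5 / 0.2 / 0.2 / 0.1 weighting in _calculate_optimization_score."""
    workload_factor = 1.0 - workload_penalty(workload)
    return capability * 0.5 + workload_factor * 0.2 + specialization * 0.2 + reliability * 0.1


for w in (0, 1, 3, 6):
    print(w, round(workload_penalty(w), 3))
# 0 0.0, 1 0.283, 3 0.632, 6 0.8 (capped)

# A strong but busy agent vs. a weaker idle one under the optimal strategy:
print(round(optimal_score(capability=0.9, workload=5, specialization=0.3), 3))  # ~0.648
print(round(optimal_score(capability=0.6, workload=0, specialization=0.1), 3))  # ~0.62
```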
257
src/crewai/responsibility/hierarchy.py
Normal file
257
src/crewai/responsibility/hierarchy.py
Normal file
@@ -0,0 +1,257 @@
|
||||
"""
|
||||
Capability-based agent hierarchy management.
|
||||
"""
|
||||
|
||||
from collections import defaultdict, deque
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.responsibility.models import (
|
||||
AgentCapability,
|
||||
CapabilityType,
|
||||
TaskRequirement,
|
||||
)
|
||||
|
||||
|
||||
class CapabilityHierarchy:
|
||||
"""Manages capability-based agent hierarchy and relationships."""
|
||||
|
||||
def __init__(self):
|
||||
self.agents: dict[str, BaseAgent] = {}
|
||||
self.agent_capabilities: dict[str, list[AgentCapability]] = defaultdict(list)
|
||||
self.capability_index: dict[str, set[str]] = defaultdict(set) # capability_name -> agent_ids
|
||||
self.hierarchy_relationships: dict[str, set[str]] = defaultdict(set) # supervisor -> subordinates
|
||||
|
||||
def add_agent(self, agent: BaseAgent, capabilities: list[AgentCapability]) -> None:
|
||||
"""Add an agent with their capabilities to the hierarchy."""
|
||||
agent_id = self._get_agent_id(agent)
|
||||
self.agents[agent_id] = agent
|
||||
self.agent_capabilities[agent_id] = capabilities
|
||||
|
||||
for capability in capabilities:
|
||||
self.capability_index[capability.name].add(agent_id)
|
||||
|
||||
def remove_agent(self, agent: BaseAgent) -> None:
|
||||
"""Remove an agent from the hierarchy."""
|
||||
agent_id = self._get_agent_id(agent)
|
||||
|
||||
if agent_id in self.agents:
|
||||
for capability in self.agent_capabilities[agent_id]:
|
||||
self.capability_index[capability.name].discard(agent_id)
|
||||
|
||||
for supervisor_id in self.hierarchy_relationships:
|
||||
self.hierarchy_relationships[supervisor_id].discard(agent_id)
|
||||
if agent_id in self.hierarchy_relationships:
|
||||
del self.hierarchy_relationships[agent_id]
|
||||
|
||||
del self.agents[agent_id]
|
||||
del self.agent_capabilities[agent_id]
|
||||
|
||||
def set_supervision_relationship(self, supervisor: BaseAgent, subordinate: BaseAgent) -> None:
|
||||
"""Establish a supervision relationship between agents."""
|
||||
supervisor_id = self._get_agent_id(supervisor)
|
||||
subordinate_id = self._get_agent_id(subordinate)
|
||||
|
||||
if supervisor_id in self.agents and subordinate_id in self.agents:
|
||||
self.hierarchy_relationships[supervisor_id].add(subordinate_id)
|
||||
|
||||
def get_agent_capabilities(self, agent: BaseAgent) -> list[AgentCapability]:
|
||||
"""Get capabilities for a specific agent."""
|
||||
agent_id = self._get_agent_id(agent)
|
||||
return self.agent_capabilities.get(agent_id, [])
|
||||
|
||||
def update_agent_capability(
|
||||
self,
|
||||
agent: BaseAgent,
|
||||
capability_name: str,
|
||||
new_proficiency: float,
|
||||
new_confidence: float
|
||||
) -> bool:
|
||||
"""Update a specific capability for an agent."""
|
||||
agent_id = self._get_agent_id(agent)
|
||||
|
||||
if agent_id not in self.agent_capabilities:
|
||||
return False
|
||||
|
||||
for capability in self.agent_capabilities[agent_id]:
|
||||
if capability.name == capability_name:
|
||||
capability.update_proficiency(new_proficiency, new_confidence)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def find_capable_agents(
|
||||
self,
|
||||
requirements: list[TaskRequirement],
|
||||
minimum_match_score: float = 0.5
|
||||
) -> list[tuple[BaseAgent, float]]:
|
||||
"""Find agents capable of handling the given requirements."""
|
||||
agent_scores = []
|
||||
|
||||
for agent_id, agent in self.agents.items():
|
||||
score = self._calculate_capability_match_score(agent_id, requirements)
|
||||
if score >= minimum_match_score:
|
||||
agent_scores.append((agent, score))
|
||||
|
||||
agent_scores.sort(key=lambda x: x[1], reverse=True)
|
||||
return agent_scores
|
||||
|
||||
def get_best_agent_for_task(
|
||||
self,
|
||||
requirements: list[TaskRequirement],
|
||||
exclude_agents: set[str] | None = None
|
||||
) -> tuple[BaseAgent, float, list[str]] | None:
|
||||
"""Get the best agent for a task based on capability requirements."""
|
||||
exclude_agents = exclude_agents or set()
|
||||
best_agent = None
|
||||
best_score = 0.0
|
||||
best_matches = []
|
||||
|
||||
for agent_id, agent in self.agents.items():
|
||||
if agent_id in exclude_agents:
|
||||
continue
|
||||
|
||||
score, matches = self._calculate_detailed_capability_match(agent_id, requirements)
|
||||
if score > best_score:
|
||||
best_score = score
|
||||
best_agent = agent
|
||||
best_matches = matches
|
||||
|
||||
if best_agent:
|
||||
return best_agent, best_score, best_matches
|
||||
return None
|
||||
|
||||
def get_subordinates(self, supervisor: BaseAgent) -> list[BaseAgent]:
|
||||
"""Get all subordinates of a supervisor agent."""
|
||||
supervisor_id = self._get_agent_id(supervisor)
|
||||
subordinate_ids = self.hierarchy_relationships.get(supervisor_id, set())
|
||||
return [self.agents[sub_id] for sub_id in subordinate_ids if sub_id in self.agents]
|
||||
|
||||
def get_hierarchy_path(self, from_agent: BaseAgent, to_agent: BaseAgent) -> list[BaseAgent] | None:
|
||||
"""Find the shortest path in the hierarchy between two agents."""
|
||||
from_id = self._get_agent_id(from_agent)
|
||||
to_id = self._get_agent_id(to_agent)
|
||||
|
||||
if from_id not in self.agents or to_id not in self.agents:
|
||||
return None
|
||||
|
||||
queue = deque([(from_id, [from_id])])
|
||||
visited = {from_id}
|
||||
|
||||
while queue:
|
||||
current_id, path = queue.popleft()
|
||||
|
||||
if current_id == to_id:
|
||||
return [self.agents[agent_id] for agent_id in path]
|
||||
|
||||
for subordinate_id in self.hierarchy_relationships.get(current_id, set()):
|
||||
if subordinate_id not in visited:
|
||||
visited.add(subordinate_id)
|
||||
queue.append((subordinate_id, [*path, subordinate_id]))
|
||||
|
||||
return None
|
||||
|
||||
def get_capability_distribution(self) -> dict[CapabilityType, dict[str, int]]:
|
||||
"""Get distribution of capabilities across all agents."""
|
||||
distribution: dict[CapabilityType, dict[str, int]] = defaultdict(lambda: defaultdict(int))
|
||||
|
||||
for capabilities in self.agent_capabilities.values():
|
||||
for capability in capabilities:
|
||||
proficiency_level = "high" if capability.proficiency_level >= 0.8 else \
|
||||
"medium" if capability.proficiency_level >= 0.5 else "low"
|
||||
distribution[capability.capability_type][proficiency_level] += 1
|
||||
|
||||
return dict(distribution)
|
||||
|
||||
def _get_agent_id(self, agent: BaseAgent) -> str:
|
||||
"""Get a unique identifier for an agent."""
|
||||
return f"{agent.role}_{id(agent)}"
|
||||
|
||||
def _calculate_capability_match_score(
|
||||
self,
|
||||
agent_id: str,
|
||||
requirements: list[TaskRequirement]
|
||||
) -> float:
|
||||
"""Calculate how well an agent's capabilities match task requirements."""
|
||||
if not requirements:
|
||||
return 1.0
|
||||
|
||||
agent_capabilities = self.agent_capabilities.get(agent_id, [])
|
||||
if not agent_capabilities:
|
||||
return 0.0
|
||||
|
||||
total_weight = sum(req.weight for req in requirements)
|
||||
if total_weight == 0:
|
||||
return 0.0
|
||||
|
||||
weighted_score = 0.0
|
||||
|
||||
for requirement in requirements:
|
||||
best_match_score = 0.0
|
||||
|
||||
for capability in agent_capabilities:
|
||||
if self._capabilities_match(capability, requirement):
|
||||
proficiency_score = min(capability.proficiency_level / requirement.minimum_proficiency, 1.0)
|
||||
confidence_factor = capability.confidence_score
|
||||
match_score = proficiency_score * confidence_factor
|
||||
best_match_score = max(best_match_score, match_score)
|
||||
|
||||
weighted_score += best_match_score * requirement.weight
|
||||
|
||||
return weighted_score / total_weight
|
||||
|
||||
def _calculate_detailed_capability_match(
|
||||
self,
|
||||
agent_id: str,
|
||||
requirements: list[TaskRequirement]
|
||||
) -> tuple[float, list[str]]:
|
||||
"""Calculate detailed capability match with matched capability names."""
|
||||
if not requirements:
|
||||
return 1.0, []
|
||||
|
||||
agent_capabilities = self.agent_capabilities.get(agent_id, [])
|
||||
if not agent_capabilities:
|
||||
return 0.0, []
|
||||
|
||||
total_weight = sum(req.weight for req in requirements)
|
||||
if total_weight == 0:
|
||||
return 0.0, []
|
||||
|
||||
weighted_score = 0.0
|
||||
matched_capabilities = []
|
||||
|
||||
for requirement in requirements:
|
||||
best_match_score = 0.0
|
||||
best_match_capability = None
|
||||
|
||||
for capability in agent_capabilities:
|
||||
if self._capabilities_match(capability, requirement):
|
||||
proficiency_score = min(capability.proficiency_level / requirement.minimum_proficiency, 1.0)
|
||||
confidence_factor = capability.confidence_score
|
||||
match_score = proficiency_score * confidence_factor
|
||||
|
||||
if match_score > best_match_score:
|
||||
best_match_score = match_score
|
||||
best_match_capability = capability.name
|
||||
|
||||
if best_match_capability:
|
||||
matched_capabilities.append(best_match_capability)
|
||||
|
||||
weighted_score += best_match_score * requirement.weight
|
||||
|
||||
return weighted_score / total_weight, matched_capabilities
|
||||
|
||||
def _capabilities_match(self, capability: AgentCapability, requirement: TaskRequirement) -> bool:
|
||||
"""Check if a capability matches a requirement."""
|
||||
if capability.name.lower() == requirement.capability_name.lower():
|
||||
return True
|
||||
|
||||
if capability.capability_type == requirement.capability_type:
|
||||
return True
|
||||
|
||||
capability_keywords = set(kw.lower() for kw in capability.keywords)
|
||||
requirement_keywords = set(kw.lower() for kw in requirement.keywords)
|
||||
|
||||
if capability_keywords.intersection(requirement_keywords):
|
||||
return True
|
||||
|
||||
return False
|
||||
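The match score above is a weighted average: each requirement contributes min(proficiency / minimum_proficiency, 1.0) * confidence for its best-matching capability, and contributions are combined as sum(score * weight) / sum(weight). A standalone recomputation with hypothetical numbers:

```python
# (proficiency, confidence) of the agent's best matching capability per requirement,
# paired with each requirement's (minimum_proficiency, weight).
matches = [
    # requirement: python coding, min 0.6, weight 2.0 -> capability prof 0.9, conf 0.8
    ((0.9, 0.8), (0.6, 2.0)),
    # requirement: data viz, min 0.8, weight 1.0 -> capability prof 0.6, conf 0.9
    ((0.6, 0.9), (0.8, 1.0)),
]

weighted = 0.0
total_weight = 0.0
for (prof, conf), (min_prof, weight) in matches:
    proficiency_score = min(prof / min_prof, 1.0)   # capped at 1.0, as in the module
    weighted += proficiency_score * conf * weight
    total_weight += weight

print(round(weighted / total_weight, 3))  # (1.0*0.8*2 + 0.75*0.9*1) / 3 = 0.758
```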
188
src/crewai/responsibility/models.py
Normal file
188
src/crewai/responsibility/models.py
Normal file
@@ -0,0 +1,188 @@
|
||||
"""
|
||||
Data models for the formal responsibility tracking system.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from typing import Any
|
||||
from uuid import UUID, uuid4
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class CapabilityType(str, Enum):
|
||||
"""Types of capabilities an agent can have."""
|
||||
TECHNICAL = "technical"
|
||||
ANALYTICAL = "analytical"
|
||||
CREATIVE = "creative"
|
||||
COMMUNICATION = "communication"
|
||||
LEADERSHIP = "leadership"
|
||||
DOMAIN_SPECIFIC = "domain_specific"
|
||||
|
||||
|
||||
class AgentCapability(BaseModel):
|
||||
"""Represents a specific capability of an agent."""
|
||||
|
||||
id: UUID = Field(default_factory=uuid4)
|
||||
name: str = Field(..., description="Name of the capability")
|
||||
capability_type: CapabilityType = Field(..., description="Type of capability")
|
||||
proficiency_level: float = Field(
|
||||
...,
|
||||
ge=0.0,
|
||||
le=1.0,
|
||||
description="Proficiency level from 0.0 to 1.0"
|
||||
)
|
||||
confidence_score: float = Field(
|
||||
...,
|
||||
ge=0.0,
|
||||
le=1.0,
|
||||
description="Confidence in this capability assessment"
|
||||
)
|
||||
description: str | None = Field(None, description="Detailed description of the capability")
|
||||
keywords: list[str] = Field(default_factory=list, description="Keywords associated with this capability")
|
||||
last_updated: datetime = Field(default_factory=datetime.utcnow)
|
||||
|
||||
def update_proficiency(self, new_level: float, confidence: float) -> None:
|
||||
"""Update proficiency level and confidence."""
|
||||
self.proficiency_level = max(0.0, min(1.0, new_level))
|
||||
self.confidence_score = max(0.0, min(1.0, confidence))
|
||||
self.last_updated = datetime.utcnow()
|
||||
|
||||
|
||||
class ResponsibilityAssignment(BaseModel):
|
||||
"""Represents the assignment of responsibility for a task to an agent."""
|
||||
|
||||
id: UUID = Field(default_factory=uuid4)
|
||||
agent_id: str = Field(..., description="ID of the assigned agent")
|
||||
task_id: str = Field(..., description="ID of the task")
|
||||
responsibility_score: float = Field(
|
||||
...,
|
||||
ge=0.0,
|
||||
le=1.0,
|
||||
description="Calculated responsibility score"
|
||||
)
|
||||
capability_matches: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Capabilities that matched for this assignment"
|
||||
)
|
||||
reasoning: str = Field(..., description="Explanation for this assignment")
|
||||
assigned_at: datetime = Field(default_factory=datetime.utcnow)
|
||||
completed_at: datetime | None = Field(None)
|
||||
success: bool | None = Field(None, description="Whether the assignment was successful")
|
||||
|
||||
def mark_completed(self, success: bool) -> None:
|
||||
"""Mark the assignment as completed."""
|
||||
self.completed_at = datetime.utcnow()
|
||||
self.success = success
|
||||
|
||||
|
||||
class AccountabilityRecord(BaseModel):
|
||||
"""Records agent actions and decisions for accountability tracking."""
|
||||
|
||||
id: UUID = Field(default_factory=uuid4)
|
||||
agent_id: str = Field(..., description="ID of the agent")
|
||||
action_type: str = Field(..., description="Type of action taken")
|
||||
action_description: str = Field(..., description="Description of the action")
|
||||
task_id: str | None = Field(None, description="Related task ID if applicable")
|
||||
context: dict[str, Any] = Field(default_factory=dict, description="Additional context")
|
||||
outcome: str | None = Field(None, description="Outcome of the action")
|
||||
success: bool | None = Field(None, description="Whether the action was successful")
|
||||
timestamp: datetime = Field(default_factory=datetime.utcnow)
|
||||
|
||||
def set_outcome(self, outcome: str, success: bool) -> None:
|
||||
"""Set the outcome of the action."""
|
||||
self.outcome = outcome
|
||||
self.success = success
|
||||
|
||||
|
||||
class PerformanceMetrics(BaseModel):
|
||||
"""Performance metrics for an agent."""
|
||||
|
||||
agent_id: str = Field(..., description="ID of the agent")
|
||||
total_tasks: int = Field(default=0, description="Total number of tasks assigned")
|
||||
successful_tasks: int = Field(default=0, description="Number of successful tasks")
|
||||
failed_tasks: int = Field(default=0, description="Number of failed tasks")
|
||||
average_completion_time: float = Field(default=0.0, description="Average task completion time in seconds")
|
||||
quality_score: float = Field(
|
||||
default=0.5,
|
||||
ge=0.0,
|
||||
le=1.0,
|
||||
description="Overall quality score"
|
||||
)
|
||||
efficiency_score: float = Field(
|
||||
default=0.5,
|
||||
ge=0.0,
|
||||
le=1.0,
|
||||
description="Efficiency score based on completion times"
|
||||
)
|
||||
reliability_score: float = Field(
|
||||
default=0.5,
|
||||
ge=0.0,
|
||||
le=1.0,
|
||||
description="Reliability score based on success rate"
|
||||
)
|
||||
last_updated: datetime = Field(default_factory=datetime.utcnow)
|
||||
|
||||
@property
|
||||
def success_rate(self) -> float:
|
||||
"""Calculate success rate."""
|
||||
if self.total_tasks == 0:
|
||||
return 0.0
|
||||
return self.successful_tasks / self.total_tasks
|
||||
|
||||
def update_metrics(
|
||||
self,
|
||||
task_success: bool,
|
||||
completion_time: float,
|
||||
quality_score: float | None = None
|
||||
) -> None:
|
||||
"""Update performance metrics with new task result."""
|
||||
self.total_tasks += 1
|
||||
if task_success:
|
||||
self.successful_tasks += 1
|
||||
else:
|
||||
self.failed_tasks += 1
|
||||
|
||||
alpha = 0.1 # Learning rate
|
||||
|
||||
if self.total_tasks == 1:
|
||||
self.average_completion_time = completion_time
|
||||
else:
|
||||
self.average_completion_time = (
|
||||
alpha * completion_time + (1 - alpha) * self.average_completion_time
|
||||
)
|
||||
|
||||
self.reliability_score = self.success_rate
|
||||
|
||||
if completion_time > 0:
|
||||
normalized_time = min(completion_time / 3600, 1.0) # Normalize to hours, cap at 1
|
||||
self.efficiency_score = max(0.1, 1.0 - normalized_time)
|
||||
|
||||
if quality_score is not None:
|
||||
self.quality_score = (
|
||||
alpha * quality_score + (1 - alpha) * self.quality_score
|
||||
)
|
||||
|
||||
self.last_updated = datetime.utcnow()
|
||||
|
||||
|
||||
class TaskRequirement(BaseModel):
|
||||
"""Represents capability requirements for a task."""
|
||||
|
||||
capability_name: str = Field(..., description="Name of required capability")
|
||||
capability_type: CapabilityType = Field(..., description="Type of required capability")
|
||||
minimum_proficiency: float = Field(
|
||||
...,
|
||||
ge=0.0,
|
||||
le=1.0,
|
||||
description="Minimum required proficiency level"
|
||||
)
|
||||
weight: float = Field(
|
||||
default=1.0,
|
||||
ge=0.0,
|
||||
description="Weight/importance of this requirement"
|
||||
)
|
||||
keywords: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Keywords that help match capabilities"
|
||||
)
|
||||
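The update rule is an exponential moving average with alpha = 0.1: the first completion seeds average_completion_time, later ones shift it 10% toward the new observation, reliability tracks the raw success rate, and efficiency is one minus the completion time normalized to an hour (floored at 0.1). A short sketch, assuming the branch's models module is importable:

```python
from crewai.responsibility.models import PerformanceMetrics

m = PerformanceMetrics(agent_id="analyst_1")

m.update_metrics(task_success=True, completion_time=600.0)   # seeds the average
m.update_metrics(task_success=True, completion_time=1200.0)  # EMA: 0.1*1200 + 0.9*600
m.update_metrics(task_success=False, completion_time=900.0, quality_score=0.4)

print(m.total_tasks, m.successful_tasks, m.failed_tasks)  # 3 2 1
print(round(m.success_rate, 3))                            # 0.667
print(round(m.average_completion_time, 1))                 # 0.1*900 + 0.9*660 = 684.0
print(round(m.efficiency_score, 3))                        # 1 - 900/3600 = 0.75
```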
233
src/crewai/responsibility/performance.py
Normal file
233
src/crewai/responsibility/performance.py
Normal file
@@ -0,0 +1,233 @@
|
||||
"""
|
||||
Performance-based capability adjustment system.
|
||||
"""
|
||||
|
||||
from datetime import timedelta
|
||||
from typing import Any
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.responsibility.hierarchy import CapabilityHierarchy
|
||||
from crewai.responsibility.models import AgentCapability, PerformanceMetrics
|
||||
|
||||
|
||||
class PerformanceTracker:
|
||||
"""Tracks agent performance and adjusts capabilities accordingly."""
|
||||
|
||||
def __init__(self, hierarchy: CapabilityHierarchy):
|
||||
self.hierarchy = hierarchy
|
||||
self.performance_metrics: dict[str, PerformanceMetrics] = {}
|
||||
self.learning_rate = 0.1
|
||||
self.adjustment_threshold = 0.05 # Minimum change to trigger capability update
|
||||
|
||||
def record_task_completion(
|
||||
self,
|
||||
agent: BaseAgent,
|
||||
task_success: bool,
|
||||
completion_time: float,
|
||||
quality_score: float | None = None,
|
||||
capability_used: str | None = None
|
||||
) -> None:
|
||||
"""Record a task completion and update performance metrics."""
|
||||
agent_id = self._get_agent_id(agent)
|
||||
|
||||
if agent_id not in self.performance_metrics:
|
||||
self.performance_metrics[agent_id] = PerformanceMetrics(agent_id=agent_id)
|
||||
|
||||
metrics = self.performance_metrics[agent_id]
|
||||
metrics.update_metrics(task_success, completion_time, quality_score)
|
||||
|
||||
if capability_used and task_success is not None:
|
||||
self._update_capability_based_on_performance(
|
||||
agent, capability_used, task_success, quality_score
|
||||
)
|
||||
|
||||
def get_performance_metrics(self, agent: BaseAgent) -> PerformanceMetrics | None:
|
||||
"""Get performance metrics for an agent."""
|
||||
agent_id = self._get_agent_id(agent)
|
||||
return self.performance_metrics.get(agent_id)
|
||||
|
||||
def adjust_capabilities_based_on_performance(
|
||||
self,
|
||||
agent: BaseAgent,
|
||||
performance_window: timedelta = timedelta(days=7)
|
||||
) -> list[tuple[str, float, float]]:
|
||||
"""Adjust agent capabilities based on recent performance."""
|
||||
agent_id = self._get_agent_id(agent)
|
||||
metrics = self.performance_metrics.get(agent_id)
|
||||
|
||||
if not metrics:
|
||||
return []
|
||||
|
||||
adjustments = []
|
||||
agent_capabilities = self.hierarchy.get_agent_capabilities(agent)
|
||||
|
||||
for capability in agent_capabilities:
|
||||
old_proficiency = capability.proficiency_level
|
||||
old_confidence = capability.confidence_score
|
||||
|
||||
new_proficiency, new_confidence = self._calculate_adjusted_capability(
|
||||
capability, metrics
|
||||
)
|
||||
|
||||
proficiency_change = abs(new_proficiency - old_proficiency)
|
||||
confidence_change = abs(new_confidence - old_confidence)
|
||||
|
||||
if proficiency_change >= self.adjustment_threshold or confidence_change >= self.adjustment_threshold:
|
||||
self.hierarchy.update_agent_capability(
|
||||
agent, capability.name, new_proficiency, new_confidence
|
||||
)
|
||||
adjustments.append((capability.name, new_proficiency - old_proficiency, new_confidence - old_confidence))
|
||||
|
||||
return adjustments
|
||||
|
||||
def get_performance_trends(
|
||||
self,
|
||||
agent: BaseAgent,
|
||||
capability_name: str | None = None
|
||||
) -> dict[str, list[float]]:
|
||||
"""Get performance trends for an agent."""
|
||||
agent_id = self._get_agent_id(agent)
|
||||
metrics = self.performance_metrics.get(agent_id)
|
||||
|
||||
if not metrics:
|
||||
return {}
|
||||
|
||||
return {
|
||||
"success_rate": [metrics.success_rate],
|
||||
"quality_score": [metrics.quality_score],
|
||||
"efficiency_score": [metrics.efficiency_score],
|
||||
"reliability_score": [metrics.reliability_score]
|
||||
}
|
||||
|
||||
def identify_improvement_opportunities(
|
||||
self,
|
||||
agent: BaseAgent
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Identify areas where an agent could improve."""
|
||||
agent_id = self._get_agent_id(agent)
|
||||
metrics = self.performance_metrics.get(agent_id)
|
||||
|
||||
if not metrics:
|
||||
return []
|
||||
|
||||
opportunities = []
|
||||
|
||||
if metrics.success_rate < 0.7:
|
||||
opportunities.append({
|
||||
"area": "success_rate",
|
||||
"current_value": metrics.success_rate,
|
||||
"recommendation": "Focus on task completion accuracy and problem-solving skills"
|
||||
})
|
||||
|
||||
if metrics.quality_score < 0.6:
|
||||
opportunities.append({
|
||||
"area": "quality",
|
||||
"current_value": metrics.quality_score,
|
||||
"recommendation": "Improve attention to detail and output quality"
|
||||
})
|
||||
|
||||
if metrics.efficiency_score < 0.5:
|
||||
opportunities.append({
|
||||
"area": "efficiency",
|
||||
"current_value": metrics.efficiency_score,
|
||||
"recommendation": "Work on time management and process optimization"
|
||||
})
|
||||
|
||||
return opportunities
|
||||
|
||||
def compare_agent_performance(
|
||||
self,
|
||||
agents: list[BaseAgent],
|
||||
metric: str = "overall"
|
||||
) -> list[tuple[BaseAgent, float]]:
|
||||
"""Compare performance across multiple agents."""
|
||||
agent_scores = []
|
||||
|
||||
for agent in agents:
|
||||
agent_id = self._get_agent_id(agent)
|
||||
metrics = self.performance_metrics.get(agent_id)
|
||||
|
||||
if not metrics:
|
||||
continue
|
||||
|
||||
if metric == "overall":
|
||||
score = (
|
||||
metrics.success_rate * 0.4 +
|
||||
metrics.quality_score * 0.3 +
|
||||
metrics.efficiency_score * 0.2 +
|
||||
metrics.reliability_score * 0.1
|
||||
)
|
||||
elif metric == "success_rate":
|
||||
score = metrics.success_rate
|
||||
elif metric == "quality":
|
||||
score = metrics.quality_score
|
||||
elif metric == "efficiency":
|
||||
score = metrics.efficiency_score
|
||||
elif metric == "reliability":
|
||||
score = metrics.reliability_score
|
||||
else:
|
||||
continue
|
||||
|
||||
agent_scores.append((agent, score))
|
||||
|
||||
agent_scores.sort(key=lambda x: x[1], reverse=True)
|
||||
return agent_scores
|
||||
|
||||
def _update_capability_based_on_performance(
|
||||
self,
|
||||
agent: BaseAgent,
|
||||
capability_name: str,
|
||||
task_success: bool,
|
||||
quality_score: float | None
|
||||
) -> None:
|
||||
"""Update a specific capability based on task performance."""
|
||||
agent_capabilities = self.hierarchy.get_agent_capabilities(agent)
|
||||
|
||||
for capability in agent_capabilities:
|
||||
if capability.name == capability_name:
|
||||
if task_success:
|
||||
proficiency_adjustment = self.learning_rate * 0.1 # Small positive adjustment
|
||||
confidence_adjustment = self.learning_rate * 0.05
|
||||
else:
|
||||
proficiency_adjustment = -self.learning_rate * 0.05 # Small negative adjustment
|
||||
confidence_adjustment = -self.learning_rate * 0.1
|
||||
|
||||
if quality_score is not None:
|
||||
quality_factor = (quality_score - 0.5) * 2 # Scale to -1 to 1
|
||||
proficiency_adjustment *= (1 + quality_factor * 0.5)
|
||||
|
||||
new_proficiency = max(0.0, min(1.0, capability.proficiency_level + proficiency_adjustment))
|
||||
new_confidence = max(0.0, min(1.0, capability.confidence_score + confidence_adjustment))
|
||||
|
||||
self.hierarchy.update_agent_capability(
|
||||
agent, capability_name, new_proficiency, new_confidence
|
||||
)
|
||||
break
|
||||
|
||||
def _calculate_adjusted_capability(
|
||||
self,
|
||||
capability: AgentCapability,
|
||||
metrics: PerformanceMetrics
|
||||
) -> tuple[float, float]:
|
||||
"""Calculate adjusted capability values based on performance metrics."""
|
||||
performance_factor = (
|
||||
metrics.success_rate * 0.4 +
|
||||
metrics.quality_score * 0.3 +
|
||||
metrics.efficiency_score * 0.2 +
|
||||
metrics.reliability_score * 0.1
|
||||
)
|
||||
|
||||
adjustment_magnitude = (performance_factor - 0.5) * self.learning_rate
|
||||
|
||||
new_proficiency = capability.proficiency_level + adjustment_magnitude
|
||||
new_proficiency = max(0.0, min(1.0, new_proficiency))
|
||||
|
||||
confidence_adjustment = (metrics.reliability_score - 0.5) * self.learning_rate * 0.5
|
||||
new_confidence = capability.confidence_score + confidence_adjustment
|
||||
new_confidence = max(0.0, min(1.0, new_confidence))
|
||||
|
||||
return new_proficiency, new_confidence
|
||||
|
||||
def _get_agent_id(self, agent: BaseAgent) -> str:
|
||||
"""Get a unique identifier for an agent."""
|
||||
return f"{agent.role}_{id(agent)}"
|
||||
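For the windowed adjustment path, the proficiency delta is (performance_factor - 0.5) * learning_rate, where performance_factor is the 0.4/0.3/0.2/0.1 blend of success rate, quality, efficiency and reliability; confidence moves by (reliability - 0.5) * learning_rate * 0.5, and deltas under the 0.05 threshold are dropped. A quick standalone check of that arithmetic (mirroring the code, not importing it):

```python
LEARNING_RATE = 0.1
ADJUSTMENT_THRESHOLD = 0.05


def capability_adjustment(success_rate: float, quality: float,
                          efficiency: float, reliability: float) -> tuple[float, float]:
    """Mirrors the deltas in PerformanceTracker._calculate_adjusted_capability."""
    performance_factor = (
        success_rate * 0.4 + quality * 0.3 + efficiency * 0.2 + reliability * 0.1
    )
    proficiency_delta = (performance_factor - 0.5) * LEARNING_RATE
    confidence_delta = (reliability - 0.5) * LEARNING_RATE * 0.5
    return proficiency_delta, confidence_delta


# A consistently strong agent gets a small upward nudge, but both deltas stay
# below the 0.05 threshold, so a single window would not trigger an update.
dp, dc = capability_adjustment(0.9, 0.8, 0.7, 0.9)
print(round(dp, 4), round(dc, 4))  # 0.033 0.02
```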
259
src/crewai/responsibility/system.py
Normal file
259
src/crewai/responsibility/system.py
Normal file
@@ -0,0 +1,259 @@
|
||||
"""
|
||||
Main responsibility system that coordinates all components.
|
||||
"""
|
||||
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Any
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.responsibility.accountability import AccountabilityLogger
|
||||
from crewai.responsibility.assignment import (
|
||||
AssignmentStrategy,
|
||||
ResponsibilityCalculator,
|
||||
)
|
||||
from crewai.responsibility.hierarchy import CapabilityHierarchy
|
||||
from crewai.responsibility.models import (
|
||||
AgentCapability,
|
||||
ResponsibilityAssignment,
|
||||
TaskRequirement,
|
||||
)
|
||||
from crewai.responsibility.performance import PerformanceTracker
|
||||
from crewai.task import Task
|
||||
|
||||
|
||||
class ResponsibilitySystem:
|
||||
"""Main system that coordinates all responsibility tracking components."""
|
||||
|
||||
def __init__(self):
|
||||
self.hierarchy = CapabilityHierarchy()
|
||||
self.calculator = ResponsibilityCalculator(self.hierarchy)
|
||||
self.accountability = AccountabilityLogger()
|
||||
self.performance = PerformanceTracker(self.hierarchy)
|
||||
self.enabled = True
|
||||
|
||||
def register_agent(
|
||||
self,
|
||||
agent: BaseAgent,
|
||||
capabilities: list[AgentCapability],
|
||||
supervisor: BaseAgent | None = None
|
||||
) -> None:
|
||||
"""Register an agent with the responsibility system."""
|
||||
if not self.enabled:
|
||||
return
|
||||
|
||||
self.hierarchy.add_agent(agent, capabilities)
|
||||
|
||||
if supervisor:
|
||||
self.hierarchy.set_supervision_relationship(supervisor, agent)
|
||||
|
||||
self.accountability.log_action(
|
||||
agent=agent,
|
||||
action_type="registration",
|
||||
action_description=f"Agent registered with {len(capabilities)} capabilities",
|
||||
context={"capabilities": [cap.name for cap in capabilities]}
|
||||
)
|
||||
|
||||
def assign_task_responsibility(
|
||||
self,
|
||||
task: Task,
|
||||
requirements: list[TaskRequirement],
|
||||
strategy: AssignmentStrategy = AssignmentStrategy.GREEDY,
|
||||
exclude_agents: list[BaseAgent] | None = None
|
||||
) -> ResponsibilityAssignment | None:
|
||||
"""Assign responsibility for a task to the best agent."""
|
||||
if not self.enabled:
|
||||
return None
|
||||
|
||||
assignment = self.calculator.calculate_responsibility_assignment(
|
||||
task, requirements, strategy, exclude_agents
|
||||
)
|
||||
|
||||
if assignment:
|
||||
agent = self._get_agent_by_id(assignment.agent_id)
|
||||
if agent:
|
||||
self.calculator.update_workload(agent, 1)
|
||||
|
||||
self.accountability.log_action(
|
||||
agent=agent,
|
||||
action_type="task_assignment",
|
||||
action_description=f"Assigned responsibility for task: {task.description[:100]}...",
|
||||
task=task,
|
||||
context={
|
||||
"responsibility_score": assignment.responsibility_score,
|
||||
"capability_matches": assignment.capability_matches,
|
||||
"strategy": strategy.value
|
||||
}
|
||||
)
|
||||
|
||||
return assignment
|
||||
|
||||
def complete_task(
|
||||
self,
|
||||
agent: BaseAgent,
|
||||
task: Task,
|
||||
success: bool,
|
||||
completion_time: float,
|
||||
quality_score: float | None = None,
|
||||
outcome_description: str = ""
|
||||
) -> None:
|
||||
"""Record task completion and update performance metrics."""
|
||||
if not self.enabled:
|
||||
return
|
||||
|
||||
self.performance.record_task_completion(
|
||||
agent, success, completion_time, quality_score
|
||||
)
|
||||
|
||||
self.calculator.update_workload(agent, -1)
|
||||
|
||||
self.accountability.log_task_completion(
|
||||
agent, task, success, outcome_description, completion_time
|
||||
)
|
||||
|
||||
adjustments = self.performance.adjust_capabilities_based_on_performance(agent)
|
||||
if adjustments:
|
||||
self.accountability.log_action(
|
||||
agent=agent,
|
||||
action_type="capability_adjustment",
|
||||
action_description="Capabilities adjusted based on performance",
|
||||
context={"adjustments": adjustments}
|
||||
)
|
||||
|
||||
def delegate_task(
|
||||
self,
|
||||
delegating_agent: BaseAgent,
|
||||
receiving_agent: BaseAgent,
|
||||
task: Task,
|
||||
reason: str
|
||||
) -> None:
|
||||
"""Record task delegation between agents."""
|
||||
if not self.enabled:
|
||||
return
|
||||
|
||||
self.calculator.update_workload(delegating_agent, -1)
|
||||
self.calculator.update_workload(receiving_agent, 1)
|
||||
|
||||
self.accountability.log_delegation(
|
||||
delegating_agent, receiving_agent, task, reason
|
||||
)
|
||||
|
||||
def get_agent_status(self, agent: BaseAgent) -> dict[str, Any]:
|
||||
"""Get comprehensive status for an agent."""
|
||||
if not self.enabled:
|
||||
return {}
|
||||
|
||||
agent_id = self.hierarchy._get_agent_id(agent)
|
||||
capabilities = self.hierarchy.get_agent_capabilities(agent)
|
||||
performance = self.performance.get_performance_metrics(agent)
|
||||
recent_records = self.accountability.get_agent_records(
|
||||
agent, since=datetime.utcnow() - timedelta(days=7)
|
||||
)
|
||||
current_workload = self.calculator.current_workloads.get(agent_id, 0)
|
||||
|
||||
return {
|
||||
"agent_id": agent_id,
|
||||
"role": agent.role,
|
||||
"capabilities": [
|
||||
{
|
||||
"name": cap.name,
|
||||
"type": cap.capability_type.value,
|
||||
"proficiency": cap.proficiency_level,
|
||||
"confidence": cap.confidence_score
|
||||
}
|
||||
for cap in capabilities
|
||||
],
|
||||
"performance": {
|
||||
"success_rate": performance.success_rate if performance else 0.0,
|
||||
"quality_score": performance.quality_score if performance else 0.0,
|
||||
"efficiency_score": performance.efficiency_score if performance else 0.0,
|
||||
"total_tasks": performance.total_tasks if performance else 0
|
||||
} if performance else None,
|
||||
"current_workload": current_workload,
|
||||
"recent_activity_count": len(recent_records)
|
||||
}
|
||||
|
||||
def get_system_overview(self) -> dict[str, Any]:
|
||||
"""Get overview of the entire responsibility system."""
|
||||
if not self.enabled:
|
||||
return {"enabled": False}
|
||||
|
||||
total_agents = len(self.hierarchy.agents)
|
||||
capability_distribution = self.hierarchy.get_capability_distribution()
|
||||
workload_distribution = self.calculator.get_workload_distribution()
|
||||
|
||||
all_performance = list(self.performance.performance_metrics.values())
|
||||
avg_success_rate = sum(p.success_rate for p in all_performance) / len(all_performance) if all_performance else 0.0
|
||||
avg_quality = sum(p.quality_score for p in all_performance) / len(all_performance) if all_performance else 0.0
|
||||
|
||||
return {
|
||||
"enabled": True,
|
||||
"total_agents": total_agents,
|
||||
"capability_distribution": capability_distribution,
|
||||
"workload_distribution": workload_distribution,
|
||||
"system_performance": {
|
||||
"average_success_rate": avg_success_rate,
|
||||
"average_quality_score": avg_quality,
|
||||
"total_tasks_completed": sum(p.total_tasks for p in all_performance)
|
||||
},
|
||||
"total_accountability_records": len(self.accountability.records)
|
||||
}
|
||||
|
||||
def generate_recommendations(self) -> list[dict[str, Any]]:
|
||||
"""Generate system-wide recommendations for improvement."""
|
||||
if not self.enabled:
|
||||
return []
|
||||
|
||||
recommendations = []
|
||||
|
||||
workloads = self.calculator.get_workload_distribution()
|
||||
if workloads:
|
||||
max_workload = max(workloads.values())
|
||||
min_workload = min(workloads.values())
|
||||
|
||||
if max_workload - min_workload > 3: # Significant imbalance
|
||||
recommendations.append({
|
||||
"type": "workload_balancing",
|
||||
"priority": "high",
|
||||
"description": "Workload imbalance detected. Consider redistributing tasks.",
|
||||
"details": {"max_workload": max_workload, "min_workload": min_workload}
|
||||
})
|
||||
|
||||
capability_dist = self.hierarchy.get_capability_distribution()
|
||||
for cap_type, levels in capability_dist.items():
|
||||
total_agents_with_cap = sum(levels.values())
|
||||
if total_agents_with_cap < 2: # Too few agents with this capability
|
||||
recommendations.append({
|
||||
"type": "capability_gap",
|
||||
"priority": "medium",
|
||||
"description": f"Limited coverage for {cap_type.value} capabilities",
|
||||
"details": {"capability_type": cap_type.value, "agent_count": total_agents_with_cap}
|
||||
})
|
||||
|
||||
for agent_id, metrics in self.performance.performance_metrics.items():
|
||||
if metrics.success_rate < 0.6: # Low success rate
|
||||
agent = self._get_agent_by_id(agent_id)
|
||||
if agent:
|
||||
recommendations.append({
|
||||
"type": "performance_improvement",
|
||||
"priority": "high",
|
||||
"description": f"Agent {agent.role} has low success rate",
|
||||
"details": {
|
||||
"agent_role": agent.role,
|
||||
"success_rate": metrics.success_rate,
|
||||
"improvement_opportunities": self.performance.identify_improvement_opportunities(agent)
|
||||
}
|
||||
})
|
||||
|
||||
return recommendations
|
||||
|
||||
def enable_system(self) -> None:
|
||||
"""Enable the responsibility system."""
|
||||
self.enabled = True
|
||||
|
||||
def disable_system(self) -> None:
|
||||
"""Disable the responsibility system."""
|
||||
self.enabled = False
|
||||
|
||||
def _get_agent_by_id(self, agent_id: str) -> BaseAgent | None:
|
||||
"""Get agent by ID."""
|
||||
return self.hierarchy.agents.get(agent_id)
|
||||
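Putting the pieces together, a hedged end-to-end sketch (assuming this branch's package; the Agent and Task objects are created but never executed): register agents with capabilities, let the system pick an owner for a task, then report completion so workload, accountability and performance all update in one call.

```python
from crewai import Agent, Task
from crewai.responsibility import AgentCapability, ResponsibilitySystem
from crewai.responsibility.models import TaskRequirement

system = ResponsibilitySystem()

coder = Agent(role="Coder", goal="Ship features", backstory="Backend specialist.")
writer = Agent(role="Writer", goal="Document features", backstory="Docs specialist.")

system.register_agent(coder, [
    AgentCapability(name="python", capability_type="technical",
                    proficiency_level=0.9, confidence_score=0.8),
])
system.register_agent(writer, [
    AgentCapability(name="docs", capability_type="communication",
                    proficiency_level=0.8, confidence_score=0.9),
])

task = Task(
    description="Implement the new export endpoint",
    expected_output="Working endpoint with tests",
    agent=coder,
)
requirement = TaskRequirement(
    capability_name="python", capability_type="technical",
    minimum_proficiency=0.6, weight=1.0,
)

assignment = system.assign_task_responsibility(task, [requirement])
print(assignment.agent_id if assignment else "no capable agent")

system.complete_task(coder, task, success=True, completion_time=1800.0,
                     quality_score=0.9, outcome_description="Merged")
print(system.get_system_overview()["system_performance"])
```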
@@ -1,9 +1,2 @@
"""Telemetry configuration constants.

This module defines constants used for CrewAI telemetry configuration.
"""

from typing import Final

CREWAI_TELEMETRY_BASE_URL: Final[str] = "https://telemetry.crewai.com:4319"
CREWAI_TELEMETRY_SERVICE_NAME: Final[str] = "crewAI-telemetry"
CREWAI_TELEMETRY_BASE_URL: str = "https://telemetry.crewai.com:4319"
CREWAI_TELEMETRY_SERVICE_NAME: str = "crewAI-telemetry"

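The hunk above tightens the two constants from plain str annotations to typing.Final, so reassignment becomes a type-check error while runtime behavior is unchanged. For illustration:

```python
from typing import Final

CREWAI_TELEMETRY_BASE_URL: Final[str] = "https://telemetry.crewai.com:4319"

# mypy / pyright: error: cannot assign to a name declared Final
CREWAI_TELEMETRY_BASE_URL = "http://localhost:4319"  # flagged by the type checker only
```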
@@ -1,11 +1,3 @@
|
||||
"""Telemetry module for CrewAI.
|
||||
|
||||
This module provides anonymous telemetry collection for development purposes.
|
||||
No prompts, task descriptions, agent backstories/goals, responses, or sensitive
|
||||
data is collected. Users can opt-in to share more complete data using the
|
||||
`share_crew` attribute.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
@@ -13,10 +5,11 @@ import json
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
import threading
|
||||
from collections.abc import Callable
|
||||
import warnings
|
||||
from contextlib import contextmanager
|
||||
from importlib.metadata import version
|
||||
from typing import TYPE_CHECKING, Any
|
||||
from typing import TYPE_CHECKING, Any, Callable, Optional
|
||||
import threading
|
||||
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
|
||||
@@ -28,43 +21,30 @@ from opentelemetry.sdk.trace.export import (
|
||||
BatchSpanProcessor,
|
||||
SpanExportResult,
|
||||
)
|
||||
from opentelemetry.trace import Span
|
||||
from opentelemetry.trace import Span, Status, StatusCode
|
||||
|
||||
from crewai.telemetry.constants import (
|
||||
CREWAI_TELEMETRY_BASE_URL,
|
||||
CREWAI_TELEMETRY_SERVICE_NAME,
|
||||
)
|
||||
from crewai.telemetry.utils import (
|
||||
add_agent_fingerprint_to_span,
|
||||
add_crew_and_task_attributes,
|
||||
add_crew_attributes,
|
||||
close_span,
|
||||
)
|
||||
from crewai.utilities.logger_utils import suppress_warnings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def suppress_warnings():
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings("ignore")
|
||||
yield
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.crew import Crew
|
||||
from crewai.task import Task
|
||||
|
||||
|
||||
class SafeOTLPSpanExporter(OTLPSpanExporter):
|
||||
"""Safe wrapper for OTLP span exporter that handles exceptions gracefully.
|
||||
|
||||
This exporter prevents telemetry failures from breaking the application
|
||||
by catching and logging exceptions during span export.
|
||||
"""
|
||||
|
||||
def export(self, spans: Any) -> SpanExportResult:
|
||||
"""Export spans to the telemetry backend safely.
|
||||
|
||||
Args:
|
||||
spans: Collection of spans to export.
|
||||
|
||||
Returns:
|
||||
Export result status, FAILURE if an exception occurs.
|
||||
"""
|
||||
def export(self, spans) -> SpanExportResult:
|
||||
try:
|
||||
return super().export(spans)
|
||||
except Exception as e:
|
||||
@@ -73,13 +53,16 @@ class SafeOTLPSpanExporter(OTLPSpanExporter):
|
||||
|
||||
|
||||
class Telemetry:
|
||||
"""Handle anonymous telemetry for the CrewAI package.
|
||||
"""A class to handle anonymous telemetry for the crewai package.
|
||||
|
||||
Attributes:
|
||||
ready: Whether telemetry is initialized and ready.
|
||||
trace_set: Whether the tracer provider has been set.
|
||||
resource: OpenTelemetry resource for the telemetry service.
|
||||
provider: OpenTelemetry tracer provider.
|
||||
The data being collected is for development purpose, all data is anonymous.
|
||||
|
||||
There is NO data being collected on the prompts, tasks descriptions
|
||||
agents backstories or goals nor responses or any data that is being
|
||||
processed by the agents, nor any secrets and env vars.
|
||||
|
||||
Users can opt-in to sharing more complete data using the `share_crew`
|
||||
attribute in the Crew class.
|
||||
"""
|
||||
|
||||
_instance = None
|
||||
@@ -89,14 +72,14 @@ class Telemetry:
|
||||
if cls._instance is None:
|
||||
with cls._lock:
|
||||
if cls._instance is None:
|
||||
cls._instance = super().__new__(cls)
|
||||
cls._instance = super(Telemetry, cls).__new__(cls)
|
||||
cls._instance._initialized = False
|
||||
return cls._instance
|
||||
|
||||
def __init__(self) -> None:
|
||||
if hasattr(self, "_initialized") and self._initialized:
|
||||
if hasattr(self, '_initialized') and self._initialized:
|
||||
return
|
||||
|
||||
|
||||
self.ready: bool = False
|
||||
self.trace_set: bool = False
|
||||
self._initialized: bool = True
|
||||
@@ -141,41 +124,29 @@ class Telemetry:
|
||||
"""Check if telemetry operations should be executed."""
|
||||
return self.ready and not self._is_telemetry_disabled()
|
||||
|
||||
def set_tracer(self) -> None:
|
||||
"""Set the tracer provider if ready and not already set."""
|
||||
def set_tracer(self):
|
||||
if self.ready and not self.trace_set:
|
||||
try:
|
||||
with suppress_warnings():
|
||||
trace.set_tracer_provider(self.provider)
|
||||
self.trace_set = True
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to set tracer provider: {e}")
|
||||
except Exception:
|
||||
self.ready = False
|
||||
self.trace_set = False
|
||||
|
||||
def _safe_telemetry_operation(self, operation: Callable[[], Any]) -> None:
|
||||
"""Execute telemetry operation safely, checking both readiness and environment variables.
|
||||
|
||||
Args:
|
||||
operation: A callable that performs telemetry operations. May return any value,
|
||||
but the return value is not used by this method.
|
||||
"""
|
||||
def _safe_telemetry_operation(self, operation: Callable[[], None]) -> None:
|
||||
"""Execute telemetry operation safely, checking both readiness and environment variables."""
|
||||
if not self._should_execute_telemetry():
|
||||
return
|
||||
try:
|
||||
operation()
|
||||
except Exception as e:
|
||||
logger.debug(f"Telemetry operation failed: {e}")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def crew_creation(self, crew: Crew, inputs: dict[str, Any] | None) -> None:
|
||||
"""Records the creation of a crew.
|
||||
def crew_creation(self, crew: Crew, inputs: dict[str, Any] | None):
|
||||
"""Records the creation of a crew."""
|
||||
|
||||
Args:
|
||||
crew: The crew being created.
|
||||
inputs: Optional input parameters for the crew.
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Crew Created")
|
||||
self._add_attribute(
|
||||
@@ -184,14 +155,16 @@ class Telemetry:
|
||||
version("crewai"),
|
||||
)
|
||||
self._add_attribute(span, "python_version", platform.python_version())
|
||||
add_crew_attributes(span, crew, self._add_attribute)
|
||||
self._add_attribute(span, "crew_key", crew.key)
|
||||
self._add_attribute(span, "crew_id", str(crew.id))
|
||||
self._add_attribute(span, "crew_process", crew.process)
|
||||
self._add_attribute(span, "crew_memory", crew.memory)
|
||||
self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
|
||||
self._add_attribute(span, "crew_number_of_agents", len(crew.agents))
|
||||
|
||||
# Add additional fingerprint metadata if available
|
||||
# Add fingerprint data
|
||||
if hasattr(crew, "fingerprint") and crew.fingerprint:
|
||||
self._add_attribute(span, "crew_fingerprint", crew.fingerprint.uuid_str)
|
||||
self._add_attribute(
|
||||
span,
|
||||
"crew_fingerprint_created_at",
|
||||
@@ -370,27 +343,29 @@ class Telemetry:
|
||||
]
|
||||
),
|
||||
)
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def task_started(self, crew: Crew, task: Task) -> Span | None:
|
||||
"""Records task started in a crew.
|
||||
"""Records task started in a crew."""
|
||||
|
||||
Args:
|
||||
crew: The crew executing the task.
|
||||
task: The task being started.
|
||||
|
||||
Returns:
|
||||
The span tracking the task execution, or None if telemetry is disabled.
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
|
||||
created_span = tracer.start_span("Task Created")
|
||||
|
||||
add_crew_and_task_attributes(created_span, crew, task, self._add_attribute)
|
||||
self._add_attribute(created_span, "crew_key", crew.key)
|
||||
self._add_attribute(created_span, "crew_id", str(crew.id))
|
||||
self._add_attribute(created_span, "task_key", task.key)
|
||||
self._add_attribute(created_span, "task_id", str(task.id))
|
||||
|
||||
# Add fingerprint data
|
||||
if hasattr(crew, "fingerprint") and crew.fingerprint:
|
||||
self._add_attribute(
|
||||
created_span, "crew_fingerprint", crew.fingerprint.uuid_str
|
||||
)
|
||||
|
||||
if hasattr(task, "fingerprint") and task.fingerprint:
|
||||
self._add_attribute(
|
||||
@@ -411,9 +386,13 @@ class Telemetry:
|
||||
|
||||
# Add agent fingerprint if task has an assigned agent
|
||||
if hasattr(task, "agent") and task.agent:
|
||||
add_agent_fingerprint_to_span(
|
||||
created_span, task.agent, self._add_attribute
|
||||
agent_fingerprint = getattr(
|
||||
getattr(task.agent, "fingerprint", None), "uuid_str", None
|
||||
)
|
||||
if agent_fingerprint:
|
||||
self._add_attribute(
|
||||
created_span, "agent_fingerprint", agent_fingerprint
|
||||
)
|
||||
|
||||
if crew.share_crew:
|
||||
self._add_attribute(
|
||||
@@ -423,18 +402,30 @@ class Telemetry:
|
||||
created_span, "formatted_expected_output", task.expected_output
|
||||
)
|
||||
|
||||
close_span(created_span)
|
||||
created_span.set_status(Status(StatusCode.OK))
|
||||
created_span.end()
|
||||
|
||||
span = tracer.start_span("Task Execution")
|
||||
|
||||
add_crew_and_task_attributes(span, crew, task, self._add_attribute)
|
||||
self._add_attribute(span, "crew_key", crew.key)
|
||||
self._add_attribute(span, "crew_id", str(crew.id))
|
||||
self._add_attribute(span, "task_key", task.key)
|
||||
self._add_attribute(span, "task_id", str(task.id))
|
||||
|
||||
# Add fingerprint data to execution span
|
||||
if hasattr(crew, "fingerprint") and crew.fingerprint:
|
||||
self._add_attribute(span, "crew_fingerprint", crew.fingerprint.uuid_str)
|
||||
|
||||
if hasattr(task, "fingerprint") and task.fingerprint:
|
||||
self._add_attribute(span, "task_fingerprint", task.fingerprint.uuid_str)
|
||||
|
||||
# Add agent fingerprint if task has an assigned agent
|
||||
if hasattr(task, "agent") and task.agent:
|
||||
add_agent_fingerprint_to_span(span, task.agent, self._add_attribute)
|
||||
agent_fingerprint = getattr(
|
||||
getattr(task.agent, "fingerprint", None), "uuid_str", None
|
||||
)
|
||||
if agent_fingerprint:
|
||||
self._add_attribute(span, "agent_fingerprint", agent_fingerprint)
|
||||
|
||||
if crew.share_crew:
|
||||
self._add_attribute(span, "formatted_description", task.description)
|
||||
@@ -444,25 +435,22 @@ class Telemetry:
|
||||
|
||||
return span
|
||||
|
||||
if not self._should_execute_telemetry():
|
||||
return None
|
||||
self._safe_telemetry_operation(operation)
|
||||
return None
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
return _operation()
|
||||
|
||||
def task_ended(self, span: Span, task: Task, crew: Crew) -> None:
|
||||
def task_ended(self, span: Span, task: Task, crew: Crew):
|
||||
"""Records the completion of a task execution in a crew.
|
||||
|
||||
Args:
|
||||
span: The OpenTelemetry span tracking the task execution.
|
||||
task: The task that was completed.
|
||||
crew: The crew context in which the task was executed.
|
||||
span (Span): The OpenTelemetry span tracking the task execution
|
||||
task (Task): The task that was completed
|
||||
crew (Crew): The crew context in which the task was executed
|
||||
|
||||
Note:
|
||||
If share_crew is enabled, this will also record the task output.
|
||||
If share_crew is enabled, this will also record the task output
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
# Ensure fingerprint data is present on completion span
|
||||
if hasattr(task, "fingerprint") and task.fingerprint:
|
||||
self._add_attribute(span, "task_fingerprint", task.fingerprint.uuid_str)
|
||||
@@ -474,20 +462,21 @@ class Telemetry:
|
||||
task.output.raw if task.output else "",
|
||||
)
|
||||
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int) -> None:
|
||||
def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int):
|
||||
"""Records when a tool is used repeatedly, which might indicate an issue.
|
||||
|
||||
Args:
|
||||
llm: The language model being used.
|
||||
tool_name: Name of the tool being repeatedly used.
|
||||
attempts: Number of attempts made with this tool.
|
||||
llm (Any): The language model being used
|
||||
tool_name (str): Name of the tool being repeatedly used
|
||||
attempts (int): Number of attempts made with this tool
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Tool Repeated Usage")
|
||||
self._add_attribute(
|
||||
@@ -499,23 +488,22 @@ class Telemetry:
|
||||
self._add_attribute(span, "attempts", attempts)
|
||||
if llm:
|
||||
self._add_attribute(span, "llm", llm.model)
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def tool_usage(
|
||||
self, llm: Any, tool_name: str, attempts: int, agent: Any = None
|
||||
) -> None:
|
||||
def tool_usage(self, llm: Any, tool_name: str, attempts: int, agent: Any = None):
|
||||
"""Records the usage of a tool by an agent.
|
||||
|
||||
Args:
|
||||
llm: The language model being used.
|
||||
tool_name: Name of the tool being used.
|
||||
attempts: Number of attempts made with this tool.
|
||||
agent: The agent using the tool.
|
||||
llm (Any): The language model being used
|
||||
tool_name (str): Name of the tool being used
|
||||
attempts (int): Number of attempts made with this tool
|
||||
agent (Any, optional): The agent using the tool
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Tool Usage")
|
||||
self._add_attribute(
|
||||
@@ -529,23 +517,30 @@ class Telemetry:
|
||||
self._add_attribute(span, "llm", llm.model)
|
||||
|
||||
# Add agent fingerprint data if available
|
||||
add_agent_fingerprint_to_span(span, agent, self._add_attribute)
|
||||
close_span(span)
|
||||
if agent and hasattr(agent, "fingerprint") and agent.fingerprint:
|
||||
self._add_attribute(
|
||||
span, "agent_fingerprint", agent.fingerprint.uuid_str
|
||||
)
|
||||
if hasattr(agent, "role"):
|
||||
self._add_attribute(span, "agent_role", agent.role)
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def tool_usage_error(
|
||||
self, llm: Any, agent: Any = None, tool_name: str | None = None
|
||||
) -> None:
|
||||
self, llm: Any, agent: Any = None, tool_name: Optional[str] = None
|
||||
):
|
||||
"""Records when a tool usage results in an error.
|
||||
|
||||
Args:
|
||||
llm: The language model being used when the error occurred.
|
||||
agent: The agent using the tool.
|
||||
tool_name: Name of the tool that caused the error.
|
||||
llm (Any): The language model being used when the error occurred
|
||||
agent (Any, optional): The agent using the tool
|
||||
tool_name (str, optional): Name of the tool that caused the error
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Tool Usage Error")
|
||||
self._add_attribute(
|
||||
@@ -560,24 +555,31 @@ class Telemetry:
|
||||
self._add_attribute(span, "tool_name", tool_name)
|
||||
|
||||
# Add agent fingerprint data if available
|
||||
add_agent_fingerprint_to_span(span, agent, self._add_attribute)
|
||||
close_span(span)
|
||||
if agent and hasattr(agent, "fingerprint") and agent.fingerprint:
|
||||
self._add_attribute(
|
||||
span, "agent_fingerprint", agent.fingerprint.uuid_str
|
||||
)
|
||||
if hasattr(agent, "role"):
|
||||
self._add_attribute(span, "agent_role", agent.role)
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def individual_test_result_span(
|
||||
self, crew: Crew, quality: float, exec_time: int, model_name: str
|
||||
) -> None:
|
||||
):
|
||||
"""Records individual test results for a crew execution.
|
||||
|
||||
Args:
|
||||
crew: The crew being tested.
|
||||
quality: Quality score of the execution.
|
||||
exec_time: Execution time in seconds.
|
||||
model_name: Name of the model used.
|
||||
crew (Crew): The crew being tested
|
||||
quality (float): Quality score of the execution
|
||||
exec_time (int): Execution time in seconds
|
||||
model_name (str): Name of the model used
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Crew Individual Test Result")
|
||||
|
||||
@@ -586,15 +588,15 @@ class Telemetry:
|
||||
"crewai_version",
|
||||
version("crewai"),
|
||||
)
|
||||
add_crew_attributes(
|
||||
span, crew, self._add_attribute, include_fingerprint=False
|
||||
)
|
||||
self._add_attribute(span, "crew_key", crew.key)
|
||||
self._add_attribute(span, "crew_id", str(crew.id))
|
||||
self._add_attribute(span, "quality", str(quality))
|
||||
self._add_attribute(span, "exec_time", str(exec_time))
|
||||
self._add_attribute(span, "model_name", model_name)
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def test_execution_span(
|
||||
self,
|
||||
@@ -602,17 +604,17 @@ class Telemetry:
|
||||
iterations: int,
|
||||
inputs: dict[str, Any] | None,
|
||||
model_name: str,
|
||||
) -> None:
|
||||
):
|
||||
"""Records the execution of a test suite for a crew.
|
||||
|
||||
Args:
|
||||
crew: The crew being tested.
|
||||
iterations: Number of test iterations.
|
||||
inputs: Input parameters for the test.
|
||||
model_name: Name of the model used in testing.
|
||||
crew (Crew): The crew being tested
|
||||
iterations (int): Number of test iterations
|
||||
inputs (dict[str, Any] | None): Input parameters for the test
|
||||
model_name (str): Name of the model used in testing
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Crew Test Execution")
|
||||
|
||||
@@ -621,9 +623,8 @@ class Telemetry:
|
||||
"crewai_version",
|
||||
version("crewai"),
|
||||
)
|
||||
add_crew_attributes(
|
||||
span, crew, self._add_attribute, include_fingerprint=False
|
||||
)
|
||||
self._add_attribute(span, "crew_key", crew.key)
|
||||
self._add_attribute(span, "crew_id", str(crew.id))
|
||||
self._add_attribute(span, "iterations", str(iterations))
|
||||
self._add_attribute(span, "model_name", model_name)
|
||||
|
||||
@@ -632,99 +633,93 @@ class Telemetry:
|
||||
span, "inputs", json.dumps(inputs) if inputs else None
|
||||
)
|
||||
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def deploy_signup_error_span(self) -> None:
|
||||
def deploy_signup_error_span(self):
|
||||
"""Records when an error occurs during the deployment signup process."""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Deploy Signup Error")
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def start_deployment_span(self, uuid: str | None = None) -> None:
|
||||
def start_deployment_span(self, uuid: Optional[str] = None):
|
||||
"""Records the start of a deployment process.
|
||||
|
||||
Args:
|
||||
uuid: Unique identifier for the deployment.
|
||||
uuid (Optional[str]): Unique identifier for the deployment
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Start Deployment")
|
||||
if uuid:
|
||||
self._add_attribute(span, "uuid", uuid)
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def create_crew_deployment_span(self) -> None:
|
||||
def create_crew_deployment_span(self):
|
||||
"""Records the creation of a new crew deployment."""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Create Crew Deployment")
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def get_crew_logs_span(
|
||||
self, uuid: str | None, log_type: str = "deployment"
|
||||
) -> None:
|
||||
def get_crew_logs_span(self, uuid: Optional[str], log_type: str = "deployment"):
|
||||
"""Records the retrieval of crew logs.
|
||||
|
||||
Args:
|
||||
uuid: Unique identifier for the crew.
|
||||
log_type: Type of logs being retrieved. Defaults to "deployment".
|
||||
uuid (Optional[str]): Unique identifier for the crew
|
||||
log_type (str, optional): Type of logs being retrieved. Defaults to "deployment".
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Get Crew Logs")
|
||||
self._add_attribute(span, "log_type", log_type)
|
||||
if uuid:
|
||||
self._add_attribute(span, "uuid", uuid)
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def remove_crew_span(self, uuid: str | None = None) -> None:
|
||||
def remove_crew_span(self, uuid: Optional[str] = None):
|
||||
"""Records the removal of a crew.
|
||||
|
||||
Args:
|
||||
uuid: Unique identifier for the crew being removed.
|
||||
uuid (Optional[str]): Unique identifier for the crew being removed
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Remove Crew")
|
||||
if uuid:
|
||||
self._add_attribute(span, "uuid", uuid)
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def crew_execution_span(
|
||||
self, crew: Crew, inputs: dict[str, Any] | None
|
||||
) -> Span | None:
|
||||
def crew_execution_span(self, crew: Crew, inputs: dict[str, Any] | None):
|
||||
"""Records the complete execution of a crew.
|
||||
|
||||
This is only collected if the user has opted-in to share the crew.
|
||||
|
||||
Args:
|
||||
crew: The crew being executed.
|
||||
inputs: Optional input parameters for the crew.
|
||||
|
||||
Returns:
|
||||
The execution span if crew sharing is enabled, None otherwise.
|
||||
"""
|
||||
self.crew_creation(crew, inputs)
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Crew Execution")
|
||||
self._add_attribute(
|
||||
@@ -732,9 +727,8 @@ class Telemetry:
|
||||
"crewai_version",
|
||||
version("crewai"),
|
||||
)
|
||||
add_crew_attributes(
|
||||
span, crew, self._add_attribute, include_fingerprint=False
|
||||
)
|
||||
self._add_attribute(span, "crew_key", crew.key)
|
||||
self._add_attribute(span, "crew_id", str(crew.id))
|
||||
self._add_attribute(
|
||||
span, "crew_inputs", json.dumps(inputs) if inputs else None
|
||||
)
|
||||
@@ -792,19 +786,12 @@ class Telemetry:
|
||||
return span
|
||||
|
||||
if crew.share_crew:
|
||||
self._safe_telemetry_operation(_operation)
|
||||
return _operation()
|
||||
self._safe_telemetry_operation(operation)
|
||||
return operation()
|
||||
return None
|
||||
|
||||
def end_crew(self, crew: Any, final_string_output: str) -> None:
|
||||
"""Records the end of crew execution.
|
||||
|
||||
Args:
|
||||
crew: The crew that finished execution.
|
||||
final_string_output: The final output from the crew.
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def end_crew(self, crew, final_string_output):
|
||||
def operation():
|
||||
self._add_attribute(
|
||||
crew._execution_span,
|
||||
"crewai_version",
|
||||
@@ -827,70 +814,68 @@ class Telemetry:
|
||||
]
|
||||
),
|
||||
)
|
||||
close_span(crew._execution_span)
|
||||
crew._execution_span.set_status(Status(StatusCode.OK))
|
||||
crew._execution_span.end()
|
||||
|
||||
if crew.share_crew:
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def _add_attribute(self, span: Span, key: str, value: Any) -> None:
|
||||
"""Add an attribute to a span.
|
||||
def _add_attribute(self, span, key, value):
|
||||
"""Add an attribute to a span."""
|
||||
|
||||
Args:
|
||||
span: The span to add the attribute to.
|
||||
key: The attribute key.
|
||||
value: The attribute value.
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
return span.set_attribute(key, value)
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def flow_creation_span(self, flow_name: str) -> None:
|
||||
def flow_creation_span(self, flow_name: str):
|
||||
"""Records the creation of a new flow.
|
||||
|
||||
Args:
|
||||
flow_name: Name of the flow being created.
|
||||
flow_name (str): Name of the flow being created
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Flow Creation")
|
||||
self._add_attribute(span, "flow_name", flow_name)
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def flow_plotting_span(self, flow_name: str, node_names: list[str]) -> None:
|
||||
def flow_plotting_span(self, flow_name: str, node_names: list[str]):
|
||||
"""Records flow visualization/plotting activity.
|
||||
|
||||
Args:
|
||||
flow_name: Name of the flow being plotted.
|
||||
node_names: List of node names in the flow.
|
||||
flow_name (str): Name of the flow being plotted
|
||||
node_names (list[str]): List of node names in the flow
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Flow Plotting")
|
||||
self._add_attribute(span, "flow_name", flow_name)
|
||||
self._add_attribute(span, "node_names", json.dumps(node_names))
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
def flow_execution_span(self, flow_name: str, node_names: list[str]) -> None:
|
||||
def flow_execution_span(self, flow_name: str, node_names: list[str]):
|
||||
"""Records the execution of a flow.
|
||||
|
||||
Args:
|
||||
flow_name: Name of the flow being executed.
|
||||
node_names: List of nodes being executed in the flow.
|
||||
flow_name (str): Name of the flow being executed
|
||||
node_names (list[str]): List of nodes being executed in the flow
|
||||
"""
|
||||
|
||||
def _operation():
|
||||
def operation():
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Flow Execution")
|
||||
self._add_attribute(span, "flow_name", flow_name)
|
||||
self._add_attribute(span, "node_names", json.dumps(node_names))
|
||||
close_span(span)
|
||||
span.set_status(Status(StatusCode.OK))
|
||||
span.end()
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
self._safe_telemetry_operation(operation)
|
||||
|
||||
@@ -1,112 +0,0 @@
"""Telemetry utility functions.

This module provides utility functions for telemetry operations.
"""

from collections.abc import Callable
from typing import TYPE_CHECKING, Any

from opentelemetry.trace import Span, Status, StatusCode

if TYPE_CHECKING:
    from crewai.crew import Crew
    from crewai.task import Task


def add_agent_fingerprint_to_span(
    span: Span, agent: Any, add_attribute_fn: Callable[[Span, str, Any], None]
) -> None:
    """Add agent fingerprint data to a span if available.

    Args:
        span: The span to add the attributes to.
        agent: The agent whose fingerprint data should be added.
        add_attribute_fn: Function to add attributes to the span.
    """
    if agent:
        # Try to get fingerprint directly
        if hasattr(agent, "fingerprint") and agent.fingerprint:
            add_attribute_fn(span, "agent_fingerprint", agent.fingerprint.uuid_str)
            if hasattr(agent, "role"):
                add_attribute_fn(span, "agent_role", agent.role)
        else:
            # Try to get fingerprint using getattr (for cases where it might not be directly accessible)
            agent_fingerprint = getattr(
                getattr(agent, "fingerprint", None), "uuid_str", None
            )
            if agent_fingerprint:
                add_attribute_fn(span, "agent_fingerprint", agent_fingerprint)
                if hasattr(agent, "role"):
                    add_attribute_fn(span, "agent_role", agent.role)


def add_crew_attributes(
    span: Span,
    crew: "Crew",
    add_attribute_fn: Callable[[Span, str, Any], None],
    include_fingerprint: bool = True,
) -> None:
    """Add crew attributes to a span.

    Args:
        span: The span to add the attributes to.
        crew: The crew whose attributes should be added.
        add_attribute_fn: Function to add attributes to the span.
        include_fingerprint: Whether to include fingerprint data.
    """
    add_attribute_fn(span, "crew_key", crew.key)
    add_attribute_fn(span, "crew_id", str(crew.id))

    if include_fingerprint and hasattr(crew, "fingerprint") and crew.fingerprint:
        add_attribute_fn(span, "crew_fingerprint", crew.fingerprint.uuid_str)


def add_task_attributes(
    span: Span,
    task: "Task",
    add_attribute_fn: Callable[[Span, str, Any], None],
    include_fingerprint: bool = True,
) -> None:
    """Add task attributes to a span.

    Args:
        span: The span to add the attributes to.
        task: The task whose attributes should be added.
        add_attribute_fn: Function to add attributes to the span.
        include_fingerprint: Whether to include fingerprint data.
    """
    add_attribute_fn(span, "task_key", task.key)
    add_attribute_fn(span, "task_id", str(task.id))

    if include_fingerprint and hasattr(task, "fingerprint") and task.fingerprint:
        add_attribute_fn(span, "task_fingerprint", task.fingerprint.uuid_str)


def add_crew_and_task_attributes(
    span: Span,
    crew: "Crew",
    task: "Task",
    add_attribute_fn: Callable[[Span, str, Any], None],
    include_fingerprints: bool = True,
) -> None:
    """Add both crew and task attributes to a span.

    Args:
        span: The span to add the attributes to.
        crew: The crew whose attributes should be added.
        task: The task whose attributes should be added.
        add_attribute_fn: Function to add attributes to the span.
        include_fingerprints: Whether to include fingerprint data.
    """
    add_crew_attributes(span, crew, add_attribute_fn, include_fingerprints)
    add_task_attributes(span, task, add_attribute_fn, include_fingerprints)


def close_span(span: Span) -> None:
    """Set span status to OK and end it.

    Args:
        span: The span to close.
    """
    span.set_status(Status(StatusCode.OK))
    span.end()
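The helpers above operate on plain OpenTelemetry spans, so they can be exercised outside the Telemetry class. A minimal sketch, assuming the branch that keeps `crewai.telemetry.utils` (the import path used by the telemetry module earlier in this diff); the tracer setup here is ordinary OpenTelemetry, not CrewAI-specific.

```python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider

from crewai.telemetry.utils import close_span

# Plain OpenTelemetry setup; in CrewAI the Telemetry class owns the provider.
trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer("crewai.telemetry")

span = tracer.start_span("Flow Creation")
span.set_attribute("flow_name", "example_flow")  # what Telemetry._add_attribute does internally
close_span(span)  # sets status OK and ends the span
```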
|
||||
@@ -27,4 +27,19 @@ class DelegateWorkTool(BaseAgentTool):
        **kwargs,
    ) -> str:
        coworker = self._get_coworker(coworker, **kwargs)

        if hasattr(self, 'agents') and self.agents:
            delegating_agent = kwargs.get('delegating_agent')
            if delegating_agent and hasattr(delegating_agent, 'responsibility_system'):
                responsibility_system = delegating_agent.responsibility_system
                if responsibility_system and responsibility_system.enabled:
                    task_obj = kwargs.get('task_obj')
                    if task_obj:
                        responsibility_system.delegate_task(
                            delegating_agent=delegating_agent,
                            receiving_agent=coworker,
                            task=task_obj,
                            reason=f"Delegation based on capability match for: {task[:100]}..."
                        )

        return self._execute(coworker, task, context)
@@ -1,22 +1,26 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
|
||||
import inspect
|
||||
import textwrap
|
||||
from collections.abc import Callable
|
||||
from typing import TYPE_CHECKING, Any, get_type_hints
|
||||
from typing import Any, Callable, Optional, Union, get_type_hints
|
||||
|
||||
from pydantic import BaseModel, Field, create_model
|
||||
|
||||
from crewai.utilities.logger import Logger
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
|
||||
|
||||
class ToolUsageLimitExceededError(Exception):
|
||||
class ToolUsageLimitExceeded(Exception):
|
||||
"""Exception raised when a tool has reached its maximum usage limit."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CrewStructuredTool:
|
||||
"""A structured tool that can operate on any number of inputs.
|
||||
@@ -65,10 +69,10 @@ class CrewStructuredTool:
|
||||
def from_function(
|
||||
cls,
|
||||
func: Callable,
|
||||
name: str | None = None,
|
||||
description: str | None = None,
|
||||
name: Optional[str] = None,
|
||||
description: Optional[str] = None,
|
||||
return_direct: bool = False,
|
||||
args_schema: type[BaseModel] | None = None,
|
||||
args_schema: Optional[type[BaseModel]] = None,
|
||||
infer_schema: bool = True,
|
||||
**kwargs: Any,
|
||||
) -> CrewStructuredTool:
|
||||
@@ -160,7 +164,7 @@ class CrewStructuredTool:
|
||||
|
||||
# Create model
|
||||
schema_name = f"{name.title()}Schema"
|
||||
return create_model(schema_name, **fields) # type: ignore[call-overload]
|
||||
return create_model(schema_name, **fields)
|
||||
|
||||
def _validate_function_signature(self) -> None:
|
||||
"""Validate that the function signature matches the args schema."""
|
||||
@@ -188,7 +192,7 @@ class CrewStructuredTool:
|
||||
f"not found in args_schema"
|
||||
)
|
||||
|
||||
def _parse_args(self, raw_args: str | dict) -> dict:
|
||||
def _parse_args(self, raw_args: Union[str, dict]) -> dict:
|
||||
"""Parse and validate the input arguments against the schema.
|
||||
|
||||
Args:
|
||||
@@ -203,18 +207,18 @@ class CrewStructuredTool:
|
||||
|
||||
raw_args = json.loads(raw_args)
|
||||
except json.JSONDecodeError as e:
|
||||
raise ValueError(f"Failed to parse arguments as JSON: {e}") from e
|
||||
raise ValueError(f"Failed to parse arguments as JSON: {e}")
|
||||
|
||||
try:
|
||||
validated_args = self.args_schema.model_validate(raw_args)
|
||||
return validated_args.model_dump()
|
||||
except Exception as e:
|
||||
raise ValueError(f"Arguments validation failed: {e}") from e
|
||||
raise ValueError(f"Arguments validation failed: {e}")
|
||||
|
||||
async def ainvoke(
|
||||
self,
|
||||
input: str | dict,
|
||||
config: dict | None = None,
|
||||
input: Union[str, dict],
|
||||
config: Optional[dict] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
"""Asynchronously invoke the tool.
|
||||
@@ -230,7 +234,7 @@ class CrewStructuredTool:
|
||||
parsed_args = self._parse_args(input)
|
||||
|
||||
if self.has_reached_max_usage_count():
|
||||
raise ToolUsageLimitExceededError(
|
||||
raise ToolUsageLimitExceeded(
|
||||
f"Tool '{self.name}' has reached its maximum usage limit of {self.max_usage_count}. You should not use the {self.name} tool again."
|
||||
)
|
||||
|
||||
@@ -239,37 +243,44 @@ class CrewStructuredTool:
|
||||
try:
|
||||
if inspect.iscoroutinefunction(self.func):
|
||||
return await self.func(**parsed_args, **kwargs)
|
||||
# Run sync functions in a thread pool
|
||||
import asyncio
|
||||
else:
|
||||
# Run sync functions in a thread pool
|
||||
import asyncio
|
||||
|
||||
return await asyncio.get_event_loop().run_in_executor(
|
||||
None, lambda: self.func(**parsed_args, **kwargs)
|
||||
)
|
||||
return await asyncio.get_event_loop().run_in_executor(
|
||||
None, lambda: self.func(**parsed_args, **kwargs)
|
||||
)
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
def _run(self, *args, **kwargs) -> Any:
|
||||
"""Legacy method for compatibility."""
|
||||
# Convert args/kwargs to our expected format
|
||||
input_dict = dict(zip(self.args_schema.model_fields.keys(), args, strict=False))
|
||||
input_dict = dict(zip(self.args_schema.model_fields.keys(), args))
|
||||
input_dict.update(kwargs)
|
||||
return self.invoke(input_dict)
|
||||
|
||||
def invoke(
|
||||
self, input: str | dict, config: dict | None = None, **kwargs: Any
|
||||
self, input: Union[str, dict], config: Optional[dict] = None, **kwargs: Any
|
||||
) -> Any:
|
||||
"""Main method for tool execution."""
|
||||
parsed_args = self._parse_args(input)
|
||||
|
||||
if self.has_reached_max_usage_count():
|
||||
raise ToolUsageLimitExceededError(
|
||||
raise ToolUsageLimitExceeded(
|
||||
f"Tool '{self.name}' has reached its maximum usage limit of {self.max_usage_count}. You should not use the {self.name} tool again."
|
||||
)
|
||||
|
||||
self._increment_usage_count()
|
||||
|
||||
if inspect.iscoroutinefunction(self.func):
|
||||
return asyncio.run(self.func(**parsed_args, **kwargs))
|
||||
result = asyncio.run(self.func(**parsed_args, **kwargs))
|
||||
return result
|
||||
|
||||
try:
|
||||
result = self.func(**parsed_args, **kwargs)
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
result = self.func(**parsed_args, **kwargs)
|
||||
|
||||
|
||||
@@ -1,22 +1,16 @@
"""Crew chat input models.

This module provides models for defining chat inputs and fields
for crew interactions.
"""
from typing import List

from pydantic import BaseModel, Field


class ChatInputField(BaseModel):
    """Represents a single required input for the crew.

    """
    Represents a single required input for the crew, with a name and short description.
    Example:
    ```python
    field = ChatInputField(
        name="topic",
        description="The topic to focus on for the conversation"
    )
    ```
    {
        "name": "topic",
        "description": "The topic to focus on for the conversation"
    }
    """

    name: str = Field(..., description="The name of the input field")
@@ -24,25 +18,23 @@ class ChatInputField(BaseModel):


class ChatInputs(BaseModel):
    """Holds crew metadata and input field definitions.

    """
    Holds a high-level crew_description plus a list of ChatInputFields.
    Example:
    ```python
    inputs = ChatInputs(
        crew_name="topic-based-qa",
        crew_description="Use this crew for topic-based Q&A",
        inputs=[
            ChatInputField(name="topic", description="The topic to focus on"),
            ChatInputField(name="username", description="Name of the user"),
    {
        "crew_name": "topic-based-qa",
        "crew_description": "Use this crew for topic-based Q&A",
        "inputs": [
            {"name": "topic", "description": "The topic to focus on"},
            {"name": "username", "description": "Name of the user"},
        ]
    )
    ```
    }
    """

    crew_name: str = Field(..., description="The name of the crew")
    crew_description: str = Field(
        ..., description="A description of the crew's purpose"
    )
    inputs: list[ChatInputField] = Field(
    inputs: List[ChatInputField] = Field(
        default_factory=list, description="A list of input fields for the crew"
    )
@@ -1,37 +1,18 @@
"""Human-in-the-loop (HITL) type definitions.

This module provides type definitions for human-in-the-loop interactions
in crew executions.
"""

from typing import TypedDict
from typing import List, Dict, TypedDict


class HITLResumeInfo(TypedDict, total=False):
    """HITL resume information passed from flow to crew.

    Attributes:
        task_id: Unique identifier for the task.
        crew_execution_id: Unique identifier for the crew execution.
        task_key: Key identifying the specific task.
        task_output: Output from the task before human intervention.
        human_feedback: Feedback provided by the human.
        previous_messages: History of messages in the conversation.
    """
    """HITL resume information passed from flow to crew."""

    task_id: str
    crew_execution_id: str
    task_key: str
    task_output: str
    human_feedback: str
    previous_messages: list[dict[str, str]]
    previous_messages: List[Dict[str, str]]


class CrewInputsWithHITL(TypedDict, total=False):
    """Crew inputs that may contain HITL resume information.

    Attributes:
        _hitl_resume: Optional HITL resume information for continuing execution.
    """
    """Crew inputs that may contain HITL resume information."""

    _hitl_resume: HITLResumeInfo
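Since TypedDicts are ordinary dicts at runtime, the resume payload can be sketched as literals; the key names mirror `HITLResumeInfo` and `CrewInputsWithHITL` above, and all values are illustrative.

```python
# Plain dicts satisfy TypedDicts at runtime; keys mirror HITLResumeInfo above.
resume_info = {
    "task_id": "task-001",
    "crew_execution_id": "exec-42",
    "task_key": "write_summary",
    "task_output": "Draft summary before review",
    "human_feedback": "Tighten the second paragraph.",
    "previous_messages": [{"role": "user", "content": "Please review the draft."}],
}

# Shape of CrewInputsWithHITL: regular kickoff inputs plus the optional resume block.
crew_inputs = {"topic": "quarterly report", "_hitl_resume": resume_info}
```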
|
||||
|
||||
@@ -1,15 +1,9 @@
"""Usage metrics tracking for CrewAI execution.

This module provides models for tracking token usage and request metrics
during crew and agent execution.
"""

from pydantic import BaseModel, Field
from typing_extensions import Self


class UsageMetrics(BaseModel):
    """Track usage metrics for crew execution.
    """
    Model to track usage metrics for the crew's execution.

    Attributes:
        total_tokens: Total number of tokens used.
@@ -33,11 +27,12 @@ class UsageMetrics(BaseModel):
        default=0, description="Number of successful requests made."
    )

    def add_usage_metrics(self, usage_metrics: Self) -> None:
        """Add usage metrics from another UsageMetrics object.
    def add_usage_metrics(self, usage_metrics: "UsageMetrics"):
        """
        Add the usage metrics from another UsageMetrics object.

        Args:
            usage_metrics: The usage metrics to add.
            usage_metrics (UsageMetrics): The usage metrics to add.
        """
        self.total_tokens += usage_metrics.total_tokens
        self.prompt_tokens += usage_metrics.prompt_tokens
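A standalone sketch of the accumulation pattern `add_usage_metrics` implements, limited to the fields named in this hunk; it is a reduced stand-in, not the full CrewAI model.

```python
from pydantic import BaseModel, Field


class MiniUsageMetrics(BaseModel):
    """Reduced stand-in for UsageMetrics, limited to fields shown above."""

    total_tokens: int = Field(default=0)
    prompt_tokens: int = Field(default=0)
    successful_requests: int = Field(default=0)

    def add_usage_metrics(self, other: "MiniUsageMetrics") -> None:
        # Same element-wise accumulation as the method above.
        self.total_tokens += other.total_tokens
        self.prompt_tokens += other.prompt_tokens
        self.successful_requests += other.successful_requests


run_a = MiniUsageMetrics(total_tokens=1200, prompt_tokens=900, successful_requests=3)
run_b = MiniUsageMetrics(total_tokens=800, prompt_tokens=600, successful_requests=2)
run_a.add_usage_metrics(run_b)
assert run_a.total_tokens == 2000 and run_a.successful_requests == 5
```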
|
||||
|
||||
@@ -1,16 +1,12 @@
"""Error message definitions for CrewAI database operations.
"""Error message definitions for CrewAI database operations."""

This module provides standardized error classes and message templates
for database operations and agent repository handling.
"""

from typing import Final
from typing import Optional


class DatabaseOperationError(Exception):
    """Base exception class for database operation errors."""

    def __init__(self, message: str, original_error: Exception | None = None) -> None:
    def __init__(self, message: str, original_error: Optional[Exception] = None):
        """Initialize the database operation error.

        Args:
@@ -22,17 +18,13 @@ class DatabaseOperationError(Exception):


class DatabaseError:
    """Standardized error message templates for database operations.
    """Standardized error message templates for database operations."""

    Provides consistent error message formatting for various database
    operation failures.
    """

    INIT_ERROR: Final[str] = "Database initialization error: {}"
    SAVE_ERROR: Final[str] = "Error saving task outputs: {}"
    UPDATE_ERROR: Final[str] = "Error updating task outputs: {}"
    LOAD_ERROR: Final[str] = "Error loading task outputs: {}"
    DELETE_ERROR: Final[str] = "Error deleting task outputs: {}"
    INIT_ERROR: str = "Database initialization error: {}"
    SAVE_ERROR: str = "Error saving task outputs: {}"
    UPDATE_ERROR: str = "Error updating task outputs: {}"
    LOAD_ERROR: str = "Error loading task outputs: {}"
    DELETE_ERROR: str = "Error deleting task outputs: {}"

    @classmethod
    def format_error(cls, template: str, error: Exception) -> str:
@@ -50,3 +42,5 @@ class DatabaseError:

class AgentRepositoryError(Exception):
    """Exception raised when an agent repository is not found."""

    ...
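A sketch of how the pieces above are meant to combine, assuming `DatabaseError` and `DatabaseOperationError` are in scope (the module path is not shown in this hunk); the failing operation is an illustrative assumption, and the exact formatted string depends on `format_error`, whose body is elided above.

```python
import sqlite3

# Illustrative failure; DatabaseError and DatabaseOperationError are the classes above.
try:
    raise sqlite3.OperationalError("unable to open database file")
except sqlite3.OperationalError as e:
    # format_error presumably fills the "{}" placeholder of a template such as INIT_ERROR.
    message = DatabaseError.format_error(DatabaseError.INIT_ERROR, e)
    raise DatabaseOperationError(message, original_error=e) from e
```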
|
||||
|
||||
@@ -1,9 +1,8 @@
"""Logging and warning utility functions for CrewAI."""
"""Logging utility functions for CrewAI."""

import contextlib
import io
import logging
import warnings
from collections.abc import Generator


@@ -37,20 +36,3 @@ def suppress_logging(
):
    yield
    logger.setLevel(original_level)


@contextlib.contextmanager
def suppress_warnings() -> Generator[None, None, None]:
    """Context manager to suppress all warnings.

    Yields:
        None during the context execution.

    Note:
        There is a similar implementation in src/crewai/llm.py that also
        suppresses a specific deprecation warning. That version may be
        consolidated here in the future.
    """
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        yield
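Usage of the `suppress_warnings` context manager added above; the import path matches the one used by the telemetry module earlier in this diff, on the branch that includes `crewai.utilities.logger_utils`.

```python
import warnings

from crewai.utilities.logger_utils import suppress_warnings

with suppress_warnings():
    warnings.warn("this DeprecationWarning is swallowed", DeprecationWarning)

warnings.warn("outside the context manager, warnings behave normally")
```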
|
||||
|
||||
@@ -1,6 +1,6 @@
import threading
import time
from typing import Optional
from typing import Any

from pydantic import BaseModel, Field, PrivateAttr, model_validator

@@ -12,11 +12,11 @@ from crewai.utilities.logger import Logger
class RPMController(BaseModel):
    """Manages requests per minute limiting."""

    max_rpm: Optional[int] = Field(default=None)
    max_rpm: int | None = Field(default=None)
    logger: Logger = Field(default_factory=lambda: Logger(verbose=False))
    _current_rpm: int = PrivateAttr(default=0)
    _timer: Optional[threading.Timer] = PrivateAttr(default=None)
    _lock: Optional[threading.Lock] = PrivateAttr(default=None)
    _timer: Any = PrivateAttr(default=None)
    _lock: Any = PrivateAttr(default=None)
    _shutdown_flag: bool = PrivateAttr(default=False)

    @model_validator(mode="after")
@@ -35,7 +35,7 @@ class RPMController(BaseModel):
        if self.max_rpm is not None and self._current_rpm < self.max_rpm:
            self._current_rpm += 1
            return True
        elif self.max_rpm is not None:
        if self.max_rpm is not None:
            self.logger.log(
                "info", "Max RPM reached, waiting for next minute to start."
            )
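A standalone sketch of the requests-per-minute pattern `RPMController` implements (a counter plus a timer that resets it each minute); it mirrors the fields above but is not the CrewAI API.

```python
import threading


class MiniRPMLimiter:
    """Toy version of the RPM pattern: allow max_rpm calls per minute."""

    def __init__(self, max_rpm: int) -> None:
        self.max_rpm = max_rpm
        self._current_rpm = 0
        self._lock = threading.Lock()
        self._timer = threading.Timer(60.0, self._reset)
        self._timer.daemon = True
        self._timer.start()

    def _reset(self) -> None:
        # Runs once a minute: clear the counter and schedule the next reset.
        with self._lock:
            self._current_rpm = 0
        self._timer = threading.Timer(60.0, self._reset)
        self._timer.daemon = True
        self._timer.start()

    def check(self) -> bool:
        """Return True if another request is allowed in the current minute."""
        with self._lock:
            if self._current_rpm < self.max_rpm:
                self._current_rpm += 1
                return True
            return False


limiter = MiniRPMLimiter(max_rpm=10)
print(all(limiter.check() for _ in range(10)), limiter.check())  # True False
```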
|
||||
|
||||
@@ -1,11 +1,5 @@
"""Task output storage handler for managing task execution results.

This module provides functionality for storing and retrieving task outputs
from persistent storage, supporting replay and audit capabilities.
"""

from datetime import datetime
from typing import Any
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field

@@ -14,64 +8,32 @@ from crewai.memory.storage.kickoff_task_outputs_storage import (
)
from crewai.task import Task

"""Handles storage and retrieval of task execution outputs."""


class ExecutionLog(BaseModel):
    """Represents a log entry for task execution.

    Attributes:
        task_id: Unique identifier for the task.
        expected_output: The expected output description for the task.
        output: The actual output produced by the task.
        timestamp: When the task was executed.
        task_index: The position of the task in the execution sequence.
        inputs: Input parameters provided to the task.
        was_replayed: Whether this output was replayed from a previous run.
    """
    """Represents a log entry for task execution."""

    task_id: str
    expected_output: str | None = None
    output: dict[str, Any]
    expected_output: Optional[str] = None
    output: Dict[str, Any]
    timestamp: datetime = Field(default_factory=datetime.now)
    task_index: int
    inputs: dict[str, Any] = Field(default_factory=dict)
    inputs: Dict[str, Any] = Field(default_factory=dict)
    was_replayed: bool = False

    def __getitem__(self, key: str) -> Any:
        """Enable dictionary-style access to execution log attributes.

        Args:
            key: The attribute name to access.

        Returns:
            The value of the requested attribute.
        """
        return getattr(self, key)


"""Manages storage and retrieval of task outputs."""


class TaskOutputStorageHandler:
    """Manages storage and retrieval of task outputs.

    This handler provides an interface to persist and retrieve task execution
    results, supporting features like replay and audit trails.

    Attributes:
        storage: The underlying SQLite storage implementation.
    """

    def __init__(self) -> None:
        """Initialize the task output storage handler."""
        self.storage = KickoffTaskOutputsSQLiteStorage()

    def update(self, task_index: int, log: dict[str, Any]) -> None:
        """Update an existing task output in storage.

        Args:
            task_index: The index of the task to update.
            log: Dictionary containing task execution details.

        Raises:
            ValueError: If no saved outputs exist.
        """
    def update(self, task_index: int, log: Dict[str, Any]):
        saved_outputs = self.load()
        if saved_outputs is None:
            raise ValueError("Logs cannot be None")
@@ -94,31 +56,16 @@ class TaskOutputStorageHandler:
    def add(
        self,
        task: Task,
        output: dict[str, Any],
        output: Dict[str, Any],
        task_index: int,
        inputs: dict[str, Any] | None = None,
        inputs: Dict[str, Any] | None = None,
        was_replayed: bool = False,
    ) -> None:
        """Add a new task output to storage.

        Args:
            task: The task that was executed.
            output: The output produced by the task.
            task_index: The position of the task in execution sequence.
            inputs: Optional input parameters for the task.
            was_replayed: Whether this is a replayed execution.
        """
    ):
        inputs = inputs or {}
        self.storage.add(task, output, task_index, was_replayed, inputs)

    def reset(self) -> None:
        """Clear all stored task outputs."""
    def reset(self):
        self.storage.delete_all()

    def load(self) -> list[dict[str, Any]] | None:
        """Load all stored task outputs.

        Returns:
            List of task output dictionaries, or None if no outputs exist.
        """
    def load(self) -> Optional[List[Dict[str, Any]]]:
        return self.storage.load()
@@ -1,11 +1,5 @@
"""Token counting callback handler for LLM interactions.

This module provides a callback handler that tracks token usage
for LLM API calls through the litellm library.
"""

import warnings
from typing import Any
from typing import Any, Dict, Optional

from litellm.integrations.custom_logger import CustomLogger
from litellm.types.utils import Usage
@@ -14,38 +8,16 @@ from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess


class TokenCalcHandler(CustomLogger):
    """Handler for calculating and tracking token usage in LLM calls.

    This handler integrates with litellm's logging system to track
    prompt tokens, completion tokens, and cached tokens across requests.

    Attributes:
        token_cost_process: The token process tracker to accumulate usage metrics.
    """

    def __init__(self, token_cost_process: TokenProcess | None) -> None:
        """Initialize the token calculation handler.

        Args:
            token_cost_process: Optional token process tracker for accumulating metrics.
        """
    def __init__(self, token_cost_process: Optional[TokenProcess]):
        self.token_cost_process = token_cost_process

    def log_success_event(
        self,
        kwargs: dict[str, Any],
        response_obj: dict[str, Any],
        kwargs: Dict[str, Any],
        response_obj: Dict[str, Any],
        start_time: float,
        end_time: float,
    ) -> None:
        """Log successful LLM API call and track token usage.

        Args:
            kwargs: The arguments passed to the LLM call.
            response_obj: The response object from the LLM API.
            start_time: The timestamp when the call started.
            end_time: The timestamp when the call completed.
        """
        if self.token_cost_process is None:
            return
@@ -1,20 +1,19 @@
# ruff: noqa: S101
# mypy: ignore-errors
from collections import defaultdict
from typing import cast
from unittest.mock import Mock, patch
from unittest.mock import Mock

import pytest
from pydantic import BaseModel, Field

from crewai import LLM, Agent
from crewai.flow import Flow, start
from crewai.lite_agent import LiteAgent, LiteAgentOutput
from crewai.tools import BaseTool
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.agent_events import LiteAgentExecutionStartedEvent
from crewai.events.types.tool_usage_events import ToolUsageStartedEvent
from crewai.flow import Flow, start
from crewai.lite_agent import LiteAgent, LiteAgentOutput
from crewai.llms.base_llm import BaseLLM
from crewai.tools import BaseTool
from unittest.mock import patch


# A simple test tool
@@ -38,9 +37,10 @@ class WebSearchTool(BaseTool):
        # This is a mock implementation
        if "tokyo" in query.lower():
            return "Tokyo's population in 2023 was approximately 21 million people in the city proper, and 37 million in the greater metropolitan area."
        if "climate change" in query.lower() and "coral" in query.lower():
        elif "climate change" in query.lower() and "coral" in query.lower():
            return "Climate change severely impacts coral reefs through: 1) Ocean warming causing coral bleaching, 2) Ocean acidification reducing calcification, 3) Sea level rise affecting light availability, 4) Increased storm frequency damaging reef structures. Sources: NOAA Coral Reef Conservation Program, Global Coral Reef Alliance."
        return f"Found information about {query}: This is a simulated search result for demonstration purposes."
        else:
            return f"Found information about {query}: This is a simulated search result for demonstration purposes."


# Define Mock Calculator Tool
@@ -53,11 +53,10 @@ class CalculatorTool(BaseTool):
    def _run(self, expression: str) -> str:
        """Calculate the result of a mathematical expression."""
        try:
            # Using eval with restricted builtins for test purposes only
            result = eval(expression, {"__builtins__": {}})  # noqa: S307
            result = eval(expression, {"__builtins__": {}})
            return f"The result of {expression} is {result}"
        except Exception as e:
            return f"Error calculating {expression}: {e!s}"
            return f"Error calculating {expression}: {str(e)}"


# Define a custom response format using Pydantic
@@ -149,12 +148,12 @@ def test_lite_agent_with_tools():
        "What is the population of Tokyo and how many people would that be per square kilometer if Tokyo's area is 2,194 square kilometers?"
    )

    assert "21 million" in result.raw or "37 million" in result.raw, (
        "Agent should find Tokyo's population"
    )
    assert "per square kilometer" in result.raw, (
        "Agent should calculate population density"
    )
    assert (
        "21 million" in result.raw or "37 million" in result.raw
    ), "Agent should find Tokyo's population"
    assert (
        "per square kilometer" in result.raw
    ), "Agent should calculate population density"

    received_events = []

@@ -295,7 +294,6 @@ def test_sets_parent_flow_when_inside_flow():

    mock_llm = Mock(spec=LLM)
    mock_llm.call.return_value = "Test response"
    mock_llm.stop = []

    class MyFlow(Flow):
        @start()
@@ -330,222 +330,4 @@ interactions:
    status:
      code: 200
      message: OK
- request:
    body: '{"input": ["Capital of France"], "model": "text-embedding-3-small", "encoding_format":
      "base64"}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '96'
      content-type:
      - application/json
      cookie:
      - _cfuvid=rvDDZbBWaissP0luvtyuyyAWcPx3AiaoZS9LkAuK4sM-1746636999152-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.93.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.93.0
      x-stainless-read-timeout:
      - '600'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.9
    method: POST
    uri: https://api.openai.com/v1/embeddings
  response:
    body:
      string: !!binary |
        ... (base64-encoded binary embedding response payload omitted) ...
||||
WX/qXYfYCfQ5m13gGLy/2FuPQz/o+rlD6mwM5NZbcs9zXlLD4/1IyHH+wIrGZ1aCA7l88AlWJFjk
|
||||
OjQRMydfD5qDDFojYyL44+PO+lyDRa1OM7zlzh5rytugQh2QGEz6ycAGcvbV+sZ6iXKDuU/rwW/o
|
||||
d+MbwJyHFWdfjnVmfgAJ+O4PDnHzB5cSK/AHwJeJOQmrPFakbNccJRwi2OqRo/G96Tykl1buvPHw
|
||||
clK2ZVAJxITTSJ4chWAKqBPDTb8RV3zNdEnvyQq9em9h88GE/WrRtQU/fXXAg06Xj36SITeKb2Ie
|
||||
P0Ww6GaygmOUO8RwzmUwd/xLRdM9GsnJrJd+0x8h2PALO1nTASp/Yhvm/JvDxySVwAgnkUcbvyWG
|
||||
SC4O6198HlbOtcAGhiCYHkzBI2ZiGqw97RP9jNXThGlkn7APhKPDO8ESAeMqGMTK0Fwt+nOXgy6S
|
||||
NSID4aiRXVyyUGVYi4RZ7lA+oE4CZRrJRNvPvEM13zChJr5u3szEVzoPV8SDa6Iy+CSpaiXgQZog
|
||||
aOgJK2fz7FBWGX3QS7DE2vHm9iy+3VT49mJ54saPowmPq1xA4SSJZLu/gGbJ/EblmwrePn1UYCK7
|
||||
eoYnK2KJ9uNb2KgKVLZZThyLVMEsj+fwd37TaxrELV8T9Yd3WJ51tV+b8+RDJN/DTZ/LgQAPlxYq
|
||||
OU+J+Rk4rVX2hxruibNMHGNeAqGLqIys0rLIvTqCYH6xCgPZ83rFch+9wNAyXAmsTH16/J75gtWF
|
||||
7gPmXmOTw+GLUrryJxPeB93A12BXgpmmvQtxpcX48OHNfnnqeIK/88eTH/XLcjR4KJbu5efngHm3
|
||||
93kwOe+AeJirwLpOwR5BE7XE84vM4YmX+fAwV19vJ5lvSpMskyHoZIpl6B37ae/DCGqWBIjiiZlG
|
||||
iywJYQpG6rH53ajmvBRXsHfPBGNmycDS5EHNFfYhmCStmOliGZMLW2Xq8UkJb9Wq3ezo5/8Qyw7T
|
||||
fmzBPof5x2L/3C9CSz9H3rMQt/yWA77eKwWsDl+RHGuurKZz0/vQct45OZiaXa0JtSZo4zEmj/Rt
|
||||
pbN/vOSIUZ8L0eXqQckS7UNkZfKThPdD3xP541uIpHebeIOwVpueV9FTv2FsVB8/ZfscenCPUYST
|
||||
p2AGwtwNDEj1ycWHwOpTOuj8Bb7qu4LtyXsFlG2tEpYscyVHxaXOUvrnAaaXR01wRnC62rtklTa+
|
||||
hR1hGdI581Ifci2TTNLkKcH8uJ1bBKvHk+SPqA8oLeMc6L6h4OTerRUlju8iIn0+5KkQyVm3+waR
|
||||
LVv48PMn9kfNhsGS7DF2A+/P+6Dn6fuJ9xqxmupvasKyKsNpOZtnjQaXVw43vj7B6kaccfN34GGI
|
||||
NXw5LmxFJLDm6E7BBePzxwros/Yn1LRiSix/jyk7MRIPzVGzscmVWspl+6aAnkk6rL9V2elEET+A
|
||||
wWkFUTAbAbI2UQdTLmwnfsOrmR9oAn5+1MZXNYrIZwbnqSqwBaq4GqD8SX7xxNiBI/3hF6yk4U4y
|
||||
G52BUAdNjN479eEtHybu1+SdXODiFxYxjF2bzk3UxaAy44hc47HqOU6Q9rC7hA/y03NLt+/sP3h0
|
||||
P9xNSoB/lmHER8MEnsvNmdeDPv38S2IEO5XysGFruOHHJLEO2087f+mQJl9sfCx0Lp2E9mVLDfz8
|
||||
H3z9+Y+92T5J0N1TjRejdwKb2e+wB0eZCoo+MPBz3j9JnD4qOvO2HyI218epZ773dGaZvSVKTvbE
|
||||
zmRdnMU2YxNsfi22FokJ6DU/1jDx4yOJDFF0Vv0hJbBdbwdyXIoo5cluWiHiISLKs/s4czHd9/B0
|
||||
OjPeromPlaDloICptzJTtfEvfqsvUHbCK85pvNPWTI8hXKAMiBJBvRLq3TpA53jUsXWm34rW5ZWF
|
||||
9sFsiCwUFqDrTsvBhic/Pzcgh1PRIl5/etNSlhdnLcx1QMHBabB5CWQgiPpeguvudfeks9CDGV01
|
||||
CRJhrxDsxGGwgK8yo9N95TwmONvpli8xOuFBxUotrikVTkYsbviDbZsw2urm5A0vjnojxqbvOYja
|
||||
C9TOk7Pxt5VOh+fDhrITXYlyMPlgIl52gT7lLwTv/XMw9zl0oV0HKsafMtJ6rRH3YNPHHkpz0SEO
|
||||
c6rBFl8PHm4inU971UM/v3njc/1YKr0PkuQBMN78ofVjehbIh0KdOss79/RCwgeaUWdgIypwRa/D
|
||||
Xv35H8T/HCetu+elBze/yqNB+OxX7aaG6B4OAlZrq3MWOC0s2vQrPi4Fn071/ljCULAhdkKxC4Tj
|
||||
3Vp//yd4ifV0OUlaKL5cZ/akMI77OWsECAfxGk/DdQrB/LAqV9rwHpt6V1fLK899wIZPcfps/IGA
|
||||
t9FC/eN+SSSKVT/sv6MJvOvjgI/bfRluJcx/+OsJwuVQkW1/8OcnRZ/hqpEhGAawOMmTyGe3SJfY
|
||||
st9wjdGM5UEBThMOng02PMHpeIrTdZ1SCT7vuYCtpf5Um1/jwWU3lfiw3yf9eH5KJuSr73d68c8d
|
||||
HZ5CfIGJ2EdeluC6H+7+bpB0r9thNb9qwdLg0Yab3+S9SLMC2j3F5Fff8LFdOUCYizTDyzv4bHpP
|
||||
6ac6eXfwgFh92vCkWhRNVCWPNyti2/rH+cMHNj1JsNoFlHuN7gB/eu6w6c25a9Y3PPUePzEgrau5
|
||||
H8sWfs+vkiiS0zgr2U3zD3+J+wVawLMiq4p5qitb/jt0nC+6h+BYnT1287/G5XhioWDjCza+Xgfo
|
||||
O+ImOEGXkOweukDoxFwH0XNRsU+Pbb/A8wMC9TsU5K64VBuDR79CY/QCchIPLaBRtbTIL+Mz9oJz
|
||||
l87v0OrgtH9Zf/g0a+CDBJ4NPhNN2ec9laPXgPKhVLE5JdeKX0x5RUNfM8S5srrDea1Xw+FCCXFD
|
||||
CrRe5NUYNpa/89Ys+VYziBMXuJ5oEWVcdsEqioccNP1RmVB9P1V00BkfIvKZsDKadUA2fQ/XhcrE
|
||||
PJNDwNvzOwTIJQO2ETvR9YVYH8nXu0WwA0+UZRIA4bQcpenr9i9tuJvHHAYnk8XWk+oOnYzWRvVR
|
||||
OmI1HL7OWl4SHsTCqnprBXhtBfPK/PETVLK8gxXoTQt/9ce1rUOwvtxuhTrfdjgUOSXd+jUR4n1o
|
||||
kNvVYPqhOQAIG9ioRFWYkfbV1JVQNdl8i9/ozBoBJpS6XvLKPnlr9BBHe9hO/dMT/UNTzbS7t+Cn
|
||||
f8yBVakwn+UL0voZYVUyBNpMT52Hm77CrtAjbfzohiotXS0RXSqkYNz8Ydg4TE0co7lWa/SwHrAX
|
||||
fEDOF8WmA2aphGqp/GAVnYyK/eknTG4zdrf+Bx837RvWt64izidZKpKhtIOWjn1vTU9cvw4OKP74
|
||||
G27y9SqujqvyT3ycnM+C+aenLkXDTEAzW0B//mckqQGxxPuQbniXw7NPD9P+ZlT93BwoA5Rl4rFN
|
||||
Mgp+fgtqC2AS75kuwXjlpQhKB7qfaIwkrb37wgCUnKWbuXTUtn5OCX563rt7WbVemb6Al0cee4Ji
|
||||
zXRuojL+g/+O63zBuvVv4MjnPTGKwk6XJCgH4BhZNoHhwlS02KEYZucu8sQDklN+5y8t2urptLjE
|
||||
p+MLExaQUztjL3T1YN09CxMpjOYRZ/emv35E9/N7sBeNM/0mQTdB8WR+seXga7pgoy/h7/w3Pkzn
|
||||
PNQiKO/PNn7gRAl+/BSuzXTC0fgEdG57aw+as4mx7J/NXiidaw09XTbxTx99Gc/rwOPb9eTg3phf
|
||||
P6aFy90WPenD3qtl67cARTR1bJH7uZ/rZ7GiD7u+iMWhD5imW9BB/l7zxFJq4AxN1CVQi/rrNOtp
|
||||
nC7P1/AGSuFzePOHqzk9f99w81vxIXqfKf1IdvvTo+QEKxyspfSB0OpmER8SsDrLvm5l2Do0m3a+
|
||||
eXXW2pxldLw7hDjbeQ9bPwKW7TPHbrC8Kirydgw7WUqJcUw9IJCR+n/8Gquw39Xy45PdyaNbf0yv
|
||||
eNeWINSDucUGIUK6/PrV/bd4TWCrH6yUFgXqtM8O6+cdAtMtsHW4a5OAWN6t0pYqPUAoNUzk9bJS
|
||||
/vRVDiRDD6dff41w4/wGh3Z4ELvwTMpLvSr9/GmibPySpgstkYZcxoNKYlLaRUCFW78HywNk6fLh
|
||||
8lL6Puob1hP1E7TNATDQGTST2JE3ONRIPy6MmF3mwQ0vh7h9F1B+zBGJk5et8Y3bmpDgffTzryjX
|
||||
vp0WTiz3msRVJ2CuVWcPHGXhyeH2dCrOlDwVdIXJEwdNu75Tzxcf7YziTDJ2bqu1Fx4Q/v2bCvjP
|
||||
f/311//4TRjUbZZ/tsGAMV/Gf//XqMC/hX8PdfL5/BlDmIakyP/+539PIPz97dv6O/7PsX3nzfD3
|
||||
P38Jf0YN/h7bMfn8P4//tb3oP//1vwAAAP//AwDPjjDU3iAAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 97d174615cfef96b-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 10 Sep 2025 19:50:30 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=eYh.U8kiOc9xS0U2L8g4MiopA6w9E7lUuodx4D.rMOU-1757533830-1.0.1.1-YO2od1GbrHRgwOEdJSw3gCcNy8XFBF_O.jT_f8F2z6dWZsBIS7XPLWUpJAzenthO1wXRkx7OZDmVrPCPro2sSj1srJCxCY8KgIwcjw5NWGU;
|
||||
path=/; expires=Wed, 10-Sep-25 20:20:30 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=vkbBikeJy.dDV.o7ZB2HjcJaD_hkp9dDeCEBfHZxG94-1757533830280-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-model:
|
||||
- text-embedding-3-small
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '172'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
via:
|
||||
- envoy-router-59c745856-z5gxd
|
||||
x-envoy-upstream-service-time:
|
||||
- '267'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '10000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '9999996'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_06f3f9465f1a4af0ae5a4d8a58f19321
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
@@ -1,140 +0,0 @@
|
||||
import subprocess
|
||||
from unittest import mock
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai.cli.subprocess_utils import run_command
|
||||
|
||||
|
||||
class TestRunCommand:
|
||||
"""Test the cross-platform subprocess utility."""
|
||||
|
||||
@mock.patch("platform.system")
|
||||
@mock.patch("subprocess.run")
|
||||
def test_windows_uses_shell_true(self, mock_subprocess_run, mock_platform):
|
||||
"""Test that Windows uses shell=True with proper command conversion."""
|
||||
mock_platform.return_value = "Windows"
|
||||
mock_subprocess_run.return_value = subprocess.CompletedProcess(
|
||||
args="uv run test", returncode=0
|
||||
)
|
||||
|
||||
command = ["uv", "run", "test"]
|
||||
run_command(command)
|
||||
|
||||
mock_subprocess_run.assert_called_once()
|
||||
call_args = mock_subprocess_run.call_args
|
||||
|
||||
assert call_args[1]["shell"] is True
|
||||
assert isinstance(call_args[0][0], str)
|
||||
assert "uv run test" in call_args[0][0]
|
||||
|
||||
@mock.patch("platform.system")
|
||||
@mock.patch("subprocess.run")
|
||||
def test_unix_uses_shell_false(self, mock_subprocess_run, mock_platform):
|
||||
"""Test that Unix-like systems use shell=False with list commands."""
|
||||
mock_platform.return_value = "Linux"
|
||||
mock_subprocess_run.return_value = subprocess.CompletedProcess(
|
||||
args=["uv", "run", "test"], returncode=0
|
||||
)
|
||||
|
||||
command = ["uv", "run", "test"]
|
||||
run_command(command)
|
||||
|
||||
mock_subprocess_run.assert_called_once()
|
||||
call_args = mock_subprocess_run.call_args
|
||||
|
||||
assert call_args[1].get("shell", False) is False
|
||||
assert call_args[0][0] == command
|
||||
|
||||
@mock.patch("platform.system")
|
||||
@mock.patch("subprocess.run")
|
||||
def test_windows_command_escaping(self, mock_subprocess_run, mock_platform):
|
||||
"""Test that Windows properly escapes command arguments."""
|
||||
mock_platform.return_value = "Windows"
|
||||
mock_subprocess_run.return_value = subprocess.CompletedProcess(
|
||||
args="test", returncode=0
|
||||
)
|
||||
|
||||
command = ["echo", "hello world", "test&special"]
|
||||
run_command(command)
|
||||
|
||||
mock_subprocess_run.assert_called_once()
|
||||
call_args = mock_subprocess_run.call_args
|
||||
command_str = call_args[0][0]
|
||||
|
||||
assert '"hello world"' in command_str or "'hello world'" in command_str
|
||||
|
||||
@mock.patch("platform.system")
|
||||
@mock.patch("subprocess.run")
|
||||
def test_error_handling_preserved(self, mock_subprocess_run, mock_platform):
|
||||
"""Test that CalledProcessError is properly raised."""
|
||||
mock_platform.return_value = "Windows"
|
||||
mock_subprocess_run.side_effect = subprocess.CalledProcessError(1, "test")
|
||||
|
||||
with pytest.raises(subprocess.CalledProcessError):
|
||||
run_command(["test"], check=True)
|
||||
|
||||
@mock.patch("platform.system")
|
||||
@mock.patch("subprocess.run")
|
||||
def test_all_parameters_passed_through(self, mock_subprocess_run, mock_platform):
|
||||
"""Test that all subprocess parameters are properly passed through."""
|
||||
mock_platform.return_value = "Linux"
|
||||
mock_subprocess_run.return_value = subprocess.CompletedProcess(
|
||||
args=["test"], returncode=0
|
||||
)
|
||||
|
||||
run_command(
|
||||
["test"],
|
||||
capture_output=True,
|
||||
text=False,
|
||||
check=False,
|
||||
cwd="/home/test",
|
||||
env={"TEST": "value"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
mock_subprocess_run.assert_called_once()
|
||||
call_args = mock_subprocess_run.call_args
|
||||
|
||||
assert call_args[1]["capture_output"] is True
|
||||
assert call_args[1]["text"] is False
|
||||
assert call_args[1]["check"] is False
|
||||
assert call_args[1]["cwd"] == "/home/test"
|
||||
assert call_args[1]["env"] == {"TEST": "value"}
|
||||
assert call_args[1]["timeout"] == 30
|
||||
|
||||
@mock.patch("platform.system")
|
||||
@mock.patch("subprocess.run")
|
||||
def test_macos_uses_shell_false(self, mock_subprocess_run, mock_platform):
|
||||
"""Test that macOS uses shell=False with list commands."""
|
||||
mock_platform.return_value = "Darwin"
|
||||
mock_subprocess_run.return_value = subprocess.CompletedProcess(
|
||||
args=["uv", "run", "test"], returncode=0
|
||||
)
|
||||
|
||||
command = ["uv", "run", "test"]
|
||||
run_command(command)
|
||||
|
||||
mock_subprocess_run.assert_called_once()
|
||||
call_args = mock_subprocess_run.call_args
|
||||
|
||||
assert call_args[1].get("shell", False) is False
|
||||
assert call_args[0][0] == command
|
||||
|
||||
@mock.patch("platform.system")
|
||||
@mock.patch("subprocess.run")
|
||||
def test_windows_string_passthrough(self, mock_subprocess_run, mock_platform):
|
||||
"""Test that Windows passes through string commands unchanged."""
|
||||
mock_platform.return_value = "Windows"
|
||||
mock_subprocess_run.return_value = subprocess.CompletedProcess(
|
||||
args="test command", returncode=0
|
||||
)
|
||||
|
||||
command_str = "test command with spaces"
|
||||
run_command(command_str)
|
||||
|
||||
mock_subprocess_run.assert_called_once()
|
||||
call_args = mock_subprocess_run.call_args
|
||||
|
||||
assert call_args[0][0] == command_str
|
||||
assert call_args[1]["shell"] is True
|
||||
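The removed tests above pin down the behavior expected of crewai.cli.subprocess_utils.run_command: list commands are joined into a quoted string and run with shell=True on Windows, passed through as a list with shell=False elsewhere, string commands pass through unchanged, and all keyword arguments reach subprocess.run. A minimal sketch consistent with those assertions (not the actual implementation) would be:

import platform
import subprocess

def run_command(command, **kwargs):
    """Run a command cross-platform (sketch based on the removed tests above)."""
    if platform.system() == "Windows":
        if isinstance(command, list):
            # Quote arguments containing spaces or shell metacharacters
            # (assumption: simple double-quoting is enough for this sketch).
            command = " ".join(
                f'"{part}"' if any(ch in part for ch in ' &|<>^') else part
                for part in command
            )
        # Windows runs the flattened string through the shell.
        return subprocess.run(command, shell=True, **kwargs)
    # Linux/macOS: pass the argument list directly, no shell involved.
    return subprocess.run(command, **kwargs)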
3
tests/responsibility/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
"""
|
||||
Tests for the formal responsibility tracking system.
|
||||
"""
|
||||
199
tests/responsibility/test_accountability.py
Normal file
@@ -0,0 +1,199 @@
|
||||
"""
|
||||
Tests for accountability logging system.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from datetime import datetime, timedelta
|
||||
from unittest.mock import Mock
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.task import Task
|
||||
from crewai.responsibility.accountability import AccountabilityLogger
|
||||
|
||||
|
||||
class TestAccountabilityLogger:
|
||||
@pytest.fixture
|
||||
def logger(self):
|
||||
return AccountabilityLogger()
|
||||
|
||||
@pytest.fixture
|
||||
def mock_agent(self):
|
||||
agent = Mock(spec=BaseAgent)
|
||||
agent.role = "Test Agent"
|
||||
return agent
|
||||
|
||||
@pytest.fixture
|
||||
def mock_task(self):
|
||||
task = Mock(spec=Task)
|
||||
task.id = "test_task_1"
|
||||
task.description = "Test task description"
|
||||
return task
|
||||
|
||||
def test_log_action(self, logger, mock_agent, mock_task):
|
||||
context = {"complexity": "high", "priority": "urgent"}
|
||||
|
||||
record = logger.log_action(
|
||||
agent=mock_agent,
|
||||
action_type="task_execution",
|
||||
action_description="Executed data processing task",
|
||||
task=mock_task,
|
||||
context=context
|
||||
)
|
||||
|
||||
assert record.agent_id == "Test Agent_" + str(id(mock_agent))
|
||||
assert record.action_type == "task_execution"
|
||||
assert record.action_description == "Executed data processing task"
|
||||
assert record.task_id == "test_task_1"
|
||||
assert record.context["complexity"] == "high"
|
||||
assert len(logger.records) == 1
|
||||
|
||||
def test_log_decision(self, logger, mock_agent, mock_task):
|
||||
alternatives = ["Option A", "Option B", "Option C"]
|
||||
|
||||
record = logger.log_decision(
|
||||
agent=mock_agent,
|
||||
decision="Chose Option A",
|
||||
reasoning="Best performance characteristics",
|
||||
task=mock_task,
|
||||
alternatives_considered=alternatives
|
||||
)
|
||||
|
||||
assert record.action_type == "decision"
|
||||
assert record.action_description == "Chose Option A"
|
||||
assert record.context["reasoning"] == "Best performance characteristics"
|
||||
assert record.context["alternatives_considered"] == alternatives
|
||||
|
||||
def test_log_delegation(self, logger, mock_task):
|
||||
delegating_agent = Mock(spec=BaseAgent)
|
||||
delegating_agent.role = "Manager"
|
||||
|
||||
receiving_agent = Mock(spec=BaseAgent)
|
||||
receiving_agent.role = "Developer"
|
||||
|
||||
record = logger.log_delegation(
|
||||
delegating_agent=delegating_agent,
|
||||
receiving_agent=receiving_agent,
|
||||
task=mock_task,
|
||||
delegation_reason="Specialized expertise required"
|
||||
)
|
||||
|
||||
assert record.action_type == "delegation"
|
||||
assert "Delegated task to Developer" in record.action_description
|
||||
assert record.context["receiving_agent_role"] == "Developer"
|
||||
assert record.context["delegation_reason"] == "Specialized expertise required"
|
||||
|
||||
def test_log_task_completion(self, logger, mock_agent, mock_task):
|
||||
record = logger.log_task_completion(
|
||||
agent=mock_agent,
|
||||
task=mock_task,
|
||||
success=True,
|
||||
outcome_description="Task completed successfully with high quality",
|
||||
completion_time=1800.0
|
||||
)
|
||||
|
||||
assert record.action_type == "task_completion"
|
||||
assert record.success is True
|
||||
assert record.outcome == "Task completed successfully with high quality"
|
||||
assert record.context["completion_time"] == 1800.0
|
||||
|
||||
def test_get_agent_records(self, logger, mock_agent, mock_task):
|
||||
logger.log_action(mock_agent, "action1", "Description 1", mock_task)
|
||||
logger.log_action(mock_agent, "action2", "Description 2", mock_task)
|
||||
logger.log_decision(mock_agent, "decision1", "Reasoning", mock_task)
|
||||
|
||||
all_records = logger.get_agent_records(mock_agent)
|
||||
assert len(all_records) == 3
|
||||
|
||||
decision_records = logger.get_agent_records(mock_agent, action_type="decision")
|
||||
assert len(decision_records) == 1
|
||||
assert decision_records[0].action_type == "decision"
|
||||
|
||||
recent_time = datetime.utcnow() - timedelta(minutes=1)
|
||||
recent_records = logger.get_agent_records(mock_agent, since=recent_time)
|
||||
assert len(recent_records) == 3 # All should be recent
|
||||
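These filters suggest a straightforward shape for get_agent_records; a sketch consistent with the assertions (field names such as timestamp are assumptions) might look like:

def get_agent_records(self, agent, action_type=None, since=None):
    # Agent id format matches the "role_id(agent)" form asserted in test_log_action.
    agent_id = f"{agent.role}_{id(agent)}"
    records = [r for r in self.records if r.agent_id == agent_id]
    if action_type is not None:
        records = [r for r in records if r.action_type == action_type]
    if since is not None:
        # 'timestamp' is an assumed field name for when the record was logged.
        records = [r for r in records if r.timestamp >= since]
    return records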
|
||||
def test_get_task_records(self, logger, mock_agent, mock_task):
|
||||
other_task = Mock(spec=Task)
|
||||
other_task.id = "other_task"
|
||||
|
||||
logger.log_action(mock_agent, "action1", "Description 1", mock_task)
|
||||
logger.log_action(mock_agent, "action2", "Description 2", other_task)
|
||||
logger.log_action(mock_agent, "action3", "Description 3", mock_task)
|
||||
|
||||
task_records = logger.get_task_records(mock_task)
|
||||
assert len(task_records) == 2
|
||||
|
||||
for record in task_records:
|
||||
assert record.task_id == "test_task_1"
|
||||
|
||||
def test_get_delegation_chain(self, logger, mock_task):
|
||||
manager = Mock(spec=BaseAgent)
|
||||
manager.role = "Manager"
|
||||
|
||||
supervisor = Mock(spec=BaseAgent)
|
||||
supervisor.role = "Supervisor"
|
||||
|
||||
developer = Mock(spec=BaseAgent)
|
||||
developer.role = "Developer"
|
||||
|
||||
logger.log_delegation(manager, supervisor, mock_task, "Initial delegation")
|
||||
logger.log_delegation(supervisor, developer, mock_task, "Further delegation")
|
||||
|
||||
chain = logger.get_delegation_chain(mock_task)
|
||||
assert len(chain) == 2
|
||||
|
||||
assert chain[0].context["receiving_agent_role"] == "Supervisor"
|
||||
assert chain[1].context["receiving_agent_role"] == "Developer"
|
||||
|
||||
def test_generate_accountability_report(self, logger, mock_agent, mock_task):
|
||||
record1 = logger.log_action(mock_agent, "task_execution", "Task 1", mock_task)
|
||||
record1.set_outcome("Success", True)
|
||||
|
||||
record2 = logger.log_action(mock_agent, "task_execution", "Task 2", mock_task)
|
||||
record2.set_outcome("Failed", False)
|
||||
|
||||
record3 = logger.log_decision(mock_agent, "Decision 1", "Reasoning", mock_task)
|
||||
record3.set_outcome("Good decision", True)
|
||||
|
||||
report = logger.generate_accountability_report(agent=mock_agent)
|
||||
|
||||
assert report["total_records"] == 3
|
||||
assert report["action_counts"]["task_execution"] == 2
|
||||
assert report["action_counts"]["decision"] == 1
|
||||
assert report["success_counts"]["task_execution"] == 1
|
||||
assert report["failure_counts"]["task_execution"] == 1
|
||||
assert report["success_rates"]["task_execution"] == 0.5
|
||||
assert report["success_rates"]["decision"] == 1.0
|
||||
|
||||
assert len(report["recent_actions"]) == 3
|
||||
|
||||
def test_generate_system_wide_report(self, logger, mock_task):
|
||||
agent1 = Mock(spec=BaseAgent)
|
||||
agent1.role = "Agent 1"
|
||||
|
||||
agent2 = Mock(spec=BaseAgent)
|
||||
agent2.role = "Agent 2"
|
||||
|
||||
logger.log_action(agent1, "task_execution", "Task 1", mock_task)
|
||||
logger.log_action(agent2, "task_execution", "Task 2", mock_task)
|
||||
|
||||
report = logger.generate_accountability_report()
|
||||
|
||||
assert report["agent_id"] == "all_agents"
|
||||
assert report["total_records"] == 2
|
||||
assert report["action_counts"]["task_execution"] == 2
|
||||
|
||||
def test_time_filtered_report(self, logger, mock_agent, mock_task):
|
||||
logger.log_action(mock_agent, "old_action", "Old action", mock_task)
|
||||
|
||||
report = logger.generate_accountability_report(
|
||||
agent=mock_agent,
|
||||
time_period=timedelta(hours=1)
|
||||
)
|
||||
|
||||
assert report["total_records"] == 1
|
||||
|
||||
report = logger.generate_accountability_report(
|
||||
agent=mock_agent,
|
||||
time_period=timedelta(seconds=1)
|
||||
)
|
||||
221
tests/responsibility/test_assignment.py
Normal file
@@ -0,0 +1,221 @@
|
||||
"""
|
||||
Tests for mathematical responsibility assignment.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from unittest.mock import Mock
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.task import Task
|
||||
from crewai.responsibility.models import AgentCapability, CapabilityType, TaskRequirement
|
||||
from crewai.responsibility.hierarchy import CapabilityHierarchy
|
||||
from crewai.responsibility.assignment import ResponsibilityCalculator, AssignmentStrategy
|
||||
|
||||
|
||||
class TestResponsibilityCalculator:
|
||||
@pytest.fixture
|
||||
def hierarchy(self):
|
||||
return CapabilityHierarchy()
|
||||
|
||||
@pytest.fixture
|
||||
def calculator(self, hierarchy):
|
||||
return ResponsibilityCalculator(hierarchy)
|
||||
|
||||
@pytest.fixture
|
||||
def mock_task(self):
|
||||
task = Mock(spec=Task)
|
||||
task.id = "test_task_1"
|
||||
task.description = "Test task description"
|
||||
return task
|
||||
|
||||
@pytest.fixture
|
||||
def python_agent(self, hierarchy):
|
||||
agent = Mock(spec=BaseAgent)
|
||||
agent.role = "Python Developer"
|
||||
|
||||
capability = AgentCapability(
|
||||
name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
proficiency_level=0.9,
|
||||
confidence_score=0.8,
|
||||
keywords=["python", "programming"]
|
||||
)
|
||||
|
||||
hierarchy.add_agent(agent, [capability])
|
||||
return agent
|
||||
|
||||
@pytest.fixture
|
||||
def analysis_agent(self, hierarchy):
|
||||
agent = Mock(spec=BaseAgent)
|
||||
agent.role = "Data Analyst"
|
||||
|
||||
capability = AgentCapability(
|
||||
name="Data Analysis",
|
||||
capability_type=CapabilityType.ANALYTICAL,
|
||||
proficiency_level=0.8,
|
||||
confidence_score=0.9,
|
||||
keywords=["data", "analysis"]
|
||||
)
|
||||
|
||||
hierarchy.add_agent(agent, [capability])
|
||||
return agent
|
||||
|
||||
def test_greedy_assignment(self, calculator, mock_task, python_agent):
|
||||
requirements = [
|
||||
TaskRequirement(
|
||||
capability_name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.5,
|
||||
weight=1.0
|
||||
)
|
||||
]
|
||||
|
||||
assignment = calculator.calculate_responsibility_assignment(
|
||||
mock_task, requirements, AssignmentStrategy.GREEDY
|
||||
)
|
||||
|
||||
assert assignment is not None
|
||||
assert assignment.task_id == "test_task_1"
|
||||
assert assignment.responsibility_score > 0.5
|
||||
assert "Python Programming" in assignment.capability_matches
|
||||
assert "Greedy assignment" in assignment.reasoning
|
||||
|
||||
def test_balanced_assignment(self, calculator, mock_task, python_agent, analysis_agent):
|
||||
calculator.update_workload(python_agent, 5) # High workload
|
||||
calculator.update_workload(analysis_agent, 1) # Low workload
|
||||
|
||||
requirements = [
|
||||
TaskRequirement(
|
||||
capability_name="General Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.3,
|
||||
weight=1.0
|
||||
)
|
||||
]
|
||||
|
||||
assignment = calculator.calculate_responsibility_assignment(
|
||||
mock_task, requirements, AssignmentStrategy.BALANCED
|
||||
)
|
||||
|
||||
assert assignment is not None
|
||||
assert "Balanced assignment" in assignment.reasoning
|
||||
|
||||
def test_optimal_assignment(self, calculator, mock_task, python_agent):
|
||||
requirements = [
|
||||
TaskRequirement(
|
||||
capability_name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.5,
|
||||
weight=1.0
|
||||
)
|
||||
]
|
||||
|
||||
assignment = calculator.calculate_responsibility_assignment(
|
||||
mock_task, requirements, AssignmentStrategy.OPTIMAL
|
||||
)
|
||||
|
||||
assert assignment is not None
|
||||
assert "Optimal assignment" in assignment.reasoning
|
||||
|
||||
def test_multi_agent_assignment(self, calculator, mock_task, python_agent, analysis_agent):
|
||||
requirements = [
|
||||
TaskRequirement(
|
||||
capability_name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.5,
|
||||
weight=1.0
|
||||
),
|
||||
TaskRequirement(
|
||||
capability_name="Data Analysis",
|
||||
capability_type=CapabilityType.ANALYTICAL,
|
||||
minimum_proficiency=0.5,
|
||||
weight=0.8
|
||||
)
|
||||
]
|
||||
|
||||
assignments = calculator.calculate_multi_agent_assignment(
|
||||
mock_task, requirements, max_agents=2
|
||||
)
|
||||
|
||||
assert len(assignments) <= 2
|
||||
assert len(assignments) > 0
|
||||
|
||||
agent_ids = [assignment.agent_id for assignment in assignments]
|
||||
assert len(agent_ids) == len(set(agent_ids))
|
||||
|
||||
def test_workload_update(self, calculator, python_agent):
|
||||
initial_workload = calculator.current_workloads.get(
|
||||
calculator.hierarchy._get_agent_id(python_agent), 0
|
||||
)
|
||||
|
||||
calculator.update_workload(python_agent, 3)
|
||||
|
||||
new_workload = calculator.current_workloads.get(
|
||||
calculator.hierarchy._get_agent_id(python_agent), 0
|
||||
)
|
||||
|
||||
assert new_workload == initial_workload + 3
|
||||
|
||||
calculator.update_workload(python_agent, -2)
|
||||
|
||||
final_workload = calculator.current_workloads.get(
|
||||
calculator.hierarchy._get_agent_id(python_agent), 0
|
||||
)
|
||||
|
||||
assert final_workload == new_workload - 2
|
||||
|
||||
def test_workload_distribution(self, calculator, python_agent, analysis_agent):
|
||||
calculator.update_workload(python_agent, 3)
|
||||
calculator.update_workload(analysis_agent, 1)
|
||||
|
||||
distribution = calculator.get_workload_distribution()
|
||||
|
||||
python_id = calculator.hierarchy._get_agent_id(python_agent)
|
||||
analysis_id = calculator.hierarchy._get_agent_id(analysis_agent)
|
||||
|
||||
assert distribution[python_id] == 3
|
||||
assert distribution[analysis_id] == 1
|
||||
|
||||
def test_exclude_agents(self, calculator, mock_task, python_agent, analysis_agent):
|
||||
requirements = [
|
||||
TaskRequirement(
|
||||
capability_name="Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.3,
|
||||
weight=1.0
|
||||
)
|
||||
]
|
||||
|
||||
assignment = calculator.calculate_responsibility_assignment(
|
||||
mock_task, requirements, AssignmentStrategy.GREEDY,
|
||||
exclude_agents=[python_agent]
|
||||
)
|
||||
|
||||
if assignment: # If any agent was assigned
|
||||
python_id = calculator.hierarchy._get_agent_id(python_agent)
|
||||
assert assignment.agent_id != python_id
|
||||
|
||||
def test_no_capable_agents(self, calculator, mock_task):
|
||||
requirements = [
|
||||
TaskRequirement(
|
||||
capability_name="Quantum Computing",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.9,
|
||||
weight=1.0
|
||||
)
|
||||
]
|
||||
|
||||
assignment = calculator.calculate_responsibility_assignment(
|
||||
mock_task, requirements, AssignmentStrategy.GREEDY
|
||||
)
|
||||
|
||||
assert assignment is None
|
||||
|
||||
def test_workload_penalty_calculation(self, calculator):
|
||||
assert calculator._calculate_workload_penalty(0) == 0.0
|
||||
|
||||
penalty_1 = calculator._calculate_workload_penalty(1)
|
||||
penalty_5 = calculator._calculate_workload_penalty(5)
|
||||
|
||||
assert penalty_1 < penalty_5 # Higher workload should have higher penalty
|
||||
assert penalty_5 <= 0.8 # Should not exceed maximum penalty
|
||||
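The penalty test only constrains the shape of _calculate_workload_penalty: zero workload gives no penalty, the penalty grows with workload, and it never exceeds 0.8. One sketch satisfying those constraints (the actual scaling factor is an assumption):

def _calculate_workload_penalty(self, workload: int) -> float:
    # Linear penalty, 0.2 per queued task, capped at the 0.8 maximum (scaling assumed).
    return min(0.8, 0.2 * workload)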
208
tests/responsibility/test_hierarchy.py
Normal file
@@ -0,0 +1,208 @@
|
||||
"""
|
||||
Tests for capability-based agent hierarchy.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from unittest.mock import Mock
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.responsibility.models import AgentCapability, CapabilityType, TaskRequirement
|
||||
from crewai.responsibility.hierarchy import CapabilityHierarchy
|
||||
|
||||
|
||||
class TestCapabilityHierarchy:
|
||||
@pytest.fixture
|
||||
def hierarchy(self):
|
||||
return CapabilityHierarchy()
|
||||
|
||||
@pytest.fixture
|
||||
def mock_agent(self):
|
||||
agent = Mock(spec=BaseAgent)
|
||||
agent.role = "Test Agent"
|
||||
return agent
|
||||
|
||||
@pytest.fixture
|
||||
def python_capability(self):
|
||||
return AgentCapability(
|
||||
name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
proficiency_level=0.8,
|
||||
confidence_score=0.9,
|
||||
keywords=["python", "programming"]
|
||||
)
|
||||
|
||||
@pytest.fixture
|
||||
def analysis_capability(self):
|
||||
return AgentCapability(
|
||||
name="Data Analysis",
|
||||
capability_type=CapabilityType.ANALYTICAL,
|
||||
proficiency_level=0.7,
|
||||
confidence_score=0.8,
|
||||
keywords=["data", "analysis", "statistics"]
|
||||
)
|
||||
|
||||
def test_add_agent(self, hierarchy, mock_agent, python_capability):
|
||||
capabilities = [python_capability]
|
||||
hierarchy.add_agent(mock_agent, capabilities)
|
||||
|
||||
assert len(hierarchy.agents) == 1
|
||||
assert len(hierarchy.agent_capabilities) == 1
|
||||
assert "Python Programming" in hierarchy.capability_index
|
||||
|
||||
def test_remove_agent(self, hierarchy, mock_agent, python_capability):
|
||||
capabilities = [python_capability]
|
||||
hierarchy.add_agent(mock_agent, capabilities)
|
||||
|
||||
assert len(hierarchy.agents) == 1
|
||||
|
||||
hierarchy.remove_agent(mock_agent)
|
||||
|
||||
assert len(hierarchy.agents) == 0
|
||||
assert len(hierarchy.agent_capabilities) == 0
|
||||
assert len(hierarchy.capability_index["Python Programming"]) == 0
|
||||
|
||||
def test_supervision_relationship(self, hierarchy):
|
||||
supervisor = Mock(spec=BaseAgent)
|
||||
supervisor.role = "Supervisor"
|
||||
subordinate = Mock(spec=BaseAgent)
|
||||
subordinate.role = "Subordinate"
|
||||
|
||||
hierarchy.add_agent(supervisor, [])
|
||||
hierarchy.add_agent(subordinate, [])
|
||||
|
||||
hierarchy.set_supervision_relationship(supervisor, subordinate)
|
||||
|
||||
subordinates = hierarchy.get_subordinates(supervisor)
|
||||
assert len(subordinates) == 1
|
||||
assert subordinates[0] == subordinate
|
||||
|
||||
def test_update_agent_capability(self, hierarchy, mock_agent, python_capability):
|
||||
hierarchy.add_agent(mock_agent, [python_capability])
|
||||
|
||||
success = hierarchy.update_agent_capability(
|
||||
mock_agent, "Python Programming", 0.9, 0.95
|
||||
)
|
||||
|
||||
assert success is True
|
||||
|
||||
capabilities = hierarchy.get_agent_capabilities(mock_agent)
|
||||
updated_cap = next(cap for cap in capabilities if cap.name == "Python Programming")
|
||||
assert updated_cap.proficiency_level == 0.9
|
||||
assert updated_cap.confidence_score == 0.95
|
||||
|
||||
def test_find_capable_agents(self, hierarchy, mock_agent, python_capability):
|
||||
hierarchy.add_agent(mock_agent, [python_capability])
|
||||
|
||||
requirements = [
|
||||
TaskRequirement(
|
||||
capability_name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.5,
|
||||
weight=1.0
|
||||
)
|
||||
]
|
||||
|
||||
capable_agents = hierarchy.find_capable_agents(requirements)
|
||||
|
||||
assert len(capable_agents) == 1
|
||||
assert capable_agents[0][0] == mock_agent
|
||||
assert capable_agents[0][1] > 0.5 # Should have a good match score
|
||||
|
||||
def test_get_best_agent_for_task(self, hierarchy, python_capability, analysis_capability):
|
||||
agent1 = Mock(spec=BaseAgent)
|
||||
agent1.role = "Python Developer"
|
||||
agent2 = Mock(spec=BaseAgent)
|
||||
agent2.role = "Data Analyst"
|
||||
|
||||
hierarchy.add_agent(agent1, [python_capability])
|
||||
hierarchy.add_agent(agent2, [analysis_capability])
|
||||
|
||||
requirements = [
|
||||
TaskRequirement(
|
||||
capability_name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.5,
|
||||
weight=1.0
|
||||
)
|
||||
]
|
||||
|
||||
result = hierarchy.get_best_agent_for_task(requirements)
|
||||
|
||||
assert result is not None
|
||||
best_agent, score, matches = result
|
||||
assert best_agent == agent1 # Python developer should be chosen
|
||||
assert "Python Programming" in matches
|
||||
|
||||
def test_capability_distribution(self, hierarchy, python_capability, analysis_capability):
|
||||
agent1 = Mock(spec=BaseAgent)
|
||||
agent1.role = "Developer"
|
||||
agent2 = Mock(spec=BaseAgent)
|
||||
agent2.role = "Analyst"
|
||||
|
||||
hierarchy.add_agent(agent1, [python_capability])
|
||||
hierarchy.add_agent(agent2, [analysis_capability])
|
||||
|
||||
distribution = hierarchy.get_capability_distribution()
|
||||
|
||||
assert CapabilityType.TECHNICAL in distribution
|
||||
assert CapabilityType.ANALYTICAL in distribution
|
||||
assert distribution[CapabilityType.TECHNICAL]["high"] == 1 # Python capability is high proficiency
|
||||
assert distribution[CapabilityType.ANALYTICAL]["medium"] == 1 # Analysis capability is medium proficiency
|
||||
|
||||
def test_hierarchy_path(self, hierarchy):
|
||||
manager = Mock(spec=BaseAgent)
|
||||
manager.role = "Manager"
|
||||
supervisor = Mock(spec=BaseAgent)
|
||||
supervisor.role = "Supervisor"
|
||||
worker = Mock(spec=BaseAgent)
|
||||
worker.role = "Worker"
|
||||
|
||||
hierarchy.add_agent(manager, [])
|
||||
hierarchy.add_agent(supervisor, [])
|
||||
hierarchy.add_agent(worker, [])
|
||||
|
||||
hierarchy.set_supervision_relationship(manager, supervisor)
|
||||
hierarchy.set_supervision_relationship(supervisor, worker)
|
||||
|
||||
path = hierarchy.get_hierarchy_path(manager, worker)
|
||||
|
||||
assert path is not None
|
||||
assert len(path) == 3
|
||||
assert path[0] == manager
|
||||
assert path[1] == supervisor
|
||||
assert path[2] == worker
|
||||
|
||||
def test_capabilities_match(self, hierarchy, python_capability):
|
||||
requirement = TaskRequirement(
|
||||
capability_name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.5
|
||||
)
|
||||
|
||||
assert hierarchy._capabilities_match(python_capability, requirement) is True
|
||||
|
||||
requirement2 = TaskRequirement(
|
||||
capability_name="Different Name",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.5
|
||||
)
|
||||
|
||||
assert hierarchy._capabilities_match(python_capability, requirement2) is True
|
||||
|
||||
requirement3 = TaskRequirement(
|
||||
capability_name="Different Name",
|
||||
capability_type=CapabilityType.ANALYTICAL,
|
||||
minimum_proficiency=0.5,
|
||||
keywords=["python"]
|
||||
)
|
||||
|
||||
assert hierarchy._capabilities_match(python_capability, requirement3) is True
|
||||
|
||||
requirement4 = TaskRequirement(
|
||||
capability_name="Different Name",
|
||||
capability_type=CapabilityType.ANALYTICAL,
|
||||
minimum_proficiency=0.5,
|
||||
keywords=["java"]
|
||||
)
|
||||
|
||||
assert hierarchy._capabilities_match(python_capability, requirement4) is False
|
||||
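Taken together, the four requirements above imply a matching rule along these lines: an exact name match, a matching capability type, or any keyword overlap counts as a match. A sketch of that rule (the real method may also weigh proficiency):

def _capabilities_match(self, capability, requirement) -> bool:
    # Exact capability name match.
    if capability.name == requirement.capability_name:
        return True
    # Same capability type (e.g. TECHNICAL vs ANALYTICAL).
    if capability.capability_type == requirement.capability_type:
        return True
    # Otherwise fall back to keyword overlap.
    return bool(set(capability.keywords) & set(requirement.keywords or []))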
286
tests/responsibility/test_integration.py
Normal file
@@ -0,0 +1,286 @@
|
||||
"""
|
||||
Integration tests for the responsibility tracking system.
|
||||
"""
|
||||
|
||||
from unittest.mock import Mock
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.responsibility.assignment import AssignmentStrategy
|
||||
from crewai.responsibility.models import (
|
||||
AgentCapability,
|
||||
CapabilityType,
|
||||
TaskRequirement,
|
||||
)
|
||||
from crewai.responsibility.system import ResponsibilitySystem
|
||||
from crewai.task import Task
|
||||
|
||||
|
||||
class TestResponsibilitySystemIntegration:
|
||||
@pytest.fixture
|
||||
def system(self):
|
||||
return ResponsibilitySystem()
|
||||
|
||||
@pytest.fixture
|
||||
def python_agent(self):
|
||||
agent = Mock(spec=BaseAgent)
|
||||
agent.role = "Python Developer"
|
||||
return agent
|
||||
|
||||
@pytest.fixture
|
||||
def analysis_agent(self):
|
||||
agent = Mock(spec=BaseAgent)
|
||||
agent.role = "Data Analyst"
|
||||
return agent
|
||||
|
||||
@pytest.fixture
|
||||
def python_capability(self):
|
||||
return AgentCapability(
|
||||
name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
proficiency_level=0.9,
|
||||
confidence_score=0.8,
|
||||
keywords=["python", "programming", "development"]
|
||||
)
|
||||
|
||||
@pytest.fixture
|
||||
def analysis_capability(self):
|
||||
return AgentCapability(
|
||||
name="Data Analysis",
|
||||
capability_type=CapabilityType.ANALYTICAL,
|
||||
proficiency_level=0.8,
|
||||
confidence_score=0.9,
|
||||
keywords=["data", "analysis", "statistics"]
|
||||
)
|
||||
|
||||
@pytest.fixture
|
||||
def mock_task(self):
|
||||
task = Mock(spec=Task)
|
||||
task.id = "integration_test_task"
|
||||
task.description = "Complex data processing task requiring Python skills"
|
||||
return task
|
||||
|
||||
def test_full_workflow(self, system, python_agent, python_capability, mock_task):
|
||||
"""Test complete workflow from agent registration to task completion."""
|
||||
|
||||
system.register_agent(python_agent, [python_capability])
|
||||
|
||||
status = system.get_agent_status(python_agent)
|
||||
assert status["role"] == "Python Developer"
|
||||
assert len(status["capabilities"]) == 1
|
||||
assert status["capabilities"][0]["name"] == "Python Programming"
|
||||
|
||||
requirements = [
|
||||
TaskRequirement(
|
||||
capability_name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.5,
|
||||
weight=1.0
|
||||
)
|
||||
]
|
||||
|
||||
assignment = system.assign_task_responsibility(mock_task, requirements)
|
||||
|
||||
assert assignment is not None
|
||||
assert assignment.task_id == "integration_test_task"
|
||||
assert assignment.responsibility_score > 0.5
|
||||
|
||||
updated_status = system.get_agent_status(python_agent)
|
||||
assert updated_status["current_workload"] == 1
|
||||
|
||||
system.complete_task(
|
||||
agent=python_agent,
|
||||
task=mock_task,
|
||||
success=True,
|
||||
completion_time=1800.0,
|
||||
quality_score=0.9,
|
||||
outcome_description="Task completed successfully"
|
||||
)
|
||||
|
||||
final_status = system.get_agent_status(python_agent)
|
||||
assert final_status["performance"]["total_tasks"] == 1
|
||||
assert final_status["performance"]["success_rate"] == 1.0
|
||||
assert final_status["current_workload"] == 0 # Should be decremented
|
||||
|
||||
def test_multi_agent_scenario(self, system, python_agent, analysis_agent,
|
||||
python_capability, analysis_capability, mock_task):
|
||||
"""Test scenario with multiple agents and capabilities."""
|
||||
|
||||
system.register_agent(python_agent, [python_capability])
|
||||
system.register_agent(analysis_agent, [analysis_capability])
|
||||
|
||||
requirements = [
|
||||
TaskRequirement(
|
||||
capability_name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.7,
|
||||
weight=1.0
|
||||
),
|
||||
TaskRequirement(
|
||||
capability_name="Data Analysis",
|
||||
capability_type=CapabilityType.ANALYTICAL,
|
||||
minimum_proficiency=0.6,
|
||||
weight=0.8
|
||||
)
|
||||
]
|
||||
|
||||
greedy_assignment = system.assign_task_responsibility(
|
||||
mock_task, requirements, AssignmentStrategy.GREEDY
|
||||
)
|
||||
|
||||
assert greedy_assignment is not None
|
||||
|
||||
system.calculator.update_workload(python_agent, 5)
|
||||
|
||||
balanced_assignment = system.assign_task_responsibility(
|
||||
mock_task, requirements, AssignmentStrategy.BALANCED
|
||||
)
|
||||
|
||||
assert balanced_assignment is not None
|
||||
|
||||
def test_delegation_workflow(self, system, python_agent, analysis_agent,
|
||||
python_capability, analysis_capability, mock_task):
|
||||
"""Test task delegation between agents."""
|
||||
|
||||
system.register_agent(python_agent, [python_capability], supervisor=None)
|
||||
system.register_agent(analysis_agent, [analysis_capability], supervisor=python_agent)
|
||||
|
||||
system.delegate_task(
|
||||
delegating_agent=python_agent,
|
||||
receiving_agent=analysis_agent,
|
||||
task=mock_task,
|
||||
reason="Analysis expertise required"
|
||||
)
|
||||
|
||||
analysis_status = system.get_agent_status(analysis_agent)
|
||||
|
||||
assert analysis_status["current_workload"] > 0
|
||||
|
||||
delegation_records = system.accountability.get_agent_records(
|
||||
python_agent, action_type="delegation"
|
||||
)
|
||||
assert len(delegation_records) > 0
|
||||
|
||||
def test_performance_based_capability_adjustment(self, system, python_agent,
|
||||
python_capability, mock_task):
|
||||
"""Test that capabilities are adjusted based on performance."""
|
||||
|
||||
system.register_agent(python_agent, [python_capability])
|
||||
|
||||
for i in range(5):
|
||||
task = Mock(spec=Task)
|
||||
task.id = f"task_{i}"
|
||||
task.description = f"Task {i}"
|
||||
|
||||
system.complete_task(
|
||||
agent=python_agent,
|
||||
task=task,
|
||||
success=True,
|
||||
completion_time=1800.0,
|
||||
quality_score=0.9
|
||||
)
|
||||
|
||||
updated_capabilities = system.hierarchy.get_agent_capabilities(python_agent)
|
||||
|
||||
assert len(updated_capabilities) == 1
|
||||
|
||||
def test_system_overview_and_recommendations(self, system, python_agent,
|
||||
analysis_agent, python_capability,
|
||||
analysis_capability):
|
||||
"""Test system overview and recommendation generation."""
|
||||
|
||||
system.register_agent(python_agent, [python_capability])
|
||||
system.register_agent(analysis_agent, [analysis_capability])
|
||||
|
||||
overview = system.get_system_overview()
|
||||
|
||||
assert overview["enabled"] is True
|
||||
assert overview["total_agents"] == 2
|
||||
assert "capability_distribution" in overview
|
||||
assert "system_performance" in overview
|
||||
|
||||
recommendations = system.generate_recommendations()
|
||||
|
||||
assert isinstance(recommendations, list)
|
||||
|
||||
def test_system_enable_disable(self, system, python_agent, python_capability, mock_task):
|
||||
"""Test enabling and disabling the responsibility system."""
|
||||
|
||||
assert system.enabled is True
|
||||
|
||||
system.register_agent(python_agent, [python_capability])
|
||||
|
||||
requirements = [
|
||||
TaskRequirement(
|
||||
capability_name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.5,
|
||||
weight=1.0
|
||||
)
|
||||
]
|
||||
|
||||
assignment = system.assign_task_responsibility(mock_task, requirements)
|
||||
assert assignment is not None
|
||||
|
||||
system.disable_system()
|
||||
assert system.enabled is False
|
||||
|
||||
disabled_assignment = system.assign_task_responsibility(mock_task, requirements)
|
||||
assert disabled_assignment is None
|
||||
|
||||
disabled_status = system.get_agent_status(python_agent)
|
||||
assert disabled_status == {}
|
||||
|
||||
system.enable_system()
|
||||
assert system.enabled is True
|
||||
|
||||
enabled_assignment = system.assign_task_responsibility(mock_task, requirements)
|
||||
assert enabled_assignment is not None
|
||||
|
||||
def test_accountability_tracking_integration(self, system, python_agent,
|
||||
python_capability, mock_task):
|
||||
"""Test that all operations are properly logged for accountability."""
|
||||
|
||||
system.register_agent(python_agent, [python_capability])
|
||||
|
||||
registration_records = system.accountability.get_agent_records(
|
||||
python_agent, action_type="registration"
|
||||
)
|
||||
assert len(registration_records) == 1
|
||||
|
||||
requirements = [
|
||||
TaskRequirement(
|
||||
capability_name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.5,
|
||||
weight=1.0
|
||||
)
|
||||
]
|
||||
|
||||
system.assign_task_responsibility(mock_task, requirements)
|
||||
|
||||
assignment_records = system.accountability.get_agent_records(
|
||||
python_agent, action_type="task_assignment"
|
||||
)
|
||||
assert len(assignment_records) == 1
|
||||
|
||||
system.complete_task(
|
||||
agent=python_agent,
|
||||
task=mock_task,
|
||||
success=True,
|
||||
completion_time=1800.0,
|
||||
quality_score=0.9
|
||||
)
|
||||
|
||||
completion_records = system.accountability.get_agent_records(
|
||||
python_agent, action_type="task_completion"
|
||||
)
|
||||
assert len(completion_records) == 1
|
||||
|
||||
report = system.accountability.generate_accountability_report(agent=python_agent)
|
||||
|
||||
assert report["total_records"] >= 3 # At least registration, assignment, completion
|
||||
assert "registration" in report["action_counts"]
|
||||
assert "task_assignment" in report["action_counts"]
|
||||
assert "task_completion" in report["action_counts"]
|
||||
187
tests/responsibility/test_models.py
Normal file
@@ -0,0 +1,187 @@
|
||||
"""
|
||||
Tests for responsibility tracking data models.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from datetime import datetime, timedelta
|
||||
from uuid import uuid4
|
||||
|
||||
from crewai.responsibility.models import (
|
||||
AgentCapability,
|
||||
CapabilityType,
|
||||
ResponsibilityAssignment,
|
||||
AccountabilityRecord,
|
||||
PerformanceMetrics,
|
||||
TaskRequirement
|
||||
)
|
||||
|
||||
|
||||
class TestAgentCapability:
|
||||
def test_create_capability(self):
|
||||
capability = AgentCapability(
|
||||
name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
proficiency_level=0.8,
|
||||
confidence_score=0.9,
|
||||
description="Expert in Python development",
|
||||
keywords=["python", "programming", "development"]
|
||||
)
|
||||
|
||||
assert capability.name == "Python Programming"
|
||||
assert capability.capability_type == CapabilityType.TECHNICAL
|
||||
assert capability.proficiency_level == 0.8
|
||||
assert capability.confidence_score == 0.9
|
||||
assert "python" in capability.keywords
|
||||
|
||||
def test_update_proficiency(self):
|
||||
capability = AgentCapability(
|
||||
name="Data Analysis",
|
||||
capability_type=CapabilityType.ANALYTICAL,
|
||||
proficiency_level=0.5,
|
||||
confidence_score=0.6
|
||||
)
|
||||
|
||||
old_updated = capability.last_updated
|
||||
capability.update_proficiency(0.7, 0.8)
|
||||
|
||||
assert capability.proficiency_level == 0.7
|
||||
assert capability.confidence_score == 0.8
|
||||
assert capability.last_updated > old_updated
|
||||
|
||||
def test_proficiency_bounds(self):
|
||||
capability = AgentCapability(
|
||||
name="Test",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
proficiency_level=0.5,
|
||||
confidence_score=0.5
|
||||
)
|
||||
|
||||
capability.update_proficiency(1.5, 1.2)
|
||||
assert capability.proficiency_level == 1.0
|
||||
assert capability.confidence_score == 1.0
|
||||
|
||||
capability.update_proficiency(-0.5, -0.2)
|
||||
assert capability.proficiency_level == 0.0
|
||||
assert capability.confidence_score == 0.0
|
||||
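The bounds test pins update_proficiency to clamping both values into [0.0, 1.0], and the earlier test requires last_updated to move forward. A minimal sketch of that behavior:

from datetime import datetime

def update_proficiency(self, proficiency_level: float, confidence_score: float) -> None:
    # Clamp both scores into [0.0, 1.0] and refresh the update timestamp.
    self.proficiency_level = max(0.0, min(1.0, proficiency_level))
    self.confidence_score = max(0.0, min(1.0, confidence_score))
    self.last_updated = datetime.utcnow()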
|
||||
|
||||
class TestResponsibilityAssignment:
|
||||
def test_create_assignment(self):
|
||||
assignment = ResponsibilityAssignment(
|
||||
agent_id="agent_1",
|
||||
task_id="task_1",
|
||||
responsibility_score=0.85,
|
||||
capability_matches=["Python Programming", "Data Analysis"],
|
||||
reasoning="Best match for technical requirements"
|
||||
)
|
||||
|
||||
assert assignment.agent_id == "agent_1"
|
||||
assert assignment.task_id == "task_1"
|
||||
assert assignment.responsibility_score == 0.85
|
||||
assert len(assignment.capability_matches) == 2
|
||||
assert assignment.success is None
|
||||
|
||||
def test_mark_completed(self):
|
||||
assignment = ResponsibilityAssignment(
|
||||
agent_id="agent_1",
|
||||
task_id="task_1",
|
||||
responsibility_score=0.85,
|
||||
reasoning="Test assignment"
|
||||
)
|
||||
|
||||
assert assignment.completed_at is None
|
||||
assert assignment.success is None
|
||||
|
||||
assignment.mark_completed(True)
|
||||
|
||||
assert assignment.completed_at is not None
|
||||
assert assignment.success is True
|
||||
|
||||
|
||||
class TestAccountabilityRecord:
|
||||
def test_create_record(self):
|
||||
record = AccountabilityRecord(
|
||||
agent_id="agent_1",
|
||||
action_type="task_execution",
|
||||
action_description="Executed data analysis task",
|
||||
task_id="task_1",
|
||||
context={"complexity": "high", "duration": 3600}
|
||||
)
|
||||
|
||||
assert record.agent_id == "agent_1"
|
||||
assert record.action_type == "task_execution"
|
||||
assert record.context["complexity"] == "high"
|
||||
assert record.outcome is None
|
||||
|
||||
def test_set_outcome(self):
|
||||
record = AccountabilityRecord(
|
||||
agent_id="agent_1",
|
||||
action_type="decision",
|
||||
action_description="Chose algorithm X"
|
||||
)
|
||||
|
||||
record.set_outcome("Algorithm performed well", True)
|
||||
|
||||
assert record.outcome == "Algorithm performed well"
|
||||
assert record.success is True
|
||||
|
||||
|
||||
class TestPerformanceMetrics:
|
||||
def test_create_metrics(self):
|
||||
metrics = PerformanceMetrics(agent_id="agent_1")
|
||||
|
||||
assert metrics.agent_id == "agent_1"
|
||||
assert metrics.total_tasks == 0
|
||||
assert metrics.success_rate == 0.0
|
||||
assert metrics.quality_score == 0.5
|
||||
|
||||
def test_update_metrics_success(self):
|
||||
metrics = PerformanceMetrics(agent_id="agent_1")
|
||||
|
||||
metrics.update_metrics(True, 1800, 0.8)
|
||||
|
||||
assert metrics.total_tasks == 1
|
||||
assert metrics.successful_tasks == 1
|
||||
assert metrics.failed_tasks == 0
|
||||
assert metrics.success_rate == 1.0
|
||||
assert metrics.average_completion_time == 1800
|
||||
assert metrics.reliability_score == 1.0
|
||||
|
||||
def test_update_metrics_failure(self):
|
||||
metrics = PerformanceMetrics(agent_id="agent_1")
|
||||
|
||||
metrics.update_metrics(False, 3600)
|
||||
|
||||
assert metrics.total_tasks == 1
|
||||
assert metrics.successful_tasks == 0
|
||||
assert metrics.failed_tasks == 1
|
||||
assert metrics.success_rate == 0.0
|
||||
|
||||
def test_update_metrics_mixed(self):
|
||||
metrics = PerformanceMetrics(agent_id="agent_1")
|
||||
|
||||
metrics.update_metrics(True, 1800, 0.8)
|
||||
metrics.update_metrics(False, 3600, 0.3)
|
||||
metrics.update_metrics(True, 2400, 0.9)
|
||||
|
||||
assert metrics.total_tasks == 3
|
||||
assert metrics.successful_tasks == 2
|
||||
assert metrics.failed_tasks == 1
|
||||
assert abs(metrics.success_rate - 2/3) < 0.001
|
||||
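The three metrics tests constrain update_metrics to keep running counts, a success rate, and an average completion time; the quality update rule is not fully pinned down. A sketch consistent with the assertions (the quality smoothing factor is an assumption):

def update_metrics(self, success: bool, completion_time: float, quality: float | None = None) -> None:
    # Running counts and success rate.
    self.total_tasks += 1
    if success:
        self.successful_tasks += 1
    else:
        self.failed_tasks += 1
    self.success_rate = self.successful_tasks / self.total_tasks
    # Running average of completion time across all tasks.
    self.average_completion_time = (
        self.average_completion_time * (self.total_tasks - 1) + completion_time
    ) / self.total_tasks
    # Reliability mirrors the success rate in this sketch.
    self.reliability_score = self.success_rate
    if quality is not None:
        # Smooth the quality score toward the latest observation (factor assumed).
        self.quality_score = 0.7 * self.quality_score + 0.3 * quality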
|
||||
|
||||
class TestTaskRequirement:
|
||||
def test_create_requirement(self):
|
||||
requirement = TaskRequirement(
|
||||
capability_name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
minimum_proficiency=0.7,
|
||||
weight=1.5,
|
||||
keywords=["python", "coding"]
|
||||
)
|
||||
|
||||
assert requirement.capability_name == "Python Programming"
|
||||
assert requirement.capability_type == CapabilityType.TECHNICAL
|
||||
assert requirement.minimum_proficiency == 0.7
|
||||
assert requirement.weight == 1.5
|
||||
assert "python" in requirement.keywords
|
||||
226
tests/responsibility/test_performance.py
Normal file
@@ -0,0 +1,226 @@
|
||||
"""
|
||||
Tests for performance-based capability adjustment.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from datetime import timedelta
|
||||
from unittest.mock import Mock
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.responsibility.models import AgentCapability, CapabilityType, PerformanceMetrics
|
||||
from crewai.responsibility.hierarchy import CapabilityHierarchy
|
||||
from crewai.responsibility.performance import PerformanceTracker
|
||||
|
||||
|
||||
class TestPerformanceTracker:
|
||||
@pytest.fixture
|
||||
def hierarchy(self):
|
||||
return CapabilityHierarchy()
|
||||
|
||||
@pytest.fixture
|
||||
def tracker(self, hierarchy):
|
||||
return PerformanceTracker(hierarchy)
|
||||
|
||||
@pytest.fixture
|
||||
def mock_agent(self, hierarchy):
|
||||
agent = Mock(spec=BaseAgent)
|
||||
agent.role = "Test Agent"
|
||||
|
||||
capability = AgentCapability(
|
||||
name="Python Programming",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
proficiency_level=0.7,
|
||||
confidence_score=0.8
|
||||
)
|
||||
|
||||
hierarchy.add_agent(agent, [capability])
|
||||
return agent
|
||||
|
||||
def test_record_task_completion_success(self, tracker, mock_agent):
|
||||
tracker.record_task_completion(
|
||||
agent=mock_agent,
|
||||
task_success=True,
|
||||
completion_time=1800.0,
|
||||
quality_score=0.9
|
||||
)
|
||||
|
||||
metrics = tracker.get_performance_metrics(mock_agent)
|
||||
|
||||
assert metrics is not None
|
||||
assert metrics.total_tasks == 1
|
||||
assert metrics.successful_tasks == 1
|
||||
assert metrics.failed_tasks == 0
|
||||
assert metrics.success_rate == 1.0
|
||||
assert metrics.average_completion_time == 1800.0
|
||||
assert metrics.quality_score > 0.5 # Should be updated towards 0.9
|
||||
|
||||
def test_record_task_completion_failure(self, tracker, mock_agent):
|
||||
tracker.record_task_completion(
|
||||
agent=mock_agent,
|
||||
task_success=False,
|
||||
completion_time=3600.0,
|
||||
quality_score=0.3
|
||||
)
|
||||
|
||||
metrics = tracker.get_performance_metrics(mock_agent)
|
||||
|
||||
assert metrics is not None
|
||||
assert metrics.total_tasks == 1
|
||||
assert metrics.successful_tasks == 0
|
||||
assert metrics.failed_tasks == 1
|
||||
assert metrics.success_rate == 0.0
|
||||
|
||||
def test_multiple_task_completions(self, tracker, mock_agent):
|
||||
tracker.record_task_completion(mock_agent, True, 1800.0, 0.8)
|
||||
tracker.record_task_completion(mock_agent, False, 3600.0, 0.4)
|
||||
tracker.record_task_completion(mock_agent, True, 2400.0, 0.9)
|
||||
|
||||
metrics = tracker.get_performance_metrics(mock_agent)
|
||||
|
||||
assert metrics.total_tasks == 3
|
||||
assert metrics.successful_tasks == 2
|
||||
assert metrics.failed_tasks == 1
|
||||
assert abs(metrics.success_rate - 2/3) < 0.001
|
||||
|
||||
def test_capability_adjustment_on_success(self, tracker, mock_agent):
|
||||
initial_capabilities = tracker.hierarchy.get_agent_capabilities(mock_agent)
|
||||
initial_proficiency = initial_capabilities[0].proficiency_level
|
||||
|
||||
tracker.record_task_completion(
|
||||
agent=mock_agent,
|
||||
task_success=True,
|
||||
completion_time=1800.0,
|
||||
quality_score=0.9,
|
||||
capability_used="Python Programming"
|
||||
)
|
||||
|
||||
updated_capabilities = tracker.hierarchy.get_agent_capabilities(mock_agent)
|
||||
updated_proficiency = updated_capabilities[0].proficiency_level
|
||||
|
||||
assert updated_proficiency >= initial_proficiency
|
||||
|
||||
def test_capability_adjustment_on_failure(self, tracker, mock_agent):
|
||||
initial_capabilities = tracker.hierarchy.get_agent_capabilities(mock_agent)
|
||||
initial_proficiency = initial_capabilities[0].proficiency_level
|
||||
|
||||
tracker.record_task_completion(
|
||||
agent=mock_agent,
|
||||
task_success=False,
|
||||
completion_time=3600.0,
|
||||
quality_score=0.2,
|
||||
capability_used="Python Programming"
|
||||
)
|
||||
|
||||
updated_capabilities = tracker.hierarchy.get_agent_capabilities(mock_agent)
|
||||
updated_proficiency = updated_capabilities[0].proficiency_level
|
||||
|
||||
assert updated_proficiency <= initial_proficiency
|
||||
|
||||
def test_adjust_capabilities_based_on_performance(self, tracker, mock_agent):
|
||||
for _ in range(5):
|
||||
tracker.record_task_completion(mock_agent, True, 1800.0, 0.9)
|
||||
for _ in range(2):
|
||||
tracker.record_task_completion(mock_agent, False, 3600.0, 0.3)
|
||||
|
||||
adjustments = tracker.adjust_capabilities_based_on_performance(mock_agent)
|
||||
|
||||
assert isinstance(adjustments, list)
|
||||
|
||||
def test_get_performance_trends(self, tracker, mock_agent):
|
||||
tracker.record_task_completion(mock_agent, True, 1800.0, 0.8)
|
||||
tracker.record_task_completion(mock_agent, True, 2000.0, 0.9)
|
||||
|
||||
trends = tracker.get_performance_trends(mock_agent)
|
||||
|
||||
assert "success_rate" in trends
|
||||
assert "quality_score" in trends
|
||||
assert "efficiency_score" in trends
|
||||
assert "reliability_score" in trends
|
||||
|
||||
assert len(trends["success_rate"]) > 0
|
||||
|
||||
def test_identify_improvement_opportunities(self, tracker, mock_agent):
|
||||
tracker.record_task_completion(mock_agent, False, 7200.0, 0.3) # Long time, low quality
|
||||
tracker.record_task_completion(mock_agent, False, 6000.0, 0.4)
|
||||
tracker.record_task_completion(mock_agent, True, 5400.0, 0.5)
|
||||
|
||||
opportunities = tracker.identify_improvement_opportunities(mock_agent)
|
||||
|
||||
assert isinstance(opportunities, list)
|
||||
assert len(opportunities) > 0
|
||||
|
||||
areas = [opp["area"] for opp in opportunities]
|
||||
assert "success_rate" in areas or "quality" in areas or "efficiency" in areas
|
||||
|
||||
def test_compare_agent_performance(self, tracker, hierarchy):
|
||||
agent1 = Mock(spec=BaseAgent)
|
||||
agent1.role = "Agent 1"
|
||||
agent2 = Mock(spec=BaseAgent)
|
||||
agent2.role = "Agent 2"
|
||||
|
||||
capability = AgentCapability(
|
||||
name="Test Capability",
|
||||
capability_type=CapabilityType.TECHNICAL,
|
||||
proficiency_level=0.7,
|
||||
confidence_score=0.8
|
||||
)
|
||||
|
||||
hierarchy.add_agent(agent1, [capability])
|
||||
hierarchy.add_agent(agent2, [capability])
|
||||
|
||||
tracker.record_task_completion(agent1, True, 1800.0, 0.9) # Good performance
|
||||
tracker.record_task_completion(agent1, True, 2000.0, 0.8)
|
||||
|
||||
tracker.record_task_completion(agent2, False, 3600.0, 0.4) # Poor performance
|
||||
tracker.record_task_completion(agent2, True, 4000.0, 0.5)
|
||||
|
||||
comparison = tracker.compare_agent_performance([agent1, agent2], metric="overall")
|
||||
|
||||
assert len(comparison) == 2
|
||||
assert comparison[0][1] > comparison[1][1] # First agent should have higher score
|
||||
|
||||
success_comparison = tracker.compare_agent_performance([agent1, agent2], metric="success_rate")
|
||||
assert len(success_comparison) == 2
|
||||
|
||||
def test_learning_rate_effect(self, tracker, mock_agent):
|
||||
original_learning_rate = tracker.learning_rate
|
||||
|
||||
tracker.learning_rate = 0.5
|
||||
|
||||
initial_capabilities = tracker.hierarchy.get_agent_capabilities(mock_agent)
|
||||
initial_proficiency = initial_capabilities[0].proficiency_level
|
||||
|
||||
tracker.record_task_completion(
|
||||
mock_agent, True, 1800.0, 0.9, capability_used="Python Programming"
|
||||
)
|
||||
|
||||
high_lr_capabilities = tracker.hierarchy.get_agent_capabilities(mock_agent)
|
||||
high_lr_proficiency = high_lr_capabilities[0].proficiency_level
|
||||
|
||||
tracker.hierarchy.update_agent_capability(
|
||||
mock_agent, "Python Programming", initial_proficiency, 0.8
|
||||
)
|
||||
tracker.learning_rate = 0.01
|
||||
|
||||
tracker.record_task_completion(
|
||||
mock_agent, True, 1800.0, 0.9, capability_used="Python Programming"
|
||||
)
|
||||
|
||||
low_lr_capabilities = tracker.hierarchy.get_agent_capabilities(mock_agent)
|
||||
low_lr_proficiency = low_lr_capabilities[0].proficiency_level
|
||||
|
||||
high_lr_change = abs(high_lr_proficiency - initial_proficiency)
|
||||
low_lr_change = abs(low_lr_proficiency - initial_proficiency)
|
||||
|
||||
assert high_lr_change > low_lr_change
|
||||
|
||||
tracker.learning_rate = original_learning_rate
|
||||
|
||||
def test_performance_metrics_creation(self, tracker, mock_agent):
|
||||
assert tracker.get_performance_metrics(mock_agent) is None
|
||||
|
||||
tracker.record_task_completion(mock_agent, True, 1800.0)
|
||||
|
||||
metrics = tracker.get_performance_metrics(mock_agent)
|
||||
assert metrics is not None
|
||||
assert metrics.agent_id == tracker._get_agent_id(mock_agent)
|
||||
@@ -16,7 +16,8 @@ class MockCrew:
@pytest.fixture
def mock_mem0_memory():
"""Fixture to create a mock Memory instance"""
return MagicMock(spec=Memory)
mock_memory = MagicMock(spec=Memory)
return mock_memory


@pytest.fixture
@@ -72,7 +73,8 @@ def test_mem0_storage_initialization(mem0_storage_with_mocked_config, mock_mem0_
@pytest.fixture
def mock_mem0_memory_client():
"""Fixture to create a mock MemoryClient instance"""
return MagicMock(spec=MemoryClient)
mock_memory = MagicMock(spec=MemoryClient)
return mock_memory


@pytest.fixture
@@ -94,7 +96,8 @@ def mem0_storage_with_memory_client_using_config_from_crew(mock_mem0_memory_clie
"infer": True
}

return Mem0Storage(type="short_term", crew=crew, config=embedder_config)
mem0_storage = Mem0Storage(type="short_term", crew=crew, config=embedder_config)
return mem0_storage


@pytest.fixture
@@ -108,7 +111,8 @@ def mem0_storage_with_memory_client_using_explictly_config(mock_mem0_memory_clie
crew = MockCrew()
new_config = {"provider": "mem0", "config": {"api_key": "new-api-key"}}

return Mem0Storage(type="short_term", crew=crew, config=new_config)
mem0_storage = Mem0Storage(type="short_term", crew=crew, config=new_config)
return mem0_storage


def test_mem0_storage_with_memory_client_initialization(
@@ -168,14 +172,14 @@ def test_save_method_with_memory_oss(mem0_storage_with_mocked_config):

# Test short_term memory type (already set in fixture)
test_value = "This is a test memory"
test_metadata = {'description': 'Respond to user conversation. User message: What do you know about me?', 'messages': [{'role': 'system', 'content': 'You are Friendly chatbot assistant. You are a kind and knowledgeable chatbot assistant. You excel at understanding user needs, providing helpful responses, and maintaining engaging conversations. You remember previous interactions to provide a personalized experience.\nYour personal goal is: Engage in useful and interesting conversations with users while remembering context.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!'}, {'role': 'user', 'content': '\nCurrent Task: Respond to user conversation. User message: What do you know about me?\n\nThis is the expected criteria for your final answer: Contextually appropriate, helpful, and friendly response.\nyou MUST return the actual complete content as the final answer, not a summary.\n\n# Useful context: \nExternal memories:\n- User is from India\n- User is interested in the solar system\n- User name is Vidit Ostwal\n- User is interested in French cuisine\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:'}, {'role': 'assistant', 'content': "I now can give a great answer \nFinal Answer: Hi Vidit! From our previous conversations, I know you're from India and have a great interest in the solar system. It's fascinating to explore the wonders of space, isn't it? Also, I remember you have a passion for French cuisine, which has so many delightful dishes to explore. If there's anything specific you'd like to discuss or learn about—whether it's about the solar system or some great French recipes—feel free to let me know! I'm here to help."}], 'agent': 'Friendly chatbot assistant'}
test_metadata = {"key": "value"}

mem0_storage.save(test_value, test_metadata)

mem0_storage.memory.add.assert_called_once_with(
[{'role': 'user', 'content': 'What do you know about me?'}, {'role': 'assistant', 'content': "Hi Vidit! From our previous conversations, I know you're from India and have a great interest in the solar system. It's fascinating to explore the wonders of space, isn't it? Also, I remember you have a passion for French cuisine, which has so many delightful dishes to explore. If there's anything specific you'd like to discuss or learn about—whether it's about the solar system or some great French recipes—feel free to let me know! I'm here to help."}],
[{"role": "assistant" , "content": test_value}],
infer=True,
metadata={'type': 'short_term', 'description': 'Respond to user conversation. User message: What do you know about me?', 'agent': 'Friendly chatbot assistant'},
metadata={"type": "short_term", "key": "value"},
run_id="my_run_id",
user_id="test_user",
agent_id='Test_Agent'
@@ -187,14 +191,14 @@ def test_save_method_with_multiple_agents(mem0_storage_with_mocked_config):
mem0_storage.memory.add = MagicMock()

test_value = "This is a test memory"
test_metadata = {'description': 'Respond to user conversation. User message: What do you know about me?', 'messages': [{'role': 'system', 'content': 'You are Friendly chatbot assistant. You are a kind and knowledgeable chatbot assistant. You excel at understanding user needs, providing helpful responses, and maintaining engaging conversations. You remember previous interactions to provide a personalized experience.\nYour personal goal is: Engage in useful and interesting conversations with users while remembering context.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!'}, {'role': 'user', 'content': '\nCurrent Task: Respond to user conversation. User message: What do you know about me?\n\nThis is the expected criteria for your final answer: Contextually appropriate, helpful, and friendly response.\nyou MUST return the actual complete content as the final answer, not a summary.\n\n# Useful context: \nExternal memories:\n- User is from India\n- User is interested in the solar system\n- User name is Vidit Ostwal\n- User is interested in French cuisine\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:'}, {'role': 'assistant', 'content': "I now can give a great answer \nFinal Answer: Hi Vidit! From our previous conversations, I know you're from India and have a great interest in the solar system. It's fascinating to explore the wonders of space, isn't it? Also, I remember you have a passion for French cuisine, which has so many delightful dishes to explore. If there's anything specific you'd like to discuss or learn about—whether it's about the solar system or some great French recipes—feel free to let me know! I'm here to help."}], 'agent': 'Friendly chatbot assistant'}
test_metadata = {"key": "value"}

mem0_storage.save(test_value, test_metadata)

mem0_storage.memory.add.assert_called_once_with(
[{'role': 'user', 'content': 'What do you know about me?'}, {'role': 'assistant', 'content': "Hi Vidit! From our previous conversations, I know you're from India and have a great interest in the solar system. It's fascinating to explore the wonders of space, isn't it? Also, I remember you have a passion for French cuisine, which has so many delightful dishes to explore. If there's anything specific you'd like to discuss or learn about—whether it's about the solar system or some great French recipes—feel free to let me know! I'm here to help."}],
[{"role": "assistant" , "content": test_value}],
infer=True,
metadata={'type': 'short_term', 'description': 'Respond to user conversation. User message: What do you know about me?', 'agent': 'Friendly chatbot assistant'},
metadata={"type": "short_term", "key": "value"},
run_id="my_run_id",
user_id="test_user",
agent_id='Test_Agent_Test_Agent_2_Test_Agent_3'
@@ -208,14 +212,14 @@ def test_save_method_with_memory_client(mem0_storage_with_memory_client_using_co

# Test short_term memory type (already set in fixture)
test_value = "This is a test memory"
test_metadata = {'description': 'Respond to user conversation. User message: What do you know about me?', 'messages': [{'role': 'system', 'content': 'You are Friendly chatbot assistant. You are a kind and knowledgeable chatbot assistant. You excel at understanding user needs, providing helpful responses, and maintaining engaging conversations. You remember previous interactions to provide a personalized experience.\nYour personal goal is: Engage in useful and interesting conversations with users while remembering context.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!'}, {'role': 'user', 'content': '\nCurrent Task: Respond to user conversation. User message: What do you know about me?\n\nThis is the expected criteria for your final answer: Contextually appropriate, helpful, and friendly response.\nyou MUST return the actual complete content as the final answer, not a summary.\n\n# Useful context: \nExternal memories:\n- User is from India\n- User is interested in the solar system\n- User name is Vidit Ostwal\n- User is interested in French cuisine\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:'}, {'role': 'assistant', 'content': "I now can give a great answer \nFinal Answer: Hi Vidit! From our previous conversations, I know you're from India and have a great interest in the solar system. It's fascinating to explore the wonders of space, isn't it? Also, I remember you have a passion for French cuisine, which has so many delightful dishes to explore. If there's anything specific you'd like to discuss or learn about—whether it's about the solar system or some great French recipes—feel free to let me know! I'm here to help."}], 'agent': 'Friendly chatbot assistant'}
test_metadata = {"key": "value"}

mem0_storage.save(test_value, test_metadata)

mem0_storage.memory.add.assert_called_once_with(
[{'role': 'user', 'content': 'What do you know about me?'}, {'role': 'assistant', 'content': "Hi Vidit! From our previous conversations, I know you're from India and have a great interest in the solar system. It's fascinating to explore the wonders of space, isn't it? Also, I remember you have a passion for French cuisine, which has so many delightful dishes to explore. If there's anything specific you'd like to discuss or learn about—whether it's about the solar system or some great French recipes—feel free to let me know! I'm here to help."}],
[{'role': 'assistant' , 'content': test_value}],
infer=True,
metadata={'type': 'short_term', 'description': 'Respond to user conversation. User message: What do you know about me?', 'agent': 'Friendly chatbot assistant'},
metadata={"type": "short_term", "key": "value"},
version="v2",
run_id="my_run_id",
includes="include1",

@@ -1,216 +0,0 @@
# ruff: noqa: S105

import os
import pytest
from unittest.mock import patch

from crewai.context import (
set_platform_integration_token,
get_platform_integration_token,
platform_context,
_platform_integration_token,
)


class TestPlatformIntegrationToken:
def setup_method(self):
_platform_integration_token.set(None)

def teardown_method(self):
_platform_integration_token.set(None)

def test_set_platform_integration_token(self):
test_token = "test-token-123"

assert get_platform_integration_token() is None

set_platform_integration_token(test_token)

assert get_platform_integration_token() == test_token

def test_get_platform_integration_token_from_context_var(self):
test_token = "context-var-token"

_platform_integration_token.set(test_token)

assert get_platform_integration_token() == test_token

@patch.dict(os.environ, {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "env-token-456"})
def test_get_platform_integration_token_from_env_var(self):
assert _platform_integration_token.get() is None

assert get_platform_integration_token() == "env-token-456"

@patch.dict(os.environ, {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "env-token"})
def test_context_var_takes_precedence_over_env_var(self):
context_token = "context-token"

set_platform_integration_token(context_token)

assert get_platform_integration_token() == context_token

@patch.dict(os.environ, {}, clear=True)
def test_get_platform_integration_token_returns_none_when_not_set(self):
assert _platform_integration_token.get() is None

assert get_platform_integration_token() is None

def test_platform_context_manager_basic_usage(self):
test_token = "context-manager-token"

assert get_platform_integration_token() is None

with platform_context(test_token):
assert get_platform_integration_token() == test_token

assert get_platform_integration_token() is None

def test_platform_context_manager_nested_contexts(self):
"""Test nested platform_context context managers."""
outer_token = "outer-token"
inner_token = "inner-token"

assert get_platform_integration_token() is None

with platform_context(outer_token):
assert get_platform_integration_token() == outer_token

with platform_context(inner_token):
assert get_platform_integration_token() == inner_token

assert get_platform_integration_token() == outer_token

assert get_platform_integration_token() is None

def test_platform_context_manager_preserves_existing_token(self):
"""Test that platform_context preserves existing token when exiting."""
initial_token = "initial-token"
context_token = "context-token"

set_platform_integration_token(initial_token)
assert get_platform_integration_token() == initial_token

with platform_context(context_token):
assert get_platform_integration_token() == context_token

assert get_platform_integration_token() == initial_token

def test_platform_context_manager_exception_handling(self):
"""Test that platform_context properly resets token even when exception occurs."""
initial_token = "initial-token"
context_token = "context-token"

set_platform_integration_token(initial_token)

with pytest.raises(ValueError):
with platform_context(context_token):
assert get_platform_integration_token() == context_token
raise ValueError("Test exception")

assert get_platform_integration_token() == initial_token

def test_platform_context_manager_with_none_initial_state(self):
"""Test platform_context when initial state is None."""
context_token = "context-token"

assert get_platform_integration_token() is None

with pytest.raises(RuntimeError):
with platform_context(context_token):
assert get_platform_integration_token() == context_token
raise RuntimeError("Test exception")

assert get_platform_integration_token() is None

@patch.dict(os.environ, {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "env-backup"})
def test_platform_context_with_env_fallback(self):
"""Test platform_context interaction with environment variable fallback."""
context_token = "context-token"

assert get_platform_integration_token() == "env-backup"

with platform_context(context_token):
assert get_platform_integration_token() == context_token

assert get_platform_integration_token() == "env-backup"

def test_multiple_sequential_context_managers(self):
"""Test multiple sequential uses of platform_context."""
token1 = "token-1"
token2 = "token-2"
token3 = "token-3"

with platform_context(token1):
assert get_platform_integration_token() == token1

assert get_platform_integration_token() is None

with platform_context(token2):
assert get_platform_integration_token() == token2

assert get_platform_integration_token() is None

with platform_context(token3):
assert get_platform_integration_token() == token3

assert get_platform_integration_token() is None

def test_empty_string_token(self):
empty_token = ""

set_platform_integration_token(empty_token)
assert get_platform_integration_token() == ""

with platform_context(empty_token):
assert get_platform_integration_token() == ""

def test_special_characters_in_token(self):
special_token = "token-with-!@#$%^&*()_+-={}[]|\\:;\"'<>?,./"

set_platform_integration_token(special_token)
assert get_platform_integration_token() == special_token

with platform_context(special_token):
assert get_platform_integration_token() == special_token

def test_very_long_token(self):
long_token = "a" * 10000

set_platform_integration_token(long_token)
assert get_platform_integration_token() == long_token

with platform_context(long_token):
assert get_platform_integration_token() == long_token

@patch.dict(os.environ, {"CREWAI_PLATFORM_INTEGRATION_TOKEN": ""})
def test_empty_env_var(self):
assert _platform_integration_token.get() is None
assert get_platform_integration_token() == ""

@patch('crewai.context.os.getenv')
def test_env_var_access_error_handling(self, mock_getenv):
mock_getenv.side_effect = OSError("Environment access error")

with pytest.raises(OSError):
get_platform_integration_token()

def test_context_var_isolation_between_tests(self):
"""Test that context variable changes don't leak between test methods."""
test_token = "isolation-test-token"

assert get_platform_integration_token() is None

set_platform_integration_token(test_token)
assert get_platform_integration_token() == test_token


def test_context_manager_return_value(self):
"""Test that platform_context can be used in with statement with return value."""
test_token = "return-value-token"

with platform_context(test_token):
assert get_platform_integration_token() == test_token

with platform_context(test_token) as ctx:
assert ctx is None
assert get_platform_integration_token() == test_token
@@ -1,11 +1,11 @@
"""Test Agent creation and execution basic functionality."""

import hashlib
import json
from collections import defaultdict
from concurrent.futures import Future
from hashlib import md5
from unittest import mock
from unittest.mock import ANY, MagicMock, patch
from collections import defaultdict

import pydantic_core
import pytest
@@ -14,29 +14,11 @@ from crewai.agent import Agent
from crewai.agents import CacheHandler
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.crew_events import (
CrewTestCompletedEvent,
CrewTestStartedEvent,
CrewTrainCompletedEvent,
CrewTrainStartedEvent,
)
from crewai.events.types.memory_events import (
MemoryQueryCompletedEvent,
MemoryQueryFailedEvent,
MemoryQueryStartedEvent,
MemoryRetrievalCompletedEvent,
MemoryRetrievalStartedEvent,
MemorySaveCompletedEvent,
MemorySaveFailedEvent,
MemorySaveStartedEvent,
)
from crewai.flow import Flow, start
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.llm import LLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.process import Process
@@ -45,9 +27,28 @@ from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.types.usage_metrics import UsageMetrics
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.crew_events import (
CrewTestCompletedEvent,
CrewTestStartedEvent,
CrewTrainCompletedEvent,
CrewTrainStartedEvent,
)
from crewai.utilities.rpm_controller import RPMController
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler

from crewai.events.types.memory_events import (
MemorySaveStartedEvent,
MemorySaveCompletedEvent,
MemorySaveFailedEvent,
MemoryQueryStartedEvent,
MemoryQueryCompletedEvent,
MemoryQueryFailedEvent,
MemoryRetrievalStartedEvent,
MemoryRetrievalCompletedEvent,
)
from crewai.memory.external.external_memory import ExternalMemory


@pytest.fixture
def ceo():
@@ -363,7 +364,7 @@ def test_hierarchical_process(researcher, writer):

assert (
result.raw
== "**1. The Rise of Autonomous AI Agents in Daily Life** \nAs artificial intelligence technology progresses, the integration of autonomous AI agents into everyday life becomes increasingly prominent. These agents, capable of making decisions without human intervention, are reshaping industries from healthcare to finance. Exploring case studies where autonomous AI has successfully decreased operational costs or improved efficiency can reveal not only the benefits but also the ethical implications of delegating decision-making to machines. This topic offers an exciting opportunity to dive into the AI landscape, showcasing current developments such as AI assistants and autonomous vehicles.\n\n**2. Ethical Implications of Generative AI in Creative Industries** \nThe surge of generative AI tools in creative fields, such as art, music, and writing, has sparked a heated debate about authorship and originality. This article could investigate how these tools are being used by artists and creators, examining both the potential for innovation and the risk of devaluing traditional art forms. Highlighting perspectives from creators, legal experts, and ethicists could provide a comprehensive overview of the challenges faced, including copyright concerns and the emotional impact on human artists. This discussion is vital as the creative landscape evolves alongside technological advancements, making it ripe for exploration.\n\n**3. AI in Climate Change Mitigation: Current Solutions and Future Potential** \nAs the world grapples with climate change, AI technology is increasingly being harnessed to develop innovative solutions for sustainability. From predictive analytics that optimize energy consumption to machine learning algorithms that improve carbon capture methods, AI's potential in environmental science is vast. This topic invites an exploration of existing AI applications in climate initiatives, with a focus on groundbreaking research and initiatives aimed at reducing humanity's carbon footprint. Highlighting successful projects and technology partnerships can illustrate the positive impact AI can have on global climate efforts, inspiring further exploration and investment in this area.\n\n**4. The Future of Work: How AI is Reshaping Employment Landscapes** \nThe discussions around AI's impact on the workforce are both urgent and complex, as advances in automation and machine learning continue to transform the job market. This article could delve into the current trends of AI-driven job displacement alongside opportunities for upskilling and the creation of new job roles. By examining case studies of companies that integrate AI effectively and the resulting workforce adaptations, readers can gain valuable insights into preparing for a future where humans and AI collaborate. This exploration highlights the importance of policies that promote workforce resilience in the face of change.\n\n**5. Decentralized AI: Exploring the Role of Blockchain in AI Development** \nAs blockchain technology sweeps through various sectors, its application in AI development presents a fascinating topic worth examining. Decentralized AI could address issues of data privacy, security, and democratization in AI models by allowing users to retain ownership of data while benefiting from AI's capabilities. This article could analyze how decentralized networks are disrupting traditional AI development models, featuring innovative projects that harness the synergy between blockchain and AI. 
Highlighting potential pitfalls and the future landscape of decentralized AI could stimulate discussion among technologists, entrepreneurs, and policymakers alike.\n\nThese topics not only reflect current trends but also probe deeper into ethical and practical considerations, making them timely and relevant for contemporary audiences."
== "1. **The Rise of Autonomous AI Agents in Daily Life** \n As artificial intelligence technology progresses, the integration of autonomous AI agents into everyday life becomes increasingly prominent. These agents, capable of making decisions without human intervention, are reshaping industries from healthcare to finance. Exploring case studies where autonomous AI has successfully decreased operational costs or improved efficiency can reveal not only the benefits but also the ethical implications of delegating decision-making to machines. This topic offers an exciting opportunity to dive into the AI landscape, showcasing current developments such as AI assistants and autonomous vehicles.\n\n2. **Ethical Implications of Generative AI in Creative Industries** \n The surge of generative AI tools in creative fields, such as art, music, and writing, has sparked a heated debate about authorship and originality. This article could investigate how these tools are being used by artists and creators, examining both the potential for innovation and the risk of devaluing traditional art forms. Highlighting perspectives from creators, legal experts, and ethicists could provide a comprehensive overview of the challenges faced, including copyright concerns and the emotional impact on human artists. This discussion is vital as the creative landscape evolves alongside technological advancements, making it ripe for exploration.\n\n3. **AI in Climate Change Mitigation: Current Solutions and Future Potential** \n As the world grapples with climate change, AI technology is increasingly being harnessed to develop innovative solutions for sustainability. From predictive analytics that optimize energy consumption to machine learning algorithms that improve carbon capture methods, AI's potential in environmental science is vast. This topic invites an exploration of existing AI applications in climate initiatives, with a focus on groundbreaking research and initiatives aimed at reducing humanity's carbon footprint. Highlighting successful projects and technology partnerships can illustrate the positive impact AI can have on global climate efforts, inspiring further exploration and investment in this area.\n\n4. **The Future of Work: How AI is Reshaping Employment Landscapes** \n The discussions around AI's impact on the workforce are both urgent and complex, as advances in automation and machine learning continue to transform the job market. This article could delve into the current trends of AI-driven job displacement alongside opportunities for upskilling and the creation of new job roles. By examining case studies of companies that integrate AI effectively and the resulting workforce adaptations, readers can gain valuable insights into preparing for a future where humans and AI collaborate. This exploration highlights the importance of policies that promote workforce resilience in the face of change.\n\n5. **Decentralized AI: Exploring the Role of Blockchain in AI Development** \n As blockchain technology sweeps through various sectors, its application in AI development presents a fascinating topic worth examining. Decentralized AI could address issues of data privacy, security, and democratization in AI models by allowing users to retain ownership of data while benefiting from AI's capabilities. This article could analyze how decentralized networks are disrupting traditional AI development models, featuring innovative projects that harness the synergy between blockchain and AI. 
Highlighting potential pitfalls and the future landscape of decentralized AI could stimulate discussion among technologists, entrepreneurs, and policymakers alike."
)


@@ -569,6 +570,8 @@ def test_crew_with_delegating_agents(ceo, writer):

@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer):
from typing import Type

from pydantic import BaseModel, Field

from crewai.tools import BaseTool
@@ -581,7 +584,7 @@ def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer)
class TestTool(BaseTool):
name: str = "Test Tool"
description: str = "A test tool that just returns the input"
args_schema: type[BaseModel] = TestToolInput
args_schema: Type[BaseModel] = TestToolInput

def _run(self, query: str) -> str:
return f"Processed: {query}"
@@ -619,16 +622,18 @@ def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer)
_, kwargs = mock_execute_sync.call_args
tools = kwargs["tools"]

assert any(isinstance(tool, TestTool) for tool in tools), (
"TestTool should be present"
)
assert any("delegate" in tool.name.lower() for tool in tools), (
"Delegation tool should be present"
)
assert any(
isinstance(tool, TestTool) for tool in tools
), "TestTool should be present"
assert any(
"delegate" in tool.name.lower() for tool in tools
), "Delegation tool should be present"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer):
from typing import Type

from pydantic import BaseModel, Field

from crewai.tools import BaseTool
@@ -641,7 +646,7 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer
class TestTool(BaseTool):
name: str = "Test Tool"
description: str = "A test tool that just returns the input"
args_schema: type[BaseModel] = TestToolInput
args_schema: Type[BaseModel] = TestToolInput

def _run(self, query: str) -> str:
return f"Processed: {query}"
@@ -681,16 +686,18 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer
_, kwargs = mock_execute_sync.call_args
tools = kwargs["tools"]

assert any(isinstance(tool, TestTool) for tool in new_ceo.tools), (
"TestTool should be present"
)
assert any("delegate" in tool.name.lower() for tool in tools), (
"Delegation tool should be present"
)
assert any(
isinstance(tool, TestTool) for tool in new_ceo.tools
), "TestTool should be present"
assert any(
"delegate" in tool.name.lower() for tool in tools
), "Delegation tool should be present"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_tools_override_agent_tools(researcher):
from typing import Type

from pydantic import BaseModel, Field

from crewai.tools import BaseTool
@@ -703,7 +710,7 @@ def test_task_tools_override_agent_tools(researcher):
class TestTool(BaseTool):
name: str = "Test Tool"
description: str = "A test tool that just returns the input"
args_schema: type[BaseModel] = TestToolInput
args_schema: Type[BaseModel] = TestToolInput

def _run(self, query: str) -> str:
return f"Processed: {query}"
@@ -711,7 +718,7 @@ def test_task_tools_override_agent_tools(researcher):
class AnotherTestTool(BaseTool):
name: str = "Another Test Tool"
description: str = "Another test tool"
args_schema: type[BaseModel] = TestToolInput
args_schema: Type[BaseModel] = TestToolInput

def _run(self, query: str) -> str:
return f"Another processed: {query}"
@@ -747,6 +754,7 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
"""
Test that task tools override agent tools while preserving delegation tools when allow_delegation=True
"""
from typing import Type

from pydantic import BaseModel, Field

@@ -758,7 +766,7 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
class TestTool(BaseTool):
name: str = "Test Tool"
description: str = "A test tool that just returns the input"
args_schema: type[BaseModel] = TestToolInput
args_schema: Type[BaseModel] = TestToolInput

def _run(self, query: str) -> str:
return f"Processed: {query}"
@@ -766,7 +774,7 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
class AnotherTestTool(BaseTool):
name: str = "Another Test Tool"
description: str = "Another test tool"
args_schema: type[BaseModel] = TestToolInput
args_schema: Type[BaseModel] = TestToolInput

def _run(self, query: str) -> str:
return f"Another processed: {query}"
@@ -807,17 +815,17 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
used_tools = kwargs["tools"]

# Confirm AnotherTestTool is present but TestTool is not
assert any(isinstance(tool, AnotherTestTool) for tool in used_tools), (
"AnotherTestTool should be present"
)
assert not any(isinstance(tool, TestTool) for tool in used_tools), (
"TestTool should not be present among used tools"
)
assert any(
isinstance(tool, AnotherTestTool) for tool in used_tools
), "AnotherTestTool should be present"
assert not any(
isinstance(tool, TestTool) for tool in used_tools
), "TestTool should not be present among used tools"

# Confirm delegation tool(s) are present
assert any("delegate" in tool.name.lower() for tool in used_tools), (
"Delegation tool should be present"
)
assert any(
"delegate" in tool.name.lower() for tool in used_tools
), "Delegation tool should be present"

# Finally, make sure the agent's original tools remain unchanged
assert len(researcher_with_delegation.tools) == 1
@@ -921,9 +929,9 @@ def test_cache_hitting_between_agents(researcher, writer, ceo):
tool="multiplier", input={"first_number": 2, "second_number": 6}
)
assert cache_calls[0] == expected_call, f"First call mismatch: {cache_calls[0]}"
assert cache_calls[1] == expected_call, (
f"Second call mismatch: {cache_calls[1]}"
)
assert (
cache_calls[1] == expected_call
), f"Second call mismatch: {cache_calls[1]}"


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -1034,7 +1042,7 @@ def test_crew_kickoff_streaming_usage_metrics():
assert result.token_usage.cached_prompt_tokens == 0


def test_agents_rpm_is_never_set_if_crew_max_rpm_is_not_set():
def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
agent = Agent(
role="test role",
goal="test goal",
@@ -1387,9 +1395,8 @@ def test_kickoff_for_each_error_handling():
crew = Crew(agents=[agent], tasks=[task])

with patch.object(Crew, "kickoff") as mock_kickoff:
mock_kickoff.side_effect = [
*expected_outputs[:2],
Exception("Simulated kickoff error"),
mock_kickoff.side_effect = expected_outputs[:2] + [
Exception("Simulated kickoff error")
]
with pytest.raises(Exception, match="Simulated kickoff error"):
crew.kickoff_for_each(inputs=inputs)
@@ -1667,9 +1674,9 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():

# Verify that exactly one tool was used and it was a CodeInterpreterTool
assert len(used_tools) == 1, "Should have exactly one tool"
assert isinstance(used_tools[0], CodeInterpreterTool), (
"Tool should be CodeInterpreterTool"
)
assert isinstance(
used_tools[0], CodeInterpreterTool
), "Tool should be CodeInterpreterTool"


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -1753,10 +1760,10 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():
assert result.raw == "Howdy!"

assert result.token_usage == UsageMetrics(
total_tokens=1673,
prompt_tokens=1562,
completion_tokens=111,
successful_requests=3,
total_tokens=2390,
prompt_tokens=2264,
completion_tokens=126,
successful_requests=4,
cached_prompt_tokens=0,
)

@@ -2172,7 +2179,8 @@ def test_tools_with_custom_caching():
return first_number * second_number

def cache_func(args, result):
return result % 2 == 0
cache = result % 2 == 0
return cache

multiplcation_tool.cache_function = cache_func

@@ -2876,7 +2884,7 @@ def test_manager_agent_with_tools_raises_exception(researcher, writer):
tasks=[task],
)

with pytest.raises(Exception, match="Manager agent should not have tools"):
with pytest.raises(Exception):
crew.kickoff()


@@ -3100,7 +3108,7 @@ def test_crew_task_db_init():
db_handler.load()
assert True # If we reach this point, no exception was raised
except Exception as e:
pytest.fail(f"An exception was raised: {e!s}")
pytest.fail(f"An exception was raised: {str(e)}")


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -3486,9 +3494,8 @@ def test_key(researcher, writer):
process=Process.sequential,
tasks=tasks,
)
hash = md5(
f"{researcher.key}|{writer.key}|{tasks[0].key}|{tasks[1].key}".encode(),
usedforsecurity=False,
hash = hashlib.md5(
f"{researcher.key}|{writer.key}|{tasks[0].key}|{tasks[1].key}".encode()
).hexdigest()

assert crew.key == hash
@@ -3527,9 +3534,8 @@ def test_key_with_interpolated_inputs():
process=Process.sequential,
tasks=tasks,
)
hash = md5(
f"{researcher.key}|{writer.key}|{tasks[0].key}|{tasks[1].key}".encode(),
usedforsecurity=False,
hash = hashlib.md5(
f"{researcher.key}|{writer.key}|{tasks[0].key}|{tasks[1].key}".encode()
).hexdigest()

assert crew.key == hash
@@ -3809,15 +3815,16 @@ def test_fetch_inputs():
expected_placeholders = {"role_detail", "topic", "field"}
actual_placeholders = crew.fetch_inputs()

assert actual_placeholders == expected_placeholders, (
f"Expected {expected_placeholders}, but got {actual_placeholders}"
)
assert (
actual_placeholders == expected_placeholders
), f"Expected {expected_placeholders}, but got {actual_placeholders}"


def test_task_tools_preserve_code_execution_tools():
"""
Test that task tools don't override code execution tools when allow_code_execution=True
"""
from typing import Type

# Mock embedchain initialization to prevent race conditions in parallel CI execution
with patch("embedchain.client.Client.setup"):
@@ -3834,7 +3841,7 @@ def test_task_tools_preserve_code_execution_tools():
class TestTool(BaseTool):
name: str = "Test Tool"
description: str = "A test tool that just returns the input"
args_schema: type[BaseModel] = TestToolInput
args_schema: Type[BaseModel] = TestToolInput

def _run(self, query: str) -> str:
return f"Processed: {query}"
@@ -3885,20 +3892,20 @@ def test_task_tools_preserve_code_execution_tools():
used_tools = kwargs["tools"]

# Verify all expected tools are present
assert any(isinstance(tool, TestTool) for tool in used_tools), (
"Task's TestTool should be present"
)
assert any(isinstance(tool, CodeInterpreterTool) for tool in used_tools), (
"CodeInterpreterTool should be present"
)
assert any("delegate" in tool.name.lower() for tool in used_tools), (
"Delegation tool should be present"
)
assert any(
isinstance(tool, TestTool) for tool in used_tools
), "Task's TestTool should be present"
assert any(
isinstance(tool, CodeInterpreterTool) for tool in used_tools
), "CodeInterpreterTool should be present"
assert any(
"delegate" in tool.name.lower() for tool in used_tools
), "Delegation tool should be present"

# Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools)
assert len(used_tools) == 4, (
"Should have TestTool, CodeInterpreter, and 2 delegation tools"
)
assert (
len(used_tools) == 4
), "Should have TestTool, CodeInterpreter, and 2 delegation tools"


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -3942,9 +3949,9 @@ def test_multimodal_flag_adds_multimodal_tools():
used_tools = kwargs["tools"]

# Check that the multimodal tool was added
assert any(isinstance(tool, AddImageTool) for tool in used_tools), (
"AddImageTool should be present when agent is multimodal"
)
assert any(
isinstance(tool, AddImageTool) for tool in used_tools
), "AddImageTool should be present when agent is multimodal"

# Verify we have exactly one tool (just the AddImageTool)
assert len(used_tools) == 1, "Should only have the AddImageTool"
@@ -4208,9 +4215,9 @@ def test_crew_guardrail_feedback_in_context():
assert len(execution_contexts) > 1, "Task should have been executed multiple times"

# Verify that the second execution included the guardrail feedback
assert "Output must contain the keyword 'IMPORTANT'" in execution_contexts[1], (
"Guardrail feedback should be included in retry context"
)
assert (
"Output must contain the keyword 'IMPORTANT'" in execution_contexts[1]
), "Guardrail feedback should be included in retry context"

# Verify final output meets guardrail requirements
assert "IMPORTANT" in result.raw, "Final output should contain required keyword"
@@ -4225,11 +4232,13 @@ def test_before_kickoff_callback():

@CrewBase
class TestCrewClass:
from typing import List

from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.project import CrewBase, agent, before_kickoff, crew, task

agents: list[BaseAgent]
tasks: list[Task]
agents: List[BaseAgent]
tasks: List[Task]

agents_config = None
tasks_config = None
@@ -4253,11 +4262,12 @@ def test_before_kickoff_callback():

@task
def my_task(self):
return Task(
task = Task(
description="Test task description",
expected_output="Test expected output",
agent=self.my_agent(),
)
return task

@crew
def crew(self):
@@ -4423,46 +4433,46 @@ def test_crew_copy_with_memory():
try:
crew_copy = crew.copy()

assert hasattr(crew_copy, "_short_term_memory"), (
"Copied crew should have _short_term_memory"
)
assert crew_copy._short_term_memory is not None, (
"Copied _short_term_memory should not be None"
)
assert id(crew_copy._short_term_memory) != original_short_term_id, (
"Copied _short_term_memory should be a new object"
)
assert hasattr(
crew_copy, "_short_term_memory"
), "Copied crew should have _short_term_memory"
assert (
crew_copy._short_term_memory is not None
), "Copied _short_term_memory should not be None"
assert (
id(crew_copy._short_term_memory) != original_short_term_id
), "Copied _short_term_memory should be a new object"

assert hasattr(crew_copy, "_long_term_memory"), (
"Copied crew should have _long_term_memory"
)
assert crew_copy._long_term_memory is not None, (
"Copied _long_term_memory should not be None"
)
assert id(crew_copy._long_term_memory) != original_long_term_id, (
"Copied _long_term_memory should be a new object"
)
assert hasattr(
crew_copy, "_long_term_memory"
), "Copied crew should have _long_term_memory"
assert (
crew_copy._long_term_memory is not None
), "Copied _long_term_memory should not be None"
assert (
id(crew_copy._long_term_memory) != original_long_term_id
), "Copied _long_term_memory should be a new object"

assert hasattr(crew_copy, "_entity_memory"), (
"Copied crew should have _entity_memory"
)
assert crew_copy._entity_memory is not None, (
"Copied _entity_memory should not be None"
)
assert id(crew_copy._entity_memory) != original_entity_id, (
"Copied _entity_memory should be a new object"
)
assert hasattr(
crew_copy, "_entity_memory"
), "Copied crew should have _entity_memory"
assert (
crew_copy._entity_memory is not None
), "Copied _entity_memory should not be None"
assert (
id(crew_copy._entity_memory) != original_entity_id
), "Copied _entity_memory should be a new object"

if original_external_id:
assert hasattr(crew_copy, "_external_memory"), (
"Copied crew should have _external_memory"
)
assert crew_copy._external_memory is not None, (
"Copied _external_memory should not be None"
)
assert id(crew_copy._external_memory) != original_external_id, (
"Copied _external_memory should be a new object"
)
assert hasattr(
crew_copy, "_external_memory"
), "Copied crew should have _external_memory"
assert (
crew_copy._external_memory is not None
), "Copied _external_memory should not be None"
assert (
id(crew_copy._external_memory) != original_external_id
), "Copied _external_memory should be a new object"
else:
assert (
not hasattr(crew_copy, "_external_memory")
|
||||
) as external_memory_save:
|
||||
crew.kickoff()
|
||||
|
||||
external_memory_save.assert_called_once()
|
||||
|
||||
call_args = external_memory_save.call_args
|
||||
|
||||
assert "value" in call_args.kwargs or len(call_args.args) > 0
|
||||
assert "metadata" in call_args.kwargs or len(call_args.args) > 1
|
||||
|
||||
if "metadata" in call_args.kwargs:
|
||||
metadata = call_args.kwargs["metadata"]
|
||||
else:
|
||||
metadata = call_args.args[1]
|
||||
|
||||
assert "description" in metadata
|
||||
assert "messages" in metadata
|
||||
assert isinstance(metadata["messages"], list)
|
||||
assert len(metadata["messages"]) >= 2
|
||||
|
||||
messages = metadata["messages"]
|
||||
assert messages[0]["role"] == "system"
|
||||
assert "Researcher" in messages[0]["content"]
|
||||
assert messages[1]["role"] == "user"
|
||||
assert "Research a topic to teach a kid aged 6 about math" in messages[1]["content"]
|
||||
expected_messages = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "You are Researcher. You're an expert in research and you love to learn new things.\nYour personal goal is: You research about math.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "\nCurrent Task: Research a topic to teach a kid aged 6 about math.\n\nThis is the expected criteria for your final answer: A topic, explanation, angle, and examples.\nyou MUST return the actual complete content as the final answer, not a summary.\n\n# Useful context: \nExternal memories:\n\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:",
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": "I now can give a great answer \nFinal Answer: \n\n**Topic: Understanding Shapes (Geometry)**\n\n**Explanation:** \nShapes are everywhere around us! They are the special forms that we can see in everyday objects. Teaching a 6-year-old about shapes is not only fun but also a way to help them think about the world around them and develop their spatial awareness. We will focus on basic shapes: circle, square, triangle, and rectangle. Understanding these shapes helps kids recognize and describe their environment.\n\n**Angle:** \nLet’s make learning about shapes an adventure! We can turn it into a treasure hunt where the child has to find objects around the house or outside that match the shapes we learn. This hands-on approach helps make the learning stick!\n\n**Examples:** \n1. **Circle:** \n - Explanation: A circle is round and has no corners. It looks like a wheel or a cookie! \n - Activity: Find objects that are circles, such as a clock, a dinner plate, or a ball. Draw a big circle on a paper and then try to draw smaller circles inside it.\n\n2. **Square:** \n - Explanation: A square has four equal sides and four corners. It looks like a box! \n - Activity: Look for squares in books, in windows, or in building blocks. Try to build a tall tower using square blocks!\n\n3. **Triangle:** \n - Explanation: A triangle has three sides and three corners. It looks like a slice of pizza or a roof! \n - Activity: Use crayons to draw a big triangle and then find things that are shaped like a triangle, like a slice of cheese or a traffic sign.\n\n4. **Rectangle:** \n - Explanation: A rectangle has four sides but only opposite sides are equal. It’s like a stretched square! \n - Activity: Search for rectangles, such as a book cover or a door. You can cut out rectangles from colored paper and create a collage!\n\nBy relating the shapes to fun activities and using real-world examples, we not only make learning more enjoyable but also help the child better remember and understand the concept of shapes in math. This foundation forms the basis of their future learning in geometry!",
|
||||
},
|
||||
]
|
||||
external_memory_save.assert_called_once_with(
|
||||
value=ANY,
|
||||
metadata={"description": ANY, "messages": expected_messages},
|
||||
)
|
||||
|
||||
@@ -1,6 +1,5 @@
from typing import Any, ClassVar
from typing import List
from unittest.mock import Mock, patch

import pytest

from crewai.agent import Agent
@@ -45,8 +44,8 @@ class InternalCrew:
agents_config = "config/agents.yaml"
tasks_config = "config/tasks.yaml"

agents: list[BaseAgent]
tasks: list[Task]
agents: List[BaseAgent]
tasks: List[Task]

@llm
def local_llm(self):
@@ -90,8 +89,7 @@ class InternalCrew:

@CrewBase
class InternalCrewWithMCP(InternalCrew):
mcp_server_params: ClassVar[dict[str, Any]] = {"host": "localhost", "port": 8000}
mcp_connect_timeout = 120
mcp_server_params = {"host": "localhost", "port": 8000}

@agent
def reporting_analyst(self):
@@ -202,8 +200,8 @@ def test_before_kickoff_with_none_input():
def test_multiple_before_after_kickoff():
@CrewBase
class MultipleHooksCrew:
agents: list[BaseAgent]
tasks: list[Task]
agents: List[BaseAgent]
tasks: List[Task]

agents_config = "config/agents.yaml"
tasks_config = "config/tasks.yaml"
@@ -286,7 +284,4 @@ def test_internal_crew_with_mcp():
assert crew.reporting_analyst().tools == [simple_tool, another_simple_tool]
assert crew.researcher().tools == [simple_tool]

adapter_mock.assert_called_once_with(
{"host": "localhost", "port": 8000},
connect_timeout=120
)
adapter_mock.assert_called_once_with({"host": "localhost", "port": 8000})

@@ -1,11 +1,11 @@
"""Test Agent creation and execution basic functionality."""

import ast
import hashlib
import json
import os
import time
from functools import partial
from hashlib import md5
from typing import Tuple, Union
from unittest.mock import MagicMock, patch

import pytest
@@ -248,7 +248,7 @@ def test_guardrail_type_error():
return (True, x)

@staticmethod
def guardrail_static_fn(x: TaskOutput) -> tuple[bool, str | TaskOutput]:
def guardrail_static_fn(x: TaskOutput) -> tuple[bool, Union[str, TaskOutput]]:
return (True, x)

obj = Object()
@@ -271,7 +271,7 @@ def test_guardrail_type_error():
guardrail=Object.guardrail_static_fn,
)

def error_fn(x: TaskOutput, y: bool) -> tuple[bool, TaskOutput]:
def error_fn(x: TaskOutput, y: bool) -> Tuple[bool, TaskOutput]:
return (y, x)

Task(
@@ -340,7 +340,7 @@ def test_output_pydantic_hierarchical():
)
result = crew.kickoff()
assert isinstance(result.pydantic, ScoreOutput)
assert result.to_dict() == {"score": 4}
assert result.to_dict() == {"score": 5}


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -401,8 +401,8 @@ def test_output_json_hierarchical():
manager_llm="gpt-4o",
)
result = crew.kickoff()
assert result.json == '{"score": 4}'
assert result.to_dict() == {"score": 4}
assert result.json == '{"score": 5}'
assert result.to_dict() == {"score": 5}


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -560,8 +560,8 @@ def test_output_json_dict_hierarchical():
manager_llm="gpt-4o",
)
result = crew.kickoff()
assert {"score": 4} == result.json_dict
assert result.to_dict() == {"score": 4}
assert {"score": 5} == result.json_dict
assert result.to_dict() == {"score": 5}


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -900,11 +900,11 @@ def test_conditional_task_copy_preserves_type():
assert isinstance(copied_conditional_task, ConditionalTask)


def test_interpolate_inputs(tmp_path):
def test_interpolate_inputs():
task = Task(
description="Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting.",
expected_output="Bullet point list of 5 interesting ideas about {topic}.",
output_file=str(tmp_path / "{topic}" / "output_{date}.txt"),
output_file="/tmp/{topic}/output_{date}.txt",
)

task.interpolate_inputs_and_add_conversation_history(
@@ -915,7 +915,7 @@ def test_interpolate_inputs(tmp_path):
== "Give me a list of 5 interesting ideas about AI to explore for an article, what makes them unique and interesting."
)
assert task.expected_output == "Bullet point list of 5 interesting ideas about AI."
assert task.output_file == str(tmp_path / "AI" / "output_2025.txt")
assert task.output_file == "/tmp/AI/output_2025.txt"

task.interpolate_inputs_and_add_conversation_history(
inputs={"topic": "ML", "date": "2025"}
@@ -925,7 +925,7 @@ def test_interpolate_inputs(tmp_path):
== "Give me a list of 5 interesting ideas about ML to explore for an article, what makes them unique and interesting."
)
assert task.expected_output == "Bullet point list of 5 interesting ideas about ML."
assert task.output_file == str(tmp_path / "ML" / "output_2025.txt")
assert task.output_file == "/tmp/ML/output_2025.txt"


def test_interpolate_only():
@@ -1074,9 +1074,8 @@ def test_key():
description=original_description,
expected_output=original_expected_output,
)
hash = md5(
f"{original_description}|{original_expected_output}".encode(),
usedforsecurity=False,
hash = hashlib.md5(
f"{original_description}|{original_expected_output}".encode()
).hexdigest()

assert task.key == hash, "The key should be the hash of the description."
@@ -1087,7 +1086,7 @@ def test_key():
)


def test_output_file_validation(tmp_path):
def test_output_file_validation():
"""Test output file path validation."""
# Valid paths
assert (
@@ -1098,15 +1097,13 @@ def test_output_file_validation(tmp_path):
).output_file
== "output.txt"
)
# Use secure temporary path instead of /tmp
temp_file = tmp_path / "output.txt"
assert (
Task(
description="Test task",
expected_output="Test output",
output_file=str(temp_file),
output_file="/tmp/output.txt",
).output_file
== str(temp_file).lstrip("/") # Remove leading slash to match expected behavior
== "tmp/output.txt"
)
assert (
Task(
@@ -1323,7 +1320,7 @@ def test_interpolate_with_list_of_dicts():
}
result = interpolate_only("{people}", input_data)

parsed_result = ast.literal_eval(result)
parsed_result = eval(result)
assert isinstance(parsed_result, list)
assert len(parsed_result) == 2
assert parsed_result[0]["name"] == "Alice"
@@ -1349,7 +1346,7 @@ def test_interpolate_with_nested_structures():
}
}
result = interpolate_only("{company}", input_data)
parsed = ast.literal_eval(result)
parsed = eval(result)

assert parsed["name"] == "TechCorp"
assert len(parsed["departments"]) == 2
@@ -1367,7 +1364,7 @@ def test_interpolate_with_special_characters():
}
}
result = interpolate_only("{special_data}", input_data)
parsed = ast.literal_eval(result)
parsed = eval(result)

assert parsed["quotes"] == """This has "double" and 'single' quotes"""
assert parsed["unicode"] == "文字化けテスト"
@@ -1389,7 +1386,7 @@ def test_interpolate_mixed_types():
}
}
result = interpolate_only("{data}", input_data)
parsed = ast.literal_eval(result)
parsed = eval(result)

assert parsed["name"] == "Test Dataset"
assert parsed["samples"] == 1000
@@ -1412,7 +1409,7 @@ def test_interpolate_complex_combination():
]
}
result = interpolate_only("{report}", input_data)
parsed = ast.literal_eval(result)
parsed = eval(result)

assert len(parsed) == 2
assert parsed[0]["month"] == "January"
@@ -1485,7 +1482,7 @@ def test_interpolate_valid_complex_types():

# Should not raise any errors
result = interpolate_only("{data}", {"data": valid_data})
parsed = ast.literal_eval(result)
parsed = eval(result)
assert parsed["name"] == "Valid Dataset"
assert parsed["stats"]["nested"]["deeper"]["b"] == 2.5

@@ -1515,7 +1512,7 @@ def test_interpolate_valid_types():
}

result = interpolate_only("{data}", {"data": valid_data})
parsed = ast.literal_eval(result)
parsed = eval(result)

assert parsed["active"] is True
assert parsed["deleted"] is False

@@ -1,3 +1,5 @@
from typing import Optional

import pytest
from pydantic import BaseModel, Field

@@ -37,7 +39,6 @@ def test_initialization(basic_function, schema_class):
assert tool.func == basic_function
assert tool.args_schema == schema_class


def test_from_function(basic_function):
"""Test creating tool from function"""
tool = CrewStructuredTool.from_function(
@@ -49,7 +50,6 @@ def test_from_function(basic_function):
assert tool.func == basic_function
assert isinstance(tool.args_schema, type(BaseModel))


def test_validate_function_signature(basic_function, schema_class):
"""Test function signature validation"""
tool = CrewStructuredTool(
@@ -62,7 +62,6 @@ def test_validate_function_signature(basic_function, schema_class):
# Should not raise any exceptions
tool._validate_function_signature()


@pytest.mark.asyncio
async def test_ainvoke(basic_function):
"""Test asynchronous invocation"""
@@ -71,7 +70,6 @@ async def test_ainvoke(basic_function):
result = await tool.ainvoke(input={"param1": "test"})
assert result == "test 0"


def test_parse_args_dict(basic_function):
"""Test parsing dictionary arguments"""
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
@@ -80,7 +78,6 @@ def test_parse_args_dict(basic_function):
assert parsed["param1"] == "test"
assert parsed["param2"] == 42


def test_parse_args_string(basic_function):
"""Test parsing string arguments"""
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
@@ -89,7 +86,6 @@ def test_parse_args_string(basic_function):
assert parsed["param1"] == "test"
assert parsed["param2"] == 42


def test_complex_types():
"""Test handling of complex parameter types"""

@@ -103,7 +99,6 @@ def test_complex_types():
result = tool.invoke({"nested": {"key": "value"}, "items": [1, 2, 3]})
assert result == "Processed 3 items with 1 nested keys"


def test_schema_inheritance():
"""Test tool creation with inherited schema"""

@@ -124,14 +119,13 @@ def test_schema_inheritance():
result = tool.invoke({"base_param": "test", "extra_param": 42})
assert result == "test 42"


def test_default_values_in_schema():
"""Test handling of default values in schema"""

def default_func(
required_param: str,
optional_param: str = "default",
nullable_param: int | None = None,
nullable_param: Optional[int] = None,
) -> str:
"""Test function with default values."""
return f"{required_param} {optional_param} {nullable_param}"
@@ -150,7 +144,6 @@ def test_default_values_in_schema():
)
assert result == "test custom 42"


@pytest.fixture
def custom_tool_decorator():
from crewai.tools import tool
@@ -162,7 +155,6 @@ def custom_tool_decorator():

return custom_tool


@pytest.fixture
def custom_tool():
from crewai.tools import BaseTool
@@ -177,25 +169,17 @@ def custom_tool():

return CustomTool()


def build_simple_crew(tool):
from crewai import Agent, Crew, Task
from crewai import Agent, Task, Crew

agent1 = Agent(
role="Simple role",
goal="Simple goal",
backstory="Simple backstory",
tools=[tool],
)
agent1 = Agent(role="Simple role", goal="Simple goal", backstory="Simple backstory", tools=[tool])

say_hi_task = Task(
description="Use the custom tool result as answer.",
agent=agent1,
expected_output="Use the tool result",
description="Use the custom tool result as answer.", agent=agent1, expected_output="Use the tool result"
)

return Crew(agents=[agent1], tasks=[say_hi_task])

crew = Crew(agents=[agent1], tasks=[say_hi_task])
return crew

@pytest.mark.vcr(filter_headers=["authorization"])
def test_async_tool_using_within_isolated_crew(custom_tool):
@@ -204,7 +188,6 @@ def test_async_tool_using_within_isolated_crew(custom_tool):

assert result.raw == "Hello World from Custom Tool"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_async_tool_using_decorator_within_isolated_crew(custom_tool_decorator):
crew = build_simple_crew(custom_tool_decorator)
@@ -212,7 +195,6 @@ def test_async_tool_using_decorator_within_isolated_crew(custom_tool_decorator):

assert result.raw == "Hello World from Custom Tool"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_async_tool_within_flow(custom_tool):
from crewai.flow.flow import Flow
@@ -223,7 +205,8 @@ def test_async_tool_within_flow(custom_tool):
@start()
async def start(self):
crew = build_simple_crew(custom_tool)
return await crew.kickoff_async()
result = await crew.kickoff_async()
return result

flow = StructuredExampleFlow()
result = flow.kickoff()
@@ -236,141 +219,12 @@ def test_async_tool_using_decorator_within_flow(custom_tool_decorator):

class StructuredExampleFlow(Flow):
from crewai.flow.flow import start

@start()
async def start(self):
crew = build_simple_crew(custom_tool_decorator)
return await crew.kickoff_async()
result = await crew.kickoff_async()
return result

flow = StructuredExampleFlow()
result = flow.kickoff()
assert result.raw == "Hello World from Custom Tool"


def test_structured_tool_invoke_calls_func_only_once():
"""Test that CrewStructuredTool.invoke() calls the underlying function exactly once."""
call_count = 0
call_history = []

def counting_function(param: str) -> str:
"""Function that tracks how many times it's called."""
nonlocal call_count
call_count += 1
call_history.append(f"Call #{call_count} with param: {param}")
return f"Result from call #{call_count}: {param}"

# Create CrewStructuredTool directly
tool = CrewStructuredTool.from_function(
func=counting_function,
name="direct_test_tool",
description="Tool to test direct invoke() method",
)

# Call invoke() directly - this is where the bug was
result = tool.invoke({"param": "test_value"})

# Critical assertions that would catch the duplicate execution bug
assert call_count == 1, (
f"DUPLICATE EXECUTION BUG: Function was called {call_count} times instead of 1. "
f"This means CrewStructuredTool.invoke() has duplicate function calls. "
f"Call history: {call_history}"
)

assert len(call_history) == 1, (
f"Expected 1 call in history, got {len(call_history)}: {call_history}"
)

assert call_history[0] == "Call #1 with param: test_value", (
f"Expected 'Call #1 with param: test_value', got: {call_history[0]}"
)

assert result == "Result from call #1: test_value", (
f"Expected result from first call, got: {result}"
)


def test_structured_tool_invoke_multiple_calls_increment_correctly():
"""Test multiple calls to invoke() to ensure each increments correctly."""
call_count = 0

def incrementing_function(value: int) -> int:
nonlocal call_count
call_count += 1
return value + call_count

tool = CrewStructuredTool.from_function(
func=incrementing_function,
name="incrementing_tool",
description="Tool that increments on each call",
)

result1 = tool.invoke({"value": 10})
assert call_count == 1, (
f"After first invoke, expected call_count=1, got {call_count}"
)
assert result1 == 11, f"Expected 11 (10+1), got {result1}"

result2 = tool.invoke({"value": 20})
assert call_count == 2, (
f"After second invoke, expected call_count=2, got {call_count}"
)
assert result2 == 22, f"Expected 22 (20+2), got {result2}"

result3 = tool.invoke({"value": 30})
assert call_count == 3, (
f"After third invoke, expected call_count=3, got {call_count}"
)
assert result3 == 33, f"Expected 33 (30+3), got {result3}"


def test_structured_tool_invoke_with_side_effects():
"""Test that side effects only happen once per invoke() call."""
side_effects = []

def side_effect_function(action: str) -> str:
side_effects.append(f"SIDE_EFFECT: {action} executed at call")
return f"Action {action} completed"

tool = CrewStructuredTool.from_function(
func=side_effect_function,
name="side_effect_tool",
description="Tool with observable side effects",
)

result = tool.invoke({"action": "write_file"})

assert len(side_effects) == 1, (
f"SIDE EFFECT BUG: Expected 1 side effect, got {len(side_effects)}. "
f"This indicates the function was called multiple times. "
f"Side effects: {side_effects}"
)

assert side_effects[0] == "SIDE_EFFECT: write_file executed at call"
assert result == "Action write_file completed"


def test_structured_tool_invoke_exception_handling():
"""Test that exceptions don't cause duplicate execution."""
call_count = 0

def failing_function(should_fail: bool) -> str:
nonlocal call_count
call_count += 1
if should_fail:
raise ValueError(f"Intentional failure on call #{call_count}")
return f"Success on call #{call_count}"

tool = CrewStructuredTool.from_function(
func=failing_function, name="failing_tool", description="Tool that can fail"
)

result = tool.invoke({"should_fail": False})
assert call_count == 1, f"Expected 1 call for success case, got {call_count}"
assert result == "Success on call #1"

call_count = 0

with pytest.raises(ValueError, match="Intentional failure on call #1"):
tool.invoke({"should_fail": True})

assert call_count == 1
assert result.raw == "Hello World from Custom Tool"