Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-21 13:58:15 +00:00

Compare commits: 49 commits, devin/1768...lorenze/im
| Author | SHA1 | Date |
|---|---|---|
| | 1b67629149 | |
| | 741bf12bf4 | |
| | b49e42af05 | |
| | 3472cb4f8a | |
| | 63a33cf01c | |
| | 9de0e7cb13 | |
| | 4c1f86b32f | |
| | 822d1f9997 | |
| | bfc15ef4bd | |
| | edcf3e3e36 | |
| | 33d87bdf0f | |
| | c16f1dd801 | |
| | b267bb4054 | |
| | ceef062426 | |
| | 64052745b7 | |
| | 7f7b5094cc | |
| | 67d681bc6e | |
| | ad83e8a2bf | |
| | e44d778e0e | |
| | 601eda9095 | |
| | 83c62a65dd | |
| | 5645cbb22e | |
| | 8f022be106 | |
| | 6a19b0a279 | |
| | 641c336b2c | |
| | 22f1812824 | |
| | 3a1deb193a | |
| | 09185acc0d | |
| | 6541f01b1b | |
| | 3a6702e9c8 | |
| | e4bd7889fd | |
| | 842a1db16f | |
| | e9b86100c7 | |
| | 341812d58e | |
| | 38db734561 | |
| | 5048d54981 | |
| | ae17178e86 | |
| | b7a13e15ff | |
| | 13dc7e25e0 | |
| | 6c5e5056f3 | |
| | 9edbf89b68 | |
| | 685f7b9af1 | |
| | 595fdfb6e7 | |
| | 8f99fa76ed | |
| | 17e3fcbe1f | |
| | b858d705a8 | |
| | 5cef85c643 | |
| | dc3ae9396d | |
| | 0029f8193c | |
.gitignore (vendored) | 1
@@ -26,3 +26,4 @@ plan.md
 conceptual_plan.md
 build_image
 chromadb-*.lock
+.claude
@@ -291,6 +291,7 @@
 "en/observability/arize-phoenix",
 "en/observability/braintrust",
 "en/observability/datadog",
+"en/observability/galileo",
 "en/observability/langdb",
 "en/observability/langfuse",
 "en/observability/langtrace",
@@ -428,7 +429,8 @@
 "group": "How-To Guides",
 "pages": [
 "en/enterprise/guides/build-crew",
-"en/enterprise/guides/deploy-crew",
+"en/enterprise/guides/prepare-for-deployment",
+"en/enterprise/guides/deploy-to-amp",
 "en/enterprise/guides/kickoff-crew",
 "en/enterprise/guides/update-crew",
 "en/enterprise/guides/enable-crew-studio",
@@ -742,6 +744,7 @@
 "pt-BR/observability/arize-phoenix",
 "pt-BR/observability/braintrust",
 "pt-BR/observability/datadog",
+"pt-BR/observability/galileo",
 "pt-BR/observability/langdb",
 "pt-BR/observability/langfuse",
 "pt-BR/observability/langtrace",
@@ -862,7 +865,8 @@
 "group": "Guias",
 "pages": [
 "pt-BR/enterprise/guides/build-crew",
-"pt-BR/enterprise/guides/deploy-crew",
+"pt-BR/enterprise/guides/prepare-for-deployment",
+"pt-BR/enterprise/guides/deploy-to-amp",
 "pt-BR/enterprise/guides/kickoff-crew",
 "pt-BR/enterprise/guides/update-crew",
 "pt-BR/enterprise/guides/enable-crew-studio",
@@ -1203,6 +1207,7 @@
 "ko/observability/arize-phoenix",
 "ko/observability/braintrust",
 "ko/observability/datadog",
+"ko/observability/galileo",
 "ko/observability/langdb",
 "ko/observability/langfuse",
 "ko/observability/langtrace",
@@ -1323,7 +1328,8 @@
 "group": "How-To Guides",
 "pages": [
 "ko/enterprise/guides/build-crew",
-"ko/enterprise/guides/deploy-crew",
+"ko/enterprise/guides/prepare-for-deployment",
+"ko/enterprise/guides/deploy-to-amp",
 "ko/enterprise/guides/kickoff-crew",
 "ko/enterprise/guides/update-crew",
 "ko/enterprise/guides/enable-crew-studio",
@@ -1511,6 +1517,18 @@
 "source": "/enterprise/:path*",
 "destination": "/en/enterprise/:path*"
 },
+{
+"source": "/en/enterprise/guides/deploy-crew",
+"destination": "/en/enterprise/guides/deploy-to-amp"
+},
+{
+"source": "/ko/enterprise/guides/deploy-crew",
+"destination": "/ko/enterprise/guides/deploy-to-amp"
+},
+{
+"source": "/pt-BR/enterprise/guides/deploy-crew",
+"destination": "/pt-BR/enterprise/guides/deploy-to-amp"
+},
 {
 "source": "/api-reference/:path*",
 "destination": "/en/api-reference/:path*"
@@ -574,6 +574,10 @@ When you run this Flow, the output will change based on the random boolean value

 ### Human in the Loop (human feedback)

+<Note>
+  The `@human_feedback` decorator requires **CrewAI version 1.8.0 or higher**.
+</Note>
+
 The `@human_feedback` decorator enables human-in-the-loop workflows by pausing flow execution to collect feedback from a human. This is useful for approval gates, quality review, and decision points that require human judgment.

 ```python Code
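The hunk ends just as the example opens. Purely as an illustrative sketch (the decorator's import path and exact signature are assumptions, not taken from this diff), a human-review step in a flow might look like:

```python
# Illustrative sketch only: the decorator's import path and exact signature
# are assumptions, not taken from this diff. Requires CrewAI >= 1.8.0.
from crewai.flow import Flow, start, listen, human_feedback  # assumed exports

class ReviewFlow(Flow):
    @start()
    def draft(self):
        # Produce something worth reviewing
        return "Draft announcement for human review"

    @human_feedback  # assumed usage: pause here, show the output, collect feedback
    @listen(draft)
    def review(self, text):
        return text

def kickoff():
    """Run the flow."""
    ReviewFlow().kickoff()

if __name__ == "__main__":
    kickoff()
```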
@@ -375,10 +375,13 @@ In this section, you'll find detailed examples that help you select, configure,
 GOOGLE_API_KEY=<your-api-key>
 GEMINI_API_KEY=<your-api-key>

-# Optional - for Vertex AI
+# For Vertex AI Express mode (API key authentication)
+GOOGLE_GENAI_USE_VERTEXAI=true
+GOOGLE_API_KEY=<your-api-key>
+
+# For Vertex AI with service account
 GOOGLE_CLOUD_PROJECT=<your-project-id>
 GOOGLE_CLOUD_LOCATION=<location>  # Defaults to us-central1
-GOOGLE_GENAI_USE_VERTEXAI=true  # Set to use Vertex AI
 ```

 **Basic Usage:**
@@ -412,7 +415,35 @@ In this section, you'll find detailed examples that help you select, configure,
 )
 ```

-**Vertex AI Configuration:**
+**Vertex AI Express Mode (API Key Authentication):**
+
+Vertex AI Express mode allows you to use Vertex AI with simple API key authentication instead of service account credentials. This is the quickest way to get started with Vertex AI.
+
+To enable Express mode, set both environment variables in your `.env` file:
+```toml .env
+GOOGLE_GENAI_USE_VERTEXAI=true
+GOOGLE_API_KEY=<your-api-key>
+```
+
+Then use the LLM as usual:
+```python Code
+from crewai import LLM
+
+llm = LLM(
+    model="gemini/gemini-2.0-flash",
+    temperature=0.7
+)
+```
+
+<Info>
+  To get an Express mode API key:
+  - New Google Cloud users: Get an [express mode API key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey)
+  - Existing Google Cloud users: Get a [Google Cloud API key bound to a service account](https://cloud.google.com/docs/authentication/api-keys)
+
+  For more details, see the [Vertex AI Express mode documentation](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey).
+</Info>
+
+**Vertex AI Configuration (Service Account):**
 ```python Code
 from crewai import LLM

@@ -424,10 +455,10 @@ In this section, you'll find detailed examples that help you select, configure,
 ```

 **Supported Environment Variables:**
-- `GOOGLE_API_KEY` or `GEMINI_API_KEY`: Your Google API key (required for Gemini API)
-- `GOOGLE_CLOUD_PROJECT`: Google Cloud project ID (for Vertex AI)
+- `GOOGLE_API_KEY` or `GEMINI_API_KEY`: Your Google API key (required for Gemini API and Vertex AI Express mode)
+- `GOOGLE_GENAI_USE_VERTEXAI`: Set to `true` to use Vertex AI (required for Express mode)
+- `GOOGLE_CLOUD_PROJECT`: Google Cloud project ID (for Vertex AI with service account)
 - `GOOGLE_CLOUD_LOCATION`: GCP location (defaults to `us-central1`)
-- `GOOGLE_GENAI_USE_VERTEXAI`: Set to `true` to use Vertex AI

 **Features:**
 - Native function calling support for Gemini 1.5+ and 2.x models
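For completeness, a minimal sketch of the service-account path whose code block the hunk above truncates, assuming the same `LLM` class and `gemini/` model prefix as the Express-mode example:

```python
# Sketch under stated assumptions: GOOGLE_CLOUD_PROJECT, GOOGLE_CLOUD_LOCATION,
# and GOOGLE_GENAI_USE_VERTEXAI=true are already set in .env; the model string
# follows the same gemini/ prefix used in the Express-mode example above.
from crewai import LLM

llm = LLM(
    model="gemini/gemini-2.0-flash",
    temperature=0.7,
)
```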
@@ -1,12 +1,12 @@
 ---
-title: "Deploy Crew"
-description: "Deploying a Crew on CrewAI AMP"
+title: "Deploy to AMP"
+description: "Deploy your Crew or Flow to CrewAI AMP"
 icon: "rocket"
 mode: "wide"
 ---

 <Note>
-  After creating a crew locally or through Crew Studio, the next step is
+  After creating a Crew or Flow locally (or through Crew Studio), the next step is
   deploying it to the CrewAI AMP platform. This guide covers multiple deployment
   methods to help you choose the best approach for your workflow.
 </Note>
@@ -14,19 +14,26 @@ mode: "wide"
 ## Prerequisites

 <CardGroup cols={2}>
-  <Card title="Crew Ready for Deployment" icon="users">
-    You should have a working crew either built locally or created through Crew
-    Studio
+  <Card title="Project Ready for Deployment" icon="check-circle">
+    You should have a working Crew or Flow that runs successfully locally.
+    Follow our [preparation guide](/en/enterprise/guides/prepare-for-deployment) to verify your project structure.
   </Card>
   <Card title="GitHub Repository" icon="github">
-    Your crew code should be in a GitHub repository (for GitHub integration
+    Your code should be in a GitHub repository (for GitHub integration
     method)
   </Card>
 </CardGroup>

+<Info>
+  **Crews vs Flows**: Both project types can be deployed as "automations" on CrewAI AMP.
+  The deployment process is the same, but they have different project structures.
+  See [Prepare for Deployment](/en/enterprise/guides/prepare-for-deployment) for details.
+</Info>
+
 ## Option 1: Deploy Using CrewAI CLI

-The CLI provides the fastest way to deploy locally developed crews to the Enterprise platform.
+The CLI provides the fastest way to deploy locally developed Crews or Flows to the AMP platform.
+The CLI automatically detects your project type from `pyproject.toml` and builds accordingly.

 <Steps>
   <Step title="Install CrewAI CLI">
@@ -128,7 +135,7 @@ crewai deploy remove <deployment_id>

 ## Option 2: Deploy Directly via Web Interface

-You can also deploy your crews directly through the CrewAI AMP web interface by connecting your GitHub account. This approach doesn't require using the CLI on your local machine.
+You can also deploy your Crews or Flows directly through the CrewAI AMP web interface by connecting your GitHub account. This approach doesn't require using the CLI on your local machine. The platform automatically detects your project type and handles the build appropriately.

 <Steps>
@@ -282,68 +289,7 @@ For automated deployments in CI/CD pipelines, you can use the CrewAI API to trig

 </Steps>

-## ⚠️ Environment Variable Security Requirements
-
-<Warning>
-  **Important**: CrewAI AMP has security restrictions on environment variable
-  names that can cause deployment failures if not followed.
-</Warning>
-
-### Blocked Environment Variable Patterns
-
-For security reasons, the following environment variable naming patterns are **automatically filtered** and will cause deployment issues:
-
-**Blocked Patterns:**
-
-- Variables ending with `_TOKEN` (e.g., `MY_API_TOKEN`)
-- Variables ending with `_PASSWORD` (e.g., `DB_PASSWORD`)
-- Variables ending with `_SECRET` (e.g., `API_SECRET`)
-- Variables ending with `_KEY` in certain contexts
-
-**Specific Blocked Variables:**
-
-- `GITHUB_USER`, `GITHUB_TOKEN`
-- `AWS_REGION`, `AWS_DEFAULT_REGION`
-- Various internal CrewAI system variables
-
-### Allowed Exceptions
-
-Some variables are explicitly allowed despite matching blocked patterns:
-
-- `AZURE_AD_TOKEN`
-- `AZURE_OPENAI_AD_TOKEN`
-- `ENTERPRISE_ACTION_TOKEN`
-- `CREWAI_ENTEPRISE_TOOLS_TOKEN`
-
-### How to Fix Naming Issues
-
-If your deployment fails due to environment variable restrictions:
-
-```bash
-# ❌ These will cause deployment failures
-OPENAI_TOKEN=sk-...
-DATABASE_PASSWORD=mypassword
-API_SECRET=secret123
-
-# ✅ Use these naming patterns instead
-OPENAI_API_KEY=sk-...
-DATABASE_CREDENTIALS=mypassword
-API_CONFIG=secret123
-```
-
-### Best Practices
-
-1. **Use standard naming conventions**: `PROVIDER_API_KEY` instead of `PROVIDER_TOKEN`
-2. **Test locally first**: Ensure your crew works with the renamed variables
-3. **Update your code**: Change any references to the old variable names
-4. **Document changes**: Keep track of renamed variables for your team
-
-<Tip>
-  If you encounter deployment failures with cryptic environment variable errors,
-  check your variable names against these patterns first.
-</Tip>
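A hedged local pre-check for the blocked name patterns listed above (sketch only; the platform's actual server-side filter is not public, so treat the pattern list as approximate):

```python
# Sketch: flag .env variable names matching the blocked patterns listed above.
# The platform's real filter may differ; use this only as a local pre-check.
from pathlib import Path

BLOCKED_SUFFIXES = ("_TOKEN", "_PASSWORD", "_SECRET")
BLOCKED_NAMES = {"GITHUB_USER", "GITHUB_TOKEN", "AWS_REGION", "AWS_DEFAULT_REGION"}
ALLOWED = {"AZURE_AD_TOKEN", "AZURE_OPENAI_AD_TOKEN",
           "ENTERPRISE_ACTION_TOKEN", "CREWAI_ENTEPRISE_TOOLS_TOKEN"}

for line in Path(".env").read_text().splitlines():
    if "=" not in line or line.lstrip().startswith("#"):
        continue  # skip comments and non-assignments
    name = line.split("=", 1)[0].strip()
    if name in ALLOWED:
        continue
    if name.endswith(BLOCKED_SUFFIXES) or name in BLOCKED_NAMES:
        print(f"rename before deploying: {name}")
```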
-### Interact with Your Deployed Crew
+## Interact with Your Deployed Automation

 Once deployment is complete, you can access your crew through:
@@ -387,7 +333,108 @@ The Enterprise platform also offers:
- **Custom Tools Repository**: Create, share, and install tools
- **Crew Studio**: Build crews through a chat interface without writing code

## Troubleshooting Deployment Failures

If your deployment fails, check these common issues:

### Build Failures

#### Missing uv.lock File

**Symptom**: Build fails early with dependency resolution errors

**Solution**: Generate and commit the lock file:

```bash
uv lock
git add uv.lock
git commit -m "Add uv.lock for deployment"
git push
```

<Warning>
  The `uv.lock` file is required for all deployments. Without it, the platform
  cannot reliably install your dependencies.
</Warning>

#### Wrong Project Structure

**Symptom**: "Could not find entry point" or "Module not found" errors

**Solution**: Verify your project matches the expected structure:

- **Both Crews and Flows**: Must have their entry point at `src/project_name/main.py`
- **Crews**: Use a `run()` function as the entry point
- **Flows**: Use a `kickoff()` function as the entry point

See [Prepare for Deployment](/en/enterprise/guides/prepare-for-deployment) for detailed structure diagrams.

#### Missing CrewBase Decorator

**Symptom**: "Crew not found", "Config not found", or agent/task configuration errors

**Solution**: Ensure **all** crew classes use the `@CrewBase` decorator:

```python
from crewai import Agent
from crewai.project import CrewBase, agent, crew, task

@CrewBase  # This decorator is REQUIRED
class YourCrew():
    """Your crew description"""

    @agent
    def my_agent(self) -> Agent:
        return Agent(
            config=self.agents_config['my_agent'],  # type: ignore[index]
            verbose=True
        )

    # ... rest of crew definition
```

<Info>
  This applies to standalone Crews AND crews embedded inside Flow projects.
  Every crew class needs the decorator.
</Info>

#### Incorrect pyproject.toml Type

**Symptom**: Build succeeds but runtime fails, or unexpected behavior

**Solution**: Verify the `[tool.crewai]` section matches your project type:

```toml
# For Crew projects:
[tool.crewai]
type = "crew"

# For Flow projects:
[tool.crewai]
type = "flow"
```

### Runtime Failures

#### LLM Connection Failures

**Symptom**: API key errors, "model not found", or authentication failures

**Solution**:
1. Verify your LLM provider's API key is correctly set in environment variables
2. Ensure the environment variable names match what your code expects
3. Test locally with the exact same environment variables before deploying

#### Crew Execution Errors

**Symptom**: Crew starts but fails during execution

**Solution**:
1. Check the execution logs in the AMP dashboard (Traces tab)
2. Verify all tools have required API keys configured
3. Ensure agent configurations in `agents.yaml` are valid
4. Check task configurations in `tasks.yaml` for syntax errors (a quick local parse check is sketched below)
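For steps 3 and 4, a small local sanity check can catch YAML syntax errors before a redeploy. A hedged sketch, assuming the standard `config/` layout described in this guide and PyYAML being available:

```python
# Sketch: confirm agents.yaml and tasks.yaml parse cleanly before redeploying.
# Searches any config/ folder under src/, covering both Crew and Flow layouts.
from pathlib import Path
import yaml  # requires PyYAML

for name in ("agents.yaml", "tasks.yaml"):
    matches = list(Path("src").rglob(f"config/{name}"))
    if not matches:
        print(f"missing: {name}")
        continue
    for path in matches:
        with path.open() as f:
            entries = yaml.safe_load(f)  # raises yaml.YAMLError on syntax problems
        print(f"{path}: OK ({len(entries)} top-level entries)")
```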
 <Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
   Contact our support team for assistance with deployment issues or questions
-  about the Enterprise platform.
+  about the AMP platform.
 </Card>
docs/en/enterprise/guides/prepare-for-deployment.mdx (new file) | 305
@@ -0,0 +1,305 @@
---
title: "Prepare for Deployment"
description: "Ensure your Crew or Flow is ready for deployment to CrewAI AMP"
icon: "clipboard-check"
mode: "wide"
---

<Note>
  Before deploying to CrewAI AMP, it's crucial to verify your project is correctly structured.
  Both Crews and Flows can be deployed as "automations," but they have different project structures
  and requirements that must be met for successful deployment.
</Note>

## Understanding Automations

In CrewAI AMP, **automations** is the umbrella term for deployable Agentic AI projects. An automation can be either:

- **A Crew**: A standalone team of AI agents working together on tasks
- **A Flow**: An orchestrated workflow that can combine multiple crews, direct LLM calls, and procedural logic

Understanding which type you're deploying is essential because they have different project structures and entry points.

## Crews vs Flows: Key Differences

<CardGroup cols={2}>
  <Card title="Crew Projects" icon="users">
    Standalone AI agent teams with `crew.py` defining agents and tasks. Best for focused, collaborative tasks.
  </Card>
  <Card title="Flow Projects" icon="diagram-project">
    Orchestrated workflows with embedded crews in a `crews/` folder. Best for complex, multi-stage processes.
  </Card>
</CardGroup>

| Aspect | Crew | Flow |
|--------|------|------|
| **Project structure** | `src/project_name/` with `crew.py` | `src/project_name/` with `crews/` folder |
| **Main logic location** | `src/project_name/crew.py` | `src/project_name/main.py` (Flow class) |
| **Entry point function** | `run()` in `main.py` | `kickoff()` in `main.py` |
| **pyproject.toml type** | `type = "crew"` | `type = "flow"` |
| **CLI create command** | `crewai create crew name` | `crewai create flow name` |
| **Config location** | `src/project_name/config/` | `src/project_name/crews/crew_name/config/` |
| **Can contain other crews** | No | Yes (in `crews/` folder) |
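To verify programmatically which type a project declares, a small sketch (assumes Python 3.11+ for the standard-library `tomllib`):

```python
# Sketch: read the [tool.crewai] type from pyproject.toml (Python 3.11+).
import tomllib

with open("pyproject.toml", "rb") as f:
    data = tomllib.load(f)

project_type = data.get("tool", {}).get("crewai", {}).get("type")
print(f"project type: {project_type}")  # expected: "crew" or "flow"
```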
## Project Structure Reference

### Crew Project Structure

When you run `crewai create crew my_crew`, you get this structure:

```
my_crew/
├── .gitignore
├── pyproject.toml        # Must have type = "crew"
├── README.md
├── .env
├── uv.lock               # REQUIRED for deployment
└── src/
    └── my_crew/
        ├── __init__.py
        ├── main.py       # Entry point with run() function
        ├── crew.py       # Crew class with @CrewBase decorator
        ├── tools/
        │   ├── custom_tool.py
        │   └── __init__.py
        └── config/
            ├── agents.yaml   # Agent definitions
            └── tasks.yaml    # Task definitions
```

<Warning>
  The nested `src/project_name/` structure is critical for Crews.
  Placing files at the wrong level will cause deployment failures.
</Warning>

### Flow Project Structure

When you run `crewai create flow my_flow`, you get this structure:

```
my_flow/
├── .gitignore
├── pyproject.toml        # Must have type = "flow"
├── README.md
├── .env
├── uv.lock               # REQUIRED for deployment
└── src/
    └── my_flow/
        ├── __init__.py
        ├── main.py       # Entry point with kickoff() function + Flow class
        ├── crews/        # Embedded crews folder
        │   └── poem_crew/
        │       ├── __init__.py
        │       ├── poem_crew.py   # Crew with @CrewBase decorator
        │       └── config/
        │           ├── agents.yaml
        │           └── tasks.yaml
        └── tools/
            ├── __init__.py
            └── custom_tool.py
```

<Info>
  Both Crews and Flows use the `src/project_name/` structure.
  The key difference is that Flows have a `crews/` folder for embedded crews,
  while Crews have `crew.py` directly in the project folder.
</Info>

## Pre-Deployment Checklist

Use this checklist to verify your project is ready for deployment.

### 1. Verify pyproject.toml Configuration

Your `pyproject.toml` must include the correct `[tool.crewai]` section:

<Tabs>
  <Tab title="For Crews">
    ```toml
    [tool.crewai]
    type = "crew"
    ```
  </Tab>
  <Tab title="For Flows">
    ```toml
    [tool.crewai]
    type = "flow"
    ```
  </Tab>
</Tabs>

<Warning>
  If the `type` doesn't match your project structure, the build will fail or
  the automation won't run correctly.
</Warning>

### 2. Ensure uv.lock File Exists

CrewAI uses `uv` for dependency management. The `uv.lock` file ensures reproducible builds and is **required** for deployment.

```bash
# Generate or update the lock file
uv lock

# Verify it exists
ls -la uv.lock
```

If the file doesn't exist, run `uv lock` and commit it to your repository:

```bash
uv lock
git add uv.lock
git commit -m "Add uv.lock for deployment"
git push
```

### 3. Validate CrewBase Decorator Usage

**Every crew class must use the `@CrewBase` decorator.** This applies to:

- Standalone crew projects
- Crews embedded inside Flow projects

```python
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from crewai.agents.agent_builder.base_agent import BaseAgent
from typing import List

@CrewBase  # This decorator is REQUIRED
class MyCrew():
    """My crew description"""

    agents: List[BaseAgent]
    tasks: List[Task]

    @agent
    def my_agent(self) -> Agent:
        return Agent(
            config=self.agents_config['my_agent'],  # type: ignore[index]
            verbose=True
        )

    @task
    def my_task(self) -> Task:
        return Task(
            config=self.tasks_config['my_task']  # type: ignore[index]
        )

    @crew
    def crew(self) -> Crew:
        return Crew(
            agents=self.agents,
            tasks=self.tasks,
            process=Process.sequential,
            verbose=True,
        )
```

<Warning>
  If you forget the `@CrewBase` decorator, your deployment will fail with
  errors about missing agents or tasks configurations.
</Warning>

### 4. Check Project Entry Points

Both Crews and Flows have their entry point in `src/project_name/main.py`:

<Tabs>
  <Tab title="For Crews">
    The entry point uses a `run()` function:

    ```python
    # src/my_crew/main.py
    from my_crew.crew import MyCrew

    def run():
        """Run the crew."""
        inputs = {'topic': 'AI in Healthcare'}
        result = MyCrew().crew().kickoff(inputs=inputs)
        return result

    if __name__ == "__main__":
        run()
    ```
  </Tab>
  <Tab title="For Flows">
    The entry point uses a `kickoff()` function with a Flow class:

    ```python
    # src/my_flow/main.py
    from crewai.flow import Flow, listen, start
    from my_flow.crews.poem_crew.poem_crew import PoemCrew

    class MyFlow(Flow):
        @start()
        def begin(self):
            # Flow logic here
            result = PoemCrew().crew().kickoff(inputs={...})
            return result

    def kickoff():
        """Run the flow."""
        MyFlow().kickoff()

    if __name__ == "__main__":
        kickoff()
    ```
  </Tab>
</Tabs>

### 5. Prepare Environment Variables

Before deployment, ensure you have:

1. **LLM API keys** ready (OpenAI, Anthropic, Google, etc.)
2. **Tool API keys** if using external tools (Serper, etc.)

<Tip>
  Test your project locally with the same environment variables before deploying
  to catch configuration issues early.
</Tip>

## Quick Validation Commands

Run these commands from your project root to quickly verify your setup:

```bash
# 1. Check project type in pyproject.toml
grep -A2 "\[tool.crewai\]" pyproject.toml

# 2. Verify uv.lock exists
ls -la uv.lock || echo "ERROR: uv.lock missing! Run 'uv lock'"

# 3. Verify src/ structure exists
ls -la src/*/main.py 2>/dev/null || echo "No main.py found in src/"

# 4. For Crews - verify crew.py exists
ls -la src/*/crew.py 2>/dev/null || echo "No crew.py (expected for Crews)"

# 5. For Flows - verify crews/ folder exists
ls -la src/*/crews/ 2>/dev/null || echo "No crews/ folder (expected for Flows)"

# 6. Check for CrewBase usage
grep -r "@CrewBase" . --include="*.py"
```

## Common Setup Mistakes

| Mistake | Symptom | Fix |
|---------|---------|-----|
| Missing `uv.lock` | Build fails during dependency resolution | Run `uv lock` and commit |
| Wrong `type` in pyproject.toml | Build succeeds but runtime fails | Change to the correct type |
| Missing `@CrewBase` decorator | "Config not found" errors | Add the decorator to all crew classes |
| Files at root instead of `src/` | Entry point not found | Move to `src/project_name/` |
| Missing `run()` or `kickoff()` | Cannot start automation | Add the correct entry function |

## Next Steps

Once your project passes all checklist items, you're ready to deploy:

<Card title="Deploy to AMP" icon="rocket" href="/en/enterprise/guides/deploy-to-amp">
  Follow the deployment guide to deploy your Crew or Flow to CrewAI AMP using
  the CLI, web interface, or CI/CD integration.
</Card>
@@ -1,43 +1,48 @@
 ---
 title: Agent-to-Agent (A2A) Protocol
-description: Enable CrewAI agents to delegate tasks to remote A2A-compliant agents for specialized handling
+description: Agents delegate tasks to remote A2A agents and/or operate as A2A-compliant server agents.
 icon: network-wired
 mode: "wide"
 ---

 ## A2A Agent Delegation

-CrewAI supports the Agent-to-Agent (A2A) protocol, allowing agents to delegate tasks to remote specialized agents. The agent's LLM automatically decides whether to handle a task directly or delegate to an A2A agent based on the task requirements.
-
-<Note>
-  A2A delegation requires the `a2a-sdk` package. Install with: `uv add 'crewai[a2a]'` or `pip install 'crewai[a2a]'`
-</Note>
+CrewAI treats the [A2A protocol](https://a2a-protocol.org/latest/) as a first-class delegation primitive, enabling agents to delegate tasks, request information, and collaborate with remote agents, as well as act as A2A-compliant server agents.
+In client mode, agents autonomously choose between local execution and remote delegation based on task requirements.

 ## How It Works

 When an agent is configured with A2A capabilities:

-1. The LLM analyzes each task
+1. The Agent analyzes each task
 2. It decides to either:
    - Handle the task directly using its own capabilities
    - Delegate to a remote A2A agent for specialized handling
 3. If delegating, the agent communicates with the remote A2A agent through the protocol
 4. Results are returned to the CrewAI workflow

+<Note>
+  A2A delegation requires the `a2a-sdk` package. Install with: `uv add 'crewai[a2a]'` or `pip install 'crewai[a2a]'`
+</Note>
+
 ## Basic Configuration

+<Warning>
+  `crewai.a2a.config.A2AConfig` is deprecated and will be removed in v2.0.0. Use `A2AClientConfig` for connecting to remote agents and/or `A2AServerConfig` for exposing agents as servers.
+</Warning>
+
 Configure an agent for A2A delegation by setting the `a2a` parameter:

 ```python Code
 from crewai import Agent, Crew, Task
-from crewai.a2a import A2AConfig
+from crewai.a2a import A2AClientConfig

 agent = Agent(
     role="Research Coordinator",
     goal="Coordinate research tasks efficiently",
     backstory="Expert at delegating to specialized research agents",
     llm="gpt-4o",
-    a2a=A2AConfig(
+    a2a=A2AClientConfig(
         endpoint="https://example.com/.well-known/agent-card.json",
         timeout=120,
         max_turns=10
@@ -54,9 +59,9 @@ crew = Crew(agents=[agent], tasks=[task], verbose=True)
 result = crew.kickoff()
 ```

-## Configuration Options
+## Client Configuration Options

-The `A2AConfig` class accepts the following parameters:
+The `A2AClientConfig` class accepts the following parameters:

 <ParamField path="endpoint" type="str" required>
   The A2A agent endpoint URL (typically points to `.well-known/agent-card.json`)
@@ -91,14 +96,34 @@ The `A2AConfig` class accepts the following parameters:
   Update mechanism for receiving task status. Options: `StreamingConfig`, `PollingConfig`, or `PushNotificationConfig`.
 </ParamField>

+<ParamField path="transport_protocol" type="Literal['JSONRPC', 'GRPC', 'HTTP+JSON']" default="JSONRPC">
+  Transport protocol for A2A communication. Options: `JSONRPC` (default), `GRPC`, or `HTTP+JSON`.
+</ParamField>
+
+<ParamField path="accepted_output_modes" type="list[str]" default='["application/json"]'>
+  Media types the client can accept in responses.
+</ParamField>
+
+<ParamField path="supported_transports" type="list[str]" default='["JSONRPC"]'>
+  Ordered list of transport protocols the client supports.
+</ParamField>
+
+<ParamField path="use_client_preference" type="bool" default="False">
+  Whether to prioritize client transport preferences over server.
+</ParamField>
+
+<ParamField path="extensions" type="list[str]" default="[]">
+  Extension URIs the client supports.
+</ParamField>
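Putting the transport-related options together, a hedged sketch (the endpoint URL is a placeholder; the parameter names and accepted values are taken from the fields documented above):

```python
# Hedged sketch combining the transport options documented above.
# The endpoint is a placeholder; parameter names come from the ParamFields.
from crewai import Agent
from crewai.a2a import A2AClientConfig

agent = Agent(
    role="Protocol-Aware Coordinator",
    goal="Delegate to remote agents over the best available transport",
    backstory="Knows which transports its infrastructure allows",
    llm="gpt-4o",
    a2a=A2AClientConfig(
        endpoint="https://example.com/.well-known/agent-card.json",
        transport_protocol="GRPC",                  # preferred transport
        supported_transports=["GRPC", "JSONRPC"],   # ordered fallback list
        use_client_preference=True,                 # prefer client ordering over server's
        accepted_output_modes=["application/json", "text/plain"],
    ),
)
```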
## Authentication

For A2A agents that require authentication, use one of the provided auth schemes:

<Tabs>
  <Tab title="Bearer Token">
-    ```python Code
-    from crewai.a2a import A2AConfig
+    ```python bearer_token_auth.py lines
+    from crewai.a2a import A2AClientConfig
    from crewai.a2a.auth import BearerTokenAuth

    agent = Agent(
@@ -106,18 +131,18 @@ agent = Agent(
        goal="Coordinate tasks with secured agents",
        backstory="Manages secure agent communications",
        llm="gpt-4o",
-        a2a=A2AConfig(
+        a2a=A2AClientConfig(
            endpoint="https://secure-agent.example.com/.well-known/agent-card.json",
            auth=BearerTokenAuth(token="your-bearer-token"),
            timeout=120
        )
    )
    ```
  </Tab>

  <Tab title="API Key">
-    ```python Code
-    from crewai.a2a import A2AConfig
+    ```python api_key_auth.py lines
+    from crewai.a2a import A2AClientConfig
    from crewai.a2a.auth import APIKeyAuth

    agent = Agent(
@@ -125,7 +150,7 @@ agent = Agent(
        goal="Coordinate with API-based agents",
        backstory="Manages API-authenticated communications",
        llm="gpt-4o",
-        a2a=A2AConfig(
+        a2a=A2AClientConfig(
            endpoint="https://api-agent.example.com/.well-known/agent-card.json",
            auth=APIKeyAuth(
                api_key="your-api-key",
@@ -135,12 +160,12 @@ agent = Agent(
            timeout=120
        )
    )
    ```
  </Tab>

  <Tab title="OAuth2">
-    ```python Code
-    from crewai.a2a import A2AConfig
+    ```python oauth2_auth.py lines
+    from crewai.a2a import A2AClientConfig
    from crewai.a2a.auth import OAuth2ClientCredentials

    agent = Agent(
@@ -148,7 +173,7 @@ agent = Agent(
        goal="Coordinate with OAuth-secured agents",
        backstory="Manages OAuth-authenticated communications",
        llm="gpt-4o",
-        a2a=A2AConfig(
+        a2a=A2AClientConfig(
            endpoint="https://oauth-agent.example.com/.well-known/agent-card.json",
            auth=OAuth2ClientCredentials(
                token_url="https://auth.example.com/oauth/token",
@@ -159,12 +184,12 @@ agent = Agent(
            timeout=120
        )
    )
    ```
  </Tab>

  <Tab title="HTTP Basic">
-    ```python Code
-    from crewai.a2a import A2AConfig
+    ```python http_basic_auth.py lines
+    from crewai.a2a import A2AClientConfig
    from crewai.a2a.auth import HTTPBasicAuth

    agent = Agent(
@@ -172,7 +197,7 @@ agent = Agent(
        goal="Coordinate with basic auth agents",
        backstory="Manages basic authentication communications",
        llm="gpt-4o",
-        a2a=A2AConfig(
+        a2a=A2AClientConfig(
            endpoint="https://basic-agent.example.com/.well-known/agent-card.json",
            auth=HTTPBasicAuth(
                username="your-username",
@@ -181,7 +206,7 @@ agent = Agent(
            timeout=120
        )
    )
    ```
  </Tab>
</Tabs>

@@ -190,7 +215,7 @@ agent = Agent(
Configure multiple A2A agents for delegation by passing a list:

```python Code
-from crewai.a2a import A2AConfig
+from crewai.a2a import A2AClientConfig
from crewai.a2a.auth import BearerTokenAuth

agent = Agent(
@@ -199,11 +224,11 @@ agent = Agent(
    backstory="Expert at delegating to the right specialist",
    llm="gpt-4o",
    a2a=[
-        A2AConfig(
+        A2AClientConfig(
            endpoint="https://research.example.com/.well-known/agent-card.json",
            timeout=120
        ),
-        A2AConfig(
+        A2AClientConfig(
            endpoint="https://data.example.com/.well-known/agent-card.json",
            auth=BearerTokenAuth(token="data-token"),
            timeout=90
@@ -219,7 +244,7 @@ The LLM will automatically choose which A2A agent to delegate to based on the ta
Control how agent connection failures are handled using the `fail_fast` parameter:

```python Code
-from crewai.a2a import A2AConfig
+from crewai.a2a import A2AClientConfig

# Fail immediately on connection errors (default)
agent = Agent(
@@ -227,7 +252,7 @@ agent = Agent(
    goal="Coordinate research tasks",
    backstory="Expert at delegation",
    llm="gpt-4o",
-    a2a=A2AConfig(
+    a2a=A2AClientConfig(
        endpoint="https://research.example.com/.well-known/agent-card.json",
        fail_fast=True
    )
@@ -240,11 +265,11 @@ agent = Agent(
    backstory="Expert at working with available resources",
    llm="gpt-4o",
    a2a=[
-        A2AConfig(
+        A2AClientConfig(
            endpoint="https://primary.example.com/.well-known/agent-card.json",
            fail_fast=False
        ),
-        A2AConfig(
+        A2AClientConfig(
            endpoint="https://backup.example.com/.well-known/agent-card.json",
            fail_fast=False
        )
@@ -263,8 +288,8 @@ Control how your agent receives task status updates from remote A2A agents:

<Tabs>
  <Tab title="Streaming (Default)">
-    ```python Code
-    from crewai.a2a import A2AConfig
+    ```python streaming_config.py lines
+    from crewai.a2a import A2AClientConfig
    from crewai.a2a.updates import StreamingConfig

    agent = Agent(
@@ -272,17 +297,17 @@ agent = Agent(
        goal="Coordinate research tasks",
        backstory="Expert at delegation",
        llm="gpt-4o",
-        a2a=A2AConfig(
+        a2a=A2AClientConfig(
            endpoint="https://research.example.com/.well-known/agent-card.json",
            updates=StreamingConfig()
        )
    )
    ```
  </Tab>

  <Tab title="Polling">
-    ```python Code
-    from crewai.a2a import A2AConfig
+    ```python polling_config.py lines
+    from crewai.a2a import A2AClientConfig
    from crewai.a2a.updates import PollingConfig

    agent = Agent(
@@ -290,7 +315,7 @@ agent = Agent(
        goal="Coordinate research tasks",
        backstory="Expert at delegation",
        llm="gpt-4o",
-        a2a=A2AConfig(
+        a2a=A2AClientConfig(
            endpoint="https://research.example.com/.well-known/agent-card.json",
            updates=PollingConfig(
                interval=2.0,
@@ -299,12 +324,12 @@ agent = Agent(
            )
        )
    )
    ```
  </Tab>

  <Tab title="Push Notifications">
-    ```python Code
-    from crewai.a2a import A2AConfig
+    ```python push_notifications_config.py lines
+    from crewai.a2a import A2AClientConfig
    from crewai.a2a.updates import PushNotificationConfig

    agent = Agent(
@@ -312,19 +337,137 @@ agent = Agent(
        goal="Coordinate research tasks",
        backstory="Expert at delegation",
        llm="gpt-4o",
-        a2a=A2AConfig(
+        a2a=A2AClientConfig(
            endpoint="https://research.example.com/.well-known/agent-card.json",
            updates=PushNotificationConfig(
-                url={base_url}/a2a/callback",
+                url="{base_url}/a2a/callback",
                token="your-validation-token",
                timeout=300.0
            )
        )
    )
    ```
  </Tab>
</Tabs>
## Exposing Agents as A2A Servers

You can expose your CrewAI agents as A2A-compliant servers, allowing other A2A clients to delegate tasks to them.

### Server Configuration

Add an `A2AServerConfig` to your agent to enable server capabilities:

```python a2a_server_agent.py lines
from crewai import Agent
from crewai.a2a import A2AServerConfig

agent = Agent(
    role="Data Analyst",
    goal="Analyze datasets and provide insights",
    backstory="Expert data scientist with statistical analysis skills",
    llm="gpt-4o",
    a2a=A2AServerConfig(url="https://your-server.com")
)
```

### Server Configuration Options

<ParamField path="name" type="str" default="None">
  Human-readable name for the agent. Defaults to the agent's role if not provided.
</ParamField>

<ParamField path="description" type="str" default="None">
  Human-readable description. Defaults to the agent's goal and backstory if not provided.
</ParamField>

<ParamField path="version" type="str" default="1.0.0">
  Version string for the agent card.
</ParamField>

<ParamField path="skills" type="list[AgentSkill]" default="[]">
  List of agent skills. Auto-generated from agent tools if not provided.
</ParamField>

<ParamField path="capabilities" type="AgentCapabilities" default="AgentCapabilities(streaming=True, push_notifications=False)">
  Declaration of optional capabilities supported by the agent.
</ParamField>

<ParamField path="default_input_modes" type="list[str]" default='["text/plain", "application/json"]'>
  Supported input MIME types.
</ParamField>

<ParamField path="default_output_modes" type="list[str]" default='["text/plain", "application/json"]'>
  Supported output MIME types.
</ParamField>

<ParamField path="url" type="str" default="None">
  Preferred endpoint URL. If set, overrides the URL passed to `to_agent_card()`.
</ParamField>

<ParamField path="preferred_transport" type="Literal['JSONRPC', 'GRPC', 'HTTP+JSON']" default="JSONRPC">
  Transport protocol for the preferred endpoint.
</ParamField>

<ParamField path="protocol_version" type="str" default="0.3">
  A2A protocol version this agent supports.
</ParamField>

<ParamField path="provider" type="AgentProvider" default="None">
  Information about the agent's service provider.
</ParamField>

<ParamField path="documentation_url" type="str" default="None">
  URL to the agent's documentation.
</ParamField>

<ParamField path="icon_url" type="str" default="None">
  URL to an icon for the agent.
</ParamField>

<ParamField path="additional_interfaces" type="list[AgentInterface]" default="[]">
  Additional supported interfaces (transport and URL combinations).
</ParamField>

<ParamField path="security" type="list[dict[str, list[str]]]" default="[]">
  Security requirement objects for all agent interactions.
</ParamField>

<ParamField path="security_schemes" type="dict[str, SecurityScheme]" default="{}">
  Security schemes available to authorize requests.
</ParamField>

<ParamField path="supports_authenticated_extended_card" type="bool" default="False">
  Whether the agent provides an extended card to authenticated users.
</ParamField>

<ParamField path="signatures" type="list[AgentCardSignature]" default="[]">
  JSON Web Signatures for the AgentCard.
</ParamField>

### Combined Client and Server

An agent can act as both client and server by providing both configurations:

```python Code
from crewai import Agent
from crewai.a2a import A2AClientConfig, A2AServerConfig

agent = Agent(
    role="Research Coordinator",
    goal="Coordinate research and serve analysis requests",
    backstory="Expert at delegation and analysis",
    llm="gpt-4o",
    a2a=[
        A2AClientConfig(
            endpoint="https://specialist.example.com/.well-known/agent-card.json",
            timeout=120
        ),
        A2AServerConfig(url="https://your-server.com")
    ]
)
```

## Best Practices

<CardGroup cols={2}>
@@ -7,6 +7,10 @@ mode: "wide"

 ## Overview

+<Note>
+  The `@human_feedback` decorator requires **CrewAI version 1.8.0 or higher**. Make sure to update your installation before using this feature.
+</Note>
+
 The `@human_feedback` decorator enables human-in-the-loop (HITL) workflows directly within CrewAI Flows. It allows you to pause flow execution, present output to a human for review, collect their feedback, and optionally route to different listeners based on the feedback outcome.

 This is particularly valuable for:
@@ -11,10 +11,10 @@ Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelli

 CrewAI offers two main approaches for implementing human-in-the-loop workflows:

-| Approach | Best For | Integration |
-|----------|----------|-------------|
-| **Flow-based** (`@human_feedback` decorator) | Local development, console-based review, synchronous workflows | [Human Feedback in Flows](/en/learn/human-feedback-in-flows) |
-| **Webhook-based** (Enterprise) | Production deployments, async workflows, external integrations (Slack, Teams, etc.) | This guide |
+| Approach | Best For | Integration | Version |
+|----------|----------|-------------|---------|
+| **Flow-based** (`@human_feedback` decorator) | Local development, console-based review, synchronous workflows | [Human Feedback in Flows](/en/learn/human-feedback-in-flows) | **1.8.0+** |
+| **Webhook-based** (Enterprise) | Production deployments, async workflows, external integrations (Slack, Teams, etc.) | This guide | - |

 <Tip>
   If you're building flows and want to add human review steps with routing based on feedback, check out the [Human Feedback in Flows](/en/learn/human-feedback-in-flows) guide for the `@human_feedback` decorator.
docs/en/observability/galileo.mdx (new file) | 115
@@ -0,0 +1,115 @@
---
title: Galileo
description: Galileo integration for CrewAI tracing and evaluation
icon: telescope
mode: "wide"
---

## Overview

This guide demonstrates how to integrate **Galileo** with **CrewAI** for comprehensive tracing and Evaluation Engineering. By the end of this guide, you will be able to trace your CrewAI agents, monitor their performance, and evaluate their behaviour with Galileo's observability platform.

> **What is Galileo?** [Galileo](https://galileo.ai) is an AI evaluation and observability platform that delivers end-to-end tracing, evaluation, and monitoring for AI applications. It enables teams to capture ground truth, create robust guardrails, and run systematic experiments with built-in experiment tracking and performance analytics, ensuring reliability, transparency, and continuous improvement across the AI lifecycle.

## Getting started

This tutorial follows the [CrewAI quickstart](/en/quickstart) and shows how to add Galileo's [CrewAIEventListener](https://v2docs.galileo.ai/sdk-api/python/reference/handlers/crewai/handler), an event handler. For more information, see Galileo's [Add Galileo to a CrewAI Application](https://v2docs.galileo.ai/how-to-guides/third-party-integrations/add-galileo-to-crewai/add-galileo-to-crewai) how-to guide.

> **Note**: This tutorial assumes you have completed the [CrewAI quickstart](/en/quickstart). If you want a complete, comprehensive example, see the Galileo [CrewAI sdk-examples repo](https://github.com/rungalileo/sdk-examples/tree/main/python/agent/crew-ai).

### Step 1: Install dependencies

Install the required dependencies for your app. Create a virtual environment using your preferred method, then install the dependencies inside that environment using your preferred tool:

```bash
uv add galileo
```

### Step 2: Add to the .env file from the [CrewAI quickstart](/en/quickstart)

```bash
# Your Galileo API key
GALILEO_API_KEY="your-galileo-api-key"

# Your Galileo project name
GALILEO_PROJECT="your-galileo-project-name"

# The name of the Log stream you want to use for logging
GALILEO_LOG_STREAM="your-galileo-log-stream"
```

### Step 3: Add the Galileo event listener

To enable logging with Galileo, you need to create an instance of the `CrewAIEventListener`. Import the Galileo CrewAI handler package by adding the following code at the top of your `main.py` file:

```python
from galileo.handlers.crewai.handler import CrewAIEventListener
```

At the start of your run function, create the event listener:

```python
def run():
    # Create the event listener
    CrewAIEventListener()
    # The rest of your existing code goes here
```

When you create the listener instance, it is automatically registered with CrewAI.

### Step 4: Run your crew

Run your crew with the CrewAI CLI:

```bash
crewai run
```

### Step 5: View the traces in Galileo

Once your crew has finished, the traces will be flushed and appear in Galileo.

## Understanding the Galileo Integration

Galileo integrates with CrewAI by registering an event listener that captures Crew execution events (e.g., agent actions, tool calls, model responses) and forwards them to Galileo for observability and evaluation.

### Understanding the event listener

Creating a `CrewAIEventListener()` instance is all that's required to enable Galileo for a CrewAI run. When instantiated, the listener:

- Automatically registers itself with CrewAI
- Reads Galileo configuration from environment variables
- Logs all run data to the Galileo project and log stream specified by `GALILEO_PROJECT` and `GALILEO_LOG_STREAM`

No additional configuration or code changes are required.
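Tying the steps together, a hedged sketch of a minimal `main.py` wiring; the crew class and module name are placeholders for whatever the quickstart generated in your project:

```python
# Hedged sketch combining the snippets above. Assumes the quickstart project
# layout and that the GALILEO_* variables are set in your .env file.
from galileo.handlers.crewai.handler import CrewAIEventListener
from my_project.crew import MyProjectCrew  # placeholder for your generated crew class

def run():
    CrewAIEventListener()  # registers itself with CrewAI on instantiation
    MyProjectCrew().crew().kickoff(inputs={"topic": "AI Agents"})

if __name__ == "__main__":
    run()
```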
docs/images/galileo-trace-veiw.png (new binary file, 239 KiB; not shown)
@@ -567,6 +567,10 @@ Fourth method running

 ### Human in the Loop (human feedback)

+<Note>
+  The `@human_feedback` decorator requires **CrewAI version 1.8.0 or higher**.
+</Note>
+
 The `@human_feedback` decorator enables human-in-the-loop workflows that pause flow execution to collect feedback from a human. This is useful for approval gates, quality review, and decision points that require human judgment.

 ```python Code
@@ -107,7 +107,7 @@ There are several places in your CrewAI code where you can specify the model to use

 ## Provider Configuration Examples

 CrewAI supports a wide range of LLM providers, each offering unique features, authentication methods, and model capabilities.
 In this section, you'll find detailed examples that help you select, configure, and optimize the LLM that best fits your project's needs.

 <AccordionGroup>
@@ -153,8 +153,8 @@ CrewAI supports a wide range of LLM providers, each offering unique features
 </Accordion>

 <Accordion title="Meta-Llama">
 Meta's Llama API provides access to Meta's family of large language models.
 The API is available via the [Meta Llama API](https://llama.developer.meta.com?utm_source=partner-crewai&utm_medium=website).
 Set the following environment variables in your `.env` file:

 ```toml Code
@@ -207,11 +207,20 @@ Set the API key in your `.env` file. To get a key, or to find an existing one, check [AI Studio](https://aistudio.google.com/apikey).

 ```toml .env
 # https://ai.google.dev/gemini-api/docs/api-key
+# When using the Gemini API (either of the following)
 GOOGLE_API_KEY=<your-api-key>
 GEMINI_API_KEY=<your-api-key>

+# When using Vertex AI Express mode (API key authentication)
+GOOGLE_GENAI_USE_VERTEXAI=true
+GOOGLE_API_KEY=<your-api-key>
+
+# When using a Vertex AI service account
+GOOGLE_CLOUD_PROJECT=<your-project-id>
+GOOGLE_CLOUD_LOCATION=<location>  # Defaults to us-central1
 ```

-Example usage in your CrewAI project:
+**Basic Usage:**
 ```python Code
 from crewai import LLM

@@ -221,6 +230,34 @@ CrewAI supports a wide range of LLM providers, each offering unique features
 )
 ```

+**Vertex AI Express Mode (API key authentication):**
+
+Vertex AI Express mode lets you use Vertex AI with simple API key authentication instead of service account credentials. It is the quickest way to get started with Vertex AI.
+
+To enable Express mode, set both environment variables in your `.env` file:
+```toml .env
+GOOGLE_GENAI_USE_VERTEXAI=true
+GOOGLE_API_KEY=<your-api-key>
+```
+
+Then use the LLM as usual:
+```python Code
+from crewai import LLM
+
+llm = LLM(
+    model="gemini/gemini-2.0-flash",
+    temperature=0.7
+)
+```
+
+<Info>
+  To get an Express mode API key:
+  - New Google Cloud users: get an [Express mode API key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey)
+  - Existing Google Cloud users: get a [Google Cloud API key bound to a service account](https://cloud.google.com/docs/authentication/api-keys)
+
+  For details, see the [Vertex AI Express mode documentation](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey).
+</Info>
+
 ### Gemini Models

 Google offers a range of powerful models optimized for different use cases.
@@ -476,7 +513,7 @@ CrewAI supports a wide range of LLM providers, each offering unique features

 <Accordion title="Local NVIDIA NIM Deployed using WSL2">

 NVIDIA NIM lets you run powerful LLMs locally on Windows machines through WSL2 (Windows Subsystem for Linux).
 This approach leverages your NVIDIA GPU for private, secure, and cost-effective AI inference without relying on cloud services.
 It's ideal for development, testing, or production environments that need data privacy or offline capability.

@@ -954,4 +991,4 @@ Learn how to get the most out of your LLM configuration:
 llm = LLM(model="openai/gpt-4o")  # 128K tokens
 ```
 </Tab>
 </Tabs>
@@ -128,7 +128,7 @@ Consider the following when deploying a Flow:
 ### CrewAI Enterprise
 The easiest way to deploy a Flow is with CrewAI Enterprise. It handles infrastructure, authentication, and monitoring for you.

-To get started, see the [deployment guide](/ko/enterprise/guides/deploy-crew).
+To get started, see the [deployment guide](/ko/enterprise/guides/deploy-to-amp).

 ```bash
 crewai deploy create
@@ -91,7 +91,7 @@ Deploy quickly without Git: upload a ZIP package of your project
 ## Related Documentation

 <CardGroup cols={3}>
-  <Card title="Deploy Crew" href="/ko/enterprise/guides/deploy-crew" icon="rocket">
+  <Card title="Deploy Crew" href="/ko/enterprise/guides/deploy-to-amp" icon="rocket">
    Deploy a crew from GitHub or a ZIP file
  </Card>
  <Card title="Automation Triggers" href="/ko/enterprise/guides/automation-triggers" icon="trigger">
@@ -79,7 +79,7 @@ Crew Studio lets you build from scratch using natural language and a visual workflow editor
 <Card title="Build Crew" href="/ko/enterprise/guides/build-crew" icon="paintbrush">
   Build your crew.
 </Card>
-<Card title="Deploy Crew" href="/ko/enterprise/guides/deploy-crew" icon="rocket">
+<Card title="Deploy Crew" href="/ko/enterprise/guides/deploy-to-amp" icon="rocket">
   Deploy a crew from GitHub or a ZIP file.
 </Card>
 <Card title="Export React Component" href="/ko/enterprise/guides/react-component-export" icon="download">
@@ -1,305 +0,0 @@
---
title: "Deploy Crew"
description: "Deploying a Crew on CrewAI Enterprise"
icon: "rocket"
mode: "wide"
---

<Note>
  After creating a crew locally or through Crew Studio, the next step is to
  deploy it to the CrewAI AMP platform. This guide covers multiple deployment
  methods so you can choose the approach that best fits your workflow.
</Note>

## Prerequisites

<CardGroup cols={2}>
  <Card title="Deployment-Ready Crew" icon="users">
    You should have a working crew, built locally or created through Crew
    Studio.
  </Card>
  <Card title="GitHub Repository" icon="github">
    Your crew code should live in a GitHub repository (for the GitHub
    integration method).
  </Card>
</CardGroup>

## Option 1: Deploy Using the CrewAI CLI

The CLI provides the fastest way to deploy locally developed crews to the Enterprise platform.

<Steps>
  <Step title="Install the CrewAI CLI">
    If you haven't already, install the CrewAI CLI:

    ```bash
    pip install crewai[tools]
    ```

    <Tip>
      The CLI ships with the core CrewAI package, but the `[tools]` extra installs all deployment dependencies.
    </Tip>

  </Step>

  <Step title="Authenticate with the Enterprise Platform">
    First, you need to authenticate the CLI with the CrewAI AMP platform:

    ```bash
    # If you already have a CrewAI AMP account, or want to create one:
    crewai login
    ```

    When you run the command above, the CLI will:
    1. Display a URL and a unique device code
    2. Open your browser to the authentication page
    3. Ask you to confirm the device
    4. Complete the authentication process

    Once authentication succeeds, you'll see a confirmation message in your terminal!

  </Step>

  <Step title="Create a Deployment">

    From your project directory, run:

    ```bash
    crewai deploy create
    ```

    This command will:
    1. Detect your GitHub repository information
    2. Identify environment variables in your local `.env` file
    3. Securely transfer those variables to the Enterprise platform
    4. Create a new deployment with a unique identifier

    On success, you'll see a message like:
    ```shell
    Deployment created successfully!
    Name: your_project_name
    Deployment ID: 01234567-89ab-cdef-0123-456789abcdef
    Current Status: Deploy Enqueued
    ```

  </Step>

  <Step title="Monitor Deployment Progress">

    Track the deployment status with:

    ```bash
    crewai deploy status
    ```

    For detailed logs of the build process:

    ```bash
    crewai deploy logs
    ```

    <Tip>
      The first deployment usually takes 10-15 minutes because container images are built. Subsequent deployments are much faster.
    </Tip>

  </Step>
</Steps>

## Additional CLI Commands

The CrewAI CLI provides several commands for managing your deployments:

```bash
# List all your deployments
crewai deploy list

# Check deployment status
crewai deploy status

# View deployment logs
crewai deploy logs

# Push updates after code changes
crewai deploy push

# Remove a deployment
crewai deploy remove <deployment_id>
```

## Option 2: Deploy Directly from the Web Interface

You can also deploy crews directly through the CrewAI AMP web interface by connecting your GitHub account. This approach doesn't require using the CLI on your local machine.

<Steps>

  <Step title="Push to GitHub">

    You need to push your crew to a GitHub repository. If you haven't created a crew yet, you can [follow this tutorial](/ko/quickstart).

  </Step>

  <Step title="Connect GitHub to CrewAI AMP">

    1. Log in to [CrewAI AMP](https://app.crewai.com).
    2. Click the "Connect GitHub" button.

    <Frame>
      
    </Frame>

  </Step>

  <Step title="Select the Repository">

    After connecting your GitHub account, you can select which repository to deploy:

    <Frame>
      
    </Frame>

  </Step>

  <Step title="Set the Environment Variables">

    Before deploying, you need to set the environment variables that connect to your LLM provider or other services:

    1. You can add variables individually or in bulk.
    2. Enter environment variables in `KEY=VALUE` format (one per line).

    <Frame>
      
    </Frame>

  </Step>

  <Step title="Deploy Your Crew">

    1. Click the "Deploy" button to start the deployment process.
    2. You can monitor progress via the progress bar.
    3. The first deployment usually takes about 10-15 minutes; subsequent deployments are faster.

    <Frame>
      
    </Frame>

    Once deployment completes, you'll see:
    - Your crew's unique URL
    - A Bearer token to secure your crew API
    - A "Delete" button in case you need to remove the deployment

  </Step>

</Steps>

## ⚠️ Environment Variable Security Requirements

<Warning>
  **Important**: CrewAI AMP enforces security restrictions on environment
  variable names; deployments can fail if they are not followed.
</Warning>

### Blocked Environment Variable Patterns

For security reasons, the following environment variable naming patterns are **automatically filtered** and can cause deployment problems:

**Blocked patterns:**

- Variables ending in `_TOKEN` (e.g., `MY_API_TOKEN`)
- Variables ending in `_PASSWORD` (e.g., `DB_PASSWORD`)
- Variables ending in `_SECRET` (e.g., `API_SECRET`)
- Variables ending in `_KEY` in certain contexts

**Specific blocked variables:**

- `GITHUB_USER`, `GITHUB_TOKEN`
- `AWS_REGION`, `AWS_DEFAULT_REGION`
- Various internal CrewAI system variables

### Allowed Exceptions

Some variables are explicitly allowed even though they match blocked patterns:

- `AZURE_AD_TOKEN`
- `AZURE_OPENAI_AD_TOKEN`
- `ENTERPRISE_ACTION_TOKEN`
- `CREWAI_ENTEPRISE_TOOLS_TOKEN`

### How to Fix Naming Issues

If your deployment fails due to environment variable restrictions:

```bash
# ❌ These names will cause deployment failures
OPENAI_TOKEN=sk-...
DATABASE_PASSWORD=mypassword
API_SECRET=secret123

# ✅ Use naming patterns like these instead
OPENAI_API_KEY=sk-...
DATABASE_CREDENTIALS=mypassword
API_CONFIG=secret123
```

### Best Practices

1. **Use standard naming conventions**: `PROVIDER_API_KEY` instead of `PROVIDER_TOKEN`
2. **Test locally first**: Make sure your crew works with the renamed variables
3. **Update your code**: Change every reference to the old variable names
4. **Document the changes**: Keep a record of renamed variables for your team

<Tip>
  If a deployment fails with a cryptic environment variable error, check your
  variable names against these patterns first.
</Tip>

### Interacting with Your Deployed Crew

Once deployment completes, you can access your crew through:

1. **REST API**: The platform generates a unique HTTPS endpoint with these key routes:

   - `/inputs`: Lists the required input parameters
   - `/kickoff`: Starts an execution with the provided inputs
   - `/status/{kickoff_id}`: Checks execution status

2. **Web interface**: Visit [app.crewai.com](https://app.crewai.com) to see:
   - **Status tab**: Deployment info, API endpoint details, and the authentication token
   - **Run tab**: A visual representation of your crew's structure
   - **Executions tab**: History of all executions
   - **Metrics tab**: Performance analytics
   - **Traces tab**: Detailed execution insights

### Triggering an Execution

From the Enterprise dashboard you can:

1. Click your crew's name to open its details
2. Select "Trigger Crew" in the management interface
3. Enter the required inputs in the modal that appears
4. Monitor progress as the execution moves through the pipeline

### Monitoring and Analytics

The Enterprise platform provides comprehensive observability features:

- **Execution management**: Track active and completed executions
- **Traces**: Detailed breakdown of each execution
- **Metrics**: Token usage, execution times, and costs
- **Timeline view**: Visual representation of task sequences

### Advanced Features

The Enterprise platform also offers:

- **Environment variable management**: Securely store and manage API keys
- **LLM connections**: Configure integrations with various LLM providers
- **Custom Tools Repository**: Create, share, and install tools
- **Crew Studio**: Build crews through a chat interface without writing code

<Card
  title="Need Help?"
  icon="headset"
  href="mailto:support@crewai.com"
>
  Contact our support team for help with deployment issues or questions about
  the Enterprise platform.
</Card>
438 docs/ko/enterprise/guides/deploy-to-amp.mdx Normal file
@@ -0,0 +1,438 @@
---
title: "Deploy to AMP"
description: "Deploy your Crew or Flow to CrewAI AMP"
icon: "rocket"
mode: "wide"
---

<Note>
  After creating a Crew or Flow locally or through Crew Studio, the next step
  is to deploy it to the CrewAI AMP platform. This guide covers multiple
  deployment methods so you can choose the approach that best fits your
  workflow.
</Note>

## Prerequisites

<CardGroup cols={2}>
  <Card title="Deployment-Ready Project" icon="check-circle">
    You should have a Crew or Flow that runs successfully on your machine.
    Follow the [deployment preparation guide](/ko/enterprise/guides/prepare-for-deployment) to verify your project structure.
  </Card>
  <Card title="GitHub Repository" icon="github">
    Your code should live in a GitHub repository (for the GitHub integration method).
  </Card>
</CardGroup>

<Info>
  **Crews vs Flows**: Both project types deploy to CrewAI AMP as "automations".
  The deployment process is the same, but their project structures differ.
  See [Prepare for Deployment](/ko/enterprise/guides/prepare-for-deployment) for details.
</Info>

## Option 1: Deploy Using the CrewAI CLI

The CLI provides the fastest way to deploy locally developed Crews or Flows to the AMP platform.
It automatically detects your project type from `pyproject.toml` and builds accordingly.

<Steps>
  <Step title="Install the CrewAI CLI">
    If you haven't already, install the CrewAI CLI:

    ```bash
    pip install crewai[tools]
    ```

    <Tip>
      The CLI ships with the core CrewAI package, but the `[tools]` extra installs all deployment dependencies.
    </Tip>

  </Step>

  <Step title="Authenticate with the Enterprise Platform">
    First, you need to authenticate the CLI with the CrewAI AMP platform:

    ```bash
    # If you already have a CrewAI AMP account, or want to create one:
    crewai login
    ```

    When you run the command above, the CLI will:
    1. Display a URL and a unique device code
    2. Open your browser to the authentication page
    3. Ask you to confirm the device
    4. Complete the authentication process

    Once authentication succeeds, you'll see a confirmation message in your terminal!

  </Step>

  <Step title="Create a Deployment">

    From your project directory, run:

    ```bash
    crewai deploy create
    ```

    This command will:
    1. Detect your GitHub repository information
    2. Identify environment variables in your local `.env` file
    3. Securely transfer those variables to the Enterprise platform
    4. Create a new deployment with a unique identifier

    On success, you'll see a message like:
    ```shell
    Deployment created successfully!
    Name: your_project_name
    Deployment ID: 01234567-89ab-cdef-0123-456789abcdef
    Current Status: Deploy Enqueued
    ```

  </Step>

  <Step title="Monitor Deployment Progress">

    Track the deployment status with:

    ```bash
    crewai deploy status
    ```

    For detailed logs of the build process:

    ```bash
    crewai deploy logs
    ```

    <Tip>
      The first deployment usually takes 10-15 minutes because container images are built. Subsequent deployments are much faster.
    </Tip>

  </Step>
</Steps>

## Additional CLI Commands

The CrewAI CLI provides several commands for managing your deployments:

```bash
# List all your deployments
crewai deploy list

# Check deployment status
crewai deploy status

# View deployment logs
crewai deploy logs

# Push updates after code changes
crewai deploy push

# Remove a deployment
crewai deploy remove <deployment_id>
```

## Option 2: Deploy Directly from the Web Interface

You can also deploy your Crews or Flows directly through the CrewAI AMP web interface by connecting your GitHub account. This approach doesn't require using the CLI on your local machine. The platform automatically detects your project type and handles the build accordingly.

<Steps>

  <Step title="Push to GitHub">

    You need to push your Crew to a GitHub repository. If you haven't created a Crew yet, you can [follow this tutorial](/ko/quickstart).

  </Step>

  <Step title="Connect GitHub to CrewAI AMP">

    1. Log in to [CrewAI AMP](https://app.crewai.com).
    2. Click the "Connect GitHub" button.

    <Frame>
      
    </Frame>

  </Step>

  <Step title="Select the Repository">

    After connecting your GitHub account, you can select which repository to deploy:

    <Frame>
      
    </Frame>

  </Step>

  <Step title="Set the Environment Variables">

    Before deploying, you need to set the environment variables that connect to your LLM provider or other services:

    1. You can add variables individually or in bulk.
    2. Enter environment variables in `KEY=VALUE` format (one per line).

    <Frame>
      
    </Frame>

  </Step>

  <Step title="Deploy Your Crew">

    1. Click the "Deploy" button to start the deployment process.
    2. You can monitor progress via the progress bar.
    3. The first deployment usually takes about 10-15 minutes; subsequent deployments are faster.

    <Frame>
      
    </Frame>

    Once deployment completes, you'll see:
    - Your Crew's unique URL
    - A Bearer token to secure your Crew API
    - A "Delete" button in case you need to remove the deployment

  </Step>

</Steps>

## Option 3: Redeploy via the API (CI/CD Integration)

For automated deployments in CI/CD pipelines, you can use the CrewAI API to trigger redeployments of existing crews. This is particularly useful for GitHub Actions, Jenkins, or other automation workflows.

<Steps>
  <Step title="Get Your Personal Access Token">

    Generate an API token from your CrewAI AMP account settings:

    1. Go to [app.crewai.com](https://app.crewai.com)
    2. Click **Settings** → **Account** → **Personal Access Token**
    3. Generate a new token and copy it somewhere safe
    4. Store this token as a secret in your CI/CD system
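
    For example, if your pipeline runs on GitHub Actions, a minimal sketch using the GitHub CLI might look like this; the secret names match the workflow example later in this guide, and the values you paste are your own:

    ```bash
    # Store the token and automation UUID as repository secrets
    # (assumes the GitHub CLI `gh` is installed and authenticated).
    gh secret set CREWAI_PAT
    gh secret set CREWAI_AUTOMATION_UUID
    ```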

  </Step>

  <Step title="Find Your Automation UUID">

    Locate the unique identifier of your deployed crew:

    1. Go to **Automations** in your CrewAI AMP dashboard
    2. Select your existing automation/crew
    3. Click **Additional Details**
    4. Copy the **UUID** - this identifies your specific crew deployment

  </Step>

  <Step title="Trigger a Redeployment via the API">

    Use the Deploy API endpoint to trigger a redeployment:

    ```bash
    curl -i -X POST \
      -H "Authorization: Bearer YOUR_PERSONAL_ACCESS_TOKEN" \
      https://app.crewai.com/crewai_plus/api/v1/crews/YOUR-AUTOMATION-UUID/deploy

    # HTTP/2 200
    # content-type: application/json
    #
    # {
    #   "uuid": "your-automation-uuid",
    #   "status": "Deploy Enqueued",
    #   "public_url": "https://your-crew-deployment.crewai.com",
    #   "token": "your-bearer-token"
    # }
    ```

    <Info>
      For automations originally created from a Git connection, the API automatically pulls the latest changes from your repository before redeploying.
    </Info>

  </Step>

  <Step title="GitHub Actions Integration Example">

    Here is an example GitHub Actions workflow with more complex deployment triggers:

    ```yaml
    name: Deploy CrewAI Automation

    on:
      push:
        branches: [ main ]
      pull_request:
        types: [ labeled ]
      release:
        types: [ published ]

    jobs:
      deploy:
        runs-on: ubuntu-latest
        if: |
          (github.event_name == 'push' && github.ref == 'refs/heads/main') ||
          (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'deploy')) ||
          (github.event_name == 'release')
        steps:
          - name: Trigger CrewAI Redeployment
            run: |
              curl -X POST \
                -H "Authorization: Bearer ${{ secrets.CREWAI_PAT }}" \
                https://app.crewai.com/crewai_plus/api/v1/crews/${{ secrets.CREWAI_AUTOMATION_UUID }}/deploy
    ```

    <Tip>
      Add `CREWAI_PAT` and `CREWAI_AUTOMATION_UUID` as repository secrets. For PR deployments, add a "deploy" label to trigger the workflow.
    </Tip>

  </Step>

</Steps>

## Interacting with Your Deployed Automation

Once deployment completes, you can access your crew through:

1. **REST API**: The platform generates a unique HTTPS endpoint with these key routes (see the sketch after this list):

   - `/inputs`: Lists the required input parameters
   - `/kickoff`: Starts an execution with the provided inputs
   - `/status/{kickoff_id}`: Checks execution status

2. **Web interface**: Visit [app.crewai.com](https://app.crewai.com) to see:
   - **Status tab**: Deployment info, API endpoint details, and the authentication token
   - **Run tab**: A visual representation of your Crew's structure
   - **Executions tab**: History of all executions
   - **Metrics tab**: Performance analytics
   - **Traces tab**: Detailed execution insights
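
As a rough illustration of the REST routes above, the calls might look like this; the base URL and Bearer token are the placeholders shown on your deployment's Status tab, and the exact input payload is an assumption, so check what `/inputs` reports for your automation first:

```bash
# Kick off a run (the {"inputs": {...}} shape is an assumption; check /inputs)
curl -X POST https://your-crew-deployment.crewai.com/kickoff \
  -H "Authorization: Bearer YOUR_BEARER_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"inputs": {"topic": "AI in Healthcare"}}'

# Poll the execution status with the kickoff_id returned by the call above
curl https://your-crew-deployment.crewai.com/status/YOUR_KICKOFF_ID \
  -H "Authorization: Bearer YOUR_BEARER_TOKEN"
```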

### Triggering an Execution

From the Enterprise dashboard you can:

1. Click your Crew's name to open its details
2. Select "Trigger Crew" in the management interface
3. Enter the required inputs in the modal that appears
4. Monitor progress as the execution moves through the pipeline

### Monitoring and Analytics

The Enterprise platform provides comprehensive observability features:

- **Execution management**: Track active and completed executions
- **Traces**: Detailed breakdown of each execution
- **Metrics**: Token usage, execution times, and costs
- **Timeline view**: Visual representation of task sequences

### Advanced Features

The Enterprise platform also offers:

- **Environment variable management**: Securely store and manage API keys
- **LLM connections**: Configure integrations with various LLM providers
- **Custom Tools Repository**: Create, share, and install tools
- **Crew Studio**: Build crews through a chat interface without writing code

## Troubleshooting Deployment Failures

If your deployment fails, check these common issues:

### Build Failures

#### Missing uv.lock File

**Symptom**: Build fails early with dependency resolution errors

**Solution**: Generate and commit the lock file:

```bash
uv lock
git add uv.lock
git commit -m "Add uv.lock for deployment"
git push
```

<Warning>
  The `uv.lock` file is required for every deployment. Without it, the
  platform cannot install your dependencies reliably.
</Warning>

#### Incorrect Project Structure

**Symptom**: "Could not find entry point" or "Module not found" errors

**Solution**: Verify that your project matches the expected structure:

- **Both Crews and Flows**: The entry point must live at `src/project_name/main.py`
- **Crews**: Use a `run()` function as the entry point
- **Flows**: Use a `kickoff()` function as the entry point

See [Prepare for Deployment](/ko/enterprise/guides/prepare-for-deployment) for detailed structure diagrams.

#### Missing CrewBase Decorator

**Symptom**: "Crew not found", "Config not found", or agent/task configuration errors

**Solution**: Make sure **every** crew class uses the `@CrewBase` decorator:

```python
from crewai.project import CrewBase, agent, crew, task

@CrewBase  # This decorator is required
class YourCrew():
    """Crew description"""

    @agent
    def my_agent(self) -> Agent:
        return Agent(
            config=self.agents_config['my_agent'],  # type: ignore[index]
            verbose=True
        )

    # ... rest of the crew definition
```

<Info>
  This applies both to standalone Crews and to crews embedded inside Flow
  projects. Every crew class needs the decorator.
</Info>

#### Wrong pyproject.toml Type

**Symptom**: Build succeeds but fails at runtime, or behaves unexpectedly

**Solution**: Make sure the `[tool.crewai]` section matches your project type:

```toml
# For Crew projects:
[tool.crewai]
type = "crew"

# For Flow projects:
[tool.crewai]
type = "flow"
```

### Runtime Failures

#### LLM Connection Failures

**Symptom**: API key errors, "model not found", or authentication failures

**Solution**:
1. Verify your LLM provider's API key is set correctly in the environment variables
2. Check that the environment variable names match what your code expects
3. Test locally with the same environment variables before deploying
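
For example, a quick local check before deploying (assuming an OpenAI-backed crew; substitute your own provider's variable):

```bash
# Run the crew locally with the same key the deployment will use
export OPENAI_API_KEY=sk-...
crewai run
```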

#### Crew Execution Errors

**Symptom**: The crew starts but fails during execution

**Solution**:
1. Check the execution logs in the AMP dashboard (Traces tab)
2. Verify that every tool has its required API keys configured
3. Check that the agent configuration in `agents.yaml` is valid
4. Check `tasks.yaml` for syntax errors in the task configuration
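
For reference, a minimal, hypothetical `agents.yaml` entry is sketched below; the agent name and wording are placeholders, but the `role`/`goal`/`backstory` keys follow the standard CrewAI config layout:

```yaml
# agents.yaml - keys must match the names referenced in your crew class
my_agent:
  role: "Research Analyst"
  goal: "Find and summarize recent developments in {topic}"
  backstory: "An experienced analyst with an eye for reliable sources."
```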

<Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
  Contact our support team for help with deployment issues or questions about the AMP platform.
</Card>
305 docs/ko/enterprise/guides/prepare-for-deployment.mdx Normal file
@@ -0,0 +1,305 @@
---
title: "Prepare for Deployment"
description: "Make sure your Crew or Flow is ready to deploy to CrewAI AMP"
icon: "clipboard-check"
mode: "wide"
---

<Note>
  Before deploying to CrewAI AMP, it is important to verify that your project
  is structured correctly. Both Crews and Flows can be deployed as
  "automations", but they have different project structures and requirements
  that must be met for a successful deployment.
</Note>

## Understanding Automations

In CrewAI AMP, **automations** is the umbrella term for deployable agentic AI projects. An automation can be either:

- **Crew**: A standalone team of AI agents that work on tasks together
- **Flow**: An orchestrated workflow that can combine multiple crews, direct LLM calls, and procedural logic

Understanding which type you are deploying is essential, because the project structures and entry points differ.

## Crews vs Flows: Key Differences

<CardGroup cols={2}>
  <Card title="Crew Projects" icon="users">
    A standalone AI agent team with a `crew.py` that defines agents and tasks. Suited to focused, collaborative work.
  </Card>
  <Card title="Flow Projects" icon="diagram-project">
    An orchestrated workflow with embedded crews in a `crews/` folder. Suited to complex, multi-stage processes.
  </Card>
</CardGroup>

| Aspect | Crew | Flow |
|------|------|------|
| **Project structure** | `src/project_name/` with `crew.py` | `src/project_name/` with a `crews/` folder |
| **Main logic location** | `src/project_name/crew.py` | `src/project_name/main.py` (Flow class) |
| **Entry point function** | `run()` in `main.py` | `kickoff()` in `main.py` |
| **pyproject.toml type** | `type = "crew"` | `type = "flow"` |
| **CLI creation command** | `crewai create crew name` | `crewai create flow name` |
| **Config location** | `src/project_name/config/` | `src/project_name/crews/crew_name/config/` |
| **Can contain other crews** | No | Yes (inside the `crews/` folder) |

## Project Structure Reference

### Crew Project Structure

Running `crewai create crew my_crew` gives you this structure:

```
my_crew/
├── .gitignore
├── pyproject.toml        # must be type = "crew"
├── README.md
├── .env
├── uv.lock               # required for deployment
└── src/
    └── my_crew/
        ├── __init__.py
        ├── main.py       # entry point with a run() function
        ├── crew.py       # Crew class with the @CrewBase decorator
        ├── tools/
        │   ├── custom_tool.py
        │   └── __init__.py
        └── config/
            ├── agents.yaml   # agent definitions
            └── tasks.yaml    # task definitions
```

<Warning>
  The nested `src/project_name/` structure is critical for Crews. Placing
  files at the wrong level is a common cause of deployment failures.
</Warning>

### Flow Project Structure

Running `crewai create flow my_flow` gives you this structure:

```
my_flow/
├── .gitignore
├── pyproject.toml        # must be type = "flow"
├── README.md
├── .env
├── uv.lock               # required for deployment
└── src/
    └── my_flow/
        ├── __init__.py
        ├── main.py       # entry point with a kickoff() function + Flow class
        ├── crews/        # folder for embedded crews
        │   └── poem_crew/
        │       ├── __init__.py
        │       ├── poem_crew.py  # Crew with the @CrewBase decorator
        │       └── config/
        │           ├── agents.yaml
        │           └── tasks.yaml
        └── tools/
            ├── __init__.py
            └── custom_tool.py
```

<Info>
  Both Crews and Flows use the `src/project_name/` structure. The key
  difference is that Flows have a `crews/` folder for embedded crews, while
  Crews have `crew.py` directly in the project folder.
</Info>

## Pre-Deployment Checklist

Use this checklist to confirm your project is ready to deploy.

### 1. Verify the pyproject.toml Configuration

Your `pyproject.toml` must contain the correct `[tool.crewai]` section:

<Tabs>
  <Tab title="For Crews">
    ```toml
    [tool.crewai]
    type = "crew"
    ```
  </Tab>
  <Tab title="For Flows">
    ```toml
    [tool.crewai]
    type = "flow"
    ```
  </Tab>
</Tabs>

<Warning>
  If the `type` does not match your project structure, the build will fail or
  the automation will not run correctly.
</Warning>

### 2. Confirm the uv.lock File Exists

CrewAI uses `uv` for dependency management. The `uv.lock` file guarantees reproducible builds and is **required** for deployment.

```bash
# Create or update the lock file
uv lock

# Check that it exists
ls -la uv.lock
```

If the file does not exist, run `uv lock` and commit it to your repository:

```bash
uv lock
git add uv.lock
git commit -m "Add uv.lock for deployment"
git push
```

### 3. Check CrewBase Decorator Usage

**Every crew class must use the `@CrewBase` decorator.** This applies to:

- Standalone crew projects
- Crews embedded inside Flow projects

```python
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from crewai.agents.agent_builder.base_agent import BaseAgent
from typing import List

@CrewBase  # This decorator is required
class MyCrew():
    """Description of my crew"""

    agents: List[BaseAgent]
    tasks: List[Task]

    @agent
    def my_agent(self) -> Agent:
        return Agent(
            config=self.agents_config['my_agent'],  # type: ignore[index]
            verbose=True
        )

    @task
    def my_task(self) -> Task:
        return Task(
            config=self.tasks_config['my_task']  # type: ignore[index]
        )

    @crew
    def crew(self) -> Crew:
        return Crew(
            agents=self.agents,
            tasks=self.tasks,
            process=Process.sequential,
            verbose=True,
        )
```

<Warning>
  If you forget the `@CrewBase` decorator, deployment fails with errors about
  missing agent or task configuration.
</Warning>

### 4. Verify the Project Entry Point

Both Crews and Flows have their entry point at `src/project_name/main.py`:

<Tabs>
  <Tab title="For Crews">
    The entry point uses a `run()` function:

    ```python
    # src/my_crew/main.py
    from my_crew.crew import MyCrew

    def run():
        """Run the crew."""
        inputs = {'topic': 'AI in Healthcare'}
        result = MyCrew().crew().kickoff(inputs=inputs)
        return result

    if __name__ == "__main__":
        run()
    ```
  </Tab>
  <Tab title="For Flows">
    The entry point uses a `kickoff()` function alongside the Flow class:

    ```python
    # src/my_flow/main.py
    from crewai.flow import Flow, listen, start
    from my_flow.crews.poem_crew.poem_crew import PoemCrew

    class MyFlow(Flow):
        @start()
        def begin(self):
            # Flow logic
            result = PoemCrew().crew().kickoff(inputs={...})
            return result

    def kickoff():
        """Run the flow."""
        MyFlow().kickoff()

    if __name__ == "__main__":
        kickoff()
    ```
  </Tab>
</Tabs>

### 5. Prepare Your Environment Variables

Before deploying, you should have the following ready (a sample `.env` sketch follows this list):

1. **LLM API keys** (OpenAI, Anthropic, Google, etc.)
2. **Tool API keys** - if you use external tools (Serper, etc.)
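
A minimal, hypothetical `.env` for a crew that uses OpenAI and the Serper search tool might look like this; the variable names are the conventional ones, but check what your own code reads:

```bash
# .env - values are placeholders
OPENAI_API_KEY=sk-...
SERPER_API_KEY=your-serper-key
```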

<Tip>
  Test your project locally with the same environment variables before
  deploying, so you catch configuration problems early.
</Tip>

## Quick Validation Commands

Run these from your project root to quickly check your setup:

```bash
# 1. Check the project type in pyproject.toml
grep -A2 "\[tool.crewai\]" pyproject.toml

# 2. Confirm uv.lock exists
ls -la uv.lock || echo "ERROR: uv.lock is missing! Run 'uv lock'"

# 3. Confirm the src/ structure exists
ls -la src/*/main.py 2>/dev/null || echo "No main.py found under src/"

# 4. For Crews - confirm crew.py exists
ls -la src/*/crew.py 2>/dev/null || echo "No crew.py (expected for Crews)"

# 5. For Flows - confirm the crews/ folder exists
ls -la src/*/crews/ 2>/dev/null || echo "No crews/ folder (expected for Flows)"

# 6. Check CrewBase usage
grep -r "@CrewBase" . --include="*.py"
```

## Common Setup Mistakes

| Mistake | Symptom | Fix |
|------|------|----------|
| Missing `uv.lock` | Build fails during dependency resolution | Run `uv lock` and commit |
| Wrong `type` in pyproject.toml | Build succeeds but runtime fails | Change to the correct type |
| Missing `@CrewBase` decorator | "Config not found" errors | Add the decorator to every crew class |
| Files at the root instead of `src/` | Entry point not found | Move them into `src/project_name/` |
| Missing `run()` or `kickoff()` | The automation cannot start | Add the correct entry function |

## Next Steps

Once your project passes every checklist item, you are ready to deploy:

<Card title="Deploy to AMP" icon="rocket" href="/ko/enterprise/guides/deploy-to-amp">
  Follow the deployment guide to deploy your Crew or Flow to CrewAI AMP using
  the CLI, the web interface, or a CI/CD integration.
</Card>
@@ -79,7 +79,7 @@ CrewAI AMP combines the power of the open-source framework with production deployment,

<Card
  title="Deploy a Crew"
  icon="rocket"
-  href="/ko/enterprise/guides/deploy-crew"
+  href="/ko/enterprise/guides/deploy-to-amp"
>
  Deploy a Crew
</Card>

@@ -96,4 +96,4 @@ CrewAI AMP combines the power of the open-source framework with production deployment,

  </Step>
</Steps>

-For detailed guidance, check the [deployment guide](/ko/enterprise/guides/deploy-crew) or click the button below to get started.
+For detailed guidance, check the [deployment guide](/ko/enterprise/guides/deploy-to-amp) or click the button below to get started.
@@ -7,6 +7,10 @@ mode: "wide"

## Overview

<Note>
  The `@human_feedback` decorator requires **CrewAI version 1.8.0 or later**. Update your installation before using this feature.
</Note>

The `@human_feedback` decorator enables human-in-the-loop (HITL) workflows directly inside a CrewAI Flow. It lets you pause Flow execution, present output to a human for review, collect their feedback, and optionally route to different listeners based on the outcome of that feedback.

This is particularly useful when:
@@ -5,9 +5,22 @@ icon: "user-check"
mode: "wide"
---

-Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelligence with human expertise to strengthen decision-making and improve task outcomes. This guide walks you through implementing HITL within CrewAI.
+Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelligence with human expertise to strengthen decision-making and improve task outcomes. CrewAI offers several ways to implement HITL, depending on your needs.

-## Setting Up a HITL Workflow
+## Choosing a HITL Approach

CrewAI provides two main approaches for implementing human-in-the-loop workflows:

| Approach | Best for | Integration | Version |
|----------|----------|-------------|---------|
| **Flow-based** (`@human_feedback` decorator) | Local development, console-based review, synchronous workflows | [Human Feedback in Flows](/ko/learn/human-feedback-in-flows) | **1.8.0+** |
| **Webhook-based** (Enterprise) | Production deployments, asynchronous workflows, external integrations (Slack, Teams, etc.) | This guide | - |

<Tip>
  If you are building a Flow and want to add a human review step that routes based on feedback, see the [Human Feedback in Flows](/ko/learn/human-feedback-in-flows) guide covering the `@human_feedback` decorator.
</Tip>

## Setting Up a Webhook-Based HITL Workflow

<Steps>
  <Step title="Configure the Task">
115 docs/ko/observability/galileo.mdx Normal file
@@ -0,0 +1,115 @@
---
title: Galileo
description: Galileo integration for CrewAI tracing and evaluation
icon: telescope
mode: "wide"
---

## Overview

This guide shows how to integrate **Galileo** with **CrewAI** for comprehensive tracing and evaluation engineering.
By the end of this guide, you will be able to trace your CrewAI agents, monitor their performance, and evaluate their behavior with Galileo's observability platform.

> **What is Galileo?** [Galileo](https://galileo.ai/) is an AI evaluation and observability platform
> that provides end-to-end tracing, evaluation, and monitoring for AI applications. It lets teams
> capture ground truth, build robust guardrails, and run systematic experiments, with built-in
> experiment tracking and performance analytics that ensure reliability, transparency, and
> continuous improvement across the AI lifecycle.

## Getting Started

This tutorial follows the [CrewAI quickstart](/ko/quickstart.mdx) and shows how to add
Galileo's [CrewAIEventListener](https://v2docs.galileo.ai/sdk-api/python/reference/handlers/crewai/handler) event handler.
For more details, see the Galileo how-to guide
[Add Galileo to your CrewAI application](https://v2docs.galileo.ai/how-to-guides/third-party-integrations/add-galileo-to-crewai/add-galileo-to-crewai).

> **Note** This tutorial assumes you have completed the [CrewAI quickstart](/ko/quickstart.mdx).
> For a complete, comprehensive example, see the Galileo
> [CrewAI SDK examples repository](https://github.com/rungalileo/sdk-examples/tree/main/python/agent/crew-ai).

### Step 1: Install Dependencies

Install the dependencies your app needs. Create a virtual environment using whichever method you
prefer, then install the dependencies inside that environment with your preferred tool:

```bash
uv add galileo
```

### Step 2: Add the Following to the .env File from the [CrewAI Quickstart](/ko/quickstart.mdx)

```bash
# Your Galileo API key
GALILEO_API_KEY="your-galileo-api-key"

# Your Galileo project name
GALILEO_PROJECT="your-galileo-project-name"

# The name of the Log stream you want to use for logging
GALILEO_LOG_STREAM="your-galileo-log-stream"
```

### Step 3: Add the Galileo Event Listener

To enable logging to Galileo, you need to create an instance of `CrewAIEventListener`.
Import the Galileo CrewAI handler package by adding the following to the top of your main.py file:

```python
from galileo.handlers.crewai.handler import CrewAIEventListener
```

Then create the event listener at the start of your run function:

```python
def run():
    # Create the event listener
    CrewAIEventListener()
    # The rest of your existing code goes here
```

Once the listener instance is created, it is automatically registered with CrewAI.
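
Putting the pieces together, a full `main.py` might look like the sketch below; it assumes the quickstart's `latest_ai_development` project layout, so adjust the module and class names to match your own crew:

```python
# main.py - a sketch assuming the quickstart project layout
from galileo.handlers.crewai.handler import CrewAIEventListener
from latest_ai_development.crew import LatestAiDevelopment

def run():
    # Register Galileo tracing before kicking off the crew
    CrewAIEventListener()
    inputs = {"topic": "AI LLMs"}
    LatestAiDevelopment().crew().kickoff(inputs=inputs)
```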

### Step 4: Run Your Crew Agent

Run your crew agent using the CrewAI CLI:

```bash
crewai run
```

### Step 5: View the Traces in Galileo

Once your crew agent finishes, the traces are flushed and appear in Galileo.



## Understanding the Galileo Integration

Galileo integrates with CrewAI by registering an event listener that captures
crew execution events (such as agent actions, tool calls, and model responses)
and forwards them to Galileo for observability and evaluation.

### Understanding the Event Listener

Creating a `CrewAIEventListener()` instance is all that is needed to enable
Galileo for your CrewAI runs. Once instantiated, the listener:

- Automatically registers itself with CrewAI.
- Reads the Galileo configuration from environment variables.
- Logs all run data to the Galileo project and Log stream specified by
  `GALILEO_PROJECT` and `GALILEO_LOG_STREAM`.

No additional configuration or code changes are required.
@@ -309,6 +309,10 @@ When you run this Flow, the output will differ depending on the random boolean value

### Human in the Loop (human feedback)

<Note>
  The `@human_feedback` decorator requires **CrewAI version 1.8.0 or later**.
</Note>

The `@human_feedback` decorator enables human-in-the-loop workflows by pausing flow execution to collect feedback from a human. This is useful for approval gates, quality review, and decision points that require human judgment.

```python Code
@@ -79,7 +79,7 @@ There are different places in CrewAI code where you can specify the model

# Advanced configuration with detailed parameters
llm = LLM(
    model="openai/gpt-4",
    temperature=0.8,
    max_tokens=150,
    top_p=0.9,
@@ -207,11 +207,20 @@ In this section, you'll find detailed examples that help you select, configure, and

Set your API key in your `.env` file. If you need a key, or need to find an existing one, check [AI Studio](https://aistudio.google.com/apikey).

```toml .env
# https://ai.google.dev/gemini-api/docs/api-key
# For the Gemini API (one of the following)
GOOGLE_API_KEY=<your-api-key>
GEMINI_API_KEY=<your-api-key>

# For Vertex AI Express mode (API key authentication)
GOOGLE_GENAI_USE_VERTEXAI=true
GOOGLE_API_KEY=<your-api-key>

# For Vertex AI with a service account
GOOGLE_CLOUD_PROJECT=<your-project-id>
GOOGLE_CLOUD_LOCATION=<location> # Default: us-central1
```

-Example usage in your CrewAI project:
+**Basic Usage:**
```python Code
from crewai import LLM

@@ -221,6 +230,34 @@ In this section, you'll find detailed examples that help you select, configure, and
)
```

**Vertex AI Express Mode (API Key Authentication):**

Vertex AI Express mode lets you use Vertex AI with simple API key authentication instead of service account credentials. It is the fastest way to get started with Vertex AI.

To enable Express mode, set both environment variables in your `.env` file:
```toml .env
GOOGLE_GENAI_USE_VERTEXAI=true
GOOGLE_API_KEY=<your-api-key>
```

Then use the LLM as usual:
```python Code
from crewai import LLM

llm = LLM(
    model="gemini/gemini-2.0-flash",
    temperature=0.7
)
```

<Info>
  To get an Express mode API key:
  - New Google Cloud users: get an [Express mode API key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey)
  - Existing Google Cloud users: get a [Google Cloud API key bound to a service account](https://cloud.google.com/docs/authentication/api-keys)

  For more details, see the [Vertex AI Express mode documentation](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey).
</Info>

### Gemini Models

Google offers a range of powerful models optimized for different use cases.
@@ -823,7 +860,7 @@ Learn how to get the most out of your LLM configuration:

  Remember to regularly monitor your token usage and adjust your settings to optimize cost and performance.
  </Info>
</Accordion>

<Accordion title="Drop Additional Parameters">
  CrewAI uses LiteLLM internally for LLM calls, which lets you drop additional parameters that are unnecessary for your use case. This can simplify your code and reduce the complexity of your LLM configuration.
  For example, if you don't need to send the <code>stop</code> parameter, simply omit it from your LLM call:

@@ -882,4 +919,4 @@ Learn how to get the most out of your LLM configuration:

llm = LLM(model="openai/gpt-4o") # 128K tokens
```
</Tab>
</Tabs>
</Tabs>
@@ -128,7 +128,7 @@ Consider the following when deploying your Flow:

### CrewAI Enterprise
The easiest way to deploy your Flow is with CrewAI Enterprise. It handles the infrastructure, authentication, and monitoring for you.

-Check out the [Deployment Guide](/pt-BR/enterprise/guides/deploy-crew) to get started.
+Check out the [Deployment Guide](/pt-BR/enterprise/guides/deploy-to-amp) to get started.

```bash
crewai deploy create
@@ -91,7 +91,7 @@ After deploying, you can view the automation details and use the **Optio

## Related

<CardGroup cols={3}>
-  <Card title="Deploy a Crew" href="/pt-BR/enterprise/guides/deploy-crew" icon="rocket">
+  <Card title="Deploy a Crew" href="/pt-BR/enterprise/guides/deploy-to-amp" icon="rocket">
    Deploy a Crew via GitHub or a ZIP file.
  </Card>
  <Card title="Automation Triggers" href="/pt-BR/enterprise/guides/automation-triggers" icon="trigger">
@@ -79,7 +79,7 @@ After publishing, you can view the automation details and use the *

<Card title="Build a Crew" href="/pt-BR/enterprise/guides/build-crew" icon="paintbrush">
  Build a Crew.
</Card>
-<Card title="Deploy a Crew" href="/pt-BR/enterprise/guides/deploy-crew" icon="rocket">
+<Card title="Deploy a Crew" href="/pt-BR/enterprise/guides/deploy-to-amp" icon="rocket">
  Deploy a Crew via GitHub or ZIP.
</Card>
<Card title="Export a React Component" href="/pt-BR/enterprise/guides/react-component-export" icon="download">
@@ -1,304 +0,0 @@
---
title: "Deploy Crew"
description: "Deploying a Crew on CrewAI AMP"
icon: "rocket"
mode: "wide"
---

<Note>
  After creating a crew locally or through Crew Studio, the next step is to
  deploy it to the CrewAI AMP platform. This guide covers multiple deployment
  methods so you can choose the approach that best fits your workflow.
</Note>

## Prerequisites

<CardGroup cols={2}>
  <Card title="Deployment-Ready Crew" icon="users">
    You should have a working crew, built locally or created through Crew Studio
  </Card>
  <Card title="GitHub Repository" icon="github">
    Your crew code should live in a GitHub repository (for the GitHub
    integration method)
  </Card>
</CardGroup>

## Option 1: Deploy Using the CrewAI CLI

The CLI provides the fastest way to deploy locally developed crews to the Enterprise platform.

<Steps>
  <Step title="Install the CrewAI CLI">
    If you haven't already, install the CrewAI CLI:

    ```bash
    pip install crewai[tools]
    ```

    <Tip>
      The CLI ships with the core CrewAI package, but the `[tools]` extra installs all deployment dependencies.
    </Tip>

  </Step>

  <Step title="Authenticate with the Enterprise Platform">
    First, you need to authenticate the CLI with the CrewAI AMP platform:

    ```bash
    # If you already have a CrewAI AMP account, or want to create one:
    crewai login
    ```

    When you run either command, the CLI will:
    1. Display a URL and a unique device code
    2. Open your browser to the authentication page
    3. Ask you to confirm the device
    4. Complete the authentication process

    Once authentication succeeds, you'll see a confirmation message in your terminal!

  </Step>

  <Step title="Create a Deployment">

    From your project directory, run:

    ```bash
    crewai deploy create
    ```

    This command will:
    1. Detect your GitHub repository information
    2. Identify environment variables in your local `.env` file
    3. Securely transfer those variables to the Enterprise platform
    4. Create a new deployment with a unique identifier

    On success, you'll see a message like:
    ```shell
    Deployment created successfully!
    Name: your_project_name
    Deployment ID: 01234567-89ab-cdef-0123-456789abcdef
    Current Status: Deploy Enqueued
    ```

  </Step>

  <Step title="Monitor Deployment Progress">

    Track the deployment status with:

    ```bash
    crewai deploy status
    ```

    For detailed logs of the build process:

    ```bash
    crewai deploy logs
    ```

    <Tip>
      The first deployment usually takes 10-15 minutes because container images are built. Subsequent deployments are much faster.
    </Tip>

  </Step>
</Steps>

## Additional CLI Commands

The CrewAI CLI offers several commands for managing your deployments:

```bash
# List all your deployments
crewai deploy list

# Check the status of a deployment
crewai deploy status

# View deployment logs
crewai deploy logs

# Push updates after code changes
crewai deploy push

# Remove a deployment
crewai deploy remove <deployment_id>
```

## Option 2: Deploy Directly from the Web Interface

You can also deploy your crews directly through the CrewAI AMP web interface by connecting your GitHub account. This approach doesn't require using the CLI on your local machine.

<Steps>

  <Step title="Push to GitHub">

    You need to push your crew to a GitHub repository. If you haven't created a crew yet, you can [follow this tutorial](/pt-BR/quickstart).

  </Step>

  <Step title="Connect GitHub to CrewAI AMP">

    1. Log in to [CrewAI AMP](https://app.crewai.com)
    2. Click the "Connect GitHub" button

    <Frame>
      
    </Frame>

  </Step>

  <Step title="Select the Repository">

    After connecting your GitHub account, you can select which repository to deploy:

    <Frame>
      
    </Frame>

  </Step>

  <Step title="Set the Environment Variables">

    Before deploying, you need to configure the environment variables that connect to your LLM provider or other services:

    1. You can add variables individually or in bulk
    2. Enter your variables in `KEY=VALUE` format (one per line)

    <Frame>
      
    </Frame>

  </Step>

  <Step title="Deploy Your Crew">

    1. Click the "Deploy" button to start the deployment process
    2. You can monitor progress via the progress bar
    3. The first deployment usually takes 10-15 minutes; subsequent deployments are faster

    <Frame>
      
    </Frame>

    Once it completes, you'll see:
    - Your crew's unique URL
    - A Bearer token to secure your crew API
    - A "Delete" button in case you need to remove the deployment

  </Step>

</Steps>

## ⚠️ Environment Variable Security Requirements

<Warning>
  **Important**: CrewAI AMP enforces security restrictions on environment
  variable names; deployments can fail if they are not followed.
</Warning>

### Blocked Environment Variable Patterns

For security reasons, the following environment variable naming patterns are **automatically filtered** and will cause deployment problems:

**Blocked patterns:**

- Variables ending in `_TOKEN` (e.g., `MY_API_TOKEN`)
- Variables ending in `_PASSWORD` (e.g., `DB_PASSWORD`)
- Variables ending in `_SECRET` (e.g., `API_SECRET`)
- Variables ending in `_KEY` in certain contexts

**Specific blocked variables:**

- `GITHUB_USER`, `GITHUB_TOKEN`
- `AWS_REGION`, `AWS_DEFAULT_REGION`
- Various internal CrewAI system variables

### Allowed Exceptions

Some variables are explicitly allowed even though they match blocked patterns:

- `AZURE_AD_TOKEN`
- `AZURE_OPENAI_AD_TOKEN`
- `ENTERPRISE_ACTION_TOKEN`
- `CREWAI_ENTEPRISE_TOOLS_TOKEN`

### How to Fix Naming Issues

If your deployment fails due to environment variable restrictions:

```bash
# ❌ These will cause deployment failures
OPENAI_TOKEN=sk-...
DATABASE_PASSWORD=mypassword
API_SECRET=secret123

# ✅ Use these naming patterns instead
OPENAI_API_KEY=sk-...
DATABASE_CREDENTIALS=mypassword
API_CONFIG=secret123
```

### Best Practices

1. **Use standard naming conventions**: `PROVIDER_API_KEY` instead of `PROVIDER_TOKEN`
2. **Test locally first**: Make sure your crew works with the renamed variables
3. **Update your code**: Change every reference to the old variable names
4. **Document the changes**: Keep a record of renamed variables for your team

<Tip>
  If you run into deployment failures with cryptic environment variable
  errors, check your variable names against these patterns first.
</Tip>

### Interacting with Your Deployed Crew

Once deployment completes, you can access your crew through:

1. **REST API**: The platform generates a unique HTTPS endpoint with these key routes:

   - `/inputs`: Lists the required input parameters
   - `/kickoff`: Starts an execution with the provided inputs
   - `/status/{kickoff_id}`: Checks execution status

2. **Web interface**: Visit [app.crewai.com](https://app.crewai.com) to see:
   - **Status tab**: Deployment info, API endpoint details, and the authentication token
   - **Run tab**: A visualization of your crew's structure
   - **Executions tab**: History of all executions
   - **Metrics tab**: Performance analytics
   - **Traces tab**: Detailed execution insights

### Triggering an Execution

From the Enterprise dashboard you can:

1. Click your crew's name to open its details
2. Select "Trigger Crew" in the management interface
3. Enter the required inputs in the modal that appears
4. Monitor progress as the execution advances through the pipeline

### Monitoring and Analytics

The Enterprise platform offers comprehensive observability features:

- **Execution management**: Track active and completed executions
- **Traces**: Detailed breakdown of each execution
- **Metrics**: Token usage, execution times, and costs
- **Timeline view**: Visual representation of task sequences

### Advanced Features

The Enterprise platform also offers:

- **Environment variable management**: Securely store and manage API keys
- **LLM connections**: Configure integrations with various LLM providers
- **Custom Tools Repository**: Create, share, and install tools
- **Crew Studio**: Assemble crews through a chat interface without writing code

<Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
  Contact our support team for help with deployment issues or questions about
  the Enterprise platform.
</Card>
439 docs/pt-BR/enterprise/guides/deploy-to-amp.mdx Normal file
@@ -0,0 +1,439 @@
|
||||
---
|
||||
title: "Deploy para AMP"
|
||||
description: "Implante seu Crew ou Flow no CrewAI AMP"
|
||||
icon: "rocket"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
<Note>
|
||||
Depois de criar um Crew ou Flow localmente (ou pelo Crew Studio), o próximo passo é
|
||||
implantá-lo na plataforma CrewAI AMP. Este guia cobre múltiplos métodos de
|
||||
implantação para ajudá-lo a escolher a melhor abordagem para o seu fluxo de trabalho.
|
||||
</Note>
|
||||
|
||||
## Pré-requisitos
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="Projeto Pronto para Implantação" icon="check-circle">
|
||||
Você deve ter um Crew ou Flow funcionando localmente com sucesso.
|
||||
Siga nosso [guia de preparação](/pt-BR/enterprise/guides/prepare-for-deployment) para verificar a estrutura do seu projeto.
|
||||
</Card>
|
||||
<Card title="Repositório GitHub" icon="github">
|
||||
Seu código deve estar em um repositório do GitHub (para o método de integração com GitHub).
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
<Info>
|
||||
**Crews vs Flows**: Ambos os tipos de projeto podem ser implantados como "automações" no CrewAI AMP.
|
||||
O processo de implantação é o mesmo, mas eles têm estruturas de projeto diferentes.
|
||||
Veja [Preparar para Implantação](/pt-BR/enterprise/guides/prepare-for-deployment) para detalhes.
|
||||
</Info>
|
||||
|
||||
## Opção 1: Implantar Usando o CrewAI CLI
|
||||
|
||||
A CLI fornece a maneira mais rápida de implantar Crews ou Flows desenvolvidos localmente na plataforma AMP.
|
||||
A CLI detecta automaticamente o tipo do seu projeto a partir do `pyproject.toml` e faz o build adequadamente.
|
||||
|
||||
<Steps>
|
||||
<Step title="Instale o CrewAI CLI">
|
||||
Se ainda não tiver, instale o CrewAI CLI:
|
||||
|
||||
```bash
|
||||
pip install crewai[tools]
|
||||
```
|
||||
|
||||
<Tip>
|
||||
A CLI vem com o pacote principal CrewAI, mas o extra `[tools]` garante todas as dependências de implantação.
|
||||
</Tip>
|
||||
|
||||
</Step>
|
||||
|
||||
<Step title="Autentique-se na Plataforma Enterprise">
|
||||
Primeiro, você precisa autenticar sua CLI com a plataforma CrewAI AMP:
|
||||
|
||||
```bash
|
||||
# Se já possui uma conta CrewAI AMP, ou deseja criar uma:
|
||||
crewai login
|
||||
```
|
||||
|
||||
Ao executar qualquer um dos comandos, a CLI irá:
|
||||
1. Exibir uma URL e um código de dispositivo único
|
||||
2. Abrir seu navegador para a página de autenticação
|
||||
3. Solicitar a confirmação do dispositivo
|
||||
4. Completar o processo de autenticação
|
||||
|
||||
Após a autenticação bem-sucedida, você verá uma mensagem de confirmação no terminal!
|
||||
|
||||
</Step>
|
||||
|
||||
<Step title="Criar uma Implantação">
|
||||
|
||||
No diretório do seu projeto, execute:
|
||||
|
||||
```bash
|
||||
crewai deploy create
|
||||
```
|
||||
|
||||
Este comando irá:
|
||||
1. Detectar informações do seu repositório GitHub
|
||||
2. Identificar variáveis de ambiente no seu arquivo `.env` local
|
||||
3. Transferir essas variáveis com segurança para a plataforma Enterprise
|
||||
4. Criar uma nova implantação com um identificador único
|
||||
|
||||
Com a criação bem-sucedida, você verá uma mensagem como:
|
||||
```shell
|
||||
Deployment created successfully!
|
||||
Name: your_project_name
|
||||
Deployment ID: 01234567-89ab-cdef-0123-456789abcdef
|
||||
Current Status: Deploy Enqueued
|
||||
```
|
||||
|
||||
</Step>
|
||||
|
||||
<Step title="Acompanhe o Progresso da Implantação">
|
||||
|
||||
Acompanhe o status da implantação com:
|
||||
|
||||
```bash
|
||||
crewai deploy status
|
||||
```
|
||||
|
||||
Para ver logs detalhados do processo de build:
|
||||
|
||||
```bash
|
||||
crewai deploy logs
|
||||
```
|
||||
|
||||
<Tip>
|
||||
A primeira implantação normalmente leva de 10 a 15 minutos, pois as imagens dos containers são construídas. As próximas implantações são bem mais rápidas.
|
||||
</Tip>
|
||||
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Comandos Adicionais da CLI
|
||||
|
||||
O CrewAI CLI oferece vários comandos para gerenciar suas implantações:
|
||||
|
||||
```bash
|
||||
# Liste todas as suas implantações
|
||||
crewai deploy list
|
||||
|
||||
# Consulte o status de uma implantação
|
||||
crewai deploy status
|
||||
|
||||
# Veja os logs da implantação
|
||||
crewai deploy logs
|
||||
|
||||
# Envie atualizações após alterações no código
|
||||
crewai deploy push
|
||||
|
||||
# Remova uma implantação
|
||||
crewai deploy remove <deployment_id>
|
||||
```
|
||||
|
||||
## Opção 2: Implantar Diretamente pela Interface Web
|
||||
|
||||
Você também pode implantar seus Crews ou Flows diretamente pela interface web do CrewAI AMP conectando sua conta do GitHub. Esta abordagem não requer utilizar a CLI na sua máquina local. A plataforma detecta automaticamente o tipo do seu projeto e trata o build adequadamente.
|
||||
|
||||
<Steps>
|
||||
|
||||
<Step title="Enviar para o GitHub">
|
||||
|
||||
Você precisa enviar seu crew para um repositório do GitHub. Caso ainda não tenha criado um crew, você pode [seguir este tutorial](/pt-BR/quickstart).
|
||||
|
||||
</Step>
|
||||
|
||||
<Step title="Conectando o GitHub ao CrewAI AMP">
|
||||
|
||||
1. Faça login em [CrewAI AMP](https://app.crewai.com)
|
||||
2. Clique no botão "Connect GitHub"
|
||||
|
||||
<Frame>
|
||||

|
||||
</Frame>
|
||||
|
||||
</Step>
|
||||
|
||||
<Step title="Selecionar o Repositório">
|
||||
|
||||
Após conectar sua conta GitHub, você poderá selecionar qual repositório deseja implantar:
|
||||
|
||||
<Frame>
|
||||

|
||||
</Frame>
|
||||
|
||||
</Step>
|
||||
|
||||
<Step title="Definir as Variáveis de Ambiente">
|
||||
|
||||
Antes de implantar, você precisará configurar as variáveis de ambiente para conectar ao seu provedor de LLM ou outros serviços:
|
||||
|
||||
1. Você pode adicionar variáveis individualmente ou em lote
|
||||
2. Digite suas variáveis no formato `KEY=VALUE` (uma por linha)
|
||||
|
||||
<Frame>
|
||||

|
||||
</Frame>
|
||||
|
||||
</Step>
|
||||
|
||||
<Step title="Implante Seu Crew">
|
||||
|
||||
1. Clique no botão "Deploy" para iniciar o processo de implantação
|
||||
2. Você pode monitorar o progresso pela barra de progresso
|
||||
3. A primeira implantação geralmente demora de 10 a 15 minutos; as próximas serão mais rápidas
|
||||
|
||||
<Frame>
|
||||

|
||||
</Frame>
|
||||
|
||||
Após a conclusão, você verá:
|
||||
- A URL exclusiva do seu crew
|
||||
- Um Bearer token para proteger sua API crew
|
||||
- Um botão "Delete" caso precise remover a implantação
|
||||
|
||||
</Step>
|
||||
|
||||
</Steps>
|
||||
|
||||
## Opção 3: Reimplantar Usando API (Integração CI/CD)
|
||||
|
||||
Para implantações automatizadas em pipelines CI/CD, você pode usar a API do CrewAI para acionar reimplantações de crews existentes. Isso é particularmente útil para GitHub Actions, Jenkins ou outros workflows de automação.
|
||||
|
||||
<Steps>
|
||||
<Step title="Obtenha Seu Token de Acesso Pessoal">
|
||||
|
||||
Navegue até as configurações da sua conta CrewAI AMP para gerar um token de API:
|
||||
|
||||
1. Acesse [app.crewai.com](https://app.crewai.com)
|
||||
2. Clique em **Settings** → **Account** → **Personal Access Token**
|
||||
3. Gere um novo token e copie-o com segurança
|
||||
4. Armazene este token como um secret no seu sistema CI/CD
|
||||
|
||||
</Step>
|
||||
|
||||
<Step title="Encontre o UUID da Sua Automação">
|
||||
|
||||
Localize o identificador único do seu crew implantado:
|
||||
|
||||
1. Acesse **Automations** no seu dashboard CrewAI AMP
|
||||
2. Selecione sua automação/crew existente
|
||||
3. Clique em **Additional Details**
|
||||
4. Copie o **UUID** - este identifica sua implantação específica do crew
|
||||
|
||||
</Step>
|
||||
|
||||
<Step title="Acione a Reimplantação via API">
|
||||
|
||||
Use o endpoint da API de Deploy para acionar uma reimplantação:
|
||||
|
||||
```bash
|
||||
curl -i -X POST \
|
||||
-H "Authorization: Bearer YOUR_PERSONAL_ACCESS_TOKEN" \
|
||||
https://app.crewai.com/crewai_plus/api/v1/crews/YOUR-AUTOMATION-UUID/deploy
|
||||
|
||||
# HTTP/2 200
|
||||
# content-type: application/json
|
||||
#
|
||||
# {
|
||||
# "uuid": "your-automation-uuid",
|
||||
# "status": "Deploy Enqueued",
|
||||
# "public_url": "https://your-crew-deployment.crewai.com",
|
||||
# "token": "your-bearer-token"
|
||||
# }
|
||||
```
|
||||
|
||||
<Info>
|
||||
Se sua automação foi criada originalmente conectada ao Git, a API automaticamente puxará as últimas alterações do seu repositório antes de reimplantar.
|
||||
</Info>

</Step>

<Step title="GitHub Actions Integration Example">

Here is a GitHub Actions workflow with more advanced deployment triggers:

```yaml
name: Deploy CrewAI Automation

on:
  push:
    branches: [ main ]
  pull_request:
    types: [ labeled ]
  release:
    types: [ published ]

jobs:
  deploy:
    runs-on: ubuntu-latest
    if: |
      (github.event_name == 'push' && github.ref == 'refs/heads/main') ||
      (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'deploy')) ||
      (github.event_name == 'release')
    steps:
      - name: Trigger CrewAI Redeployment
        run: |
          curl -X POST \
            -H "Authorization: Bearer ${{ secrets.CREWAI_PAT }}" \
            https://app.crewai.com/crewai_plus/api/v1/crews/${{ secrets.CREWAI_AUTOMATION_UUID }}/deploy
```

<Tip>
Add `CREWAI_PAT` and `CREWAI_AUTOMATION_UUID` as repository secrets. For PR deployments, add a "deploy" label to trigger the workflow.
</Tip>

</Step>

</Steps>
## Interact with Your Deployed Automation

After deployment, you can access your crew through:

1. **REST API**: The platform generates a unique HTTPS endpoint with these main routes (see the Python sketch after this list):

   - `/inputs`: Lists the required input parameters
   - `/kickoff`: Starts an execution with the provided inputs
   - `/status/{kickoff_id}`: Checks the execution status

2. **Web Interface**: Visit [app.crewai.com](https://app.crewai.com) to view:
   - **Status tab**: Deployment information, API endpoint details, and the authentication token
   - **Run tab**: A visualization of your crew's structure
   - **Executions tab**: History of all executions
   - **Metrics tab**: Performance analytics
   - **Traces tab**: Detailed execution insights
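
To make the REST routes above concrete, here is a minimal Python sketch that kicks off an execution and polls its status. It assumes the third-party `requests` package; `CREW_URL` and `CREW_TOKEN` are placeholders for the URL and Bearer token shown on your Status tab, and the `kickoff_id` and `state` response fields are illustrative assumptions that may differ from your deployment's exact payload.

```python
# Hedged sketch of the /kickoff and /status routes listed above.
# CREW_URL and CREW_TOKEN come from your deployment's Status tab; the
# "kickoff_id" and "state" response fields are assumptions.
import time

import requests

CREW_URL = "https://your-crew-deployment.crewai.com"
CREW_TOKEN = "your-bearer-token"
HEADERS = {"Authorization": f"Bearer {CREW_TOKEN}"}

# Discover the required inputs, then start an execution.
print("required inputs:", requests.get(f"{CREW_URL}/inputs", headers=HEADERS).json())

kickoff = requests.post(
    f"{CREW_URL}/kickoff",
    headers=HEADERS,
    json={"inputs": {"topic": "AI in Healthcare"}},
)
kickoff.raise_for_status()
kickoff_id = kickoff.json()["kickoff_id"]

# Poll /status/{kickoff_id} until the execution reaches a terminal state.
while True:
    status = requests.get(f"{CREW_URL}/status/{kickoff_id}", headers=HEADERS).json()
    if status.get("state") not in ("PENDING", "RUNNING"):
        break
    time.sleep(5)

print(status)
```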

### Trigger an Execution

From the Enterprise dashboard, you can:

1. Click your crew's name to open its details
2. Select "Trigger Crew" in the management interface
3. Enter the required inputs in the modal that appears
4. Monitor progress as the execution moves through the pipeline

### Monitoring and Analytics

The Enterprise platform provides comprehensive observability features:

- **Execution Management**: Track active and completed executions
- **Traces**: Detailed breakdown of each execution
- **Metrics**: Token usage, execution times, and costs
- **Timeline View**: Visual representation of task sequences

### Advanced Features

The Enterprise platform also offers:

- **Environment Variable Management**: Securely store and manage API keys
- **LLM Connections**: Configure integrations with multiple LLM providers
- **Custom Tools Repository**: Create, share, and install tools
- **Crew Studio**: Build crews through a chat interface without writing code

## Troubleshooting Deployment Failures

If your deployment fails, check for these common issues:

### Build Failures

#### Missing uv.lock File

**Symptom**: The build fails early with dependency resolution errors

**Solution**: Generate and commit the lock file:

```bash
uv lock
git add uv.lock
git commit -m "Add uv.lock for deployment"
git push
```

<Warning>
The `uv.lock` file is required for all deployments. Without it, the platform
cannot install your dependencies reliably.
</Warning>

#### Incorrect Project Structure

**Symptom**: "Could not find entry point" or "Module not found" errors

**Solution**: Verify that your project matches the expected structure:

- **Both Crews and Flows**: Must have their entry point at `src/project_name/main.py`
- **Crews**: Use a `run()` function as the entry point
- **Flows**: Use a `kickoff()` function as the entry point

See [Prepare for Deployment](/pt-BR/enterprise/guides/prepare-for-deployment) for detailed structure diagrams.

#### Missing CrewBase Decorator

**Symptom**: "Crew not found" or "Config not found" errors, or agent/task configuration errors

**Solution**: Make sure **all** crew classes use the `@CrewBase` decorator:

```python
from crewai import Agent
from crewai.project import CrewBase, agent, crew, task

@CrewBase  # This decorator is REQUIRED
class YourCrew():
    """Description of your crew"""

    @agent
    def my_agent(self) -> Agent:
        return Agent(
            config=self.agents_config['my_agent'],  # type: ignore[index]
            verbose=True
        )

    # ... rest of the crew definition
```

<Info>
This applies to standalone Crews AND crews embedded inside Flow projects.
Every crew class needs the decorator.
</Info>

#### Incorrect Type in pyproject.toml

**Symptom**: The build succeeds but fails at runtime, or the automation behaves unexpectedly

**Solution**: Verify that the `[tool.crewai]` section matches your project type:

```toml
# For Crew projects:
[tool.crewai]
type = "crew"

# For Flow projects:
[tool.crewai]
type = "flow"
```

### Runtime Failures

#### LLM Connection Failures

**Symptom**: API key errors, "model not found", or authentication failures

**Solution**:
1. Verify that your LLM provider's API key is correctly set in the environment variables
2. Make sure the environment variable names match what your code expects
3. Test locally with exactly the same environment variables before deploying

#### Crew Execution Errors

**Symptom**: The crew starts but fails during execution

**Solution**:
1. Check the execution logs in the AMP dashboard (Traces tab)
2. Verify that all tools have the required API keys configured
3. Make sure the agent configurations in `agents.yaml` are valid
4. Check for syntax errors in the task configurations in `tasks.yaml`

<Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
Contact our support team for help with deployment issues or questions
about the AMP platform.
</Card>

305 docs/pt-BR/enterprise/guides/prepare-for-deployment.mdx Normal file
@@ -0,0 +1,305 @@
---
title: "Prepare for Deployment"
description: "Make sure your Crew or Flow is ready to deploy on CrewAI AMP"
icon: "clipboard-check"
mode: "wide"
---

<Note>
Before deploying to CrewAI AMP, it is crucial to verify that your project is structured correctly.
Both Crews and Flows can be deployed as "automations", but they have different project structures
and requirements that must be met for a successful deployment.
</Note>

## Understanding Automations

In CrewAI AMP, **automations** is the umbrella term for deployable agentic AI projects. An automation can be:

- **A Crew**: A standalone team of AI agents working together on tasks
- **A Flow**: An orchestrated workflow that can combine multiple crews, direct LLM calls, and procedural logic

Understanding which type you are deploying is essential because they have different project structures and entry points.

## Crews vs Flows: Key Differences

<CardGroup cols={2}>
<Card title="Crew Projects" icon="users">
Standalone teams of AI agents with `crew.py` defining agents and tasks. Ideal for focused, collaborative tasks.
</Card>
<Card title="Flow Projects" icon="diagram-project">
Orchestrated workflows with crews embedded in a `crews/` folder. Ideal for complex, multi-step processes.
</Card>
</CardGroup>

| Aspect | Crew | Flow |
|--------|------|------|
| **Project structure** | `src/project_name/` with `crew.py` | `src/project_name/` with a `crews/` folder |
| **Main logic location** | `src/project_name/crew.py` | `src/project_name/main.py` (Flow class) |
| **Entry point function** | `run()` in `main.py` | `kickoff()` in `main.py` |
| **Type in pyproject.toml** | `type = "crew"` | `type = "flow"` |
| **CLI creation command** | `crewai create crew name` | `crewai create flow name` |
| **Configuration location** | `src/project_name/config/` | `src/project_name/crews/crew_name/config/` |
| **Can contain other crews** | No | Yes (in the `crews/` folder) |
## Project Structure Reference

### Crew Project Structure

When you run `crewai create crew my_crew`, you get this structure:

```
my_crew/
├── .gitignore
├── pyproject.toml          # Must have type = "crew"
├── README.md
├── .env
├── uv.lock                 # REQUIRED for deployment
└── src/
    └── my_crew/
        ├── __init__.py
        ├── main.py         # Entry point with run() function
        ├── crew.py         # Crew class with @CrewBase decorator
        ├── tools/
        │   ├── custom_tool.py
        │   └── __init__.py
        └── config/
            ├── agents.yaml # Agent definitions
            └── tasks.yaml  # Task definitions
```

<Warning>
The nested `src/project_name/` structure is critical for Crews.
Placing files at the wrong level will cause deployment failures.
</Warning>

### Flow Project Structure

When you run `crewai create flow my_flow`, you get this structure:

```
my_flow/
├── .gitignore
├── pyproject.toml          # Must have type = "flow"
├── README.md
├── .env
├── uv.lock                 # REQUIRED for deployment
└── src/
    └── my_flow/
        ├── __init__.py
        ├── main.py         # Entry point with kickoff() function + Flow class
        ├── crews/          # Embedded crews folder
        │   └── poem_crew/
        │       ├── __init__.py
        │       ├── poem_crew.py   # Crew with @CrewBase decorator
        │       └── config/
        │           ├── agents.yaml
        │           └── tasks.yaml
        └── tools/
            ├── __init__.py
            └── custom_tool.py
```

<Info>
Both Crews and Flows use the `src/project_name/` structure.
The key difference is that Flows have a `crews/` folder for embedded crews,
while Crews have `crew.py` directly in the project folder.
</Info>

## Pre-Deployment Checklist

Use this checklist to verify that your project is ready for deployment.

### 1. Verify the pyproject.toml Configuration

Your `pyproject.toml` must include the correct `[tool.crewai]` section:

<Tabs>
<Tab title="For Crews">
```toml
[tool.crewai]
type = "crew"
```
</Tab>
<Tab title="For Flows">
```toml
[tool.crewai]
type = "flow"
```
</Tab>
</Tabs>

<Warning>
If the `type` does not match your project structure, the build will fail or
the automation will not work correctly.
</Warning>

### 2. Ensure the uv.lock File Exists

CrewAI uses `uv` for dependency management. The `uv.lock` file guarantees reproducible builds and is **required** for deployment.

```bash
# Generate or update the lock file
uv lock

# Verify that it exists
ls -la uv.lock
```

If the file does not exist, run `uv lock` and commit it to your repository:

```bash
uv lock
git add uv.lock
git commit -m "Add uv.lock for deployment"
git push
```

### 3. Validate CrewBase Decorator Usage

**Every crew class must use the `@CrewBase` decorator.** This applies to:

- Standalone crew projects
- Crews embedded inside Flow projects

```python
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from crewai.agents.agent_builder.base_agent import BaseAgent
from typing import List

@CrewBase  # This decorator is REQUIRED
class MyCrew():
    """Description of my crew"""

    agents: List[BaseAgent]
    tasks: List[Task]

    @agent
    def my_agent(self) -> Agent:
        return Agent(
            config=self.agents_config['my_agent'],  # type: ignore[index]
            verbose=True
        )

    @task
    def my_task(self) -> Task:
        return Task(
            config=self.tasks_config['my_task']  # type: ignore[index]
        )

    @crew
    def crew(self) -> Crew:
        return Crew(
            agents=self.agents,
            tasks=self.tasks,
            process=Process.sequential,
            verbose=True,
        )
```

<Warning>
If you forget the `@CrewBase` decorator, your deployment will fail with
errors about missing agent or task configurations.
</Warning>

### 4. Verify the Project Entry Points

Both Crews and Flows have their entry point in `src/project_name/main.py`:

<Tabs>
<Tab title="For Crews">
The entry point uses a `run()` function:

```python
# src/my_crew/main.py
from my_crew.crew import MyCrew

def run():
    """Run the crew."""
    inputs = {'topic': 'AI in Healthcare'}
    result = MyCrew().crew().kickoff(inputs=inputs)
    return result

if __name__ == "__main__":
    run()
```
</Tab>
<Tab title="For Flows">
The entry point uses a `kickoff()` function with a Flow class:

```python
# src/my_flow/main.py
from crewai.flow import Flow, listen, start
from my_flow.crews.poem_crew.poem_crew import PoemCrew

class MyFlow(Flow):
    @start()
    def begin(self):
        # Flow logic goes here
        result = PoemCrew().crew().kickoff(inputs={...})
        return result

def kickoff():
    """Run the flow."""
    MyFlow().kickoff()

if __name__ == "__main__":
    kickoff()
```
</Tab>
</Tabs>

### 5. Prepare Environment Variables

Before deployment, make sure you have:

1. **LLM API keys** ready (OpenAI, Anthropic, Google, etc.)
2. **Tool API keys** if you are using external tools (Serper, etc.)

<Tip>
Test your project locally with the same environment variables before deploying
to catch configuration problems early.
</Tip>

## Quick Validation Commands

Run these commands from your project root to quickly verify your setup:

```bash
# 1. Check the project type in pyproject.toml
grep -A2 "\[tool.crewai\]" pyproject.toml

# 2. Check that uv.lock exists
ls -la uv.lock || echo "ERROR: uv.lock missing! Run 'uv lock'"

# 3. Check that the src/ structure exists
ls -la src/*/main.py 2>/dev/null || echo "No main.py found in src/"

# 4. For Crews - check that crew.py exists
ls -la src/*/crew.py 2>/dev/null || echo "No crew.py (expected for Crews)"

# 5. For Flows - check that the crews/ folder exists
ls -la src/*/crews/ 2>/dev/null || echo "No crews/ folder (expected for Flows)"

# 6. Check CrewBase usage
grep -r "@CrewBase" . --include="*.py"
```
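
If you prefer a single script to the shell one-liners above, the sketch below runs the same checks from Python. This is a convenience wrapper rather than an official validator; it assumes Python 3.11+ (for `tomllib`) and is meant to be run from the project root.

```python
# Hedged convenience script mirroring the shell checks above.
# Assumes Python 3.11+ (tomllib) and execution from the project root.
import re
import sys
import tomllib
from pathlib import Path

errors: list[str] = []

# 1. Project type declared in pyproject.toml
pyproject = Path("pyproject.toml")
if pyproject.exists():
    crewai_type = (
        tomllib.loads(pyproject.read_text())
        .get("tool", {})
        .get("crewai", {})
        .get("type")
    )
    if crewai_type not in ("crew", "flow"):
        errors.append("pyproject.toml: [tool.crewai] type must be 'crew' or 'flow'")
else:
    errors.append("pyproject.toml not found")

# 2. Lock file present
if not Path("uv.lock").exists():
    errors.append("uv.lock missing! Run 'uv lock' and commit it")

# 3. Entry point under src/<project>/main.py
if not list(Path("src").glob("*/main.py")):
    errors.append("no main.py found in src/")

# 4. At least one class decorated with @CrewBase
if not any(re.search(r"@CrewBase", p.read_text()) for p in Path("src").rglob("*.py")):
    errors.append("no @CrewBase decorator found under src/")

if errors:
    sys.exit("\n".join(errors))
print("All pre-deployment checks passed.")
```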

## Common Configuration Errors

| Error | Symptom | Fix |
|-------|---------|-----|
| Missing `uv.lock` | Build fails during dependency resolution | Run `uv lock` and commit the file |
| Wrong `type` in pyproject.toml | Build succeeds but fails at runtime | Change it to the correct type |
| Missing `@CrewBase` decorator | "Config not found" errors | Add the decorator to all crew classes |
| Files at the root instead of `src/` | Entry point not found | Move them into `src/project_name/` |
| Missing `run()` or `kickoff()` | The automation cannot start | Add the correct entry function |

## Next Steps

Once your project passes every item in the checklist, you are ready to deploy:

<Card title="Deploy to AMP" icon="rocket" href="/pt-BR/enterprise/guides/deploy-to-amp">
Follow the deployment guide to deploy your Crew or Flow to CrewAI AMP using
the CLI, the web interface, or CI/CD integration.
</Card>

@@ -82,7 +82,7 @@ CrewAI AMP expande o poder do framework open-source com funcionalidades projetad
<Card
  title="Deploy Crew"
  icon="rocket"
  href="/pt-BR/enterprise/guides/deploy-crew"
  href="/pt-BR/enterprise/guides/deploy-to-amp"
>
  Deploy Crew
</Card>

@@ -92,11 +92,11 @@ CrewAI AMP expande o poder do framework open-source com funcionalidades projetad
<Card
  title="API Access"
  icon="code"
  href="/pt-BR/enterprise/guides/deploy-crew"
  href="/pt-BR/enterprise/guides/kickoff-crew"
>
  Use the Crew API
</Card>
</Step>
</Steps>

For detailed instructions, see our [deployment guide](/pt-BR/enterprise/guides/deploy-crew) or click the button below to get started.
For detailed instructions, see our [deployment guide](/pt-BR/enterprise/guides/deploy-to-amp) or click the button below to get started.

@@ -7,6 +7,10 @@ mode: "wide"

## Overview

<Note>
The `@human_feedback` decorator requires **CrewAI version 1.8.0 or higher**. Make sure to upgrade your installation before using this feature.
</Note>

The `@human_feedback` decorator enables human-in-the-loop (HITL) workflows directly in CrewAI Flows. It lets you pause flow execution, present the output to a human for review, collect their feedback, and optionally route to different listeners based on the feedback outcome.

This is particularly valuable for:

@@ -5,9 +5,22 @@ icon: "user-check"
mode: "wide"
---

Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelligence with human expertise to enhance decision-making and improve task outcomes. This guide shows you how to implement HITL within CrewAI.
Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelligence with human expertise to enhance decision-making and improve task outcomes. CrewAI offers several ways to implement HITL depending on your needs.

## Setting Up HITL Workflows
## Choosing Your HITL Approach

CrewAI offers two main approaches to implementing human-in-the-loop workflows:

| Approach | Best For | Integration | Version |
|----------|----------|-------------|---------|
| **Flow-based** (`@human_feedback` decorator) | Local development, console review, synchronous workflows | [Human Feedback in Flows](/pt-BR/learn/human-feedback-in-flows) | **1.8.0+** |
| **Webhook-based** (Enterprise) | Production deployments, asynchronous workflows, external integrations (Slack, Teams, etc.) | This guide | - |

<Tip>
If you are building flows and want to add human review steps with feedback-based routing, check out the [Human Feedback in Flows](/pt-BR/learn/human-feedback-in-flows) guide for the `@human_feedback` decorator.
</Tip>

## Setting Up Webhook-Based HITL Workflows

<Steps>
<Step title="Configure Your Task">

115 docs/pt-BR/observability/galileo.mdx Normal file
@@ -0,0 +1,115 @@
---
title: Galileo
description: Galileo integration for CrewAI tracing and evaluation
icon: telescope
mode: "wide"
---

## Overview

This guide demonstrates how to integrate **Galileo** with **CrewAI** for comprehensive tracing and evaluation. By the end of this guide, you will be able to trace your CrewAI agents, monitor their performance, and evaluate their behavior with Galileo's observability platform.

> **What is Galileo?** [Galileo](https://galileo.ai/) is an AI evaluation and observability platform that offers tracing, evaluation, and monitoring for AI applications. It lets teams capture ground truth, build robust guardrails, and run systematic experiments with built-in experiment tracking and performance analysis, ensuring reliability, transparency, and continuous improvement across the AI lifecycle.

## Getting started

This tutorial follows the [CrewAI Quickstart](/pt-BR/quickstart) and shows how to add Galileo's [CrewAIEventListener](https://v2docs.galileo.ai/sdk-api/python/reference/handlers/crewai/handler), an event handler. For more information, see Galileo's [Add Galileo to a CrewAI application](https://v2docs.galileo.ai/how-to-guides/third-party-integrations/add-galileo-to-crewai/add-galileo-to-crewai) how-to guide.

> **Note**: This tutorial assumes you have completed the [CrewAI Quickstart](/pt-BR/quickstart). If you want a complete, end-to-end example, see Galileo's [CrewAI SDK examples repository](https://github.com/rungalileo/sdk-examples/tree/main/python/agent/crew-ai).

### Step 1: Install dependencies

Install the dependencies your application needs. Create a virtual environment using your preferred method, then install the dependencies inside that environment with your preferred tool:

```bash
uv add galileo
```

### Step 2: Add to the [CrewAI Quickstart](/pt-BR/quickstart) .env file

```bash
# Your Galileo API key
GALILEO_API_KEY="your-galileo-api-key"

# Your Galileo project name
GALILEO_PROJECT="your-galileo-project-name"

# The name of the Log stream you want to use for logging
GALILEO_LOG_STREAM="your-galileo-log-stream"
```

### Step 3: Add the Galileo event listener

To enable logging with Galileo, you need to create an instance of `CrewAIEventListener`. Import the Galileo CrewAI handler package by adding the following code at the top of your main.py file:

```python
from galileo.handlers.crewai.handler import CrewAIEventListener
```

At the start of your run function, create the event listener:

```python
def run():
    # Create the event listener
    CrewAIEventListener()
    # The rest of your existing code goes here
```

When you create the listener instance, it is automatically registered with CrewAI.

### Step 4: Run your Crew

Run your Crew with the CrewAI CLI:

```bash
crewai run
```

### Step 5: View the traces in Galileo

Once your crew finishes, the traces are flushed and appear in Galileo.



## Understanding the Galileo integration

Galileo integrates with CrewAI by registering an event listener that captures crew execution events (for example, agent actions, tool calls, and model responses) and forwards them to Galileo for observability and evaluation.

### Understanding the event listener

Creating a `CrewAIEventListener()` instance is all that is needed to enable Galileo for a CrewAI run. When instantiated, the listener:

- Automatically registers itself with CrewAI
- Reads the Galileo configuration from environment variables
- Logs all execution data to the Galileo project and log stream specified by `GALILEO_PROJECT` and `GALILEO_LOG_STREAM`

No additional configuration or code changes are required.
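
Putting the steps together, a minimal Quickstart-style `main.py` with Galileo enabled might look like the sketch below. `LatestAiDevelopment` is a hypothetical crew class standing in for whatever the Quickstart generated for you; only the listener line is Galileo-specific.

```python
# Minimal sketch of a Quickstart-style main.py with Galileo logging enabled.
# LatestAiDevelopment is a hypothetical crew class; substitute your own.
from galileo.handlers.crewai.handler import CrewAIEventListener

from latest_ai_development.crew import LatestAiDevelopment


def run():
    # Creating the listener registers it with CrewAI automatically; it reads
    # GALILEO_API_KEY, GALILEO_PROJECT, and GALILEO_LOG_STREAM from the environment.
    CrewAIEventListener()

    inputs = {"topic": "AI LLMs"}
    LatestAiDevelopment().crew().kickoff(inputs=inputs)


if __name__ == "__main__":
    run()
```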

@@ -12,7 +12,7 @@ dependencies = [
    "pytube~=15.0.0",
    "requests~=2.32.5",
    "docker~=7.1.0",
    "crewai==1.8.0",
    "crewai==1.8.1",
    "lancedb~=0.5.4",
    "tiktoken~=0.8.0",
    "beautifulsoup4~=4.13.4",

@@ -291,4 +291,4 @@ __all__ = [
    "ZapierActionTools",
]

__version__ = "1.8.0"
__version__ = "1.8.1"

@@ -49,7 +49,7 @@ Repository = "https://github.com/crewAIInc/crewAI"

[project.optional-dependencies]
tools = [
    "crewai-tools==1.8.0",
    "crewai-tools==1.8.1",
]
embeddings = [
    "tiktoken~=0.8.0"

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:

_suppress_pydantic_deprecation_warnings()

__version__ = "1.8.0"
__version__ = "1.8.1"
_telemetry_submitted = False

@@ -1,8 +1,10 @@
"""Agent-to-Agent (A2A) protocol communication module for CrewAI."""

from crewai.a2a.config import A2AConfig
from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig


__all__ = [
    "A2AClientConfig",
    "A2AConfig",
    "A2AServerConfig",
]

@@ -5,45 +5,57 @@ This module is separate from experimental.a2a to avoid circular imports.

from __future__ import annotations

from typing import Annotated, Any, ClassVar
from importlib.metadata import version
from typing import Any, ClassVar, Literal

from pydantic import (
    BaseModel,
    BeforeValidator,
    ConfigDict,
    Field,
    HttpUrl,
    TypeAdapter,
)
from pydantic import BaseModel, ConfigDict, Field
from typing_extensions import deprecated

from crewai.a2a.auth.schemas import AuthScheme
from crewai.a2a.types import TransportType, Url


try:
    from a2a.types import (
        AgentCapabilities,
        AgentCardSignature,
        AgentInterface,
        AgentProvider,
        AgentSkill,
        SecurityScheme,
    )

    from crewai.a2a.updates import UpdateConfig
except ImportError:
    UpdateConfig = Any
    AgentCapabilities = Any
    AgentCardSignature = Any
    AgentInterface = Any
    AgentProvider = Any
    SecurityScheme = Any
    AgentSkill = Any
    UpdateConfig = Any  # type: ignore[misc,assignment]


http_url_adapter = TypeAdapter(HttpUrl)

Url = Annotated[
    str,
    BeforeValidator(
        lambda value: str(http_url_adapter.validate_python(value, strict=True))
    ),
]


def _get_default_update_config() -> UpdateConfig:
    from crewai.a2a.updates import StreamingConfig

    return StreamingConfig()


@deprecated(
    """
    `crewai.a2a.config.A2AConfig` is deprecated and will be removed in v2.0.0,
    use `crewai.a2a.config.A2AClientConfig` or `crewai.a2a.config.A2AServerConfig` instead.
    """,
    category=FutureWarning,
)
class A2AConfig(BaseModel):
    """Configuration for A2A protocol integration.

    Deprecated:
        Use A2AClientConfig instead. This class will be removed in a future version.

    Attributes:
        endpoint: A2A agent endpoint URL.
        auth: Authentication scheme.

@@ -53,6 +65,7 @@ class A2AConfig(BaseModel):
        fail_fast: If True, raise error when agent unreachable; if False, skip and continue.
        trust_remote_completion_status: If True, return A2A agent's result directly when completed.
        updates: Update mechanism config.
        transport_protocol: A2A transport protocol (grpc, jsonrpc, http+json).
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

@@ -82,3 +95,180 @@ class A2AConfig(BaseModel):
        default_factory=_get_default_update_config,
        description="Update mechanism config",
    )
    transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"] = Field(
        default="JSONRPC",
        description="Specified mode of A2A transport protocol",
    )


class A2AClientConfig(BaseModel):
    """Configuration for connecting to remote A2A agents.

    Attributes:
        endpoint: A2A agent endpoint URL.
        auth: Authentication scheme.
        timeout: Request timeout in seconds.
        max_turns: Maximum conversation turns with A2A agent.
        response_model: Optional Pydantic model for structured A2A agent responses.
        fail_fast: If True, raise error when agent unreachable; if False, skip and continue.
        trust_remote_completion_status: If True, return A2A agent's result directly when completed.
        updates: Update mechanism config.
        accepted_output_modes: Media types the client can accept in responses.
        supported_transports: Ordered list of transport protocols the client supports.
        use_client_preference: Whether to prioritize client transport preferences over server.
        extensions: Extension URIs the client supports.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    endpoint: Url = Field(description="A2A agent endpoint URL")
    auth: AuthScheme | None = Field(
        default=None,
        description="Authentication scheme",
    )
    timeout: int = Field(default=120, description="Request timeout in seconds")
    max_turns: int = Field(
        default=10, description="Maximum conversation turns with A2A agent"
    )
    response_model: type[BaseModel] | None = Field(
        default=None,
        description="Optional Pydantic model for structured A2A agent responses",
    )
    fail_fast: bool = Field(
        default=True,
        description="If True, raise error when agent unreachable; if False, skip",
    )
    trust_remote_completion_status: bool = Field(
        default=False,
        description="If True, return A2A result directly when completed",
    )
    updates: UpdateConfig = Field(
        default_factory=_get_default_update_config,
        description="Update mechanism config",
    )
    accepted_output_modes: list[str] = Field(
        default_factory=lambda: ["application/json"],
        description="Media types the client can accept in responses",
    )
    supported_transports: list[str] = Field(
        default_factory=lambda: ["JSONRPC"],
        description="Ordered list of transport protocols the client supports",
    )
    use_client_preference: bool = Field(
        default=False,
        description="Whether to prioritize client transport preferences over server",
    )
    extensions: list[str] = Field(
        default_factory=list,
        description="Extension URIs the client supports",
    )
    transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"] = Field(
        default="JSONRPC",
        description="Specified mode of A2A transport protocol",
    )


class A2AServerConfig(BaseModel):
    """Configuration for exposing a Crew or Agent as an A2A server.

    All fields correspond to A2A AgentCard fields. Fields like name, description,
    and skills can be auto-derived from the Crew/Agent if not provided.

    Attributes:
        name: Human-readable name for the agent.
        description: Human-readable description of the agent.
        version: Version string for the agent card.
        skills: List of agent skills/capabilities.
        default_input_modes: Default supported input MIME types.
        default_output_modes: Default supported output MIME types.
        capabilities: Declaration of optional capabilities.
        preferred_transport: Transport protocol for the preferred endpoint.
        protocol_version: A2A protocol version this agent supports.
        provider: Information about the agent's service provider.
        documentation_url: URL to the agent's documentation.
        icon_url: URL to an icon for the agent.
        additional_interfaces: Additional supported interfaces.
        security: Security requirement objects for all interactions.
        security_schemes: Security schemes available to authorize requests.
        supports_authenticated_extended_card: Whether agent provides extended card to authenticated users.
        url: Preferred endpoint URL for the agent.
        signatures: JSON Web Signatures for the AgentCard.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    name: str | None = Field(
        default=None,
        description="Human-readable name for the agent. Auto-derived from Crew/Agent if not provided.",
    )
    description: str | None = Field(
        default=None,
        description="Human-readable description of the agent. Auto-derived from Crew/Agent if not provided.",
    )
    version: str = Field(
        default="1.0.0",
        description="Version string for the agent card",
    )
    skills: list[AgentSkill] = Field(
        default_factory=list,
        description="List of agent skills. Auto-derived from tasks/tools if not provided.",
    )
    default_input_modes: list[str] = Field(
        default_factory=lambda: ["text/plain", "application/json"],
        description="Default supported input MIME types",
    )
    default_output_modes: list[str] = Field(
        default_factory=lambda: ["text/plain", "application/json"],
        description="Default supported output MIME types",
    )
    capabilities: AgentCapabilities = Field(
        default_factory=lambda: AgentCapabilities(
            streaming=True,
            push_notifications=False,
        ),
        description="Declaration of optional capabilities supported by the agent",
    )
    preferred_transport: TransportType = Field(
        default="JSONRPC",
        description="Transport protocol for the preferred endpoint",
    )
    protocol_version: str = Field(
        default_factory=lambda: version("a2a-sdk"),
        description="A2A protocol version this agent supports",
    )
    provider: AgentProvider | None = Field(
        default=None,
        description="Information about the agent's service provider",
    )
    documentation_url: Url | None = Field(
        default=None,
        description="URL to the agent's documentation",
    )
    icon_url: Url | None = Field(
        default=None,
        description="URL to an icon for the agent",
    )
    additional_interfaces: list[AgentInterface] = Field(
        default_factory=list,
        description="Additional supported interfaces (transport and URL combinations)",
    )
    security: list[dict[str, list[str]]] = Field(
        default_factory=list,
        description="Security requirement objects for all agent interactions",
    )
    security_schemes: dict[str, SecurityScheme] = Field(
        default_factory=dict,
        description="Security schemes available to authorize requests",
    )
    supports_authenticated_extended_card: bool = Field(
        default=False,
        description="Whether agent provides extended card to authenticated users",
    )
    url: Url | None = Field(
        default=None,
        description="Preferred endpoint URL for the agent. Set at runtime if not provided.",
    )
    signatures: list[AgentCardSignature] = Field(
        default_factory=list,
        description="JSON Web Signatures for the AgentCard",
    )

@@ -3,9 +3,10 @@

from __future__ import annotations

from collections.abc import AsyncIterator
from typing import TYPE_CHECKING, TypedDict
from typing import TYPE_CHECKING, Any, TypedDict
import uuid

from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
    AgentCard,
    Message,

@@ -20,7 +21,10 @@ from a2a.types import (
from typing_extensions import NotRequired

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import A2AResponseReceivedEvent
from crewai.events.types.a2a_events import (
    A2AConnectionErrorEvent,
    A2AResponseReceivedEvent,
)


if TYPE_CHECKING:

@@ -55,7 +59,8 @@ class TaskStateResult(TypedDict):
    history: list[Message]
    result: NotRequired[str]
    error: NotRequired[str]
    agent_card: NotRequired[AgentCard]
    agent_card: NotRequired[dict[str, Any]]
    a2a_agent_name: NotRequired[str | None]


def extract_task_result_parts(a2a_task: A2ATask) -> list[str]:

@@ -131,50 +136,69 @@ def process_task_state(
    is_multiturn: bool,
    agent_role: str | None,
    result_parts: list[str] | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    is_final: bool = True,
) -> TaskStateResult | None:
    """Process A2A task state and return result dictionary.

    Shared logic for both polling and streaming handlers.

    Args:
        a2a_task: The A2A task to process
        new_messages: List to collect messages (modified in place)
        agent_card: The agent card
        turn_number: Current turn number
        is_multiturn: Whether multi-turn conversation
        agent_role: Agent role for logging
        a2a_task: The A2A task to process.
        new_messages: List to collect messages (modified in place).
        agent_card: The agent card.
        turn_number: Current turn number.
        is_multiturn: Whether multi-turn conversation.
        agent_role: Agent role for logging.
        result_parts: Accumulated result parts (streaming passes accumulated,
            polling passes None to extract from task)
            polling passes None to extract from task).
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        from_task: Optional CrewAI Task for event metadata.
        from_agent: Optional CrewAI Agent for event metadata.
        is_final: Whether this is the final response in the stream.

    Returns:
        Result dictionary if terminal/actionable state, None otherwise
        Result dictionary if terminal/actionable state, None otherwise.
    """
    should_extract = result_parts is None
    if result_parts is None:
        result_parts = []

    if a2a_task.status.state == TaskState.completed:
        if should_extract:
        if not result_parts:
            extracted_parts = extract_task_result_parts(a2a_task)
            result_parts.extend(extracted_parts)
        if a2a_task.history:
            new_messages.extend(a2a_task.history)

        response_text = " ".join(result_parts) if result_parts else ""
        message_id = None
        if a2a_task.status and a2a_task.status.message:
            message_id = a2a_task.status.message.message_id
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=response_text,
                turn_number=turn_number,
                context_id=a2a_task.context_id,
                message_id=message_id,
                is_multiturn=is_multiturn,
                status="completed",
                final=is_final,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

        return TaskStateResult(
            status=TaskState.completed,
            agent_card=agent_card,
            agent_card=agent_card.model_dump(exclude_none=True),
            result=response_text,
            history=new_messages,
        )

@@ -194,14 +218,24 @@ def process_task_state(
        )
        new_messages.append(agent_message)

        input_message_id = None
        if a2a_task.status and a2a_task.status.message:
            input_message_id = a2a_task.status.message.message_id
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=response_text,
                turn_number=turn_number,
                context_id=a2a_task.context_id,
                message_id=input_message_id,
                is_multiturn=is_multiturn,
                status="input_required",
                final=is_final,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

@@ -209,7 +243,7 @@ def process_task_state(
            status=TaskState.input_required,
            error=response_text,
            history=new_messages,
            agent_card=agent_card,
            agent_card=agent_card.model_dump(exclude_none=True),
        )

    if a2a_task.status.state in {TaskState.failed, TaskState.rejected}:

@@ -248,6 +282,11 @@ async def send_message_and_get_task_id(
    turn_number: int,
    is_multiturn: bool,
    agent_role: str | None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    context_id: str | None = None,
) -> str | TaskStateResult:
    """Send message and process initial response.

@@ -262,6 +301,11 @@ async def send_message_and_get_task_id(
        turn_number: Current turn number
        is_multiturn: Whether multi-turn conversation
        agent_role: Agent role for logging
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        endpoint: Optional A2A endpoint URL.
        a2a_agent_name: Optional A2A agent name.
        context_id: Optional A2A context ID for correlation.

    Returns:
        Task ID string if agent needs polling/waiting, or TaskStateResult if done.

@@ -280,9 +324,16 @@ async def send_message_and_get_task_id(
                A2AResponseReceivedEvent(
                    response=response_text,
                    turn_number=turn_number,
                    context_id=event.context_id,
                    message_id=event.message_id,
                    is_multiturn=is_multiturn,
                    status="completed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )

@@ -290,7 +341,7 @@ async def send_message_and_get_task_id(
                status=TaskState.completed,
                result=response_text,
                history=new_messages,
                agent_card=agent_card,
                agent_card=agent_card.model_dump(exclude_none=True),
            )

        if isinstance(event, tuple):

@@ -304,6 +355,10 @@ async def send_message_and_get_task_id(
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            )
            if result:
                return result

@@ -316,6 +371,99 @@ async def send_message_and_get_task_id(
            history=new_messages,
        )

    except A2AClientHTTPError as e:
        error_msg = f"HTTP Error {e.status_code}: {e!s}"

        error_message = Message(
            role=Role.agent,
            message_id=str(uuid.uuid4()),
            parts=[Part(root=TextPart(text=error_msg))],
            context_id=context_id,
        )
        new_messages.append(error_message)

        crewai_event_bus.emit(
            None,
            A2AConnectionErrorEvent(
                endpoint=endpoint or "",
                error=str(e),
                error_type="http_error",
                status_code=e.status_code,
                a2a_agent_name=a2a_agent_name,
                operation="send_message",
                context_id=context_id,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=error_msg,
                turn_number=turn_number,
                context_id=context_id,
                is_multiturn=is_multiturn,
                status="failed",
                final=True,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )

    except Exception as e:
        error_msg = f"Unexpected error during send_message: {e!s}"

        error_message = Message(
            role=Role.agent,
            message_id=str(uuid.uuid4()),
            parts=[Part(root=TextPart(text=error_msg))],
            context_id=context_id,
        )
        new_messages.append(error_message)

        crewai_event_bus.emit(
            None,
            A2AConnectionErrorEvent(
                endpoint=endpoint or "",
                error=str(e),
                error_type="unexpected_error",
                a2a_agent_name=a2a_agent_name,
                operation="send_message",
                context_id=context_id,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=error_msg,
                turn_number=turn_number,
                context_id=context_id,
                is_multiturn=is_multiturn,
                status="failed",
                final=True,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )

    finally:
        aclose = getattr(event_stream, "aclose", None)
        if aclose:

@@ -1,7 +1,17 @@
"""Type definitions for A2A protocol message parts."""

from typing import Any, Literal, Protocol, TypedDict, runtime_checkable
from __future__ import annotations

from typing import (
    Annotated,
    Any,
    Literal,
    Protocol,
    TypedDict,
    runtime_checkable,
)

from pydantic import BeforeValidator, HttpUrl, TypeAdapter
from typing_extensions import NotRequired

from crewai.a2a.updates import (

@@ -15,6 +25,18 @@ from crewai.a2a.updates import (
)


TransportType = Literal["JSONRPC", "GRPC", "HTTP+JSON"]

http_url_adapter: TypeAdapter[HttpUrl] = TypeAdapter(HttpUrl)

Url = Annotated[
    str,
    BeforeValidator(
        lambda value: str(http_url_adapter.validate_python(value, strict=True))
    ),
]


@runtime_checkable
class AgentResponseProtocol(Protocol):
    """Protocol for the dynamically created AgentResponse model."""

@@ -22,6 +22,13 @@ class BaseHandlerKwargs(TypedDict, total=False):
    turn_number: int
    is_multiturn: bool
    agent_role: str | None
    context_id: str | None
    task_id: str | None
    endpoint: str | None
    agent_branch: Any
    a2a_agent_name: str | None
    from_task: Any
    from_agent: Any


class PollingHandlerKwargs(BaseHandlerKwargs, total=False):

@@ -29,8 +36,6 @@ class PollingHandlerKwargs(BaseHandlerKwargs, total=False):

    polling_interval: float
    polling_timeout: float
    endpoint: str
    agent_branch: Any
    history_length: int
    max_polls: int | None


@@ -38,9 +43,6 @@ class PollingHandlerKwargs(BaseHandlerKwargs, total=False):
class StreamingHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for streaming handler."""

    context_id: str | None
    task_id: str | None


class PushNotificationHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for push notification handler."""

@@ -49,7 +51,6 @@ class PushNotificationHandlerKwargs(BaseHandlerKwargs, total=False):
    result_store: PushNotificationResultStore
    polling_timeout: float
    polling_interval: float
    agent_branch: Any


class PushNotificationResultStore(Protocol):

@@ -31,6 +31,7 @@ from crewai.a2a.task_helpers import (
|
||||
from crewai.a2a.updates.base import PollingHandlerKwargs
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.a2a_events import (
|
||||
A2AConnectionErrorEvent,
|
||||
A2APollingStartedEvent,
|
||||
A2APollingStatusEvent,
|
||||
A2AResponseReceivedEvent,
|
||||
@@ -49,23 +50,33 @@ async def _poll_task_until_complete(
|
||||
agent_branch: Any | None = None,
|
||||
history_length: int = 100,
|
||||
max_polls: int | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
context_id: str | None = None,
|
||||
endpoint: str | None = None,
|
||||
a2a_agent_name: str | None = None,
|
||||
) -> A2ATask:
|
||||
"""Poll task status until terminal state reached.
|
||||
|
||||
Args:
|
||||
client: A2A client instance
|
||||
task_id: Task ID to poll
|
||||
polling_interval: Seconds between poll attempts
|
||||
polling_timeout: Max seconds before timeout
|
||||
agent_branch: Agent tree branch for logging
|
||||
history_length: Number of messages to retrieve per poll
|
||||
max_polls: Max number of poll attempts (None = unlimited)
|
||||
client: A2A client instance.
|
||||
task_id: Task ID to poll.
|
||||
polling_interval: Seconds between poll attempts.
|
||||
polling_timeout: Max seconds before timeout.
|
||||
agent_branch: Agent tree branch for logging.
|
||||
history_length: Number of messages to retrieve per poll.
|
||||
max_polls: Max number of poll attempts (None = unlimited).
|
||||
from_task: Optional CrewAI Task object for event metadata.
|
||||
from_agent: Optional CrewAI Agent object for event metadata.
|
||||
context_id: A2A context ID for correlation.
|
||||
endpoint: A2A agent endpoint URL.
|
||||
a2a_agent_name: Name of the A2A agent from agent card.
|
||||
|
||||
Returns:
|
||||
Final task object in terminal state
|
||||
Final task object in terminal state.
|
||||
|
||||
Raises:
|
||||
A2APollingTimeoutError: If polling exceeds timeout or max_polls
|
||||
A2APollingTimeoutError: If polling exceeds timeout or max_polls.
|
||||
"""
|
||||
start_time = time.monotonic()
|
||||
poll_count = 0
|
||||
@@ -77,13 +88,19 @@ async def _poll_task_until_complete(
|
||||
)
|
||||
|
||||
elapsed = time.monotonic() - start_time
|
||||
effective_context_id = task.context_id or context_id
|
||||
crewai_event_bus.emit(
|
||||
agent_branch,
|
||||
A2APollingStatusEvent(
|
||||
task_id=task_id,
|
||||
context_id=effective_context_id,
|
||||
state=str(task.status.state.value) if task.status.state else "unknown",
|
||||
elapsed_seconds=elapsed,
|
||||
poll_count=poll_count,
|
||||
endpoint=endpoint,
|
||||
a2a_agent_name=a2a_agent_name,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -137,6 +154,9 @@ class PollingHandler:
|
||||
max_polls = kwargs.get("max_polls")
|
||||
context_id = kwargs.get("context_id")
|
||||
task_id = kwargs.get("task_id")
|
||||
a2a_agent_name = kwargs.get("a2a_agent_name")
|
||||
from_task = kwargs.get("from_task")
|
||||
from_agent = kwargs.get("from_agent")
|
||||
|
||||
try:
|
||||
result_or_task_id = await send_message_and_get_task_id(
|
||||
@@ -146,6 +166,11 @@ class PollingHandler:
|
||||
turn_number=turn_number,
|
||||
is_multiturn=is_multiturn,
|
||||
agent_role=agent_role,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
endpoint=endpoint,
|
||||
a2a_agent_name=a2a_agent_name,
|
||||
context_id=context_id,
|
||||
)
|
||||
|
||||
        if not isinstance(result_or_task_id, str):
@@ -157,8 +182,12 @@ class PollingHandler:
                agent_branch,
                A2APollingStartedEvent(
                    task_id=task_id,
                    context_id=context_id,
                    polling_interval=polling_interval,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )

@@ -170,6 +199,11 @@ class PollingHandler:
                agent_branch=agent_branch,
                history_length=history_length,
                max_polls=max_polls,
                from_task=from_task,
                from_agent=from_agent,
                context_id=context_id,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
            )

            result = process_task_state(
@@ -179,6 +213,10 @@ class PollingHandler:
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            )
            if result:
                return result
@@ -206,9 +244,15 @@ class PollingHandler:
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
@@ -229,14 +273,83 @@ class PollingHandler:
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="http_error",
                    status_code=e.status_code,
                    a2a_agent_name=a2a_agent_name,
                    operation="polling",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )

        except Exception as e:
            error_msg = f"Unexpected error during polling: {e!s}"

            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=str(e),
                    error_type="unexpected_error",
                    a2a_agent_name=a2a_agent_name,
                    operation="polling",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
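The polling hunks above thread endpoint, agent-name, and task/agent metadata into every emitted event. As a rough illustration (a sketch, assuming the `@crewai_event_bus.on(...)` listener registration from CrewAI's event documentation and only the event fields visible in this diff), a caller could observe the new polling lifecycle like so:

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
    A2AConnectionErrorEvent,
    A2APollingStartedEvent,
)


@crewai_event_bus.on(A2APollingStartedEvent)
def log_polling_started(source, event):
    # task_id, context_id, polling_interval, endpoint, and a2a_agent_name
    # are the fields this diff adds to the event.
    print(f"polling {event.endpoint} every {event.polling_interval}s "
          f"(task={event.task_id}, agent={event.a2a_agent_name})")


@crewai_event_bus.on(A2AConnectionErrorEvent)
def log_polling_error(source, event):
    if event.operation == "polling":
        print(f"polling error from {event.endpoint}: {event.error_type}: {event.error}")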
@@ -29,6 +29,7 @@ from crewai.a2a.updates.base import (
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
    A2AConnectionErrorEvent,
    A2APushNotificationRegisteredEvent,
    A2APushNotificationTimeoutEvent,
    A2AResponseReceivedEvent,
@@ -48,6 +49,11 @@ async def _wait_for_push_result(
    timeout: float,
    poll_interval: float,
    agent_branch: Any | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    context_id: str | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
) -> A2ATask | None:
    """Wait for push notification result.

@@ -57,6 +63,11 @@ async def _wait_for_push_result(
        timeout: Max seconds to wait.
        poll_interval: Seconds between polling attempts.
        agent_branch: Agent tree branch for logging.
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        context_id: A2A context ID for correlation.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent.

    Returns:
        Final task object, or None if timeout.
@@ -72,7 +83,12 @@ async def _wait_for_push_result(
            agent_branch,
            A2APushNotificationTimeoutEvent(
                task_id=task_id,
                context_id=context_id,
                timeout_seconds=timeout,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

@@ -115,18 +131,56 @@ class PushNotificationHandler:
        agent_role = kwargs.get("agent_role")
        context_id = kwargs.get("context_id")
        task_id = kwargs.get("task_id")
        endpoint = kwargs.get("endpoint")
        a2a_agent_name = kwargs.get("a2a_agent_name")
        from_task = kwargs.get("from_task")
        from_agent = kwargs.get("from_agent")

        if config is None:
            error_msg = (
                "PushNotificationConfig is required for push notification handler"
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=error_msg,
                    error_type="configuration_error",
                    a2a_agent_name=a2a_agent_name,
                    operation="push_notification",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error="PushNotificationConfig is required for push notification handler",
                error=error_msg,
                history=new_messages,
            )

        if result_store is None:
            error_msg = (
                "PushNotificationResultStore is required for push notification handler"
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=error_msg,
                    error_type="configuration_error",
                    a2a_agent_name=a2a_agent_name,
                    operation="push_notification",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error="PushNotificationResultStore is required for push notification handler",
                error=error_msg,
                history=new_messages,
            )

@@ -138,6 +192,11 @@ class PushNotificationHandler:
            turn_number=turn_number,
            is_multiturn=is_multiturn,
            agent_role=agent_role,
            from_task=from_task,
            from_agent=from_agent,
            endpoint=endpoint,
            a2a_agent_name=a2a_agent_name,
            context_id=context_id,
        )

        if not isinstance(result_or_task_id, str):
@@ -149,7 +208,12 @@ class PushNotificationHandler:
            agent_branch,
            A2APushNotificationRegisteredEvent(
                task_id=task_id,
                context_id=context_id,
                callback_url=str(config.url),
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

@@ -165,6 +229,11 @@ class PushNotificationHandler:
            timeout=polling_timeout,
            poll_interval=polling_interval,
            agent_branch=agent_branch,
            from_task=from_task,
            from_agent=from_agent,
            context_id=context_id,
            endpoint=endpoint,
            a2a_agent_name=a2a_agent_name,
        )

        if final_task is None:
@@ -181,6 +250,10 @@ class PushNotificationHandler:
            turn_number=turn_number,
            is_multiturn=is_multiturn,
            agent_role=agent_role,
            endpoint=endpoint,
            a2a_agent_name=a2a_agent_name,
            from_task=from_task,
            from_agent=from_agent,
        )
        if result:
            return result
@@ -203,14 +276,83 @@ class PushNotificationHandler:
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=str(e),
                    error_type="http_error",
                    status_code=e.status_code,
                    a2a_agent_name=a2a_agent_name,
                    operation="push_notification",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )

        except Exception as e:
            error_msg = f"Unexpected error during push notification: {e!s}"

            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=str(e),
                    error_type="unexpected_error",
                    a2a_agent_name=a2a_agent_name,
                    operation="push_notification",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
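The push-notification hunks above validate the `PushNotificationConfig` and result store before registering the webhook, then emit registered/timeout events around `_wait_for_push_result`. A minimal observer sketch (assuming the same `@crewai_event_bus.on(...)` registration as above; all fields come from this diff):

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
    A2APushNotificationRegisteredEvent,
    A2APushNotificationTimeoutEvent,
)


@crewai_event_bus.on(A2APushNotificationRegisteredEvent)
def on_webhook_registered(source, event):
    # callback_url is str(config.url) from the validated PushNotificationConfig.
    print(f"webhook {event.callback_url} registered for task {event.task_id}")


@crewai_event_bus.on(A2APushNotificationTimeoutEvent)
def on_push_timeout(source, event):
    print(f"no push result within {event.timeout_seconds}s for task {event.task_id}")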
@@ -26,7 +26,13 @@ from crewai.a2a.task_helpers import (
)
from crewai.a2a.updates.base import StreamingHandlerKwargs
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import A2AResponseReceivedEvent
from crewai.events.types.a2a_events import (
    A2AArtifactReceivedEvent,
    A2AConnectionErrorEvent,
    A2AResponseReceivedEvent,
    A2AStreamingChunkEvent,
    A2AStreamingStartedEvent,
)


class StreamingHandler:
@@ -57,19 +63,57 @@ class StreamingHandler:
        turn_number = kwargs.get("turn_number", 0)
        is_multiturn = kwargs.get("is_multiturn", False)
        agent_role = kwargs.get("agent_role")
        endpoint = kwargs.get("endpoint")
        a2a_agent_name = kwargs.get("a2a_agent_name")
        from_task = kwargs.get("from_task")
        from_agent = kwargs.get("from_agent")
        agent_branch = kwargs.get("agent_branch")

        result_parts: list[str] = []
        final_result: TaskStateResult | None = None
        event_stream = client.send_message(message)
        chunk_index = 0

        crewai_event_bus.emit(
            agent_branch,
            A2AStreamingStartedEvent(
                task_id=task_id,
                context_id=context_id,
                endpoint=endpoint or "",
                a2a_agent_name=a2a_agent_name,
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

        try:
            async for event in event_stream:
                if isinstance(event, Message):
                    new_messages.append(event)
                    message_context_id = event.context_id or context_id
                    for part in event.parts:
                        if part.root.kind == "text":
                            text = part.root.text
                            result_parts.append(text)
                            crewai_event_bus.emit(
                                agent_branch,
                                A2AStreamingChunkEvent(
                                    task_id=event.task_id or task_id,
                                    context_id=message_context_id,
                                    chunk=text,
                                    chunk_index=chunk_index,
                                    endpoint=endpoint,
                                    a2a_agent_name=a2a_agent_name,
                                    turn_number=turn_number,
                                    is_multiturn=is_multiturn,
                                    from_task=from_task,
                                    from_agent=from_agent,
                                ),
                            )
                            chunk_index += 1

                elif isinstance(event, tuple):
                    a2a_task, update = event
@@ -81,10 +125,51 @@ class StreamingHandler:
                        for part in artifact.parts
                        if part.root.kind == "text"
                    )
                    artifact_size = None
                    if artifact.parts:
                        artifact_size = sum(
                            len(p.root.text.encode("utf-8"))
                            if p.root.kind == "text"
                            else len(getattr(p.root, "data", b""))
                            for p in artifact.parts
                        )
                    effective_context_id = a2a_task.context_id or context_id
                    crewai_event_bus.emit(
                        agent_branch,
                        A2AArtifactReceivedEvent(
                            task_id=a2a_task.id,
                            artifact_id=artifact.artifact_id,
                            artifact_name=artifact.name,
                            artifact_description=artifact.description,
                            mime_type=artifact.parts[0].root.kind
                            if artifact.parts
                            else None,
                            size_bytes=artifact_size,
                            append=update.append or False,
                            last_chunk=update.last_chunk or False,
                            endpoint=endpoint,
                            a2a_agent_name=a2a_agent_name,
                            context_id=effective_context_id,
                            turn_number=turn_number,
                            is_multiturn=is_multiturn,
                            from_task=from_task,
                            from_agent=from_agent,
                        ),
                    )

                    is_final_update = False
                    if isinstance(update, TaskStatusUpdateEvent):
                        is_final_update = update.final
                        if (
                            update.status
                            and update.status.message
                            and update.status.message.parts
                        ):
                            result_parts.extend(
                                part.root.text
                                for part in update.status.message.parts
                                if part.root.kind == "text" and part.root.text
                            )

                    if (
                        not is_final_update
@@ -101,6 +186,11 @@ class StreamingHandler:
                        is_multiturn=is_multiturn,
                        agent_role=agent_role,
                        result_parts=result_parts,
                        endpoint=endpoint,
                        a2a_agent_name=a2a_agent_name,
                        from_task=from_task,
                        from_agent=from_agent,
                        is_final=is_final_update,
                    )
                    if final_result:
                        break
@@ -118,13 +208,82 @@ class StreamingHandler:
            new_messages.append(error_message)

            crewai_event_bus.emit(
                None,
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=str(e),
                    error_type="http_error",
                    status_code=e.status_code,
                    a2a_agent_name=a2a_agent_name,
                    operation="streaming",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )

        except Exception as e:
            error_msg = f"Unexpected error during streaming: {e!s}"

            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=str(e),
                    error_type="unexpected_error",
                    a2a_agent_name=a2a_agent_name,
                    operation="streaming",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
@@ -136,7 +295,23 @@ class StreamingHandler:
        finally:
            aclose = getattr(event_stream, "aclose", None)
            if aclose:
                await aclose()
                try:
                    await aclose()
                except Exception as close_error:
                    crewai_event_bus.emit(
                        agent_branch,
                        A2AConnectionErrorEvent(
                            endpoint=endpoint or "",
                            error=str(close_error),
                            error_type="stream_close_error",
                            a2a_agent_name=a2a_agent_name,
                            operation="stream_close",
                            context_id=context_id,
                            task_id=task_id,
                            from_task=from_task,
                            from_agent=from_agent,
                        ),
                    )

        if final_result:
            return final_result
@@ -145,5 +320,5 @@ class StreamingHandler:
            status=TaskState.completed,
            result=" ".join(result_parts) if result_parts else "",
            history=new_messages,
            agent_card=agent_card,
            agent_card=agent_card.model_dump(exclude_none=True),
        )
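The streaming handler now emits a started event, a chunk event per text part (with an incrementing chunk_index), and artifact events with computed byte sizes. A sketch of a live console consumer (same hedged assumptions about listener registration as above):

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
    A2AStreamingChunkEvent,
    A2AStreamingStartedEvent,
)


@crewai_event_bus.on(A2AStreamingStartedEvent)
def on_stream_start(source, event):
    print(f"stream opened to {event.endpoint} (turn {event.turn_number})")


@crewai_event_bus.on(A2AStreamingChunkEvent)
def on_chunk(source, event):
    # chunk_index increments once per text part, as in the handler above.
    print(event.chunk, end="", flush=True)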
lib/crewai/src/crewai/a2a/utils/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
"""A2A utility modules for client operations."""
lib/crewai/src/crewai/a2a/utils/agent_card.py (new file, 513 lines)
@@ -0,0 +1,513 @@
"""AgentCard utilities for A2A client and server operations."""

from __future__ import annotations

import asyncio
from collections.abc import MutableMapping
from functools import lru_cache
import time
from types import MethodType
from typing import TYPE_CHECKING

from a2a.client.errors import A2AClientHTTPError
from a2a.types import AgentCapabilities, AgentCard, AgentSkill
from aiocache import cached  # type: ignore[import-untyped]
from aiocache.serializers import PickleSerializer  # type: ignore[import-untyped]
import httpx

from crewai.a2a.auth.schemas import APIKeyAuth, HTTPDigestAuth
from crewai.a2a.auth.utils import (
    _auth_store,
    configure_auth_client,
    retry_on_401,
)
from crewai.a2a.config import A2AServerConfig
from crewai.crew import Crew
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
    A2AAgentCardFetchedEvent,
    A2AAuthenticationFailedEvent,
    A2AConnectionErrorEvent,
)


if TYPE_CHECKING:
    from crewai.a2a.auth.schemas import AuthScheme
    from crewai.agent import Agent
    from crewai.task import Task


def _get_server_config(agent: Agent) -> A2AServerConfig | None:
    """Get A2AServerConfig from an agent's a2a configuration.

    Args:
        agent: The Agent instance to check.

    Returns:
        A2AServerConfig if present, None otherwise.
    """
    if agent.a2a is None:
        return None
    if isinstance(agent.a2a, A2AServerConfig):
        return agent.a2a
    if isinstance(agent.a2a, list):
        for config in agent.a2a:
            if isinstance(config, A2AServerConfig):
                return config
    return None


def fetch_agent_card(
    endpoint: str,
    auth: AuthScheme | None = None,
    timeout: int = 30,
    use_cache: bool = True,
    cache_ttl: int = 300,
) -> AgentCard:
    """Fetch AgentCard from an A2A endpoint with optional caching.

    Args:
        endpoint: A2A agent endpoint URL (AgentCard URL).
        auth: Optional AuthScheme for authentication.
        timeout: Request timeout in seconds.
        use_cache: Whether to use caching (default True).
        cache_ttl: Cache TTL in seconds (default 300 = 5 minutes).

    Returns:
        AgentCard object with agent capabilities and skills.

    Raises:
        httpx.HTTPStatusError: If the request fails.
        A2AClientHTTPError: If authentication fails.
    """
    if use_cache:
        if auth:
            auth_data = auth.model_dump_json(
                exclude={
                    "_access_token",
                    "_token_expires_at",
                    "_refresh_token",
                    "_authorization_callback",
                }
            )
            auth_hash = hash((type(auth).__name__, auth_data))
        else:
            auth_hash = 0
        _auth_store[auth_hash] = auth
        ttl_hash = int(time.time() // cache_ttl)
        return _fetch_agent_card_cached(endpoint, auth_hash, timeout, ttl_hash)

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(
            afetch_agent_card(endpoint=endpoint, auth=auth, timeout=timeout)
        )
    finally:
        loop.close()


async def afetch_agent_card(
    endpoint: str,
    auth: AuthScheme | None = None,
    timeout: int = 30,
    use_cache: bool = True,
) -> AgentCard:
    """Fetch AgentCard from an A2A endpoint asynchronously.

    Native async implementation. Use this when running in an async context.

    Args:
        endpoint: A2A agent endpoint URL (AgentCard URL).
        auth: Optional AuthScheme for authentication.
        timeout: Request timeout in seconds.
        use_cache: Whether to use caching (default True).

    Returns:
        AgentCard object with agent capabilities and skills.

    Raises:
        httpx.HTTPStatusError: If the request fails.
        A2AClientHTTPError: If authentication fails.
    """
    if use_cache:
        if auth:
            auth_data = auth.model_dump_json(
                exclude={
                    "_access_token",
                    "_token_expires_at",
                    "_refresh_token",
                    "_authorization_callback",
                }
            )
            auth_hash = hash((type(auth).__name__, auth_data))
        else:
            auth_hash = 0
        _auth_store[auth_hash] = auth
        agent_card: AgentCard = await _afetch_agent_card_cached(
            endpoint, auth_hash, timeout
        )
        return agent_card

    return await _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)


@lru_cache()
def _fetch_agent_card_cached(
    endpoint: str,
    auth_hash: int,
    timeout: int,
    _ttl_hash: int,
) -> AgentCard:
    """Cached sync version of fetch_agent_card."""
    auth = _auth_store.get(auth_hash)

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(
            _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)
        )
    finally:
        loop.close()


@cached(ttl=300, serializer=PickleSerializer())  # type: ignore[untyped-decorator]
async def _afetch_agent_card_cached(
    endpoint: str,
    auth_hash: int,
    timeout: int,
) -> AgentCard:
    """Cached async implementation of AgentCard fetching."""
    auth = _auth_store.get(auth_hash)
    return await _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)


async def _afetch_agent_card_impl(
    endpoint: str,
    auth: AuthScheme | None,
    timeout: int,
) -> AgentCard:
    """Internal async implementation of AgentCard fetching."""
    start_time = time.perf_counter()

    if "/.well-known/agent-card.json" in endpoint:
        base_url = endpoint.replace("/.well-known/agent-card.json", "")
        agent_card_path = "/.well-known/agent-card.json"
    else:
        url_parts = endpoint.split("/", 3)
        base_url = f"{url_parts[0]}//{url_parts[2]}"
        agent_card_path = f"/{url_parts[3]}" if len(url_parts) > 3 else "/"

    headers: MutableMapping[str, str] = {}
    if auth:
        async with httpx.AsyncClient(timeout=timeout) as temp_auth_client:
            if isinstance(auth, (HTTPDigestAuth, APIKeyAuth)):
                configure_auth_client(auth, temp_auth_client)
            headers = await auth.apply_auth(temp_auth_client, {})

    async with httpx.AsyncClient(timeout=timeout, headers=headers) as temp_client:
        if auth and isinstance(auth, (HTTPDigestAuth, APIKeyAuth)):
            configure_auth_client(auth, temp_client)

        agent_card_url = f"{base_url}{agent_card_path}"

        async def _fetch_agent_card_request() -> httpx.Response:
            return await temp_client.get(agent_card_url)

        try:
            response = await retry_on_401(
                request_func=_fetch_agent_card_request,
                auth_scheme=auth,
                client=temp_client,
                headers=temp_client.headers,
                max_retries=2,
            )
            response.raise_for_status()

            agent_card = AgentCard.model_validate(response.json())
            fetch_time_ms = (time.perf_counter() - start_time) * 1000
            agent_card_dict = agent_card.model_dump(exclude_none=True)

            crewai_event_bus.emit(
                None,
                A2AAgentCardFetchedEvent(
                    endpoint=endpoint,
                    a2a_agent_name=agent_card.name,
                    agent_card=agent_card_dict,
                    protocol_version=agent_card.protocol_version,
                    provider=agent_card_dict.get("provider"),
                    cached=False,
                    fetch_time_ms=fetch_time_ms,
                ),
            )

            return agent_card

        except httpx.HTTPStatusError as e:
            elapsed_ms = (time.perf_counter() - start_time) * 1000
            response_body = e.response.text[:1000] if e.response.text else None

            if e.response.status_code == 401:
                error_details = ["Authentication failed"]
                www_auth = e.response.headers.get("WWW-Authenticate")
                if www_auth:
                    error_details.append(f"WWW-Authenticate: {www_auth}")
                if not auth:
                    error_details.append("No auth scheme provided")
                msg = " | ".join(error_details)

                auth_type = type(auth).__name__ if auth else None
                crewai_event_bus.emit(
                    None,
                    A2AAuthenticationFailedEvent(
                        endpoint=endpoint,
                        auth_type=auth_type,
                        error=msg,
                        status_code=401,
                        metadata={
                            "elapsed_ms": elapsed_ms,
                            "response_body": response_body,
                            "www_authenticate": www_auth,
                            "request_url": str(e.request.url),
                        },
                    ),
                )

                raise A2AClientHTTPError(401, msg) from e

            crewai_event_bus.emit(
                None,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="http_error",
                    status_code=e.response.status_code,
                    operation="fetch_agent_card",
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "response_body": response_body,
                        "request_url": str(e.request.url),
                    },
                ),
            )
            raise

        except httpx.TimeoutException as e:
            elapsed_ms = (time.perf_counter() - start_time) * 1000
            crewai_event_bus.emit(
                None,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="timeout",
                    operation="fetch_agent_card",
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "timeout_config": timeout,
                        "request_url": str(e.request.url) if e.request else None,
                    },
                ),
            )
            raise

        except httpx.ConnectError as e:
            elapsed_ms = (time.perf_counter() - start_time) * 1000
            crewai_event_bus.emit(
                None,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="connection_error",
                    operation="fetch_agent_card",
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "request_url": str(e.request.url) if e.request else None,
                    },
                ),
            )
            raise

        except httpx.RequestError as e:
            elapsed_ms = (time.perf_counter() - start_time) * 1000
            crewai_event_bus.emit(
                None,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="request_error",
                    operation="fetch_agent_card",
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "request_url": str(e.request.url) if e.request else None,
                    },
                ),
            )
            raise


def _task_to_skill(task: Task) -> AgentSkill:
    """Convert a CrewAI Task to an A2A AgentSkill.

    Args:
        task: The CrewAI Task to convert.

    Returns:
        AgentSkill representing the task's capability.
    """
    task_name = task.name or task.description[:50]
    task_id = task_name.lower().replace(" ", "_")

    tags: list[str] = []
    if task.agent:
        tags.append(task.agent.role.lower().replace(" ", "-"))

    return AgentSkill(
        id=task_id,
        name=task_name,
        description=task.description,
        tags=tags,
        examples=[task.expected_output] if task.expected_output else None,
    )


def _tool_to_skill(tool_name: str, tool_description: str) -> AgentSkill:
    """Convert an Agent's tool to an A2A AgentSkill.

    Args:
        tool_name: Name of the tool.
        tool_description: Description of what the tool does.

    Returns:
        AgentSkill representing the tool's capability.
    """
    tool_id = tool_name.lower().replace(" ", "_")

    return AgentSkill(
        id=tool_id,
        name=tool_name,
        description=tool_description,
        tags=[tool_name.lower().replace(" ", "-")],
    )


def _crew_to_agent_card(crew: Crew, url: str) -> AgentCard:
    """Generate an A2A AgentCard from a Crew instance.

    Args:
        crew: The Crew instance to generate a card for.
        url: The base URL where this crew will be exposed.

    Returns:
        AgentCard describing the crew's capabilities.
    """
    crew_name = getattr(crew, "name", None) or crew.__class__.__name__

    description_parts: list[str] = []
    crew_description = getattr(crew, "description", None)
    if crew_description:
        description_parts.append(crew_description)
    else:
        agent_roles = [agent.role for agent in crew.agents]
        description_parts.append(
            f"A crew of {len(crew.agents)} agents: {', '.join(agent_roles)}"
        )

    skills = [_task_to_skill(task) for task in crew.tasks]

    return AgentCard(
        name=crew_name,
        description=" ".join(description_parts),
        url=url,
        version="1.0.0",
        capabilities=AgentCapabilities(
            streaming=True,
            push_notifications=True,
        ),
        default_input_modes=["text/plain", "application/json"],
        default_output_modes=["text/plain", "application/json"],
        skills=skills,
    )


def _agent_to_agent_card(agent: Agent, url: str) -> AgentCard:
    """Generate an A2A AgentCard from an Agent instance.

    Uses A2AServerConfig values when available, falling back to agent properties.

    Args:
        agent: The Agent instance to generate a card for.
        url: The base URL where this agent will be exposed.

    Returns:
        AgentCard describing the agent's capabilities.
    """
    server_config = _get_server_config(agent) or A2AServerConfig()

    name = server_config.name or agent.role

    description_parts = [agent.goal]
    if agent.backstory:
        description_parts.append(agent.backstory)
    description = server_config.description or " ".join(description_parts)

    skills: list[AgentSkill] = (
        server_config.skills.copy() if server_config.skills else []
    )

    if not skills:
        if agent.tools:
            for tool in agent.tools:
                tool_name = getattr(tool, "name", None) or tool.__class__.__name__
                tool_desc = getattr(tool, "description", None) or f"Tool: {tool_name}"
                skills.append(_tool_to_skill(tool_name, tool_desc))

        if not skills:
            skills.append(
                AgentSkill(
                    id=agent.role.lower().replace(" ", "_"),
                    name=agent.role,
                    description=agent.goal,
                    tags=[agent.role.lower().replace(" ", "-")],
                )
            )

    return AgentCard(
        name=name,
        description=description,
        url=server_config.url or url,
        version=server_config.version,
        capabilities=server_config.capabilities,
        default_input_modes=server_config.default_input_modes,
        default_output_modes=server_config.default_output_modes,
        skills=skills,
        protocol_version=server_config.protocol_version,
        provider=server_config.provider,
        documentation_url=server_config.documentation_url,
        icon_url=server_config.icon_url,
        additional_interfaces=server_config.additional_interfaces,
        security=server_config.security,
        security_schemes=server_config.security_schemes,
        supports_authenticated_extended_card=server_config.supports_authenticated_extended_card,
        signatures=server_config.signatures,
    )


def inject_a2a_server_methods(agent: Agent) -> None:
    """Inject A2A server methods onto an Agent instance.

    Adds a `to_agent_card(url: str) -> AgentCard` method to the agent
    that generates an A2A-compliant AgentCard.

    Only injects if the agent has an A2AServerConfig.

    Args:
        agent: The Agent instance to inject methods onto.
    """
    if _get_server_config(agent) is None:
        return

    def _to_agent_card(self: Agent, url: str) -> AgentCard:
        return _agent_to_agent_card(self, url)

    object.__setattr__(agent, "to_agent_card", MethodType(_to_agent_card, agent))
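The new agent_card.py above layers two caches over one fetch implementation: the sync path uses lru_cache keyed on a time bucket (ttl_hash = int(time.time() // cache_ttl) changes the cache key once per TTL window, which is what expires entries), while the async path uses aiocache with a fixed 300-second TTL. A minimal usage sketch (the endpoint URL below is hypothetical):

import asyncio

from crewai.a2a.utils.agent_card import afetch_agent_card, fetch_agent_card

CARD_URL = "https://example-agent.test/.well-known/agent-card.json"  # hypothetical

# Sync path: repeat calls within cache_ttl seconds hit the lru_cache,
# because ttl_hash stays constant for that window.
card = fetch_agent_card(CARD_URL, timeout=10, cache_ttl=60)
print(card.name, [skill.id for skill in card.skills])

# Async path (driven by asyncio.run here to keep the example self-contained):
card = asyncio.run(afetch_agent_card(CARD_URL, use_cache=False))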
@@ -1,16 +1,14 @@
"""Utility functions for A2A (Agent-to-Agent) protocol delegation."""
"""A2A delegation utilities for executing tasks on remote agents."""

from __future__ import annotations

import asyncio
from collections.abc import AsyncIterator, MutableMapping
from contextlib import asynccontextmanager
from functools import lru_cache
import time
from typing import TYPE_CHECKING, Any
from typing import TYPE_CHECKING, Any, Literal
import uuid

from a2a.client import A2AClientHTTPError, Client, ClientConfig, ClientFactory
from a2a.client import Client, ClientConfig, ClientFactory
from a2a.types import (
    AgentCard,
    Message,
@@ -18,21 +16,16 @@ from a2a.types import (
    PushNotificationConfig as A2APushNotificationConfig,
    Role,
    TextPart,
    TransportProtocol,
)
from aiocache import cached  # type: ignore[import-untyped]
from aiocache.serializers import PickleSerializer  # type: ignore[import-untyped]
import httpx
from pydantic import BaseModel, Field, create_model
from pydantic import BaseModel

from crewai.a2a.auth.schemas import APIKeyAuth, HTTPDigestAuth
from crewai.a2a.auth.utils import (
    _auth_store,
    configure_auth_client,
    retry_on_401,
    validate_auth_against_agent_card,
)
from crewai.a2a.config import A2AConfig
from crewai.a2a.task_helpers import TaskStateResult
from crewai.a2a.types import (
    HANDLER_REGISTRY,
@@ -46,6 +39,7 @@ from crewai.a2a.updates import (
    StreamingHandler,
    UpdateConfig,
)
from crewai.a2a.utils.agent_card import _afetch_agent_card_cached
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
    A2AConversationStartedEvent,
@@ -53,7 +47,6 @@ from crewai.events.types.a2a_events import (
    A2ADelegationStartedEvent,
    A2AMessageSentEvent,
)
from crewai.types.utils import create_literals_from_strings


if TYPE_CHECKING:
@@ -76,189 +69,9 @@ def get_handler(config: UpdateConfig | None) -> HandlerType:
    return HANDLER_REGISTRY.get(type(config), StreamingHandler)


@lru_cache()
def _fetch_agent_card_cached(
    endpoint: str,
    auth_hash: int,
    timeout: int,
    _ttl_hash: int,
) -> AgentCard:
    """Cached sync version of fetch_agent_card."""
    auth = _auth_store.get(auth_hash)

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(
            _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)
        )
    finally:
        loop.close()


def fetch_agent_card(
    endpoint: str,
    auth: AuthScheme | None = None,
    timeout: int = 30,
    use_cache: bool = True,
    cache_ttl: int = 300,
) -> AgentCard:
    """Fetch AgentCard from an A2A endpoint with optional caching.

    Args:
        endpoint: A2A agent endpoint URL (AgentCard URL)
        auth: Optional AuthScheme for authentication
        timeout: Request timeout in seconds
        use_cache: Whether to use caching (default True)
        cache_ttl: Cache TTL in seconds (default 300 = 5 minutes)

    Returns:
        AgentCard object with agent capabilities and skills

    Raises:
        httpx.HTTPStatusError: If the request fails
        A2AClientHTTPError: If authentication fails
    """
    if use_cache:
        if auth:
            auth_data = auth.model_dump_json(
                exclude={
                    "_access_token",
                    "_token_expires_at",
                    "_refresh_token",
                    "_authorization_callback",
                }
            )
            auth_hash = hash((type(auth).__name__, auth_data))
        else:
            auth_hash = 0
        _auth_store[auth_hash] = auth
        ttl_hash = int(time.time() // cache_ttl)
        return _fetch_agent_card_cached(endpoint, auth_hash, timeout, ttl_hash)

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(
            afetch_agent_card(endpoint=endpoint, auth=auth, timeout=timeout)
        )
    finally:
        loop.close()


async def afetch_agent_card(
    endpoint: str,
    auth: AuthScheme | None = None,
    timeout: int = 30,
    use_cache: bool = True,
) -> AgentCard:
    """Fetch AgentCard from an A2A endpoint asynchronously.

    Native async implementation. Use this when running in an async context.

    Args:
        endpoint: A2A agent endpoint URL (AgentCard URL).
        auth: Optional AuthScheme for authentication.
        timeout: Request timeout in seconds.
        use_cache: Whether to use caching (default True).

    Returns:
        AgentCard object with agent capabilities and skills.

    Raises:
        httpx.HTTPStatusError: If the request fails.
        A2AClientHTTPError: If authentication fails.
    """
    if use_cache:
        if auth:
            auth_data = auth.model_dump_json(
                exclude={
                    "_access_token",
                    "_token_expires_at",
                    "_refresh_token",
                    "_authorization_callback",
                }
            )
            auth_hash = hash((type(auth).__name__, auth_data))
        else:
            auth_hash = 0
        _auth_store[auth_hash] = auth
        agent_card: AgentCard = await _afetch_agent_card_cached(
            endpoint, auth_hash, timeout
        )
        return agent_card

    return await _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)


@cached(ttl=300, serializer=PickleSerializer())  # type: ignore[untyped-decorator]
async def _afetch_agent_card_cached(
    endpoint: str,
    auth_hash: int,
    timeout: int,
) -> AgentCard:
    """Cached async implementation of AgentCard fetching."""
    auth = _auth_store.get(auth_hash)
    return await _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)


async def _afetch_agent_card_impl(
    endpoint: str,
    auth: AuthScheme | None,
    timeout: int,
) -> AgentCard:
    """Internal async implementation of AgentCard fetching."""
    if "/.well-known/agent-card.json" in endpoint:
        base_url = endpoint.replace("/.well-known/agent-card.json", "")
        agent_card_path = "/.well-known/agent-card.json"
    else:
        url_parts = endpoint.split("/", 3)
        base_url = f"{url_parts[0]}//{url_parts[2]}"
        agent_card_path = f"/{url_parts[3]}" if len(url_parts) > 3 else "/"

    headers: MutableMapping[str, str] = {}
    if auth:
        async with httpx.AsyncClient(timeout=timeout) as temp_auth_client:
            if isinstance(auth, (HTTPDigestAuth, APIKeyAuth)):
                configure_auth_client(auth, temp_auth_client)
            headers = await auth.apply_auth(temp_auth_client, {})

    async with httpx.AsyncClient(timeout=timeout, headers=headers) as temp_client:
        if auth and isinstance(auth, (HTTPDigestAuth, APIKeyAuth)):
            configure_auth_client(auth, temp_client)

        agent_card_url = f"{base_url}{agent_card_path}"

        async def _fetch_agent_card_request() -> httpx.Response:
            return await temp_client.get(agent_card_url)

        try:
            response = await retry_on_401(
                request_func=_fetch_agent_card_request,
                auth_scheme=auth,
                client=temp_client,
                headers=temp_client.headers,
                max_retries=2,
            )
            response.raise_for_status()

            return AgentCard.model_validate(response.json())

        except httpx.HTTPStatusError as e:
            if e.response.status_code == 401:
                error_details = ["Authentication failed"]
                www_auth = e.response.headers.get("WWW-Authenticate")
                if www_auth:
                    error_details.append(f"WWW-Authenticate: {www_auth}")
                if not auth:
                    error_details.append("No auth scheme provided")
                msg = " | ".join(error_details)
                raise A2AClientHTTPError(401, msg) from e
            raise


def execute_a2a_delegation(
    endpoint: str,
    transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
    auth: AuthScheme | None,
    timeout: int,
    task_description: str,
@@ -275,6 +88,9 @@ def execute_a2a_delegation(
    response_model: type[BaseModel] | None = None,
    turn_number: int | None = None,
    updates: UpdateConfig | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    skill_id: str | None = None,
) -> TaskStateResult:
    """Execute a task delegation to a remote A2A agent synchronously.

@@ -282,6 +98,23 @@ def execute_a2a_delegation(
    use aexecute_a2a_delegation directly.

    Args:
        endpoint: A2A agent endpoint URL (AgentCard URL)
        transport_protocol: Optional A2A transport protocol (grpc, jsonrpc, http+json)
        auth: Optional AuthScheme for authentication (Bearer, OAuth2, API Key, HTTP Basic/Digest)
        timeout: Request timeout in seconds
        task_description: The task to delegate
        context: Optional context information
        context_id: Context ID for correlating messages/tasks
        task_id: Specific task identifier
        reference_task_ids: List of related task IDs
        metadata: Additional metadata (external_id, request_id, etc.)
        extensions: Protocol extensions for custom fields
        conversation_history: Previous Message objects from conversation
        agent_id: Agent identifier for logging
        agent_role: Role of the CrewAI agent delegating the task
        agent_branch: Optional agent tree branch for logging
        response_model: Optional Pydantic model for structured outputs
        turn_number: Optional turn number for multi-turn conversations
        endpoint: A2A agent endpoint URL.
        auth: Optional AuthScheme for authentication.
        timeout: Request timeout in seconds.
@@ -299,6 +132,9 @@ def execute_a2a_delegation(
        response_model: Optional Pydantic model for structured outputs.
        turn_number: Optional turn number for multi-turn conversations.
        updates: Update mechanism config from A2AConfig.updates.
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        skill_id: Optional skill ID to target a specific agent capability.

    Returns:
        TaskStateResult with status, result/error, history, and agent_card.
@@ -323,16 +159,24 @@ def execute_a2a_delegation(
                agent_role=agent_role,
                agent_branch=agent_branch,
                response_model=response_model,
                transport_protocol=transport_protocol,
                turn_number=turn_number,
                updates=updates,
                from_task=from_task,
                from_agent=from_agent,
                skill_id=skill_id,
            )
        )
    finally:
        loop.close()
        try:
            loop.run_until_complete(loop.shutdown_asyncgens())
        finally:
            loop.close()


async def aexecute_a2a_delegation(
    endpoint: str,
    transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
    auth: AuthScheme | None,
    timeout: int,
    task_description: str,
@@ -349,6 +193,9 @@ async def aexecute_a2a_delegation(
    response_model: type[BaseModel] | None = None,
    turn_number: int | None = None,
    updates: UpdateConfig | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    skill_id: str | None = None,
) -> TaskStateResult:
    """Execute a task delegation to a remote A2A agent asynchronously.

@@ -356,6 +203,23 @@ async def aexecute_a2a_delegation(
    in an async context (e.g., with Crew.akickoff() or agent.aexecute_task()).

    Args:
        endpoint: A2A agent endpoint URL
        transport_protocol: Optional A2A transport protocol (grpc, jsonrpc, http+json)
        auth: Optional AuthScheme for authentication
        timeout: Request timeout in seconds
        task_description: Task to delegate
        context: Optional context
        context_id: Context ID for correlation
        task_id: Specific task identifier
        reference_task_ids: Related task IDs
        metadata: Additional metadata
        extensions: Protocol extensions
        conversation_history: Previous Message objects
        turn_number: Current turn number
        agent_branch: Agent tree branch for logging
        agent_id: Agent identifier for logging
        agent_role: Agent role for logging
        response_model: Optional Pydantic model for structured outputs
        endpoint: A2A agent endpoint URL.
        auth: Optional AuthScheme for authentication.
        timeout: Request timeout in seconds.
@@ -373,6 +237,9 @@ async def aexecute_a2a_delegation(
        response_model: Optional Pydantic model for structured outputs.
        turn_number: Optional turn number for multi-turn conversations.
        updates: Update mechanism config from A2AConfig.updates.
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        skill_id: Optional skill ID to target a specific agent capability.

    Returns:
        TaskStateResult with status, result/error, history, and agent_card.
@@ -384,17 +251,6 @@ async def aexecute_a2a_delegation(
    if turn_number is None:
        turn_number = len([m for m in conversation_history if m.role == Role.user]) + 1

    crewai_event_bus.emit(
        agent_branch,
        A2ADelegationStartedEvent(
            endpoint=endpoint,
            task_description=task_description,
            agent_id=agent_id,
            is_multiturn=is_multiturn,
            turn_number=turn_number,
        ),
    )

    result = await _aexecute_a2a_delegation_impl(
        endpoint=endpoint,
        auth=auth,
@@ -414,15 +270,29 @@ async def aexecute_a2a_delegation(
        agent_role=agent_role,
        response_model=response_model,
        updates=updates,
        transport_protocol=transport_protocol,
        from_task=from_task,
        from_agent=from_agent,
        skill_id=skill_id,
    )

    agent_card_data: dict[str, Any] = result.get("agent_card") or {}
    crewai_event_bus.emit(
        agent_branch,
        A2ADelegationCompletedEvent(
            status=result["status"],
            result=result.get("result"),
            error=result.get("error"),
            context_id=context_id,
            is_multiturn=is_multiturn,
            endpoint=endpoint,
            a2a_agent_name=result.get("a2a_agent_name"),
            agent_card=agent_card_data,
            provider=agent_card_data.get("provider"),
            metadata=metadata,
            extensions=list(extensions.keys()) if extensions else None,
            from_task=from_task,
            from_agent=from_agent,
        ),
    )

@@ -431,6 +301,7 @@ async def aexecute_a2a_delegation(

async def _aexecute_a2a_delegation_impl(
    endpoint: str,
    transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
    auth: AuthScheme | None,
    timeout: int,
    task_description: str,
@@ -448,6 +319,9 @@ async def _aexecute_a2a_delegation_impl(
    agent_role: str | None,
    response_model: type[BaseModel] | None,
    updates: UpdateConfig | None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    skill_id: str | None = None,
) -> TaskStateResult:
    """Internal async implementation of A2A delegation."""
    if auth:
@@ -480,6 +354,28 @@ async def _aexecute_a2a_delegation_impl(
    if agent_card.name:
        a2a_agent_name = agent_card.name

    agent_card_dict = agent_card.model_dump(exclude_none=True)
    crewai_event_bus.emit(
        agent_branch,
        A2ADelegationStartedEvent(
            endpoint=endpoint,
            task_description=task_description,
            agent_id=agent_id or endpoint,
            context_id=context_id,
            is_multiturn=is_multiturn,
            turn_number=turn_number,
            a2a_agent_name=a2a_agent_name,
            agent_card=agent_card_dict,
            protocol_version=agent_card.protocol_version,
            provider=agent_card_dict.get("provider"),
            skill_id=skill_id,
            metadata=metadata,
            extensions=list(extensions.keys()) if extensions else None,
            from_task=from_task,
            from_agent=from_agent,
        ),
    )

    if turn_number == 1:
        agent_id_for_event = agent_id or endpoint
        crewai_event_bus.emit(
@@ -487,7 +383,17 @@ async def _aexecute_a2a_delegation_impl(
            A2AConversationStartedEvent(
                agent_id=agent_id_for_event,
                endpoint=endpoint,
                context_id=context_id,
                a2a_agent_name=a2a_agent_name,
                agent_card=agent_card_dict,
                protocol_version=agent_card.protocol_version,
                provider=agent_card_dict.get("provider"),
                skill_id=skill_id,
                reference_task_ids=reference_task_ids,
                metadata=metadata,
                extensions=list(extensions.keys()) if extensions else None,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

@@ -513,6 +419,10 @@ async def _aexecute_a2a_delegation_impl(
        }
    )

    message_metadata = metadata.copy() if metadata else {}
    if skill_id:
        message_metadata["skill_id"] = skill_id

    message = Message(
        role=Role.user,
        message_id=str(uuid.uuid4()),
@@ -520,19 +430,27 @@ async def _aexecute_a2a_delegation_impl(
        context_id=context_id,
        task_id=task_id,
        reference_task_ids=reference_task_ids,
        metadata=metadata,
        metadata=message_metadata if message_metadata else None,
        extensions=extensions,
    )

    transport_protocol = TransportProtocol("JSONRPC")
    new_messages: list[Message] = [*conversation_history, message]
    crewai_event_bus.emit(
        None,
        A2AMessageSentEvent(
            message=message_text,
            turn_number=turn_number,
            context_id=context_id,
            message_id=message.message_id,
            is_multiturn=is_multiturn,
            agent_role=agent_role,
            endpoint=endpoint,
            a2a_agent_name=a2a_agent_name,
            skill_id=skill_id,
            metadata=message_metadata if message_metadata else None,
            extensions=list(extensions.keys()) if extensions else None,
            from_task=from_task,
            from_agent=from_agent,
        ),
    )

@@ -547,6 +465,9 @@ async def _aexecute_a2a_delegation_impl(
        "task_id": task_id,
        "endpoint": endpoint,
        "agent_branch": agent_branch,
        "a2a_agent_name": a2a_agent_name,
        "from_task": from_task,
        "from_agent": from_agent,
    }

    if isinstance(updates, PollingConfig):
@@ -584,19 +505,22 @@ async def _aexecute_a2a_delegation_impl(
        use_polling=use_polling,
        push_notification_config=push_config_for_client,
    ) as client:
        return await handler.execute(
        result = await handler.execute(
            client=client,
            message=message,
            new_messages=new_messages,
            agent_card=agent_card,
            **handler_kwargs,
        )
        result["a2a_agent_name"] = a2a_agent_name
        result["agent_card"] = agent_card.model_dump(exclude_none=True)
        return result


@asynccontextmanager
async def _create_a2a_client(
    agent_card: AgentCard,
    transport_protocol: TransportProtocol,
    transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
    timeout: int,
    headers: MutableMapping[str, str],
    streaming: bool,
@@ -607,19 +531,18 @@ async def _create_a2a_client(
    """Create and configure an A2A client.

    Args:
        agent_card: The A2A agent card
        transport_protocol: Transport protocol to use
        timeout: Request timeout in seconds
        headers: HTTP headers (already with auth applied)
        streaming: Enable streaming responses
        auth: Optional AuthScheme for client configuration
        use_polling: Enable polling mode
        push_notification_config: Optional push notification config to include in requests
        agent_card: The A2A agent card.
        transport_protocol: Transport protocol to use.
        timeout: Request timeout in seconds.
        headers: HTTP headers (already with auth applied).
        streaming: Enable streaming responses.
        auth: Optional AuthScheme for client configuration.
        use_polling: Enable polling mode.
        push_notification_config: Optional push notification config.

    Yields:
        Configured A2A client instance
        Configured A2A client instance.
    """

    async with httpx.AsyncClient(
        timeout=timeout,
        headers=headers,
@@ -640,7 +563,7 @@ async def _create_a2a_client(

    config = ClientConfig(
        httpx_client=httpx_client,
        supported_transports=[str(transport_protocol.value)],
        supported_transports=[transport_protocol],
        streaming=streaming and not use_polling,
        polling=use_polling,
        accepted_output_modes=["application/json"],
@@ -650,78 +573,3 @@ async def _create_a2a_client(
    factory = ClientFactory(config)
    client = factory.create(agent_card)
    yield client


def create_agent_response_model(agent_ids: tuple[str, ...]) -> type[BaseModel]:
    """Create a dynamic AgentResponse model with Literal types for agent IDs.

    Args:
        agent_ids: List of available A2A agent IDs

    Returns:
        Dynamically created Pydantic model with Literal-constrained a2a_ids field
    """

    DynamicLiteral = create_literals_from_strings(agent_ids)  # noqa: N806

    return create_model(
        "AgentResponse",
        a2a_ids=(
            tuple[DynamicLiteral, ...],  # type: ignore[valid-type]
            Field(
                default_factory=tuple,
                max_length=len(agent_ids),
                description="A2A agent IDs to delegate to.",
            ),
        ),
        message=(
            str,
            Field(
                description="The message content. If is_a2a=true, this is sent to the A2A agent. If is_a2a=false, this is your final answer ending the conversation."
            ),
        ),
        is_a2a=(
            bool,
            Field(
                description="Set to false when the remote agent has answered your question - extract their answer and return it as your final message. Set to true ONLY if you need to ask a NEW, DIFFERENT question. NEVER repeat the same request - if the conversation history shows the agent already answered, set is_a2a=false immediately."
            ),
        ),
        __base__=BaseModel,
    )


def extract_a2a_agent_ids_from_config(
    a2a_config: list[A2AConfig] | A2AConfig | None,
) -> tuple[list[A2AConfig], tuple[str, ...]]:
    """Extract A2A agent IDs from A2A configuration.

    Args:
        a2a_config: A2A configuration

    Returns:
        List of A2A agent IDs
    """
    if a2a_config is None:
        return [], ()

    if isinstance(a2a_config, A2AConfig):
        a2a_agents = [a2a_config]
    else:
        a2a_agents = a2a_config
    return a2a_agents, tuple(config.endpoint for config in a2a_agents)


def get_a2a_agents_and_response_model(
    a2a_config: list[A2AConfig] | A2AConfig | None,
) -> tuple[list[A2AConfig], type[BaseModel]]:
    """Get A2A agent IDs and response model.

    Args:
        a2a_config: A2A configuration

    Returns:
        Tuple of A2A agent IDs and response model
    """
    a2a_agents, agent_ids = extract_a2a_agent_ids_from_config(a2a_config=a2a_config)

    return a2a_agents, create_agent_response_model(agent_ids)
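With this diff, execute_a2a_delegation takes the transport protocol as a plain Literal["JSONRPC", "GRPC", "HTTP+JSON"] string (previously hardcoded to TransportProtocol("JSONRPC") inside the impl), and TaskStateResult is dict-like, so the handler result is augmented with a2a_agent_name and the dumped agent card before it is returned. A rough call sketch; the import path and endpoint below are assumptions (the hunk headers do not show this module's filename), and the remaining parameters are assumed to default as in the docstring:

# Hypothetical import path; substitute the delegation module this diff modifies.
from crewai.a2a.utils.delegation import execute_a2a_delegation

result = execute_a2a_delegation(
    endpoint="https://example-agent.test/.well-known/agent-card.json",  # hypothetical
    transport_protocol="JSONRPC",
    auth=None,
    timeout=60,
    task_description="Summarize last quarter's sales figures",
    skill_id="summarize",  # optional; forwarded in the A2A message metadata
)
print(result["status"], result.get("result") or result.get("error"))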
lib/crewai/src/crewai/a2a/utils/response_model.py (new file, 101 lines)
@@ -0,0 +1,101 @@
|
"""Response model utilities for A2A agent interactions."""

from __future__ import annotations

from typing import TypeAlias

from pydantic import BaseModel, Field, create_model

from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig
from crewai.types.utils import create_literals_from_strings


A2AConfigTypes: TypeAlias = A2AConfig | A2AServerConfig | A2AClientConfig
A2AClientConfigTypes: TypeAlias = A2AConfig | A2AClientConfig


def create_agent_response_model(agent_ids: tuple[str, ...]) -> type[BaseModel] | None:
    """Create a dynamic AgentResponse model with Literal types for agent IDs.

    Args:
        agent_ids: List of available A2A agent IDs.

    Returns:
        Dynamically created Pydantic model with Literal-constrained a2a_ids field,
        or None if agent_ids is empty.
    """
    if not agent_ids:
        return None

    DynamicLiteral = create_literals_from_strings(agent_ids)  # noqa: N806

    return create_model(
        "AgentResponse",
        a2a_ids=(
            tuple[DynamicLiteral, ...],  # type: ignore[valid-type]
            Field(
                default_factory=tuple,
                max_length=len(agent_ids),
                description="A2A agent IDs to delegate to.",
            ),
        ),
        message=(
            str,
            Field(
                description="The message content. If is_a2a=true, this is sent to the A2A agent. If is_a2a=false, this is your final answer ending the conversation."
            ),
        ),
        is_a2a=(
            bool,
            Field(
                description="Set to false when the remote agent has answered your question - extract their answer and return it as your final message. Set to true ONLY if you need to ask a NEW, DIFFERENT question. NEVER repeat the same request - if the conversation history shows the agent already answered, set is_a2a=false immediately."
            ),
        ),
        __base__=BaseModel,
    )


def extract_a2a_agent_ids_from_config(
    a2a_config: list[A2AConfigTypes] | A2AConfigTypes | None,
) -> tuple[list[A2AClientConfigTypes], tuple[str, ...]]:
    """Extract A2A agent IDs from A2A configuration.

    Filters out A2AServerConfig since it doesn't have an endpoint for delegation.

    Args:
        a2a_config: A2A configuration (any type).

    Returns:
        Tuple of client A2A configs list and agent endpoint IDs.
    """
    if a2a_config is None:
        return [], ()

    configs: list[A2AConfigTypes]
    if isinstance(a2a_config, (A2AConfig, A2AClientConfig, A2AServerConfig)):
        configs = [a2a_config]
    else:
        configs = a2a_config

    # Filter to only client configs (those with endpoint)
    client_configs: list[A2AClientConfigTypes] = [
        config for config in configs if isinstance(config, (A2AConfig, A2AClientConfig))
    ]

    return client_configs, tuple(config.endpoint for config in client_configs)


def get_a2a_agents_and_response_model(
    a2a_config: list[A2AConfigTypes] | A2AConfigTypes | None,
) -> tuple[list[A2AClientConfigTypes], type[BaseModel] | None]:
    """Get A2A agent configs and response model.

    Args:
        a2a_config: A2A configuration (any type).

    Returns:
        Tuple of client A2A configs and response model.
    """
    a2a_agents, agent_ids = extract_a2a_agent_ids_from_config(a2a_config=a2a_config)

    return a2a_agents, create_agent_response_model(agent_ids)
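A minimal usage sketch of the dynamic model, assuming two hypothetical endpoint IDs (illustrative, not part of the commit):

from pydantic import ValidationError

AgentResponse = create_agent_response_model(("https://agent-a.example/", "https://agent-b.example/"))
assert AgentResponse is not None

reply = AgentResponse(a2a_ids=("https://agent-a.example/",), message="What is the ETA?", is_a2a=True)

try:
    AgentResponse(a2a_ids=("https://unknown.example/",), message="hi", is_a2a=True)
except ValidationError:
    pass  # endpoints outside the Literal set are rejected at validation time

assert create_agent_response_model(()) is None  # no agents, no model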
399  lib/crewai/src/crewai/a2a/utils/task.py  (new file)
@@ -0,0 +1,399 @@
"""A2A task utilities for server-side task management."""

from __future__ import annotations

import asyncio
import base64
from collections.abc import Callable, Coroutine
from datetime import datetime
from functools import wraps
import logging
import os
from typing import TYPE_CHECKING, Any, ParamSpec, TypeVar, cast
from urllib.parse import urlparse

from a2a.server.agent_execution import RequestContext
from a2a.server.events import EventQueue
from a2a.types import (
    InternalError,
    InvalidParamsError,
    Message,
    Task as A2ATask,
    TaskState,
    TaskStatus,
    TaskStatusUpdateEvent,
)
from a2a.utils import new_agent_text_message, new_text_artifact
from a2a.utils.errors import ServerError
from aiocache import SimpleMemoryCache, caches  # type: ignore[import-untyped]

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
    A2AServerTaskCanceledEvent,
    A2AServerTaskCompletedEvent,
    A2AServerTaskFailedEvent,
    A2AServerTaskStartedEvent,
)
from crewai.task import Task


if TYPE_CHECKING:
    from crewai.agent import Agent


logger = logging.getLogger(__name__)

P = ParamSpec("P")
T = TypeVar("T")


def _parse_redis_url(url: str) -> dict[str, Any]:
    """Parse a Redis URL into aiocache configuration.

    Args:
        url: Redis connection URL (e.g., redis://localhost:6379/0).

    Returns:
        Configuration dict for aiocache.RedisCache.
    """
    parsed = urlparse(url)
    config: dict[str, Any] = {
        "cache": "aiocache.RedisCache",
        "endpoint": parsed.hostname or "localhost",
        "port": parsed.port or 6379,
    }
    if parsed.path and parsed.path != "/":
        try:
            config["db"] = int(parsed.path.lstrip("/"))
        except ValueError:
            pass
    if parsed.password:
        config["password"] = parsed.password
    return config


_redis_url = os.environ.get("REDIS_URL")

caches.set_config(
    {
        "default": _parse_redis_url(_redis_url)
        if _redis_url
        else {
            "cache": "aiocache.SimpleMemoryCache",
        }
    }
)
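The URL-to-config mapping is easiest to see on a concrete value; the host and credentials here are invented:

print(_parse_redis_url("redis://:s3cret@cache.internal:6380/2"))
# {'cache': 'aiocache.RedisCache', 'endpoint': 'cache.internal',
#  'port': 6380, 'db': 2, 'password': 's3cret'}

Without REDIS_URL set, the default cache falls back to SimpleMemoryCache, which limits the cancel signalling below to a single process.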

def cancellable(
    fn: Callable[P, Coroutine[Any, Any, T]],
) -> Callable[P, Coroutine[Any, Any, T]]:
    """Decorator that enables cancellation for A2A task execution.

    Runs a cancellation watcher concurrently with the wrapped function.
    When a cancel event is published, the execution is cancelled.

    Args:
        fn: The async function to wrap.

    Returns:
        Wrapped function with cancellation support.
    """

    @wraps(fn)
    async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
        """Wrap function with cancellation monitoring."""
        context: RequestContext | None = None
        for arg in args:
            if isinstance(arg, RequestContext):
                context = arg
                break
        if context is None:
            context = cast(RequestContext | None, kwargs.get("context"))

        if context is None:
            return await fn(*args, **kwargs)

        task_id = context.task_id
        cache = caches.get("default")

        async def poll_for_cancel() -> bool:
            """Poll cache for cancellation flag."""
            while True:
                if await cache.get(f"cancel:{task_id}"):
                    return True
                await asyncio.sleep(0.1)

        async def watch_for_cancel() -> bool:
            """Watch for cancellation events via pub/sub or polling."""
            if isinstance(cache, SimpleMemoryCache):
                return await poll_for_cancel()

            try:
                client = cache.client
                pubsub = client.pubsub()
                await pubsub.subscribe(f"cancel:{task_id}")
                async for message in pubsub.listen():
                    if message["type"] == "message":
                        return True
            except (OSError, ConnectionError) as e:
                logger.warning("Cancel watcher error for task_id=%s: %s", task_id, e)
                return await poll_for_cancel()
            return False

        execute_task = asyncio.create_task(fn(*args, **kwargs))
        cancel_watch = asyncio.create_task(watch_for_cancel())

        try:
            done, _ = await asyncio.wait(
                [execute_task, cancel_watch],
                return_when=asyncio.FIRST_COMPLETED,
            )

            if cancel_watch in done:
                execute_task.cancel()
                try:
                    await execute_task
                except asyncio.CancelledError:
                    pass
                raise asyncio.CancelledError(f"Task {task_id} was cancelled")
            cancel_watch.cancel()
            return execute_task.result()
        finally:
            await cache.delete(f"cancel:{task_id}")

    return wrapper
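Any coroutine that receives a RequestContext can opt into this handshake; a small hypothetical example (not from the commit):

@cancellable
async def long_running(context: RequestContext, event_queue: EventQueue) -> str:
    await asyncio.sleep(60)  # interrupted if the "cancel:<task_id>" key is set
    return "done"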

@cancellable
async def execute(
    agent: Agent,
    context: RequestContext,
    event_queue: EventQueue,
) -> None:
    """Execute an A2A task using a CrewAI agent.

    Args:
        agent: The CrewAI agent to execute the task.
        context: The A2A request context containing the user's message.
        event_queue: The event queue for sending responses back.

    TODOs:
        * Implement structured output and file inputs; both depend on `file_inputs`
          for `crewai.task.Task`, then pass the two values below to Task.
          Both utilities live in `a2a.utils.parts`:
          - structured inputs: `structured_inputs = get_data_parts(parts=context.message.parts)`
          - file inputs: `file_inputs = get_file_parts(parts=context.message.parts)`
    """
    user_message = context.get_user_input()
    task_id = context.task_id
    context_id = context.context_id
    if task_id is None or context_id is None:
        msg = "task_id and context_id are required"
        crewai_event_bus.emit(
            agent,
            A2AServerTaskFailedEvent(
                task_id="",
                context_id="",
                error=msg,
                from_agent=agent,
            ),
        )
        raise ServerError(InvalidParamsError(message=msg)) from None

    task = Task(
        description=user_message,
        expected_output="Response to the user's request",
        agent=agent,
    )

    crewai_event_bus.emit(
        agent,
        A2AServerTaskStartedEvent(
            task_id=task_id,
            context_id=context_id,
            from_task=task,
            from_agent=agent,
        ),
    )

    try:
        result = await agent.aexecute_task(task=task, tools=agent.tools)
        result_str = str(result)
        history: list[Message] = [context.message] if context.message else []
        history.append(new_agent_text_message(result_str, context_id, task_id))
        await event_queue.enqueue_event(
            A2ATask(
                id=task_id,
                context_id=context_id,
                status=TaskStatus(state=TaskState.input_required),
                artifacts=[new_text_artifact(result_str, f"result_{task_id}")],
                history=history,
            )
        )
        crewai_event_bus.emit(
            agent,
            A2AServerTaskCompletedEvent(
                task_id=task_id,
                context_id=context_id,
                result=str(result),
                from_task=task,
                from_agent=agent,
            ),
        )
    except asyncio.CancelledError:
        crewai_event_bus.emit(
            agent,
            A2AServerTaskCanceledEvent(
                task_id=task_id,
                context_id=context_id,
                from_task=task,
                from_agent=agent,
            ),
        )
        raise
    except Exception as e:
        crewai_event_bus.emit(
            agent,
            A2AServerTaskFailedEvent(
                task_id=task_id,
                context_id=context_id,
                error=str(e),
                from_task=task,
                from_agent=agent,
            ),
        )
        raise ServerError(
            error=InternalError(message=f"Task execution failed: {e}")
        ) from e

async def cancel(
    context: RequestContext,
    event_queue: EventQueue,
) -> A2ATask | None:
    """Cancel an A2A task.

    Publishes a cancel event that the cancellable decorator listens for.

    Args:
        context: The A2A request context containing task information.
        event_queue: The event queue for sending the cancellation status.

    Returns:
        The canceled task with updated status.
    """
    task_id = context.task_id
    context_id = context.context_id
    if task_id is None or context_id is None:
        raise ServerError(InvalidParamsError(message="task_id and context_id required"))

    if context.current_task and context.current_task.status.state in (
        TaskState.completed,
        TaskState.failed,
        TaskState.canceled,
    ):
        return context.current_task

    cache = caches.get("default")

    await cache.set(f"cancel:{task_id}", True, ttl=3600)
    if not isinstance(cache, SimpleMemoryCache):
        await cache.client.publish(f"cancel:{task_id}", "cancel")

    await event_queue.enqueue_event(
        TaskStatusUpdateEvent(
            task_id=task_id,
            context_id=context_id,
            status=TaskStatus(state=TaskState.canceled),
            final=True,
        )
    )

    if context.current_task:
        context.current_task.status = TaskStatus(state=TaskState.canceled)
        return context.current_task
    return None

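How the cancellation pieces fit together (illustrative summary):

# 1. cancel() sets "cancel:<task_id>" in the cache (and publishes on Redis)
# 2. any @cancellable execution of that task then raises asyncio.CancelledError
# 3. execute() converts the CancelledError into an A2AServerTaskCanceledEvent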
def list_tasks(
    tasks: list[A2ATask],
    context_id: str | None = None,
    status: TaskState | None = None,
    status_timestamp_after: datetime | None = None,
    page_size: int = 50,
    page_token: str | None = None,
    history_length: int | None = None,
    include_artifacts: bool = False,
) -> tuple[list[A2ATask], str | None, int]:
    """Filter and paginate A2A tasks.

    Provides filtering by context, status, and timestamp, along with
    cursor-based pagination. This is a pure utility function that operates
    on an in-memory list of tasks - storage retrieval is handled separately.

    Args:
        tasks: All tasks to filter.
        context_id: Filter by context ID to get tasks in a conversation.
        status: Filter by task state (e.g., completed, working).
        status_timestamp_after: Filter to tasks updated after this time.
        page_size: Maximum tasks per page (default 50).
        page_token: Base64-encoded cursor from previous response.
        history_length: Limit history messages per task (None = full history).
        include_artifacts: Whether to include task artifacts (default False).

    Returns:
        Tuple of (filtered_tasks, next_page_token, total_count).
        - filtered_tasks: Tasks matching filters, paginated and trimmed.
        - next_page_token: Token for next page, or None if no more pages.
        - total_count: Total number of tasks matching filters (before pagination).
    """
    filtered: list[A2ATask] = []
    for task in tasks:
        if context_id and task.context_id != context_id:
            continue
        if status and task.status.state != status:
            continue
        if status_timestamp_after and task.status.timestamp:
            ts = datetime.fromisoformat(task.status.timestamp.replace("Z", "+00:00"))
            if ts <= status_timestamp_after:
                continue
        filtered.append(task)

    def get_timestamp(t: A2ATask) -> datetime:
        """Extract timestamp from task status for sorting."""
        if t.status.timestamp is None:
            return datetime.min
        return datetime.fromisoformat(t.status.timestamp.replace("Z", "+00:00"))

    filtered.sort(key=get_timestamp, reverse=True)
    total = len(filtered)

    start = 0
    if page_token:
        try:
            cursor_id = base64.b64decode(page_token).decode()
            for idx, task in enumerate(filtered):
                if task.id == cursor_id:
                    start = idx + 1
                    break
        except (ValueError, UnicodeDecodeError):
            pass

    page = filtered[start : start + page_size]

    result: list[A2ATask] = []
    for task in page:
        task = task.model_copy(deep=True)
        if history_length is not None and task.history:
            task.history = task.history[-history_length:]
        if not include_artifacts:
            task.artifacts = None
        result.append(task)

    next_token: str | None = None
    if result and len(result) == page_size:
        next_token = base64.b64encode(result[-1].id.encode()).decode()

    return result, next_token, total
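A hedged sketch of the cursor flow; all_tasks stands in for whatever store the server uses:

page, token, total = list_tasks(all_tasks, context_id="ctx-1", page_size=2)
while token:  # token is the base64-encoded ID of the last task on the page
    page, token, _ = list_tasks(
        all_tasks, context_id="ctx-1", page_size=2, page_token=token
    )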
@@ -6,16 +6,17 @@ Wraps agent classes with A2A delegation capabilities.
 from __future__ import annotations

 import asyncio
-from collections.abc import Callable, Coroutine
+from collections.abc import Callable, Coroutine, Mapping
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from functools import wraps
+import json
 from types import MethodType
 from typing import TYPE_CHECKING, Any

 from a2a.types import Role, TaskState
 from pydantic import BaseModel, ValidationError

-from crewai.a2a.config import A2AConfig
+from crewai.a2a.config import A2AClientConfig, A2AConfig
 from crewai.a2a.extensions.base import ExtensionRegistry
 from crewai.a2a.task_helpers import TaskStateResult
 from crewai.a2a.templates import (
@@ -26,13 +27,16 @@ from crewai.a2a.templates import (
     UNAVAILABLE_AGENTS_NOTICE_TEMPLATE,
 )
 from crewai.a2a.types import AgentResponseProtocol
-from crewai.a2a.utils import (
-    aexecute_a2a_delegation,
+from crewai.a2a.utils.agent_card import (
     afetch_agent_card,
-    execute_a2a_delegation,
     fetch_agent_card,
-    get_a2a_agents_and_response_model,
     inject_a2a_server_methods,
 )
+from crewai.a2a.utils.delegation import (
+    aexecute_a2a_delegation,
+    execute_a2a_delegation,
+)
+from crewai.a2a.utils.response_model import get_a2a_agents_and_response_model
 from crewai.events.event_bus import crewai_event_bus
 from crewai.events.types.a2a_events import (
     A2AConversationCompletedEvent,
@@ -122,10 +126,12 @@ def wrap_agent_with_a2a_instance(
         agent, "aexecute_task", MethodType(aexecute_task_with_a2a, agent)
     )

+    inject_a2a_server_methods(agent)
+

 def _fetch_card_from_config(
-    config: A2AConfig,
-) -> tuple[A2AConfig, AgentCard | Exception]:
+    config: A2AConfig | A2AClientConfig,
+) -> tuple[A2AConfig | A2AClientConfig, AgentCard | Exception]:
     """Fetch agent card from A2A config.

     Args:
@@ -146,7 +152,7 @@ def _fetch_card_from_config(


 def _fetch_agent_cards_concurrently(
-    a2a_agents: list[A2AConfig],
+    a2a_agents: list[A2AConfig | A2AClientConfig],
 ) -> tuple[dict[str, AgentCard], dict[str, str]]:
     """Fetch agent cards concurrently for multiple A2A agents.

@@ -181,10 +187,10 @@ def _fetch_agent_cards_concurrently(

 def _execute_task_with_a2a(
     self: Agent,
-    a2a_agents: list[A2AConfig],
+    a2a_agents: list[A2AConfig | A2AClientConfig],
     original_fn: Callable[..., str],
     task: Task,
-    agent_response_model: type[BaseModel],
+    agent_response_model: type[BaseModel] | None,
     context: str | None,
     tools: list[BaseTool] | None,
     extension_registry: ExtensionRegistry,
@@ -270,9 +276,9 @@ def _execute_task_with_a2a(


 def _augment_prompt_with_a2a(
-    a2a_agents: list[A2AConfig],
+    a2a_agents: list[A2AConfig | A2AClientConfig],
     task_description: str,
-    agent_cards: dict[str, AgentCard],
+    agent_cards: Mapping[str, AgentCard | dict[str, Any]],
     conversation_history: list[Message] | None = None,
     turn_num: int = 0,
     max_turns: int | None = None,
@@ -304,7 +310,15 @@ def _augment_prompt_with_a2a(
     for config in a2a_agents:
         if config.endpoint in agent_cards:
             card = agent_cards[config.endpoint]
-            agents_text += f"\n{card.model_dump_json(indent=2, exclude_none=True, include={'description', 'url', 'skills'})}\n"
+            if isinstance(card, dict):
+                filtered = {
+                    k: v
+                    for k, v in card.items()
+                    if k in {"description", "url", "skills"} and v is not None
+                }
+                agents_text += f"\n{json.dumps(filtered, indent=2)}\n"
+            else:
+                agents_text += f"\n{card.model_dump_json(indent=2, exclude_none=True, include={'description', 'url', 'skills'})}\n"

     failed_agents = failed_agents or {}
     if failed_agents:
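To make the new dict branch concrete: a dict-shaped card (contents invented here) is trimmed to the same fields the AgentCard path selects:

card = {"name": "Travel Agent", "description": "Books trips", "url": "https://a.example", "skills": [], "version": "1.0"}
filtered = {k: v for k, v in card.items() if k in {"description", "url", "skills"} and v is not None}
# -> {"description": "Books trips", "url": "https://a.example", "skills": []}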
@@ -372,7 +386,7 @@ IMPORTANT: You have the ability to delegate this task to remote A2A agents.


 def _parse_agent_response(
-    raw_result: str | dict[str, Any], agent_response_model: type[BaseModel]
+    raw_result: str | dict[str, Any], agent_response_model: type[BaseModel] | None
 ) -> BaseModel | str | dict[str, Any]:
     """Parse LLM output as AgentResponse or return raw agent response."""
     if agent_response_model:
@@ -389,6 +403,11 @@ def _parse_agent_response(
 def _handle_max_turns_exceeded(
     conversation_history: list[Message],
     max_turns: int,
+    from_task: Any | None = None,
+    from_agent: Any | None = None,
+    endpoint: str | None = None,
+    a2a_agent_name: str | None = None,
+    agent_card: dict[str, Any] | None = None,
 ) -> str:
     """Handle the case when max turns is exceeded.

@@ -416,6 +435,11 @@ def _handle_max_turns_exceeded(
                 final_result=final_message,
                 error=None,
                 total_turns=max_turns,
+                from_task=from_task,
+                from_agent=from_agent,
+                endpoint=endpoint,
+                a2a_agent_name=a2a_agent_name,
+                agent_card=agent_card,
             ),
         )
         return final_message
@@ -427,6 +451,11 @@ def _handle_max_turns_exceeded(
             final_result=None,
             error=f"Conversation exceeded maximum turns ({max_turns})",
             total_turns=max_turns,
+            from_task=from_task,
+            from_agent=from_agent,
+            endpoint=endpoint,
+            a2a_agent_name=a2a_agent_name,
+            agent_card=agent_card,
         ),
     )
     raise Exception(f"A2A conversation exceeded maximum turns ({max_turns})")
@@ -437,7 +466,12 @@ def _process_response_result(
     disable_structured_output: bool,
     turn_num: int,
     agent_role: str,
-    agent_response_model: type[BaseModel],
+    agent_response_model: type[BaseModel] | None,
+    from_task: Any | None = None,
+    from_agent: Any | None = None,
+    endpoint: str | None = None,
+    a2a_agent_name: str | None = None,
+    agent_card: dict[str, Any] | None = None,
 ) -> tuple[str | None, str | None]:
     """Process LLM response and determine next action.

@@ -456,6 +490,10 @@ def _process_response_result(
                 turn_number=final_turn_number,
                 is_multiturn=True,
                 agent_role=agent_role,
+                from_task=from_task,
+                from_agent=from_agent,
+                endpoint=endpoint,
+                a2a_agent_name=a2a_agent_name,
             ),
         )
         crewai_event_bus.emit(
@@ -465,6 +503,11 @@ def _process_response_result(
                 final_result=result_text,
                 error=None,
                 total_turns=final_turn_number,
+                from_task=from_task,
+                from_agent=from_agent,
+                endpoint=endpoint,
+                a2a_agent_name=a2a_agent_name,
+                agent_card=agent_card,
             ),
         )
         return result_text, None
@@ -485,6 +528,10 @@ def _process_response_result(
                 turn_number=final_turn_number,
                 is_multiturn=True,
                 agent_role=agent_role,
+                from_task=from_task,
+                from_agent=from_agent,
+                endpoint=endpoint,
+                a2a_agent_name=a2a_agent_name,
             ),
         )
         crewai_event_bus.emit(
@@ -494,6 +541,11 @@ def _process_response_result(
                 final_result=str(llm_response.message),
                 error=None,
                 total_turns=final_turn_number,
+                from_task=from_task,
+                from_agent=from_agent,
+                endpoint=endpoint,
+                a2a_agent_name=a2a_agent_name,
+                agent_card=agent_card,
             ),
         )
         return str(llm_response.message), None
@@ -505,13 +557,15 @@ def _process_response_result(

 def _prepare_agent_cards_dict(
     a2a_result: TaskStateResult,
     agent_id: str,
-    agent_cards: dict[str, AgentCard] | None,
-) -> dict[str, AgentCard]:
+    agent_cards: Mapping[str, AgentCard | dict[str, Any]] | None,
+) -> dict[str, AgentCard | dict[str, Any]]:
     """Prepare agent cards dictionary from result and existing cards.

     Shared logic for both sync and async response handlers.
     """
-    agent_cards_dict = agent_cards or {}
+    agent_cards_dict: dict[str, AgentCard | dict[str, Any]] = (
+        dict(agent_cards) if agent_cards else {}
+    )
     if "agent_card" in a2a_result and agent_id not in agent_cards_dict:
         agent_cards_dict[agent_id] = a2a_result["agent_card"]
     return agent_cards_dict
@@ -523,11 +577,11 @@ def _prepare_delegation_context(
     task: Task,
     original_task_description: str | None,
 ) -> tuple[
-    list[A2AConfig],
-    type[BaseModel],
+    list[A2AConfig | A2AClientConfig],
+    type[BaseModel] | None,
     str,
     str,
-    A2AConfig,
+    A2AConfig | A2AClientConfig,
     str | None,
     str | None,
     dict[str, Any] | None,
@@ -591,8 +645,13 @@ def _handle_task_completion(
     task: Task,
     task_id_config: str | None,
     reference_task_ids: list[str],
-    agent_config: A2AConfig,
+    agent_config: A2AConfig | A2AClientConfig,
     turn_num: int,
+    from_task: Any | None = None,
+    from_agent: Any | None = None,
+    endpoint: str | None = None,
+    a2a_agent_name: str | None = None,
+    agent_card: dict[str, Any] | None = None,
 ) -> tuple[str | None, str | None, list[str]]:
     """Handle task completion state including reference task updates.

@@ -619,6 +678,11 @@ def _handle_task_completion(
                 final_result=result_text,
                 error=None,
                 total_turns=final_turn_number,
+                from_task=from_task,
+                from_agent=from_agent,
+                endpoint=endpoint,
+                a2a_agent_name=a2a_agent_name,
+                agent_card=agent_card,
             ),
         )
         return str(result_text), task_id_config, reference_task_ids
@@ -631,7 +695,7 @@ def _handle_agent_response_and_continue(
     a2a_result: TaskStateResult,
     agent_id: str,
     agent_cards: dict[str, AgentCard] | None,
-    a2a_agents: list[A2AConfig],
+    a2a_agents: list[A2AConfig | A2AClientConfig],
     original_task_description: str,
     conversation_history: list[Message],
     turn_num: int,
@@ -640,8 +704,11 @@ def _handle_agent_response_and_continue(
     original_fn: Callable[..., str],
     context: str | None,
     tools: list[BaseTool] | None,
-    agent_response_model: type[BaseModel],
+    agent_response_model: type[BaseModel] | None,
     remote_task_completed: bool = False,
+    endpoint: str | None = None,
+    a2a_agent_name: str | None = None,
+    agent_card: dict[str, Any] | None = None,
 ) -> tuple[str | None, str | None]:
     """Handle A2A result and get CrewAI agent's response.

@@ -693,6 +760,11 @@ def _handle_agent_response_and_continue(
         turn_num=turn_num,
         agent_role=self.role,
         agent_response_model=agent_response_model,
+        from_task=task,
+        from_agent=self,
+        endpoint=endpoint,
+        a2a_agent_name=a2a_agent_name,
+        agent_card=agent_card,
     )

@@ -745,6 +817,12 @@ def _delegate_to_a2a(

     conversation_history: list[Message] = []

+    current_agent_card = agent_cards.get(agent_id) if agent_cards else None
+    current_agent_card_dict = (
+        current_agent_card.model_dump() if current_agent_card else None
+    )
+    current_a2a_agent_name = current_agent_card.name if current_agent_card else None
+
     try:
         for turn_num in range(max_turns):
             console_formatter = getattr(crewai_event_bus, "_console", None)
@@ -771,6 +849,9 @@ def _delegate_to_a2a(
                 response_model=agent_config.response_model,
                 turn_number=turn_num + 1,
                 updates=agent_config.updates,
+                transport_protocol=agent_config.transport_protocol,
+                from_task=task,
+                from_agent=self,
             )

             conversation_history = a2a_result.get("history", [])
@@ -791,6 +872,11 @@ def _delegate_to_a2a(
                     reference_task_ids,
                     agent_config,
                     turn_num,
+                    from_task=task,
+                    from_agent=self,
+                    endpoint=agent_config.endpoint,
+                    a2a_agent_name=current_a2a_agent_name,
+                    agent_card=current_agent_card_dict,
                 )
             )
             if trusted_result is not None:
@@ -812,6 +898,9 @@ def _delegate_to_a2a(
                 tools=tools,
                 agent_response_model=agent_response_model,
                 remote_task_completed=(a2a_result["status"] == TaskState.completed),
+                endpoint=agent_config.endpoint,
+                a2a_agent_name=current_a2a_agent_name,
+                agent_card=current_agent_card_dict,
             )

             if final_result is not None:
@@ -840,6 +929,9 @@ def _delegate_to_a2a(
                 tools=tools,
                 agent_response_model=agent_response_model,
                 remote_task_completed=False,
+                endpoint=agent_config.endpoint,
+                a2a_agent_name=current_a2a_agent_name,
+                agent_card=current_agent_card_dict,
             )

             if final_result is not None:
@@ -856,19 +948,32 @@ def _delegate_to_a2a(
                     final_result=None,
                     error=error_msg,
                     total_turns=turn_num + 1,
+                    from_task=task,
+                    from_agent=self,
+                    endpoint=agent_config.endpoint,
+                    a2a_agent_name=current_a2a_agent_name,
+                    agent_card=current_agent_card_dict,
                 ),
             )
             return f"A2A delegation failed: {error_msg}"

-        return _handle_max_turns_exceeded(conversation_history, max_turns)
+        return _handle_max_turns_exceeded(
+            conversation_history,
+            max_turns,
+            from_task=task,
+            from_agent=self,
+            endpoint=agent_config.endpoint,
+            a2a_agent_name=current_a2a_agent_name,
+            agent_card=current_agent_card_dict,
+        )

     finally:
         task.description = original_task_description


 async def _afetch_card_from_config(
-    config: A2AConfig,
-) -> tuple[A2AConfig, AgentCard | Exception]:
+    config: A2AConfig | A2AClientConfig,
+) -> tuple[A2AConfig | A2AClientConfig, AgentCard | Exception]:
     """Fetch agent card from A2A config asynchronously."""
     try:
         card = await afetch_agent_card(
@@ -882,7 +987,7 @@ async def _afetch_card_from_config(


 async def _afetch_agent_cards_concurrently(
-    a2a_agents: list[A2AConfig],
+    a2a_agents: list[A2AConfig | A2AClientConfig],
 ) -> tuple[dict[str, AgentCard], dict[str, str]]:
     """Fetch agent cards concurrently for multiple A2A agents using asyncio."""
     agent_cards: dict[str, AgentCard] = {}
@@ -907,10 +1012,10 @@ async def _afetch_agent_cards_concurrently(

 async def _aexecute_task_with_a2a(
     self: Agent,
-    a2a_agents: list[A2AConfig],
+    a2a_agents: list[A2AConfig | A2AClientConfig],
     original_fn: Callable[..., Coroutine[Any, Any, str]],
     task: Task,
-    agent_response_model: type[BaseModel],
+    agent_response_model: type[BaseModel] | None,
     context: str | None,
     tools: list[BaseTool] | None,
     extension_registry: ExtensionRegistry,
@@ -986,7 +1091,7 @@ async def _ahandle_agent_response_and_continue(
     a2a_result: TaskStateResult,
     agent_id: str,
     agent_cards: dict[str, AgentCard] | None,
-    a2a_agents: list[A2AConfig],
+    a2a_agents: list[A2AConfig | A2AClientConfig],
     original_task_description: str,
     conversation_history: list[Message],
     turn_num: int,
@@ -995,8 +1100,11 @@ async def _ahandle_agent_response_and_continue(
     original_fn: Callable[..., Coroutine[Any, Any, str]],
     context: str | None,
     tools: list[BaseTool] | None,
-    agent_response_model: type[BaseModel],
+    agent_response_model: type[BaseModel] | None,
     remote_task_completed: bool = False,
+    endpoint: str | None = None,
+    a2a_agent_name: str | None = None,
+    agent_card: dict[str, Any] | None = None,
 ) -> tuple[str | None, str | None]:
     """Async version of _handle_agent_response_and_continue."""
     agent_cards_dict = _prepare_agent_cards_dict(a2a_result, agent_id, agent_cards)
@@ -1026,6 +1134,11 @@ async def _ahandle_agent_response_and_continue(
         turn_num=turn_num,
         agent_role=self.role,
         agent_response_model=agent_response_model,
+        from_task=task,
+        from_agent=self,
+        endpoint=endpoint,
+        a2a_agent_name=a2a_agent_name,
+        agent_card=agent_card,
     )

@@ -1060,6 +1173,12 @@ async def _adelegate_to_a2a(

     conversation_history: list[Message] = []

+    current_agent_card = agent_cards.get(agent_id) if agent_cards else None
+    current_agent_card_dict = (
+        current_agent_card.model_dump() if current_agent_card else None
+    )
+    current_a2a_agent_name = current_agent_card.name if current_agent_card else None
+
     try:
         for turn_num in range(max_turns):
             console_formatter = getattr(crewai_event_bus, "_console", None)
@@ -1085,7 +1204,10 @@ async def _adelegate_to_a2a(
                 agent_branch=agent_branch,
                 response_model=agent_config.response_model,
                 turn_number=turn_num + 1,
+                transport_protocol=agent_config.transport_protocol,
                 updates=agent_config.updates,
+                from_task=task,
+                from_agent=self,
             )

             conversation_history = a2a_result.get("history", [])
@@ -1106,6 +1228,11 @@ async def _adelegate_to_a2a(
                     reference_task_ids,
                     agent_config,
                     turn_num,
+                    from_task=task,
+                    from_agent=self,
+                    endpoint=agent_config.endpoint,
+                    a2a_agent_name=current_a2a_agent_name,
+                    agent_card=current_agent_card_dict,
                 )
             )
             if trusted_result is not None:
@@ -1127,6 +1254,9 @@ async def _adelegate_to_a2a(
                 tools=tools,
                 agent_response_model=agent_response_model,
                 remote_task_completed=(a2a_result["status"] == TaskState.completed),
+                endpoint=agent_config.endpoint,
+                a2a_agent_name=current_a2a_agent_name,
+                agent_card=current_agent_card_dict,
             )

             if final_result is not None:
@@ -1154,6 +1284,9 @@ async def _adelegate_to_a2a(
                 context=context,
                 tools=tools,
                 agent_response_model=agent_response_model,
+                endpoint=agent_config.endpoint,
+                a2a_agent_name=current_a2a_agent_name,
+                agent_card=current_agent_card_dict,
             )

             if final_result is not None:
@@ -1170,11 +1303,24 @@ async def _adelegate_to_a2a(
                     final_result=None,
                     error=error_msg,
                     total_turns=turn_num + 1,
+                    from_task=task,
+                    from_agent=self,
+                    endpoint=agent_config.endpoint,
+                    a2a_agent_name=current_a2a_agent_name,
+                    agent_card=current_agent_card_dict,
                 ),
             )
             return f"A2A delegation failed: {error_msg}"

-        return _handle_max_turns_exceeded(conversation_history, max_turns)
+        return _handle_max_turns_exceeded(
+            conversation_history,
+            max_turns,
+            from_task=task,
+            from_agent=self,
+            endpoint=agent_config.endpoint,
+            a2a_agent_name=current_a2a_agent_name,
+            agent_card=current_agent_card_dict,
+        )

     finally:
         task.description = original_task_description

@@ -1,7 +1,7 @@
 from __future__ import annotations

 import asyncio
-from collections.abc import Callable, Sequence
+from collections.abc import Callable, Coroutine, Sequence
 import shutil
 import subprocess
 import time
@@ -17,7 +17,6 @@ from urllib.parse import urlparse
 from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator
 from typing_extensions import Self

-from crewai.a2a.config import A2AConfig
 from crewai.agent.utils import (
     ahandle_knowledge_retrieval,
     apply_training_data,
@@ -35,6 +34,11 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.cache.cache_handler import CacheHandler
 from crewai.agents.crew_agent_executor import CrewAgentExecutor
 from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.agent_events import (
+    LiteAgentExecutionCompletedEvent,
+    LiteAgentExecutionErrorEvent,
+    LiteAgentExecutionStartedEvent,
+)
 from crewai.events.types.knowledge_events import (
     KnowledgeQueryCompletedEvent,
     KnowledgeQueryFailedEvent,
@@ -44,10 +48,10 @@ from crewai.events.types.memory_events import (
     MemoryRetrievalCompletedEvent,
     MemoryRetrievalStartedEvent,
 )
-from crewai.experimental.crew_agent_executor_flow import CrewAgentExecutorFlow
+from crewai.experimental.agent_executor import AgentExecutor
 from crewai.knowledge.knowledge import Knowledge
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
-from crewai.lite_agent import LiteAgent
 from crewai.lite_agent_output import LiteAgentOutput
 from crewai.llms.base_llm import BaseLLM
 from crewai.mcp import (
     MCPClient,
@@ -65,26 +69,37 @@ from crewai.security.fingerprint import Fingerprint
 from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.utilities.agent_utils import (
     get_tool_names,
+    is_inside_event_loop,
     load_agent_from_repository,
     parse_tools,
     render_text_description_and_args,
 )
 from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
-from crewai.utilities.converter import Converter
+from crewai.utilities.converter import Converter, ConverterError
 from crewai.utilities.guardrail import process_guardrail
 from crewai.utilities.guardrail_types import GuardrailType
 from crewai.utilities.llm_utils import create_llm
-from crewai.utilities.prompts import Prompts
+from crewai.utilities.prompts import Prompts, StandardPromptResult, SystemPromptResult
 from crewai.utilities.pydantic_schema_utils import generate_model_description
 from crewai.utilities.token_counter_callback import TokenCalcHandler
 from crewai.utilities.training_handler import CrewTrainingHandler


+try:
+    from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig
+except ImportError:
+    A2AClientConfig = Any
+    A2AConfig = Any
+    A2AServerConfig = Any
+
+
 if TYPE_CHECKING:
     from crewai_tools import CodeInterpreterTool

     from crewai.agents.agent_builder.base_agent import PlatformAppOrAction
+    from crewai.lite_agent_output import LiteAgentOutput
     from crewai.task import Task
     from crewai.tools.base_tool import BaseTool
     from crewai.tools.structured_tool import CrewStructuredTool
     from crewai.utilities.types import LLMMessage

@@ -106,7 +121,7 @@ class Agent(BaseAgent):
     The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents.

     Attributes:
-        agent_executor: An instance of the CrewAgentExecutor or CrewAgentExecutorFlow class.
+        agent_executor: An instance of the CrewAgentExecutor or AgentExecutor class.
         role: The role of the agent.
         goal: The objective of the agent.
         backstory: The backstory of the agent.
@@ -218,13 +233,27 @@ class Agent(BaseAgent):
     guardrail_max_retries: int = Field(
         default=3, description="Maximum number of retries when guardrail fails"
     )
-    a2a: list[A2AConfig] | A2AConfig | None = Field(
+    a2a: (
+        list[A2AConfig | A2AServerConfig | A2AClientConfig]
+        | A2AConfig
+        | A2AServerConfig
+        | A2AClientConfig
+        | None
+    ) = Field(
         default=None,
-        description="A2A (Agent-to-Agent) configuration for delegating tasks to remote agents. Can be a single A2AConfig or a dict mapping agent IDs to configs.",
+        description="""
+        A2A (Agent-to-Agent) configuration for delegating tasks to remote agents.
+        Can be a single A2AConfig/A2AClientConfig/A2AServerConfig, or a list of any number of A2AConfig/A2AClientConfig with a single A2AServerConfig.
+        """,
     )
-    executor_class: type[CrewAgentExecutor] | type[CrewAgentExecutorFlow] = Field(
+    executor_class: type[CrewAgentExecutor] | type[AgentExecutor] = Field(
         default=CrewAgentExecutor,
-        description="Class to use for the agent executor. Defaults to CrewAgentExecutor, can optionally use CrewAgentExecutorFlow.",
+        description="Class to use for the agent executor. Defaults to CrewAgentExecutor, can optionally use AgentExecutor.",
     )
+    max_tools_per_turn: int = Field(
+        default=10,
+        description="Maximum number of tool calls to execute per LLM turn before asking for reflection.",
+    )

     @model_validator(mode="before")
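A hedged configuration sketch for the widened field; the endpoint and the exact constructor parameters of the config classes are assumptions, not taken from this diff:

agent = Agent(
    role="Researcher",
    goal="Answer questions with help from remote specialists",
    backstory="...",
    a2a=[
        A2AClientConfig(endpoint="https://remote-agent.example.com"),  # delegate out
        A2AServerConfig(),  # expose this agent over A2A; at most one per list
    ],
)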
@@ -287,6 +316,22 @@ class Agent(BaseAgent):

         return any(getattr(self.crew, attr) for attr in memory_attributes)

+    def _supports_native_tool_calling(self, tools: list[BaseTool]) -> bool:
+        """Check if the LLM supports native function calling with the given tools.
+
+        Args:
+            tools: List of tools to check against.
+
+        Returns:
+            True if native function calling is supported and tools are available.
+        """
+        return (
+            hasattr(self.llm, "supports_function_calling")
+            and callable(getattr(self.llm, "supports_function_calling", None))
+            and self.llm.supports_function_calling()
+            and len(tools) > 0
+        )
+
     def execute_task(
         self,
         task: Task,
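Note the check is duck-typed rather than isinstance-based, so any LLM object exposing a truthy supports_function_calling() opts in; a toy illustration (class invented here):

class NativeToolLLM:
    def supports_function_calling(self) -> bool:
        return True  # passes the hasattr/callable/truthy gate whenever tools are present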
@@ -709,9 +754,12 @@ class Agent(BaseAgent):
         raw_tools: list[BaseTool] = tools or self.tools or []
         parsed_tools = parse_tools(raw_tools)

+        use_native_tool_calling = self._supports_native_tool_calling(raw_tools)
+
         prompt = Prompts(
             agent=self,
             has_tools=len(raw_tools) > 0,
+            use_native_tool_calling=use_native_tool_calling,
             i18n=self.i18n,
             use_system_prompt=self.use_system_prompt,
             system_template=self.system_template,
@@ -733,7 +781,7 @@ class Agent(BaseAgent):
         if self.agent_executor is not None:
             self._update_executor_parameters(
                 task=task,
-                tools=parsed_tools,
+                tools=parsed_tools,  # type: ignore[arg-type]
                 raw_tools=raw_tools,
                 prompt=prompt,
                 stop_words=stop_words,
@@ -742,7 +790,7 @@ class Agent(BaseAgent):
         else:
             self.agent_executor = self.executor_class(
                 llm=cast(BaseLLM, self.llm),
-                task=task,
+                task=task,  # type: ignore[arg-type]
                 i18n=self.i18n,
                 agent=self,
                 crew=self.crew,
@@ -760,16 +808,17 @@ class Agent(BaseAgent):
                 request_within_rpm_limit=rpm_limit_fn,
                 callbacks=[TokenCalcHandler(self._token_process)],
                 response_model=task.response_model if task else None,
+                # max_tools_per_turn=self.max_tools_per_turn, #TODO: drop this
             )

     def _update_executor_parameters(
         self,
         task: Task | None,
-        tools: list,
+        tools: list[BaseTool],
         raw_tools: list[BaseTool],
-        prompt: dict,
+        prompt: SystemPromptResult | StandardPromptResult,
         stop_words: list[str],
-        rpm_limit_fn: Callable | None,
+        rpm_limit_fn: Callable | None,  # type: ignore[type-arg]
     ) -> None:
         """Update executor parameters without recreating instance.

@@ -1567,26 +1616,26 @@ class Agent(BaseAgent):
         )
         return None

-    def kickoff(
+    def _prepare_kickoff(
         self,
         messages: str | list[LLMMessage],
         response_format: type[Any] | None = None,
-    ) -> LiteAgentOutput:
-        """
-        Execute the agent with the given messages using a LiteAgent instance.
+    ) -> tuple[AgentExecutor, dict[str, str], dict[str, Any], list[CrewStructuredTool]]:
+        """Prepare common setup for kickoff execution.

-        This method is useful when you want to use the Agent configuration but
-        with the simpler and more direct execution flow of LiteAgent.
+        This method handles all the common preparation logic shared between
+        kickoff() and kickoff_async(), including tool processing, prompt building,
+        executor creation, and input formatting.

         Args:
             messages: Either a string query or a list of message dictionaries.
                 If a string is provided, it will be converted to a user message.
                 If a list is provided, each dict should have 'role' and 'content' keys.
             response_format: Optional Pydantic model for structured output.

         Returns:
-            LiteAgentOutput: The result of the agent execution.
+            Tuple of (executor, inputs, agent_info, parsed_tools) ready for execution.
         """
         # Process platform apps and MCP tools
         if self.apps:
             platform_tools = self.get_platform_tools(self.apps)
             if platform_tools and self.tools is not None:
@@ -1596,25 +1645,361 @@ class Agent(BaseAgent):
|
||||
if mcps and self.tools is not None:
|
||||
self.tools.extend(mcps)
|
||||
|
||||
lite_agent = LiteAgent(
|
||||
id=self.id,
|
||||
role=self.role,
|
||||
goal=self.goal,
|
||||
backstory=self.backstory,
|
||||
llm=self.llm,
|
||||
tools=self.tools or [],
|
||||
max_iterations=self.max_iter,
|
||||
max_execution_time=self.max_execution_time,
|
||||
respect_context_window=self.respect_context_window,
|
||||
verbose=self.verbose,
|
||||
response_format=response_format,
|
||||
# Prepare tools
|
||||
raw_tools: list[BaseTool] = self.tools or []
|
||||
parsed_tools = parse_tools(raw_tools)
|
||||
|
||||
# Build agent_info for backward-compatible event emission
|
||||
agent_info = {
|
||||
"id": self.id,
|
||||
"role": self.role,
|
||||
"goal": self.goal,
|
||||
"backstory": self.backstory,
|
||||
"tools": raw_tools,
|
||||
"verbose": self.verbose,
|
||||
}
|
||||
|
||||
# Build prompt for standalone execution
|
||||
use_native_tool_calling = self._supports_native_tool_calling(raw_tools)
|
||||
prompt = Prompts(
|
||||
agent=self,
|
||||
has_tools=len(raw_tools) > 0,
|
||||
use_native_tool_calling=use_native_tool_calling,
|
||||
i18n=self.i18n,
|
||||
original_agent=self,
|
||||
guardrail=self.guardrail,
|
||||
guardrail_max_retries=self.guardrail_max_retries,
|
||||
use_system_prompt=self.use_system_prompt,
|
||||
system_template=self.system_template,
|
||||
prompt_template=self.prompt_template,
|
||||
response_template=self.response_template,
|
||||
).task_execution()
|
||||
|
||||
# Prepare stop words
|
||||
stop_words = [self.i18n.slice("observation")]
|
||||
if self.response_template:
|
||||
stop_words.append(
|
||||
self.response_template.split("{{ .Response }}")[1].strip()
|
||||
)
|
||||
|
||||
# Get RPM limit function
|
||||
rpm_limit_fn = (
|
||||
self._rpm_controller.check_or_wait if self._rpm_controller else None
|
||||
)
|
||||
|
||||
return lite_agent.kickoff(messages)
|
||||
# Create the executor for standalone mode (no crew, no task)
|
||||
executor = AgentExecutor(
|
||||
task=None,
|
||||
crew=None,
|
||||
llm=cast(BaseLLM, self.llm),
|
||||
agent=self,
|
||||
prompt=prompt,
|
||||
max_iter=self.max_iter,
|
||||
tools=parsed_tools,
|
||||
tools_names=get_tool_names(parsed_tools),
|
||||
stop_words=stop_words,
|
||||
tools_description=render_text_description_and_args(parsed_tools),
|
||||
tools_handler=self.tools_handler,
|
||||
original_tools=raw_tools,
|
||||
step_callback=self.step_callback,
|
||||
function_calling_llm=self.function_calling_llm,
|
||||
respect_context_window=self.respect_context_window,
|
||||
request_within_rpm_limit=rpm_limit_fn,
|
||||
callbacks=[TokenCalcHandler(self._token_process)],
|
||||
response_model=response_format,
|
||||
i18n=self.i18n,
|
||||
max_tools_per_turn=self.max_tools_per_turn,
|
||||
)
|
||||
|
||||
# Format messages
|
||||
if isinstance(messages, str):
|
||||
formatted_messages = messages
|
||||
else:
|
||||
formatted_messages = "\n".join(
|
||||
str(msg.get("content", "")) for msg in messages if msg.get("content")
|
||||
)
|
||||
|
||||
# Build the input dict for the executor
|
||||
inputs = {
|
||||
"input": formatted_messages,
|
||||
"tool_names": get_tool_names(parsed_tools),
|
||||
"tools": render_text_description_and_args(parsed_tools),
|
||||
}
|
||||
|
||||
return executor, inputs, agent_info, parsed_tools
|
||||
|
||||
def kickoff(
|
||||
self,
|
||||
messages: str | list[LLMMessage],
|
||||
response_format: type[Any] | None = None,
|
||||
) -> LiteAgentOutput | Coroutine[Any, Any, LiteAgentOutput]:
|
||||
"""
|
||||
Execute the agent with the given messages using the AgentExecutor.
|
||||
|
||||
This method provides standalone agent execution without requiring a Crew.
|
||||
It supports tools, response formatting, and guardrails.
|
||||
|
||||
When called from within a Flow (sync or async method), this automatically
|
||||
detects the event loop and returns a coroutine that the Flow framework
|
||||
awaits. Users don't need to handle async explicitly.
|
||||
|
||||
Args:
|
||||
messages: Either a string query or a list of message dictionaries.
|
||||
If a string is provided, it will be converted to a user message.
|
||||
If a list is provided, each dict should have 'role' and 'content' keys.
|
||||
response_format: Optional Pydantic model for structured output.
|
||||
|
||||
Returns:
|
||||
LiteAgentOutput: The result of the agent execution.
|
||||
When inside a Flow, returns a coroutine that resolves to LiteAgentOutput.
|
||||
|
||||
Note:
|
||||
For explicit async usage outside of Flow, use kickoff_async() directly.
|
||||
"""
|
||||
# Magic auto-async: if inside event loop (e.g., inside a Flow),
|
||||
# return coroutine for Flow to await
|
||||
if is_inside_event_loop():
|
||||
return self.kickoff_async(messages, response_format)
|
||||
|
||||
executor, inputs, agent_info, parsed_tools = self._prepare_kickoff(
|
||||
messages, response_format
|
||||
)
|
||||
|
||||
try:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LiteAgentExecutionStartedEvent(
|
||||
agent_info=agent_info,
|
||||
tools=parsed_tools,
|
||||
messages=messages,
|
||||
),
|
||||
)
|
||||
|
||||
output = self._execute_and_build_output(executor, inputs, response_format)
|
||||
if self.guardrail is not None:
|
||||
output = self._process_kickoff_guardrail(
|
||||
output=output,
|
||||
executor=executor,
|
||||
inputs=inputs,
|
||||
response_format=response_format,
|
||||
)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LiteAgentExecutionCompletedEvent(
|
||||
agent_info=agent_info,
|
||||
output=output.raw,
|
||||
),
|
||||
)
|
||||
|
||||
return output
|
||||
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LiteAgentExecutionErrorEvent(
|
||||
agent_info=agent_info,
|
||||
error=str(e),
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
def _execute_and_build_output(
|
||||
self,
|
||||
executor: AgentExecutor,
|
||||
inputs: dict[str, str],
|
||||
response_format: type[Any] | None = None,
|
||||
) -> LiteAgentOutput:
|
||||
"""Execute the agent and build the output object.
|
||||
|
||||
Args:
|
||||
executor: The executor instance.
|
||||
inputs: Input dictionary for execution.
|
||||
response_format: Optional response format.
|
||||
|
||||
Returns:
|
||||
LiteAgentOutput with raw output, formatted result, and metrics.
|
||||
"""
|
||||
import json
|
||||
|
||||
# Execute the agent (this is called from sync path, so invoke returns dict)
|
||||
result = cast(dict[str, Any], executor.invoke(inputs))
|
||||
raw_output = result.get("output", "")
|
||||
|
||||
# Handle response format conversion
|
||||
formatted_result: BaseModel | None = None
|
||||
if response_format:
|
||||
try:
|
||||
model_schema = generate_model_description(response_format)
|
||||
schema = json.dumps(model_schema, indent=2)
|
||||
instructions = self.i18n.slice("formatted_task_instructions").format(
|
||||
output_format=schema
|
||||
)
|
||||
|
||||
converter = Converter(
|
||||
llm=self.llm,
|
||||
text=raw_output,
|
||||
model=response_format,
|
||||
instructions=instructions,
|
||||
)
|
||||
|
||||
conversion_result = converter.to_pydantic()
|
||||
if isinstance(conversion_result, BaseModel):
|
||||
formatted_result = conversion_result
|
||||
except ConverterError:
|
||||
pass # Keep raw output if conversion fails
|
||||
|
||||
# Get token usage metrics
|
||||
if isinstance(self.llm, BaseLLM):
|
||||
usage_metrics = self.llm.get_token_usage_summary()
|
||||
else:
|
||||
usage_metrics = self._token_process.get_summary()
|
||||
|
||||
return LiteAgentOutput(
|
||||
raw=raw_output,
|
||||
pydantic=formatted_result,
|
||||
agent_role=self.role,
|
||||
usage_metrics=usage_metrics.model_dump() if usage_metrics else None,
|
||||
messages=executor.messages,
|
||||
)
|
||||
|
||||
async def _execute_and_build_output_async(
|
||||
self,
|
||||
executor: AgentExecutor,
|
||||
inputs: dict[str, str],
|
||||
response_format: type[Any] | None = None,
|
||||
) -> LiteAgentOutput:
|
||||
"""Execute the agent asynchronously and build the output object.
|
||||
|
||||
This is the async version of _execute_and_build_output that uses
|
||||
invoke_async() for native async execution within event loops.
|
||||
|
||||
Args:
|
||||
executor: The executor instance.
|
||||
inputs: Input dictionary for execution.
|
||||
response_format: Optional response format.
|
||||
|
||||
Returns:
|
||||
LiteAgentOutput with raw output, formatted result, and metrics.
|
||||
"""
|
||||
import json
|
||||
|
||||
# Execute the agent asynchronously
|
||||
result = await executor.invoke_async(inputs)
|
||||
raw_output = result.get("output", "")
|
||||
|
||||
# Handle response format conversion
|
||||
formatted_result: BaseModel | None = None
|
||||
if response_format:
|
||||
try:
|
||||
model_schema = generate_model_description(response_format)
|
||||
schema = json.dumps(model_schema, indent=2)
|
||||
instructions = self.i18n.slice("formatted_task_instructions").format(
|
||||
output_format=schema
|
||||
)
|
||||
|
||||
converter = Converter(
|
||||
llm=self.llm,
|
||||
text=raw_output,
|
||||
model=response_format,
|
||||
instructions=instructions,
|
||||
)
|
||||
|
||||
conversion_result = converter.to_pydantic()
|
||||
if isinstance(conversion_result, BaseModel):
|
||||
formatted_result = conversion_result
|
||||
except ConverterError:
|
||||
pass # Keep raw output if conversion fails
|
||||
|
||||
# Get token usage metrics
|
||||
if isinstance(self.llm, BaseLLM):
|
||||
usage_metrics = self.llm.get_token_usage_summary()
|
||||
else:
|
||||
usage_metrics = self._token_process.get_summary()
|
||||
|
||||
return LiteAgentOutput(
|
||||
raw=raw_output,
|
||||
pydantic=formatted_result,
|
||||
agent_role=self.role,
|
||||
usage_metrics=usage_metrics.model_dump() if usage_metrics else None,
|
||||
messages=executor.messages,
|
||||
)
|
||||
|
||||
    def _process_kickoff_guardrail(
        self,
        output: LiteAgentOutput,
        executor: AgentExecutor,
        inputs: dict[str, str],
        response_format: type[Any] | None = None,
        retry_count: int = 0,
    ) -> LiteAgentOutput:
        """Process guardrail for kickoff execution with retry logic.

        Args:
            output: Current agent output.
            executor: The executor instance.
            inputs: Input dictionary for re-execution.
            response_format: Optional response format.
            retry_count: Current retry count.

        Returns:
            Validated/updated output.
        """
        from crewai.utilities.guardrail_types import GuardrailCallable

        # Ensure guardrail is callable
        guardrail_callable: GuardrailCallable
        if isinstance(self.guardrail, str):
            from crewai.tasks.llm_guardrail import LLMGuardrail

            guardrail_callable = cast(
                GuardrailCallable,
                LLMGuardrail(description=self.guardrail, llm=cast(BaseLLM, self.llm)),
            )
        elif callable(self.guardrail):
            guardrail_callable = self.guardrail
        else:
            # Should not happen if called from kickoff with guardrail check
            return output

        guardrail_result = process_guardrail(
            output=output,
            guardrail=guardrail_callable,
            retry_count=retry_count,
            event_source=self,
            from_agent=self,
        )

        if not guardrail_result.success:
            if retry_count >= self.guardrail_max_retries:
                raise ValueError(
                    f"Agent's guardrail failed validation after {self.guardrail_max_retries} retries. "
                    f"Last error: {guardrail_result.error}"
                )

            # Add feedback and re-execute
            executor._append_message_to_state(
                guardrail_result.error or "Guardrail validation failed",
                role="user",
            )

            # Re-execute and build new output
            output = self._execute_and_build_output(executor, inputs, response_format)

            # Recursively retry guardrail
            return self._process_kickoff_guardrail(
                output=output,
                executor=executor,
                inputs=inputs,
                response_format=response_format,
                retry_count=retry_count + 1,
            )

        # Apply guardrail result if available
        if guardrail_result.result is not None:
            if isinstance(guardrail_result.result, str):
                output.raw = guardrail_result.result
            elif isinstance(guardrail_result.result, BaseModel):
                output.pydantic = guardrail_result.result

        return output
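The retry logic above accepts either a string guardrail (wrapped in `LLMGuardrail`) or a callable. A hedged sketch of a callable guardrail, assuming the `(bool, result)` tuple convention CrewAI uses for task guardrails (the function body is illustrative):

```python
from crewai import Agent


def must_cite_source(output):
    """Hypothetical guardrail: pass only when the answer cites a URL."""
    if "http" in output.raw:
        return (True, output.raw)  # success: result may replace output.raw
    # failure: the error string is appended as user feedback before re-execution
    return (False, "Add a source URL to the answer.")


agent = Agent(
    role="Researcher",
    goal="Answer with sources",
    backstory="Careful fact-checker",
    guardrail=must_cite_source,  # a plain string description also works
    guardrail_max_retries=2,  # ValueError is raised once retries are exhausted
)
```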
    async def kickoff_async(
        self,
@@ -1622,9 +2007,11 @@ class Agent(BaseAgent):
        response_format: type[Any] | None = None,
    ) -> LiteAgentOutput:
        """
        Execute the agent asynchronously with the given messages using a LiteAgent instance.
        Execute the agent asynchronously with the given messages.

        This is the async version of the kickoff method.
        This is the async version of the kickoff method that uses native async
        execution. It is designed for use within async contexts, such as when
        called from within an async Flow method.

        Args:
            messages: Either a string query or a list of message dictionaries.
@@ -1635,21 +2022,48 @@ class Agent(BaseAgent):
        Returns:
            LiteAgentOutput: The result of the agent execution.
        """
        lite_agent = LiteAgent(
            role=self.role,
            goal=self.goal,
            backstory=self.backstory,
            llm=self.llm,
            tools=self.tools or [],
            max_iterations=self.max_iter,
            max_execution_time=self.max_execution_time,
            respect_context_window=self.respect_context_window,
            verbose=self.verbose,
            response_format=response_format,
            i18n=self.i18n,
            original_agent=self,
            guardrail=self.guardrail,
            guardrail_max_retries=self.guardrail_max_retries,
        executor, inputs, agent_info, parsed_tools = self._prepare_kickoff(
            messages, response_format
        )

        return await lite_agent.kickoff_async(messages)
        try:
            crewai_event_bus.emit(
                self,
                event=LiteAgentExecutionStartedEvent(
                    agent_info=agent_info,
                    tools=parsed_tools,
                    messages=messages,
                ),
            )

            output = await self._execute_and_build_output_async(
                executor, inputs, response_format
            )

            if self.guardrail is not None:
                output = self._process_kickoff_guardrail(
                    output=output,
                    executor=executor,
                    inputs=inputs,
                    response_format=response_format,
                )

            crewai_event_bus.emit(
                self,
                event=LiteAgentExecutionCompletedEvent(
                    agent_info=agent_info,
                    output=output.raw,
                ),
            )

            return output

        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=LiteAgentExecutionErrorEvent(
                    agent_info=agent_info,
                    error=str(e),
                ),
            )
            raise
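The rewritten `kickoff_async` drives a prepared executor and brackets execution with started/completed/error events, so observers can hook in without touching the agent. A sketch of listening for completion (the event import path is assumed; the event class names themselves come from this diff):

```python
import asyncio

from crewai import Agent
from crewai.events import crewai_event_bus

# Import path assumed; the event class name appears in this diff.
from crewai.events.types.agent_events import LiteAgentExecutionCompletedEvent


@crewai_event_bus.on(LiteAgentExecutionCompletedEvent)
def log_completion(source, event):
    print("agent finished:", str(event.output)[:80])


async def main():
    agent = Agent(role="Writer", goal="Draft a note", backstory="Concise writer")
    output = await agent.kickoff_async("Write a haiku about CI")
    print(output.raw)


asyncio.run(main())
```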
@@ -236,14 +236,30 @@ def process_tool_results(agent: Agent, result: Any) -> Any:
def save_last_messages(agent: Agent) -> None:
    """Save the last messages from agent executor.

    Sanitizes messages to be compatible with TaskOutput's LLMMessage type,
    which only accepts 'user', 'assistant', 'system' roles and requires
    content to be a string or list (not None).

    Args:
        agent: The agent instance.
    """
    agent._last_messages = (
        agent.agent_executor.messages.copy()
        if agent.agent_executor and hasattr(agent.agent_executor, "messages")
        else []
    )
    if not agent.agent_executor or not hasattr(agent.agent_executor, "messages"):
        agent._last_messages = []
        return

    sanitized_messages = []
    for msg in agent.agent_executor.messages:
        role = msg.get("role", "")
        # Only include messages with valid LLMMessage roles
        if role not in ("user", "assistant", "system"):
            continue
        # Ensure content is not None (can happen with tool call assistant messages)
        content = msg.get("content")
        if content is None:
            content = ""
        sanitized_messages.append({"role": role, "content": content})

    agent._last_messages = sanitized_messages


def prepare_tools(
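The sanitization above drops non-LLMMessage roles and normalizes `None` content. The same filter in isolation, on illustrative data:

```python
raw_messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "assistant", "content": None},  # tool-call turn: content becomes ""
    {"role": "tool", "tool_call_id": "call_1", "content": "42"},  # dropped: invalid role
    {"role": "assistant", "content": "The answer is 42."},
]

sanitized = [
    {"role": m["role"], "content": m["content"] if m.get("content") is not None else ""}
    for m in raw_messages
    if m.get("role") in ("user", "assistant", "system")
]
# -> system, assistant(""), assistant("The answer is 42.")
```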
@@ -21,9 +21,9 @@ if TYPE_CHECKING:


class CrewAgentExecutorMixin:
    crew: Crew
    crew: Crew | None
    agent: Agent
    task: Task
    task: Task | None
    iterations: int
    max_iter: int
    messages: list[LLMMessage]
@@ -30,6 +30,7 @@ from crewai.hooks.llm_hooks import (
)
from crewai.utilities.agent_utils import (
    aget_llm_response,
    convert_tools_to_openai_schema,
    enforce_rpm_limit,
    format_message_for_llm,
    get_llm_response,
@@ -215,6 +216,33 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
    def _invoke_loop(self) -> AgentFinish:
        """Execute agent loop until completion.

        Checks if the LLM supports native function calling and uses that
        approach if available, otherwise falls back to the ReAct text pattern.

        Returns:
            Final answer from the agent.
        """
        # Check if model supports native function calling
        use_native_tools = (
            hasattr(self.llm, "supports_function_calling")
            and callable(getattr(self.llm, "supports_function_calling", None))
            and self.llm.supports_function_calling()
            and self.original_tools
        )

        if use_native_tools:
            return self._invoke_loop_native_tools()

        # Fall back to ReAct text-based pattern
        return self._invoke_loop_react()
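The dispatch above is deliberately defensive: the native path is taken only when the LLM exposes a callable `supports_function_calling` that returns True and tools are actually configured. The same check against a stub (the `StubLLM` class is hypothetical):

```python
class StubLLM:
    """Hypothetical LLM wrapper, only to illustrate the dispatch check."""

    def supports_function_calling(self) -> bool:
        return True


llm = StubLLM()
original_tools = ["search_tool"]  # stand-in for the executor's tool list

use_native_tools = (
    hasattr(llm, "supports_function_calling")
    and callable(getattr(llm, "supports_function_calling", None))
    and llm.supports_function_calling()
    and bool(original_tools)
)
assert use_native_tools  # any failed condition falls back to ReAct
```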
    def _invoke_loop_react(self) -> AgentFinish:
        """Execute agent loop using ReAct text-based pattern.

        This is the traditional approach where tool definitions are embedded
        in the prompt and the LLM outputs Action/Action Input text that is
        parsed to execute tools.

        Returns:
            Final answer from the agent.
        """
@@ -244,6 +272,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                    response_model=self.response_model,
                    executor_context=self,
                )
                print("--------------------------------")
                print("get_llm_response answer", answer)
                print("--------------------------------")
                # breakpoint()
                if self.response_model is not None:
                    try:
                        self.response_model.model_validate_json(answer)
@@ -333,6 +365,338 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        self._show_logs(formatted_answer)
        return formatted_answer
    def _invoke_loop_native_tools(self) -> AgentFinish:
        """Execute agent loop using native function calling.

        This method uses the LLM's native tool/function calling capability
        instead of the text-based ReAct pattern. The LLM directly returns
        structured tool calls which are executed and results fed back.

        Returns:
            Final answer from the agent.
        """
        print("--------------------------------")
        print("invoke_loop_native_tools")
        print("--------------------------------")
        # Convert tools to OpenAI schema format
        if not self.original_tools:
            # No tools available, fall back to simple LLM call
            return self._invoke_loop_native_no_tools()

        openai_tools, available_functions = convert_tools_to_openai_schema(
            self.original_tools
        )

        while True:
            try:
                if has_reached_max_iterations(self.iterations, self.max_iter):
                    formatted_answer = handle_max_iterations_exceeded(
                        None,
                        printer=self._printer,
                        i18n=self._i18n,
                        messages=self.messages,
                        llm=self.llm,
                        callbacks=self.callbacks,
                    )
                    self._show_logs(formatted_answer)
                    return formatted_answer

                enforce_rpm_limit(self.request_within_rpm_limit)

                # Debug: Show messages being sent to LLM
                print("--------------------------------")
                print(f"Messages count: {len(self.messages)}")
                for i, msg in enumerate(self.messages):
                    role = msg.get("role", "unknown")
                    content = msg.get("content", "")
                    if content:
                        preview = (
                            content[:200] + "..." if len(content) > 200 else content
                        )
                    else:
                        preview = "(no content)"
                    print(f"  [{i}] {role}: {preview}")
                print("--------------------------------")

                # Call LLM with native tools
                # Pass available_functions=None so the LLM returns tool_calls
                # without executing them. The executor handles tool execution
                # via _handle_native_tool_calls to properly manage message history.
                answer = get_llm_response(
                    llm=self.llm,
                    messages=self.messages,
                    callbacks=self.callbacks,
                    printer=self._printer,
                    tools=openai_tools,
                    available_functions=None,
                    from_task=self.task,
                    from_agent=self.agent,
                    response_model=self.response_model,
                    executor_context=self,
                )
                print("--------------------------------")
                print("invoke_loop_native_tools answer", answer)
                print("--------------------------------")
                # print("get_llm_response answer", answer[:500] + "...")

                # Check if the response is a list of tool calls
                if (
                    isinstance(answer, list)
                    and answer
                    and self._is_tool_call_list(answer)
                ):
                    # Handle tool calls - execute tools and add results to messages
                    self._handle_native_tool_calls(answer, available_functions)
                    # Continue loop to let LLM analyze results and decide next steps
                    continue

                # Text or other response - handle as potential final answer
                if isinstance(answer, str):
                    # Text response - this is the final answer
                    formatted_answer = AgentFinish(
                        thought="",
                        output=answer,
                        text=answer,
                    )
                    self._invoke_step_callback(formatted_answer)
                    self._append_message(answer)  # Save final answer to messages
                    self._show_logs(formatted_answer)
                    return formatted_answer

                # Unexpected response type, treat as final answer
                formatted_answer = AgentFinish(
                    thought="",
                    output=str(answer),
                    text=str(answer),
                )
                self._invoke_step_callback(formatted_answer)
                self._append_message(str(answer))  # Save final answer to messages
                self._show_logs(formatted_answer)
                return formatted_answer

            except Exception as e:
                if e.__class__.__module__.startswith("litellm"):
                    raise e
                if is_context_length_exceeded(e):
                    handle_context_length(
                        respect_context_window=self.respect_context_window,
                        printer=self._printer,
                        messages=self.messages,
                        llm=self.llm,
                        callbacks=self.callbacks,
                        i18n=self._i18n,
                    )
                    continue
                handle_unknown_error(self._printer, e)
                raise e
            finally:
                self.iterations += 1
|
||||
"""Execute a simple LLM call when no tools are available.
|
||||
|
||||
Returns:
|
||||
Final answer from the agent.
|
||||
"""
|
||||
enforce_rpm_limit(self.request_within_rpm_limit)
|
||||
|
||||
answer = get_llm_response(
|
||||
llm=self.llm,
|
||||
messages=self.messages,
|
||||
callbacks=self.callbacks,
|
||||
printer=self._printer,
|
||||
from_task=self.task,
|
||||
from_agent=self.agent,
|
||||
response_model=self.response_model,
|
||||
executor_context=self,
|
||||
)
|
||||
|
||||
formatted_answer = AgentFinish(
|
||||
thought="",
|
||||
output=str(answer),
|
||||
text=str(answer),
|
||||
)
|
||||
self._show_logs(formatted_answer)
|
||||
return formatted_answer
|
||||
|
||||
    def _is_tool_call_list(self, response: list[Any]) -> bool:
        """Check if a response is a list of tool calls.

        Args:
            response: The response to check.

        Returns:
            True if the response appears to be a list of tool calls.
        """
        if not response:
            return False
        first_item = response[0]
        # OpenAI-style
        if hasattr(first_item, "function") or (
            isinstance(first_item, dict) and "function" in first_item
        ):
            return True
        # Anthropic-style
        if (
            hasattr(first_item, "type")
            and getattr(first_item, "type", None) == "tool_use"
        ):
            return True
        if hasattr(first_item, "name") and hasattr(first_item, "input"):
            return True
        # Gemini-style
        if hasattr(first_item, "function_call") and first_item.function_call:
            return True
        return False
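The three provider shapes probed above look roughly like this (all values illustrative):

```python
# OpenAI-style: dict (or object) carrying a "function" payload
openai_call = {
    "id": "call_1",
    "function": {"name": "search_tool", "arguments": '{"query": "crewai"}'},
}


class ToolUseBlock:
    """Anthropic-style: type == "tool_use" with .name and .input (a dict)."""

    type = "tool_use"
    name = "search_tool"
    input = {"query": "crewai"}


class GeminiFunctionCall:
    name = "search_tool"
    args = {"query": "crewai"}


class GeminiPart:
    """Gemini-style: a part holding a .function_call with .name and .args."""

    function_call = GeminiFunctionCall()
```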
    def _handle_native_tool_calls(
        self,
        tool_calls: list[Any],
        available_functions: dict[str, Callable[..., Any]],
    ) -> None:
        """Handle a single native tool call from the LLM.

        Executes only the FIRST tool call and appends the result to message history.
        This enables sequential tool execution with reflection after each tool,
        allowing the LLM to reason about results before deciding on next steps.

        Args:
            tool_calls: List of tool calls from the LLM (only first is processed).
            available_functions: Dict mapping function names to callables.
        """
        from datetime import datetime
        import json

        from crewai.events import crewai_event_bus
        from crewai.events.types.tool_usage_events import (
            ToolUsageFinishedEvent,
            ToolUsageStartedEvent,
        )

        if not tool_calls:
            return

        # Only process the FIRST tool call for sequential execution with reflection
        tool_call = tool_calls[0]

        # Extract tool call info - handle OpenAI-style, Anthropic-style, and Gemini-style
        if hasattr(tool_call, "function"):
            # OpenAI-style: has .function.name and .function.arguments
            call_id = getattr(tool_call, "id", f"call_{id(tool_call)}")
            func_name = tool_call.function.name
            func_args = tool_call.function.arguments
        elif hasattr(tool_call, "function_call") and tool_call.function_call:
            # Gemini-style: has .function_call.name and .function_call.args
            call_id = f"call_{id(tool_call)}"
            func_name = tool_call.function_call.name
            func_args = (
                dict(tool_call.function_call.args)
                if tool_call.function_call.args
                else {}
            )
        elif hasattr(tool_call, "name") and hasattr(tool_call, "input"):
            # Anthropic format: has .name and .input (ToolUseBlock)
            call_id = getattr(tool_call, "id", f"call_{id(tool_call)}")
            func_name = tool_call.name
            func_args = tool_call.input  # Already a dict in Anthropic
        elif isinstance(tool_call, dict):
            call_id = tool_call.get("id", f"call_{id(tool_call)}")
            func_info = tool_call.get("function", {})
            func_name = func_info.get("name", "") or tool_call.get("name", "")
            func_args = func_info.get("arguments", "{}") or tool_call.get("input", {})
        else:
            return

        # Append assistant message with single tool call
        assistant_message: LLMMessage = {
            "role": "assistant",
            "content": None,
            "tool_calls": [
                {
                    "id": call_id,
                    "type": "function",
                    "function": {
                        "name": func_name,
                        "arguments": func_args
                        if isinstance(func_args, str)
                        else json.dumps(func_args),
                    },
                }
            ],
        }

        self.messages.append(assistant_message)

        # Parse arguments for the single tool call
        if isinstance(func_args, str):
            try:
                args_dict = json.loads(func_args)
            except json.JSONDecodeError:
                args_dict = {}
        else:
            args_dict = func_args

        # Emit tool usage started event
        started_at = datetime.now()
        crewai_event_bus.emit(
            self,
            event=ToolUsageStartedEvent(
                tool_name=func_name,
                tool_args=args_dict,
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        # Execute the tool
        print(f"Using Tool: {func_name}")
        result = "Tool not found"
        if func_name in available_functions:
            try:
                tool_func = available_functions[func_name]
                result = tool_func(**args_dict)
                if not isinstance(result, str):
                    result = str(result)
            except Exception as e:
                result = f"Error executing tool: {e}"

        # Emit tool usage finished event
        crewai_event_bus.emit(
            self,
            event=ToolUsageFinishedEvent(
                output=result,
                tool_name=func_name,
                tool_args=args_dict,
                from_agent=self.agent,
                from_task=self.task,
                started_at=started_at,
                finished_at=datetime.now(),
            ),
        )

        # Append tool result message
        tool_message: LLMMessage = {
            "role": "tool",
            "tool_call_id": call_id,
            "content": result,
        }
        self.messages.append(tool_message)

        # Log the tool execution
        if self.agent and self.agent.verbose:
            self._printer.print(
                content=f"Tool {func_name} executed with result: {result[:200]}...",
                color="green",
            )

        # Inject post-tool reasoning prompt to enforce analysis
        reasoning_prompt = self._i18n.slice("post_tool_reasoning")
        reasoning_message: LLMMessage = {
            "role": "user",
            "content": reasoning_prompt,
        }
        self.messages.append(reasoning_message)
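After one pass through `_handle_native_tool_calls`, the history gains three entries: the assistant's tool call, the tool result, and the injected reasoning prompt. Roughly (values illustrative; the reasoning text lives in the i18n `post_tool_reasoning` slice):

```python
messages_appended = [
    {
        "role": "assistant",
        "content": None,
        "tool_calls": [
            {
                "id": "call_1",
                "type": "function",
                "function": {"name": "search_tool", "arguments": '{"query": "crewai"}'},
            }
        ],
    },
    {"role": "tool", "tool_call_id": "call_1", "content": "results for crewai"},
    {"role": "user", "content": "<post_tool_reasoning prompt>"},  # placeholder text
]
```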
    async def ainvoke(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Execute the agent asynchronously with given inputs.

@@ -382,6 +746,29 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
    async def _ainvoke_loop(self) -> AgentFinish:
        """Execute agent loop asynchronously until completion.

        Checks if the LLM supports native function calling and uses that
        approach if available, otherwise falls back to the ReAct text pattern.

        Returns:
            Final answer from the agent.
        """
        # Check if model supports native function calling
        use_native_tools = (
            hasattr(self.llm, "supports_function_calling")
            and callable(getattr(self.llm, "supports_function_calling", None))
            and self.llm.supports_function_calling()
            and self.original_tools
        )

        if use_native_tools:
            return await self._ainvoke_loop_native_tools()

        # Fall back to ReAct text-based pattern
        return await self._ainvoke_loop_react()

    async def _ainvoke_loop_react(self) -> AgentFinish:
        """Execute agent loop asynchronously using ReAct text-based pattern.

        Returns:
            Final answer from the agent.
        """
@@ -495,6 +882,139 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        self._show_logs(formatted_answer)
        return formatted_answer
    async def _ainvoke_loop_native_tools(self) -> AgentFinish:
        """Execute agent loop asynchronously using native function calling.

        This method uses the LLM's native tool/function calling capability
        instead of the text-based ReAct pattern.

        Returns:
            Final answer from the agent.
        """
        # Convert tools to OpenAI schema format
        if not self.original_tools:
            return await self._ainvoke_loop_native_no_tools()

        openai_tools, available_functions = convert_tools_to_openai_schema(
            self.original_tools
        )

        while True:
            try:
                if has_reached_max_iterations(self.iterations, self.max_iter):
                    formatted_answer = handle_max_iterations_exceeded(
                        None,
                        printer=self._printer,
                        i18n=self._i18n,
                        messages=self.messages,
                        llm=self.llm,
                        callbacks=self.callbacks,
                    )
                    self._show_logs(formatted_answer)
                    return formatted_answer

                enforce_rpm_limit(self.request_within_rpm_limit)

                # Call LLM with native tools
                # Pass available_functions=None so the LLM returns tool_calls
                # without executing them. The executor handles tool execution
                # via _handle_native_tool_calls to properly manage message history.
                answer = await aget_llm_response(
                    llm=self.llm,
                    messages=self.messages,
                    callbacks=self.callbacks,
                    printer=self._printer,
                    tools=openai_tools,
                    available_functions=None,
                    from_task=self.task,
                    from_agent=self.agent,
                    response_model=self.response_model,
                    executor_context=self,
                )
                print("--------------------------------")
                print("native llm completion answer", answer)
                print("--------------------------------")

                # Check if the response is a list of tool calls
                if (
                    isinstance(answer, list)
                    and answer
                    and self._is_tool_call_list(answer)
                ):
                    # Handle tool calls - execute tools and add results to messages
                    self._handle_native_tool_calls(answer, available_functions)
                    # Continue loop to let LLM analyze results and decide next steps
                    continue

                # Text or other response - handle as potential final answer
                if isinstance(answer, str):
                    # Text response - this is the final answer
                    formatted_answer = AgentFinish(
                        thought="",
                        output=answer,
                        text=answer,
                    )
                    self._invoke_step_callback(formatted_answer)
                    self._append_message(answer)  # Save final answer to messages
                    self._show_logs(formatted_answer)
                    return formatted_answer

                # Unexpected response type, treat as final answer
                formatted_answer = AgentFinish(
                    thought="",
                    output=str(answer),
                    text=str(answer),
                )
                self._invoke_step_callback(formatted_answer)
                self._append_message(str(answer))  # Save final answer to messages
                self._show_logs(formatted_answer)
                return formatted_answer

            except Exception as e:
                if e.__class__.__module__.startswith("litellm"):
                    raise e
                if is_context_length_exceeded(e):
                    handle_context_length(
                        respect_context_window=self.respect_context_window,
                        printer=self._printer,
                        messages=self.messages,
                        llm=self.llm,
                        callbacks=self.callbacks,
                        i18n=self._i18n,
                    )
                    continue
                handle_unknown_error(self._printer, e)
                raise e
            finally:
                self.iterations += 1
    async def _ainvoke_loop_native_no_tools(self) -> AgentFinish:
        """Execute a simple async LLM call when no tools are available.

        Returns:
            Final answer from the agent.
        """
        enforce_rpm_limit(self.request_within_rpm_limit)

        answer = await aget_llm_response(
            llm=self.llm,
            messages=self.messages,
            callbacks=self.callbacks,
            printer=self._printer,
            from_task=self.task,
            from_agent=self.agent,
            response_model=self.response_model,
            executor_context=self,
        )

        formatted_answer = AgentFinish(
            thought="",
            output=str(answer),
            text=str(answer),
        )
        self._show_logs(formatted_answer)
        return formatted_answer

    def _handle_agent_action(
        self, formatted_answer: AgentAction, tool_result: ToolResult
    ) -> AgentAction | AgentFinish:
@@ -0,0 +1,32 @@
from crewai.cli.authentication.providers.base_provider import BaseProvider


class KeycloakProvider(BaseProvider):
    def get_authorize_url(self) -> str:
        return f"{self._oauth2_base_url()}/realms/{self.settings.extra.get('realm')}/protocol/openid-connect/auth/device"

    def get_token_url(self) -> str:
        return f"{self._oauth2_base_url()}/realms/{self.settings.extra.get('realm')}/protocol/openid-connect/token"

    def get_jwks_url(self) -> str:
        return f"{self._oauth2_base_url()}/realms/{self.settings.extra.get('realm')}/protocol/openid-connect/certs"

    def get_issuer(self) -> str:
        return f"{self._oauth2_base_url()}/realms/{self.settings.extra.get('realm')}"

    def get_audience(self) -> str:
        return self.settings.audience or "no-audience-provided"

    def get_client_id(self) -> str:
        if self.settings.client_id is None:
            raise ValueError(
                "Client ID is required. Please set it in the configuration."
            )
        return self.settings.client_id

    def get_required_fields(self) -> list[str]:
        return ["realm"]

    def _oauth2_base_url(self) -> str:
        domain = self.settings.domain.removeprefix("https://").removeprefix("http://")
        return f"https://{domain}"
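Given a Keycloak domain and realm, the provider resolves its endpoints like so (domain and realm values are illustrative):

```python
# settings.domain = "auth.example.com"  (any scheme is stripped, https re-added)
# settings.extra = {"realm": "crewai"}
base = "https://auth.example.com"

authorize_url = f"{base}/realms/crewai/protocol/openid-connect/auth/device"
token_url = f"{base}/realms/crewai/protocol/openid-connect/token"
jwks_url = f"{base}/realms/crewai/protocol/openid-connect/certs"
issuer = f"{base}/realms/crewai"
```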
@@ -1,10 +1,10 @@
from functools import lru_cache
import subprocess


class Repository:
    def __init__(self, path: str = ".") -> None:
        self.path = path
        self._is_git_repo_cache: bool | None = None

        if not self.is_git_installed():
            raise ValueError("Git is not installed or not found in your PATH.")
@@ -40,26 +40,22 @@ class Repository:
            encoding="utf-8",
        ).strip()

    @lru_cache(maxsize=None)  # noqa: B019
    def is_git_repo(self) -> bool:
        """Check if the current directory is a git repository.

        The result is cached at the instance level to avoid redundant checks
        while allowing proper garbage collection of Repository instances.
        Notes:
            - TODO: This method is cached to avoid redundant checks, but using lru_cache on methods can lead to memory leaks
        """
        if self._is_git_repo_cache is not None:
            return self._is_git_repo_cache

        try:
            subprocess.check_output(
                ["git", "rev-parse", "--is-inside-work-tree"],  # noqa: S607
                cwd=self.path,
                encoding="utf-8",
            )
            self._is_git_repo_cache = True
            return True
        except subprocess.CalledProcessError:
            self._is_git_repo_cache = False

        return self._is_git_repo_cache
        return False

    def has_uncommitted_changes(self) -> bool:
        """Check if the repository has uncommitted changes."""
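The TODO above names the known pitfall of `functools.lru_cache` on instance methods: the cache holds a strong reference to `self`, so `Repository` instances are never garbage-collected. One leak-free alternative is `functools.cached_property`, which stores the value on the instance itself; a sketch (note the call sites would change from `repo.is_git_repo()` to `repo.is_git_repo`):

```python
import subprocess
from functools import cached_property


class Repository:
    def __init__(self, path: str = ".") -> None:
        self.path = path

    @cached_property
    def is_git_repo(self) -> bool:  # attribute-style access, cached per instance
        try:
            subprocess.check_output(
                ["git", "rev-parse", "--is-inside-work-tree"],
                cwd=self.path,
                encoding="utf-8",
            )
            return True
        except subprocess.CalledProcessError:
            return False
```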
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
    "crewai[tools]==1.8.0"
    "crewai[tools]==1.8.1"
]

[project.scripts]

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
    "crewai[tools]==1.8.0"
    "crewai[tools]==1.8.1"
]

[project.scripts]
@@ -209,10 +209,9 @@ class EventListener(BaseEventListener):
        @crewai_event_bus.on(TaskCompletedEvent)
        def on_task_completed(source: Any, event: TaskCompletedEvent) -> None:
            # Handle telemetry
            span = self.execution_spans.get(source)
            span = self.execution_spans.pop(source, None)
            if span:
                self._telemetry.task_ended(span, source, source.agent.crew)
            self.execution_spans[source] = None

            # Pass task name if it exists
            task_name = get_task_name(source)
@@ -222,11 +221,10 @@ class EventListener(BaseEventListener):

        @crewai_event_bus.on(TaskFailedEvent)
        def on_task_failed(source: Any, event: TaskFailedEvent) -> None:
            span = self.execution_spans.get(source)
            span = self.execution_spans.pop(source, None)
            if span:
                if source.agent and source.agent.crew:
                    self._telemetry.task_ended(span, source, source.agent.crew)
            self.execution_spans[source] = None

            # Pass task name if it exists
            task_name = get_task_name(source)
@@ -380,6 +378,12 @@ class EventListener(BaseEventListener):
                self.formatter.handle_llm_tool_usage_finished(
                    event.tool_name,
                )
            else:
                self.formatter.handle_tool_usage_finished(
                    event.tool_name,
                    event.output,
                    getattr(event, "run_attempts", None),
                )

        @crewai_event_bus.on(ToolUsageErrorEvent)
        def on_tool_usage_error(source: Any, event: ToolUsageErrorEvent) -> None:
@@ -1,3 +1,29 @@
from crewai.events.types.a2a_events import (
    A2AAgentCardFetchedEvent,
    A2AArtifactReceivedEvent,
    A2AAuthenticationFailedEvent,
    A2AConnectionErrorEvent,
    A2AConversationCompletedEvent,
    A2AConversationStartedEvent,
    A2ADelegationCompletedEvent,
    A2ADelegationStartedEvent,
    A2AMessageSentEvent,
    A2AParallelDelegationCompletedEvent,
    A2AParallelDelegationStartedEvent,
    A2APollingStartedEvent,
    A2APollingStatusEvent,
    A2APushNotificationReceivedEvent,
    A2APushNotificationRegisteredEvent,
    A2APushNotificationSentEvent,
    A2APushNotificationTimeoutEvent,
    A2AResponseReceivedEvent,
    A2AServerTaskCanceledEvent,
    A2AServerTaskCompletedEvent,
    A2AServerTaskFailedEvent,
    A2AServerTaskStartedEvent,
    A2AStreamingChunkEvent,
    A2AStreamingStartedEvent,
)
from crewai.events.types.agent_events import (
    AgentExecutionCompletedEvent,
    AgentExecutionErrorEvent,
@@ -76,7 +102,31 @@ from crewai.events.types.tool_usage_events import (


EventTypes = (
    CrewKickoffStartedEvent
    A2AAgentCardFetchedEvent
    | A2AArtifactReceivedEvent
    | A2AAuthenticationFailedEvent
    | A2AConnectionErrorEvent
    | A2AConversationCompletedEvent
    | A2AConversationStartedEvent
    | A2ADelegationCompletedEvent
    | A2ADelegationStartedEvent
    | A2AMessageSentEvent
    | A2APollingStartedEvent
    | A2APollingStatusEvent
    | A2APushNotificationReceivedEvent
    | A2APushNotificationRegisteredEvent
    | A2APushNotificationSentEvent
    | A2APushNotificationTimeoutEvent
    | A2AResponseReceivedEvent
    | A2AServerTaskCanceledEvent
    | A2AServerTaskCompletedEvent
    | A2AServerTaskFailedEvent
    | A2AServerTaskStartedEvent
    | A2AStreamingChunkEvent
    | A2AStreamingStartedEvent
    | A2AParallelDelegationStartedEvent
    | A2AParallelDelegationCompletedEvent
    | CrewKickoffStartedEvent
    | CrewKickoffCompletedEvent
    | CrewKickoffFailedEvent
    | CrewTestStartedEvent
@@ -1,7 +1,7 @@
"""Trace collection listener for orchestrating trace collection."""

import os
from typing import Any, ClassVar, cast
from typing import Any, ClassVar
import uuid

from typing_extensions import Self
@@ -18,6 +18,32 @@ from crewai.events.listeners.tracing.types import TraceEvent
from crewai.events.listeners.tracing.utils import (
    safe_serialize_to_dict,
)
from crewai.events.types.a2a_events import (
    A2AAgentCardFetchedEvent,
    A2AArtifactReceivedEvent,
    A2AAuthenticationFailedEvent,
    A2AConnectionErrorEvent,
    A2AConversationCompletedEvent,
    A2AConversationStartedEvent,
    A2ADelegationCompletedEvent,
    A2ADelegationStartedEvent,
    A2AMessageSentEvent,
    A2AParallelDelegationCompletedEvent,
    A2AParallelDelegationStartedEvent,
    A2APollingStartedEvent,
    A2APollingStatusEvent,
    A2APushNotificationReceivedEvent,
    A2APushNotificationRegisteredEvent,
    A2APushNotificationSentEvent,
    A2APushNotificationTimeoutEvent,
    A2AResponseReceivedEvent,
    A2AServerTaskCanceledEvent,
    A2AServerTaskCompletedEvent,
    A2AServerTaskFailedEvent,
    A2AServerTaskStartedEvent,
    A2AStreamingChunkEvent,
    A2AStreamingStartedEvent,
)
from crewai.events.types.agent_events import (
    AgentExecutionCompletedEvent,
    AgentExecutionErrorEvent,
@@ -105,7 +131,7 @@ class TraceCollectionListener(BaseEventListener):
        """Create or return singleton instance."""
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cast(Self, cls._instance)
        return cls._instance

    def __init__(
        self,
@@ -160,6 +186,7 @@ class TraceCollectionListener(BaseEventListener):
        self._register_flow_event_handlers(crewai_event_bus)
        self._register_context_event_handlers(crewai_event_bus)
        self._register_action_event_handlers(crewai_event_bus)
        self._register_a2a_event_handlers(crewai_event_bus)
        self._register_system_event_handlers(crewai_event_bus)

        self._listeners_setup = True
@@ -439,6 +466,147 @@ class TraceCollectionListener(BaseEventListener):
        ) -> None:
            self._handle_action_event("knowledge_query_failed", source, event)

    def _register_a2a_event_handlers(self, event_bus: CrewAIEventsBus) -> None:
        """Register handlers for A2A (Agent-to-Agent) events."""

        @event_bus.on(A2ADelegationStartedEvent)
        def on_a2a_delegation_started(
            source: Any, event: A2ADelegationStartedEvent
        ) -> None:
            self._handle_action_event("a2a_delegation_started", source, event)

        @event_bus.on(A2ADelegationCompletedEvent)
        def on_a2a_delegation_completed(
            source: Any, event: A2ADelegationCompletedEvent
        ) -> None:
            self._handle_action_event("a2a_delegation_completed", source, event)

        @event_bus.on(A2AConversationStartedEvent)
        def on_a2a_conversation_started(
            source: Any, event: A2AConversationStartedEvent
        ) -> None:
            self._handle_action_event("a2a_conversation_started", source, event)

        @event_bus.on(A2AMessageSentEvent)
        def on_a2a_message_sent(source: Any, event: A2AMessageSentEvent) -> None:
            self._handle_action_event("a2a_message_sent", source, event)

        @event_bus.on(A2AResponseReceivedEvent)
        def on_a2a_response_received(
            source: Any, event: A2AResponseReceivedEvent
        ) -> None:
            self._handle_action_event("a2a_response_received", source, event)

        @event_bus.on(A2AConversationCompletedEvent)
        def on_a2a_conversation_completed(
            source: Any, event: A2AConversationCompletedEvent
        ) -> None:
            self._handle_action_event("a2a_conversation_completed", source, event)

        @event_bus.on(A2APollingStartedEvent)
        def on_a2a_polling_started(source: Any, event: A2APollingStartedEvent) -> None:
            self._handle_action_event("a2a_polling_started", source, event)

        @event_bus.on(A2APollingStatusEvent)
        def on_a2a_polling_status(source: Any, event: A2APollingStatusEvent) -> None:
            self._handle_action_event("a2a_polling_status", source, event)

        @event_bus.on(A2APushNotificationRegisteredEvent)
        def on_a2a_push_notification_registered(
            source: Any, event: A2APushNotificationRegisteredEvent
        ) -> None:
            self._handle_action_event("a2a_push_notification_registered", source, event)

        @event_bus.on(A2APushNotificationReceivedEvent)
        def on_a2a_push_notification_received(
            source: Any, event: A2APushNotificationReceivedEvent
        ) -> None:
            self._handle_action_event("a2a_push_notification_received", source, event)

        @event_bus.on(A2APushNotificationSentEvent)
        def on_a2a_push_notification_sent(
            source: Any, event: A2APushNotificationSentEvent
        ) -> None:
            self._handle_action_event("a2a_push_notification_sent", source, event)

        @event_bus.on(A2APushNotificationTimeoutEvent)
        def on_a2a_push_notification_timeout(
            source: Any, event: A2APushNotificationTimeoutEvent
        ) -> None:
            self._handle_action_event("a2a_push_notification_timeout", source, event)

        @event_bus.on(A2AStreamingStartedEvent)
        def on_a2a_streaming_started(
            source: Any, event: A2AStreamingStartedEvent
        ) -> None:
            self._handle_action_event("a2a_streaming_started", source, event)

        @event_bus.on(A2AStreamingChunkEvent)
        def on_a2a_streaming_chunk(source: Any, event: A2AStreamingChunkEvent) -> None:
            self._handle_action_event("a2a_streaming_chunk", source, event)

        @event_bus.on(A2AAgentCardFetchedEvent)
        def on_a2a_agent_card_fetched(
            source: Any, event: A2AAgentCardFetchedEvent
        ) -> None:
            self._handle_action_event("a2a_agent_card_fetched", source, event)

        @event_bus.on(A2AAuthenticationFailedEvent)
        def on_a2a_authentication_failed(
            source: Any, event: A2AAuthenticationFailedEvent
        ) -> None:
            self._handle_action_event("a2a_authentication_failed", source, event)

        @event_bus.on(A2AArtifactReceivedEvent)
        def on_a2a_artifact_received(
            source: Any, event: A2AArtifactReceivedEvent
        ) -> None:
            self._handle_action_event("a2a_artifact_received", source, event)

        @event_bus.on(A2AConnectionErrorEvent)
        def on_a2a_connection_error(
            source: Any, event: A2AConnectionErrorEvent
        ) -> None:
            self._handle_action_event("a2a_connection_error", source, event)

        @event_bus.on(A2AServerTaskStartedEvent)
        def on_a2a_server_task_started(
            source: Any, event: A2AServerTaskStartedEvent
        ) -> None:
            self._handle_action_event("a2a_server_task_started", source, event)

        @event_bus.on(A2AServerTaskCompletedEvent)
        def on_a2a_server_task_completed(
            source: Any, event: A2AServerTaskCompletedEvent
        ) -> None:
            self._handle_action_event("a2a_server_task_completed", source, event)

        @event_bus.on(A2AServerTaskCanceledEvent)
        def on_a2a_server_task_canceled(
            source: Any, event: A2AServerTaskCanceledEvent
        ) -> None:
            self._handle_action_event("a2a_server_task_canceled", source, event)

        @event_bus.on(A2AServerTaskFailedEvent)
        def on_a2a_server_task_failed(
            source: Any, event: A2AServerTaskFailedEvent
        ) -> None:
            self._handle_action_event("a2a_server_task_failed", source, event)

        @event_bus.on(A2AParallelDelegationStartedEvent)
        def on_a2a_parallel_delegation_started(
            source: Any, event: A2AParallelDelegationStartedEvent
        ) -> None:
            self._handle_action_event("a2a_parallel_delegation_started", source, event)

        @event_bus.on(A2AParallelDelegationCompletedEvent)
        def on_a2a_parallel_delegation_completed(
            source: Any, event: A2AParallelDelegationCompletedEvent
        ) -> None:
            self._handle_action_event(
                "a2a_parallel_delegation_completed", source, event
            )

    def _register_system_event_handlers(self, event_bus: CrewAIEventsBus) -> None:
        """Register handlers for system signal events (SIGTERM, SIGINT, etc.)."""
@@ -570,10 +738,15 @@ class TraceCollectionListener(BaseEventListener):
        if event_type not in self.complex_events:
            return safe_serialize_to_dict(event)
        if event_type == "task_started":
            task_name = event.task.name or event.task.description
            task_display_name = (
                task_name[:80] + "..." if len(task_name) > 80 else task_name
            )
            return {
                "task_description": event.task.description,
                "expected_output": event.task.expected_output,
                "task_name": event.task.name or event.task.description,
                "task_name": task_name,
                "task_display_name": task_display_name,
                "context": event.context,
                "agent_role": source.agent.role,
                "task_id": str(event.task.id),
@@ -4,68 +4,120 @@ This module defines events emitted during A2A protocol delegation,
including both single-turn and multiturn conversation flows.
"""

from __future__ import annotations

from typing import Any, Literal

from pydantic import model_validator

from crewai.events.base_events import BaseEvent


class A2AEventBase(BaseEvent):
    """Base class for A2A events with task/agent context."""

    from_task: Any | None = None
    from_agent: Any | None = None
    from_task: Any = None
    from_agent: Any = None

    def __init__(self, **data: Any) -> None:
        """Initialize A2A event, extracting task and agent metadata."""
        if data.get("from_task"):
            task = data["from_task"]
    @model_validator(mode="before")
    @classmethod
    def extract_task_and_agent_metadata(cls, data: dict[str, Any]) -> dict[str, Any]:
        """Extract task and agent metadata before validation."""
        if task := data.get("from_task"):
            data["task_id"] = str(task.id)
            data["task_name"] = task.name or task.description
            data.setdefault("source_fingerprint", str(task.id))
            data.setdefault("source_type", "task")
            data.setdefault(
                "fingerprint_metadata",
                {
                    "task_id": str(task.id),
                    "task_name": task.name or task.description,
                },
            )
            data["from_task"] = None

        if data.get("from_agent"):
            agent = data["from_agent"]
        if agent := data.get("from_agent"):
            data["agent_id"] = str(agent.id)
            data["agent_role"] = agent.role
            data.setdefault("source_fingerprint", str(agent.id))
            data.setdefault("source_type", "agent")
            data.setdefault(
                "fingerprint_metadata",
                {
                    "agent_id": str(agent.id),
                    "agent_role": agent.role,
                },
            )
            data["from_agent"] = None

        super().__init__(**data)
        return data

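Moving the extraction from `__init__` into `model_validator(mode="before")` means it runs for every Pydantic construction path, including `model_validate`. A sketch of the effect (the `FakeTask` stand-in is illustrative, and it assumes the base event exposes `task_id`):

```python
from crewai.events.types.a2a_events import A2AMessageSentEvent


class FakeTask:
    """Stand-in for a crewai Task, for illustration only."""

    id = "task-123"
    name = "summarize"
    description = "Summarize the report"


event = A2AMessageSentEvent(message="hello", turn_number=1, from_task=FakeTask())
print(event.task_id)    # "task-123", extracted by the before-validator
print(event.from_task)  # None: cleared so the task object is not serialized
```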
class A2ADelegationStartedEvent(A2AEventBase):
    """Event emitted when A2A delegation starts.

    Attributes:
        endpoint: A2A agent endpoint URL (AgentCard URL)
        task_description: Task being delegated to the A2A agent
        agent_id: A2A agent identifier
        is_multiturn: Whether this is part of a multiturn conversation
        turn_number: Current turn number (1-indexed, 1 for single-turn)
        endpoint: A2A agent endpoint URL (AgentCard URL).
        task_description: Task being delegated to the A2A agent.
        agent_id: A2A agent identifier.
        context_id: A2A context ID grouping related tasks.
        is_multiturn: Whether this is part of a multiturn conversation.
        turn_number: Current turn number (1-indexed, 1 for single-turn).
        a2a_agent_name: Name of the A2A agent from agent card.
        agent_card: Full A2A agent card metadata.
        protocol_version: A2A protocol version being used.
        provider: Agent provider/organization info from agent card.
        skill_id: ID of the specific skill being invoked.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_delegation_started"
    endpoint: str
    task_description: str
    agent_id: str
    context_id: str | None = None
    is_multiturn: bool = False
    turn_number: int = 1
    a2a_agent_name: str | None = None
    agent_card: dict[str, Any] | None = None
    protocol_version: str | None = None
    provider: dict[str, Any] | None = None
    skill_id: str | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None


class A2ADelegationCompletedEvent(A2AEventBase):
    """Event emitted when A2A delegation completes.

    Attributes:
        status: Completion status (completed, input_required, failed, etc.)
        result: Result message if status is completed
        error: Error/response message (error for failed, response for input_required)
        is_multiturn: Whether this is part of a multiturn conversation
        status: Completion status (completed, input_required, failed, etc.).
        result: Result message if status is completed.
        error: Error/response message (error for failed, response for input_required).
        context_id: A2A context ID grouping related tasks.
        is_multiturn: Whether this is part of a multiturn conversation.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        agent_card: Full A2A agent card metadata.
        provider: Agent provider/organization info from agent card.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_delegation_completed"
    status: str
    result: str | None = None
    error: str | None = None
    context_id: str | None = None
    is_multiturn: bool = False
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    agent_card: dict[str, Any] | None = None
    provider: dict[str, Any] | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None

class A2AConversationStartedEvent(A2AEventBase):
@@ -75,51 +127,95 @@ class A2AConversationStartedEvent(A2AEventBase):
    before the first message exchange.

    Attributes:
        agent_id: A2A agent identifier
        endpoint: A2A agent endpoint URL
        a2a_agent_name: Name of the A2A agent from agent card
        agent_id: A2A agent identifier.
        endpoint: A2A agent endpoint URL.
        context_id: A2A context ID grouping related tasks.
        a2a_agent_name: Name of the A2A agent from agent card.
        agent_card: Full A2A agent card metadata.
        protocol_version: A2A protocol version being used.
        provider: Agent provider/organization info from agent card.
        skill_id: ID of the specific skill being invoked.
        reference_task_ids: Related task IDs for context.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_conversation_started"
    agent_id: str
    endpoint: str
    context_id: str | None = None
    a2a_agent_name: str | None = None
    agent_card: dict[str, Any] | None = None
    protocol_version: str | None = None
    provider: dict[str, Any] | None = None
    skill_id: str | None = None
    reference_task_ids: list[str] | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None


class A2AMessageSentEvent(A2AEventBase):
    """Event emitted when a message is sent to the A2A agent.

    Attributes:
        message: Message content sent to the A2A agent
        turn_number: Current turn number (1-indexed)
        is_multiturn: Whether this is part of a multiturn conversation
        agent_role: Role of the CrewAI agent sending the message
        message: Message content sent to the A2A agent.
        turn_number: Current turn number (1-indexed).
        context_id: A2A context ID grouping related tasks.
        message_id: Unique A2A message identifier.
        is_multiturn: Whether this is part of a multiturn conversation.
        agent_role: Role of the CrewAI agent sending the message.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        skill_id: ID of the specific skill being invoked.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_message_sent"
    message: str
    turn_number: int
    context_id: str | None = None
    message_id: str | None = None
    is_multiturn: bool = False
    agent_role: str | None = None
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    skill_id: str | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None


class A2AResponseReceivedEvent(A2AEventBase):
    """Event emitted when a response is received from the A2A agent.

    Attributes:
        response: Response content from the A2A agent
        turn_number: Current turn number (1-indexed)
        is_multiturn: Whether this is part of a multiturn conversation
        status: Response status (input_required, completed, etc.)
        agent_role: Role of the CrewAI agent (for display)
        response: Response content from the A2A agent.
        turn_number: Current turn number (1-indexed).
        context_id: A2A context ID grouping related tasks.
        message_id: Unique A2A message identifier.
        is_multiturn: Whether this is part of a multiturn conversation.
        status: Response status (input_required, completed, etc.).
        final: Whether this is the final response in the stream.
        agent_role: Role of the CrewAI agent (for display).
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_response_received"
    response: str
    turn_number: int
    context_id: str | None = None
    message_id: str | None = None
    is_multiturn: bool = False
    status: str
    final: bool = False
    agent_role: str | None = None
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None

class A2AConversationCompletedEvent(A2AEventBase):
@@ -128,85 +224,433 @@ class A2AConversationCompletedEvent(A2AEventBase):
    This is emitted once at the end of a multiturn conversation.

    Attributes:
        status: Final status (completed, failed, etc.)
        final_result: Final result if completed successfully
        error: Error message if failed
        total_turns: Total number of turns in the conversation
        status: Final status (completed, failed, etc.).
        final_result: Final result if completed successfully.
        error: Error message if failed.
        context_id: A2A context ID grouping related tasks.
        total_turns: Total number of turns in the conversation.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        agent_card: Full A2A agent card metadata.
        reference_task_ids: Related task IDs for context.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_conversation_completed"
    status: Literal["completed", "failed"]
    final_result: str | None = None
    error: str | None = None
    context_id: str | None = None
    total_turns: int
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    agent_card: dict[str, Any] | None = None
    reference_task_ids: list[str] | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None

class A2APollingStartedEvent(A2AEventBase):
|
||||
"""Event emitted when polling mode begins for A2A delegation.
|
||||
|
||||
Attributes:
|
||||
task_id: A2A task ID being polled
|
||||
polling_interval: Seconds between poll attempts
|
||||
endpoint: A2A agent endpoint URL
|
||||
task_id: A2A task ID being polled.
|
||||
context_id: A2A context ID grouping related tasks.
|
||||
polling_interval: Seconds between poll attempts.
|
||||
endpoint: A2A agent endpoint URL.
|
||||
a2a_agent_name: Name of the A2A agent from agent card.
|
||||
metadata: Custom A2A metadata key-value pairs.
|
||||
"""
|
||||
|
||||
type: str = "a2a_polling_started"
|
||||
task_id: str
|
||||
context_id: str | None = None
|
||||
polling_interval: float
|
||||
endpoint: str
|
||||
a2a_agent_name: str | None = None
|
||||
metadata: dict[str, Any] | None = None
|
||||
|
||||
|
||||
class A2APollingStatusEvent(A2AEventBase):
|
||||
"""Event emitted on each polling iteration.
|
||||
|
||||
Attributes:
|
||||
task_id: A2A task ID being polled
|
||||
state: Current task state from remote agent
|
||||
elapsed_seconds: Time since polling started
|
||||
poll_count: Number of polls completed
|
||||
task_id: A2A task ID being polled.
|
||||
context_id: A2A context ID grouping related tasks.
|
||||
state: Current task state from remote agent.
|
||||
elapsed_seconds: Time since polling started.
|
||||
poll_count: Number of polls completed.
|
||||
endpoint: A2A agent endpoint URL.
|
||||
a2a_agent_name: Name of the A2A agent from agent card.
|
||||
metadata: Custom A2A metadata key-value pairs.
|
||||
"""
|
||||
|
||||
type: str = "a2a_polling_status"
|
||||
task_id: str
|
||||
context_id: str | None = None
|
||||
state: str
|
||||
elapsed_seconds: float
|
||||
poll_count: int
|
||||
endpoint: str | None = None
|
||||
a2a_agent_name: str | None = None
|
||||
metadata: dict[str, Any] | None = None
|
||||
|
||||
|
||||
class A2APushNotificationRegisteredEvent(A2AEventBase):
|
||||
"""Event emitted when push notification callback is registered.
|
||||
|
||||
Attributes:
|
||||
task_id: A2A task ID for which callback is registered
|
||||
callback_url: URL where agent will send push notifications
|
||||
task_id: A2A task ID for which callback is registered.
|
||||
context_id: A2A context ID grouping related tasks.
|
||||
callback_url: URL where agent will send push notifications.
|
||||
endpoint: A2A agent endpoint URL.
|
||||
a2a_agent_name: Name of the A2A agent from agent card.
|
||||
metadata: Custom A2A metadata key-value pairs.
|
||||
"""
|
||||
|
||||
type: str = "a2a_push_notification_registered"
|
||||
task_id: str
|
||||
context_id: str | None = None
|
||||
callback_url: str
|
||||
endpoint: str | None = None
|
||||
a2a_agent_name: str | None = None
|
||||
metadata: dict[str, Any] | None = None
|
||||
|
||||
|
||||
class A2APushNotificationReceivedEvent(A2AEventBase):
|
||||
"""Event emitted when a push notification is received.
|
||||
|
||||
This event should be emitted by the user's webhook handler when it receives
|
||||
a push notification from the remote A2A agent, before calling
|
||||
`result_store.store_result()`.
|
||||
|
||||
Attributes:
|
||||
task_id: A2A task ID from the notification
|
||||
state: Current task state from the notification
|
||||
task_id: A2A task ID from the notification.
|
||||
context_id: A2A context ID grouping related tasks.
|
||||
state: Current task state from the notification.
|
||||
endpoint: A2A agent endpoint URL.
|
||||
a2a_agent_name: Name of the A2A agent from agent card.
|
||||
metadata: Custom A2A metadata key-value pairs.
|
||||
"""
|
||||
|
||||
type: str = "a2a_push_notification_received"
|
||||
task_id: str
|
||||
context_id: str | None = None
|
||||
state: str
|
||||
endpoint: str | None = None
|
||||
a2a_agent_name: str | None = None
|
||||
metadata: dict[str, Any] | None = None
|
||||
|
||||
|
||||
class A2APushNotificationSentEvent(A2AEventBase):
|
||||
"""Event emitted when a push notification is sent to a callback URL.
|
||||
|
||||
Emitted by the A2A server when it sends a task status update to the
|
||||
client's registered push notification callback URL.
|
||||
|
||||
Attributes:
|
||||
task_id: A2A task ID being notified.
|
||||
context_id: A2A context ID grouping related tasks.
|
||||
callback_url: URL the notification was sent to.
|
||||
state: Task state being reported.
|
||||
success: Whether the notification was successfully delivered.
|
||||
error: Error message if delivery failed.
|
||||
metadata: Custom A2A metadata key-value pairs.
|
||||
"""
|
||||
|
||||
type: str = "a2a_push_notification_sent"
|
||||
task_id: str
|
||||
context_id: str | None = None
|
||||
callback_url: str
|
||||
state: str
|
||||
success: bool = True
|
||||
error: str | None = None
|
||||
metadata: dict[str, Any] | None = None
|
||||
|
||||
|
||||
class A2APushNotificationTimeoutEvent(A2AEventBase):
    """Event emitted when push notification wait times out.

    Attributes:
-        task_id: A2A task ID that timed out
-        timeout_seconds: Timeout duration in seconds
+        task_id: A2A task ID that timed out.
+        context_id: A2A context ID grouping related tasks.
+        timeout_seconds: Timeout duration in seconds.
+        endpoint: A2A agent endpoint URL.
+        a2a_agent_name: Name of the A2A agent from agent card.
+        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_push_notification_timeout"
    task_id: str
    context_id: str | None = None
    timeout_seconds: float
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None

class A2AStreamingStartedEvent(A2AEventBase):
    """Event emitted when streaming mode begins for A2A delegation.

    Attributes:
        task_id: A2A task ID for the streaming session.
        context_id: A2A context ID grouping related tasks.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        turn_number: Current turn number (1-indexed).
        is_multiturn: Whether this is part of a multiturn conversation.
        agent_role: Role of the CrewAI agent.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_streaming_started"
    task_id: str | None = None
    context_id: str | None = None
    endpoint: str
    a2a_agent_name: str | None = None
    turn_number: int = 1
    is_multiturn: bool = False
    agent_role: str | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None

class A2AStreamingChunkEvent(A2AEventBase):
    """Event emitted when a streaming chunk is received.

    Attributes:
        task_id: A2A task ID for the streaming session.
        context_id: A2A context ID grouping related tasks.
        chunk: The text content of the chunk.
        chunk_index: Index of this chunk in the stream (0-indexed).
        final: Whether this is the final chunk in the stream.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        turn_number: Current turn number (1-indexed).
        is_multiturn: Whether this is part of a multiturn conversation.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_streaming_chunk"
    task_id: str | None = None
    context_id: str | None = None
    chunk: str
    chunk_index: int
    final: bool = False
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    turn_number: int = 1
    is_multiturn: bool = False
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None

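To make the chunk fields concrete, a small reassembly sketch — chunks arrive in `chunk_index` order and `final=True` closes the stream (registration style is an assumption, as above).

buffers: dict[str | None, list[str]] = {}

@crewai_event_bus.on(A2AStreamingChunkEvent)  # assumption: decorator-style registration
def collect_chunk(source, event) -> None:
    parts = buffers.setdefault(event.task_id, [])
    parts.append(event.chunk)
    if event.final:  # last chunk: flush the assembled text
        print("".join(parts))
        buffers.pop(event.task_id, None)
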
class A2AAgentCardFetchedEvent(A2AEventBase):
    """Event emitted when an agent card is successfully fetched.

    Attributes:
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        agent_card: Full A2A agent card metadata.
        protocol_version: A2A protocol version from agent card.
        provider: Agent provider/organization info from agent card.
        cached: Whether the agent card was retrieved from cache.
        fetch_time_ms: Time taken to fetch the agent card in milliseconds.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_agent_card_fetched"
    endpoint: str
    a2a_agent_name: str | None = None
    agent_card: dict[str, Any] | None = None
    protocol_version: str | None = None
    provider: dict[str, Any] | None = None
    cached: bool = False
    fetch_time_ms: float | None = None
    metadata: dict[str, Any] | None = None

class A2AAuthenticationFailedEvent(A2AEventBase):
    """Event emitted when authentication to an A2A agent fails.

    Attributes:
        endpoint: A2A agent endpoint URL.
        auth_type: Type of authentication attempted (e.g., bearer, oauth2, api_key).
        error: Error message describing the failure.
        status_code: HTTP status code if applicable.
        a2a_agent_name: Name of the A2A agent if known.
        protocol_version: A2A protocol version being used.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_authentication_failed"
    endpoint: str
    auth_type: str | None = None
    error: str
    status_code: int | None = None
    a2a_agent_name: str | None = None
    protocol_version: str | None = None
    metadata: dict[str, Any] | None = None

class A2AArtifactReceivedEvent(A2AEventBase):
    """Event emitted when an artifact is received from a remote A2A agent.

    Attributes:
        task_id: A2A task ID the artifact belongs to.
        artifact_id: Unique identifier for the artifact.
        artifact_name: Name of the artifact.
        artifact_description: Purpose description of the artifact.
        mime_type: MIME type of the artifact content.
        size_bytes: Size of the artifact in bytes.
        append: Whether content should be appended to existing artifact.
        last_chunk: Whether this is the final chunk of the artifact.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        context_id: Context ID for correlation.
        turn_number: Current turn number (1-indexed).
        is_multiturn: Whether this is part of a multiturn conversation.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_artifact_received"
    task_id: str
    artifact_id: str
    artifact_name: str | None = None
    artifact_description: str | None = None
    mime_type: str | None = None
    size_bytes: int | None = None
    append: bool = False
    last_chunk: bool = False
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    context_id: str | None = None
    turn_number: int = 1
    is_multiturn: bool = False
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None

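The `append`/`last_chunk` flags describe chunked artifact delivery. The event carries artifact metadata rather than the bytes themselves, so this hedged sketch only tracks per-artifact progress (the tracking dict is illustrative).

received_bytes: dict[str, int] = {}

def track_artifact(event: A2AArtifactReceivedEvent) -> None:
    if not event.append:
        received_bytes[event.artifact_id] = 0  # append=False starts (or restarts) the artifact
    received_bytes[event.artifact_id] = received_bytes.get(event.artifact_id, 0) + (
        event.size_bytes or 0
    )
    if event.last_chunk:  # last_chunk=True closes the artifact
        total = received_bytes.pop(event.artifact_id)
        print(f"artifact {event.artifact_id} complete: ~{total} bytes")
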
class A2AConnectionErrorEvent(A2AEventBase):
    """Event emitted when a connection error occurs during A2A communication.

    Attributes:
        endpoint: A2A agent endpoint URL.
        error: Error message describing the connection failure.
        error_type: Type of error (e.g., timeout, connection_refused, dns_error).
        status_code: HTTP status code if applicable.
        a2a_agent_name: Name of the A2A agent from agent card.
        operation: The operation being attempted when error occurred.
        context_id: A2A context ID grouping related tasks.
        task_id: A2A task ID if applicable.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_connection_error"
    endpoint: str
    error: str
    error_type: str | None = None
    status_code: int | None = None
    a2a_agent_name: str | None = None
    operation: str | None = None
    context_id: str | None = None
    task_id: str | None = None
    metadata: dict[str, Any] | None = None

class A2AServerTaskStartedEvent(A2AEventBase):
    """Event emitted when an A2A server task execution starts.

    Attributes:
        task_id: A2A task ID for this execution.
        context_id: A2A context ID grouping related tasks.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_server_task_started"
    task_id: str
    context_id: str
    metadata: dict[str, Any] | None = None


class A2AServerTaskCompletedEvent(A2AEventBase):
    """Event emitted when an A2A server task execution completes.

    Attributes:
        task_id: A2A task ID for this execution.
        context_id: A2A context ID grouping related tasks.
        result: The task result.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_server_task_completed"
    task_id: str
    context_id: str
    result: str
    metadata: dict[str, Any] | None = None


class A2AServerTaskCanceledEvent(A2AEventBase):
    """Event emitted when an A2A server task execution is canceled.

    Attributes:
        task_id: A2A task ID for this execution.
        context_id: A2A context ID grouping related tasks.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_server_task_canceled"
    task_id: str
    context_id: str
    metadata: dict[str, Any] | None = None


class A2AServerTaskFailedEvent(A2AEventBase):
    """Event emitted when an A2A server task execution fails.

    Attributes:
        task_id: A2A task ID for this execution.
        context_id: A2A context ID grouping related tasks.
        error: Error message describing the failure.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_server_task_failed"
    task_id: str
    context_id: str
    error: str
    metadata: dict[str, Any] | None = None

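The four server-side events map onto a simple task lifecycle; a compact dispatch sketch (how the handler is wired up is left as an assumption).

def on_server_task_event(event: A2AEventBase) -> None:
    # Started -> Completed | Canceled | Failed, all correlated by task_id/context_id.
    if isinstance(event, A2AServerTaskStartedEvent):
        print(f"{event.task_id} started (context {event.context_id})")
    elif isinstance(event, A2AServerTaskCompletedEvent):
        print(f"{event.task_id} completed: {event.result[:80]}")
    elif isinstance(event, A2AServerTaskCanceledEvent):
        print(f"{event.task_id} canceled")
    elif isinstance(event, A2AServerTaskFailedEvent):
        print(f"{event.task_id} failed: {event.error}")
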
class A2AParallelDelegationStartedEvent(A2AEventBase):
    """Event emitted when parallel delegation to multiple A2A agents begins.

    Attributes:
        endpoints: List of A2A agent endpoints being delegated to.
        task_description: Description of the task being delegated.
    """

    type: str = "a2a_parallel_delegation_started"
    endpoints: list[str]
    task_description: str


class A2AParallelDelegationCompletedEvent(A2AEventBase):
    """Event emitted when parallel delegation to multiple A2A agents completes.

    Attributes:
        endpoints: List of A2A agent endpoints that were delegated to.
        success_count: Number of successful delegations.
        failure_count: Number of failed delegations.
        results: Summary of results from each agent.
    """

    type: str = "a2a_parallel_delegation_completed"
    endpoints: list[str]
    success_count: int
    failure_count: int
    results: dict[str, str] | None = None

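The counters on the completion event make success-rate reporting a one-liner; a short illustrative consumer.

def report_parallel_delegation(event: A2AParallelDelegationCompletedEvent) -> None:
    total = event.success_count + event.failure_count
    rate = event.success_count / total if total else 0.0  # guard against zero delegations
    print(
        f"{event.success_count}/{total} delegations succeeded ({rate:.0%}) "
        f"across {len(event.endpoints)} endpoint(s)"
    )
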
@@ -366,6 +366,32 @@ To enable tracing, do any one of these:

        self.print_panel(content, f"🔧 Tool Execution Started (#{iteration})", "yellow")

    def handle_tool_usage_finished(
        self,
        tool_name: str,
        output: str,
        run_attempts: int | None = None,
    ) -> None:
        """Handle tool usage finished event with panel display."""
        if not self.verbose:
            return

        iteration = self.tool_usage_counts.get(tool_name, 1)

        content = Text()
        content.append("Tool Completed\n", style="green bold")
        content.append("Tool: ", style="white")
        content.append(f"{tool_name}\n", style="green bold")

        if output:
            content.append("Output: ", style="white")
            content.append(f"{output}\n", style="green")

        self.print_panel(
            content, f"✅ Tool Execution Completed (#{iteration})", "green"
        )

    def handle_tool_usage_error(
        self,
        tool_name: str,
@@ -1,4 +1,4 @@
-from crewai.experimental.crew_agent_executor_flow import CrewAgentExecutorFlow
+from crewai.experimental.agent_executor import AgentExecutor, CrewAgentExecutorFlow
from crewai.experimental.evaluation import (
    AgentEvaluationResult,
    AgentEvaluator,

@@ -23,8 +23,9 @@ from crewai.experimental.evaluation import (
__all__ = [
    "AgentEvaluationResult",
    "AgentEvaluator",
+    "AgentExecutor",
    "BaseEvaluator",
-    "CrewAgentExecutorFlow",
+    "CrewAgentExecutorFlow",  # Deprecated alias for AgentExecutor
    "EvaluationScore",
    "EvaluationTraceCallback",
    "ExperimentResult",
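Because the old name is kept as a plain alias (see `CrewAgentExecutorFlow = AgentExecutor` near the end of this diff), existing imports keep working. A quick sanity check, assuming both names are re-exported as `__all__` suggests:

from crewai.experimental import AgentExecutor, CrewAgentExecutorFlow

# The deprecated name is an alias, so both resolve to the same class object.
assert CrewAgentExecutorFlow is AgentExecutor
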
@@ -1,7 +1,8 @@
from __future__ import annotations

-from collections.abc import Callable
+from collections.abc import Callable, Coroutine
import threading
import time
from typing import TYPE_CHECKING, Any, Literal, cast
from uuid import uuid4

@@ -17,16 +18,24 @@ from crewai.agents.parser import (
    OutputParserError,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.listeners.tracing.utils import (
    is_tracing_enabled_in_context,
)
from crewai.events.types.logging_events import (
    AgentLogsExecutionEvent,
    AgentLogsStartedEvent,
)
from crewai.events.types.tool_usage_events import (
    ToolUsageFinishedEvent,
    ToolUsageStartedEvent,
)
from crewai.flow.flow import Flow, listen, or_, router, start
from crewai.hooks.llm_hooks import (
    get_after_llm_call_hooks,
    get_before_llm_call_hooks,
)
from crewai.utilities.agent_utils import (
    convert_tools_to_openai_schema,
    enforce_rpm_limit,
    format_message_for_llm,
    get_llm_response,

@@ -37,6 +46,7 @@ from crewai.utilities.agent_utils import (
    handle_unknown_error,
    has_reached_max_iterations,
    is_context_length_exceeded,
    is_inside_event_loop,
    process_llm_response,
)
from crewai.utilities.constants import TRAINING_DATA_FILE
@@ -71,15 +81,21 @@ class AgentReActState(BaseModel):
    current_answer: AgentAction | AgentFinish | None = Field(default=None)
    is_finished: bool = Field(default=False)
    ask_for_human_input: bool = Field(default=False)
    use_native_tools: bool = Field(default=False)
    pending_tool_calls: list[Any] = Field(default_factory=list)


-class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
-    """Flow-based executor matching CrewAgentExecutor interface.
+class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
+    """Agent Executor for both standalone agents and crew-bound agents.

    Inherits from:
    - Flow[AgentReActState]: Provides flow orchestration capabilities
    - CrewAgentExecutorMixin: Provides memory methods (short/long/external term)

    This executor can operate in two modes:
    - Standalone mode: When crew and task are None (used by Agent.kickoff())
    - Crew mode: When crew and task are provided (used by Agent.execute_task())

    Note: Multiple instances may be created during agent initialization
    (cache setup, RPM controller setup, etc.) but only the final instance
    should execute tasks via invoke().
@@ -88,8 +104,6 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
    def __init__(
        self,
        llm: BaseLLM,
-        task: Task,
-        crew: Crew,
        agent: Agent,
        prompt: SystemPromptResult | StandardPromptResult,
        max_iter: int,

@@ -98,6 +112,8 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
        stop_words: list[str],
        tools_description: str,
        tools_handler: ToolsHandler,
+        task: Task | None = None,
+        crew: Crew | None = None,
        step_callback: Any = None,
        original_tools: list[BaseTool] | None = None,
        function_calling_llm: BaseLLM | Any | None = None,

@@ -106,13 +122,12 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
        callbacks: list[Any] | None = None,
        response_model: type[BaseModel] | None = None,
        i18n: I18N | None = None,
        max_tools_per_turn: int = 10,
    ) -> None:
        """Initialize the flow-based agent executor.

        Args:
            llm: Language model instance.
-            task: Task to execute.
-            crew: Crew instance.
            agent: Agent to execute.
            prompt: Prompt templates.
            max_iter: Maximum iterations.

@@ -121,6 +136,8 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
            stop_words: Stop word list.
            tools_description: Tool descriptions.
            tools_handler: Tool handler instance.
+            task: Optional task to execute (None for standalone agent execution).
+            crew: Optional crew instance (None for standalone agent execution).
            step_callback: Optional step callback.
            original_tools: Original tool list.
            function_calling_llm: Optional function calling LLM.
@@ -129,11 +146,12 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
            callbacks: Optional callbacks list.
            response_model: Optional Pydantic model for structured outputs.
        """
        print("lorenze using agent executor")
        self._i18n: I18N = i18n or get_i18n()
        self.llm = llm
-        self.task = task
+        self.task: Task | None = task
        self.agent = agent
-        self.crew = crew
+        self.crew: Crew | None = crew
        self.prompt = prompt
        self.tools = tools
        self.tools_names = tools_names

@@ -149,6 +167,7 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
        self.respect_context_window = respect_context_window
        self.request_within_rpm_limit = request_within_rpm_limit
        self.response_model = response_model
        self.max_tools_per_turn = max_tools_per_turn
        self.log_error_after = 3
        self._console: Console = Console()

@@ -178,7 +197,6 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
                else self.stop
            )
        )

        self._state = AgentReActState()

    def _ensure_flow_initialized(self) -> None:
@@ -189,14 +207,66 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
        Only the instance that actually executes via invoke() will emit events.
        """
        if not self._flow_initialized:
            current_tracing = is_tracing_enabled_in_context()
            # Now call Flow's __init__ which will replace self._state
            # with Flow's managed state. Suppress flow events since this is
            # an agent executor, not a user-facing flow.
            super().__init__(
                suppress_flow_events=True,
                tracing=current_tracing if current_tracing else None,
            )
            self._flow_initialized = True

    def _check_native_tool_support(self) -> bool:
        """Check if LLM supports native function calling.

        Returns:
            True if the LLM supports native function calling and tools are available.
        """
        return (
            hasattr(self.llm, "supports_function_calling")
            and callable(getattr(self.llm, "supports_function_calling", None))
            and self.llm.supports_function_calling()
            and bool(self.original_tools)
        )

    def _setup_native_tools(self) -> None:
        """Convert tools to OpenAI schema format for native function calling."""
        if self.original_tools:
            self._openai_tools, self._available_functions = (
                convert_tools_to_openai_schema(self.original_tools)
            )

    def _is_tool_call_list(self, response: list[Any]) -> bool:
        """Check if a response is a list of tool calls.

        Args:
            response: The response to check.

        Returns:
            True if the response appears to be a list of tool calls.
        """
        if not response:
            return False
        first_item = response[0]
        # Check for OpenAI-style tool call structure
        if hasattr(first_item, "function") or (
            isinstance(first_item, dict) and "function" in first_item
        ):
            return True
        # Check for Anthropic-style tool call structure (ToolUseBlock)
        if (
            hasattr(first_item, "type")
            and getattr(first_item, "type", None) == "tool_use"
        ):
            return True
        if hasattr(first_item, "name") and hasattr(first_item, "input"):
            return True
        # Check for Gemini-style function call (Part with function_call)
        if hasattr(first_item, "function_call") and first_item.function_call:
            return True
        return False

    @property
    def use_stop_words(self) -> bool:
        """Check to determine if stop words are being used.
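The detector above keys on provider-specific shapes. A self-contained sketch of the three shapes it accepts — `SimpleNamespace` stands in for the real SDK objects, which is purely illustrative.

from types import SimpleNamespace

openai_style = {"id": "call_1", "function": {"name": "search", "arguments": "{}"}}
anthropic_style = SimpleNamespace(type="tool_use", name="search", input={"q": "x"})
gemini_style = SimpleNamespace(
    function_call=SimpleNamespace(name="search", args={"q": "x"})
)

# Each single-element list would satisfy a different branch of _is_tool_call_list:
# the dict via its "function" key, the tool_use object via its type attribute,
# and the Gemini-style object via its truthy function_call attribute.
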
@@ -229,6 +299,11 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
    def initialize_reasoning(self) -> Literal["initialized"]:
        """Initialize the reasoning flow and emit agent start logs."""
        self._show_start_logs()
        # Check for native tool support on first iteration
        if self.state.iterations == 0:
            self.state.use_native_tools = self._check_native_tool_support()
            if self.state.use_native_tools:
                self._setup_native_tools()
        return "initialized"

    @listen("force_final_answer")
@@ -255,8 +330,21 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
        Returns routing decision based on parsing result.
        """
        try:
            iteration_start = time.time()
            print(f"\n{'=' * 60}")
            print(
                f"[{time.strftime('%H:%M:%S')}] ITERATION {self.state.iterations} - call_llm_and_parse START (ReAct)"
            )
            print(
                f"[{time.strftime('%H:%M:%S')}] Messages count: {len(self.state.messages)}"
            )
            print(f"{'=' * 60}")

            enforce_rpm_limit(self.request_within_rpm_limit)

            llm_start = time.time()
            print(f"[{time.strftime('%H:%M:%S')}] LLM CALL START")

            answer = get_llm_response(
                llm=self.llm,
                messages=list(self.state.messages),
@@ -264,12 +352,25 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
                printer=self._printer,
                from_task=self.task,
                from_agent=self.agent,
-                response_model=self.response_model,
+                response_model=None,
                executor_context=self,
            )

            llm_elapsed = time.time() - llm_start
            print(
                f"[{time.strftime('%H:%M:%S')}] LLM CALL END - took {llm_elapsed:.2f}s"
            )
            print(
                f"[{time.strftime('%H:%M:%S')}] Answer length: {len(answer) if answer else 0} chars"
            )

            # Parse the LLM response
            parse_start = time.time()
            formatted_answer = process_llm_response(answer, self.use_stop_words)
            parse_elapsed = time.time() - parse_start
            print(
                f"[{time.strftime('%H:%M:%S')}] Parsing took {parse_elapsed:.3f}s -> {type(formatted_answer).__name__}"
            )
            self.state.current_answer = formatted_answer

            if "Final Answer:" in answer and isinstance(formatted_answer, AgentAction):
@@ -285,6 +386,10 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
                preview_text.append(f"{answer[:200]}...", style="yellow dim")
                self._console.print(preview_text)

            iteration_elapsed = time.time() - iteration_start
            print(
                f"[{time.strftime('%H:%M:%S')}] ITERATION total: {iteration_elapsed:.2f}s"
            )
            return "parsed"

        except OutputParserError as e:
@@ -303,6 +408,129 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
            handle_unknown_error(self._printer, e)
            raise

    @listen("continue_reasoning_native")
    def call_llm_native_tools(
        self,
    ) -> Literal["native_tool_calls", "native_finished", "context_error"]:
        """Execute LLM call with native function calling.

        Always calls the LLM so it can read reflection prompts and decide
        whether to provide a final answer or request more tools.

        Returns routing decision based on whether tool calls or final answer.
        """
        try:
            iteration_start = time.time()
            print(f"\n{'=' * 60}")
            print(
                f"[{time.strftime('%H:%M:%S')}] ITERATION {self.state.iterations} - call_llm_native_tools START"
            )
            print(
                f"[{time.strftime('%H:%M:%S')}] pending_tool_calls before LLM: {len(self.state.pending_tool_calls)}"
            )
            print(
                f"[{time.strftime('%H:%M:%S')}] Messages count: {len(self.state.messages)}"
            )
            print(f"{'=' * 60}")

            # Clear pending tools - LLM will decide what to do next after reading
            # the reflection prompt. It can either:
            # 1. Return a final answer (string) if it has enough info
            # 2. Return tool calls (possibly same ones, or different ones)
            self.state.pending_tool_calls.clear()

            enforce_rpm_limit(self.request_within_rpm_limit)

            last_msg_content = (
                self.state.messages[-1].get("content", "")
                if self.state.messages
                else ""
            )
            last_msg_preview = (
                last_msg_content[:200] if last_msg_content else "(no content)"
            )
            print(
                f"[{time.strftime('%H:%M:%S')}] Last message to LLM: {last_msg_preview}..."
            )

            # Call LLM with native tools
            llm_start = time.time()
            print(f"[{time.strftime('%H:%M:%S')}] LLM CALL START")

            answer = get_llm_response(
                llm=self.llm,
                messages=list(self.state.messages),
                callbacks=self.callbacks,
                printer=self._printer,
                tools=self._openai_tools,
                available_functions=None,
                from_task=self.task,
                from_agent=self.agent,
                response_model=None,
                executor_context=self,
            )

            llm_elapsed = time.time() - llm_start
            print(
                f"[{time.strftime('%H:%M:%S')}] LLM CALL END - took {llm_elapsed:.2f}s"
            )
            print(f"[{time.strftime('%H:%M:%S')}] Answer type: {type(answer).__name__}")

            # Check if the response is a list of tool calls
            if isinstance(answer, list) and answer and self._is_tool_call_list(answer):
                # Store tool calls for sequential processing
                self.state.pending_tool_calls = list(answer)
                iteration_elapsed = time.time() - iteration_start
                print(
                    f"[{time.strftime('%H:%M:%S')}] -> Routing to native_tool_calls ({len(answer)} tools)"
                )
                print(
                    f"[{time.strftime('%H:%M:%S')}] ITERATION total: {iteration_elapsed:.2f}s"
                )
                return "native_tool_calls"

            # Text response - this is the final answer
            if isinstance(answer, str):
                self.state.current_answer = AgentFinish(
                    thought="",
                    output=answer,
                    text=answer,
                )
                self._invoke_step_callback(self.state.current_answer)
                self._append_message_to_state(answer)
                iteration_elapsed = time.time() - iteration_start
                print(
                    f"[{time.strftime('%H:%M:%S')}] -> FINAL ANSWER (string, len={len(answer)})"
                )
                print(
                    f"[{time.strftime('%H:%M:%S')}] ITERATION total: {iteration_elapsed:.2f}s"
                )
                return "native_finished"

            # Unexpected response type, treat as final answer
            self.state.current_answer = AgentFinish(
                thought="",
                output=str(answer),
                text=str(answer),
            )
            self._invoke_step_callback(self.state.current_answer)
            self._append_message_to_state(str(answer))
            iteration_elapsed = time.time() - iteration_start
            print(f"[{time.strftime('%H:%M:%S')}] -> FINAL ANSWER (unexpected type)")
            print(
                f"[{time.strftime('%H:%M:%S')}] ITERATION total: {iteration_elapsed:.2f}s"
            )
            return "native_finished"

        except Exception as e:
            if is_context_length_exceeded(e):
                self._last_context_error = e
                return "context_error"
            if e.__class__.__module__.startswith("litellm"):
                raise e
            handle_unknown_error(self._printer, e)
            raise

    @router(call_llm_and_parse)
    def route_by_answer_type(self) -> Literal["execute_tool", "agent_finished"]:
        """Route based on whether answer is AgentAction or AgentFinish."""
@@ -358,6 +586,14 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
                    self.state.is_finished = True
                    return "tool_result_is_final"

                # Inject post-tool reasoning prompt to enforce analysis
                reasoning_prompt = self._i18n.slice("post_tool_reasoning")
                reasoning_message: LLMMessage = {
                    "role": "user",
                    "content": reasoning_prompt,
                }
                self.state.messages.append(reasoning_message)

                return "tool_completed"

        except Exception as e:
@@ -367,6 +603,173 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
            self._console.print(error_text)
            raise

    @listen("native_tool_calls")
    def execute_native_tool(self) -> Literal["native_tool_completed"]:
        """Execute a SINGLE native tool call with reflection after.

        Processes only the first tool from pending_tool_calls, then asks
        the LLM if it can answer the task. Remaining tools stay in the queue
        for potential execution on next iteration.
        """
        if not self.state.pending_tool_calls:
            return "native_tool_completed"

        # Pop just the first tool (leave the rest in queue for potential continuation)
        tool_call = self.state.pending_tool_calls.pop(0)
        print(
            f"Executing 1 tool, {len(self.state.pending_tool_calls)} remaining in queue"
        )

        # Extract tool call info - handle OpenAI, Anthropic, and Gemini formats
        if hasattr(tool_call, "function"):
            # OpenAI format: has .function.name and .function.arguments
            call_id = getattr(tool_call, "id", f"call_{id(tool_call)}")
            func_name = tool_call.function.name
            func_args = tool_call.function.arguments
        elif hasattr(tool_call, "function_call") and tool_call.function_call:
            # Gemini format: has .function_call.name and .function_call.args
            call_id = f"call_{id(tool_call)}"
            func_name = tool_call.function_call.name
            func_args = (
                dict(tool_call.function_call.args)
                if tool_call.function_call.args
                else {}
            )
        elif hasattr(tool_call, "name") and hasattr(tool_call, "input"):
            # Anthropic format: has .name and .input (ToolUseBlock)
            call_id = getattr(tool_call, "id", f"call_{id(tool_call)}")
            func_name = tool_call.name
            func_args = tool_call.input  # Already a dict in Anthropic
        elif isinstance(tool_call, dict):
            call_id = tool_call.get("id", f"call_{id(tool_call)}")
            func_info = tool_call.get("function", {})
            func_name = func_info.get("name", "") or tool_call.get("name", "")
            func_args = func_info.get("arguments", "{}") or tool_call.get("input", {})
        else:
            # Unrecognized format - skip and try next
            return "native_tool_completed"

        # Append assistant message with single tool call
        assistant_message: LLMMessage = {
            "role": "assistant",
            "content": None,
            "tool_calls": [
                {
                    "id": call_id,
                    "type": "function",
                    "function": {
                        "name": func_name,
                        "arguments": func_args
                        if isinstance(func_args, str)
                        else json.dumps(func_args),
                    },
                }
            ],
        }
        self.state.messages.append(assistant_message)

        # Parse arguments for the single tool call
        if isinstance(func_args, str):
            try:
                args_dict = json.loads(func_args)
            except json.JSONDecodeError:
                args_dict = {}
        else:
            args_dict = func_args

        # Emit tool usage started event
        started_at = datetime.now()
        crewai_event_bus.emit(
            self,
            event=ToolUsageStartedEvent(
                tool_name=func_name,
                tool_args=args_dict,
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        # Execute the tool
        result = "Tool not found"
        if func_name in self._available_functions:
            try:
                tool_func = self._available_functions[func_name]
                result = tool_func(**args_dict)
                if not isinstance(result, str):
                    result = str(result)
            except Exception as e:
                result = f"Error executing tool: {e}"

        # Emit tool usage finished event
        crewai_event_bus.emit(
            self,
            event=ToolUsageFinishedEvent(
                output=result,
                tool_name=func_name,
                tool_args=args_dict,
                from_agent=self.agent,
                from_task=self.task,
                started_at=started_at,
                finished_at=datetime.now(),
            ),
        )

        # Append tool result message
        tool_message: LLMMessage = {
            "role": "tool",
            "tool_call_id": call_id,
            "content": result,
        }
        self.state.messages.append(tool_message)

        # Log the tool execution
        if self.agent and self.agent.verbose:
            self._printer.print(
                content=f"Tool {func_name} executed with result: {result[:200]}...",
                color="green",
            )

        # Only add reflection prompt if there are still pending tools
        # If no pending tools, skip reflection - LLM will naturally continue
        if self.state.pending_tool_calls:
            print("--------------------------------")
            print(
                f"REFLECTION: {len(self.state.pending_tool_calls)} tools pending - asking LLM to decide"
            )
            print("--------------------------------")
            reasoning_prompt = self._i18n.slice("post_tool_reasoning")

            reasoning_message: LLMMessage = {
                "role": "user",
                "content": reasoning_prompt,
            }
            self.state.messages.append(reasoning_message)
        else:
            print("--------------------------------")
            print("SKIPPING REFLECTION: No pending tools - LLM will continue naturally")
            print("--------------------------------")

        return "native_tool_completed"

    def _extract_tool_name(self, tool_call: Any) -> str:
        """Extract tool name from various tool call formats."""
        if hasattr(tool_call, "function"):
            return tool_call.function.name
        if hasattr(tool_call, "function_call") and tool_call.function_call:
            return tool_call.function_call.name
        if hasattr(tool_call, "name"):
            return tool_call.name
        if isinstance(tool_call, dict):
            func_info = tool_call.get("function", {})
            return func_info.get("name", "") or tool_call.get("name", "unknown")
        return "unknown"

    @router(execute_native_tool)
    def increment_native_and_continue(self) -> Literal["initialized"]:
        """Increment iteration counter after native tool execution."""
        self.state.iterations += 1
        return "initialized"

    @listen("initialized")
    def continue_iteration(self) -> Literal["check_iteration"]:
        """Bridge listener that connects iteration loop back to iteration check."""
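The one-tool-per-iteration loop above is easier to see in miniature — pop one call, execute it, and re-prompt for reflection only while calls remain queued. A toy sketch with plain strings standing in for tool calls (in the real flow the next LLM call may clear or replace the queue).

pending = ["lookup", "summarize", "format"]

while pending:
    tool = pending.pop(0)  # execute exactly one tool per iteration
    print(f"ran {tool}; {len(pending)} still queued")
    if pending:
        print("  -> inject post_tool_reasoning prompt")  # reflect only if tools remain
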
@@ -375,10 +778,14 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
    @router(or_(initialize_reasoning, continue_iteration))
    def check_max_iterations(
        self,
-    ) -> Literal["force_final_answer", "continue_reasoning"]:
+    ) -> Literal[
+        "force_final_answer", "continue_reasoning", "continue_reasoning_native"
+    ]:
        """Check if max iterations reached before proceeding with reasoning."""
        if has_reached_max_iterations(self.state.iterations, self.max_iter):
            return "force_final_answer"
        if self.state.use_native_tools:
            return "continue_reasoning_native"
        return "continue_reasoning"

    @router(execute_tool_action)
@@ -387,7 +794,7 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
        self.state.iterations += 1
        return "initialized"

-    @listen(or_("agent_finished", "tool_result_is_final"))
+    @listen(or_("agent_finished", "tool_result_is_final", "native_finished"))
    def finalize(self) -> Literal["completed", "skipped"]:
        """Finalize execution and emit completion logs."""
        if self.state.current_answer is None:
@@ -449,15 +856,25 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):

        return "initialized"

-    def invoke(self, inputs: dict[str, Any]) -> dict[str, Any]:
+    def invoke(
+        self, inputs: dict[str, Any]
+    ) -> dict[str, Any] | Coroutine[Any, Any, dict[str, Any]]:
        """Execute agent with given inputs.

        When called from within an existing event loop (e.g., inside a Flow),
        this method returns a coroutine that should be awaited. The Flow
        framework handles this automatically.

        Args:
            inputs: Input dictionary containing prompt variables.

        Returns:
-            Dictionary with agent output.
+            Dictionary with agent output, or a coroutine if inside an event loop.
        """
        # Magic auto-async: if inside event loop, return coroutine for Flow to await
        if is_inside_event_loop():
            return self.invoke_async(inputs)

        self._ensure_flow_initialized()

        with self._execution_lock:
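Usage follows from the signature change — synchronous callers get a dict back, while callers already inside an event loop receive a coroutine to await. A hedged sketch; executor construction is elided.

# Synchronous context: invoke() runs the flow to completion and returns a dict.
result = executor.invoke({"input": "value"})  # -> {"output": ...}

# Inside a running event loop (e.g., an async Flow method), the same call
# returns a coroutine, so it must be awaited:
async def run(executor, inputs: dict) -> dict:
    return await executor.invoke(inputs)  # auto-async path
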
@@ -525,6 +942,174 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
        finally:
            self._is_executing = False

    async def invoke_async(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Execute agent asynchronously with given inputs.

        This method is designed for use within async contexts, such as when
        the agent is called from within an async Flow method. It uses
        kickoff_async() directly instead of running in a separate thread.

        Args:
            inputs: Input dictionary containing prompt variables.

        Returns:
            Dictionary with agent output, or a coroutine if inside an event loop.
        """
        # Magic auto-async: if inside event loop, return coroutine for Flow to await
        if is_inside_event_loop():
            return self.invoke_async(inputs)

        self._ensure_flow_initialized()

        with self._execution_lock:
            if self._is_executing:
                raise RuntimeError(
                    "Executor is already running. "
                    "Cannot invoke the same executor instance concurrently."
                )
            self._is_executing = True
            self._has_been_invoked = True

        try:
            # Reset state for fresh execution
            self.state.messages.clear()
            self.state.iterations = 0
            self.state.current_answer = None
            self.state.is_finished = False

            if "system" in self.prompt:
                prompt = cast("SystemPromptResult", self.prompt)
                system_prompt = self._format_prompt(prompt["system"], inputs)
                user_prompt = self._format_prompt(prompt["user"], inputs)
                self.state.messages.append(
                    format_message_for_llm(system_prompt, role="system")
                )
                self.state.messages.append(format_message_for_llm(user_prompt))
            else:
                user_prompt = self._format_prompt(self.prompt["prompt"], inputs)
                self.state.messages.append(format_message_for_llm(user_prompt))

            self.state.ask_for_human_input = bool(
                inputs.get("ask_for_human_input", False)
            )

            # Use async kickoff directly since we're already in an async context
            await self.kickoff_async()

            formatted_answer = self.state.current_answer

            if not isinstance(formatted_answer, AgentFinish):
                raise RuntimeError(
                    "Agent execution ended without reaching a final answer."
                )

            if self.state.ask_for_human_input:
                formatted_answer = self._handle_human_feedback(formatted_answer)

            self._create_short_term_memory(formatted_answer)
            self._create_long_term_memory(formatted_answer)
            self._create_external_memory(formatted_answer)

            return {"output": formatted_answer.output}

        except AssertionError:
            fail_text = Text()
            fail_text.append("❌ ", style="red bold")
            fail_text.append(
                "Agent failed to reach a final answer. This is likely a bug - please report it.",
                style="red",
            )
            self._console.print(fail_text)
            raise
        except Exception as e:
            handle_unknown_error(self._printer, e)
            raise
        finally:
            self._is_executing = False

    async def invoke_async(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Execute agent asynchronously with given inputs.

        This method is designed for use within async contexts, such as when
        the agent is called from within an async Flow method. It uses
        kickoff_async() directly instead of running in a separate thread.

        Args:
            inputs: Input dictionary containing prompt variables.

        Returns:
            Dictionary with agent output.
        """
        self._ensure_flow_initialized()

        with self._execution_lock:
            if self._is_executing:
                raise RuntimeError(
                    "Executor is already running. "
                    "Cannot invoke the same executor instance concurrently."
                )
            self._is_executing = True
            self._has_been_invoked = True

        try:
            # Reset state for fresh execution
            self.state.messages.clear()
            self.state.iterations = 0
            self.state.current_answer = None
            self.state.is_finished = False
            self.state.use_native_tools = False
            self.state.pending_tool_calls = []

            if "system" in self.prompt:
                prompt = cast("SystemPromptResult", self.prompt)
                system_prompt = self._format_prompt(prompt["system"], inputs)
                user_prompt = self._format_prompt(prompt["user"], inputs)
                self.state.messages.append(
                    format_message_for_llm(system_prompt, role="system")
                )
                self.state.messages.append(format_message_for_llm(user_prompt))
            else:
                user_prompt = self._format_prompt(self.prompt["prompt"], inputs)
                self.state.messages.append(format_message_for_llm(user_prompt))

            self.state.ask_for_human_input = bool(
                inputs.get("ask_for_human_input", False)
            )

            # Use async kickoff directly since we're already in an async context
            await self.kickoff_async()

            formatted_answer = self.state.current_answer

            if not isinstance(formatted_answer, AgentFinish):
                raise RuntimeError(
                    "Agent execution ended without reaching a final answer."
                )

            if self.state.ask_for_human_input:
                formatted_answer = self._handle_human_feedback(formatted_answer)

            self._create_short_term_memory(formatted_answer)
            self._create_long_term_memory(formatted_answer)
            self._create_external_memory(formatted_answer)

            return {"output": formatted_answer.output}

        except AssertionError:
            fail_text = Text()
            fail_text.append("❌ ", style="red bold")
            fail_text.append(
                "Agent failed to reach a final answer. This is likely a bug - please report it.",
                style="red",
            )
            self._console.print(fail_text)
            raise
        except Exception as e:
            handle_unknown_error(self._printer, e)
            raise
        finally:
            self._is_executing = False

    def _handle_agent_action(
        self, formatted_answer: AgentAction, tool_result: ToolResult
    ) -> AgentAction | AgentFinish:
@@ -583,11 +1168,14 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
        if self.agent is None:
            raise ValueError("Agent cannot be None")

        if self.task is None:
            return

        crewai_event_bus.emit(
            self.agent,
            AgentLogsStartedEvent(
                agent_role=self.agent.role,
-                task_description=(self.task.description if self.task else "Not Found"),
+                task_description=self.task.description,
                verbose=self.agent.verbose
                or (hasattr(self, "crew") and getattr(self.crew, "verbose", False)),
            ),
@@ -621,10 +1209,12 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
            result: Agent's final output.
            human_feedback: Optional feedback from human.
        """
        # Early return if no crew (standalone mode)
        if self.crew is None:
            return

        agent_id = str(self.agent.id)
-        train_iteration = (
-            getattr(self.crew, "_train_iteration", None) if self.crew else None
-        )
+        train_iteration = getattr(self.crew, "_train_iteration", None)

        if train_iteration is None or not isinstance(train_iteration, int):
            train_error = Text()
@@ -806,3 +1396,7 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
        requiring arbitrary_types_allowed=True.
        """
        return core_schema.any_schema()


# Backward compatibility alias (deprecated)
CrewAgentExecutorFlow = AgentExecutor
@@ -12,6 +12,7 @@ from concurrent.futures import Future
import copy
import inspect
import logging
import threading
from typing import (
    TYPE_CHECKING,
    Any,

@@ -64,6 +65,7 @@ from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.types import FlowExecutionData, FlowMethodName, PendingListenerKey
from crewai.flow.utils import (
    _extract_all_methods,
    _extract_all_methods_recursive,
    _normalize_condition,
    get_possible_return_constants,
    is_flow_condition_dict,

@@ -73,6 +75,7 @@ from crewai.flow.utils import (
    is_simple_flow_condition,
)


if TYPE_CHECKING:
    from crewai.flow.async_feedback.types import PendingFeedbackContext
    from crewai.flow.human_feedback import HumanFeedbackResult
@@ -396,6 +399,62 @@ def and_(*conditions: str | FlowCondition | Callable[..., Any]) -> FlowCondition
    return {"type": AND_CONDITION, "conditions": processed_conditions}


class StateProxy(Generic[T]):
    """Proxy that provides thread-safe access to flow state.

    Wraps state objects (dict or BaseModel) and uses a lock for all write
    operations to prevent race conditions when parallel listeners modify state.
    """

    __slots__ = ("_proxy_lock", "_proxy_state")

    def __init__(self, state: T, lock: threading.Lock) -> None:
        object.__setattr__(self, "_proxy_state", state)
        object.__setattr__(self, "_proxy_lock", lock)

    def __getattr__(self, name: str) -> Any:
        return getattr(object.__getattribute__(self, "_proxy_state"), name)

    def __setattr__(self, name: str, value: Any) -> None:
        if name in ("_proxy_state", "_proxy_lock"):
            object.__setattr__(self, name, value)
        else:
            with object.__getattribute__(self, "_proxy_lock"):
                setattr(object.__getattribute__(self, "_proxy_state"), name, value)

    def __getitem__(self, key: str) -> Any:
        return object.__getattribute__(self, "_proxy_state")[key]

    def __setitem__(self, key: str, value: Any) -> None:
        with object.__getattribute__(self, "_proxy_lock"):
            object.__getattribute__(self, "_proxy_state")[key] = value

    def __delitem__(self, key: str) -> None:
        with object.__getattribute__(self, "_proxy_lock"):
            del object.__getattribute__(self, "_proxy_state")[key]

    def __contains__(self, key: str) -> bool:
        return key in object.__getattribute__(self, "_proxy_state")

    def __repr__(self) -> str:
        return repr(object.__getattribute__(self, "_proxy_state"))

    def _unwrap(self) -> T:
        """Return the underlying state object."""
        return cast(T, object.__getattribute__(self, "_proxy_state"))

    def model_dump(self) -> dict[str, Any]:
        """Return state as a dictionary.

        Works for both dict and BaseModel underlying states.
        """
        state = object.__getattribute__(self, "_proxy_state")
        if isinstance(state, dict):
            return state
        result: dict[str, Any] = state.model_dump()
        return result

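A minimal sketch of the proxy in isolation — two threads writing different keys through the same lock. The dict state and key names are illustrative.

import threading

lock = threading.Lock()
state = StateProxy({"id": "demo", "progress": 0, "log": ""}, lock)

def write(key: str, value: object) -> None:
    state[key] = value  # each write acquires the shared lock

threads = [
    threading.Thread(target=write, args=("progress", 100)),
    threading.Thread(target=write, args=("log", "done")),
]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(state.model_dump())  # {'id': 'demo', 'progress': 100, 'log': 'done'}
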
class FlowMeta(type):
    def __new__(
        mcs,

@@ -519,7 +578,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
        self._methods: dict[FlowMethodName, FlowMethod[Any, Any]] = {}
        self._method_execution_counts: dict[FlowMethodName, int] = {}
        self._pending_and_listeners: dict[PendingListenerKey, set[FlowMethodName]] = {}
        self._fired_or_listeners: set[FlowMethodName] = (
            set()
        )  # Track OR listeners that already fired
        self._method_outputs: list[Any] = []  # list to store all method outputs
        self._state_lock = threading.Lock()
        self._or_listeners_lock = threading.Lock()
        self._completed_methods: set[FlowMethodName] = (
            set()
        )  # Track completed methods for reload
@@ -564,13 +628,182 @@ class Flow(Generic[T], metaclass=FlowMeta):
            method = method.__get__(self, self.__class__)
            self._methods[method.__name__] = method

    def _mark_or_listener_fired(self, listener_name: FlowMethodName) -> bool:
        """Mark an OR listener as fired atomically.

        Args:
            listener_name: The name of the OR listener to mark.

        Returns:
            True if this call was the first to fire the listener.
            False if the listener was already fired.
        """
        with self._or_listeners_lock:
            if listener_name in self._fired_or_listeners:
                return False
            self._fired_or_listeners.add(listener_name)
            return True

    def _clear_or_listeners(self) -> None:
        """Clear fired OR listeners for cyclic flows."""
        with self._or_listeners_lock:
            self._fired_or_listeners.clear()

    def _discard_or_listener(self, listener_name: FlowMethodName) -> None:
        """Discard a single OR listener from the fired set."""
        with self._or_listeners_lock:
            self._fired_or_listeners.discard(listener_name)

    def _build_racing_groups(self) -> dict[frozenset[FlowMethodName], FlowMethodName]:
        """Identify groups of methods that race for the same OR listener.

        Analyzes the flow graph to find listeners with OR conditions that have
        multiple trigger methods. These trigger methods form a "racing group"
        where only the first to complete should trigger the OR listener.

        Only methods that are EXCLUSIVELY sources for the OR listener are included
        in the racing group. Methods that are also triggers for other listeners
        (e.g., AND conditions) are not cancelled when another racing source wins.

        Returns:
            Dictionary mapping frozensets of racing method names to their
            shared OR listener name.

        Example:
            If we have `@listen(or_(method_a, method_b))` on `handler`,
            and method_a/method_b aren't used elsewhere,
            this returns: {frozenset({'method_a', 'method_b'}): 'handler'}
        """
        racing_groups: dict[frozenset[FlowMethodName], FlowMethodName] = {}

        method_to_listeners: dict[FlowMethodName, set[FlowMethodName]] = {}
        for listener_name, condition_data in self._listeners.items():
            if is_simple_flow_condition(condition_data):
                _, methods = condition_data
                for m in methods:
                    method_to_listeners.setdefault(m, set()).add(listener_name)
            elif is_flow_condition_dict(condition_data):
                all_methods = _extract_all_methods_recursive(condition_data)
                for m in all_methods:
                    method_name = FlowMethodName(m) if isinstance(m, str) else m
                    method_to_listeners.setdefault(method_name, set()).add(
                        listener_name
                    )

        for listener_name, condition_data in self._listeners.items():
            if listener_name in self._routers:
                continue

            trigger_methods: set[FlowMethodName] = set()

            if is_simple_flow_condition(condition_data):
                condition_type, methods = condition_data
                if condition_type == OR_CONDITION and len(methods) > 1:
                    trigger_methods = set(methods)

            elif is_flow_condition_dict(condition_data):
                top_level_type = condition_data.get("type", OR_CONDITION)
                if top_level_type == OR_CONDITION:
                    all_methods = _extract_all_methods_recursive(condition_data)
                    if len(all_methods) > 1:
                        trigger_methods = set(
                            FlowMethodName(m) if isinstance(m, str) else m
                            for m in all_methods
                        )

            if trigger_methods:
                exclusive_methods = {
                    m
                    for m in trigger_methods
                    if method_to_listeners.get(m, set()) == {listener_name}
                }
                if len(exclusive_methods) > 1:
                    racing_groups[frozenset(exclusive_methods)] = listener_name

        return racing_groups

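Concretely, in a flow shaped like the docstring's example the builder maps the two exclusive sources to their shared OR listener. The flow below is illustrative (bodies elided); the expected mapping is taken from the docstring.

class Demo(Flow[dict]):
    @start()
    def kickoff(self):
        pass

    @listen(kickoff)
    def method_a(self):
        pass

    @listen(kickoff)
    def method_b(self):
        pass

    @listen(or_(method_a, method_b))  # first of the two to finish wins
    def handler(self, result):
        pass

# Expected: Demo()._build_racing_groups() == {frozenset({'method_a', 'method_b'}): 'handler'}
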
    def _get_racing_group_for_listeners(
        self,
        listener_names: list[FlowMethodName],
    ) -> tuple[frozenset[FlowMethodName], FlowMethodName] | None:
        """Check if the given listeners form a racing group.

        Args:
            listener_names: List of listener method names being executed.

        Returns:
            Tuple of (racing_members, or_listener_name) if these listeners race,
            None otherwise.
        """
        if not hasattr(self, "_racing_groups_cache"):
            self._racing_groups_cache = self._build_racing_groups()

        listener_set = set(listener_names)

        for racing_members, or_listener in self._racing_groups_cache.items():
            if racing_members & listener_set:
                racing_subset = racing_members & listener_set
                if len(racing_subset) > 1:
                    return (frozenset(racing_subset), or_listener)

        return None

    async def _execute_racing_listeners(
        self,
        racing_listeners: frozenset[FlowMethodName],
        other_listeners: list[FlowMethodName],
        result: Any,
    ) -> None:
        """Execute racing listeners with first-wins semantics.

        Racing listeners are executed in parallel, but once the first one
        completes, the others are cancelled. Non-racing listeners in the
        same batch are executed normally in parallel.

        Args:
            racing_listeners: Set of listener names that race for an OR condition.
            other_listeners: Other listeners to execute in parallel (not racing).
            result: The result from the triggering method.
        """
        racing_tasks = [
            asyncio.create_task(
                self._execute_single_listener(name, result),
                name=str(name),
            )
            for name in racing_listeners
        ]

        other_tasks = [
            asyncio.create_task(
                self._execute_single_listener(name, result),
                name=str(name),
            )
            for name in other_listeners
        ]

        if racing_tasks:
            for coro in asyncio.as_completed(racing_tasks):
                try:
                    await coro
                except Exception as e:
                    logger.debug(f"Racing listener failed: {e}")
                    continue
                break

            for task in racing_tasks:
                if not task.done():
                    task.cancel()

        if other_tasks:
            await asyncio.gather(*other_tasks, return_exceptions=True)

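The cancellation pattern above stands alone; a runnable miniature of first-wins over asyncio tasks (names and delays are arbitrary).

import asyncio

async def source(name: str, delay: float) -> str:
    await asyncio.sleep(delay)
    return name

async def main() -> None:
    tasks = [
        asyncio.create_task(source("fast", 0.1)),
        asyncio.create_task(source("slow", 1.0)),
    ]
    winner = None
    for fut in asyncio.as_completed(tasks):
        winner = await fut  # first completion wins
        break
    for t in tasks:
        if not t.done():
            t.cancel()  # losers are cancelled, mirroring the method above
    print(winner)  # -> "fast"

asyncio.run(main())
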
    @classmethod
    def from_pending(
        cls,
        flow_id: str,
        persistence: FlowPersistence | None = None,
        **kwargs: Any,
-    ) -> "Flow[Any]":
+    ) -> Flow[Any]:
        """Create a Flow instance from a pending feedback state.

        This classmethod is used to restore a flow that was paused waiting

@@ -631,7 +864,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        return instance

    @property
-    def pending_feedback(self) -> "PendingFeedbackContext | None":
+    def pending_feedback(self) -> PendingFeedbackContext | None:
        """Get the pending feedback context if this flow is waiting for feedback.

        Returns:
@@ -716,9 +949,10 @@ class Flow(Generic[T], metaclass=FlowMeta):
        Raises:
            ValueError: If no pending feedback context exists
        """
-        from crewai.flow.human_feedback import HumanFeedbackResult
+        from datetime import datetime
+
+        from crewai.flow.human_feedback import HumanFeedbackResult

        if self._pending_feedback_context is None:
            raise ValueError(
                "No pending feedback context. Use from_pending() to restore a paused flow."
@@ -740,12 +974,14 @@ class Flow(Generic[T], metaclass=FlowMeta):
            # No default and no feedback - use first outcome
            collapsed_outcome = emit[0]
        elif emit:
-            # Collapse feedback to outcome using LLM
-            collapsed_outcome = self._collapse_to_outcome(
-                feedback=feedback,
-                outcomes=emit,
-                llm=llm,
-            )
+            if llm is not None:
+                collapsed_outcome = self._collapse_to_outcome(
+                    feedback=feedback,
+                    outcomes=emit,
+                    llm=llm,
+                )
+            else:
+                collapsed_outcome = emit[0]

        # Create result
        result = HumanFeedbackResult(
@@ -784,21 +1020,16 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
# This allows methods to re-execute in loops (e.g., implement_changes → suggest_changes → implement_changes)
|
||||
self._is_execution_resuming = False
|
||||
|
||||
# Determine what to pass to listeners
|
||||
final_result: Any = result
|
||||
try:
|
||||
if emit and collapsed_outcome:
|
||||
# Router behavior - the outcome itself triggers listeners
|
||||
# First, add the outcome to method outputs as a router would
|
||||
self._method_outputs.append(collapsed_outcome)
|
||||
|
||||
# Then trigger listeners for the outcome (e.g., "approved" triggers @listen("approved"))
|
||||
final_result = await self._execute_listeners(
|
||||
FlowMethodName(collapsed_outcome), # Use outcome as trigger
|
||||
result, # Pass HumanFeedbackResult to listeners
|
||||
await self._execute_listeners(
|
||||
FlowMethodName(collapsed_outcome),
|
||||
result,
|
||||
)
|
||||
else:
|
||||
# Normal behavior - pass the HumanFeedbackResult
|
||||
final_result = await self._execute_listeners(
|
||||
await self._execute_listeners(
|
||||
FlowMethodName(context.method_name),
|
||||
result,
|
||||
)
|
||||
@@ -894,18 +1125,17 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
|
||||
# Handle case where initial_state is a type (class)
|
||||
if isinstance(self.initial_state, type):
|
||||
if issubclass(self.initial_state, FlowState):
|
||||
return self.initial_state() # Uses model defaults
|
||||
if issubclass(self.initial_state, BaseModel):
|
||||
# Validate that the model has an id field
|
||||
model_fields = getattr(self.initial_state, "model_fields", None)
|
||||
state_class: type[T] = self.initial_state
|
||||
if issubclass(state_class, FlowState):
|
||||
return state_class()
|
||||
if issubclass(state_class, BaseModel):
|
||||
model_fields = getattr(state_class, "model_fields", None)
|
||||
if not model_fields or "id" not in model_fields:
|
||||
raise ValueError("Flow state model must have an 'id' field")
|
||||
instance = self.initial_state()
|
||||
# Ensure id is set - generate UUID if empty
|
||||
if not getattr(instance, "id", None):
|
||||
object.__setattr__(instance, "id", str(uuid4()))
|
||||
return instance
|
||||
model_instance = state_class()
|
||||
if not getattr(model_instance, "id", None):
|
||||
object.__setattr__(model_instance, "id", str(uuid4()))
|
||||
return model_instance
|
||||
if self.initial_state is dict:
|
||||
return cast(T, {"id": str(uuid4())})
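For reference, a state model that passes this validation might look like the following (illustrative only; the model name and fields are hypothetical):

from uuid import uuid4
from pydantic import BaseModel, Field

class ReportState(BaseModel):  # hypothetical example state model
    # An "id" field is required; empty values are backfilled with a UUID
    id: str = Field(default_factory=lambda: str(uuid4()))
    draft: str = ""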

@@ -970,7 +1200,7 @@ class Flow(Generic[T], metaclass=FlowMeta):

    @property
    def state(self) -> T:
-       return self._state
+       return StateProxy(self._state, self._state_lock)  # type: ignore[return-value]

    @property
    def method_outputs(self) -> list[Any]:

@@ -1295,6 +1525,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
            self._completed_methods.clear()
            self._method_outputs.clear()
            self._pending_and_listeners.clear()
+           self._clear_or_listeners()
        else:
            # We're restoring from persistence, set the flag
            self._is_execution_resuming = True

@@ -1346,9 +1577,26 @@ class Flow(Generic[T], metaclass=FlowMeta):
        self._initialize_state(inputs)

        try:
+           # Determine which start methods to execute at kickoff
+           # Conditional start methods (with __trigger_methods__) are only triggered by their conditions
+           # UNLESS there are no unconditional starts (then all starts run as entry points)
+           unconditional_starts = [
+               start_method
+               for start_method in self._start_methods
+               if not getattr(
+                   self._methods.get(start_method), "__trigger_methods__", None
+               )
+           ]
+           # If there are unconditional starts, only run those at kickoff
+           # If there are NO unconditional starts, run all starts (including conditional ones)
+           starts_to_execute = (
+               unconditional_starts
+               if unconditional_starts
+               else self._start_methods
+           )
            tasks = [
                self._execute_start_method(start_method)
-               for start_method in self._start_methods
+               for start_method in starts_to_execute
            ]
            await asyncio.gather(*tasks)
        except Exception as e:

@@ -1431,13 +1679,14 @@ class Flow(Generic[T], metaclass=FlowMeta):
            )
            self._event_futures.clear()

-           trace_listener = TraceCollectionListener()
-           if trace_listener.batch_manager.batch_owner_type == "flow":
-               if trace_listener.first_time_handler.is_first_time:
-                   trace_listener.first_time_handler.mark_events_collected()
-                   trace_listener.first_time_handler.handle_execution_completion()
-               else:
-                   trace_listener.batch_manager.finalize_batch()
+           if not self.suppress_flow_events:
+               trace_listener = TraceCollectionListener()
+               if trace_listener.batch_manager.batch_owner_type == "flow":
+                   if trace_listener.first_time_handler.is_first_time:
+                       trace_listener.first_time_handler.mark_events_collected()
+                       trace_listener.first_time_handler.handle_execution_completion()
+                   else:
+                       trace_listener.batch_manager.finalize_batch()

            return final_output
        finally:

@@ -1481,6 +1730,8 @@ class Flow(Generic[T], metaclass=FlowMeta):
            return
        # For cyclic flows, clear from completed to allow re-execution
        self._completed_methods.discard(start_method_name)
+       # Also clear fired OR listeners to allow them to fire again in new cycle
+       self._clear_or_listeners()

        method = self._methods[start_method_name]
        enhanced_method = self._inject_trigger_payload_for_start_method(method)

@@ -1503,11 +1754,25 @@ class Flow(Generic[T], metaclass=FlowMeta):
                if self.last_human_feedback is not None
                else result
            )
-           tasks = [
-               self._execute_single_listener(listener_name, listener_result)
-               for listener_name in listeners_for_result
-           ]
-           await asyncio.gather(*tasks)
+           racing_group = self._get_racing_group_for_listeners(
+               listeners_for_result
+           )
+           if racing_group:
+               racing_members, _ = racing_group
+               other_listeners = [
+                   name
+                   for name in listeners_for_result
+                   if name not in racing_members
+               ]
+               await self._execute_racing_listeners(
+                   racing_members, other_listeners, listener_result
+               )
+           else:
+               tasks = [
+                   self._execute_single_listener(listener_name, listener_result)
+                   for listener_name in listeners_for_result
+               ]
+               await asyncio.gather(*tasks)
        else:
            await self._execute_listeners(start_method_name, result)

@@ -1573,11 +1838,19 @@ class Flow(Generic[T], metaclass=FlowMeta):
        if future:
            self._event_futures.append(future)

-       result = (
-           await method(*args, **kwargs)
-           if asyncio.iscoroutinefunction(method)
-           else method(*args, **kwargs)
-       )
+       if asyncio.iscoroutinefunction(method):
+           result = await method(*args, **kwargs)
+       else:
+           # Run sync methods in thread pool for isolation
+           # This allows Agent.kickoff() to work synchronously inside Flow methods
+           import contextvars
+
+           ctx = contextvars.copy_context()
+           result = await asyncio.to_thread(ctx.run, method, *args, **kwargs)
+
+       # Auto-await coroutines returned from sync methods (enables AgentExecutor pattern)
+       if asyncio.iscoroutine(result):
+           result = await result

        self._method_outputs.append(result)
        self._method_execution_counts[method_name] = (
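The copy-context-into-thread pattern used above can be demonstrated in isolation; a minimal sketch with a hypothetical ContextVar (not a CrewAI API):

import asyncio
import contextvars

request_id = contextvars.ContextVar("request_id", default="none")

def sync_work() -> str:
    # Runs in a worker thread, but still sees the caller's context variables
    return f"handled {request_id.get()}"

async def main() -> None:
    request_id.set("req-42")
    ctx = contextvars.copy_context()
    # ctx.run(...) carries the captured ContextVars into the worker thread,
    # mirroring how _execute_method runs sync flow methods
    result = await asyncio.to_thread(ctx.run, sync_work)
    print(result)  # -> "handled req-42"

asyncio.run(main())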
@@ -1724,11 +1997,27 @@ class Flow(Generic[T], metaclass=FlowMeta):
                listener_result = router_result_to_feedback.get(
                    str(current_trigger), result
                )
-               tasks = [
-                   self._execute_single_listener(listener_name, listener_result)
-                   for listener_name in listeners_triggered
-               ]
-               await asyncio.gather(*tasks)
+               racing_group = self._get_racing_group_for_listeners(
+                   listeners_triggered
+               )
+               if racing_group:
+                   racing_members, _ = racing_group
+                   other_listeners = [
+                       name
+                       for name in listeners_triggered
+                       if name not in racing_members
+                   ]
+                   await self._execute_racing_listeners(
+                       racing_members, other_listeners, listener_result
+                   )
+               else:
+                   tasks = [
+                       self._execute_single_listener(
+                           listener_name, listener_result
+                       )
+                       for listener_name in listeners_triggered
+                   ]
+                   await asyncio.gather(*tasks)

                if current_trigger in router_results:
                    # Find start methods triggered by this router result

@@ -1745,14 +2034,16 @@ class Flow(Generic[T], metaclass=FlowMeta):
                    should_trigger = current_trigger in all_methods

                    if should_trigger:
-                       # Only execute if this is a cycle (method was already completed)
+                       # Execute conditional start method triggered by router result
                        if method_name in self._completed_methods:
-                           # For router-triggered start methods in cycles, temporarily clear resumption flag
-                           # to allow cyclic execution
+                           # For cyclic re-execution, temporarily clear resumption flag
                            was_resuming = self._is_execution_resuming
                            self._is_execution_resuming = False
                            await self._execute_start_method(method_name)
                            self._is_execution_resuming = was_resuming
+                       else:
+                           # First-time execution of conditional start
+                           await self._execute_start_method(method_name)

    def _evaluate_condition(
        self,

@@ -1850,8 +2141,21 @@ class Flow(Generic[T], metaclass=FlowMeta):
            condition_type, methods = condition_data

            if condition_type == OR_CONDITION:
-               if trigger_method in methods:
-                   triggered.append(listener_name)
+               # Only trigger multi-source OR listeners (or_(A, B, C)) once - skip if already fired
+               # Simple single-method listeners fire every time their trigger occurs
+               # Routers also fire every time - they're decision points
+               has_multiple_triggers = len(methods) > 1
+               should_check_fired = has_multiple_triggers and not is_router
+
+               if (
+                   not should_check_fired
+                   or listener_name not in self._fired_or_listeners
+               ):
+                   if trigger_method in methods:
+                       triggered.append(listener_name)
+                       # Only track multi-source OR listeners (not single-method or routers)
+                       if should_check_fired:
+                           self._fired_or_listeners.add(listener_name)
            elif condition_type == AND_CONDITION:
                pending_key = PendingListenerKey(listener_name)
                if pending_key not in self._pending_and_listeners:

@@ -1864,10 +2168,26 @@ class Flow(Generic[T], metaclass=FlowMeta):
                    self._pending_and_listeners.pop(pending_key, None)

        elif is_flow_condition_dict(condition_data):
+           # For complex conditions, check if top-level is OR and track accordingly
+           top_level_type = condition_data.get("type", OR_CONDITION)
+           is_or_based = top_level_type == OR_CONDITION
+
+           # Only track multi-source OR conditions (multiple sub-conditions), not routers
+           sub_conditions = condition_data.get("conditions", [])
+           has_multiple_triggers = is_or_based and len(sub_conditions) > 1
+           should_check_fired = has_multiple_triggers and not is_router
+
+           # Skip compound OR-based listeners that have already fired
+           if should_check_fired and listener_name in self._fired_or_listeners:
+               continue
+
            if self._evaluate_condition(
                condition_data, trigger_method, listener_name
            ):
                triggered.append(listener_name)
                # Track compound OR-based listeners so they only fire once
+               if should_check_fired:
+                   self._fired_or_listeners.add(listener_name)

        return triggered
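In flow terms, the fire-once tracking means a multi-source OR listener reacts to whichever trigger arrives first and is skipped for the rest; a hedged sketch (import path per current CrewAI docs, adjust to your version):

from crewai.flow.flow import Flow, listen, or_, start

class Example(Flow):
    @start()
    def fetch_a(self):
        return "a"

    @start()
    def fetch_b(self):
        return "b"

    # Multi-source OR listener: with the tracking above it fires on the
    # FIRST of fetch_a/fetch_b to finish and is skipped for the second.
    @listen(or_(fetch_a, fetch_b))
    def merge(self, result):
        print("merge fired once with", result)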

@@ -1896,9 +2216,22 @@ class Flow(Generic[T], metaclass=FlowMeta):
        if self._is_execution_resuming:
            # During resumption, skip execution but continue listeners
            await self._execute_listeners(listener_name, None)

+           # For routers, also check if any conditional starts they triggered are completed
+           # If so, continue their chains
+           if listener_name in self._routers:
+               for start_method_name in self._start_methods:
+                   if (
+                       start_method_name in self._listeners
+                       and start_method_name in self._completed_methods
+                   ):
+                       # This conditional start was executed, continue its chain
+                       await self._execute_start_method(start_method_name)
            return
        # For cyclic flows, clear from completed to allow re-execution
        self._completed_methods.discard(listener_name)
+       # Also clear from fired OR listeners for cyclic flows
+       self._discard_or_listener(listener_name)

        try:
            method = self._methods[listener_name]

@@ -1931,11 +2264,25 @@ class Flow(Generic[T], metaclass=FlowMeta):
                    if self.last_human_feedback is not None
                    else listener_result
                )
-               tasks = [
-                   self._execute_single_listener(name, feedback_result)
-                   for name in listeners_for_result
-               ]
-               await asyncio.gather(*tasks)
+               racing_group = self._get_racing_group_for_listeners(
+                   listeners_for_result
+               )
+               if racing_group:
+                   racing_members, _ = racing_group
+                   other_listeners = [
+                       name
+                       for name in listeners_for_result
+                       if name not in racing_members
+                   ]
+                   await self._execute_racing_listeners(
+                       racing_members, other_listeners, feedback_result
+                   )
+               else:
+                   tasks = [
+                       self._execute_single_listener(name, feedback_result)
+                       for name in listeners_for_result
+                   ]
+                   await asyncio.gather(*tasks)

        except Exception as e:
            # Don't log HumanFeedbackPending as an error - it's expected control flow

@@ -2049,7 +2396,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        from crewai.llms.base_llm import BaseLLM as BaseLLMClass
        from crewai.utilities.i18n import get_i18n

        # Get or create LLM instance
        llm_instance: BaseLLMClass
        if isinstance(llm, str):
            llm_instance = LLM(model=llm)
        elif isinstance(llm, BaseLLMClass):

@@ -2084,26 +2431,23 @@ class Flow(Generic[T], metaclass=FlowMeta):
            response_model=FeedbackOutcome,
        )

        # Parse the response - LLM returns JSON string when using response_model
        if isinstance(response, str):
            import json

            try:
                parsed = json.loads(response)
-               return parsed.get("outcome", outcomes[0])
+               return str(parsed.get("outcome", outcomes[0]))
            except json.JSONDecodeError:
                # Not valid JSON, might be raw outcome string
                response_clean = response.strip()
                for outcome in outcomes:
                    if outcome.lower() == response_clean.lower():
                        return outcome
                return outcomes[0]
        elif isinstance(response, FeedbackOutcome):
-           return response.outcome
+           return str(response.outcome)
        elif hasattr(response, "outcome"):
-           return response.outcome
+           return str(response.outcome)
        else:
            # Unexpected type, fall back to first outcome
            logger.warning(f"Unexpected response type: {type(response)}")
            return outcomes[0]
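The fallback ladder above (JSON payload, then raw outcome string, then first outcome) is easy to exercise in isolation; a minimal self-contained sketch:

import json

def collapse(response: str, outcomes: list[str]) -> str:
    # Mirrors the decision ladder: JSON payload -> raw outcome string -> first outcome
    try:
        return str(json.loads(response).get("outcome", outcomes[0]))
    except json.JSONDecodeError:
        cleaned = response.strip().lower()
        for outcome in outcomes:
            if outcome.lower() == cleaned:
                return outcome
        return outcomes[0]

print(collapse('{"outcome": "approved"}', ["approved", "rejected"]))  # approved
print(collapse("REJECTED", ["approved", "rejected"]))                 # rejected
print(collapse("gibberish", ["approved", "rejected"]))                # approved (fallback)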

@@ -1,4 +1,5 @@
import inspect
+from typing import Any

from pydantic import BaseModel, Field, InstanceOf, model_validator
from typing_extensions import Self

@@ -14,14 +15,14 @@ class FlowTrackable(BaseModel):
    inspecting the call stack.
    """

-   parent_flow: InstanceOf[Flow] | None = Field(
+   parent_flow: InstanceOf[Flow[Any]] | None = Field(
        default=None,
        description="The parent flow of the instance, if it was created inside a flow.",
    )

    @model_validator(mode="after")
    def _set_parent_flow(self) -> Self:
-       max_depth = 5
+       max_depth = 8
        frame = inspect.currentframe()

        try:

@@ -61,7 +61,7 @@ class PersistenceDecorator:
    @classmethod
    def persist_state(
        cls,
-       flow_instance: Flow,
+       flow_instance: Flow[Any],
        method_name: str,
        persistence_instance: FlowPersistence,
        verbose: bool = False,

@@ -90,7 +90,13 @@ class PersistenceDecorator:
        flow_uuid: str | None = None
        if isinstance(state, dict):
            flow_uuid = state.get("id")
-       elif isinstance(state, BaseModel):
+       elif hasattr(state, "_unwrap"):
+           unwrapped = state._unwrap()
+           if isinstance(unwrapped, dict):
+               flow_uuid = unwrapped.get("id")
+           else:
+               flow_uuid = getattr(unwrapped, "id", None)
+       elif isinstance(state, BaseModel) or hasattr(state, "id"):
            flow_uuid = getattr(state, "id", None)

        if not flow_uuid:

@@ -104,10 +110,11 @@ class PersistenceDecorator:
        logger.info(LOG_MESSAGES["save_state"].format(flow_uuid))

        try:
+           state_data = state._unwrap() if hasattr(state, "_unwrap") else state
            persistence_instance.save_state(
                flow_uuid=flow_uuid,
                method_name=method_name,
-               state_data=state,
+               state_data=state_data,
            )
        except Exception as e:
            error_msg = LOG_MESSAGES["save_error"].format(method_name, str(e))

@@ -126,7 +133,9 @@ class PersistenceDecorator:
            raise ValueError(error_msg) from e
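The duck-typed _unwrap() check above is what lets the persistence layer store the raw state rather than the proxy wrapper; a minimal sketch with a simplified stand-in for the real proxy:

from typing import Any

class StateProxy:  # simplified stand-in for the real proxy
    def __init__(self, state: Any):
        self._state = state

    def _unwrap(self) -> Any:
        return self._state

def to_persistable(state: Any) -> Any:
    # Same duck typing as persist_state: proxies expose _unwrap(),
    # plain dicts/models are stored as-is.
    return state._unwrap() if hasattr(state, "_unwrap") else state

print(to_persistable(StateProxy({"id": "abc"})))  # {'id': 'abc'}
print(to_persistable({"id": "abc"}))              # {'id': 'abc'}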


-def persist(persistence: FlowPersistence | None = None, verbose: bool = False):
+def persist(
+    persistence: FlowPersistence | None = None, verbose: bool = False
+) -> Callable[[type | Callable[..., T]], type | Callable[..., T]]:
    """Decorator to persist flow state.

    This decorator can be applied at either the class level or method level.

@@ -189,8 +198,8 @@ def persist(persistence: FlowPersistence | None = None, verbose: bool = False):
            if asyncio.iscoroutinefunction(method):
                # Create a closure to capture the current name and method
                def create_async_wrapper(
-                   method_name: str, original_method: Callable
-               ):
+                   method_name: str, original_method: Callable[..., Any]
+               ) -> Callable[..., Any]:
                    @functools.wraps(original_method)
                    async def method_wrapper(
                        self: Any, *args: Any, **kwargs: Any

@@ -221,8 +230,8 @@ def persist(persistence: FlowPersistence | None = None, verbose: bool = False):
            else:
                # Create a closure to capture the current name and method
                def create_sync_wrapper(
-                   method_name: str, original_method: Callable
-               ):
+                   method_name: str, original_method: Callable[..., Any]
+               ) -> Callable[..., Any]:
                    @functools.wraps(original_method)
                    def method_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
                        result = original_method(self, *args, **kwargs)

@@ -268,7 +277,7 @@ def persist(persistence: FlowPersistence | None = None, verbose: bool = False):
                        PersistenceDecorator.persist_state(
                            flow_instance, method.__name__, actual_persistence, verbose
                        )
-                       return result
+                       return cast(T, result)

                for attr in [
                    "__is_start_method__",

@@ -10,6 +10,7 @@
    get_origin,
)
import uuid
+import warnings

from pydantic import (
    UUID4,

@@ -80,6 +81,11 @@ class LiteAgent(FlowTrackable, BaseModel):
    """
    A lightweight agent that can process messages and use tools.

+   .. deprecated::
+       LiteAgent is deprecated and will be removed in a future version.
+       Use ``Agent().kickoff(messages)`` instead, which provides the same
+       functionality with additional features like memory and knowledge support.
+
    This agent is simpler than the full Agent class, focusing on direct execution
    rather than task delegation. It's designed to be used for simple interactions
    where a full crew is not needed.

@@ -164,6 +170,18 @@ class LiteAgent(FlowTrackable, BaseModel):
        default_factory=get_after_llm_call_hooks
    )

+   @model_validator(mode="after")
+   def emit_deprecation_warning(self) -> Self:
+       """Emit deprecation warning for LiteAgent usage."""
+       warnings.warn(
+           "LiteAgent is deprecated and will be removed in a future version. "
+           "Use Agent().kickoff(messages) instead, which provides the same "
+           "functionality with additional features like memory and knowledge support.",
+           DeprecationWarning,
+           stacklevel=2,
+       )
+       return self
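The migration path named in the warning looks roughly like this (a sketch; agent fields are illustrative):

from crewai import Agent

agent = Agent(
    role="Researcher",
    goal="Answer questions briefly",
    backstory="A concise research assistant",
)
# Direct execution without a crew; replaces LiteAgent usage
result = agent.kickoff("What is the capital of France?")
print(result)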

    @model_validator(mode="after")
    def setup_llm(self) -> Self:
        """Set up the LLM and other components after initialization."""

@@ -931,7 +931,6 @@ class LLM(BaseLLM):
        self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)

-       if not tool_calls or not available_functions:

        if response_model and self.is_litellm:
            instructor_instance = InternalInstructor(
                content=full_response,

@@ -1144,8 +1143,12 @@ class LLM(BaseLLM):
        if response_model:
            params["response_model"] = response_model
        response = litellm.completion(**params)

-       if hasattr(response, "usage") and not isinstance(response.usage, type) and response.usage:
+       if (
+           hasattr(response, "usage")
+           and not isinstance(response.usage, type)
+           and response.usage
+       ):
            usage_info = response.usage
            self._track_token_usage_internal(usage_info)

@@ -1199,16 +1202,19 @@ class LLM(BaseLLM):
            )
            return text_response

-       # --- 6) If there is no text response, no available functions, but there are tool calls, return the tool calls
-       if tool_calls and not available_functions and not text_response:
+       # --- 6) If there are tool calls but no available functions, return the tool calls
+       # This allows the caller (e.g., executor) to handle tool execution
+       if tool_calls and not available_functions:
            return tool_calls

-       # --- 7) Handle tool calls if present
-       tool_result = self._handle_tool_call(
-           tool_calls, available_functions, from_task, from_agent
-       )
-       if tool_result is not None:
-           return tool_result
+       # --- 7) Handle tool calls if present (execute when available_functions provided)
+       if tool_calls and available_functions:
+           tool_result = self._handle_tool_call(
+               tool_calls, available_functions, from_task, from_agent
+           )
+           if tool_result is not None:
+               return tool_result

        # --- 8) If tool call handling didn't return a result, emit completion event and return text response
        self._handle_emit_call_events(
            response=text_response,

@@ -1273,7 +1279,11 @@ class LLM(BaseLLM):
            params["response_model"] = response_model
        response = await litellm.acompletion(**params)

-       if hasattr(response, "usage") and not isinstance(response.usage, type) and response.usage:
+       if (
+           hasattr(response, "usage")
+           and not isinstance(response.usage, type)
+           and response.usage
+       ):
            usage_info = response.usage
            self._track_token_usage_internal(usage_info)

@@ -1321,14 +1331,18 @@ class LLM(BaseLLM):
            )
            return text_response

-       if tool_calls and not available_functions and not text_response:
+       # If there are tool calls but no available functions, return the tool calls
+       # This allows the caller (e.g., executor) to handle tool execution
+       if tool_calls and not available_functions:
            return tool_calls

-       tool_result = self._handle_tool_call(
-           tool_calls, available_functions, from_task, from_agent
-       )
-       if tool_result is not None:
-           return tool_result
+       # Handle tool calls if present (execute when available_functions provided)
+       if tool_calls and available_functions:
+           tool_result = self._handle_tool_call(
+               tool_calls, available_functions, from_task, from_agent
+           )
+           if tool_result is not None:
+               return tool_result

        self._handle_emit_call_events(
            response=text_response,

@@ -1363,7 +1377,7 @@ class LLM(BaseLLM):
        """
        full_response = ""
-       chunk_count = 0

        usage_info = None

        accumulated_tool_args: defaultdict[int, AccumulatedToolArgs] = defaultdict(

@@ -445,7 +445,7 @@ class BaseLLM(ABC):
                from_agent=from_agent,
            )

-           return str(result)
+           return result

        except Exception as e:
            error_msg = f"Error executing function '{function_name}': {e!s}"

@@ -418,6 +418,7 @@ class AnthropicCompletion(BaseLLM):
        - System messages are separate from conversation messages
        - Messages must alternate between user and assistant
        - First message must be from user
+       - Tool results must be in user messages with tool_result content blocks
        - When thinking is enabled, assistant messages must start with thinking blocks

        Args:

@@ -431,6 +432,7 @@ class AnthropicCompletion(BaseLLM):

        formatted_messages: list[LLMMessage] = []
        system_message: str | None = None
+       pending_tool_results: list[dict[str, Any]] = []

        for message in base_formatted:
            role = message.get("role")

@@ -441,16 +443,47 @@ class AnthropicCompletion(BaseLLM):
                    system_message += f"\n\n{content}"
                else:
                    system_message = cast(str, content)
-           else:
-               role_str = role if role is not None else "user"
+           elif role == "tool":
+               # Convert OpenAI-style tool message to Anthropic tool_result format
+               # These will be collected and added as a user message
+               tool_call_id = message.get("tool_call_id", "")
+               tool_result = {
+                   "type": "tool_result",
+                   "tool_use_id": tool_call_id,
+                   "content": content if content else "",
+               }
+               pending_tool_results.append(tool_result)
+           elif role == "assistant":
+               # First, flush any pending tool results as a user message
+               if pending_tool_results:
+                   formatted_messages.append(
+                       {"role": "user", "content": pending_tool_results}
+                   )
+                   pending_tool_results = []

-               if isinstance(content, list):
-                   formatted_messages.append({"role": role_str, "content": content})
-               elif (
-                   role_str == "assistant"
-                   and self.thinking
-                   and self.previous_thinking_blocks
-               ):
+               # Handle assistant message with tool_calls (convert to Anthropic format)
+               tool_calls = message.get("tool_calls", [])
+               if tool_calls:
+                   assistant_content: list[dict[str, Any]] = []
+                   for tc in tool_calls:
+                       if isinstance(tc, dict):
+                           func = tc.get("function", {})
+                           tool_use = {
+                               "type": "tool_use",
+                               "id": tc.get("id", ""),
+                               "name": func.get("name", ""),
+                               "input": json.loads(func.get("arguments", "{}"))
+                               if isinstance(func.get("arguments"), str)
+                               else func.get("arguments", {}),
+                           }
+                           assistant_content.append(tool_use)
+                   if assistant_content:
+                       formatted_messages.append(
+                           {"role": "assistant", "content": assistant_content}
+                       )
+               elif isinstance(content, list):
+                   formatted_messages.append({"role": "assistant", "content": content})
+               elif self.thinking and self.previous_thinking_blocks:
                    structured_content = cast(
                        list[dict[str, Any]],
                        [

@@ -459,14 +492,34 @@ class AnthropicCompletion(BaseLLM):
                        ],
                    )
                    formatted_messages.append(
-                       LLMMessage(role=role_str, content=structured_content)
+                       LLMMessage(role="assistant", content=structured_content)
                    )
                else:
                    content_str = content if content is not None else ""
                    formatted_messages.append(
                        LLMMessage(role="assistant", content=content_str)
                    )
+           else:
+               # User message - first flush any pending tool results
+               if pending_tool_results:
+                   formatted_messages.append(
+                       {"role": "user", "content": pending_tool_results}
+                   )
+                   pending_tool_results = []
+
+               role_str = role if role is not None else "user"
+               if isinstance(content, list):
+                   formatted_messages.append({"role": role_str, "content": content})
+               else:
+                   content_str = content if content is not None else ""
+                   formatted_messages.append(
+                       LLMMessage(role=role_str, content=content_str)
+                   )

        # Flush any remaining pending tool results
        if pending_tool_results:
            formatted_messages.append({"role": "user", "content": pending_tool_results})

        # Ensure first message is from user (Anthropic requirement)
        if not formatted_messages:
            # If no messages, add a default user message
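Concretely, the conversion above reshapes an OpenAI-style tool exchange into Anthropic's alternating format; an illustrative before/after with hypothetical IDs:

openai_style = [
    {"role": "assistant", "tool_calls": [
        {"id": "call_1", "type": "function",
         "function": {"name": "search", "arguments": '{"q": "crewai"}'}},
    ]},
    {"role": "tool", "tool_call_id": "call_1", "content": "3 results"},
]

# After _format_messages-style conversion:
anthropic_style = [
    {"role": "assistant", "content": [
        {"type": "tool_use", "id": "call_1", "name": "search", "input": {"q": "crewai"}},
    ]},
    {"role": "user", "content": [
        {"type": "tool_result", "tool_use_id": "call_1", "content": "3 results"},
    ]},
]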
@@ -526,13 +579,19 @@ class AnthropicCompletion(BaseLLM):
            return structured_json

        # Check if Claude wants to use tools
-       if response.content and available_functions:
+       if response.content:
            tool_uses = [
                block for block in response.content if isinstance(block, ToolUseBlock)
            ]

            if tool_uses:
-               # Handle tool use conversation flow
+               # If no available_functions, return tool calls for executor to handle
+               # This allows the executor to manage tool execution with proper
+               # message history and post-tool reasoning prompts
+               if not available_functions:
+                   return list(tool_uses)
+
+               # Handle tool use conversation flow internally
                return self._handle_tool_use_conversation(
                    response,
                    tool_uses,

@@ -696,7 +755,7 @@ class AnthropicCompletion(BaseLLM):

            return structured_json

-       if final_message.content and available_functions:
+       if final_message.content:
            tool_uses = [
                block
                for block in final_message.content

@@ -704,7 +763,11 @@ class AnthropicCompletion(BaseLLM):
            ]

            if tool_uses:
-               # Handle tool use conversation flow
+               # If no available_functions, return tool calls for executor to handle
+               if not available_functions:
+                   return list(tool_uses)
+
+               # Handle tool use conversation flow internally
                return self._handle_tool_use_conversation(
                    final_message,
                    tool_uses,

@@ -933,12 +996,16 @@ class AnthropicCompletion(BaseLLM):

            return structured_json

-       if response.content and available_functions:
+       if response.content:
            tool_uses = [
                block for block in response.content if isinstance(block, ToolUseBlock)
            ]

            if tool_uses:
+               # If no available_functions, return tool calls for executor to handle
+               if not available_functions:
+                   return list(tool_uses)
+
                return await self._ahandle_tool_use_conversation(
                    response,
                    tool_uses,

@@ -1079,7 +1146,7 @@ class AnthropicCompletion(BaseLLM):

            return structured_json

-       if final_message.content and available_functions:
+       if final_message.content:
            tool_uses = [
                block
                for block in final_message.content

@@ -1087,6 +1154,10 @@ class AnthropicCompletion(BaseLLM):
            ]

            if tool_uses:
+               # If no available_functions, return tool calls for executor to handle
+               if not available_functions:
+                   return list(tool_uses)
+
                return await self._ahandle_tool_use_conversation(
                    final_message,
                    tool_uses,

@@ -443,7 +443,7 @@ class AzureCompletion(BaseLLM):
        params["presence_penalty"] = self.presence_penalty
        if self.max_tokens is not None:
            params["max_tokens"] = self.max_tokens
-       if self.stop:
+       if self.stop and self.supports_stop_words():
            params["stop"] = self.stop

        # Handle tools/functions for Azure OpenAI models

@@ -514,10 +514,31 @@ class AzureCompletion(BaseLLM):

        for message in base_formatted:
            role = message.get("role", "user")  # Default to user if no role
-           content = message.get("content", "")
+           # Handle None content - Azure requires string content
+           content = message.get("content") or ""

-           # Azure AI Inference requires both 'role' and 'content'
-           azure_messages.append({"role": role, "content": content})
+           # Handle tool role messages - keep as tool role for Azure OpenAI
+           if role == "tool":
+               tool_call_id = message.get("tool_call_id", "unknown")
+               azure_messages.append(
+                   {
+                       "role": "tool",
+                       "tool_call_id": tool_call_id,
+                       "content": content,
+                   }
+               )
+           # Handle assistant messages with tool_calls
+           elif role == "assistant" and message.get("tool_calls"):
+               tool_calls = message.get("tool_calls", [])
+               azure_msg: LLMMessage = {
+                   "role": "assistant",
+                   "content": content,  # Already defaulted to "" above
+                   "tool_calls": tool_calls,
+               }
+               azure_messages.append(azure_msg)
+           else:
+               # Azure AI Inference requires both 'role' and 'content'
+               azure_messages.append({"role": role, "content": content})

        return azure_messages

@@ -604,6 +625,11 @@ class AzureCompletion(BaseLLM):
                from_agent=from_agent,
            )

+       # If there are tool_calls but no available_functions, return the tool_calls
+       # This allows the caller (e.g., executor) to handle tool execution
+       if message.tool_calls and not available_functions:
+           return list(message.tool_calls)
+
        # Handle tool calls
        if message.tool_calls and available_functions:
            tool_call = message.tool_calls[0]  # Handle first tool call

@@ -775,6 +801,21 @@ class AzureCompletion(BaseLLM):
                from_agent=from_agent,
            )

+       # If there are tool_calls but no available_functions, return them
+       # in OpenAI-compatible format for executor to handle
+       if tool_calls and not available_functions:
+           return [
+               {
+                   "id": call_data.get("id", f"call_{idx}"),
+                   "type": "function",
+                   "function": {
+                       "name": call_data["name"],
+                       "arguments": call_data["arguments"],
+                   },
+               }
+               for idx, call_data in tool_calls.items()
+           ]
+
        # Handle completed tool calls
        if tool_calls and available_functions:
            for call_data in tool_calls.values():

@@ -931,8 +972,28 @@ class AzureCompletion(BaseLLM):
        return self.is_openai_model

    def supports_stop_words(self) -> bool:
-       """Check if the model supports stop words."""
-       return True  # Most Azure models support stop sequences
+       """Check if the model supports stop words.
+
+       Models using the Responses API (GPT-5 family, o-series reasoning models,
+       computer-use-preview) do not support stop sequences.
+       See: https://learn.microsoft.com/en-us/azure/ai-foundry/foundry-models/concepts/models-sold-directly-by-azure
+       """
+       model_lower = self.model.lower() if self.model else ""
+
+       if "gpt-5" in model_lower:
+           return False
+
+       o_series_models = ["o1", "o3", "o4", "o1-mini", "o3-mini", "o4-mini"]
+
+       responses_api_models = ["computer-use-preview"]
+
+       unsupported_stop_models = o_series_models + responses_api_models
+
+       for unsupported in unsupported_stop_models:
+           if unsupported in model_lower:
+               return False
+
+       return True

    def get_context_window_size(self) -> int:
        """Get the context window size for the model."""

@@ -54,15 +54,21 @@ class GeminiCompletion(BaseLLM):
        safety_settings: dict[str, Any] | None = None,
        client_params: dict[str, Any] | None = None,
        interceptor: BaseInterceptor[Any, Any] | None = None,
+       use_vertexai: bool | None = None,
        **kwargs: Any,
    ):
        """Initialize Google Gemini chat completion client.

        Args:
            model: Gemini model name (e.g., 'gemini-2.0-flash-001', 'gemini-1.5-pro')
-           api_key: Google API key (defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var)
-           project: Google Cloud project ID (for Vertex AI)
-           location: Google Cloud location (for Vertex AI, defaults to 'us-central1')
+           api_key: Google API key for Gemini API authentication.
+               Defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var.
+               NOTE: Cannot be used with Vertex AI (project parameter). Use Gemini API instead.
+           project: Google Cloud project ID for Vertex AI with ADC authentication.
+               Requires Application Default Credentials (gcloud auth application-default login).
+               NOTE: Vertex AI does NOT support API keys, only OAuth2/ADC.
+               If both api_key and project are set, api_key takes precedence.
+           location: Google Cloud location (for Vertex AI with ADC, defaults to 'us-central1')
            temperature: Sampling temperature (0-2)
            top_p: Nucleus sampling parameter
            top_k: Top-k sampling parameter

@@ -73,6 +79,12 @@ class GeminiCompletion(BaseLLM):
            client_params: Additional parameters to pass to the Google Gen AI Client constructor.
                Supports parameters like http_options, credentials, debug_config, etc.
            interceptor: HTTP interceptor (not yet supported for Gemini).
+           use_vertexai: Whether to use Vertex AI instead of Gemini API.
+               - True: Use Vertex AI (with ADC or Express mode with API key)
+               - False: Use Gemini API (explicitly override env var)
+               - None (default): Check GOOGLE_GENAI_USE_VERTEXAI env var
+               When using Vertex AI with API key (Express mode), http_options with
+               api_version="v1" is automatically configured.
            **kwargs: Additional parameters
        """
        if interceptor is not None:

@@ -95,7 +107,8 @@ class GeminiCompletion(BaseLLM):
        self.project = project or os.getenv("GOOGLE_CLOUD_PROJECT")
        self.location = location or os.getenv("GOOGLE_CLOUD_LOCATION") or "us-central1"

-       use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true"
+       if use_vertexai is None:
+           use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true"

        self.client = self._initialize_client(use_vertexai)

@@ -146,13 +159,34 @@ class GeminiCompletion(BaseLLM):

        Returns:
            Initialized Google Gen AI Client

+       Note:
+           Google Gen AI SDK has two distinct endpoints with different auth requirements:
+           - Gemini API (generativelanguage.googleapis.com): Supports API key authentication
+           - Vertex AI (aiplatform.googleapis.com): Only supports OAuth2/ADC, NO API keys
+
+           When vertexai=True is set, it routes to aiplatform.googleapis.com which rejects
+           API keys. Use Gemini API endpoint for API key authentication instead.
        """
        client_params = {}

        if self.client_params:
            client_params.update(self.client_params)

-       if use_vertexai or self.project:
+       # Determine authentication mode based on available credentials
+       has_api_key = bool(self.api_key)
+       has_project = bool(self.project)
+
+       if has_api_key and has_project:
+           logging.warning(
+               "Both API key and project provided. Using API key authentication. "
+               "Project/location parameters are ignored when using API keys. "
+               "To use Vertex AI with ADC, remove the api_key parameter."
+           )
+           has_project = False
+
+       # Vertex AI with ADC (project without API key)
+       if (use_vertexai or has_project) and not has_api_key:
            client_params.update(
                {
                    "vertexai": True,

@@ -161,12 +195,20 @@ class GeminiCompletion(BaseLLM):
                }
            )

            client_params.pop("api_key", None)

-       elif self.api_key:
+       # API key authentication (works with both Gemini API and Vertex AI Express)
+       elif has_api_key:
            client_params["api_key"] = self.api_key

-           client_params.pop("vertexai", None)
+           # Vertex AI Express mode: API key + vertexai=True + http_options with api_version="v1"
+           # See: https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey
+           if use_vertexai:
+               client_params["vertexai"] = True
+               client_params["http_options"] = types.HttpOptions(api_version="v1")
+           else:
+               # This ensures we use the Gemini API (generativelanguage.googleapis.com)
+               client_params["vertexai"] = False

            # Clean up project/location (not allowed with API key)
            client_params.pop("project", None)
            client_params.pop("location", None)

@@ -175,10 +217,13 @@ class GeminiCompletion(BaseLLM):
            return genai.Client(**client_params)
        except Exception as e:
            raise ValueError(
-               "Either GOOGLE_API_KEY/GEMINI_API_KEY (for Gemini API) or "
-               "GOOGLE_CLOUD_PROJECT (for Vertex AI) must be set"
+               "Authentication required. Provide one of:\n"
+               "  1. API key via GOOGLE_API_KEY or GEMINI_API_KEY environment variable\n"
+               "     (use_vertexai=True is optional for Vertex AI with API key)\n"
+               "  2. For Vertex AI with ADC: Set GOOGLE_CLOUD_PROJECT and run:\n"
+               "     gcloud auth application-default login\n"
+               "  3. Pass api_key parameter directly to LLM constructor\n"
            ) from e

        return genai.Client(**client_params)
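The three authentication modes described above translate to constructor calls roughly like these (illustrative; parameter names are taken from the constructor shown above):

# Gemini API with an API key (default endpoint)
llm = GeminiCompletion(model="gemini-2.0-flash-001", api_key="...")

# Vertex AI Express mode: API key plus vertexai routing
llm = GeminiCompletion(model="gemini-2.0-flash-001", api_key="...", use_vertexai=True)

# Vertex AI with ADC: project/location, no API key
# (requires: gcloud auth application-default login)
llm = GeminiCompletion(
    model="gemini-2.0-flash-001", project="my-gcp-project", location="us-central1"
)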

    def _get_client_params(self) -> dict[str, Any]:

@@ -202,6 +247,8 @@ class GeminiCompletion(BaseLLM):
                    "location": self.location,
                }
            )
-       if self.api_key:
-           params["api_key"] = self.api_key
+       elif self.api_key:
+           params["api_key"] = self.api_key

@@ -606,6 +653,17 @@ class GeminiCompletion(BaseLLM):
        if response.candidates and (self.tools or available_functions):
            candidate = response.candidates[0]
            if candidate.content and candidate.content.parts:
+               # Collect function call parts
+               function_call_parts = [
+                   part for part in candidate.content.parts if part.function_call
+               ]
+
+               # If there are function calls but no available_functions,
+               # return them for the executor to handle (like OpenAI/Anthropic)
+               if function_call_parts and not available_functions:
+                   return function_call_parts
+
+               # Otherwise execute the tools internally
                for part in candidate.content.parts:
                    if part.function_call:
                        function_name = part.function_call.name

@@ -720,7 +778,7 @@ class GeminiCompletion(BaseLLM):
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
-   ) -> str:
+   ) -> str | list[dict[str, Any]]:
        """Finalize streaming response with usage tracking, function execution, and events.

        Args:

@@ -738,6 +796,21 @@ class GeminiCompletion(BaseLLM):
        """
        self._track_token_usage_internal(usage_data)

+       # If there are function calls but no available_functions,
+       # return them for the executor to handle
+       if function_calls and not available_functions:
+           return [
+               {
+                   "id": call_data["id"],
+                   "function": {
+                       "name": call_data["name"],
+                       "arguments": json.dumps(call_data["args"]),
+                   },
+                   "type": "function",
+               }
+               for call_data in function_calls.values()
+           ]
+
        # Handle completed function calls
        if function_calls and available_functions:
            for call_data in function_calls.values():

@@ -428,6 +428,12 @@ class OpenAICompletion(BaseLLM):
        choice: Choice = response.choices[0]
        message = choice.message

+       # If there are tool_calls but no available_functions, return the tool_calls
+       # This allows the caller (e.g., executor) to handle tool execution
+       if message.tool_calls and not available_functions:
+           return list(message.tool_calls)
+
        # If there are tool_calls and available_functions, execute the tools
        if message.tool_calls and available_functions:
            tool_call = message.tool_calls[0]
            function_name = tool_call.function.name

@@ -725,6 +731,15 @@ class OpenAICompletion(BaseLLM):
        choice: Choice = response.choices[0]
        message = choice.message

+       # If there are tool_calls but no available_functions, return the tool_calls
+       # This allows the caller (e.g., executor) to handle tool execution
+       if message.tool_calls and not available_functions:
+           return list(message.tool_calls)
+
        # If there are tool_calls and available_functions, execute the tools
        if message.tool_calls and available_functions:
            tool_call = message.tool_calls[0]
            function_name = tool_call.function.name

@@ -105,6 +105,21 @@ def log_tool_conversion(tool: dict[str, Any], provider: str) -> None:
    logging.error(f"{provider}: Tool structure: {tool}")


def sanitize_function_name(name: str) -> str:
    """Sanitize function name for LLM provider compatibility.

    Replaces invalid characters with underscores. Valid characters are:
    letters, numbers, underscore, dot, colon, and dash.

    Args:
        name: Original function name

    Returns:
        Sanitized function name with invalid characters replaced
    """
    return re.sub(r"[^a-zA-Z0-9_.\-:]", "_", name)
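A quick demonstration of the sanitization rule (self-contained copy of the same regex):

import re

def sanitize_function_name(name: str) -> str:
    return re.sub(r"[^a-zA-Z0-9_.\-:]", "_", name)

print(sanitize_function_name("web search!"))  # -> "web_search_"
print(sanitize_function_name("db.query:v2"))  # -> "db.query:v2" (already valid)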


def safe_tool_conversion(
    tool: dict[str, Any], provider: str
) -> tuple[str, str, dict[str, Any]]:

@@ -127,7 +142,10 @@ def safe_tool_conversion(

    name, description, parameters = extract_tool_info(tool)

-   validated_name = validate_function_name(name, provider)
+   # Sanitize name before validation (replace invalid chars with underscores)
+   sanitized_name = sanitize_function_name(name)
+
+   validated_name = validate_function_name(sanitized_name, provider)

    logging.info(f"{provider}: Successfully validated tool '{validated_name}'")
    return validated_name, description, parameters

@@ -11,6 +11,9 @@
  "role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
  "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
  "no_tools": "\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
+ "native_tools": "\nUse available tools to gather information and complete your task.",
+ "native_task": "\nCurrent Task: {input}\n\nThis is VERY important to you, your job depends on it!",
+ "post_tool_reasoning": "PAUSE and THINK before responding.\n\nInternally consider (DO NOT output these steps):\n- What key insights did the tool provide?\n- Have I fulfilled ALL requirements from my original instructions (e.g., minimum tool calls, specific sources)?\n- Do I have enough information to fully answer the task?\n\nIF you have NOT met all requirements or need more information: Call another tool now.\n\nIF you have met all requirements and have sufficient information: Provide ONLY your final answer in the format specified by the task's expected output. Do NOT include reasoning steps, analysis sections, or meta-commentary. Just deliver the answer.",
  "format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. When responding, I must use the following format:\n\n```\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n```\nThis Thought/Action/Action Input/Result can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
  "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\n```\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n```",
  "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nHere is the expected format I must follow:\n\n```\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n This Thought/Action/Action Input/Result process can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",

@@ -1,8 +1,6 @@
"""Utilities for creating and manipulating types."""

-from typing import Annotated, Final, Literal
-
-from typing_extensions import TypeAliasType
+from typing import Annotated, Final, Literal, cast


_DYNAMIC_LITERAL_ALIAS: Final[Literal["DynamicLiteral"]] = "DynamicLiteral"

@@ -20,6 +18,11 @@ def create_literals_from_strings(

    Returns:
        Literal type for each A2A agent ID

+   Raises:
+       ValueError: If values is empty (Literal requires at least one value)
    """
    unique_values: tuple[str, ...] = tuple(dict.fromkeys(values))
-   return Literal.__getitem__(unique_values)
+   if not unique_values:
+       raise ValueError("Cannot create Literal type from empty values")
+   return cast(type, Literal.__getitem__(unique_values))

@@ -1,5 +1,6 @@
from __future__ import annotations

+import asyncio
from collections.abc import Callable, Sequence
import json
import re

@@ -54,6 +55,23 @@ console = Console()
_MULTIPLE_NEWLINES: Final[re.Pattern[str]] = re.compile(r"\n+")


def is_inside_event_loop() -> bool:
    """Check if code is currently running inside an asyncio event loop.

    This is used to detect when code is being called from within an async context
    (e.g., inside a Flow). In such cases, callers should return a coroutine
    instead of executing synchronously to avoid nested event loop errors.

    Returns:
        True if inside a running event loop, False otherwise.
    """
    try:
        asyncio.get_running_loop()
        return True
    except RuntimeError:
        return False
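A sketch of how a caller might use this check, per the docstring's guidance (the wrapper and coroutine here are hypothetical):

import asyncio

async def _do_work():
    return "done"

def kickoff():
    # Hypothetical wrapper: when already inside a running event loop
    # (e.g., called from a Flow method), return the coroutine for the
    # caller to await; otherwise drive it to completion synchronously.
    if is_inside_event_loop():
        return _do_work()
    return asyncio.run(_do_work())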
|
||||
|
||||
|
||||
def parse_tools(tools: list[BaseTool]) -> list[CrewStructuredTool]:
|
||||
"""Parse tools to be used for the task.
|
||||
|
||||
@@ -108,6 +126,67 @@ def render_text_description_and_args(
|
||||
return "\n".join(tool_strings)
|
||||
|
||||
|
||||
def convert_tools_to_openai_schema(
|
||||
tools: Sequence[BaseTool | CrewStructuredTool],
|
||||
) -> tuple[list[dict[str, Any]], dict[str, Callable[..., Any]]]:
|
||||
"""Convert CrewAI tools to OpenAI function calling format.
|
||||
|
||||
This function converts CrewAI BaseTool and CrewStructuredTool objects
|
||||
into the OpenAI-compatible tool schema format that can be passed to
|
||||
LLM providers for native function calling.
|
||||
|
||||
Args:
|
||||
tools: List of CrewAI tool objects to convert.
|
||||
|
||||
Returns:
|
||||
Tuple containing:
|
||||
- List of OpenAI-format tool schema dictionaries
|
||||
- Dict mapping tool names to their callable run() methods
|
||||
|
||||
Example:
|
||||
>>> tools = [CalculatorTool(), SearchTool()]
|
||||
>>> schemas, functions = convert_tools_to_openai_schema(tools)
|
||||
>>> # schemas can be passed to llm.call(tools=schemas)
|
||||
>>> # functions can be passed to llm.call(available_functions=functions)
|
||||
"""
|
||||
openai_tools: list[dict[str, Any]] = []
|
||||
available_functions: dict[str, Callable[..., Any]] = {}
|
||||
|
||||
for tool in tools:
|
||||
# Get the JSON schema for tool parameters
|
||||
parameters: dict[str, Any] = {}
|
||||
if hasattr(tool, "args_schema") and tool.args_schema is not None:
|
||||
try:
|
||||
parameters = tool.args_schema.model_json_schema()
|
||||
# Remove title and description from schema root as they're redundant
|
||||
parameters.pop("title", None)
|
||||
parameters.pop("description", None)
|
||||
except Exception:
|
||||
parameters = {}
|
||||
|
||||
# Extract original description from formatted description
|
||||
# BaseTool formats description as "Tool Name: ...\nTool Arguments: ...\nTool Description: {original}"
|
||||
description = tool.description
|
||||
if "Tool Description:" in description:
|
||||
# Extract the original description after "Tool Description:"
|
||||
description = description.split("Tool Description:")[-1].strip()
|
||||
|
||||
sanitized_name = re.sub(r"[^a-zA-Z0-9_.\-:]", "_", tool.name)
|
||||
|
||||
schema: dict[str, Any] = {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": sanitized_name,
|
||||
"description": description,
|
||||
"parameters": parameters,
|
||||
},
|
||||
}
|
||||
openai_tools.append(schema)
|
||||
available_functions[sanitized_name] = tool.run # type: ignore[attr-defined]
|
||||
|
||||
return openai_tools, available_functions
|
||||
|
||||
|
||||
def has_reached_max_iterations(iterations: int, max_iterations: int) -> bool:
|
||||
"""Check if the maximum number of iterations has been reached.
|
||||
|
||||

@@ -234,11 +313,13 @@ def get_llm_response(
     messages: list[LLMMessage],
     callbacks: list[TokenCalcHandler],
     printer: Printer,
+    tools: list[dict[str, Any]] | None = None,
+    available_functions: dict[str, Callable[..., Any]] | None = None,
     from_task: Task | None = None,
     from_agent: Agent | LiteAgent | None = None,
     response_model: type[BaseModel] | None = None,
     executor_context: CrewAgentExecutor | LiteAgent | None = None,
-) -> str:
+) -> str | Any:
     """Call the LLM and return the response, handling any invalid responses.

     Args:
@@ -246,13 +327,16 @@ def get_llm_response(
         messages: The messages to send to the LLM.
         callbacks: List of callbacks for the LLM call.
         printer: Printer instance for output.
+        tools: Optional list of tool schemas for native function calling.
+        available_functions: Optional dict mapping function names to callables.
         from_task: Optional task context for the LLM call.
         from_agent: Optional agent context for the LLM call.
         response_model: Optional Pydantic model for structured outputs.
         executor_context: Optional executor context for hook invocation.

     Returns:
-        The response from the LLM as a string.
+        The response from the LLM as a string, or tool call results if
+        native function calling is used.

     Raises:
         Exception: If an error occurs.
@@ -267,7 +351,9 @@ def get_llm_response(
     try:
         answer = llm.call(
             messages,
+            tools=tools,
             callbacks=callbacks,
+            available_functions=available_functions,
             from_task=from_task,
             from_agent=from_agent,  # type: ignore[arg-type]
             response_model=response_model,
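
Taken together with convert_tools_to_openai_schema above, the intended wiring looks roughly like this. This is a sketch, not repository code; the surrounding variables (llm, messages, callbacks, printer) are assumed to be in scope:

# Convert the agent's tools once, then pass both mappings through so the
# provider can do native function calling instead of ReAct text parsing.
schemas, functions = convert_tools_to_openai_schema(agent.tools)
answer = get_llm_response(
    llm=llm,
    messages=messages,
    callbacks=callbacks,
    printer=printer,
    tools=schemas,
    available_functions=functions,
)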

@@ -289,11 +375,13 @@ async def aget_llm_response(
     messages: list[LLMMessage],
     callbacks: list[TokenCalcHandler],
     printer: Printer,
+    tools: list[dict[str, Any]] | None = None,
+    available_functions: dict[str, Callable[..., Any]] | None = None,
     from_task: Task | None = None,
     from_agent: Agent | LiteAgent | None = None,
     response_model: type[BaseModel] | None = None,
     executor_context: CrewAgentExecutor | None = None,
-) -> str:
+) -> str | Any:
     """Call the LLM asynchronously and return the response.

     Args:
@@ -301,13 +389,16 @@ async def aget_llm_response(
         messages: The messages to send to the LLM.
         callbacks: List of callbacks for the LLM call.
         printer: Printer instance for output.
+        tools: Optional list of tool schemas for native function calling.
+        available_functions: Optional dict mapping function names to callables.
         from_task: Optional task context for the LLM call.
         from_agent: Optional agent context for the LLM call.
         response_model: Optional Pydantic model for structured outputs.
         executor_context: Optional executor context for hook invocation.

     Returns:
-        The response from the LLM as a string.
+        The response from the LLM as a string, or tool call results if
+        native function calling is used.

     Raises:
         Exception: If an error occurs.
@@ -321,7 +412,9 @@ async def aget_llm_response(
     try:
         answer = await llm.acall(
             messages,
+            tools=tools,
             callbacks=callbacks,
+            available_functions=available_functions,
             from_task=from_task,
             from_agent=from_agent,  # type: ignore[arg-type]
             response_model=response_model,

@@ -22,7 +22,9 @@ class SystemPromptResult(StandardPromptResult):
    user: Annotated[str, "The user prompt component"]


-COMPONENTS = Literal["role_playing", "tools", "no_tools", "task"]
+COMPONENTS = Literal[
+    "role_playing", "tools", "no_tools", "native_tools", "task", "native_task"
+]


class Prompts(BaseModel):
@@ -36,6 +38,10 @@ class Prompts(BaseModel):
    has_tools: bool = Field(
        default=False, description="Indicates if the agent has access to tools"
    )
+    use_native_tool_calling: bool = Field(
+        default=False,
+        description="Whether to use native function calling instead of ReAct format",
+    )
    system_template: str | None = Field(
        default=None, description="Custom system prompt template"
    )
@@ -58,12 +64,24 @@ class Prompts(BaseModel):
        A dictionary containing the constructed prompt(s).
        """
        slices: list[COMPONENTS] = ["role_playing"]
+        # When using native tool calling with tools, use native_tools instructions
+        # When using ReAct pattern with tools, use tools instructions
+        # When no tools are available, use no_tools instructions
        if self.has_tools:
-            slices.append("tools")
+            if self.use_native_tool_calling:
+                slices.append("native_tools")
+            else:
+                slices.append("tools")
        else:
            slices.append("no_tools")
        system: str = self._build_prompt(slices)
-        slices.append("task")
+
+        # Use native_task for native tool calling (no "Thought:" prompt)
+        # Use task for ReAct pattern (includes "Thought:" prompt)
+        task_slice: COMPONENTS = (
+            "native_task" if self.use_native_tool_calling else "task"
+        )
+        slices.append(task_slice)

        if (
            not self.system_template
@@ -72,7 +90,7 @@ class Prompts(BaseModel):
        ):
            return SystemPromptResult(
                system=system,
-                user=self._build_prompt(["task"]),
+                user=self._build_prompt([task_slice]),
                prompt=self._build_prompt(slices),
            )
        return StandardPromptResult(
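
As a quick illustration of the slice selection above, derived directly from the branching logic in the hunk:

# With tools and native function calling enabled:
#   slices == ["role_playing", "native_tools", "native_task"]
# With tools under the ReAct pattern:
#   slices == ["role_playing", "tools", "task"]
# With no tools:
#   slices == ["role_playing", "no_tools", "task"]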

@@ -13,5 +13,5 @@ class LLMMessage(TypedDict):
    instead of str | list[dict[str, str]]
    """

-    role: Literal["user", "assistant", "system"]
-    content: str | list[dict[str, Any]]
+    role: Literal["user", "assistant", "system", "tool"]
+    content: str | list[dict[str, Any]] | None
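
With the "tool" role and optional content in place, a tool-result turn can be represented as a plain message dict. The values below are illustrative, not taken from the repository:

# Hypothetical tool-result message following an assistant tool call:
tool_message: LLMMessage = {
    "role": "tool",
    "content": "The result of 15 * 8 is 120",
}
# An assistant message that carries only tool calls may have no text,
# which is why content is now allowed to be None.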

lib/crewai/tests/a2a/utils/test_agent_card.py (new file, 325 lines)
@@ -0,0 +1,325 @@
"""Tests for A2A agent card utilities."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from a2a.types import AgentCard, AgentSkill
|
||||
|
||||
from crewai import Agent
|
||||
from crewai.a2a.config import A2AClientConfig, A2AServerConfig
|
||||
from crewai.a2a.utils.agent_card import inject_a2a_server_methods
|
||||
|
||||
|
||||
class TestInjectA2AServerMethods:
|
||||
"""Tests for inject_a2a_server_methods function."""
|
||||
|
||||
def test_agent_with_server_config_gets_to_agent_card_method(self) -> None:
|
||||
"""Agent with A2AServerConfig should have to_agent_card method injected."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AServerConfig(),
|
||||
)
|
||||
|
||||
assert hasattr(agent, "to_agent_card")
|
||||
assert callable(agent.to_agent_card)
|
||||
|
||||
def test_agent_without_server_config_no_injection(self) -> None:
|
||||
"""Agent without A2AServerConfig should not get to_agent_card method."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AClientConfig(endpoint="http://example.com"),
|
||||
)
|
||||
|
||||
assert not hasattr(agent, "to_agent_card")
|
||||
|
||||
def test_agent_without_a2a_no_injection(self) -> None:
|
||||
"""Agent without any a2a config should not get to_agent_card method."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
)
|
||||
|
||||
assert not hasattr(agent, "to_agent_card")
|
||||
|
||||
def test_agent_with_mixed_configs_gets_injection(self) -> None:
|
||||
"""Agent with list containing A2AServerConfig should get to_agent_card."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=[
|
||||
A2AClientConfig(endpoint="http://example.com"),
|
||||
A2AServerConfig(name="My Agent"),
|
||||
],
|
||||
)
|
||||
|
||||
assert hasattr(agent, "to_agent_card")
|
||||
assert callable(agent.to_agent_card)
|
||||
|
||||
def test_manual_injection_on_plain_agent(self) -> None:
|
||||
"""inject_a2a_server_methods should work when called manually."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
)
|
||||
# Manually set server config and inject
|
||||
object.__setattr__(agent, "a2a", A2AServerConfig())
|
||||
inject_a2a_server_methods(agent)
|
||||
|
||||
assert hasattr(agent, "to_agent_card")
|
||||
assert callable(agent.to_agent_card)
|
||||
|
||||
|
||||
class TestToAgentCard:
|
||||
"""Tests for the injected to_agent_card method."""
|
||||
|
||||
def test_returns_agent_card(self) -> None:
|
||||
"""to_agent_card should return an AgentCard instance."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AServerConfig(),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
|
||||
assert isinstance(card, AgentCard)
|
||||
|
||||
def test_uses_agent_role_as_name(self) -> None:
|
||||
"""AgentCard name should default to agent role."""
|
||||
agent = Agent(
|
||||
role="Data Analyst",
|
||||
goal="Analyze data",
|
||||
backstory="Expert analyst",
|
||||
a2a=A2AServerConfig(),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
|
||||
assert card.name == "Data Analyst"
|
||||
|
||||
def test_uses_server_config_name(self) -> None:
|
||||
"""AgentCard name should prefer A2AServerConfig.name over role."""
|
||||
agent = Agent(
|
||||
role="Data Analyst",
|
||||
goal="Analyze data",
|
||||
backstory="Expert analyst",
|
||||
a2a=A2AServerConfig(name="Custom Agent Name"),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
|
||||
assert card.name == "Custom Agent Name"
|
||||
|
||||
def test_uses_goal_as_description(self) -> None:
|
||||
"""AgentCard description should include agent goal."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Accomplish important tasks",
|
||||
backstory="Has extensive experience",
|
||||
a2a=A2AServerConfig(),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
|
||||
assert "Accomplish important tasks" in card.description
|
||||
|
||||
def test_uses_server_config_description(self) -> None:
|
||||
"""AgentCard description should prefer A2AServerConfig.description."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Accomplish important tasks",
|
||||
backstory="Has extensive experience",
|
||||
a2a=A2AServerConfig(description="Custom description"),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
|
||||
assert card.description == "Custom description"
|
||||
|
||||
def test_uses_provided_url(self) -> None:
|
||||
"""AgentCard url should use the provided URL."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AServerConfig(),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://my-server.com:9000")
|
||||
|
||||
assert card.url == "http://my-server.com:9000"
|
||||
|
||||
def test_uses_server_config_url(self) -> None:
|
||||
"""AgentCard url should prefer A2AServerConfig.url over provided URL."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AServerConfig(url="http://configured-url.com"),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://fallback-url.com")
|
||||
|
||||
assert card.url == "http://configured-url.com/"
|
||||
|
||||
def test_generates_default_skill(self) -> None:
|
||||
"""AgentCard should have at least one skill based on agent role."""
|
||||
agent = Agent(
|
||||
role="Research Assistant",
|
||||
goal="Help with research",
|
||||
backstory="Skilled researcher",
|
||||
a2a=A2AServerConfig(),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
|
||||
assert len(card.skills) >= 1
|
||||
skill = card.skills[0]
|
||||
assert skill.name == "Research Assistant"
|
||||
assert skill.description == "Help with research"
|
||||
|
||||
def test_uses_server_config_skills(self) -> None:
|
||||
"""AgentCard skills should prefer A2AServerConfig.skills."""
|
||||
custom_skill = AgentSkill(
|
||||
id="custom-skill",
|
||||
name="Custom Skill",
|
||||
description="A custom skill",
|
||||
tags=["custom"],
|
||||
)
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AServerConfig(skills=[custom_skill]),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
|
||||
assert len(card.skills) == 1
|
||||
assert card.skills[0].id == "custom-skill"
|
||||
assert card.skills[0].name == "Custom Skill"
|
||||
|
||||
def test_includes_custom_version(self) -> None:
|
||||
"""AgentCard should include version from A2AServerConfig."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AServerConfig(version="2.0.0"),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
|
||||
assert card.version == "2.0.0"
|
||||
|
||||
def test_default_version(self) -> None:
|
||||
"""AgentCard should have default version 1.0.0."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AServerConfig(),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
|
||||
assert card.version == "1.0.0"
|
||||
|
||||
|
||||
class TestAgentCardJsonStructure:
|
||||
"""Tests for the JSON structure of AgentCard."""
|
||||
|
||||
def test_json_has_required_fields(self) -> None:
|
||||
"""AgentCard JSON should contain all required A2A protocol fields."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AServerConfig(),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
json_data = card.model_dump()
|
||||
|
||||
assert "name" in json_data
|
||||
assert "description" in json_data
|
||||
assert "url" in json_data
|
||||
assert "version" in json_data
|
||||
assert "skills" in json_data
|
||||
assert "capabilities" in json_data
|
||||
assert "defaultInputModes" in json_data
|
||||
assert "defaultOutputModes" in json_data
|
||||
|
||||
def test_json_skills_structure(self) -> None:
|
||||
"""Each skill in JSON should have required fields."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AServerConfig(),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
json_data = card.model_dump()
|
||||
|
||||
assert len(json_data["skills"]) >= 1
|
||||
skill = json_data["skills"][0]
|
||||
assert "id" in skill
|
||||
assert "name" in skill
|
||||
assert "description" in skill
|
||||
assert "tags" in skill
|
||||
|
||||
def test_json_capabilities_structure(self) -> None:
|
||||
"""Capabilities in JSON should have expected fields."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AServerConfig(),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
json_data = card.model_dump()
|
||||
|
||||
capabilities = json_data["capabilities"]
|
||||
assert "streaming" in capabilities
|
||||
assert "pushNotifications" in capabilities
|
||||
|
||||
def test_json_serializable(self) -> None:
|
||||
"""AgentCard should be JSON serializable."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AServerConfig(),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
json_str = card.model_dump_json()
|
||||
|
||||
assert isinstance(json_str, str)
|
||||
assert "Test Agent" in json_str
|
||||
assert "http://localhost:8000" in json_str
|
||||
|
||||
def test_json_excludes_none_values(self) -> None:
|
||||
"""AgentCard JSON with exclude_none should omit None fields."""
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
a2a=A2AServerConfig(),
|
||||
)
|
||||
|
||||
card = agent.to_agent_card("http://localhost:8000")
|
||||
json_data = card.model_dump(exclude_none=True)
|
||||
|
||||
assert "provider" not in json_data
|
||||
assert "documentationUrl" not in json_data
|
||||
assert "iconUrl" not in json_data
|
||||
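
Reading the assertions above together, the serialized agent card has roughly this shape. The keys are taken from the tests; the values are illustrative:

# Hypothetical card JSON implied by the tests:
# {
#     "name": "Test Agent",                    # role, or A2AServerConfig.name
#     "description": "Test goal",              # goal, or A2AServerConfig.description
#     "url": "http://localhost:8000",          # provided URL, or A2AServerConfig.url
#     "version": "1.0.0",
#     "skills": [{"id": "...", "name": "Test Agent", "description": "Test goal", "tags": []}],
#     "capabilities": {"streaming": ..., "pushNotifications": ...},
#     "defaultInputModes": [...],
#     "defaultOutputModes": [...],
# }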

lib/crewai/tests/a2a/utils/test_task.py (new file, 374 lines)
@@ -0,0 +1,374 @@
"""Tests for A2A task utilities."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from typing import Any
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
import pytest_asyncio
|
||||
from a2a.server.agent_execution import RequestContext
|
||||
from a2a.server.events import EventQueue
|
||||
from a2a.types import Message, Task as A2ATask, TaskState, TaskStatus
|
||||
|
||||
from crewai.a2a.utils.task import cancel, cancellable, execute
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_agent() -> MagicMock:
|
||||
"""Create a mock CrewAI agent."""
|
||||
agent = MagicMock()
|
||||
agent.role = "Test Agent"
|
||||
agent.tools = []
|
||||
agent.aexecute_task = AsyncMock(return_value="Task completed successfully")
|
||||
return agent
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_task(mock_context: MagicMock) -> MagicMock:
|
||||
"""Create a mock Task."""
|
||||
task = MagicMock()
|
||||
task.id = mock_context.task_id
|
||||
task.name = "Mock Task"
|
||||
task.description = "Mock task description"
|
||||
return task
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_context() -> MagicMock:
|
||||
"""Create a mock RequestContext."""
|
||||
context = MagicMock(spec=RequestContext)
|
||||
context.task_id = "test-task-123"
|
||||
context.context_id = "test-context-456"
|
||||
context.get_user_input.return_value = "Test user message"
|
||||
context.message = MagicMock(spec=Message)
|
||||
context.current_task = None
|
||||
return context
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_event_queue() -> AsyncMock:
|
||||
"""Create a mock EventQueue."""
|
||||
queue = AsyncMock(spec=EventQueue)
|
||||
queue.enqueue_event = AsyncMock()
|
||||
return queue
|
||||
|
||||
|
||||
@pytest_asyncio.fixture(autouse=True)
|
||||
async def clear_cache(mock_context: MagicMock) -> None:
|
||||
"""Clear cancel flag from cache before each test."""
|
||||
from aiocache import caches
|
||||
|
||||
cache = caches.get("default")
|
||||
await cache.delete(f"cancel:{mock_context.task_id}")
|
||||
|
||||
|
||||
class TestCancellableDecorator:
|
||||
"""Tests for the cancellable decorator."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_executes_function_without_context(self) -> None:
|
||||
"""Function executes normally when no RequestContext is provided."""
|
||||
call_count = 0
|
||||
|
||||
@cancellable
|
||||
async def my_func(value: int) -> int:
|
||||
nonlocal call_count
|
||||
call_count += 1
|
||||
return value * 2
|
||||
|
||||
result = await my_func(5)
|
||||
|
||||
assert result == 10
|
||||
assert call_count == 1
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_executes_function_with_context(self, mock_context: MagicMock) -> None:
|
||||
"""Function executes normally with RequestContext when not cancelled."""
|
||||
@cancellable
|
||||
async def my_func(context: RequestContext) -> str:
|
||||
await asyncio.sleep(0.01)
|
||||
return "completed"
|
||||
|
||||
result = await my_func(mock_context)
|
||||
|
||||
assert result == "completed"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_cancellation_raises_cancelled_error(
|
||||
self, mock_context: MagicMock
|
||||
) -> None:
|
||||
"""Function raises CancelledError when cancel flag is set."""
|
||||
from aiocache import caches
|
||||
|
||||
cache = caches.get("default")
|
||||
|
||||
@cancellable
|
||||
async def slow_func(context: RequestContext) -> str:
|
||||
await asyncio.sleep(1.0)
|
||||
return "should not reach"
|
||||
|
||||
await cache.set(f"cancel:{mock_context.task_id}", True)
|
||||
|
||||
with pytest.raises(asyncio.CancelledError):
|
||||
await slow_func(mock_context)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_cleanup_removes_cancel_flag(self, mock_context: MagicMock) -> None:
|
||||
"""Cancel flag is cleaned up after execution."""
|
||||
from aiocache import caches
|
||||
|
||||
cache = caches.get("default")
|
||||
|
||||
@cancellable
|
||||
async def quick_func(context: RequestContext) -> str:
|
||||
return "done"
|
||||
|
||||
await quick_func(mock_context)
|
||||
|
||||
flag = await cache.get(f"cancel:{mock_context.task_id}")
|
||||
assert flag is None
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_extracts_context_from_kwargs(self, mock_context: MagicMock) -> None:
|
||||
"""Context can be passed as keyword argument."""
|
||||
@cancellable
|
||||
async def my_func(value: int, context: RequestContext | None = None) -> int:
|
||||
return value + 1
|
||||
|
||||
result = await my_func(10, context=mock_context)
|
||||
|
||||
assert result == 11
|
||||
|
||||
|
||||
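
The tests above pin down the cancellable contract: a cache flag keyed by cancel:{task_id} aborts the wrapped coroutine with CancelledError, and the flag is cleared afterwards. A minimal sketch of that pattern follows; this is an assumption-laden illustration, not CrewAI's actual implementation, and the helper names are hypothetical:

import asyncio
import functools

from aiocache import caches


def cancellable_sketch(func):
    """Race the wrapped coroutine against a poll of the cancel flag."""

    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        # Find the RequestContext among the arguments, if any (hypothetical
        # extraction logic; the real decorator may differ).
        context = kwargs.get("context") or next(
            (a for a in args if hasattr(a, "task_id")), None
        )
        if context is None:
            return await func(*args, **kwargs)

        cache = caches.get("default")
        key = f"cancel:{context.task_id}"

        async def watch_flag():
            # Poll the shared cache until the cancel flag appears.
            while not await cache.get(key):
                await asyncio.sleep(0.05)

        work = asyncio.ensure_future(func(*args, **kwargs))
        watcher = asyncio.ensure_future(watch_flag())
        try:
            done, _ = await asyncio.wait(
                {work, watcher}, return_when=asyncio.FIRST_COMPLETED
            )
            if watcher in done:  # flag was set first: abort the work
                work.cancel()
                raise asyncio.CancelledError
            return work.result()
        finally:
            watcher.cancel()
            await cache.delete(key)  # cleanup, as the tests assert
    return wrapper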

class TestExecute:
    """Tests for the execute function."""

    @pytest.mark.asyncio
    async def test_successful_execution(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute completes successfully and enqueues completed task."""
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            await execute(mock_agent, mock_context, mock_event_queue)

        mock_agent.aexecute_task.assert_called_once()
        mock_event_queue.enqueue_event.assert_called_once()
        assert mock_bus.emit.call_count == 2

    @pytest.mark.asyncio
    async def test_emits_started_event(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute emits A2AServerTaskStartedEvent."""
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            await execute(mock_agent, mock_context, mock_event_queue)

        first_call = mock_bus.emit.call_args_list[0]
        event = first_call[0][1]

        assert event.type == "a2a_server_task_started"
        assert event.task_id == mock_context.task_id
        assert event.context_id == mock_context.context_id

    @pytest.mark.asyncio
    async def test_emits_completed_event(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute emits A2AServerTaskCompletedEvent on success."""
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            await execute(mock_agent, mock_context, mock_event_queue)

        second_call = mock_bus.emit.call_args_list[1]
        event = second_call[0][1]

        assert event.type == "a2a_server_task_completed"
        assert event.task_id == mock_context.task_id
        assert event.result == "Task completed successfully"

    @pytest.mark.asyncio
    async def test_emits_failed_event_on_exception(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute emits A2AServerTaskFailedEvent on exception."""
        mock_agent.aexecute_task = AsyncMock(side_effect=ValueError("Test error"))

        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            with pytest.raises(Exception):
                await execute(mock_agent, mock_context, mock_event_queue)

        failed_call = mock_bus.emit.call_args_list[1]
        event = failed_call[0][1]

        assert event.type == "a2a_server_task_failed"
        assert "Test error" in event.error

    @pytest.mark.asyncio
    async def test_emits_canceled_event_on_cancellation(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute emits A2AServerTaskCanceledEvent on CancelledError."""
        mock_agent.aexecute_task = AsyncMock(side_effect=asyncio.CancelledError())

        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            with pytest.raises(asyncio.CancelledError):
                await execute(mock_agent, mock_context, mock_event_queue)

        canceled_call = mock_bus.emit.call_args_list[1]
        event = canceled_call[0][1]

        assert event.type == "a2a_server_task_canceled"
        assert event.task_id == mock_context.task_id


class TestCancel:
    """Tests for the cancel function."""

    @pytest.mark.asyncio
    async def test_sets_cancel_flag_in_cache(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel sets the cancel flag in cache."""
        from aiocache import caches

        cache = caches.get("default")

        await cancel(mock_context, mock_event_queue)

        flag = await cache.get(f"cancel:{mock_context.task_id}")
        assert flag is True

    @pytest.mark.asyncio
    async def test_enqueues_task_status_update_event(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel enqueues TaskStatusUpdateEvent with canceled state."""
        await cancel(mock_context, mock_event_queue)

        mock_event_queue.enqueue_event.assert_called_once()
        event = mock_event_queue.enqueue_event.call_args[0][0]

        assert event.task_id == mock_context.task_id
        assert event.context_id == mock_context.context_id
        assert event.status.state == TaskState.canceled
        assert event.final is True

    @pytest.mark.asyncio
    async def test_returns_none_when_no_current_task(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel returns None when context has no current_task."""
        mock_context.current_task = None

        result = await cancel(mock_context, mock_event_queue)

        assert result is None

    @pytest.mark.asyncio
    async def test_returns_updated_task_when_current_task_exists(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel returns updated task when context has current_task."""
        current_task = MagicMock(spec=A2ATask)
        current_task.status = TaskStatus(state=TaskState.working)
        mock_context.current_task = current_task

        result = await cancel(mock_context, mock_event_queue)

        assert result is current_task
        assert result.status.state == TaskState.canceled

    @pytest.mark.asyncio
    async def test_cleanup_after_cancel(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel flag persists for cancellable decorator to detect."""
        from aiocache import caches

        cache = caches.get("default")

        await cancel(mock_context, mock_event_queue)

        flag = await cache.get(f"cancel:{mock_context.task_id}")
        assert flag is True

        await cache.delete(f"cancel:{mock_context.task_id}")


class TestExecuteAndCancelIntegration:
    """Integration tests for execute and cancel working together."""

    @pytest.mark.asyncio
    async def test_cancel_stops_running_execute(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Calling cancel stops a running execute."""
        async def slow_task(**kwargs: Any) -> str:
            await asyncio.sleep(2.0)
            return "should not complete"

        mock_agent.aexecute_task = slow_task

        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus"),
        ):
            execute_task = asyncio.create_task(
                execute(mock_agent, mock_context, mock_event_queue)
            )

            await asyncio.sleep(0.1)
            await cancel(mock_context, mock_event_queue)

            with pytest.raises(asyncio.CancelledError):
                await execute_task

@@ -14,6 +14,16 @@ except ImportError:
    A2A_SDK_INSTALLED = False


+def _create_mock_agent_card(name: str = "Test", url: str = "http://test-endpoint.com/"):
+    """Create a mock agent card with proper model_dump behavior."""
+    mock_card = MagicMock()
+    mock_card.name = name
+    mock_card.url = url
+    mock_card.model_dump.return_value = {"name": name, "url": url}
+    mock_card.model_dump_json.return_value = f'{{"name": "{name}", "url": "{url}"}}'
+    return mock_card


@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_trust_remote_completion_status_true_returns_directly():
    """When trust_remote_completion_status=True and A2A returns completed, return result directly."""
@@ -44,8 +54,7 @@ def test_trust_remote_completion_status_true_returns_directly():
        patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
        patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
    ):
-        mock_card = MagicMock()
-        mock_card.name = "Test"
+        mock_card = _create_mock_agent_card()
        mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})

        # A2A returns completed
@@ -110,8 +119,7 @@ def test_trust_remote_completion_status_false_continues_conversation():
        patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
        patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
    ):
-        mock_card = MagicMock()
-        mock_card.name = "Test"
+        mock_card = _create_mock_agent_card()
        mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})

        # A2A returns completed

@@ -1,4 +1,4 @@
-"""Unit tests for CrewAgentExecutorFlow.
+"""Unit tests for AgentExecutor.

Tests the Flow-based agent executor implementation including state management,
flow methods, routing logic, and error handling.
@@ -8,9 +8,9 @@ from unittest.mock import Mock, patch

import pytest

-from crewai.experimental.crew_agent_executor_flow import (
+from crewai.experimental.agent_executor import (
    AgentReActState,
-    CrewAgentExecutorFlow,
+    AgentExecutor,
)
from crewai.agents.parser import AgentAction, AgentFinish

@@ -43,8 +43,8 @@ class TestAgentReActState:
        assert state.ask_for_human_input is True


-class TestCrewAgentExecutorFlow:
-    """Test CrewAgentExecutorFlow class."""
+class TestAgentExecutor:
+    """Test AgentExecutor class."""

    @pytest.fixture
    def mock_dependencies(self):
@@ -87,8 +87,8 @@ class TestCrewAgentExecutorFlow:
        }

    def test_executor_initialization(self, mock_dependencies):
-        """Test CrewAgentExecutorFlow initialization."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        """Test AgentExecutor initialization."""
+        executor = AgentExecutor(**mock_dependencies)

        assert executor.llm == mock_dependencies["llm"]
        assert executor.task == mock_dependencies["task"]
@@ -100,9 +100,9 @@ class TestCrewAgentExecutorFlow:
    def test_initialize_reasoning(self, mock_dependencies):
        """Test flow entry point."""
        with patch.object(
-            CrewAgentExecutorFlow, "_show_start_logs"
+            AgentExecutor, "_show_start_logs"
        ) as mock_show_start:
-            executor = CrewAgentExecutorFlow(**mock_dependencies)
+            executor = AgentExecutor(**mock_dependencies)
            result = executor.initialize_reasoning()

            assert result == "initialized"
@@ -110,7 +110,7 @@ class TestCrewAgentExecutorFlow:

    def test_check_max_iterations_not_reached(self, mock_dependencies):
        """Test routing when iterations < max."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        executor.state.iterations = 5

        result = executor.check_max_iterations()
@@ -118,7 +118,7 @@ class TestCrewAgentExecutorFlow:

    def test_check_max_iterations_reached(self, mock_dependencies):
        """Test routing when iterations >= max."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        executor.state.iterations = 10

        result = executor.check_max_iterations()
@@ -126,7 +126,7 @@ class TestCrewAgentExecutorFlow:

    def test_route_by_answer_type_action(self, mock_dependencies):
        """Test routing for AgentAction."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        executor.state.current_answer = AgentAction(
            thought="thinking", tool="search", tool_input="query", text="action text"
        )
@@ -136,7 +136,7 @@ class TestCrewAgentExecutorFlow:

    def test_route_by_answer_type_finish(self, mock_dependencies):
        """Test routing for AgentFinish."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        executor.state.current_answer = AgentFinish(
            thought="final thoughts", output="Final answer", text="complete"
        )
@@ -146,7 +146,7 @@ class TestCrewAgentExecutorFlow:

    def test_continue_iteration(self, mock_dependencies):
        """Test iteration continuation."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)

        result = executor.continue_iteration()

@@ -154,8 +154,8 @@ class TestCrewAgentExecutorFlow:

    def test_finalize_success(self, mock_dependencies):
        """Test finalize with valid AgentFinish."""
-        with patch.object(CrewAgentExecutorFlow, "_show_logs") as mock_show_logs:
-            executor = CrewAgentExecutorFlow(**mock_dependencies)
+        with patch.object(AgentExecutor, "_show_logs") as mock_show_logs:
+            executor = AgentExecutor(**mock_dependencies)
            executor.state.current_answer = AgentFinish(
                thought="final thinking", output="Done", text="complete"
            )
@@ -168,7 +168,7 @@ class TestCrewAgentExecutorFlow:

    def test_finalize_failure(self, mock_dependencies):
        """Test finalize skips when given AgentAction instead of AgentFinish."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        executor.state.current_answer = AgentAction(
            thought="thinking", tool="search", tool_input="query", text="action text"
        )
@@ -181,7 +181,7 @@ class TestCrewAgentExecutorFlow:

    def test_format_prompt(self, mock_dependencies):
        """Test prompt formatting."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        inputs = {"input": "test input", "tool_names": "tool1, tool2", "tools": "desc"}

        result = executor._format_prompt("Prompt {input} {tool_names} {tools}", inputs)
@@ -192,18 +192,18 @@ class TestCrewAgentExecutorFlow:

    def test_is_training_mode_false(self, mock_dependencies):
        """Test training mode detection when not in training."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        assert executor._is_training_mode() is False

    def test_is_training_mode_true(self, mock_dependencies):
        """Test training mode detection when in training."""
        mock_dependencies["crew"]._train = True
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        assert executor._is_training_mode() is True

    def test_append_message_to_state(self, mock_dependencies):
        """Test message appending to state."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        initial_count = len(executor.state.messages)

        executor._append_message_to_state("test message")
@@ -216,7 +216,7 @@ class TestCrewAgentExecutorFlow:
        callback = Mock()
        mock_dependencies["step_callback"] = callback

-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        answer = AgentFinish(thought="thinking", output="test", text="final")

        executor._invoke_step_callback(answer)
@@ -226,14 +226,14 @@ class TestCrewAgentExecutorFlow:
    def test_invoke_step_callback_none(self, mock_dependencies):
        """Test step callback when none provided."""
        mock_dependencies["step_callback"] = None
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)

        # Should not raise error
        executor._invoke_step_callback(
            AgentFinish(thought="thinking", output="test", text="final")
        )

-    @patch("crewai.experimental.crew_agent_executor_flow.handle_output_parser_exception")
+    @patch("crewai.experimental.agent_executor.handle_output_parser_exception")
    def test_recover_from_parser_error(
        self, mock_handle_exception, mock_dependencies
    ):
@@ -242,7 +242,7 @@ class TestCrewAgentExecutorFlow:

        mock_handle_exception.return_value = None

-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        executor._last_parser_error = OutputParserError("test error")
        initial_iterations = executor.state.iterations

@@ -252,12 +252,12 @@ class TestCrewAgentExecutorFlow:
        assert executor.state.iterations == initial_iterations + 1
        mock_handle_exception.assert_called_once()

-    @patch("crewai.experimental.crew_agent_executor_flow.handle_context_length")
+    @patch("crewai.experimental.agent_executor.handle_context_length")
    def test_recover_from_context_length(
        self, mock_handle_context, mock_dependencies
    ):
        """Test recovery from context length error."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        executor._last_context_error = Exception("context too long")
        initial_iterations = executor.state.iterations

@@ -270,16 +270,16 @@ class TestCrewAgentExecutorFlow:
    def test_use_stop_words_property(self, mock_dependencies):
        """Test use_stop_words property."""
        mock_dependencies["llm"].supports_stop_words.return_value = True
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        assert executor.use_stop_words is True

        mock_dependencies["llm"].supports_stop_words.return_value = False
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        assert executor.use_stop_words is False

    def test_compatibility_properties(self, mock_dependencies):
        """Test compatibility properties for mixin."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        executor.state.messages = [{"role": "user", "content": "test"}]
        executor.state.iterations = 5

@@ -321,8 +321,8 @@ class TestFlowErrorHandling:
        "tools_handler": Mock(),
    }

-    @patch("crewai.experimental.crew_agent_executor_flow.get_llm_response")
-    @patch("crewai.experimental.crew_agent_executor_flow.enforce_rpm_limit")
+    @patch("crewai.experimental.agent_executor.get_llm_response")
+    @patch("crewai.experimental.agent_executor.enforce_rpm_limit")
    def test_call_llm_parser_error(
        self, mock_enforce_rpm, mock_get_llm, mock_dependencies
    ):
@@ -332,15 +332,15 @@ class TestFlowErrorHandling:
        mock_enforce_rpm.return_value = None
        mock_get_llm.side_effect = OutputParserError("parse failed")

-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        result = executor.call_llm_and_parse()

        assert result == "parser_error"
        assert executor._last_parser_error is not None

-    @patch("crewai.experimental.crew_agent_executor_flow.get_llm_response")
-    @patch("crewai.experimental.crew_agent_executor_flow.enforce_rpm_limit")
-    @patch("crewai.experimental.crew_agent_executor_flow.is_context_length_exceeded")
+    @patch("crewai.experimental.agent_executor.get_llm_response")
+    @patch("crewai.experimental.agent_executor.enforce_rpm_limit")
+    @patch("crewai.experimental.agent_executor.is_context_length_exceeded")
    def test_call_llm_context_error(
        self,
        mock_is_context_exceeded,
@@ -353,7 +353,7 @@ class TestFlowErrorHandling:
        mock_get_llm.side_effect = Exception("context length")
        mock_is_context_exceeded.return_value = True

-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        result = executor.call_llm_and_parse()

        assert result == "context_error"
@@ -397,10 +397,10 @@ class TestFlowInvoke:
        "tools_handler": Mock(),
    }

-    @patch.object(CrewAgentExecutorFlow, "kickoff")
-    @patch.object(CrewAgentExecutorFlow, "_create_short_term_memory")
-    @patch.object(CrewAgentExecutorFlow, "_create_long_term_memory")
-    @patch.object(CrewAgentExecutorFlow, "_create_external_memory")
+    @patch.object(AgentExecutor, "kickoff")
+    @patch.object(AgentExecutor, "_create_short_term_memory")
+    @patch.object(AgentExecutor, "_create_long_term_memory")
+    @patch.object(AgentExecutor, "_create_external_memory")
    def test_invoke_success(
        self,
        mock_external_memory,
@@ -410,7 +410,7 @@ class TestFlowInvoke:
        mock_dependencies,
    ):
        """Test successful invoke without human feedback."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)

        # Mock kickoff to set the final answer in state
        def mock_kickoff_side_effect():
@@ -429,10 +429,10 @@ class TestFlowInvoke:
        mock_long_term_memory.assert_called_once()
        mock_external_memory.assert_called_once()

-    @patch.object(CrewAgentExecutorFlow, "kickoff")
+    @patch.object(AgentExecutor, "kickoff")
    def test_invoke_failure_no_agent_finish(self, mock_kickoff, mock_dependencies):
        """Test invoke fails without AgentFinish."""
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)
        executor.state.current_answer = AgentAction(
            thought="thinking", tool="test", tool_input="test", text="action text"
        )
@@ -442,10 +442,10 @@ class TestFlowInvoke:
        with pytest.raises(RuntimeError, match="without reaching a final answer"):
            executor.invoke(inputs)

-    @patch.object(CrewAgentExecutorFlow, "kickoff")
-    @patch.object(CrewAgentExecutorFlow, "_create_short_term_memory")
-    @patch.object(CrewAgentExecutorFlow, "_create_long_term_memory")
-    @patch.object(CrewAgentExecutorFlow, "_create_external_memory")
+    @patch.object(AgentExecutor, "kickoff")
+    @patch.object(AgentExecutor, "_create_short_term_memory")
+    @patch.object(AgentExecutor, "_create_long_term_memory")
+    @patch.object(AgentExecutor, "_create_external_memory")
    def test_invoke_with_system_prompt(
        self,
        mock_external_memory,
@@ -459,7 +459,7 @@ class TestFlowInvoke:
            "system": "System: {input}",
            "user": "User: {input} {tool_names} {tools}",
        }
-        executor = CrewAgentExecutorFlow(**mock_dependencies)
+        executor = AgentExecutor(**mock_dependencies)

        def mock_kickoff_side_effect():
            executor.state.current_answer = AgentFinish(

@@ -72,62 +72,53 @@ class ResearchResult(BaseModel):

@pytest.mark.vcr()
@pytest.mark.parametrize("verbose", [True, False])
-def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose):
-    """Test that LiteAgent is created with the correct parameters when Agent.kickoff() is called."""
+def test_agent_kickoff_preserves_parameters(verbose):
+    """Test that Agent.kickoff() uses the correct parameters from the Agent."""
    # Create a test agent with specific parameters
-    llm = LLM(model="gpt-4o-mini")
+    mock_llm = Mock(spec=LLM)
+    mock_llm.call.return_value = "Final Answer: Test response"
+    mock_llm.stop = []

+    from crewai.types.usage_metrics import UsageMetrics

+    mock_usage_metrics = UsageMetrics(
+        total_tokens=100,
+        prompt_tokens=50,
+        completion_tokens=50,
+        cached_prompt_tokens=0,
+        successful_requests=1,
+    )
+    mock_llm.get_token_usage_summary.return_value = mock_usage_metrics

    custom_tools = [WebSearchTool(), CalculatorTool()]
    max_iter = 10
    max_execution_time = 300

    agent = Agent(
        role="Test Agent",
        goal="Test Goal",
        backstory="Test Backstory",
-        llm=llm,
+        llm=mock_llm,
        tools=custom_tools,
        max_iter=max_iter,
        max_execution_time=max_execution_time,
        verbose=verbose,
    )

-    # Create a mock to capture the created LiteAgent
-    created_lite_agent = None
-    original_lite_agent = LiteAgent
+    # Call kickoff and verify it works
+    result = agent.kickoff("Test query")

-    # Define a mock LiteAgent class that captures its arguments
-    class MockLiteAgent(original_lite_agent):
-        def __init__(self, **kwargs):
-            nonlocal created_lite_agent
-            created_lite_agent = kwargs
-            super().__init__(**kwargs)
+    # Verify the agent was configured correctly
+    assert agent.role == "Test Agent"
+    assert agent.goal == "Test Goal"
+    assert agent.backstory == "Test Backstory"
+    assert len(agent.tools) == 2
+    assert isinstance(agent.tools[0], WebSearchTool)
+    assert isinstance(agent.tools[1], CalculatorTool)
+    assert agent.max_iter == max_iter
+    assert agent.verbose == verbose

-    # Patch the LiteAgent class
-    monkeypatch.setattr("crewai.agent.core.LiteAgent", MockLiteAgent)
-
-    # Call kickoff to create the LiteAgent
-    agent.kickoff("Test query")
-
-    # Verify all parameters were passed correctly
-    assert created_lite_agent is not None
-    assert created_lite_agent["role"] == "Test Agent"
-    assert created_lite_agent["goal"] == "Test Goal"
-    assert created_lite_agent["backstory"] == "Test Backstory"
-    assert created_lite_agent["llm"] == llm
-    assert len(created_lite_agent["tools"]) == 2
-    assert isinstance(created_lite_agent["tools"][0], WebSearchTool)
-    assert isinstance(created_lite_agent["tools"][1], CalculatorTool)
-    assert created_lite_agent["max_iterations"] == max_iter
-    assert created_lite_agent["max_execution_time"] == max_execution_time
-    assert created_lite_agent["verbose"] == verbose
-    assert created_lite_agent["response_format"] is None
-
-    # Test with a response_format
-    class TestResponse(BaseModel):
-        test_field: str
-
-    agent.kickoff("Test query", response_format=TestResponse)
-    assert created_lite_agent["response_format"] == TestResponse
+    # Verify kickoff returned a result
+    assert result is not None
+    assert result.raw is not None


@pytest.mark.vcr()
@@ -310,7 +301,8 @@ def verify_agent_parent_flow(result, agent, flow):


def test_sets_parent_flow_when_inside_flow():
-    captured_agent = None
+    """Test that an Agent can be created and executed inside a Flow context."""
+    captured_event = None

    mock_llm = Mock(spec=LLM)
    mock_llm.call.return_value = "Test response"
@@ -343,15 +335,17 @@ def test_sets_parent_flow_when_inside_flow():
    event_received = threading.Event()

    @crewai_event_bus.on(LiteAgentExecutionStartedEvent)
-    def capture_agent(source, event):
-        nonlocal captured_agent
-        captured_agent = source
+    def capture_event(source, event):
+        nonlocal captured_event
+        captured_event = event
        event_received.set()

-    flow.kickoff()
+    result = flow.kickoff()

    assert event_received.wait(timeout=5), "Timeout waiting for agent execution event"
-    assert captured_agent.parent_flow is flow
+    assert captured_event is not None
+    assert captured_event.agent_info["role"] == "Test Agent"
+    assert result is not None


@pytest.mark.vcr()
@@ -373,16 +367,14 @@ def test_guardrail_is_called_using_string():

    @crewai_event_bus.on(LLMGuardrailStartedEvent)
    def capture_guardrail_started(source, event):
-        assert isinstance(source, LiteAgent)
-        assert source.original_agent == agent
+        assert isinstance(source, Agent)
        with condition:
            guardrail_events["started"].append(event)
            condition.notify()

    @crewai_event_bus.on(LLMGuardrailCompletedEvent)
    def capture_guardrail_completed(source, event):
-        assert isinstance(source, LiteAgent)
-        assert source.original_agent == agent
+        assert isinstance(source, Agent)
        with condition:
            guardrail_events["completed"].append(event)
            condition.notify()
@@ -683,3 +675,151 @@ def test_agent_kickoff_with_mcp_tools(mock_get_mcp_tools):

    # Verify MCP tools were retrieved
    mock_get_mcp_tools.assert_called_once_with("https://mcp.exa.ai/mcp?api_key=test_exa_key&profile=research")


# ============================================================================
# Tests for LiteAgent inside Flow (magic auto-async pattern)
# ============================================================================

from crewai.flow.flow import listen


@pytest.mark.vcr()
def test_lite_agent_inside_flow_sync():
    """Test that LiteAgent.kickoff() works magically inside a Flow.

    This tests the "magic auto-async" pattern where calling agent.kickoff()
    from within a Flow automatically detects the event loop and returns a
    coroutine that the Flow framework awaits. Users don't need to use async/await.
    """
    # Track execution
    execution_log = []

    class TestFlow(Flow):
        @start()
        def run_agent(self):
            execution_log.append("flow_started")
            agent = Agent(
                role="Test Agent",
                goal="Answer questions",
                backstory="A helpful test assistant",
                llm=LLM(model="gpt-4o-mini"),
                verbose=False,
            )
            # Magic: just call kickoff() normally - it auto-detects Flow context
            result = agent.kickoff(messages="What is 2+2? Reply with just the number.")
            execution_log.append("agent_completed")
            return result

    flow = TestFlow()
    result = flow.kickoff()

    # Verify the flow executed successfully
    assert "flow_started" in execution_log
    assert "agent_completed" in execution_log
    assert result is not None
    assert isinstance(result, LiteAgentOutput)


@pytest.mark.vcr()
def test_lite_agent_inside_flow_with_tools():
    """Test that LiteAgent with tools works correctly inside a Flow."""
    class TestFlow(Flow):
        @start()
        def run_agent_with_tools(self):
            agent = Agent(
                role="Calculator Agent",
                goal="Perform calculations",
                backstory="A math expert",
                llm=LLM(model="gpt-4o-mini"),
                tools=[CalculatorTool()],
                verbose=False,
            )
            result = agent.kickoff(messages="Calculate 10 * 5")
            return result

    flow = TestFlow()
    result = flow.kickoff()

    assert result is not None
    assert isinstance(result, LiteAgentOutput)
    assert result.raw is not None


@pytest.mark.vcr()
def test_multiple_agents_in_same_flow():
    """Test that multiple LiteAgents can run sequentially in the same Flow."""
    class MultiAgentFlow(Flow):
        @start()
        def first_step(self):
            agent1 = Agent(
                role="First Agent",
                goal="Greet users",
                backstory="A friendly greeter",
                llm=LLM(model="gpt-4o-mini"),
                verbose=False,
            )
            return agent1.kickoff(messages="Say hello")

        @listen(first_step)
        def second_step(self, first_result):
            agent2 = Agent(
                role="Second Agent",
                goal="Say goodbye",
                backstory="A polite farewell agent",
                llm=LLM(model="gpt-4o-mini"),
                verbose=False,
            )
            return agent2.kickoff(messages="Say goodbye")

    flow = MultiAgentFlow()
    result = flow.kickoff()

    assert result is not None
    assert isinstance(result, LiteAgentOutput)


@pytest.mark.vcr()
def test_lite_agent_kickoff_async_inside_flow():
    """Test that Agent.kickoff_async() works correctly from async Flow methods."""
    class AsyncAgentFlow(Flow):
        @start()
        async def async_agent_step(self):
            agent = Agent(
                role="Async Test Agent",
                goal="Answer questions asynchronously",
                backstory="An async helper",
                llm=LLM(model="gpt-4o-mini"),
                verbose=False,
            )
            result = await agent.kickoff_async(messages="What is 3+3?")
            return result

    flow = AsyncAgentFlow()
    result = flow.kickoff()

    assert result is not None
    assert isinstance(result, LiteAgentOutput)


@pytest.mark.vcr()
def test_lite_agent_standalone_still_works():
    """Test that LiteAgent.kickoff() still works normally outside of a Flow.

    This verifies that the magic auto-async pattern doesn't break standalone usage
    where there's no event loop running.
    """
    agent = Agent(
        role="Standalone Agent",
        goal="Answer questions",
        backstory="A helpful assistant",
        llm=LLM(model="gpt-4o-mini"),
        verbose=False,
    )

    # This should work normally - no Flow, no event loop
    result = agent.kickoff(messages="What is 5+5? Reply with just the number.")

    assert result is not None
    assert isinstance(result, LiteAgentOutput)
    assert result.raw is not None
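
The "magic auto-async" behavior these tests exercise boils down to checking for a running event loop. A minimal sketch of the idea follows; this is not CrewAI's actual implementation, and the _akickoff helper name is hypothetical:

import asyncio

def kickoff(self, messages):
    """Run sync when standalone; hand back an awaitable when inside a loop."""
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # Standalone: no loop is running, so drive the coroutine ourselves.
        return asyncio.run(self._akickoff(messages))
    # Inside a Flow (or any running loop): return the coroutine and let the
    # caller's framework await it, so users never write async/await here.
    return self._akickoff(messages)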

lib/crewai/tests/agents/test_native_tool_calling.py (new file, 479 lines)
@@ -0,0 +1,479 @@
"""Integration tests for native tool calling functionality.
|
||||
|
||||
These tests verify that agents can use native function calling
|
||||
when the LLM supports it, across multiple providers.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Any
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
import pytest
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai import Agent, Crew, Task
|
||||
from crewai.llm import LLM
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
|
||||
|
||||
# Check for optional provider availability
|
||||
try:
|
||||
import anthropic
|
||||
HAS_ANTHROPIC = True
|
||||
except ImportError:
|
||||
HAS_ANTHROPIC = False
|
||||
|
||||
try:
|
||||
import google.genai
|
||||
HAS_GOOGLE_GENAI = True
|
||||
except ImportError:
|
||||
HAS_GOOGLE_GENAI = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
class CalculatorInput(BaseModel):
|
||||
"""Input schema for calculator tool."""
|
||||
|
||||
expression: str = Field(description="Mathematical expression to evaluate")
|
||||
|
||||
|
||||
class CalculatorTool(BaseTool):
|
||||
"""A calculator tool that performs mathematical calculations."""
|
||||
|
||||
name: str = "calculator"
|
||||
description: str = "Perform mathematical calculations. Use this for any math operations."
|
||||
args_schema: type[BaseModel] = CalculatorInput
|
||||
|
||||
def _run(self, expression: str) -> str:
|
||||
"""Execute the calculation."""
|
||||
try:
|
||||
# Safe evaluation for basic math
|
||||
result = eval(expression) # noqa: S307
|
||||
return f"The result of {expression} is {result}"
|
||||
except Exception as e:
|
||||
return f"Error calculating {expression}: {e}"
|
||||
|
||||
|
||||
class WeatherInput(BaseModel):
|
||||
"""Input schema for weather tool."""
|
||||
|
||||
location: str = Field(description="City name to get weather for")
|
||||
|
||||
|
||||
class WeatherTool(BaseTool):
|
||||
"""A mock weather tool for testing."""
|
||||
|
||||
name: str = "get_weather"
|
||||
description: str = "Get the current weather for a location"
|
||||
args_schema: type[BaseModel] = WeatherInput
|
||||
|
||||
def _run(self, location: str) -> str:
|
||||
"""Get weather (mock implementation)."""
|
||||
return f"The weather in {location} is sunny with a temperature of 72°F"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def calculator_tool() -> CalculatorTool:
|
||||
"""Create a calculator tool for testing."""
|
||||
return CalculatorTool()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def weather_tool() -> WeatherTool:
|
||||
"""Create a weather tool for testing."""
|
||||
return WeatherTool()
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# OpenAI Provider Tests
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class TestOpenAINativeToolCalling:
|
||||
"""Tests for native tool calling with OpenAI models."""
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_openai_agent_with_native_tool_calling(
|
||||
self, calculator_tool: CalculatorTool
|
||||
) -> None:
|
||||
"""Test OpenAI agent can use native tool calling."""
|
||||
agent = Agent(
|
||||
role="Math Assistant",
|
||||
goal="Help users with mathematical calculations",
|
||||
backstory="You are a helpful math assistant.",
|
||||
tools=[calculator_tool],
|
||||
llm=LLM(model="gpt-4o-mini"),
|
||||
verbose=False,
|
||||
max_iter=3,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Calculate what is 15 * 8",
|
||||
expected_output="The result of the calculation",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
|
||||
assert result is not None
|
||||
assert result.raw is not None
|
||||
assert "120" in str(result.raw)
|
||||
|
||||
def test_openai_agent_kickoff_with_tools_mocked(
|
||||
self, calculator_tool: CalculatorTool
|
||||
) -> None:
|
||||
"""Test OpenAI agent kickoff with mocked LLM call."""
|
||||
llm = LLM(model="gpt-4o-mini")
|
||||
|
||||
with patch.object(llm, "call", return_value="The answer is 120.") as mock_call:
|
||||
agent = Agent(
|
||||
role="Math Assistant",
|
||||
goal="Calculate math",
|
||||
backstory="You calculate.",
|
||||
tools=[calculator_tool],
|
||||
llm=llm,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Calculate 15 * 8",
|
||||
expected_output="Result",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
|
||||
assert mock_call.called
|
||||
assert result is not None
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Anthropic Provider Tests
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@pytest.mark.skipif(not HAS_ANTHROPIC, reason="anthropic package not installed")
|
||||
class TestAnthropicNativeToolCalling:
|
||||
"""Tests for native tool calling with Anthropic models."""
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def mock_anthropic_api_key(self):
|
||||
"""Mock ANTHROPIC_API_KEY for tests."""
|
||||
if "ANTHROPIC_API_KEY" not in os.environ:
|
||||
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
|
||||
yield
|
||||
else:
|
||||
yield
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_anthropic_agent_with_native_tool_calling(
|
||||
self, calculator_tool: CalculatorTool
|
||||
) -> None:
|
||||
"""Test Anthropic agent can use native tool calling."""
|
||||
agent = Agent(
|
||||
role="Math Assistant",
|
||||
goal="Help users with mathematical calculations",
|
||||
backstory="You are a helpful math assistant.",
|
||||
tools=[calculator_tool],
|
||||
llm=LLM(model="anthropic/claude-3-5-haiku-20241022"),
|
||||
verbose=False,
|
||||
max_iter=3,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Calculate what is 15 * 8",
|
||||
expected_output="The result of the calculation",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
|
||||
assert result is not None
|
||||
assert result.raw is not None
|
||||
|
||||
def test_anthropic_agent_kickoff_with_tools_mocked(
|
||||
self, calculator_tool: CalculatorTool
|
||||
) -> None:
|
||||
"""Test Anthropic agent kickoff with mocked LLM call."""
|
||||
llm = LLM(model="anthropic/claude-3-5-haiku-20241022")
|
||||
|
||||
with patch.object(llm, "call", return_value="The answer is 120.") as mock_call:
|
||||
agent = Agent(
|
||||
role="Math Assistant",
|
||||
goal="Calculate math",
|
||||
backstory="You calculate.",
|
||||
tools=[calculator_tool],
|
||||
llm=llm,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Calculate 15 * 8",
|
||||
expected_output="Result",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
|
||||
assert mock_call.called
|
||||
assert result is not None
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Google/Gemini Provider Tests
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@pytest.mark.skipif(not HAS_GOOGLE_GENAI, reason="google-genai package not installed")
|
||||
class TestGeminiNativeToolCalling:
|
||||
"""Tests for native tool calling with Gemini models."""
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def mock_google_api_key(self):
|
||||
"""Mock GOOGLE_API_KEY for tests."""
|
||||
with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-key"}):
|
||||
yield
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_gemini_agent_with_native_tool_calling(
|
||||
self, calculator_tool: CalculatorTool
|
||||
) -> None:
|
||||
"""Test Gemini agent can use native tool calling."""
|
||||
agent = Agent(
|
||||
role="Math Assistant",
|
||||
goal="Help users with mathematical calculations",
|
||||
backstory="You are a helpful math assistant.",
|
||||
tools=[calculator_tool],
|
||||
llm=LLM(model="gemini/gemini-2.0-flash-001"),
|
||||
verbose=False,
|
||||
max_iter=3,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Calculate what is 15 * 8",
|
||||
expected_output="The result of the calculation",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
|
||||
assert result is not None
|
||||
assert result.raw is not None
|
||||
|
||||
def test_gemini_agent_kickoff_with_tools_mocked(
|
||||
self, calculator_tool: CalculatorTool
|
||||
) -> None:
|
||||
"""Test Gemini agent kickoff with mocked LLM call."""
|
||||
llm = LLM(model="gemini/gemini-2.0-flash-001")
|
||||
|
||||
with patch.object(llm, "call", return_value="The answer is 120.") as mock_call:
|
||||
agent = Agent(
|
||||
role="Math Assistant",
|
||||
goal="Calculate math",
|
||||
backstory="You calculate.",
|
||||
tools=[calculator_tool],
|
||||
llm=llm,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Calculate 15 * 8",
|
||||
expected_output="Result",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
|
||||
assert mock_call.called
|
||||
assert result is not None
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Azure Provider Tests
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class TestAzureNativeToolCalling:
|
||||
"""Tests for native tool calling with Azure OpenAI models."""
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def mock_azure_env(self):
|
||||
"""Mock Azure environment variables for tests."""
|
||||
env_vars = {
|
||||
"AZURE_API_KEY": "test-key",
|
||||
"AZURE_API_BASE": "https://test.openai.azure.com",
|
||||
"AZURE_API_VERSION": "2024-02-15-preview",
|
||||
}
|
||||
with patch.dict(os.environ, env_vars):
|
||||
yield
|
||||
|
||||
def test_azure_agent_kickoff_with_tools_mocked(
|
||||
self, calculator_tool: CalculatorTool
|
||||
) -> None:
|
||||
"""Test Azure agent kickoff with mocked LLM call."""
|
||||
llm = LLM(
|
||||
model="azure/gpt-4o-mini",
|
||||
api_key="test-key",
|
||||
base_url="https://test.openai.azure.com",
|
||||
)
|
||||
|
||||
with patch.object(llm, "call", return_value="The answer is 120.") as mock_call:
|
||||
agent = Agent(
|
||||
role="Math Assistant",
|
||||
goal="Calculate math",
|
||||
backstory="You calculate.",
|
||||
tools=[calculator_tool],
|
||||
llm=llm,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Calculate 15 * 8",
|
||||
expected_output="Result",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
|
||||
assert mock_call.called
|
||||
assert result is not None
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Bedrock Provider Tests
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@pytest.mark.skipif(not HAS_BOTO3, reason="boto3 package not installed")
|
||||
class TestBedrockNativeToolCalling:
|
||||
"""Tests for native tool calling with AWS Bedrock models."""
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def mock_aws_env(self):
|
||||
"""Mock AWS environment variables for tests."""
|
||||
env_vars = {
|
||||
"AWS_ACCESS_KEY_ID": "test-key",
|
||||
"AWS_SECRET_ACCESS_KEY": "test-secret",
|
||||
"AWS_REGION": "us-east-1",
|
||||
}
|
||||
with patch.dict(os.environ, env_vars):
|
||||
yield
|
||||
|
||||
def test_bedrock_agent_kickoff_with_tools_mocked(
|
||||
self, calculator_tool: CalculatorTool
|
||||
) -> None:
|
||||
"""Test Bedrock agent kickoff with mocked LLM call."""
|
||||
llm = LLM(model="bedrock/anthropic.claude-3-haiku-20240307-v1:0")
|
||||
|
||||
with patch.object(llm, "call", return_value="The answer is 120.") as mock_call:
|
||||
agent = Agent(
|
||||
role="Math Assistant",
|
||||
goal="Calculate math",
|
||||
backstory="You calculate.",
|
||||
tools=[calculator_tool],
|
||||
llm=llm,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Calculate 15 * 8",
|
||||
expected_output="Result",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
|
||||
assert mock_call.called
|
||||
assert result is not None
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Cross-Provider Native Tool Calling Behavior Tests
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class TestNativeToolCallingBehavior:
|
||||
"""Tests for native tool calling behavior across providers."""
|
||||
|
||||
def test_supports_function_calling_check(self) -> None:
|
||||
"""Test that supports_function_calling() is properly checked."""
|
||||
# OpenAI should support function calling
|
||||
openai_llm = LLM(model="gpt-4o-mini")
|
||||
assert hasattr(openai_llm, "supports_function_calling")
|
||||
assert openai_llm.supports_function_calling() is True
|
||||
|
||||
@pytest.mark.skipif(not HAS_ANTHROPIC, reason="anthropic package not installed")
|
||||
def test_anthropic_supports_function_calling(self) -> None:
|
||||
"""Test that Anthropic models support function calling."""
|
||||
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
|
||||
llm = LLM(model="anthropic/claude-3-5-haiku-20241022")
|
||||
assert hasattr(llm, "supports_function_calling")
|
||||
assert llm.supports_function_calling() is True
|
||||
|
||||
@pytest.mark.skipif(not HAS_GOOGLE_GENAI, reason="google-genai package not installed")
|
||||
def test_gemini_supports_function_calling(self) -> None:
|
||||
"""Test that Gemini models support function calling."""
|
||||
# with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-key"}):
|
||||
print("GOOGLE_API_KEY", os.getenv("GOOGLE_API_KEY"))
|
||||
llm = LLM(model="gemini/gemini-2.5-flash")
|
||||
assert hasattr(llm, "supports_function_calling")
|
||||
# Gemini uses supports_tools property
|
||||
assert llm.supports_function_calling() is True
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Token Usage Tests
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class TestNativeToolCallingTokenUsage:
|
||||
"""Tests for token usage with native tool calling."""
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_openai_native_tool_calling_token_usage(
|
||||
self, calculator_tool: CalculatorTool
|
||||
) -> None:
|
||||
"""Test token usage tracking with OpenAI native tool calling."""
|
||||
agent = Agent(
|
||||
role="Calculator",
|
||||
goal="Perform calculations efficiently",
|
||||
backstory="You calculate things.",
|
||||
tools=[calculator_tool],
|
||||
llm=LLM(model="gpt-4o-mini"),
|
||||
verbose=False,
|
||||
max_iter=3,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="What is 100 / 4?",
|
||||
expected_output="The result",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
|
||||
assert result is not None
|
||||
assert result.token_usage is not None
|
||||
assert result.token_usage.total_tokens > 0
|
||||
assert result.token_usage.successful_requests >= 1
|
||||
|
||||
print(f"\n[OPENAI NATIVE TOOL CALLING TOKEN USAGE]")
|
||||
print(f" Prompt tokens: {result.token_usage.prompt_tokens}")
|
||||
print(f" Completion tokens: {result.token_usage.completion_tokens}")
|
||||
print(f" Total tokens: {result.token_usage.total_tokens}")
|
||||
@@ -0,0 +1,119 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Test Agent. A helpful
      test assistant\nYour personal goal is: Answer questions\nTo give my best complete
      final answer to the task respond using the exact following format:\n\nThought:
      I now can give a great answer\nFinal Answer: Your final answer must be the great
      and the most complete as possible, it must be outcome described.\n\nI MUST use
      these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
      What is 2+2? Reply with just the number.\n\nBegin! This is VERY important to
      you, use the tools available and give your best Final Answer, your job depends
      on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '673'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-Cy7b0HjL79y39EkUcMLrRhPFe3XGj\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768444914,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
        Answer: 4\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 136,\n \"completion_tokens\": 13,\n
        \ \"total_tokens\": 149,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_8bbc38b4db\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 15 Jan 2026 02:41:55 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '857'
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '341'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '358'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,255 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Calculator Agent. A math
      expert\nYour personal goal is: Perform calculations\nYou ONLY have access to
      the following tools, and should NEVER make up tools that are not listed here:\n\nTool
      Name: calculate\nTool Arguments: {\n \"properties\": {\n \"expression\":
      {\n \"title\": \"Expression\",\n \"type\": \"string\"\n }\n },\n \"required\":
      [\n \"expression\"\n ],\n \"title\": \"CalculatorToolSchema\",\n \"type\":
      \"object\",\n \"additionalProperties\": false\n}\nTool Description: Calculate
      the result of a mathematical expression.\n\nIMPORTANT: Use the following format
      in your response:\n\n```\nThought: you should always think about what to do\nAction:
      the action to take, only one name of [calculate], just the name, exactly as
      it''s written.\nAction Input: the input to the action, just a simple JSON object,
      enclosed in curly braces, using \" to wrap keys and values.\nObservation: the
      result of the action\n```\n\nOnce all necessary information is gathered, return
      the following format:\n\n```\nThought: I now know the final answer\nFinal Answer:
      the final answer to the original input question\n```"},{"role":"user","content":"\nCurrent
      Task: Calculate 10 * 5\n\nBegin! This is VERY important to you, use the tools
      available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '1403'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-Cy7avghVPSpszLmlbHpwDQlWDoD6O\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768444909,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I need to calculate the expression
        10 * 5.\\nAction: calculate\\nAction Input: {\\\"expression\\\":\\\"10 * 5\\\"}\\nObservation:
        50\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 291,\n \"completion_tokens\": 33,\n
        \ \"total_tokens\": 324,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 15 Jan 2026 02:41:49 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '939'
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '579'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '598'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"You are Calculator Agent. A math
      expert\nYour personal goal is: Perform calculations\nYou ONLY have access to
      the following tools, and should NEVER make up tools that are not listed here:\n\nTool
      Name: calculate\nTool Arguments: {\n \"properties\": {\n \"expression\":
      {\n \"title\": \"Expression\",\n \"type\": \"string\"\n }\n },\n \"required\":
      [\n \"expression\"\n ],\n \"title\": \"CalculatorToolSchema\",\n \"type\":
      \"object\",\n \"additionalProperties\": false\n}\nTool Description: Calculate
      the result of a mathematical expression.\n\nIMPORTANT: Use the following format
      in your response:\n\n```\nThought: you should always think about what to do\nAction:
      the action to take, only one name of [calculate], just the name, exactly as
      it''s written.\nAction Input: the input to the action, just a simple JSON object,
      enclosed in curly braces, using \" to wrap keys and values.\nObservation: the
      result of the action\n```\n\nOnce all necessary information is gathered, return
      the following format:\n\n```\nThought: I now know the final answer\nFinal Answer:
      the final answer to the original input question\n```"},{"role":"user","content":"\nCurrent
      Task: Calculate 10 * 5\n\nBegin! This is VERY important to you, use the tools
      available and give your best Final Answer, your job depends on it!\n\nThought:"},{"role":"assistant","content":"Thought:
      I need to calculate the expression 10 * 5.\nAction: calculate\nAction Input:
      {\"expression\":\"10 * 5\"}\nObservation: The result of 10 * 5 is 50"}],"model":"gpt-4o-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '1591'
      content-type:
      - application/json
      cookie:
      - COOKIE-XXX
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-Cy7avDhDZCLvv8v2dh8ZQRrLdci6A\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768444909,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now know the final answer.\\nFinal
        Answer: 50\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 337,\n \"completion_tokens\": 14,\n
        \ \"total_tokens\": 351,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 15 Jan 2026 02:41:50 GMT
      Server:
      - cloudflare
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '864'
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '429'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '457'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,119 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Async Test Agent. An async
      helper\nYour personal goal is: Answer questions asynchronously\nTo give my best
      complete final answer to the task respond using the exact following format:\n\nThought:
      I now can give a great answer\nFinal Answer: Your final answer must be the great
      and the most complete as possible, it must be outcome described.\n\nI MUST use
      these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
      What is 3+3?\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '657'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-Cy7atOGxtc4y3oYNI62WiQ0Vogsdv\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768444907,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
        Answer: The sum of 3 + 3 is 6. Therefore, the outcome is that if you add three
        and three together, you will arrive at the total of six.\",\n \"refusal\":
        null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
        \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        131,\n \"completion_tokens\": 46,\n \"total_tokens\": 177,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 15 Jan 2026 02:41:48 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '983'
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '944'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '1192'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,119 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Standalone Agent. A helpful
      assistant\nYour personal goal is: Answer questions\nTo give my best complete
      final answer to the task respond using the exact following format:\n\nThought:
      I now can give a great answer\nFinal Answer: Your final answer must be the great
      and the most complete as possible, it must be outcome described.\n\nI MUST use
      these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
      What is 5+5? Reply with just the number.\n\nBegin! This is VERY important to
      you, use the tools available and give your best Final Answer, your job depends
      on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '674'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-Cy7azhPwUHQ0p5tdhxSAmLPoE8UgC\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768444913,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
        Answer: 10\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 136,\n \"completion_tokens\": 13,\n
        \ \"total_tokens\": 149,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 15 Jan 2026 02:41:54 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '858'
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '455'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '583'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,239 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are First Agent. A friendly
      greeter\nYour personal goal is: Greet users\nTo give my best complete final
      answer to the task respond using the exact following format:\n\nThought: I now
      can give a great answer\nFinal Answer: Your final answer must be the great and
      the most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
      hello\n\nBegin! This is VERY important to you, use the tools available and give
      your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '632'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-CyRKzgODZ9yn3F9OkaXsscLk2Ln3N\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768520801,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
        Answer: Hello! Welcome! I'm so glad to see you here. If you need any assistance
        or have any questions, feel free to ask. Have a wonderful day!\",\n \"refusal\":
        null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
        \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        127,\n \"completion_tokens\": 43,\n \"total_tokens\": 170,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 15 Jan 2026 23:46:42 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '990'
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '880'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '1160'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"You are Second Agent. A polite
      farewell agent\nYour personal goal is: Say goodbye\nTo give my best complete
      final answer to the task respond using the exact following format:\n\nThought:
      I now can give a great answer\nFinal Answer: Your final answer must be the great
      and the most complete as possible, it must be outcome described.\n\nI MUST use
      these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
      Say goodbye\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '640'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-CyRL1Ua2PkK5xXPp3KeF0AnGAk3JP\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768520803,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
        Answer: As we reach the end of our conversation, I want to express my gratitude
        for the time we've shared. It's been a pleasure assisting you, and I hope
        you found our interaction helpful and enjoyable. Remember, whenever you need
        assistance, I'm just a message away. Wishing you all the best in your future
        endeavors. Goodbye and take care!\",\n \"refusal\": null,\n \"annotations\":
        []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
        \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 126,\n \"completion_tokens\":
        79,\n \"total_tokens\": 205,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 15 Jan 2026 23:46:44 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '1189'
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '1363'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '1605'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,75 @@
interactions:
- request:
    body: '{"contents": [{"parts": [{"text": "\nCurrent Task: What is the capital
      of Japan?\n\nThis is the expected criteria for your final answer: The capital
      of Japan\nyou MUST return the actual complete content as the final answer, not
      a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}], "role":
      "user"}], "systemInstruction": {"parts": [{"text": "You are Research Assistant.
      You are a helpful research assistant.\nYour personal goal is: Find information
      about the capital of Japan\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"}], "role": "user"}, "generationConfig": {"stopSequences": ["\nObservation:"]}}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - '*/*'
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      connection:
      - keep-alive
      content-length:
      - '952'
      content-type:
      - application/json
      host:
      - aiplatform.googleapis.com
      x-goog-api-client:
      - google-genai-sdk/1.59.0 gl-python/3.13.3
      x-goog-api-key:
      - X-GOOG-API-KEY-XXX
    method: POST
    uri: https://aiplatform.googleapis.com/v1/publishers/google/models/gemini-2.0-flash-exp:generateContent
  response:
    body:
      string: "{\n \"candidates\": [\n {\n \"content\": {\n \"role\":
        \"model\",\n \"parts\": [\n {\n \"text\": \"The
        capital of Japan is Tokyo.\\nFinal Answer: Tokyo\\n\"\n }\n ]\n
        \ },\n \"finishReason\": \"STOP\",\n \"avgLogprobs\": -0.017845841554495003\n
        \ }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 163,\n \"candidatesTokenCount\":
        13,\n \"totalTokenCount\": 176,\n \"trafficType\": \"ON_DEMAND\",\n
        \ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
        \ \"tokenCount\": 163\n }\n ],\n \"candidatesTokensDetails\":
        [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 13\n
        \ }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash-exp\",\n \"createTime\":
        \"2026-01-15T22:27:38.066749Z\",\n \"responseId\": \"2mlpab2JBNOFidsPh5GigQs\"\n}\n"
    headers:
      Alt-Svc:
      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
      Content-Type:
      - application/json; charset=UTF-8
      Date:
      - Thu, 15 Jan 2026 22:27:38 GMT
      Server:
      - scaffolding on HTTPServer2
      Transfer-Encoding:
      - chunked
      Vary:
      - Origin
      - X-Origin
      - Referer
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      X-Frame-Options:
      - X-FRAME-OPTIONS-XXX
      X-XSS-Protection:
      - '0'
      content-length:
      - '786'
    status:
      code: 200
      message: OK
version: 1
File diff suppressed because one or more lines are too long
@@ -1,456 +1,528 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"trace_id": "00000000-0000-0000-0000-000000000000", "execution_type": "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, "crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-05T22:19:56.074812+00:00"}}'
|
||||
body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Guardrail Agent.
|
||||
You are a expert at validating the output of a task. By providing effective
|
||||
feedback if the output is not valid.\\nYour personal goal is: Validate the output
|
||||
of the task\\nTo give my best complete final answer to the task respond using
|
||||
the exact following format:\\n\\nThought: I now can give a great answer\\nFinal
|
||||
Answer: Your final answer must be the great and the most complete as possible,
|
||||
it must be outcome described.\\n\\nI MUST use these formats, my job depends
|
||||
on it!\"},{\"role\":\"user\",\"content\":\"\\nCurrent Task: \\n Ensure
|
||||
the following task result complies with the given guardrail.\\n\\n Task
|
||||
result:\\n \\n Lorem Ipsum is simply dummy text of the printing
|
||||
and typesetting industry. Lorem Ipsum has been the industry's standard dummy
|
||||
text ever\\n \\n\\n Guardrail:\\n Ensure the result has
|
||||
less than 10 words\\n\\n Your task:\\n - Confirm if the Task result
|
||||
complies with the guardrail.\\n - If not, provide clear feedback explaining
|
||||
what is wrong (e.g., by how much it violates the rule, or what specific part
|
||||
fails).\\n - Focus only on identifying issues \u2014 do not propose corrections.\\n
|
||||
\ - If the Task result complies with the guardrail, saying that is valid\\n
|
||||
\ \\n\\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\\n\\nThought:\"}],\"model\":\"gpt-4o\"}"
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '434'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.3.0
|
||||
X-Crewai-Version:
|
||||
- 1.3.0
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"error":"bad_credentials","message":"Bad credentials"}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '55'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 22:19:56 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/ https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net https://js.hscollectedforms.net
|
||||
https://js.usemessages.com https://snap.licdn.com https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data: *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com; connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com https://api.hubspot.com
|
||||
https://forms.hscollectedforms.net https://api.hubapi.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509 https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self'' *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com https://drive.google.com https://slides.google.com https://accounts.google.com https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/ https://www.youtube.com https://share.descript.com'
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 230c6cb5-92c7-448d-8c94-e5548a9f4259
|
||||
x-runtime:
|
||||
- '0.073220'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 401
|
||||
message: Unauthorized
|
||||
- request:
    body: '{"messages":[{"role":"system","content":"You are Guardrail Agent. You are a expert at validating the output of a task. By providing effective feedback if the output is not valid.\nYour personal goal is: Validate the output of the task\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!Ensure your final answer strictly adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": {\n \"properties\": {\n \"valid\": {\n \"description\": \"Whether the task output complies with the guardrail\",\n \"title\": \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": {\n \"anyOf\":
      [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"null\"\n }\n ],\n \"default\": null,\n \"description\": \"A feedback about the task output if it is not valid\",\n \"title\": \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\": false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python."},{"role":"user","content":"\n Ensure the following task result complies with the given guardrail.\n\n Task result:\n \n Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry''s standard dummy text ever\n \n\n Guardrail:\n Ensure
      the result has less than 10 words\n\n Your task:\n - Confirm if the Task result complies with the guardrail.\n - If not, provide clear feedback explaining what is wrong (e.g., by how much it violates the rule, or what specific part fails).\n - Focus only on identifying issues — do not propose corrections.\n - If the Task result complies with the guardrail, saying that is valid\n "}],"model":"gpt-4o"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '2452'
      - '1467'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.109.1
      x-stainless-arch:
      - arm64
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.109.1
      - 1.83.0
      x-stainless-read-timeout:
      - '600'
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.9
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-CYg96Riy2RJRxnBHvoROukymP9wvs\",\n \"object\": \"chat.completion\",\n \"created\": 1762381196,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I need to check if the task result meets the requirement of having less than 10 words.\\n\\nFinal Answer: {\\n \\\"valid\\\": false,\\n \\\"feedback\\\": \\\"The task result contains more than 10 words, violating the guardrail. The text provided contains about 21 words.\\\"\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 489,\n \"completion_tokens\": 61,\n \"total_tokens\": 550,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\"\
        : 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_cbf1785567\"\n}\n"
      string: "{\n \"id\": \"chatcmpl-Cy7yHRYTZi8yzRbcODnKr92keLKCb\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768446357,\n \"model\": \"gpt-4o-2024-08-06\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"The task result provided has more than
        10 words. I will count the words to verify this.\\n\\nThe task result is the
        following text:\\n\\\"Lorem Ipsum is simply dummy text of the printing and
        typesetting industry. Lorem Ipsum has been the industry's standard dummy text
        ever\\\"\\n\\nCounting the words:\\n\\n1. Lorem \\n2. Ipsum \\n3. is \\n4.
        simply \\n5. dummy \\n6. text \\n7. of \\n8. the \\n9. printing \\n10. and
        \\n11. typesetting \\n12. industry. \\n13. Lorem \\n14. Ipsum \\n15. has \\n16.
        been \\n17. the \\n18. industry's \\n19. standard \\n20. dummy \\n21. text
        \\n22. ever\\n\\nThe total word count is 22.\\n\\nThought: I now can give
        a great answer\\nFinal Answer: The task result does not comply with the guardrail.
        It contains 22 words, which exceeds the limit of 10 words.\",\n \"refusal\":
        null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
        \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        285,\n \"completion_tokens\": 195,\n \"total_tokens\": 480,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_deacdd5f6f\"\n}\n"
    headers:
      CF-RAY:
      - REDACTED-RAY
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Wed, 05 Nov 2025 22:19:58 GMT
      - Thu, 15 Jan 2026 03:05:59 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:49:58 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - max-age=31536000; includeSubDomains; preload
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - X-Request-ID
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '1557'
      openai-organization:
      - user-hortuttj2f3qtmxyik2zxf4q
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '2201'
      - '2130'
      openai-project:
      - proj_fL4UBWR1CMpAAdgzaSKqsVvA
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '2401'
      - '2147'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - '500'
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - '30000'
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - '499'
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - '29439'
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - 120ms
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - 1.122s
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - req_REDACTED
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": {\n \"properties\": {\n \"valid\": {\n \"description\": \"Whether the task output complies with the guardrail\",\n \"title\": \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": {\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"null\"\n }\n ],\n \"default\": null,\n \"description\": \"A feedback about the task output if it is not valid\",\n \"title\": \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\":
      false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python."},{"role":"user","content":"{\n \"valid\": false,\n \"feedback\": \"The task result contains more than 10 words, violating the guardrail. The text provided contains about 21 words.\"\n}"}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}'
    body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly
      adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\":
      {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\":
      {\n \"properties\": {\n \"valid\": {\n \"description\":
      \"Whether the task output complies with the guardrail\",\n \"title\":
      \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\":
      {\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\":
      \"null\"\n }\n ],\n \"default\": null,\n \"description\":
      \"A feedback about the task output if it is not valid\",\n \"title\":
      \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\":
      \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\":
      false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output.
      Ensure the final output does not include any code block markers like ```json
      or ```python."},{"role":"user","content":"The task result does not comply with
      the guardrail. It contains 22 words, which exceeds the limit of 10 words."}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether
      the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A
      feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '1884'
      - '1835'
      content-type:
      - application/json
      cookie:
      - __cf_bm=REDACTED; _cfuvid=REDACTED
      - COOKIE-XXX
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.109.1
      x-stainless-arch:
      - arm64
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-helper-method:
      - chat.completions.parse
      - beta.chat.completions.parse
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.109.1
      - 1.83.0
      x-stainless-read-timeout:
      - '600'
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.9
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-CYg98QlZ8NTrQ69676MpXXyCoZJT8\",\n \"object\": \"chat.completion\",\n \"created\": 1762381198,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"{\\\"valid\\\":false,\\\"feedback\\\":\\\"The task result contains more than 10 words, violating the guardrail. The text provided contains about 21 words.\\\"}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 374,\n \"completion_tokens\": 32,\n \"total_tokens\": 406,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n\
        \ \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_cbf1785567\"\n}\n"
      string: "{\n \"id\": \"chatcmpl-Cy7yJiPCk4fXuogyT5e8XeGRLCSf8\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768446359,\n \"model\": \"gpt-4o-2024-08-06\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"{\\\"valid\\\":false,\\\"feedback\\\":\\\"The
        task output exceeds the word limit of 10 words by containing 22 words.\\\"}\",\n
        \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
        null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        363,\n \"completion_tokens\": 25,\n \"total_tokens\": 388,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_a0e9480a2f\"\n}\n"
    headers:
      CF-RAY:
      - REDACTED-RAY
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Wed, 05 Nov 2025 22:19:59 GMT
      - Thu, 15 Jan 2026 03:05:59 GMT
      Server:
      - cloudflare
      Strict-Transport-Security:
      - max-age=31536000; includeSubDomains; preload
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - X-Request-ID
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '913'
      openai-organization:
      - user-hortuttj2f3qtmxyik2zxf4q
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '419'
      - '488'
      openai-project:
      - proj_fL4UBWR1CMpAAdgzaSKqsVvA
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '432'
      - '507'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - '500'
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - '30000'
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - '499'
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - '29702'
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - 120ms
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - 596ms
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - req_REDACTED
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"You are Guardrail Agent. You are a expert at validating the output of a task. By providing effective feedback if the output is not valid.\nYour personal goal is: Validate the output of the task\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!Ensure your final answer strictly adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": {\n \"properties\": {\n \"valid\": {\n \"description\": \"Whether the task output complies with the guardrail\",\n \"title\": \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": {\n \"anyOf\":
      [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"null\"\n }\n ],\n \"default\": null,\n \"description\": \"A feedback about the task output if it is not valid\",\n \"title\": \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\": false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python."},{"role":"user","content":"\n Ensure the following task result complies with the given guardrail.\n\n Task result:\n \n Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry''s standard dummy text ever\n \n\n Guardrail:\n Ensure
      the result has less than 500 words\n\n Your task:\n - Confirm if the Task result complies with the guardrail.\n - If not, provide clear feedback explaining what is wrong (e.g., by how much it violates the rule, or what specific part fails).\n - Focus only on identifying issues — do not propose corrections.\n - If the Task result complies with the guardrail, saying that is valid\n "}],"model":"gpt-4o"}'
    body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Guardrail Agent.
      You are a expert at validating the output of a task. By providing effective
      feedback if the output is not valid.\\nYour personal goal is: Validate the output
      of the task\\nTo give my best complete final answer to the task respond using
      the exact following format:\\n\\nThought: I now can give a great answer\\nFinal
      Answer: Your final answer must be the great and the most complete as possible,
      it must be outcome described.\\n\\nI MUST use these formats, my job depends
      on it!\"},{\"role\":\"user\",\"content\":\"\\nCurrent Task: \\n Ensure
      the following task result complies with the given guardrail.\\n\\n Task
      result:\\n \\n Lorem Ipsum is simply dummy text of the printing
      and typesetting industry. Lorem Ipsum has been the industry's standard dummy
      text ever\\n \\n\\n Guardrail:\\n Ensure the result has
      less than 500 words\\n\\n Your task:\\n - Confirm if the Task
      result complies with the guardrail.\\n - If not, provide clear feedback
      explaining what is wrong (e.g., by how much it violates the rule, or what specific
      part fails).\\n - Focus only on identifying issues \u2014 do not propose
      corrections.\\n - If the Task result complies with the guardrail, saying
      that is valid\\n \\n\\nBegin! This is VERY important to you, use the
      tools available and give your best Final Answer, your job depends on it!\\n\\nThought:\"}],\"model\":\"gpt-4o\"}"
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '2453'
      - '1468'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.109.1
      x-stainless-arch:
      - arm64
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.109.1
      - 1.83.0
      x-stainless-read-timeout:
      - '600'
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.9
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-CYgBMV6fu7EvV2BqzMdJaKyLAg1WW\",\n \"object\": \"chat.completion\",\n \"created\": 1762381336,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\\"valid\\\": true, \\\"feedback\\\": null}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 489,\n \"completion_tokens\": 23,\n \"total_tokens\": 512,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\"\
        : \"fp_cbf1785567\"\n}\n"
      string: "{\n \"id\": \"chatcmpl-Cy7yKa0rmi2YoTLpyXt9hjeLt2rTI\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768446360,\n \"model\": \"gpt-4o-2024-08-06\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"First, I'll count the number of words
        in the Task result to ensure it complies with the guardrail. \\n\\nThe Task
        result is: \\\"Lorem Ipsum is simply dummy text of the printing and typesetting
        industry. Lorem Ipsum has been the industry's standard dummy text ever.\\\"\\n\\nBy
        counting the words: \\n1. Lorem\\n2. Ipsum\\n3. is\\n4. simply\\n5. dummy\\n6.
        text\\n7. of\\n8. the\\n9. printing\\n10. and\\n11. typesetting\\n12. industry\\n13.
        Lorem\\n14. Ipsum\\n15. has\\n16. been\\n17. the\\n18. industry's\\n19. standard\\n20.
        dummy\\n21. text\\n22. ever\\n\\nThere are 22 words total in the Task result.\\n\\nI
        need to verify if the count of 22 words is less than the guardrail limit of
        500 words.\\n\\nThought: I now can give a great answer\\nFinal Answer: The
        Task result complies with the guardrail as it contains 22 words, which is
        less than the 500-word limit. Therefore, the output is valid.\",\n \"refusal\":
        null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
        \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        285,\n \"completion_tokens\": 227,\n \"total_tokens\": 512,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_deacdd5f6f\"\n}\n"
    headers:
      CF-RAY:
      - REDACTED-RAY
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Wed, 05 Nov 2025 22:22:16 GMT
      - Thu, 15 Jan 2026 03:06:02 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:52:16 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - max-age=31536000; includeSubDomains; preload
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - X-Request-ID
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '1668'
      openai-organization:
      - user-hortuttj2f3qtmxyik2zxf4q
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '327'
      - '2502'
      openai-project:
      - proj_fL4UBWR1CMpAAdgzaSKqsVvA
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '372'
      - '2522'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - '500'
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - '30000'
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - '499'
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - '29438'
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - 120ms
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - 1.124s
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - req_REDACTED
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
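The interactions above spend a full LLM round-trip to enforce a purely mechanical word-count rule. For a rule this deterministic, the same check can run as a plain callable guardrail. A hedged sketch, assuming crewAI's callable task-guardrail contract (the function receives a `TaskOutput` and returns a `(passed, data_or_feedback)` tuple); the task fields shown are illustrative:

```python
# Deterministic version of the "less than 10 words" guardrail recorded above.
# Assumes crewAI's callable guardrail contract; not the LLM-based Guardrail Agent.
from typing import Any

from crewai import Task
from crewai.tasks.task_output import TaskOutput


def fewer_than_ten_words(output: TaskOutput) -> tuple[bool, Any]:
    """Pass the raw output through if it has fewer than 10 words."""
    words = output.raw.split()
    if len(words) < 10:
        return True, output.raw
    return False, f"Result has {len(words)} words; the guardrail allows fewer than 10."


# Illustrative usage: attach the callable instead of a natural-language guardrail.
task = Task(
    description="Summarize the report in one short sentence.",
    expected_output="A summary of fewer than 10 words.",
    guardrail=fewer_than_ten_words,
)
```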
- request:
    body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": {\n \"properties\": {\n \"valid\": {\n \"description\": \"Whether the task output complies with the guardrail\",\n \"title\": \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": {\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"null\"\n }\n ],\n \"default\": null,\n \"description\": \"A feedback about the task output if it is not valid\",\n \"title\": \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\":
      false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python."},{"role":"user","content":"{\"valid\": true, \"feedback\": null}"}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}'
    body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly
      adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\":
      {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\":
      {\n \"properties\": {\n \"valid\": {\n \"description\":
      \"Whether the task output complies with the guardrail\",\n \"title\":
      \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\":
      {\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\":
      \"null\"\n }\n ],\n \"default\": null,\n \"description\":
      \"A feedback about the task output if it is not valid\",\n \"title\":
      \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\":
      \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\":
      false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output.
      Ensure the final output does not include any code block markers like ```json
      or ```python."},{"role":"user","content":"The Task result complies with the
      guardrail as it contains 22 words, which is less than the 500-word limit. Therefore,
      the output is valid."}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether
      the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A
      feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '1762'
      - '1864'
      content-type:
      - application/json
      cookie:
      - __cf_bm=REDACTED; _cfuvid=REDACTED
      - COOKIE-XXX
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.109.1
      x-stainless-arch:
      - arm64
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-helper-method:
      - chat.completions.parse
      - beta.chat.completions.parse
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.109.1
      - 1.83.0
      x-stainless-read-timeout:
      - '600'
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.9
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-CYgBMU20R45qGGaLN6vNAmW1NR4R6\",\n \"object\": \"chat.completion\",\n \"created\": 1762381336,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"{\\\"valid\\\":true,\\\"feedback\\\":null}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 347,\n \"completion_tokens\": 9,\n \"total_tokens\": 356,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_cbf1785567\"\n}\n"
      string: "{\n \"id\": \"chatcmpl-Cy7yMAjNYSCz2foZPEcSVCuapzF8y\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768446362,\n \"model\": \"gpt-4o-2024-08-06\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"{\\\"valid\\\":true,\\\"feedback\\\":null}\",\n
        \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
        null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        369,\n \"completion_tokens\": 9,\n \"total_tokens\": 378,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_a0e9480a2f\"\n}\n"
    headers:
      CF-RAY:
      - REDACTED-RAY
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Wed, 05 Nov 2025 22:22:17 GMT
      - Thu, 15 Jan 2026 03:06:03 GMT
      Server:
      - cloudflare
      Strict-Transport-Security:
      - max-age=31536000; includeSubDomains; preload
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - X-Request-ID
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      content-length:
      - '837'
      openai-organization:
      - user-hortuttj2f3qtmxyik2zxf4q
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '1081'
      - '413'
      openai-project:
      - proj_fL4UBWR1CMpAAdgzaSKqsVvA
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '1241'
      - '650'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - '500'
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - '30000'
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - '499'
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - '29478'
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - 120ms
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - 1.042s
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - req_REDACTED
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
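The schema-coercion calls recorded above go through the OpenAI SDK's structured-output helper (the cassettes list `chat.completions.parse` under `x-stainless-helper-method`). A minimal sketch reproducing that call shape with a Pydantic model mirroring the `LLMGuardrailResult` schema from the request bodies; the message strings below are abbreviated, not the exact recorded prompts:

```python
# Sketch of the structured-output call shape seen in these cassettes.
from pydantic import BaseModel
from openai import OpenAI


class LLMGuardrailResult(BaseModel):
    valid: bool
    feedback: str | None = None


client = OpenAI()  # reads OPENAI_API_KEY from the environment
completion = client.chat.completions.parse(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": "Answer strictly in the LLMGuardrailResult schema."},
        {"role": "user", "content": "The task result contains 22 words, exceeding the 10-word limit."},
    ],
    # Serialized by the SDK into the json_schema response_format seen above.
    response_format=LLMGuardrailResult,
)
result = completion.choices[0].message.parsed
print(result.valid, result.feedback)
```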
138	lib/crewai/tests/cli/authentication/providers/test_keycloak.py	Normal file
@@ -0,0 +1,138 @@
import pytest

from crewai.cli.authentication.main import Oauth2Settings
from crewai.cli.authentication.providers.keycloak import KeycloakProvider


class TestKeycloakProvider:
    @pytest.fixture(autouse=True)
    def setup_method(self):
        self.valid_settings = Oauth2Settings(
            provider="keycloak",
            domain="keycloak.example.com",
            client_id="test-client-id",
            audience="test-audience",
            extra={
                "realm": "test-realm"
            }
        )
        self.provider = KeycloakProvider(self.valid_settings)

    def test_initialization_with_valid_settings(self):
        provider = KeycloakProvider(self.valid_settings)
        assert provider.settings == self.valid_settings
        assert provider.settings.provider == "keycloak"
        assert provider.settings.domain == "keycloak.example.com"
        assert provider.settings.client_id == "test-client-id"
        assert provider.settings.audience == "test-audience"
        assert provider.settings.extra.get("realm") == "test-realm"

    def test_get_authorize_url(self):
        expected_url = "https://keycloak.example.com/realms/test-realm/protocol/openid-connect/auth/device"
        assert self.provider.get_authorize_url() == expected_url

    def test_get_authorize_url_with_different_domain(self):
        settings = Oauth2Settings(
            provider="keycloak",
            domain="auth.company.com",
            client_id="test-client",
            audience="test-audience",
            extra={
                "realm": "my-realm"
            }
        )
        provider = KeycloakProvider(settings)
        expected_url = "https://auth.company.com/realms/my-realm/protocol/openid-connect/auth/device"
        assert provider.get_authorize_url() == expected_url

    def test_get_token_url(self):
        expected_url = "https://keycloak.example.com/realms/test-realm/protocol/openid-connect/token"
        assert self.provider.get_token_url() == expected_url

    def test_get_token_url_with_different_domain(self):
        settings = Oauth2Settings(
            provider="keycloak",
            domain="sso.enterprise.com",
            client_id="test-client",
            audience="test-audience",
            extra={
                "realm": "enterprise-realm"
            }
        )
        provider = KeycloakProvider(settings)
        expected_url = "https://sso.enterprise.com/realms/enterprise-realm/protocol/openid-connect/token"
        assert provider.get_token_url() == expected_url

    def test_get_jwks_url(self):
        expected_url = "https://keycloak.example.com/realms/test-realm/protocol/openid-connect/certs"
        assert self.provider.get_jwks_url() == expected_url

    def test_get_jwks_url_with_different_domain(self):
        settings = Oauth2Settings(
            provider="keycloak",
            domain="identity.org",
            client_id="test-client",
            audience="test-audience",
            extra={
                "realm": "org-realm"
            }
        )
        provider = KeycloakProvider(settings)
        expected_url = "https://identity.org/realms/org-realm/protocol/openid-connect/certs"
        assert provider.get_jwks_url() == expected_url

    def test_get_issuer(self):
        expected_issuer = "https://keycloak.example.com/realms/test-realm"
        assert self.provider.get_issuer() == expected_issuer

    def test_get_issuer_with_different_domain(self):
        settings = Oauth2Settings(
            provider="keycloak",
            domain="login.myapp.io",
            client_id="test-client",
            audience="test-audience",
            extra={
                "realm": "app-realm"
            }
        )
        provider = KeycloakProvider(settings)
        expected_issuer = "https://login.myapp.io/realms/app-realm"
        assert provider.get_issuer() == expected_issuer

    def test_get_audience(self):
        assert self.provider.get_audience() == "test-audience"

    def test_get_client_id(self):
        assert self.provider.get_client_id() == "test-client-id"

    def test_get_required_fields(self):
        assert self.provider.get_required_fields() == ["realm"]

    def test_oauth2_base_url(self):
        assert self.provider._oauth2_base_url() == "https://keycloak.example.com"

    def test_oauth2_base_url_strips_https_prefix(self):
        settings = Oauth2Settings(
            provider="keycloak",
            domain="https://keycloak.example.com",
            client_id="test-client-id",
            audience="test-audience",
            extra={
                "realm": "test-realm"
            }
        )
        provider = KeycloakProvider(settings)
        assert provider._oauth2_base_url() == "https://keycloak.example.com"

    def test_oauth2_base_url_strips_http_prefix(self):
        settings = Oauth2Settings(
            provider="keycloak",
            domain="http://keycloak.example.com",
            client_id="test-client-id",
            audience="test-audience",
            extra={
                "realm": "test-realm"
            }
        )
        provider = KeycloakProvider(settings)
        assert provider._oauth2_base_url() == "https://keycloak.example.com"
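The expected URLs in these tests follow Keycloak's standard realm layout (`/realms/<realm>/protocol/openid-connect/...`). Below is a sketch of a provider that would satisfy the assertions above, reconstructed only from those assertions; the real implementation is `crewai.cli.authentication.providers.keycloak.KeycloakProvider` and may differ in detail:

```python
# Illustrative only: rebuilt from the expected URLs asserted in the tests above.
class KeycloakProviderSketch:
    def __init__(self, settings) -> None:
        self.settings = settings

    def _oauth2_base_url(self) -> str:
        # Normalize any "http(s)://" prefix away, then pin https.
        domain = self.settings.domain.removeprefix("https://").removeprefix("http://")
        return f"https://{domain}"

    def _realm_url(self) -> str:
        return f"{self._oauth2_base_url()}/realms/{self.settings.extra['realm']}"

    def get_authorize_url(self) -> str:
        # Keycloak's OAuth 2.0 device authorization endpoint.
        return f"{self._realm_url()}/protocol/openid-connect/auth/device"

    def get_token_url(self) -> str:
        return f"{self._realm_url()}/protocol/openid-connect/token"

    def get_jwks_url(self) -> str:
        return f"{self._realm_url()}/protocol/openid-connect/certs"

    def get_issuer(self) -> str:
        return self._realm_url()

    def get_audience(self) -> str:
        return self.settings.audience

    def get_client_id(self) -> str:
        return self.settings.client_id

    def get_required_fields(self) -> list[str]:
        return ["realm"]
```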
@@ -1,8 +1,4 @@
import gc
import weakref

import pytest

from crewai.cli.git import Repository


@@ -103,82 +99,3 @@ def test_origin_url(fp, repository):
        stdout="https://github.com/user/repo.git\n",
    )
    assert repository.origin_url() == "https://github.com/user/repo.git"


def test_repository_garbage_collection(fp):
    """Test that Repository instances can be garbage collected.

    This test verifies the fix for the memory leak issue where using
    @lru_cache on the is_git_repo() method prevented garbage collection
    of Repository instances.
    """
    fp.register(["git", "--version"], stdout="git version 2.30.0\n")
    fp.register(["git", "rev-parse", "--is-inside-work-tree"], stdout="true\n")
    fp.register(["git", "fetch"], stdout="")

    repo = Repository(path=".")
    weak_ref = weakref.ref(repo)

    assert weak_ref() is not None

    del repo
    gc.collect()

    assert weak_ref() is None, (
        "Repository instance was not garbage collected. "
        "This indicates a memory leak, likely from @lru_cache on instance methods."
    )


def test_is_git_repo_caching(fp):
    """Test that is_git_repo() result is cached at the instance level.

    This verifies that the instance-level caching works correctly,
    only calling the subprocess once per instance.
    """
    fp.register(["git", "--version"], stdout="git version 2.30.0\n")
    fp.register(["git", "rev-parse", "--is-inside-work-tree"], stdout="true\n")
    fp.register(["git", "fetch"], stdout="")

    repo = Repository(path=".")

    assert repo._is_git_repo_cache is True

    result1 = repo.is_git_repo()
    result2 = repo.is_git_repo()

    assert result1 is True
    assert result2 is True
    assert repo._is_git_repo_cache is True


def test_multiple_repository_instances_independent_caches(fp):
    """Test that multiple Repository instances have independent caches.

    This verifies that the instance-level caching doesn't share state
    between different Repository instances.
    """
    fp.register(["git", "--version"], stdout="git version 2.30.0\n")
    fp.register(["git", "rev-parse", "--is-inside-work-tree"], stdout="true\n")
    fp.register(["git", "fetch"], stdout="")

    fp.register(["git", "--version"], stdout="git version 2.30.0\n")
    fp.register(["git", "rev-parse", "--is-inside-work-tree"], stdout="true\n")
    fp.register(["git", "fetch"], stdout="")

    repo1 = Repository(path=".")
    repo2 = Repository(path=".")

    assert repo1._is_git_repo_cache is True
    assert repo2._is_git_repo_cache is True

    assert repo1._is_git_repo_cache is not repo2._is_git_repo_cache or (
        repo1._is_git_repo_cache == repo2._is_git_repo_cache
    )

    weak_ref1 = weakref.ref(repo1)
    del repo1
    gc.collect()

    assert weak_ref1() is None
    assert repo2._is_git_repo_cache is True
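The docstrings in the removed tests describe the underlying fix: `@lru_cache` on a bound method stores `self` in a module-level cache, which keeps every `Repository` instance alive. A hedged sketch of the instance-level caching these tests assert against (the eager, per-instance `_is_git_repo_cache` attribute); the real `Repository` lives in `crewai.cli.git` and may differ in detail:

```python
# Illustrative per-instance caching; mirrors the attribute the tests assert.
import subprocess


class RepositorySketch:
    def __init__(self, path: str = ".") -> None:
        self.path = path
        # Computed once per instance and stored on the instance itself, so the
        # object stays garbage-collectable (unlike @lru_cache on a method,
        # which would pin `self` in a global cache).
        self._is_git_repo_cache: bool = self._check_is_git_repo()

    def _check_is_git_repo(self) -> bool:
        result = subprocess.run(
            ["git", "rev-parse", "--is-inside-work-tree"],
            cwd=self.path,
            capture_output=True,
            text=True,
        )
        return result.stdout.strip() == "true"

    def is_git_repo(self) -> bool:
        # No subprocess call here; the cached value is reused.
        return self._is_git_repo_cache
```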