Compare commits


1 Commit

Author SHA1 Message Date
Devin AI
37b75aeb6a fix: handle 'Action: None' in parser to prevent OutputParserError
When LLMs output 'Action: None' (or variations like 'Action: N/A'),
the parser now correctly treats this as a signal for a direct response
instead of raising an OutputParserError.

This fixes issue #4186 where the parser would fail and leak internal
'Thought:' text to users instead of providing a clean response.

Changes:
- Add ACTION_NONE_REGEX constant to match non-action values
- Update parse() to detect and handle Action: None patterns
- Convert Action: None to AgentFinish with the thought as output
- Add comprehensive tests for all variations

Closes #4186

Co-Authored-By: João <joao@crewai.com>
2026-01-06 19:44:09 +00:00
97 changed files with 842 additions and 9337 deletions
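The parser fix described above is small. As a reviewer's note, here is a minimal sketch of the approach, modeled on the names the commit message gives (`ACTION_NONE_REGEX`, `parse()`, `AgentFinish`); the surrounding structure is assumed, not copied from the actual crewai source:

```python
import re

# Assumed regex shape: matches "Action: None" and variations like "Action: N/A".
ACTION_NONE_REGEX = re.compile(r"Action\s*:\s*(none|n/?a)\s*$", re.IGNORECASE | re.MULTILINE)

class AgentFinish:
    """Stand-in for crewai's AgentFinish result type (simplified for illustration)."""
    def __init__(self, output: str):
        self.output = output

def parse(text: str):
    # If the model emitted a non-action such as "Action: None", treat the
    # preceding thought as the final answer instead of raising
    # OutputParserError and leaking raw "Thought:" text to the user.
    if ACTION_NONE_REGEX.search(text):
        thought = text.split("Action:")[0].replace("Thought:", "").strip()
        return AgentFinish(output=thought)
    ...  # normal Action / Action Input parsing continues here
```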

View File

@@ -120,8 +120,6 @@ HEADERS_TO_FILTER = {
"accept-encoding": "ACCEPT-ENCODING-XXX",
"x-amzn-requestid": "X-AMZN-REQUESTID-XXX",
"x-amzn-RequestId": "X-AMZN-REQUESTID-XXX",
"x-a2a-notification-token": "X-A2A-NOTIFICATION-TOKEN-XXX",
"x-a2a-version": "X-A2A-VERSION-XXX",
}
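For context, a table like this is typically used to scrub sensitive or noisy headers from recorded HTTP fixtures. A minimal sketch of how such a mapping might be applied; the `filter_headers` helper and the abbreviated table are assumptions for illustration, not code from this diff:

```python
# Abbreviated copy of the mapping above: header name -> stable placeholder.
HEADERS_TO_FILTER = {
    "accept-encoding": "ACCEPT-ENCODING-XXX",
    "x-amzn-requestid": "X-AMZN-REQUESTID-XXX",
}

def filter_headers(headers: dict[str, str]) -> dict[str, str]:
    """Replace known-sensitive header values with placeholders (hypothetical helper)."""
    # Compare case-insensitively, since the real table lists mixed-case variants.
    return {
        name: HEADERS_TO_FILTER.get(name.lower(), value)
        for name, value in headers.items()
    }

print(filter_headers({"Accept-Encoding": "gzip", "User-Agent": "curl"}))
# -> {'Accept-Encoding': 'ACCEPT-ENCODING-XXX', 'User-Agent': 'curl'}
```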

View File

@@ -61,9 +61,7 @@
"groups": [
{
"group": "Welcome",
"pages": [
"index"
]
"pages": ["index"]
}
]
},
@@ -73,11 +71,7 @@
"groups": [
{
"group": "Get Started",
"pages": [
"en/introduction",
"en/installation",
"en/quickstart"
]
"pages": ["en/introduction", "en/installation", "en/quickstart"]
},
{
"group": "Guides",
@@ -85,23 +79,17 @@
{
"group": "Strategy",
"icon": "compass",
"pages": [
"en/guides/concepts/evaluating-use-cases"
]
"pages": ["en/guides/concepts/evaluating-use-cases"]
},
{
"group": "Agents",
"icon": "user",
"pages": [
"en/guides/agents/crafting-effective-agents"
]
"pages": ["en/guides/agents/crafting-effective-agents"]
},
{
"group": "Crews",
"icon": "users",
"pages": [
"en/guides/crews/first-crew"
]
"pages": ["en/guides/crews/first-crew"]
},
{
"group": "Flows",
@@ -291,7 +279,6 @@
"en/observability/arize-phoenix",
"en/observability/braintrust",
"en/observability/datadog",
"en/observability/galileo",
"en/observability/langdb",
"en/observability/langfuse",
"en/observability/langtrace",
@@ -337,9 +324,7 @@
},
{
"group": "Telemetry",
"pages": [
"en/telemetry"
]
"pages": ["en/telemetry"]
}
]
},
@@ -349,9 +334,7 @@
"groups": [
{
"group": "Getting Started",
"pages": [
"en/enterprise/introduction"
]
"pages": ["en/enterprise/introduction"]
},
{
"group": "Build",
@@ -360,8 +343,7 @@
"en/enterprise/features/crew-studio",
"en/enterprise/features/marketplace",
"en/enterprise/features/agent-repositories",
"en/enterprise/features/tools-and-integrations",
"en/enterprise/features/pii-trace-redactions"
"en/enterprise/features/tools-and-integrations"
]
},
{
@@ -444,9 +426,7 @@
},
{
"group": "Resources",
"pages": [
"en/enterprise/resources/frequently-asked-questions"
]
"pages": ["en/enterprise/resources/frequently-asked-questions"]
}
]
},
@@ -472,10 +452,7 @@
"groups": [
{
"group": "Examples",
"pages": [
"en/examples/example",
"en/examples/cookbooks"
]
"pages": ["en/examples/example", "en/examples/cookbooks"]
}
]
},
@@ -485,9 +462,7 @@
"groups": [
{
"group": "Release Notes",
"pages": [
"en/changelog"
]
"pages": ["en/changelog"]
}
]
}
@@ -526,9 +501,7 @@
"groups": [
{
"group": "Bem-vindo",
"pages": [
"pt-BR/index"
]
"pages": ["pt-BR/index"]
}
]
},
@@ -550,23 +523,17 @@
{
"group": "Estratégia",
"icon": "compass",
"pages": [
"pt-BR/guides/concepts/evaluating-use-cases"
]
"pages": ["pt-BR/guides/concepts/evaluating-use-cases"]
},
{
"group": "Agentes",
"icon": "user",
"pages": [
"pt-BR/guides/agents/crafting-effective-agents"
]
"pages": ["pt-BR/guides/agents/crafting-effective-agents"]
},
{
"group": "Crews",
"icon": "users",
"pages": [
"pt-BR/guides/crews/first-crew"
]
"pages": ["pt-BR/guides/crews/first-crew"]
},
{
"group": "Flows",
@@ -743,7 +710,6 @@
"pt-BR/observability/arize-phoenix",
"pt-BR/observability/braintrust",
"pt-BR/observability/datadog",
"pt-BR/observability/galileo",
"pt-BR/observability/langdb",
"pt-BR/observability/langfuse",
"pt-BR/observability/langtrace",
@@ -788,9 +754,7 @@
},
{
"group": "Telemetria",
"pages": [
"pt-BR/telemetry"
]
"pages": ["pt-BR/telemetry"]
}
]
},
@@ -800,9 +764,7 @@
"groups": [
{
"group": "Começando",
"pages": [
"pt-BR/enterprise/introduction"
]
"pages": ["pt-BR/enterprise/introduction"]
},
{
"group": "Construir",
@@ -811,8 +773,7 @@
"pt-BR/enterprise/features/crew-studio",
"pt-BR/enterprise/features/marketplace",
"pt-BR/enterprise/features/agent-repositories",
"pt-BR/enterprise/features/tools-and-integrations",
"pt-BR/enterprise/features/pii-trace-redactions"
"pt-BR/enterprise/features/tools-and-integrations"
]
},
{
@@ -922,10 +883,7 @@
"groups": [
{
"group": "Exemplos",
"pages": [
"pt-BR/examples/example",
"pt-BR/examples/cookbooks"
]
"pages": ["pt-BR/examples/example", "pt-BR/examples/cookbooks"]
}
]
},
@@ -935,9 +893,7 @@
"groups": [
{
"group": "Notas de Versão",
"pages": [
"pt-BR/changelog"
]
"pages": ["pt-BR/changelog"]
}
]
}
@@ -976,9 +932,7 @@
"groups": [
{
"group": "환영합니다",
"pages": [
"ko/index"
]
"pages": ["ko/index"]
}
]
},
@@ -988,11 +942,7 @@
"groups": [
{
"group": "시작 안내",
"pages": [
"ko/introduction",
"ko/installation",
"ko/quickstart"
]
"pages": ["ko/introduction", "ko/installation", "ko/quickstart"]
},
{
"group": "가이드",
@@ -1000,23 +950,17 @@
{
"group": "전략",
"icon": "compass",
"pages": [
"ko/guides/concepts/evaluating-use-cases"
]
"pages": ["ko/guides/concepts/evaluating-use-cases"]
},
{
"group": "에이전트 (Agents)",
"icon": "user",
"pages": [
"ko/guides/agents/crafting-effective-agents"
]
"pages": ["ko/guides/agents/crafting-effective-agents"]
},
{
"group": "크루 (Crews)",
"icon": "users",
"pages": [
"ko/guides/crews/first-crew"
]
"pages": ["ko/guides/crews/first-crew"]
},
{
"group": "플로우 (Flows)",
@@ -1205,7 +1149,6 @@
"ko/observability/arize-phoenix",
"ko/observability/braintrust",
"ko/observability/datadog",
"ko/observability/galileo",
"ko/observability/langdb",
"ko/observability/langfuse",
"ko/observability/langtrace",
@@ -1250,9 +1193,7 @@
},
{
"group": "Telemetry",
"pages": [
"ko/telemetry"
]
"pages": ["ko/telemetry"]
}
]
},
@@ -1262,9 +1203,7 @@
"groups": [
{
"group": "시작 안내",
"pages": [
"ko/enterprise/introduction"
]
"pages": ["ko/enterprise/introduction"]
},
{
"group": "빌드",
@@ -1273,8 +1212,7 @@
"ko/enterprise/features/crew-studio",
"ko/enterprise/features/marketplace",
"ko/enterprise/features/agent-repositories",
"ko/enterprise/features/tools-and-integrations",
"ko/enterprise/features/pii-trace-redactions"
"ko/enterprise/features/tools-and-integrations"
]
},
{
@@ -1356,9 +1294,7 @@
},
{
"group": "학습 자원",
"pages": [
"ko/enterprise/resources/frequently-asked-questions"
]
"pages": ["ko/enterprise/resources/frequently-asked-questions"]
}
]
},
@@ -1384,10 +1320,7 @@
"groups": [
{
"group": "예시",
"pages": [
"ko/examples/example",
"ko/examples/cookbooks"
]
"pages": ["ko/examples/example", "ko/examples/cookbooks"]
}
]
},
@@ -1397,9 +1330,7 @@
"groups": [
{
"group": "릴리스 노트",
"pages": [
"ko/changelog"
]
"pages": ["ko/changelog"]
}
]
}

View File

@@ -4,516 +4,6 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
icon: "clock"
mode: "wide"
---
<Update label="Jan 08, 2026">
## v1.8.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.8.0)
## What's Changed
### Features
- Add native async chain for a2a
- Add a2a update mechanisms (poll/stream/push) with handlers and config
- Introduce global flow configuration for human-in-the-loop feedback
- Add streaming tool call events and fix provider ID tracking
- Introduce production-ready Flows and Crews architecture
- Add HITL for Flows
- Improve EventListener and TraceCollectionListener for enhanced event handling
### Bug Fixes
- Handle missing a2a dependency as optional
- Correct error fetching for WorkOS login polling
- Fix wrong trigger name in sample documentation
### Documentation
- Update webhook-streaming documentation
- Adjust AOP to AMP documentation language
### Contributors
@Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide, @mplachta
</Update>
<Update label="Dec 19, 2025">
## v1.7.2
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.7.2)
## What's Changed
### Bug Fixes
- Resolve connection issues
### Documentation
- Update api-reference/status docs page
### Contributors
@greysonlalonde, @heitorado, @lorenzejay, @lucasgomide
</Update>
<Update label="Dec 16, 2025">
## v1.7.1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.7.1)
## What's Changed
### Improvements
- Add `--no-commit` flag to bump command
- Use JSON schema for tool argument serialization
### Bug Fixes
- Fix error message display from response when tool repository login fails
- Fix graceful termination of future when executing a task asynchronously
- Fix task ordering by adding index
- Fix platform compatibility checks for Windows signals
- Fix RPM controller timer to prevent process hang
- Fix token usage recording and validate response model on stream
### Documentation
- Add translated documentation for async
- Add documentation for AOP Deploy API
- Add documentation for the agent handler connector
- Add documentation on native async
### Contributors
@Llamrei, @dragosmc, @gilfeig, @greysonlalonde, @heitorado, @lorenzejay, @mattatcha, @vinibrsl
</Update>
<Update label="Dec 09, 2025">
## v1.7.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.7.0)
## What's Changed
### Features
- Add async flow kickoff
- Add async crew support
- Add async task support
- Add async knowledge support
- Add async memory support
- Add async support for tools and agent executor; improve typing and docs
- Implement a2a extensions API and async agent card caching; fix task propagation & streaming
- Add native async tool support
- Add async llm support
- Create sys event types and handler
### Bug Fixes
- Fix issue to ensure nonetypes are not passed to otel
- Fix deadlock in token store file operations
- Fix to ensure otel span is closed
- Use HuggingFaceEmbeddingFunction for embeddings, update keys and add tests
- Fix to ensure supports_tools is true for all supported anthropic models
- Ensure hooks work with lite agents flows
### Contributors
@greysonlalonde, @lorenzejay
</Update>
<Update label="Nov 29, 2025">
## v1.6.1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.6.1)
## What's Changed
### Bug Fixes
- Fix ChatCompletionsClient call to ensure proper functionality
- Ensure async methods are executable for annotations
- Fix parameters in RagTool.add, add typing, and tests
- Remove invalid parameter from SSE client
- Erase 'oauth2_extra' setting on 'crewai config reset' command
### Refactoring
- Enhance model validation and provider inference in LLM class
### Contributors
@Vidit-Ostwal, @greysonlalonde, @heitorado, @lorenzejay
</Update>
<Update label="Nov 25, 2025">
## v1.6.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.6.0)
## What's Changed
### Features
- Add streaming result support to flows and crews
- Add gemini-3-pro-preview
- Support CLI login with Entra ID
- Add Merge Agent Handler tool
- Enhance flow event state management
### Bug Fixes
- Ensure custom rag store persist path is set if passed
- Ensure fuzzy returns are more strict and show type warning
- Re-add openai response_format parameter and add test
- Fix rag tool embeddings configuration
- Ensure flow execution start panel is not shown on plot
### Documentation
- Update references from AMP to AOP in documentation
- Update AMP to AOP
### Contributors
@Vidit-Ostwal, @gilfeig, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @markmcd
</Update>
<Update label="Nov 22, 2025">
## v0.203.2
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/0.203.2)
## What's Changed
- Hotfix version bump from 0.203.1 to 0.203.2
</Update>
<Update label="Nov 16, 2025">
## v1.5.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.5.0)
## What's Changed
### Features
- Add a2a trust remote completion status flag
- Fetch and store more data about Okta authorization server
- Implement before and after LLM call hooks in CrewAgentExecutor
- Expose messages to TaskOutput and LiteAgentOutputs
- Enhance schema description of QdrantVectorSearchTool
### Bug Fixes
- Ensure tracing instrumentation flags are correctly applied
- Fix custom tool documentation links and add Mintlify broken links action
### Documentation
- Enhance task guardrail documentation with LLM-based validation support
### Contributors
@danielfsbarreto, @greysonlalonde, @heitorado, @lorenzejay, @theCyberTech
</Update>
<Update label="Nov 07, 2025">
## v1.4.1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.4.1)
## What's Changed
### Bug Fixes
- Fix handling of agent max iterations
- Resolve routing issues for LLM model syntax to respective providers
### Contributors
@greysonlalonde
</Update>
<Update label="Nov 07, 2025">
## v1.4.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.4.0)
## What's Changed
### Features
- Add support for non-AST plot routes
- Implement first-class support for MCP
- Add Pydantic validation dunder to BaseInterceptor
- Add support for LLM message interceptor hooks
- Cache i18n prompts for efficient use
- Enhance QdrantVectorSearchTool
### Bug Fixes
- Fix issues with keeping stopwords updated
- Resolve unpickleable values in flow state
- Ensure lite agents course-correct on validation errors
- Fix callback argument hashing to ensure caching works
- Allow adding RAG source content from valid URLs
- Make plot node selection smoother
- Fix duplicating document IDs for knowledge
### Refactoring
- Improve MCP tool execution handling with concurrent futures
- Simplify flow handling, typing, and logging; update UI and tests
- Refactor stop word management to a property
### Documentation
- Migrate embedder to embedding_model and require vectordb across tool docs; add provider examples (en/ko/pt-BR)
### Contributors
@danielfsbarreto, @greysonlalonde, @lorenzejay, @lucasgomide, @tonykipkemboi
</Update>
<Update label="Nov 01, 2025">
## v1.3.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.3.0)
## What's Changed
### Features
- Refactor flow handling, typing, and logging
- Enhance QdrantVectorSearchTool
### Bug Fixes
- Fix Firecrawl tools and add tests
- Refactor use_stop_words to property and add check for stop words
### Documentation
- Migrate embedder to embedding_model and require vectordb across tool docs
- Add provider examples in English, Korean, and Portuguese
### Refactoring
- Improve flow handling and UI updates
### Contributors
@danielfsbarreto, @greysonlalonde, @lorenzejay, @lucasgomide, @tonykipkemboi
</Update>
<Update label="Oct 27, 2025">
## v1.2.1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.2.1)
## What's Changed
### Features
- Add support for Datadog integration
- Support apps and mcps in liteagent
### Documentation
- Describe mandatory environment variable for calling Platform tools for each integration
- Add Datadog integration documentation
### Contributors
@barieom, @lorenzejay, @lucasgomide, @sabrenner
</Update>
<Update label="Oct 24, 2025">
## v1.2.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.2.0)
## What's Changed
### Bug Fixes
- Update default LLM model and improve error logging in LLM utilities
- Change flow visualization directory and method inspection
### Dropping Unused
- Remove aisuite
### Contributors
@greysonlalonde, @lorenzejay
</Update>
<Update label="Oct 21, 2025">
## v1.1.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.1.0)
## What's Changed
### Features
- Enhance InternalInstructor to support multiple LLM providers
- Implement mypy plugin base
- Improve QdrantVectorSearchTool
### Bug Fixes
- Correct broken integration documentation links
- Fix double trace call and add types
- Pin template versions to latest
### Documentation
- Update LLM integration details and examples
### Refactoring
- Improve CrewBase typing
### Contributors
@cwarre33, @danielfsbarreto, @greysonlalonde, @lorenzejay
</Update>
<Update label="Oct 20, 2025">
## v1.0.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0)
## What's Changed
### Features
- Bump versions to 1.0.0
- Enhance knowledge and guardrail event handling in Agent class
- Inject tool repository credentials in crewai run command
### Bug Fixes
- Preserve nested condition structure in Flow decorators
- Add standard print parameters to Printer.print method
- Fix errors when there is no input() available
- Add a leeway of 10s when decoding JWT
- Revert bad cron schedule
- Correct cron schedule to run every 5 days at specific dates
- Use system PATH for Docker binary instead of hardcoded path
- Add CodeQL configuration to properly exclude template directories
### Documentation
- Update security policy for vulnerability reporting
- Add guide for capturing telemetry logs in CrewAI AMP
- Add missing /resume files
- Clarify webhook URL parameter in HITL workflows
### Contributors
@Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide, @mplachta, @theCyberTech
</Update>
<Update label="Oct 18, 2025">
## v1.0.0b3 (Pre-release)
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0b3)
## What's Changed
### Features
- Enhance task guardrail functionality and validation
- Improve support for importing native SDK
- Add Azure native tests
- Enhance BedrockCompletion class with advanced features
- Enhance GeminiCompletion class with client parameter support
- Enhance AnthropicCompletion class with additional client parameters
### Bug Fixes
- Preserve nested condition structure in Flow decorators
- Add standard print parameters to Printer.print method
- Remove stdout prints and improve test determinism
### Refactoring
- Convert project module to metaclass with full typing
### Contributors
@greysonlalonde, @lorenzejay
</Update>
<Update label="Oct 16, 2025">
## v1.0.0b2 (Pre-release)
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0b2)
## What's Changed
### Features
- Enhance OpenAICompletion class with additional client parameters
- Improve event bus thread safety and async support
- Inject tool repository credentials in crewai run command
### Bug Fixes
- Fix issue where it errors out if there is no input() available
- Add a leeway of 10s when decoding JWT
- Fix copying and adding NOT_SPECIFIED check in task.py
### Documentation
- Ensure CREWAI_PLATFORM_INTEGRATION_TOKEN is mentioned in documentation
- Update triggers documentation
### Contributors
@Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide
</Update>
<Update label="Oct 14, 2025">
## v1.0.0b1 (Pre-release)
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0b1)
## What's Changed
### Features
- Enhance OpenAICompletion class with additional client parameters
- Improve event bus thread safety and async support
- Implement Bedrock LLM integration
### Bug Fixes
- Fix issue with missing input() availability
- Resolve JWT decoding error by adding a leeway of 10 seconds
- Inject tool repository credentials in crewai run command
- Fix copy and add NOT_SPECIFIED check in task.py
### Documentation
- Ensure CREWAI_PLATFORM_INTEGRATION_TOKEN is mentioned in documentation
- Update triggers documentation
### Contributors
@Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide
</Update>
<Update label="Oct 13, 2025">
## v0.203.1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/0.203.1)
## What's Changed
### Core Improvements & Fixes
- Fixed injection of tool repository credentials into the `crewai run` command
- Added a 10-second leeway when decoding JWTs to reduce token validation errors
- Corrected (then reverted) cron schedule fix intended to run jobs every 5 days at specific dates
### Documentation & Guides
- Updated security policy to clarify the process for vulnerability reporting
</Update>
<Update label="Oct 09, 2025">
## v1.0.0a4 (Pre-release)
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0a4)
## What's Changed
### Features
- Enhance knowledge and guardrail event handling in Agent class
- Introduce trigger listing and execution commands for local development
- Update documentation with new approach to consume Platform Actions
- Add guide for capturing telemetry logs in CrewAI AMP
### Bug Fixes
- Revert bad cron schedule
- Correct cron schedule to run every 5 days at specific dates
- Remove duplicate line and add explicit environment variable
- Resolve linting errors across the codebase
- Replace print statements with logger in agent and memory handling
- Use system PATH for Docker binary instead of hardcoded path
- Allow failed PyPI publish
- Match tag and release title, ignore devtools build for PyPI
### Documentation
- Update security policy for vulnerability reporting
- Add missing /resume files
- Clarify webhook URL parameter in HITL workflows
### Contributors
@Vidit-Ostwal, @greysonlalonde, @lorenzejay, @lucasgomide, @theCyberTech
</Update>
<Update label="Sep 30, 2025">
## v1.0.0a1

View File

@@ -574,10 +574,6 @@ When you run this Flow, the output will change based on the random boolean value
### Human in the Loop (human feedback)
<Note>
The `@human_feedback` decorator requires **CrewAI version 1.8.0 or higher**.
</Note>
The `@human_feedback` decorator enables human-in-the-loop workflows by pausing flow execution to collect feedback from a human. This is useful for approval gates, quality review, and decision points that require human judgment.
```python Code

View File

@@ -1,342 +0,0 @@
---
title: PII Redaction for Traces
description: "Automatically redact sensitive data from crew and flow execution traces"
icon: "lock"
mode: "wide"
---
## Overview
PII Redaction is a CrewAI AMP feature that automatically detects and masks Personally Identifiable Information (PII) in your crew and flow execution traces. This ensures sensitive data like credit card numbers, social security numbers, email addresses, and names are not exposed in your CrewAI AMP traces. You can also create custom recognizers to protect organization-specific data.
<Info>
PII Redaction is available on the Enterprise plan.
Deployment must be version 1.8.0 or higher.
</Info>
<Frame>
![PII Redaction Overview](/images/enterprise/pii_mask_recognizer_trace_example.png)
</Frame>
## Why PII Redaction Matters
When running AI agents in production, sensitive information often flows through your crews:
- Customer data from CRM integrations
- Financial information from payment processors
- Personal details from form submissions
- Internal employee data
Without proper redaction, this data appears in traces, making compliance with regulations like GDPR, HIPAA, and PCI-DSS challenging. PII Redaction solves this by automatically masking sensitive data before it's stored in traces.
## How It Works
1. **Detect** - Scan trace event data for known PII patterns
2. **Classify** - Identify the type of sensitive data (credit card, SSN, email, etc.)
3. **Mask/Redact** - Replace the sensitive data with masked values based on your configuration
```
Original: "Contact john.doe@company.com or call 555-123-4567"
Redacted: "Contact <EMAIL_ADDRESS> or call <PHONE_NUMBER>"
```
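As an illustration of the detect-and-mask step, here is a minimal sketch using plain regexes. The patterns and helper below are assumptions for demonstration only; the actual AMP feature is configuration-driven and also uses NLP-based recognizers:

```python
import re

# Illustrative patterns only; real detection covers many more entity types.
PATTERNS = {
    "EMAIL_ADDRESS": re.compile(r"[\w.+-]+@[\w-]+\.[\w.]+"),
    "PHONE_NUMBER": re.compile(r"\b\d{3}-\d{3}-\d{4}\b"),
}

def mask_pii(text: str) -> str:
    """Replace each detected entity with its type label in angle brackets."""
    for entity, pattern in PATTERNS.items():
        text = pattern.sub(f"<{entity}>", text)
    return text

print(mask_pii("Contact john.doe@company.com or call 555-123-4567"))
# -> "Contact <EMAIL_ADDRESS> or call <PHONE_NUMBER>"
```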
## Enabling PII Redaction
<Info>
You must be on the Enterprise plan and your deployment must be version 1.8.0 or higher to use this feature.
</Info>
<Steps>
<Step title="Navigate to Crew Settings">
In the CrewAI AMP dashboard, select your deployed crew and go to one of your deployments/automations, then navigate to **Settings** → **PII Protection**.
</Step>
<Step title="Enable PII Protection">
Toggle on **PII Redaction for Traces**. This will enable automatic scanning and redaction of trace data.
<Info>
You need to manually enable PII Redaction for each deployment.
</Info>
<Frame>
![Enable PII Redaction](/images/enterprise/pii_mask_recognizer_enable.png)
</Frame>
</Step>
<Step title="Configure Entity Types">
Select which types of PII to detect and redact. Each entity can be individually enabled or disabled.
<Frame>
![Configure Entities](/images/enterprise/pii_mask_recognizer_supported_entities.png)
</Frame>
</Step>
<Step title="Save">
Save your configuration. PII redaction will be active on all subsequent crew executions; no redeployment is needed.
</Step>
</Steps>
## Supported Entity Types
CrewAI supports the following PII entity types, organized by category.
### Global Entities
| Entity | Description | Example |
|--------|-------------|---------|
| `CREDIT_CARD` | Credit/debit card numbers | "4111-1111-1111-1111" |
| `CRYPTO` | Cryptocurrency wallet addresses | "bc1qxy2kgd..." |
| `DATE_TIME` | Dates and times | "January 15, 2024" |
| `EMAIL_ADDRESS` | Email addresses | "john@example.com" |
| `IBAN_CODE` | International bank account numbers | "DE89 3704 0044 0532 0130 00" |
| `IP_ADDRESS` | IPv4 and IPv6 addresses | "192.168.1.1" |
| `LOCATION` | Geographic locations | "New York City" |
| `MEDICAL_LICENSE` | Medical license numbers | "MD12345" |
| `NRP` | Nationalities, religious, or political groups | - |
| `PERSON` | Personal names | "John Doe" |
| `PHONE_NUMBER` | Phone numbers in various formats | "+1 (555) 123-4567" |
| `URL` | Web URLs | "https://example.com" |
### US-Specific Entities
| Entity | Description | Example |
|--------|-------------|---------|
| `US_BANK_NUMBER` | US Bank account numbers | "1234567890" |
| `US_DRIVER_LICENSE` | US Driver's license numbers | "D1234567" |
| `US_ITIN` | Individual Taxpayer ID | "900-70-0000" |
| `US_PASSPORT` | US Passport numbers | "123456789" |
| `US_SSN` | Social Security Numbers | "123-45-6789" |
## Redaction Actions
For each enabled entity, you can configure how the data is redacted:
| Action | Description | Example Output |
|--------|-------------|----------------|
| `mask` | Replace with the entity type label | `<CREDIT_CARD>` |
| `redact` | Completely remove the text | *(empty)* |
## Custom Recognizers
In addition to built-in entities, you can create **custom recognizers** to detect organization-specific PII patterns.
<Frame>
![Custom Recognizers](/images/enterprise/pii_mask_recognizer.png)
</Frame>
### Recognizer Types
You have two options for custom recognizers:
| Type | Best For | Example Use Case |
|------|----------|------------------|
| **Pattern-based (Regex)** | Structured data with predictable formats | Salary amounts, employee IDs, project codes |
| **Deny-list** | Exact string matches | Company names, internal codenames, specific terms |
### Creating a Custom Recognizer
<Steps>
<Step title="Navigate to Custom Recognizers">
Go to your Organization **Settings** → **Organization** → **Add Recognizer**.
</Step>
<Step title="Configure the Recognizer">
<Frame>
![Configure Recognizer](/images/enterprise/pii_mask_recognizer_create.png)
</Frame>
Configure the following fields:
- **Name**: A descriptive name for the recognizer
- **Entity Type**: The entity label that will appear in redacted output (e.g., `EMPLOYEE_ID`, `SALARY`)
- **Type**: Choose between Regex Pattern or Deny List
- **Pattern/Values**: Regex pattern or list of strings to match
- **Confidence Threshold**: Minimum score (0.0-1.0) required for a match to trigger redaction. Higher values (e.g., 0.8) reduce false positives but may miss some matches. Lower values (e.g., 0.5) catch more matches but may over-redact. Default is 0.8.
- **Context Words** (optional): Words that increase detection confidence when found nearby
</Step>
<Step title="Save">
Save the recognizer. It will be available to enable on your deployments.
</Step>
</Steps>
### Understanding Entity Types
The **Entity Type** determines how matched content appears in redacted traces:
```
Entity Type: SALARY
Pattern: salary:\s*\$\s*[\d,]+
Input: "Employee salary: $50,000"
Output: "Employee <SALARY>"
```
### Using Context Words
Context words improve accuracy by increasing confidence when specific terms appear near the matched pattern:
```
Context Words: "project", "code", "internal"
Entity Type: PROJECT_CODE
Pattern: PRJ-\d{4}
```
When "project" or "code" appears near "PRJ-1234", the recognizer has higher confidence it's a true match, reducing false positives.
## Viewing Redacted Traces
Once PII redaction is enabled, your traces will show redacted values in place of sensitive data:
```
Task Output: "Customer <PERSON> placed order #12345.
Contact email: <EMAIL_ADDRESS>, phone: <PHONE_NUMBER>.
Payment processed for card ending in <CREDIT_CARD>."
```
Redacted values are clearly marked with angle brackets and the entity type label (e.g., `<EMAIL_ADDRESS>`), making it easy to understand what data was protected while still allowing you to debug and monitor crew behavior.
## Best Practices
### Performance Considerations
<Steps>
<Step title="Enable Only Needed Entities">
Each enabled entity adds processing overhead. Only enable entities relevant to your data.
</Step>
<Step title="Use Specific Patterns">
For custom recognizers, use specific patterns to reduce false positives and improve performance. Regex patterns work best for structured values in traces such as salaries, employee IDs, and project codes; deny-list recognizers work best for exact strings such as company names and internal codenames.
</Step>
<Step title="Leverage Context Words">
Context words improve accuracy by only triggering detection when surrounding text matches.
</Step>
</Steps>
## Troubleshooting
<Accordion title="PII Not Being Redacted">
**Possible Causes:**
- Entity type not enabled in configuration
- Pattern doesn't match the data format
- Custom recognizer has syntax errors
**Solutions:**
- Verify the entity is enabled in Settings → PII Protection
- Test regex patterns with sample data
- Check logs for configuration errors
</Accordion>
<Accordion title="Too Much Data Being Redacted">
**Possible Causes:**
- Overly broad entity types enabled (e.g., `DATE_TIME` catches dates everywhere)
- Custom recognizer patterns are too general
**Solutions:**
- Disable entities that cause false positives
- Make custom patterns more specific
- Add context words to improve accuracy
</Accordion>
<Accordion title="Performance Issues">
**Possible Causes:**
- Too many entities enabled
- NLP-based entities (`PERSON`, `LOCATION`, `NRP`) are computationally expensive as they use machine learning models
**Solutions:**
- Only enable entities you actually need
- Consider using pattern-based alternatives where possible
- Monitor trace processing times in the dashboard
</Accordion>
---
## Practical Example: Salary Pattern Matching
This example demonstrates how to create a custom recognizer to detect and mask salary information in your traces.
### Use Case
Your crew processes employee or financial data that includes salary information in formats like:
- `salary: $50,000`
- `salary: $125,000.00`
- `salary:$1,500.50`
You want to automatically mask these values to protect sensitive compensation data.
### Configuration
<Frame>
![Salary Recognizer Configuration](/images/enterprise/pii_mask_custom_recognizer_salary.png)
</Frame>
| Field | Value |
|-------|-------|
| **Name** | `SALARY` |
| **Entity Type** | `SALARY` |
| **Type** | Regex Pattern |
| **Regex Pattern** | `salary:\s*\$\s*\d{1,3}(,\d{3})*(\.\d{2})?` |
| **Action** | Mask |
| **Confidence Threshold** | `0.8` |
| **Context Words** | `salary, compensation, pay, wage, income` |
### Regex Pattern Breakdown
| Pattern Component | Meaning |
|-------------------|---------|
| `salary:` | Matches the literal text "salary:" |
| `\s*` | Matches zero or more whitespace characters |
| `\$` | Matches the dollar sign (escaped) |
| `\s*` | Matches zero or more whitespace characters after $ |
| `\d{1,3}` | Matches 1-3 digits (e.g., "1", "50", "125") |
| `(,\d{3})*` | Matches comma-separated thousands (e.g., ",000", ",500,000") |
| `(\.\d{2})?` | Optionally matches cents (e.g., ".00", ".50") |
### Example Results
```
Original: "Employee record shows salary: $125,000.00 annually"
Redacted: "Employee record shows <SALARY> annually"
Original: "Base salary:$50,000 with bonus potential"
Redacted: "Base <SALARY> with bonus potential"
```
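Before saving the recognizer, you can sanity-check the pattern locally with standard `re`, approximating the mask action with `sub` (a quick sketch, not how AMP applies it internally):

```python
import re

# Same pattern as the configuration table above.
SALARY = re.compile(r"salary:\s*\$\s*\d{1,3}(,\d{3})*(\.\d{2})?")

samples = [
    "Employee record shows salary: $125,000.00 annually",
    "Base salary:$50,000 with bonus potential",
]
for s in samples:
    print(SALARY.sub("<SALARY>", s))
# -> "Employee record shows <SALARY> annually"
# -> "Base <SALARY> with bonus potential"
```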
<Tip>
Adding context words like "salary", "compensation", "pay", "wage", and "income" helps increase detection confidence when these terms appear near the matched pattern, reducing false positives.
</Tip>
### Enable the Recognizer for Your Deployments
<Warning>
Creating a custom recognizer at the organization level does not automatically enable it for your deployments. You must manually enable each recognizer for every deployment where you want it applied.
</Warning>
After creating your custom recognizer, enable it for each deployment:
<Steps>
<Step title="Navigate to Your Deployment">
Go to your deployment/automation and open **Settings** → **PII Protection**.
</Step>
<Step title="Select Custom Recognizers">
Under **Mask Recognizers**, you'll see your organization-defined recognizers. Check the box next to the recognizers you want to enable.
<Frame>
![Enable Custom Recognizer](/images/enterprise/pii_mask_recognizers_options.png)
</Frame>
</Step>
<Step title="Save Configuration">
Save your changes. The recognizer will be active on all subsequent executions for this deployment.
</Step>
</Steps>
<Info>
Repeat this process for each deployment where you need the custom recognizer. This gives you granular control over which recognizers are active in different environments (e.g., development vs. production).
</Info>

View File

@@ -87,14 +87,6 @@ The `A2AConfig` class accepts the following parameters:
When `True`, returns the A2A agent's result directly when it signals completion. When `False`, allows the server agent to review the result and potentially continue the conversation.
</ParamField>
<ParamField path="updates" type="UpdateConfig" default="StreamingConfig()">
Update mechanism for receiving task status. Options: `StreamingConfig`, `PollingConfig`, or `PushNotificationConfig`.
</ParamField>
<ParamField path="transport_protocol" type="Literal['JSONRPC', 'GRPC', 'HTTP+JSON']" default="JSONRPC">
Transport protocol for A2A communication. Options: `JSONRPC` (default), `GRPC`, or `HTTP+JSON`.
</ParamField>
## Authentication
For A2A agents that require authentication, use one of the provided auth schemes:
@@ -261,74 +253,6 @@ When `fail_fast=False`:
- If all agents fail, the LLM receives a notice about unavailable agents and handles the task directly
- Connection errors are captured and included in the context for better decision-making
## Update Mechanisms
Control how your agent receives task status updates from remote A2A agents:
<Tabs>
<Tab title="Streaming (Default)">
```python Code
from crewai.a2a import A2AConfig
from crewai.a2a.updates import StreamingConfig

agent = Agent(
    role="Research Coordinator",
    goal="Coordinate research tasks",
    backstory="Expert at delegation",
    llm="gpt-4o",
    a2a=A2AConfig(
        endpoint="https://research.example.com/.well-known/agent-card.json",
        updates=StreamingConfig()
    )
)
```
</Tab>
<Tab title="Polling">
```python Code
from crewai.a2a import A2AConfig
from crewai.a2a.updates import PollingConfig

agent = Agent(
    role="Research Coordinator",
    goal="Coordinate research tasks",
    backstory="Expert at delegation",
    llm="gpt-4o",
    a2a=A2AConfig(
        endpoint="https://research.example.com/.well-known/agent-card.json",
        updates=PollingConfig(
            interval=2.0,
            timeout=300.0,
            max_polls=100
        )
    )
)
```
</Tab>
<Tab title="Push Notifications">
```python Code
from crewai.a2a import A2AConfig
from crewai.a2a.updates import PushNotificationConfig

agent = Agent(
    role="Research Coordinator",
    goal="Coordinate research tasks",
    backstory="Expert at delegation",
    llm="gpt-4o",
    a2a=A2AConfig(
        endpoint="https://research.example.com/.well-known/agent-card.json",
        updates=PushNotificationConfig(
            url=f"{base_url}/a2a/callback",
            token="your-validation-token",
            timeout=300.0
        )
    )
)
```
</Tab>
</Tabs>
## Best Practices
<CardGroup cols={2}>

View File

@@ -7,10 +7,6 @@ mode: "wide"
## Overview
<Note>
The `@human_feedback` decorator requires **CrewAI version 1.8.0 or higher**. Make sure to update your installation before using this feature.
</Note>
The `@human_feedback` decorator enables human-in-the-loop (HITL) workflows directly within CrewAI Flows. It allows you to pause flow execution, present output to a human for review, collect their feedback, and optionally route to different listeners based on the feedback outcome.
This is particularly valuable for:

View File

@@ -11,10 +11,10 @@ Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelli
CrewAI offers two main approaches for implementing human-in-the-loop workflows:
| Approach | Best For | Integration | Version |
|----------|----------|-------------|---------|
| **Flow-based** (`@human_feedback` decorator) | Local development, console-based review, synchronous workflows | [Human Feedback in Flows](/en/learn/human-feedback-in-flows) | **1.8.0+** |
| **Webhook-based** (Enterprise) | Production deployments, async workflows, external integrations (Slack, Teams, etc.) | This guide | - |
| Approach | Best For | Integration |
|----------|----------|-------------|
| **Flow-based** (`@human_feedback` decorator) | Local development, console-based review, synchronous workflows | [Human Feedback in Flows](/en/learn/human-feedback-in-flows) |
| **Webhook-based** (Enterprise) | Production deployments, async workflows, external integrations (Slack, Teams, etc.) | This guide |
<Tip>
If you're building flows and want to add human review steps with routing based on feedback, check out the [Human Feedback in Flows](/en/learn/human-feedback-in-flows) guide for the `@human_feedback` decorator.

View File

@@ -1,115 +0,0 @@
---
title: Galileo
description: Galileo integration for CrewAI tracing and evaluation
icon: telescope
mode: "wide"
---
## Overview
This guide demonstrates how to integrate **Galileo** with **CrewAI**
for comprehensive tracing and Evaluation Engineering.
By the end of this guide, you will be able to trace your CrewAI agents,
monitor their performance, and evaluate their behaviour with
Galileo's powerful observability platform.
> **What is Galileo?** [Galileo](https://galileo.ai) is an AI evaluation and observability
platform that delivers end-to-end tracing, evaluation,
and monitoring for AI applications. It enables teams to capture ground truth,
create robust guardrails, and run systematic experiments with
built-in experiment tracking and performance analytics—ensuring reliability,
transparency, and continuous improvement across the AI lifecycle.
## Getting started
This tutorial follows the [CrewAI quickstart](/en/quickstart) and shows how to add
Galileo's [CrewAIEventListener](https://v2docs.galileo.ai/sdk-api/python/reference/handlers/crewai/handler),
an event handler.
For more information, see Galileo's
[Add Galileo to a CrewAI Application](https://v2docs.galileo.ai/how-to-guides/third-party-integrations/add-galileo-to-crewai/add-galileo-to-crewai)
how-to guide.
> **Note** This tutorial assumes you have completed the [CrewAI quickstart](/en/quickstart).
If you want a complete, comprehensive example, see the Galileo
[CrewAI sdk-example repo](https://github.com/rungalileo/sdk-examples/tree/main/python/agent/crew-ai).
### Step 1: Install dependencies
Install the required dependencies for your app.
Create a virtual environment using your preferred method,
then install dependencies inside that environment using your
preferred tool:
```bash
uv add galileo
```
### Step 2: Add to the .env file from the [CrewAI quickstart](/en/quickstart)
```bash
# Your Galileo API key
GALILEO_API_KEY="your-galileo-api-key"
# Your Galileo project name
GALILEO_PROJECT="your-galileo-project-name"
# The name of the Log stream you want to use for logging
GALILEO_LOG_STREAM="your-galileo-log-stream"
```
### Step 3: Add the Galileo event listener
To enable logging with Galileo, you need to create an instance of the `CrewAIEventListener`.
Import the Galileo CrewAI handler package by
adding the following code at the top of your main.py file:
```python
from galileo.handlers.crewai.handler import CrewAIEventListener
```
At the start of your run function, create the event listener:
```python
def run():
    # Create the event listener
    CrewAIEventListener()
    # The rest of your existing code goes here
```
When you create the listener instance, it is automatically
registered with CrewAI.
### Step 4: Run your crew
Run your crew with the CrewAI CLI:
```bash
crewai run
```
### Step 5: View the traces in Galileo
Once your crew has finished, the traces will be flushed and appear in Galileo.
![Galileo trace view](/images/galileo-trace-veiw.png)
## Understanding the Galileo Integration
Galileo integrates with CrewAI by registering an event listener
that captures Crew execution events (e.g., agent actions, tool calls, model responses)
and forwards them to Galileo for observability and evaluation.
### Understanding the event listener
Creating a `CrewAIEventListener()` instance is all that's
required to enable Galileo for a CrewAI run. When instantiated, the listener:
- Automatically registers itself with CrewAI
- Reads Galileo configuration from environment variables
- Logs all run data to the Galileo project and log stream specified by
`GALILEO_PROJECT` and `GALILEO_LOG_STREAM`
No additional configuration or code changes are required.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 906 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 200 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 865 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1021 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 104 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 116 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 76 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 93 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 239 KiB

View File

@@ -4,545 +4,6 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
icon: "clock"
mode: "wide"
---
<Update label="Jan 08, 2026">
## v1.8.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.8.0)
## What's Changed
### Features
- Add native async chain for a2a
- Add a2a update mechanisms (poll/stream/push) with handlers and config
- Introduce global flow configuration for human-in-the-loop feedback
- Add streaming tool call events and fix provider ID tracking
- Introduce production-ready Flows and Crews architecture
- Add HITL for Flows
- Improve EventListener and TraceCollectionListener for enhanced event handling
### Bug Fixes
- Handle missing a2a dependency as optional
- Correct error fetching for WorkOS login polling
- Fix wrong trigger name in sample documentation
### Documentation
- Update webhook-streaming documentation
- Adjust AOP to AMP documentation language
### Contributors
@Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide, @mplachta
</Update>
<Update label="Dec 19, 2025">
## v1.7.2
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.7.2)
## What's Changed
### Bug Fixes
- Resolve connection issues
### Documentation
- Update api-reference/status docs page
### Contributors
@greysonlalonde, @heitorado, @lorenzejay, @lucasgomide
</Update>
<Update label="Dec 16, 2025">
## v1.7.1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.7.1)
## What's Changed
### Improvements
- Add `--no-commit` flag to bump command
- Use JSON schema for tool argument serialization
### Bug Fixes
- Fix error message display from response when tool repository login fails
- Fix graceful termination of future when executing a task asynchronously
- Fix task ordering by adding index
- Fix platform compatibility checks for Windows signals
- Fix RPM controller timer to prevent process hang
- Fix token usage recording and validate response model on stream
### Documentation
- Add translated documentation for async
- Add documentation for AOP Deploy API
- Add documentation for the agent handler connector
- Add documentation on native async
### Contributors
@Llamrei, @dragosmc, @gilfeig, @greysonlalonde, @heitorado, @lorenzejay, @mattatcha, @vinibrsl
</Update>
<Update label="Dec 09, 2025">
## v1.7.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.7.0)
## What's Changed
### Features
- Add async flow kickoff
- Add async crew support
- Add async task support
- Add async knowledge support
- Add async memory support
- Add async support for tools and agent executor; improve typing and docs
- Implement a2a extensions API and async agent card caching; fix task propagation & streaming
- Add native async tool support
- Add async llm support
- Create sys event types and handler
### Bug Fixes
- Fix issue to ensure nonetypes are not passed to otel
- Fix deadlock in token store file operations
- Fix to ensure otel span is closed
- Use HuggingFaceEmbeddingFunction for embeddings, update keys and add tests
- Fix to ensure supports_tools is true for all supported anthropic models
- Ensure hooks work with lite agents flows
### Contributors
@greysonlalonde, @lorenzejay
</Update>
<Update label="Nov 29, 2025">
## v1.6.1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.6.1)
## What's Changed
### Bug Fixes
- Fix ChatCompletionsClient call to ensure proper functionality
- Ensure async methods are executable for annotations
- Fix parameters in RagTool.add, add typing, and tests
- Remove invalid parameter from SSE client
- Erase 'oauth2_extra' setting on 'crewai config reset' command
### Refactoring
- Enhance model validation and provider inference in LLM class
### Contributors
@Vidit-Ostwal, @greysonlalonde, @heitorado, @lorenzejay
</Update>
<Update label="Nov 25, 2025">
## v1.6.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.6.0)
## What's Changed
### Features
- Add streaming result support to flows and crews
- Add gemini-3-pro-preview
- Support CLI login with Entra ID
- Add Merge Agent Handler tool
- Enhance flow event state management
### Bug Fixes
- Ensure custom rag store persist path is set if passed
- Ensure fuzzy returns are more strict and show type warning
- Re-add openai response_format parameter and add test
- Fix rag tool embeddings configuration
- Ensure flow execution start panel is not shown on plot
### Documentation
- Update references from AMP to AOP in documentation
- Update AMP to AOP
### Contributors
@Vidit-Ostwal, @gilfeig, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @markmcd
</Update>
<Update label="Nov 22, 2025">
## v0.203.2
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/0.203.2)
## What's Changed
- Hotfix version bump from 0.203.1 to 0.203.2
</Update>
<Update label="Nov 16, 2025">
## v1.5.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.5.0)
## What's Changed
### Features
- Add a2a trust remote completion status flag
- Fetch and store more data about Okta authorization server
- Implement before and after LLM call hooks in CrewAgentExecutor
- Expose messages to TaskOutput and LiteAgentOutputs
- Enhance schema description of QdrantVectorSearchTool
### Bug Fixes
- Ensure tracing instrumentation flags are correctly applied
- Fix custom tool documentation links and add Mintlify broken links action
### Documentation
- Enhance task guardrail documentation with LLM-based validation support
### Contributors
@danielfsbarreto, @greysonlalonde, @heitorado, @lorenzejay, @theCyberTech
</Update>
<Update label="Nov 07, 2025">
## v1.4.1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.4.1)
## What's Changed
### Bug Fixes
- Fix handling of agent max iterations
- Resolve routing issues for LLM model syntax to respective providers
### Contributors
@greysonlalonde
</Update>
<Update label="Nov 07, 2025">
## v1.4.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.4.0)
## What's Changed
### Features
- Add support for non-AST plot routes
- Implement first-class support for MCP
- Add Pydantic validation dunder to BaseInterceptor
- Add support for LLM message interceptor hooks
- Cache i18n prompts for efficient use
- Enhance QdrantVectorSearchTool
### Bug Fixes
- Fix issues with keeping stopwords updated
- Resolve unpickleable values in flow state
- Ensure lite agents course-correct on validation errors
- Fix callback argument hashing to ensure caching works
- Allow adding RAG source content from valid URLs
- Make plot node selection smoother
- Fix duplicating document IDs for knowledge
### Refactoring
- Improve MCP tool execution handling with concurrent futures
- Simplify flow handling, typing, and logging; update UI and tests
- Refactor stop word management to a property
### Documentation
- Migrate embedder to embedding_model and require vectordb across tool docs; add provider examples (en/ko/pt-BR)
### Contributors
@danielfsbarreto, @greysonlalonde, @lorenzejay, @lucasgomide, @tonykipkemboi
</Update>
<Update label="Nov 01, 2025">
## v1.3.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.3.0)
## What's Changed
### Features
- Refactor flow handling, typing, and logging
- Enhance QdrantVectorSearchTool
### Bug Fixes
- Fix Firecrawl tools and add tests
- Refactor use_stop_words to property and add check for stop words
### Documentation
- Migrate embedder to embedding_model and require vectordb across tool docs
- Add provider examples in English, Korean, and Portuguese
### Refactoring
- Improve flow handling and UI updates
### Contributors
@danielfsbarreto, @greysonlalonde, @lorenzejay, @lucasgomide, @tonykipkemboi
</Update>
<Update label="Oct 27, 2025">
## v1.2.1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.2.1)
## What's Changed
### Features
- Add support for Datadog integration
- Support apps and mcps in liteagent
### Documentation
- Describe mandatory environment variable for calling Platform tools for each integration
- Add Datadog integration documentation
### Contributors
@barieom, @lorenzejay, @lucasgomide, @sabrenner
</Update>
<Update label="Oct 24, 2025">
## v1.2.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.2.0)
## What's Changed
### Bug Fixes
- Update default LLM model and improve error logging in LLM utilities
- Change flow visualization directory and method inspection
### Dropping Unused
- Remove aisuite
### Contributors
@greysonlalonde, @lorenzejay
</Update>
<Update label="Oct 21, 2025">
## v1.1.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.1.0)
## What's Changed
### Features
- Enhance InternalInstructor to support multiple LLM providers
- Implement mypy plugin base
- Improve QdrantVectorSearchTool
### Bug Fixes
- Correct broken integration documentation links
- Fix double trace call and add types
- Pin template versions to latest
### Documentation
- Update LLM integration details and examples
### Refactoring
- Improve CrewBase typing
### Contributors
@cwarre33, @danielfsbarreto, @greysonlalonde, @lorenzejay
</Update>
<Update label="Oct 20, 2025">
## v1.0.0
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0)
## What's Changed
### Features
- Bump versions to 1.0.0
- Enhance knowledge and guardrail event handling in Agent class
- Inject tool repository credentials in crewai run command
### Bug Fixes
- Preserve nested condition structure in Flow decorators
- Add standard print parameters to Printer.print method
- Fix errors when there is no input() available
- Add a leeway of 10s when decoding JWT
- Revert bad cron schedule
- Correct cron schedule to run every 5 days at specific dates
- Use system PATH for Docker binary instead of hardcoded path
- Add CodeQL configuration to properly exclude template directories
### Documentation
- Update security policy for vulnerability reporting
- Add guide for capturing telemetry logs in CrewAI AMP
- Add missing /resume files
- Clarify webhook URL parameter in HITL workflows
### Contributors
@Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide, @mplachta, @theCyberTech
</Update>
<Update label="Oct 18, 2025">
## v1.0.0b3 (Pre-release)
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0b3)
## What's Changed
### Features
- Enhance task guardrail functionality and validation
- Improve support for importing native SDK
- Add Azure native tests
- Enhance BedrockCompletion class with advanced features
- Enhance GeminiCompletion class with client parameter support
- Enhance AnthropicCompletion class with additional client parameters
### Bug Fixes
- Preserve nested condition structure in Flow decorators
- Add standard print parameters to Printer.print method
- Remove stdout prints and improve test determinism
### Refactoring
- Convert project module to metaclass with full typing
### Contributors
@greysonlalonde, @lorenzejay
</Update>
<Update label="Oct 16, 2025">
## v1.0.0b2 (Pre-release)
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0b2)
## What's Changed
### Features
- Enhance OpenAICompletion class with additional client parameters
- Improve event bus thread safety and async support
- Inject tool repository credentials in crewai run command
### Bug Fixes
- Fix issue where it errors out if there is no input() available
- Add a leeway of 10s when decoding JWT
- Fix copying and adding NOT_SPECIFIED check in task.py
### Documentation
- Ensure CREWAI_PLATFORM_INTEGRATION_TOKEN is mentioned in documentation
- Update triggers documentation
### Contributors
@Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide
</Update>
<Update label="Oct 14, 2025">
## v1.0.0b1 (Pre-release)
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0b1)
## What's Changed
### Features
- Enhance OpenAICompletion class with additional client parameters
- Improve event bus thread safety and async support
- Implement Bedrock LLM integration
### Bug Fixes
- Fix issue with missing input() availability
- Resolve JWT decoding error by adding a leeway of 10 seconds
- Inject tool repository credentials in crewai run command
- Fix copy and add NOT_SPECIFIED check in task.py
### Documentation
- Ensure CREWAI_PLATFORM_INTEGRATION_TOKEN is mentioned in documentation
- Update triggers documentation
### Contributors
@Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide
</Update>
<Update label="Oct 13, 2025">
## v0.203.1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/0.203.1)
## What's Changed
### Core Improvements & Fixes
- Fixed injection of tool repository credentials into the `crewai run` command
- Added a 10-second leeway when decoding JWTs to reduce token validation errors
- Corrected (then reverted) cron schedule fix intended to run jobs every 5 days at specific dates
### Documentation & Guides
- Updated security policy to clarify the process for vulnerability reporting
</Update>
<Update label="Oct 09, 2025">
## v1.0.0a4 (Pre-release)
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0a4)
## What's Changed
### Features
- Enhance knowledge and guardrail event handling in Agent class
- Introduce trigger listing and execution commands for local development
- Update documentation with new approach to consume Platform Actions
- Add guide for capturing telemetry logs in CrewAI AMP
### Bug Fixes
- Revert bad cron schedule
- Correct cron schedule to run every 5 days at specific dates
- Remove duplicate line and add explicit environment variable
### Contributors
@greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide, @mplachta, @theCyberTech
</Update>
<Update label="Oct 07, 2025">
## v1.0.0a3 (Pre-release)
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0a3)
## What's Changed
### Features
- Add agent support for platform actions
- Add interpreter argument to code executor tool
- Support running platform apps directly
### Documentation
- Add platform actions documentation
- Add stdio and sse transport types to MCP documentation
- Update AWS model list
### Contributors
@greysonlalonde, @heitorado, @lorenzejay, @lucasgomide
</Update>
<Update label="Oct 03, 2025">
## v1.0.0a2 (Pre-release)
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0a2)
## What's Changed
### Core Improvements & Fixes
- Update CI for the monorepo
- Update the default Anthropic model to claude-sonnet-4-20250514
- Fix tests for model updates
### Contributors
@greysonlalonde, @lorenzejay
</Update>
<Update label="Sep 30, 2025">
## v1.0.0a1

View File

@@ -567,10 +567,6 @@ Fourth method running
### Human in the Loop (human feedback)
<Note>
The `@human_feedback` decorator requires **CrewAI version 1.8.0 or higher**.
</Note>
The `@human_feedback` decorator enables human-in-the-loop workflows by pausing flow execution to collect feedback from a human. This is useful for approval gates, quality review, and decision points that require human judgment.
```python Code

View File

@@ -1,342 +0,0 @@
---
title: 트레이스용 PII 삭제
description: "크루 및 플로우 실행 트레이스에서 민감한 데이터를 자동으로 삭제합니다"
icon: "lock"
mode: "wide"
---
## 개요
PII 삭제는 크루 및 플로우 실행 트레이스에서 개인 식별 정보(PII)를 자동으로 감지하고 마스킹하는 CrewAI AMP 기능입니다. 이를 통해 신용카드 번호, 주민등록번호, 이메일 주소, 이름과 같은 민감한 데이터가 CrewAI AMP 트레이스에 노출되지 않도록 보장합니다. 또한 조직별 데이터를 보호하기 위해 커스텀 인식기를 생성할 수 있습니다.
<Info>
PII 삭제는 Enterprise 플랜에서 사용 가능합니다.
배포 버전은 1.8.0 이상이어야 합니다.
</Info>
<Frame>
![PII 삭제 개요](/images/enterprise/pii_mask_recognizer_trace_example.png)
</Frame>
## PII 삭제가 중요한 이유
프로덕션 환경에서 AI 에이전트를 실행할 때, 민감한 정보가 종종 크루를 통해 흐릅니다:
- CRM 통합의 고객 데이터
- 결제 처리업체의 금융 정보
- 양식 제출의 개인 정보
- 내부 직원 데이터
적절한 삭제 없이는 이 데이터가 트레이스에 나타나, GDPR, HIPAA, PCI-DSS와 같은 규정 준수가 어려워집니다. PII 삭제는 트레이스에 저장되기 전에 민감한 데이터를 자동으로 마스킹하여 이 문제를 해결합니다.
## 작동 방식
1. **감지** - 알려진 PII 패턴에 대해 트레이스 이벤트 데이터를 스캔
2. **분류** - 민감한 데이터 유형 식별 (신용카드, SSN, 이메일 등)
3. **마스킹/삭제** - 구성에 따라 민감한 데이터를 마스킹된 값으로 대체
```
원본: "john.doe@company.com으로 연락하거나 555-123-4567로 전화하세요"
삭제됨: "<EMAIL_ADDRESS>로 연락하거나 <PHONE_NUMBER>로 전화하세요"
```
## PII 삭제 활성화
<Info>
이 기능을 사용하려면 Enterprise 플랜이어야 하며 배포 버전이 1.8.0 이상이어야 합니다.
</Info>
<Steps>
<Step title="크루 설정으로 이동">
CrewAI AMP 대시보드에서 배포된 크루를 선택하고 배포/자동화 중 하나로 이동한 다음 **Settings** → **PII Protection**으로 이동합니다.
</Step>
<Step title="PII 보호 활성화">
**PII Redaction for Traces**를 토글하여 활성화합니다. 이렇게 하면 트레이스 데이터의 자동 스캔 및 삭제가 활성화됩니다.
<Info>
각 배포에 대해 PII 삭제를 수동으로 활성화해야 합니다.
</Info>
<Frame>
![PII 삭제 활성화](/images/enterprise/pii_mask_recognizer_enable.png)
</Frame>
</Step>
<Step title="엔티티 유형 구성">
감지하고 삭제할 PII 유형을 선택합니다. 각 엔티티는 개별적으로 활성화하거나 비활성화할 수 있습니다.
<Frame>
![엔티티 구성](/images/enterprise/pii_mask_recognizer_supported_entities.png)
</Frame>
</Step>
<Step title="저장">
구성을 저장합니다. PII 삭제는 이후 모든 크루 실행에서 활성화되며, 재배포가 필요하지 않습니다.
</Step>
</Steps>
## 지원되는 엔티티 유형
CrewAI는 카테고리별로 구성된 다음 PII 엔티티 유형을 지원합니다.
### 글로벌 엔티티
| 엔티티 | 설명 | 예시 |
|--------|------|------|
| `CREDIT_CARD` | 신용/직불 카드 번호 | "4111-1111-1111-1111" |
| `CRYPTO` | 암호화폐 지갑 주소 | "bc1qxy2kgd..." |
| `DATE_TIME` | 날짜 및 시간 | "2024년 1월 15일" |
| `EMAIL_ADDRESS` | 이메일 주소 | "john@example.com" |
| `IBAN_CODE` | 국제 은행 계좌 번호 | "DE89 3704 0044 0532 0130 00" |
| `IP_ADDRESS` | IPv4 및 IPv6 주소 | "192.168.1.1" |
| `LOCATION` | 지리적 위치 | "뉴욕시" |
| `MEDICAL_LICENSE` | 의료 면허 번호 | "MD12345" |
| `NRP` | 국적, 종교 또는 정치 그룹 | - |
| `PERSON` | 개인 이름 | "홍길동" |
| `PHONE_NUMBER` | 다양한 형식의 전화번호 | "+82 (10) 1234-5678" |
| `URL` | 웹 URL | "https://example.com" |
### 미국 특정 엔티티
| 엔티티 | 설명 | 예시 |
|--------|------|------|
| `US_BANK_NUMBER` | 미국 은행 계좌 번호 | "1234567890" |
| `US_DRIVER_LICENSE` | 미국 운전면허 번호 | "D1234567" |
| `US_ITIN` | 개인 납세자 번호 | "900-70-0000" |
| `US_PASSPORT` | 미국 여권 번호 | "123456789" |
| `US_SSN` | 사회보장번호 | "123-45-6789" |
## 삭제 작업
활성화된 각 엔티티에 대해 데이터가 삭제되는 방식을 구성할 수 있습니다:
| 작업 | 설명 | 출력 예시 |
|------|------|----------|
| `mask` | 엔티티 유형 레이블로 대체 | `<CREDIT_CARD>` |
| `redact` | 텍스트를 완전히 제거 | *(비어있음)* |
## 커스텀 인식기
기본 제공 엔티티 외에도 조직별 PII 패턴을 감지하기 위한 **커스텀 인식기**를 생성할 수 있습니다.
<Frame>
![커스텀 인식기](/images/enterprise/pii_mask_recognizer.png)
</Frame>
### 인식기 유형
커스텀 인식기에는 두 가지 옵션이 있습니다:
| 유형 | 적합한 용도 | 사용 사례 예시 |
|------|------------|---------------|
| **패턴 기반 (Regex)** | 예측 가능한 형식의 구조화된 데이터 | 급여 금액, 직원 ID, 프로젝트 코드 |
| **거부 목록** | 정확한 문자열 매칭 | 회사명, 내부 코드명, 특정 용어 |
### 커스텀 인식기 생성
<Steps>
<Step title="커스텀 인식기로 이동">
조직 **Settings** → **Organization** → **Add Recognizer**로 이동합니다.
</Step>
<Step title="인식기 구성">
<Frame>
![인식기 구성](/images/enterprise/pii_mask_recognizer_create.png)
</Frame>
다음 필드를 구성합니다:
- **Name**: 인식기의 설명적 이름
- **Entity Type**: 삭제된 출력에 나타날 엔티티 레이블 (예: `EMPLOYEE_ID`, `SALARY`)
- **Type**: Regex 패턴 또는 거부 목록 중 선택
- **Pattern/Values**: 매칭할 Regex 패턴 또는 문자열 목록
- **Confidence Threshold**: 삭제를 트리거하는 데 필요한 최소 점수 (0.0-1.0). 높은 값 (예: 0.8)은 거짓 양성을 줄이지만 일부 매치를 놓칠 수 있습니다. 낮은 값 (예: 0.5)은 더 많은 매치를 잡지만 과도하게 삭제할 수 있습니다. 기본값은 0.8입니다.
- **Context Words** (선택사항): 근처에서 발견될 때 감지 신뢰도를 높이는 단어
</Step>
<Step title="저장">
인식기를 저장합니다. 배포에서 활성화할 수 있게 됩니다.
</Step>
</Steps>
### Understanding Entity Types
The **Entity Type** determines how matched content appears in redacted traces:
```
Entity Type: SALARY
Pattern: salary:\s*\$\s*\d+
Input: "Employee salary: $50,000"
Output: "Employee <SALARY>"
```
### Using Context Words
Context words improve accuracy by raising confidence when specific terms appear near the matched pattern:
```
Context Words: "project", "code", "internal"
Entity Type: PROJECT_CODE
Pattern: PRJ-\d{4}
```
When "project" or "code" appears near "PRJ-1234", the recognizer is more confident it is a true match, reducing false positives.
## Viewing Redacted Traces
Once PII redaction is enabled, your traces show redacted values in place of sensitive data:
```
Task Output: "Customer <PERSON> placed order #12345.
Contact email: <EMAIL_ADDRESS>, phone: <PHONE_NUMBER>.
Payment was processed for the card ending in <CREDIT_CARD>."
```
Redacted values are clearly marked with angle brackets and the entity type label (e.g., `<EMAIL_ADDRESS>`), making it easy to understand which data was protected while still letting you debug and monitor crew behavior.
## Best Practices
### Performance Considerations
<Steps>
<Step title="Enable Only the Entities You Need">
Each enabled entity adds processing overhead. Enable only the entities relevant to your data.
</Step>
<Step title="Use Specific Patterns">
For custom recognizers, use specific patterns to reduce false positives and improve performance. Regex patterns work best for identifying structured values such as salaries, employee IDs, and project codes. Deny list recognizers work best for identifying exact strings such as company names and internal codenames.
</Step>
<Step title="Leverage Context Words">
Context words improve accuracy by triggering detection only when the surrounding text matches.
</Step>
</Steps>
## Troubleshooting
<Accordion title="PII Is Not Being Redacted">
**Possible causes:**
- The entity type is not enabled in the configuration
- The pattern does not match the data format
- The custom recognizer has syntax errors
**Solutions:**
- Verify the entity is enabled under Settings → Security
- Test regex patterns against sample data
- Check the logs for configuration errors
</Accordion>
<Accordion title="Too Much Data Is Being Redacted">
**Possible causes:**
- Overly broad entity types are enabled (e.g., `DATE_TIME` catches dates everywhere)
- Custom recognizer patterns are too general
**Solutions:**
- Disable entities that cause false positives
- Make custom patterns more specific
- Add context words to improve accuracy
</Accordion>
<Accordion title="Performance Issues">
**Possible causes:**
- Too many entities are enabled
- NLP-based entities (`PERSON`, `LOCATION`, `NRP`) are computationally expensive because they use machine learning models
**Solutions:**
- Enable only the entities you actually need
- Consider pattern-based alternatives where possible
- Monitor trace processing times in the dashboard
</Accordion>
---
## Practical Example: Salary Pattern Matching
This example demonstrates how to create a custom recognizer that detects and masks salary information in your traces.
### Use Case
Your crew processes employee or financial data that includes salary information in formats such as:
- `salary: $50,000`
- `salary: $125,000.00`
- `salary:$1,500.50`
You want to mask these values automatically to protect sensitive compensation data.
### Configuration
<Frame>
![Salary Recognizer Configuration](/images/enterprise/pii_mask_custom_recognizer_salary.png)
</Frame>
| Field | Value |
|-------|-------|
| **Name** | `SALARY` |
| **Entity Type** | `SALARY` |
| **Type** | Regex Pattern |
| **Regex Pattern** | `salary:\s*\$\s*\d{1,3}(,\d{3})*(\.\d{2})?` |
| **Action** | Mask |
| **Confidence Threshold** | `0.8` |
| **Context Words** | `salary, compensation, pay, wage, income` |
### Regex Pattern Breakdown
| Pattern Component | Meaning |
|-------------------|---------|
| `salary:` | Matches the literal text "salary:" |
| `\s*` | Matches zero or more whitespace characters |
| `\$` | Matches the dollar sign (escaped) |
| `\s*` | Matches zero or more whitespace characters after the $ |
| `\d{1,3}` | Matches 1-3 digits (e.g., "1", "50", "125") |
| `(,\d{3})*` | Matches comma-separated thousands (e.g., ",000", ",500,000") |
| `(\.\d{2})?` | Optionally matches cents (e.g., ".00", ".50") |
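If you want to sanity-check the pattern locally before saving it, a quick test with Python's `re` module confirms it matches all three formats from the use case:
```python
import re

SALARY = re.compile(r"salary:\s*\$\s*\d{1,3}(,\d{3})*(\.\d{2})?")

samples = [
    "salary: $50,000",
    "salary: $125,000.00",
    "salary:$1,500.50",
]
for text in samples:
    # Substitution mirrors what the recognizer does with the "Mask" action.
    print(SALARY.sub("<SALARY>", text))
# All three lines print: <SALARY>
```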
### Example Results
```
Original: "Employee record shows salary: $125,000.00 annually"
Redacted: "Employee record shows <SALARY> annually"
Original: "Base salary:$50,000 with bonus potential"
Redacted: "Base <SALARY> with bonus potential"
```
<Tip>
Adding context words such as "salary", "compensation", "pay", "wage", and "income" raises detection confidence when those terms appear near the matched pattern, reducing false positives.
</Tip>
### Enable the Recognizer on Your Deployments
<Warning>
Creating a custom recognizer at the organization level does not automatically enable it on your deployments. You must enable each recognizer manually for every deployment where you want it applied.
</Warning>
After creating your custom recognizer, enable it on each deployment:
<Steps>
<Step title="Navigate to Your Deployment">
Go to your deployment/automation and open **Settings** → **PII Protection**.
</Step>
<Step title="Select Custom Recognizers">
Under **Mask Recognizers**, you will see the recognizers defined by your organization. Check the box next to each recognizer you want to enable.
<Frame>
![Enable Custom Recognizers](/images/enterprise/pii_mask_recognizers_options.png)
</Frame>
</Step>
<Step title="Save the Configuration">
Save your changes. The recognizer will be active on all subsequent executions for this deployment.
</Step>
</Steps>
<Info>
Repeat this process for each deployment that needs the custom recognizer. This gives you fine-grained control over which recognizers are active in different environments (e.g., development vs. production).
</Info>

View File

@@ -7,10 +7,6 @@ mode: "wide"
## Overview
<Note>
The `@human_feedback` decorator requires **CrewAI version 1.8.0 or above**. Update your installation before using this feature.
</Note>
The `@human_feedback` decorator enables human-in-the-loop (HITL) workflows directly within a CrewAI Flow. It lets you pause flow execution, present output to a human for review, collect their feedback, and optionally route to different listeners based on the feedback outcome.
This is particularly useful for:
View File

@@ -5,22 +5,9 @@ icon: "user-check"
mode: "wide"
---
Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelligence with human expertise to enhance decision-making and improve task outcomes. CrewAI offers several ways to implement HITL depending on your needs.
Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelligence with human expertise to enhance decision-making and improve task outcomes. This guide walks you through implementing HITL within CrewAI.
## Choosing Your HITL Approach
CrewAI offers two main approaches for implementing human-in-the-loop workflows:
| Approach | Best For | Integration | Version |
|----------|----------|-------------|---------|
| **Flow-based** (`@human_feedback` decorator) | Local development, console-based review, synchronous workflows | [Human Feedback in Flows](/ko/learn/human-feedback-in-flows) | **1.8.0+** |
| **Webhook-based** (Enterprise) | Production deployments, asynchronous workflows, external integrations (Slack, Teams, etc.) | This guide | - |
<Tip>
If you are building flows and want to add human review steps that route based on feedback, see the [Human Feedback in Flows](/ko/learn/human-feedback-in-flows) guide for the `@human_feedback` decorator.
</Tip>
## Setting Up Webhook-Based HITL Workflows
## Setting Up HITL Workflows
<Steps>
<Step title="Configure Your Task">
View File

@@ -1,115 +0,0 @@
---
title: Galileo
description: Galileo integration for CrewAI tracing and evaluation
icon: telescope
mode: "wide"
---
## Overview
This guide demonstrates how to integrate **Galileo** with **CrewAI** for comprehensive tracing and evaluation engineering. By the end of this guide, you will be able to trace your CrewAI agents, monitor their performance, and evaluate their behavior with Galileo's observability platform.
> **What is Galileo?** [Galileo](https://galileo.ai/) is an AI evaluation and observability platform that provides end-to-end tracing, evaluation, and monitoring for AI applications. It lets teams capture ground truth, build robust guardrails, and run systematic experiments with built-in experiment tracking and performance analysis, ensuring reliability, transparency, and continuous improvement across the AI lifecycle.
## Getting Started
This tutorial follows the [CrewAI Quickstart](/ko/quickstart.mdx) and shows how to add Galileo's [CrewAIEventListener](https://v2docs.galileo.ai/sdk-api/python/reference/handlers/crewai/handler), an event handler.
For more information, see Galileo's [Add Galileo to a CrewAI application](https://v2docs.galileo.ai/how-to-guides/third-party-integrations/add-galileo-to-crewai/add-galileo-to-crewai) how-to guide.
> **Note:** This tutorial assumes you have completed the [CrewAI Quickstart](/ko/quickstart.mdx). For a complete, comprehensive example, see Galileo's [CrewAI SDK examples repository](https://github.com/rungalileo/sdk-examples/tree/main/python/agent/crew-ai).
### Step 1: Install dependencies
Install the dependencies your app needs. Create a virtual environment using your preferred method, then install the dependencies inside that environment using your preferred tool:
```bash
uv add galileo
```
### Step 2: Add to the .env file from the [CrewAI Quickstart](/ko/quickstart.mdx)
```bash
# Your Galileo API key
GALILEO_API_KEY="your-galileo-api-key"
# Your Galileo project name
GALILEO_PROJECT="your-galileo-project-name"
# The name of the Log stream you want to use for logging
GALILEO_LOG_STREAM="your-galileo-log-stream"
```
### Step 3: Add the Galileo event listener
To enable logging to Galileo, you need to create an instance of `CrewAIEventListener`. Import the Galileo CrewAI handler package by adding the following code at the top of your main.py file:
```python
from galileo.handlers.crewai.handler import CrewAIEventListener
```
At the start of your run function, create the event listener:
```python
def run():
    # Create the event listener
    CrewAIEventListener()
    # The rest of your existing code goes here
```
When you create the listener instance, it is automatically registered with CrewAI.
### Step 4: Run your crew agent
Run your crew agent with the CrewAI CLI:
```bash
crewai run
```
### Step 5: View traces in Galileo
Once your crew agent finishes, the traces are flushed and appear in Galileo.
![Galileo trace view](/images/galileo-trace-veiw.png)
## Understanding the Galileo Integration
Galileo integrates with CrewAI by registering an event listener that captures crew execution events (e.g., agent actions, tool calls, model responses) and forwards them to Galileo for observability and evaluation.
### Understanding the event listener
Creating a `CrewAIEventListener()` instance is all that is needed to enable Galileo for a CrewAI run. Once instantiated, the listener:
- Automatically registers itself with CrewAI
- Reads the Galileo configuration from environment variables
- Logs all run data to the Galileo project and log stream specified by `GALILEO_PROJECT` and `GALILEO_LOG_STREAM`
No additional configuration or code changes are required. All data from the run is logged to the Galileo project and log stream specified by your environment configuration (e.g., GALILEO_PROJECT and GALILEO_LOG_STREAM).

View File

@@ -4,545 +4,6 @@ description: "Atualizações de produto, melhorias e correções do CrewAI"
icon: "clock"
mode: "wide"
---
<Update label="08 jan 2026">
## v1.8.0
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.8.0)
## O que Mudou
### Funcionalidades
- Adicionar cadeia async nativa para a2a
- Adicionar mecanismos de atualização a2a (poll/stream/push) com handlers e config
- Introduzir configuração global de fluxo para feedback human-in-the-loop
- Adicionar eventos de chamada de ferramenta em streaming e corrigir rastreamento de ID do provedor
- Introduzir arquitetura de Flows e Crews pronta para produção
- Adicionar HITL para Flows
- Melhorar EventListener e TraceCollectionListener para melhor tratamento de eventos
### Correções de Bugs
- Tratar dependência a2a ausente como opcional
- Corrigir busca de erro para polling de login WorkOS
- Corrigir nome de trigger errado na documentação de exemplo
### Documentação
- Atualizar documentação de webhook-streaming
- Ajustar linguagem da documentação de AOP para AMP
### Contribuidores
@Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide, @mplachta
</Update>
<Update label="19 dez 2025">
## v1.7.2
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.7.2)
## O que Mudou
### Correções de Bugs
- Resolver problemas de conexão
### Documentação
- Atualizar página de documentação api-reference/status
### Contribuidores
@greysonlalonde, @heitorado, @lorenzejay, @lucasgomide
</Update>
<Update label="16 dez 2025">
## v1.7.1
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.7.1)
## O que Mudou
### Melhorias
- Adicionar flag `--no-commit` ao comando bump
- Usar schema JSON para serialização de argumentos de ferramenta
### Correções de Bugs
- Corrigir exibição de mensagem de erro da resposta quando login do repositório de ferramentas falha
- Corrigir terminação graciosa de future ao executar tarefa assincronamente
- Corrigir ordenação de tarefas adicionando índice
- Corrigir verificações de compatibilidade de plataforma para sinais Windows
- Corrigir timer do controlador RPM para evitar travamento do processo
- Corrigir registro de uso de tokens e validar modelo de resposta em stream
### Documentação
- Adicionar documentação traduzida para async
- Adicionar documentação para API Deploy AOP
- Adicionar documentação para o conector agent handler
- Adicionar documentação sobre async nativo
### Contribuidores
@Llamrei, @dragosmc, @gilfeig, @greysonlalonde, @heitorado, @lorenzejay, @mattatcha, @vinibrsl
</Update>
<Update label="09 dez 2025">
## v1.7.0
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.7.0)
## O que Mudou
### Funcionalidades
- Adicionar kickoff de fluxo async
- Adicionar suporte a crew async
- Adicionar suporte a tarefa async
- Adicionar suporte a conhecimento async
- Adicionar suporte a memória async
- Adicionar suporte async para ferramentas e executor de agente; melhorar tipagem e docs
- Implementar API de extensões a2a e cache de cartão de agente async; corrigir propagação de tarefas e streaming
- Adicionar suporte a ferramenta async nativa
- Adicionar suporte a llm async
- Criar tipos de eventos sys e handler
### Correções de Bugs
- Corrigir problema para garantir que nonetypes não sejam passados para otel
- Corrigir deadlock em operações de arquivo do armazenamento de tokens
- Corrigir para garantir que span otel seja fechado
- Usar HuggingFaceEmbeddingFunction para embeddings, atualizar chaves e adicionar testes
- Corrigir para garantir que supports_tools seja true para todos os modelos anthropic suportados
- Garantir que hooks funcionem com fluxos de lite agents
### Contribuidores
@greysonlalonde, @lorenzejay
</Update>
<Update label="29 nov 2025">
## v1.6.1
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.6.1)
## O que Mudou
### Correções de Bugs
- Corrigir chamada ChatCompletionsClient para garantir funcionamento adequado
- Garantir que métodos async sejam executáveis para anotações
- Corrigir parâmetros em RagTool.add, adicionar tipagem e testes
- Remover parâmetro inválido do cliente SSE
- Apagar configuração 'oauth2_extra' no comando 'crewai config reset'
### Refatoração
- Aprimorar validação de modelo e inferência de provedor na classe LLM
### Contribuidores
@Vidit-Ostwal, @greysonlalonde, @heitorado, @lorenzejay
</Update>
<Update label="25 nov 2025">
## v1.6.0
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.6.0)
## O que Mudou
### Funcionalidades
- Adicionar suporte a resultado de streaming para fluxos e crews
- Adicionar gemini-3-pro-preview
- Suportar login CLI com Entra ID
- Adicionar ferramenta Merge Agent Handler
- Aprimorar gerenciamento de estado de eventos de fluxo
### Correções de Bugs
- Garantir que caminho de persistência de armazenamento rag personalizado seja definido se passado
- Garantir que retornos fuzzy sejam mais estritos e mostrem aviso de tipo
- Re-adicionar parâmetro response_format do openai e adicionar teste
- Corrigir configuração de embeddings da ferramenta rag
- Garantir que painel de início de execução de fluxo não seja mostrado no plot
### Documentação
- Atualizar referências de AMP para AOP na documentação
- Atualizar AMP para AOP
### Contribuidores
@Vidit-Ostwal, @gilfeig, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @markmcd
</Update>
<Update label="22 nov 2025">
## v0.203.2
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/0.203.2)
## O que Mudou
- Bump de versão hotfix de 0.203.1 para 0.203.2
</Update>
<Update label="16 nov 2025">
## v1.5.0
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.5.0)
## O que Mudou
### Funcionalidades
- Adicionar flag de status de conclusão remota de confiança a2a
- Buscar e armazenar mais dados sobre servidor de autorização Okta
- Implementar hooks antes e depois de chamadas LLM no CrewAgentExecutor
- Expor mensagens para TaskOutput e LiteAgentOutputs
- Aprimorar descrição de schema do QdrantVectorSearchTool
### Correções de Bugs
- Garantir que flags de instrumentação de rastreamento sejam aplicadas corretamente
- Corrigir links de documentação de ferramentas personalizadas e adicionar ação de links quebrados do Mintlify
### Documentação
- Aprimorar documentação de guardrail de tarefa com suporte a validação baseada em LLM
### Contribuidores
@danielfsbarreto, @greysonlalonde, @heitorado, @lorenzejay, @theCyberTech
</Update>
<Update label="07 nov 2025">
## v1.4.1
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.4.1)
## O que Mudou
### Correções de Bugs
- Corrigir tratamento de iterações máximas do agente
- Resolver problemas de roteamento para sintaxe de modelo LLM para provedores respeitados
### Contribuidores
@greysonlalonde
</Update>
<Update label="07 nov 2025">
## v1.4.0
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.4.0)
## O que Mudou
### Funcionalidades
- Adicionar suporte para rotas de plot não-AST
- Implementar suporte de primeira classe para MCP
- Adicionar dunder de validação Pydantic ao BaseInterceptor
- Adicionar suporte para hooks de interceptor de mensagem LLM
- Cache de prompts i18n para uso eficiente
- Aprimorar QdrantVectorSearchTool
### Correções de Bugs
- Corrigir problemas para manter stopwords atualizadas
- Resolver valores não pickleable no estado de fluxo
- Garantir que lite agents corrijam curso em erros de validação
- Corrigir hash de argumento de callback para garantir que cache funcione
- Permitir adicionar conteúdo de fonte RAG de URLs válidas
- Tornar seleção de nó de plot mais suave
- Corrigir IDs de documento duplicados para conhecimento
### Refatoração
- Melhorar tratamento de execução de ferramenta MCP com concurrent futures
- Simplificar tratamento de fluxo, tipagem e logging; atualizar UI e testes
- Refatorar gerenciamento de stop word para propriedade
### Documentação
- Migrar embedder para embedding_model e exigir vectordb em documentação de ferramentas; adicionar exemplos de provedor (en/ko/pt-BR)
### Contribuidores
@danielfsbarreto, @greysonlalonde, @lorenzejay, @lucasgomide, @tonykipkemboi
</Update>
<Update label="01 nov 2025">
## v1.3.0
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.3.0)
## O que Mudou
### Funcionalidades
- Refatorar tratamento de fluxo, tipagem e logging
- Aprimorar QdrantVectorSearchTool
### Correções de Bugs
- Corrigir ferramentas Firecrawl e adicionar testes
- Refatorar use_stop_words para propriedade e adicionar verificação para stop words
### Documentação
- Migrar embedder para embedding_model e exigir vectordb em documentação de ferramentas
- Adicionar exemplos de provedor em Inglês, Coreano e Português
### Refatoração
- Melhorar tratamento de fluxo e atualizações de UI
### Contribuidores
@danielfsbarreto, @greysonlalonde, @lorenzejay, @lucasgomide, @tonykipkemboi
</Update>
<Update label="27 out 2025">
## v1.2.1
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.2.1)
## O que Mudou
### Funcionalidades
- Adicionar suporte para integração Datadog
- Suportar apps e mcps em liteagent
### Documentação
- Descrever variável de ambiente obrigatória para chamar ferramentas Platform para cada integração
- Adicionar documentação de integração Datadog
### Contribuidores
@barieom, @lorenzejay, @lucasgomide, @sabrenner
</Update>
<Update label="24 out 2025">
## v1.2.0
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.2.0)
## O que Mudou
### Correções de Bugs
- Atualizar modelo LLM padrão e melhorar logging de erros em utilitários LLM
- Alterar diretório de visualização de fluxo e inspeção de método
### Removendo Não Utilizados
- Remover aisuite
### Contribuidores
@greysonlalonde, @lorenzejay
</Update>
<Update label="21 out 2025">
## v1.1.0
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.1.0)
## O que Mudou
### Funcionalidades
- Aprimorar InternalInstructor para suportar múltiplos provedores LLM
- Implementar base de plugin mypy
- Melhorar QdrantVectorSearchTool
### Correções de Bugs
- Corrigir links de documentação de integração quebrados
- Corrigir chamada de trace dupla e adicionar tipos
- Fixar versões de template para mais recente
### Documentação
- Atualizar detalhes e exemplos de integração LLM
### Refatoração
- Melhorar tipagem do CrewBase
### Contribuidores
@cwarre33, @danielfsbarreto, @greysonlalonde, @lorenzejay
</Update>
<Update label="20 out 2025">
## v1.0.0
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0)
## O que Mudou
### Funcionalidades
- Bump de versões para 1.0.0
- Aprimorar tratamento de eventos de conhecimento e guardrail na classe Agent
- Injetar credenciais do repositório de ferramentas no comando crewai run
### Correções de Bugs
- Preservar estrutura de condição aninhada em decoradores Flow
- Adicionar parâmetros de print padrão ao método Printer.print
- Corrigir erros quando não há input() disponível
- Adicionar margem de 10s ao decodificar JWT
- Reverter agenda cron ruim
- Corrigir agenda cron para executar a cada 5 dias em datas específicas
- Usar PATH do sistema para binário Docker em vez de caminho hardcoded
- Adicionar configuração CodeQL para excluir corretamente diretórios de template
### Documentação
- Atualizar política de segurança para relatório de vulnerabilidade
- Adicionar guia para capturar logs de telemetria no CrewAI AMP
- Adicionar arquivos /resume ausentes
- Esclarecer parâmetro de URL de webhook em workflows HITL
### Contribuidores
@Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide, @mplachta, @theCyberTech
</Update>
<Update label="18 out 2025">
## v1.0.0b3 (Pré-lançamento)
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0b3)
## O que Mudou
### Funcionalidades
- Aprimorar funcionalidade e validação de guardrail de tarefa
- Melhorar suporte para importar SDK nativo
- Adicionar testes nativos Azure
- Aprimorar classe BedrockCompletion com funcionalidades avançadas
- Aprimorar classe GeminiCompletion com suporte a parâmetro de cliente
- Aprimorar classe AnthropicCompletion com parâmetros de cliente adicionais
### Correções de Bugs
- Preservar estrutura de condição aninhada em decoradores Flow
- Adicionar parâmetros de print padrão ao método Printer.print
- Remover prints stdout e melhorar determinismo de teste
### Refatoração
- Converter módulo de projeto para metaclasse com tipagem completa
### Contribuidores
@greysonlalonde, @lorenzejay
</Update>
<Update label="16 out 2025">
## v1.0.0b2 (Pré-lançamento)
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0b2)
## O que Mudou
### Funcionalidades
- Aprimorar classe OpenAICompletion com parâmetros de cliente adicionais
- Melhorar segurança de thread do event bus e suporte async
- Injetar credenciais do repositório de ferramentas no comando crewai run
### Correções de Bugs
- Corrigir problema onde ocorre erro se não houver input() disponível
- Adicionar margem de 10s ao decodificar JWT
- Corrigir cópia e adicionar verificação NOT_SPECIFIED em task.py
### Documentação
- Garantir que CREWAI_PLATFORM_INTEGRATION_TOKEN seja mencionado na documentação
- Atualizar documentação de triggers
### Contribuidores
@Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide
</Update>
<Update label="14 out 2025">
## v1.0.0b1 (Pré-lançamento)
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0b1)
## O que Mudou
### Funcionalidades
- Aprimorar classe OpenAICompletion com parâmetros de cliente adicionais
- Melhorar segurança de thread do event bus e suporte async
- Implementar integração Bedrock LLM
### Correções de Bugs
- Corrigir problema com disponibilidade de input() ausente
- Resolver erro de decodificação JWT adicionando margem de 10 segundos
- Injetar credenciais do repositório de ferramentas no comando crewai run
- Corrigir cópia e adicionar verificação NOT_SPECIFIED em task.py
### Documentação
- Garantir que CREWAI_PLATFORM_INTEGRATION_TOKEN seja mencionado na documentação
- Atualizar documentação de triggers
### Contribuidores
@Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide
</Update>
<Update label="13 out 2025">
## v0.203.1
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/0.203.1)
## O que Mudou
### Melhorias e Correções do Núcleo
- Corrigida injeção de credenciais do repositório de ferramentas no comando `crewai run`
- Adicionada margem de 10 segundos ao decodificar JWTs para reduzir erros de validação de token
- Corrigida (depois revertida) correção de agenda cron destinada a executar jobs a cada 5 dias em datas específicas
### Documentação e Guias
- Atualizada política de segurança para esclarecer o processo de relatório de vulnerabilidade
</Update>
<Update label="09 out 2025">
## v1.0.0a4 (Pré-lançamento)
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0a4)
## O que Mudou
### Funcionalidades
- Aprimorar tratamento de eventos de conhecimento e guardrail na classe Agent
- Introduzir comandos de listagem e execução de trigger para desenvolvimento local
- Atualizar documentação com nova abordagem para consumir Platform Actions
- Adicionar guia para capturar logs de telemetria no CrewAI AMP
### Correções de Bugs
- Reverter agenda cron ruim
- Corrigir agenda cron para executar a cada 5 dias em datas específicas
- Remover linha duplicada e adicionar variável de ambiente explícita
### Contribuidores
@greysonlalonde, @heitorado, @joaomdmoura, @lorenzejay, @lucasgomide, @mplachta, @theCyberTech
</Update>
<Update label="07 out 2025">
## v1.0.0a3 (Pré-lançamento)
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0a3)
## O que Mudou
### Funcionalidades
- Adicionar suporte a agente para ações de plataforma
- Adicionar argumento de interpretador para ferramenta de execução de código
- Suporte direto para execução de apps de plataforma
### Documentação
- Adicionar documentação de ações de plataforma
- Adicionar tipos de transporte stdio e sse à documentação MCP
- Atualizar lista de modelos AWS
### Contribuidores
@greysonlalonde, @heitorado, @lorenzejay, @lucasgomide
</Update>
<Update label="03 out 2025">
## v1.0.0a2 (Pré-lançamento)
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.0.0a2)
## O que Mudou
### Melhorias e Correções do Núcleo
- Atualizações de CI para monorepo
- Atualizar modelo Anthropic padrão para claude-sonnet-4-20250514
- Corrigir testes para atualização de modelo
### Contribuidores
@greysonlalonde, @lorenzejay
</Update>
<Update label="30 set 2025">
## v1.0.0a1

View File

@@ -309,10 +309,6 @@ Ao executar esse Flow, a saída será diferente dependendo do valor booleano ale
### Human in the Loop (human feedback)
<Note>
The `@human_feedback` decorator requires **CrewAI version 1.8.0 or above**.
</Note>
The `@human_feedback` decorator enables human-in-the-loop workflows by pausing flow execution to collect feedback from a human. This is useful for approval gates, quality review, and decision points that require human judgment.
```python Code

View File

@@ -1,342 +0,0 @@
---
title: PII Redaction for Traces
description: "Automatically redact sensitive data from crew and flow execution traces"
icon: "lock"
mode: "wide"
---
## Overview
PII Redaction is a CrewAI AMP feature that automatically detects and masks Personally Identifiable Information (PII) in crew and flow execution traces. This ensures that sensitive data such as credit card numbers, CPF numbers, email addresses, and names is not exposed in CrewAI AMP traces. You can also create custom recognizers to protect data specific to your organization.
<Info>
PII Redaction is available on the Enterprise plan.
The deployment must be version 1.8.0 or above.
</Info>
<Frame>
![PII Redaction Overview](/images/enterprise/pii_mask_recognizer_trace_example.png)
</Frame>
## Why PII Redaction Matters
When running AI agents in production, sensitive information often flows through your crews:
- Customer data from CRM integrations
- Financial information from payment processors
- Personal details from form submissions
- Internal employee data
Without proper redaction, this data appears in traces, making compliance with regulations such as LGPD, HIPAA, and PCI-DSS challenging. PII Redaction solves this by automatically masking sensitive data before it is stored in traces.
## How It Works
1. **Detect** - Scan trace event data for known PII patterns
2. **Classify** - Identify the type of sensitive data (credit card, CPF, email, etc.)
3. **Mask/Redact** - Replace the sensitive data with masked values based on your configuration
```
Original: "Contact john.doe@company.com or call 555-123-4567"
Redacted: "Contact <EMAIL_ADDRESS> or call <PHONE_NUMBER>"
```
## Enabling PII Redaction
<Info>
You must be on the Enterprise plan and your deployment must be version 1.8.0 or above to use this feature.
</Info>
<Steps>
<Step title="Navigate to Crew Settings">
In the CrewAI AMP dashboard, select your deployed crew, go to one of its deployments/automations, then navigate to **Settings** → **PII Protection**.
</Step>
<Step title="Enable PII Protection">
Toggle on **PII Redaction for Traces**. This enables automatic scanning and redaction of trace data.
<Info>
You need to enable PII Redaction manually for each deployment.
</Info>
<Frame>
![Enable PII Redaction](/images/enterprise/pii_mask_recognizer_enable.png)
</Frame>
</Step>
<Step title="Configure Entity Types">
Select which PII types to detect and redact. Each entity can be enabled or disabled individually.
<Frame>
![Configure Entities](/images/enterprise/pii_mask_recognizer_supported_entities.png)
</Frame>
</Step>
<Step title="Save">
Save your configuration. PII redaction will be active on all subsequent crew executions, with no redeployment required.
</Step>
</Steps>
## Supported Entity Types
CrewAI supports the following PII entity types, organized by category.
### Global Entities
| Entity | Description | Example |
|--------|-------------|---------|
| `CREDIT_CARD` | Credit/debit card numbers | "4111-1111-1111-1111" |
| `CRYPTO` | Cryptocurrency wallet addresses | "bc1qxy2kgd..." |
| `DATE_TIME` | Dates and times | "January 15, 2024" |
| `EMAIL_ADDRESS` | Email addresses | "john@example.com" |
| `IBAN_CODE` | International bank account numbers | "DE89 3704 0044 0532 0130 00" |
| `IP_ADDRESS` | IPv4 and IPv6 addresses | "192.168.1.1" |
| `LOCATION` | Geographic locations | "São Paulo" |
| `MEDICAL_LICENSE` | Medical license numbers | "CRM12345" |
| `NRP` | Nationalities, religious, or political groups | - |
| `PERSON` | Personal names | "João Silva" |
| `PHONE_NUMBER` | Phone numbers in various formats | "+55 (11) 98765-4321" |
| `URL` | Web URLs | "https://example.com" |
### US-Specific Entities
| Entity | Description | Example |
|--------|-------------|---------|
| `US_BANK_NUMBER` | US bank account numbers | "1234567890" |
| `US_DRIVER_LICENSE` | US driver's license numbers | "D1234567" |
| `US_ITIN` | Individual Taxpayer Identification Numbers | "900-70-0000" |
| `US_PASSPORT` | US passport numbers | "123456789" |
| `US_SSN` | Social Security Numbers | "123-45-6789" |
## Redaction Actions
For each enabled entity, you can configure how the data is redacted:
| Action | Description | Example Output |
|--------|-------------|----------------|
| `mask` | Replace with the entity type label | `<CREDIT_CARD>` |
| `redact` | Remove the text entirely | *(empty)* |
## Custom Recognizers
Beyond the built-in entities, you can create **custom recognizers** to detect PII patterns specific to your organization.
<Frame>
![Custom Recognizers](/images/enterprise/pii_mask_recognizer.png)
</Frame>
### Recognizer Types
You have two options for custom recognizers, compared in the table and the sketch below:
| Type | Best For | Example Use Case |
|------|----------|------------------|
| **Pattern-based (Regex)** | Structured data with predictable formats | Salary amounts, employee IDs, project codes |
| **Deny List** | Exact string matches | Company names, internal codenames, specific terms |
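For a feel of how a deny-list recognizer behaves, here is an informal Python sketch; the `DenyListRecognizer` class and term list are invented for illustration and are not part of the CrewAI API:
```python
import re

class DenyListRecognizer:
    """Masks exact terms from a deny list (illustrative only)."""

    def __init__(self, entity_type: str, values: list[str]) -> None:
        self.entity_type = entity_type
        # Escape each term and match whole words, case-insensitively.
        joined = "|".join(re.escape(v) for v in values)
        self.pattern = re.compile(rf"\b({joined})\b", re.IGNORECASE)

    def redact(self, text: str) -> str:
        return self.pattern.sub(f"<{self.entity_type}>", text)

codenames = DenyListRecognizer("CODENAME", ["Project Falcon", "Blue Harbor"])
print(codenames.redact("Status update for project falcon and Blue Harbor."))
# -> Status update for <CODENAME> and <CODENAME>.
```
A deny list trades flexibility for predictability: it can never over-match the way a loose regex can, which is why it suits fixed vocabularies such as codenames.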
### Creating a Custom Recognizer
<Steps>
<Step title="Navigate to Custom Recognizers">
Go to your organization **Settings** → **Organization** → **Add Recognizer**.
</Step>
<Step title="Configure the Recognizer">
<Frame>
![Configure Recognizer](/images/enterprise/pii_mask_recognizer_create.png)
</Frame>
Configure the following fields:
- **Name**: A descriptive name for the recognizer
- **Entity Type**: The entity label that will appear in redacted output (e.g., `EMPLOYEE_ID`, `SALARY`)
- **Type**: Choose between Regex Pattern or Deny List
- **Pattern/Values**: The regex pattern or list of strings to match
- **Confidence Threshold**: The minimum score (0.0-1.0) a match needs to trigger redaction. Higher values (e.g., 0.8) reduce false positives but may miss some matches. Lower values (e.g., 0.5) catch more matches but may over-redact. The default is 0.8.
- **Context Words** (optional): Words that increase detection confidence when found nearby
</Step>
<Step title="Save">
Save the recognizer. It becomes available to enable on your deployments.
</Step>
</Steps>
### Understanding Entity Types
The **Entity Type** determines how matched content appears in redacted traces:
```
Entity Type: SALARY
Pattern: salary:\s*\$\s*\d+
Input: "Employee salary: $50,000"
Output: "Employee <SALARY>"
```
### Using Context Words
Context words improve accuracy by raising confidence when specific terms appear near the matched pattern:
```
Context Words: "project", "code", "internal"
Entity Type: PROJECT_CODE
Pattern: PRJ-\d{4}
```
When "project" or "code" appears near "PRJ-1234", the recognizer is more confident it is a true match, reducing false positives.
## Viewing Redacted Traces
Once PII redaction is enabled, your traces show redacted values in place of sensitive data:
```
Task Output: "Customer <PERSON> placed order #12345.
Contact email: <EMAIL_ADDRESS>, phone: <PHONE_NUMBER>.
Payment was processed for the card ending in <CREDIT_CARD>."
```
Redacted values are clearly marked with angle brackets and the entity type label (e.g., `<EMAIL_ADDRESS>`), making it easy to understand which data was protected while still letting you debug and monitor crew behavior.
## Best Practices
### Performance Considerations
<Steps>
<Step title="Enable Only the Entities You Need">
Each enabled entity adds processing overhead. Enable only the entities relevant to your data.
</Step>
<Step title="Use Specific Patterns">
For custom recognizers, use specific patterns to reduce false positives and improve performance. Regex patterns work best for identifying structured values in traces such as salaries, employee IDs, and project codes. Deny list recognizers work best for identifying exact strings in traces such as company names and internal codenames.
</Step>
<Step title="Leverage Context Words">
Context words improve accuracy by triggering detection only when the surrounding text matches.
</Step>
</Steps>
## Troubleshooting
<Accordion title="PII Is Not Being Redacted">
**Possible causes:**
- The entity type is not enabled in the configuration
- The pattern does not match the data format
- The custom recognizer has syntax errors
**Solutions:**
- Verify the entity is enabled under Settings → Security
- Test regex patterns against sample data
- Check the logs for configuration errors
</Accordion>
<Accordion title="Too Much Data Is Being Redacted">
**Possible causes:**
- Overly broad entity types are enabled (e.g., `DATE_TIME` catches dates everywhere)
- Custom recognizer patterns are too general
**Solutions:**
- Disable entities that cause false positives
- Make custom patterns more specific
- Add context words to improve accuracy
</Accordion>
<Accordion title="Performance Issues">
**Possible causes:**
- Too many entities are enabled
- NLP-based entities (`PERSON`, `LOCATION`, `NRP`) are computationally expensive because they use machine learning models
**Solutions:**
- Enable only the entities you actually need
- Consider pattern-based alternatives where possible
- Monitor trace processing times in the dashboard
</Accordion>
---
## Practical Example: Salary Pattern Matching
This example demonstrates how to create a custom recognizer that detects and masks salary information in your traces.
### Use Case
Your crew processes employee or financial data that includes salary information in formats such as:
- `salary: $50,000`
- `salary: $125,000.00`
- `salary:$1,500.50`
You want to mask these values automatically to protect sensitive compensation data.
### Configuration
<Frame>
![Salary Recognizer Configuration](/images/enterprise/pii_mask_custom_recognizer_salary.png)
</Frame>
| Field | Value |
|-------|-------|
| **Name** | `SALARY` |
| **Entity Type** | `SALARY` |
| **Type** | Regex Pattern |
| **Regex Pattern** | `salary:\s*\$\s*\d{1,3}(,\d{3})*(\.\d{2})?` |
| **Action** | Mask |
| **Confidence Threshold** | `0.8` |
| **Context Words** | `salary, compensation, pay, wage, income` |
### Regex Pattern Breakdown
| Pattern Component | Meaning |
|-------------------|---------|
| `salary:` | Matches the literal text "salary:" |
| `\s*` | Matches zero or more whitespace characters |
| `\$` | Matches the dollar sign (escaped) |
| `\s*` | Matches zero or more whitespace characters after the $ |
| `\d{1,3}` | Matches 1-3 digits (e.g., "1", "50", "125") |
| `(,\d{3})*` | Matches comma-separated thousands (e.g., ",000", ",500,000") |
| `(\.\d{2})?` | Optionally matches cents (e.g., ".00", ".50") |
### Example Results
```
Original: "Employee record shows salary: $125,000.00 annually"
Redacted: "Employee record shows <SALARY> annually"
Original: "Base salary:$50,000 with bonus potential"
Redacted: "Base <SALARY> with bonus potential"
```
<Tip>
Adding context words such as "salary", "compensation", "pay", "wage", and "income" raises detection confidence when those terms appear near the matched pattern, reducing false positives.
</Tip>
### Enable the Recognizer on Your Deployments
<Warning>
Creating a custom recognizer at the organization level does not automatically enable it on your deployments. You must enable each recognizer manually for every deployment where you want it applied.
</Warning>
After creating your custom recognizer, enable it on each deployment:
<Steps>
<Step title="Navigate to Your Deployment">
Go to your deployment/automation and open **Settings** → **PII Protection**.
</Step>
<Step title="Select Custom Recognizers">
Under **Mask Recognizers**, you will see the recognizers defined by your organization. Check the box next to each recognizer you want to enable.
<Frame>
![Enable Custom Recognizers](/images/enterprise/pii_mask_recognizers_options.png)
</Frame>
</Step>
<Step title="Save the Configuration">
Save your changes. The recognizer will be active on all subsequent executions for this deployment.
</Step>
</Steps>
<Info>
Repeat this process for each deployment that needs the custom recognizer. This gives you fine-grained control over which recognizers are active in different environments (e.g., development vs. production).
</Info>

View File

@@ -7,10 +7,6 @@ mode: "wide"
## Overview
<Note>
The `@human_feedback` decorator requires **CrewAI version 1.8.0 or above**. Make sure to update your installation before using this feature.
</Note>
The `@human_feedback` decorator enables human-in-the-loop (HITL) workflows directly within CrewAI Flows. It lets you pause flow execution, present output to a human for review, collect their feedback, and optionally route to different listeners based on the feedback outcome.
This is particularly valuable for:

View File

@@ -5,22 +5,9 @@ icon: "user-check"
mode: "wide"
---
Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelligence with human expertise to enhance decision-making and improve task outcomes. CrewAI offers several ways to implement HITL depending on your needs.
Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelligence with human expertise to enhance decision-making and improve task outcomes. This guide walks you through implementing HITL within CrewAI.
## Choosing Your HITL Approach
CrewAI offers two main approaches for implementing human-in-the-loop workflows:
| Approach | Best For | Integration | Version |
|----------|----------|-------------|---------|
| **Flow-based** (`@human_feedback` decorator) | Local development, console-based review, synchronous workflows | [Human Feedback in Flows](/pt-BR/learn/human-feedback-in-flows) | **1.8.0+** |
| **Webhook-based** (Enterprise) | Production deployments, asynchronous workflows, external integrations (Slack, Teams, etc.) | This guide | - |
<Tip>
If you are building flows and want to add human review steps that route based on feedback, see the [Human Feedback in Flows](/pt-BR/learn/human-feedback-in-flows) guide for the `@human_feedback` decorator.
</Tip>
## Setting Up Webhook-Based HITL Workflows
## Setting Up HITL Workflows
<Steps>
<Step title="Configure Your Task">

View File

@@ -1,115 +0,0 @@
---
title: Galileo
description: Galileo integration for CrewAI tracing and evaluation
icon: telescope
mode: "wide"
---
## Overview
This guide demonstrates how to integrate **Galileo** with **CrewAI** for comprehensive tracing and evaluation engineering. By the end of this guide, you will be able to trace your CrewAI agents, monitor their performance, and evaluate their behavior with Galileo's observability platform.
> **What is Galileo?** [Galileo](https://galileo.ai/) is an AI evaluation and observability platform that provides end-to-end tracing, evaluation, and monitoring for AI applications. It lets teams capture ground truth, build robust guardrails, and run systematic experiments with built-in experiment tracking and performance analysis, ensuring reliability, transparency, and continuous improvement across the AI lifecycle.
## Getting Started
This tutorial follows the [CrewAI Quickstart](/pt-BR/quickstart) and shows how to add Galileo's [CrewAIEventListener](https://v2docs.galileo.ai/sdk-api/python/reference/handlers/crewai/handler), an event handler.
For more information, see Galileo's [Add Galileo to a CrewAI application](https://v2docs.galileo.ai/how-to-guides/third-party-integrations/add-galileo-to-crewai/add-galileo-to-crewai) how-to guide.
> **Note:** This tutorial assumes you have completed the [CrewAI Quickstart](/pt-BR/quickstart). For a complete, comprehensive example, see Galileo's [CrewAI SDK examples repository](https://github.com/rungalileo/sdk-examples/tree/main/python/agent/crew-ai).
### Step 1: Install dependencies
Install the dependencies your app needs. Create a virtual environment using your preferred method, then install the dependencies inside that environment using your preferred tool:
```bash
uv add galileo
```
### Step 2: Add to the .env file from the [CrewAI Quickstart](/pt-BR/quickstart)
```bash
# Your Galileo API key
GALILEO_API_KEY="your-galileo-api-key"
# Your Galileo project name
GALILEO_PROJECT="your-galileo-project-name"
# The name of the Log stream you want to use for logging
GALILEO_LOG_STREAM="your-galileo-log-stream"
```
### Step 3: Add the Galileo event listener
To enable logging to Galileo, you need to create an instance of `CrewAIEventListener`. Import the Galileo CrewAI handler package by adding the following code at the top of your main.py file:
```python
from galileo.handlers.crewai.handler import CrewAIEventListener
```
At the start of your run function, create the event listener:
```python
def run():
    # Create the event listener
    CrewAIEventListener()
    # The rest of your existing code goes here
```
When you create the listener instance, it is automatically registered with CrewAI.
### Step 4: Run your crew
Run your crew with the CrewAI CLI:
```bash
crewai run
```
### Step 5: View traces in Galileo
Once your crew finishes, the traces are flushed and appear in Galileo.
![Galileo trace view](/images/galileo-trace-veiw.png)
## Understanding the Galileo Integration
Galileo integrates with CrewAI by registering an event listener that captures crew execution events (e.g., agent actions, tool calls, model responses) and forwards them to Galileo for observability and evaluation.
### Understanding the event listener
Creating a `CrewAIEventListener()` instance is all that is needed to enable Galileo for a CrewAI run. Once instantiated, the listener:
- Automatically registers itself with CrewAI
- Reads the Galileo configuration from environment variables
- Logs all run data to the Galileo project and log stream specified by `GALILEO_PROJECT` and `GALILEO_LOG_STREAM`
No additional configuration or code changes are required. All data from the run is logged to the Galileo project and log stream specified by your environment configuration (e.g., GALILEO_PROJECT and GALILEO_LOG_STREAM).

View File

@@ -12,7 +12,7 @@ dependencies = [
"pytube~=15.0.0",
"requests~=2.32.5",
"docker~=7.1.0",
"crewai==1.8.0",
"crewai==1.7.2",
"lancedb~=0.5.4",
"tiktoken~=0.8.0",
"beautifulsoup4~=4.13.4",

View File

@@ -291,4 +291,4 @@ __all__ = [
"ZapierActionTools",
]
__version__ = "1.8.0"
__version__ = "1.7.2"

View File

@@ -49,7 +49,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = [
"crewai-tools==1.8.0",
"crewai-tools==1.7.2",
]
embeddings = [
"tiktoken~=0.8.0"

View File

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
_suppress_pydantic_deprecation_warnings()
__version__ = "1.8.0"
__version__ = "1.7.2"
_telemetry_submitted = False

View File

@@ -3,6 +3,4 @@
from crewai.a2a.config import A2AConfig
__all__ = [
"A2AConfig",
]
__all__ = ["A2AConfig"]

View File

@@ -5,12 +5,11 @@ This module is separate from experimental.a2a to avoid circular imports.
from __future__ import annotations
from typing import Annotated, Any, ClassVar, Literal
from typing import Annotated
from pydantic import (
BaseModel,
BeforeValidator,
ConfigDict,
Field,
HttpUrl,
TypeAdapter,
@@ -19,12 +18,6 @@ from pydantic import (
from crewai.a2a.auth.schemas import AuthScheme
try:
from crewai.a2a.updates import UpdateConfig
except ImportError:
UpdateConfig = Any # type: ignore[misc,assignment]
http_url_adapter = TypeAdapter(HttpUrl)
Url = Annotated[
@@ -35,33 +28,23 @@ Url = Annotated[
]
def _get_default_update_config() -> UpdateConfig:
from crewai.a2a.updates import StreamingConfig
return StreamingConfig()
class A2AConfig(BaseModel):
"""Configuration for A2A protocol integration.
Attributes:
endpoint: A2A agent endpoint URL.
auth: Authentication scheme.
timeout: Request timeout in seconds.
max_turns: Maximum conversation turns with A2A agent.
auth: Authentication scheme (Bearer, OAuth2, API Key, HTTP Basic/Digest).
timeout: Request timeout in seconds (default: 120).
max_turns: Maximum conversation turns with A2A agent (default: 10).
response_model: Optional Pydantic model for structured A2A agent responses.
fail_fast: If True, raise error when agent unreachable; if False, skip and continue.
trust_remote_completion_status: If True, return A2A agent's result directly when completed.
updates: Update mechanism config.
transport_protocol: A2A transport protocol (grpc, jsonrpc, http+json).
fail_fast: If True, raise error when agent unreachable; if False, skip and continue (default: True).
trust_remote_completion_status: If True, return A2A agent's result directly when status is "completed"; if False, always ask server agent to respond (default: False).
"""
model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")
endpoint: Url = Field(description="A2A agent endpoint URL")
auth: AuthScheme | None = Field(
default=None,
description="Authentication scheme",
description="Authentication scheme (Bearer, OAuth2, API Key, HTTP Basic/Digest)",
)
timeout: int = Field(default=120, description="Request timeout in seconds")
max_turns: int = Field(
@@ -69,21 +52,13 @@ class A2AConfig(BaseModel):
)
response_model: type[BaseModel] | None = Field(
default=None,
description="Optional Pydantic model for structured A2A agent responses",
description="Optional Pydantic model for structured A2A agent responses. When specified, the A2A agent is expected to return JSON matching this schema.",
)
fail_fast: bool = Field(
default=True,
description="If True, raise error when agent unreachable; if False, skip",
description="If True, raise an error immediately when the A2A agent is unreachable. If False, skip the A2A agent and continue execution.",
)
trust_remote_completion_status: bool = Field(
default=False,
description="If True, return A2A result directly when completed",
)
updates: UpdateConfig = Field(
default_factory=_get_default_update_config,
description="Update mechanism config",
)
transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"] = Field(
default="JSONRPC",
description="Specified mode of A2A transport protocol",
description='If True, return the A2A agent\'s result directly when status is "completed" without asking the server agent to respond. If False, always ask the server agent to respond, allowing it to potentially delegate again.',
)

View File

@@ -1,7 +0,0 @@
"""A2A protocol error types."""
from a2a.client.errors import A2AClientTimeoutError
class A2APollingTimeoutError(A2AClientTimeoutError):
"""Raised when polling exceeds the configured timeout."""

View File

@@ -1,322 +0,0 @@
"""Helper functions for processing A2A task results."""
from __future__ import annotations
from collections.abc import AsyncIterator
from typing import TYPE_CHECKING, TypedDict
import uuid
from a2a.types import (
AgentCard,
Message,
Part,
Role,
Task,
TaskArtifactUpdateEvent,
TaskState,
TaskStatusUpdateEvent,
TextPart,
)
from typing_extensions import NotRequired
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import A2AResponseReceivedEvent
if TYPE_CHECKING:
from a2a.types import Task as A2ATask
SendMessageEvent = (
tuple[Task, TaskStatusUpdateEvent | TaskArtifactUpdateEvent | None] | Message
)
TERMINAL_STATES: frozenset[TaskState] = frozenset(
{
TaskState.completed,
TaskState.failed,
TaskState.rejected,
TaskState.canceled,
}
)
ACTIONABLE_STATES: frozenset[TaskState] = frozenset(
{
TaskState.input_required,
TaskState.auth_required,
}
)
class TaskStateResult(TypedDict):
"""Result dictionary from processing A2A task state."""
status: TaskState
history: list[Message]
result: NotRequired[str]
error: NotRequired[str]
agent_card: NotRequired[AgentCard]
def extract_task_result_parts(a2a_task: A2ATask) -> list[str]:
"""Extract result parts from A2A task status message, history, and artifacts.
Args:
a2a_task: A2A Task object with status, history, and artifacts
Returns:
List of result text parts
"""
result_parts: list[str] = []
if a2a_task.status and a2a_task.status.message:
msg = a2a_task.status.message
result_parts.extend(
part.root.text for part in msg.parts if part.root.kind == "text"
)
if not result_parts and a2a_task.history:
for history_msg in reversed(a2a_task.history):
if history_msg.role == Role.agent:
result_parts.extend(
part.root.text
for part in history_msg.parts
if part.root.kind == "text"
)
break
if a2a_task.artifacts:
result_parts.extend(
part.root.text
for artifact in a2a_task.artifacts
for part in artifact.parts
if part.root.kind == "text"
)
return result_parts
def extract_error_message(a2a_task: A2ATask, default: str) -> str:
"""Extract error message from A2A task.
Args:
a2a_task: A2A Task object
default: Default message if no error found
Returns:
Error message string
"""
if a2a_task.status and a2a_task.status.message:
msg = a2a_task.status.message
if msg:
for part in msg.parts:
if part.root.kind == "text":
return str(part.root.text)
return str(msg)
if a2a_task.history:
for history_msg in reversed(a2a_task.history):
for part in history_msg.parts:
if part.root.kind == "text":
return str(part.root.text)
return default
def process_task_state(
a2a_task: A2ATask,
new_messages: list[Message],
agent_card: AgentCard,
turn_number: int,
is_multiturn: bool,
agent_role: str | None,
result_parts: list[str] | None = None,
) -> TaskStateResult | None:
"""Process A2A task state and return result dictionary.
Shared logic for both polling and streaming handlers.
Args:
a2a_task: The A2A task to process
new_messages: List to collect messages (modified in place)
agent_card: The agent card
turn_number: Current turn number
is_multiturn: Whether multi-turn conversation
agent_role: Agent role for logging
result_parts: Accumulated result parts (streaming passes accumulated,
polling passes None to extract from task)
Returns:
Result dictionary if terminal/actionable state, None otherwise
"""
should_extract = result_parts is None
if result_parts is None:
result_parts = []
if a2a_task.status.state == TaskState.completed:
if should_extract:
extracted_parts = extract_task_result_parts(a2a_task)
result_parts.extend(extracted_parts)
if a2a_task.history:
new_messages.extend(a2a_task.history)
response_text = " ".join(result_parts) if result_parts else ""
crewai_event_bus.emit(
None,
A2AResponseReceivedEvent(
response=response_text,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="completed",
agent_role=agent_role,
),
)
return TaskStateResult(
status=TaskState.completed,
agent_card=agent_card,
result=response_text,
history=new_messages,
)
if a2a_task.status.state == TaskState.input_required:
if a2a_task.history:
new_messages.extend(a2a_task.history)
response_text = extract_error_message(a2a_task, "Additional input required")
if response_text and not a2a_task.history:
agent_message = Message(
role=Role.agent,
message_id=str(uuid.uuid4()),
parts=[Part(root=TextPart(text=response_text))],
context_id=a2a_task.context_id,
task_id=a2a_task.id,
)
new_messages.append(agent_message)
crewai_event_bus.emit(
None,
A2AResponseReceivedEvent(
response=response_text,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="input_required",
agent_role=agent_role,
),
)
return TaskStateResult(
status=TaskState.input_required,
error=response_text,
history=new_messages,
agent_card=agent_card,
)
if a2a_task.status.state in {TaskState.failed, TaskState.rejected}:
error_msg = extract_error_message(a2a_task, "Task failed without error message")
if a2a_task.history:
new_messages.extend(a2a_task.history)
return TaskStateResult(
status=TaskState.failed,
error=error_msg,
history=new_messages,
)
if a2a_task.status.state == TaskState.auth_required:
error_msg = extract_error_message(a2a_task, "Authentication required")
return TaskStateResult(
status=TaskState.auth_required,
error=error_msg,
history=new_messages,
)
if a2a_task.status.state == TaskState.canceled:
error_msg = extract_error_message(a2a_task, "Task was canceled")
return TaskStateResult(
status=TaskState.canceled,
error=error_msg,
history=new_messages,
)
return None
async def send_message_and_get_task_id(
event_stream: AsyncIterator[SendMessageEvent],
new_messages: list[Message],
agent_card: AgentCard,
turn_number: int,
is_multiturn: bool,
agent_role: str | None,
) -> str | TaskStateResult:
"""Send message and process initial response.
Handles the common pattern of sending a message and either:
- Getting an immediate Message response (task completed synchronously)
- Getting a Task that needs polling/waiting for completion
Args:
event_stream: Async iterator from client.send_message()
new_messages: List to collect messages (modified in place)
agent_card: The agent card
turn_number: Current turn number
is_multiturn: Whether multi-turn conversation
agent_role: Agent role for logging
Returns:
Task ID string if agent needs polling/waiting, or TaskStateResult if done.
"""
try:
async for event in event_stream:
if isinstance(event, Message):
new_messages.append(event)
result_parts = [
part.root.text for part in event.parts if part.root.kind == "text"
]
response_text = " ".join(result_parts) if result_parts else ""
crewai_event_bus.emit(
None,
A2AResponseReceivedEvent(
response=response_text,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="completed",
agent_role=agent_role,
),
)
return TaskStateResult(
status=TaskState.completed,
result=response_text,
history=new_messages,
agent_card=agent_card,
)
if isinstance(event, tuple):
a2a_task, _ = event
if a2a_task.status.state in TERMINAL_STATES | ACTIONABLE_STATES:
result = process_task_state(
a2a_task=a2a_task,
new_messages=new_messages,
agent_card=agent_card,
turn_number=turn_number,
is_multiturn=is_multiturn,
agent_role=agent_role,
)
if result:
return result
return a2a_task.id
return TaskStateResult(
status=TaskState.failed,
error="No task ID received from initial message",
history=new_messages,
)
finally:
aclose = getattr(event_stream, "aclose", None)
if aclose:
await aclose()

View File

@@ -27,14 +27,3 @@ UNAVAILABLE_AGENTS_NOTICE_TEMPLATE: Final[Template] = Template(
" $unavailable_agents"
"\n</A2A_AGENTS_STATUS>\n"
)
REMOTE_AGENT_COMPLETED_NOTICE: Final[str] = """
<REMOTE_AGENT_STATUS>
STATUS: COMPLETED
The remote agent has finished processing your request. Their response is in the conversation history above.
You MUST now:
1. Extract the answer from the conversation history
2. Set is_a2a=false
3. Return the answer as your final message
DO NOT send another request - the task is already done.
</REMOTE_AGENT_STATUS>
"""

View File

@@ -4,16 +4,6 @@ from typing import Any, Literal, Protocol, TypedDict, runtime_checkable
from typing_extensions import NotRequired
from crewai.a2a.updates import (
PollingConfig,
PollingHandler,
PushNotificationConfig,
PushNotificationHandler,
StreamingConfig,
StreamingHandler,
UpdateConfig,
)
@runtime_checkable
class AgentResponseProtocol(Protocol):
@@ -46,16 +36,3 @@ class PartsDict(TypedDict):
text: str
metadata: NotRequired[PartsMetadataDict]
PollingHandlerType = type[PollingHandler]
StreamingHandlerType = type[StreamingHandler]
PushNotificationHandlerType = type[PushNotificationHandler]
HandlerType = PollingHandlerType | StreamingHandlerType | PushNotificationHandlerType
HANDLER_REGISTRY: dict[type[UpdateConfig], HandlerType] = {
PollingConfig: PollingHandler,
StreamingConfig: StreamingHandler,
PushNotificationConfig: PushNotificationHandler,
}
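A sketch of resolving a handler from this registry; the `StreamingHandler` fallback mirrors the `get_handler` helper removed from utils.py later in this diff:

def resolve_handler(config: UpdateConfig | None) -> HandlerType:
    # Streaming is the default when no update config is provided.
    if config is None:
        return StreamingHandler
    return HANDLER_REGISTRY.get(type(config), StreamingHandler)

handler = resolve_handler(PollingConfig())  # -> PollingHandler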

View File

@@ -1,35 +0,0 @@
"""A2A update mechanism configuration types."""
from crewai.a2a.updates.base import (
BaseHandlerKwargs,
PollingHandlerKwargs,
PushNotificationHandlerKwargs,
PushNotificationResultStore,
StreamingHandlerKwargs,
UpdateHandler,
)
from crewai.a2a.updates.polling.config import PollingConfig
from crewai.a2a.updates.polling.handler import PollingHandler
from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler
from crewai.a2a.updates.streaming.config import StreamingConfig
from crewai.a2a.updates.streaming.handler import StreamingHandler
UpdateConfig = PollingConfig | StreamingConfig | PushNotificationConfig
__all__ = [
"BaseHandlerKwargs",
"PollingConfig",
"PollingHandler",
"PollingHandlerKwargs",
"PushNotificationConfig",
"PushNotificationHandler",
"PushNotificationHandlerKwargs",
"PushNotificationResultStore",
"StreamingConfig",
"StreamingHandler",
"StreamingHandlerKwargs",
"UpdateConfig",
"UpdateHandler",
]

View File

@@ -1,131 +0,0 @@
"""Base types for A2A update mechanism handlers."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Protocol, TypedDict
from pydantic import GetCoreSchemaHandler
from pydantic_core import CoreSchema, core_schema
if TYPE_CHECKING:
from a2a.client import Client
from a2a.types import AgentCard, Message, Task
from crewai.a2a.task_helpers import TaskStateResult
from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
class BaseHandlerKwargs(TypedDict, total=False):
"""Base kwargs shared by all handlers."""
turn_number: int
is_multiturn: bool
agent_role: str | None
class PollingHandlerKwargs(BaseHandlerKwargs, total=False):
"""Kwargs for polling handler."""
polling_interval: float
polling_timeout: float
endpoint: str
agent_branch: Any
history_length: int
max_polls: int | None
class StreamingHandlerKwargs(BaseHandlerKwargs, total=False):
"""Kwargs for streaming handler."""
context_id: str | None
task_id: str | None
class PushNotificationHandlerKwargs(BaseHandlerKwargs, total=False):
"""Kwargs for push notification handler."""
config: PushNotificationConfig
result_store: PushNotificationResultStore
polling_timeout: float
polling_interval: float
agent_branch: Any
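# Illustrative only (not part of this module): total=False means callers
# pass just the keys they need. Example polling kwargs, arbitrary values:
example_polling_kwargs: PollingHandlerKwargs = {
    "turn_number": 1,
    "is_multiturn": False,
    "polling_interval": 2.0,
    "polling_timeout": 300.0,
    "history_length": 100,
}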
class PushNotificationResultStore(Protocol):
"""Protocol for storing and retrieving push notification results.
This protocol defines the interface for a result store that the
PushNotificationHandler uses to wait for task completion.
"""
@classmethod
def __get_pydantic_core_schema__(
cls,
source_type: Any,
handler: GetCoreSchemaHandler,
) -> CoreSchema:
return core_schema.any_schema()
async def wait_for_result(
self,
task_id: str,
timeout: float,
poll_interval: float = 1.0,
) -> Task | None:
"""Wait for a task result to be available.
Args:
task_id: The task ID to wait for.
timeout: Max seconds to wait before returning None.
poll_interval: Seconds between polling attempts.
Returns:
The completed Task object, or None if the timeout elapses.
"""
...
async def get_result(self, task_id: str) -> Task | None:
"""Get a task result if available.
Args:
task_id: The task ID to retrieve.
Returns:
The Task object if available, None otherwise.
"""
...
async def store_result(self, task: Task) -> None:
"""Store a task result.
Args:
task: The Task object to store.
"""
...
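# A minimal in-memory implementation of the protocol above, shown as a
# sketch (asyncio-based, illustrative, not part of this module):
class _InMemoryResultStore:
    def __init__(self) -> None:
        self._results: dict[str, Task] = {}

    async def store_result(self, task: Task) -> None:
        self._results[task.id] = task  # index by task ID for waiters

    async def get_result(self, task_id: str) -> Task | None:
        return self._results.get(task_id)

    async def wait_for_result(
        self, task_id: str, timeout: float, poll_interval: float = 1.0
    ) -> Task | None:
        import asyncio  # local import keeps the sketch self-contained

        elapsed = 0.0
        while elapsed < timeout:
            if (task := self._results.get(task_id)) is not None:
                return task
            await asyncio.sleep(poll_interval)
            elapsed += poll_interval
        return None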
class UpdateHandler(Protocol):
"""Protocol for A2A update mechanism handlers."""
@staticmethod
async def execute(
client: Client,
message: Message,
new_messages: list[Message],
agent_card: AgentCard,
**kwargs: Any,
) -> TaskStateResult:
"""Execute the update mechanism and return result.
Args:
client: A2A client instance.
message: Message to send.
new_messages: List to collect messages (modified in place).
agent_card: The agent card.
**kwargs: Additional handler-specific parameters.
Returns:
TaskStateResult with status, result/error, and history.
"""
...

View File

@@ -1 +0,0 @@
"""Polling update mechanism module."""

View File

@@ -1,25 +0,0 @@
"""Polling update mechanism configuration."""
from __future__ import annotations
from pydantic import BaseModel, Field
class PollingConfig(BaseModel):
"""Configuration for polling-based task updates.
Attributes:
interval: Seconds between poll attempts.
timeout: Max seconds to poll before raising a timeout error.
max_polls: Max number of poll attempts.
history_length: Number of messages to retrieve per poll.
"""
interval: float = Field(
default=2.0, gt=0, description="Seconds between poll attempts"
)
timeout: float | None = Field(default=None, gt=0, description="Max seconds to poll")
max_polls: int | None = Field(default=None, gt=0, description="Max poll attempts")
history_length: int = Field(
default=100, gt=0, description="Messages to retrieve per poll"
)
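For illustration, a config that polls every second for at most two minutes, capped at 100 attempts (values are arbitrary):

config = PollingConfig(
    interval=1.0,
    timeout=120.0,
    max_polls=100,
    history_length=50,
)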

View File

@@ -1,246 +0,0 @@
"""Polling update mechanism handler."""
from __future__ import annotations
import asyncio
import time
from typing import TYPE_CHECKING, Any
import uuid
from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
Role,
TaskQueryParams,
TaskState,
TextPart,
)
from typing_extensions import Unpack
from crewai.a2a.errors import A2APollingTimeoutError
from crewai.a2a.task_helpers import (
ACTIONABLE_STATES,
TERMINAL_STATES,
TaskStateResult,
process_task_state,
send_message_and_get_task_id,
)
from crewai.a2a.updates.base import PollingHandlerKwargs
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2APollingStartedEvent,
A2APollingStatusEvent,
A2AResponseReceivedEvent,
)
if TYPE_CHECKING:
from a2a.types import Task as A2ATask
async def _poll_task_until_complete(
client: Client,
task_id: str,
polling_interval: float,
polling_timeout: float,
agent_branch: Any | None = None,
history_length: int = 100,
max_polls: int | None = None,
) -> A2ATask:
"""Poll task status until terminal state reached.
Args:
client: A2A client instance
task_id: Task ID to poll
polling_interval: Seconds between poll attempts
polling_timeout: Max seconds before timeout
agent_branch: Agent tree branch for logging
history_length: Number of messages to retrieve per poll
max_polls: Max number of poll attempts (None = unlimited)
Returns:
Final task object in terminal state
Raises:
A2APollingTimeoutError: If polling exceeds the timeout or max_polls limit
"""
start_time = time.monotonic()
poll_count = 0
while True:
poll_count += 1
task = await client.get_task(
TaskQueryParams(id=task_id, history_length=history_length)
)
elapsed = time.monotonic() - start_time
crewai_event_bus.emit(
agent_branch,
A2APollingStatusEvent(
task_id=task_id,
state=str(task.status.state.value) if task.status.state else "unknown",
elapsed_seconds=elapsed,
poll_count=poll_count,
),
)
if task.status.state in TERMINAL_STATES | ACTIONABLE_STATES:
return task
if elapsed > polling_timeout:
raise A2APollingTimeoutError(
f"Polling timeout after {polling_timeout}s ({poll_count} polls)"
)
if max_polls and poll_count >= max_polls:
raise A2APollingTimeoutError(
f"Max polls ({max_polls}) exceeded after {elapsed:.1f}s"
)
await asyncio.sleep(polling_interval)
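# Sketch of the cap semantics (illustrative helper, not part of this module):
# with polling_interval=2.0 and max_polls=5, polls fire at ~0s, 2s, 4s, 6s
# and 8s, and the fifth poll raises A2APollingTimeoutError even though
# polling_timeout has not elapsed.
async def _example_poll(client: Client, task_id: str) -> A2ATask:
    return await _poll_task_until_complete(
        client=client,
        task_id=task_id,
        polling_interval=2.0,
        polling_timeout=60.0,
        max_polls=5,
    )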
class PollingHandler:
"""Polling-based update handler."""
@staticmethod
async def execute(
client: Client,
message: Message,
new_messages: list[Message],
agent_card: AgentCard,
**kwargs: Unpack[PollingHandlerKwargs],
) -> TaskStateResult:
"""Execute A2A delegation using polling for updates.
Args:
client: A2A client instance.
message: Message to send.
new_messages: List to collect messages.
agent_card: The agent card.
**kwargs: Polling-specific parameters.
Returns:
TaskStateResult with status, result/error, and history.
"""
polling_interval = kwargs.get("polling_interval", 2.0)
polling_timeout = kwargs.get("polling_timeout", 300.0)
endpoint = kwargs.get("endpoint", "")
agent_branch = kwargs.get("agent_branch")
turn_number = kwargs.get("turn_number", 0)
is_multiturn = kwargs.get("is_multiturn", False)
agent_role = kwargs.get("agent_role")
history_length = kwargs.get("history_length", 100)
max_polls = kwargs.get("max_polls")
context_id = kwargs.get("context_id")
task_id = kwargs.get("task_id")
try:
result_or_task_id = await send_message_and_get_task_id(
event_stream=client.send_message(message),
new_messages=new_messages,
agent_card=agent_card,
turn_number=turn_number,
is_multiturn=is_multiturn,
agent_role=agent_role,
)
if not isinstance(result_or_task_id, str):
return result_or_task_id
task_id = result_or_task_id
crewai_event_bus.emit(
agent_branch,
A2APollingStartedEvent(
task_id=task_id,
polling_interval=polling_interval,
endpoint=endpoint,
),
)
final_task = await _poll_task_until_complete(
client=client,
task_id=task_id,
polling_interval=polling_interval,
polling_timeout=polling_timeout,
agent_branch=agent_branch,
history_length=history_length,
max_polls=max_polls,
)
result = process_task_state(
a2a_task=final_task,
new_messages=new_messages,
agent_card=agent_card,
turn_number=turn_number,
is_multiturn=is_multiturn,
agent_role=agent_role,
)
if result:
return result
return TaskStateResult(
status=TaskState.failed,
error=f"Unexpected task state: {final_task.status.state}",
history=new_messages,
)
except A2APollingTimeoutError as e:
error_msg = str(e)
error_message = Message(
role=Role.agent,
message_id=str(uuid.uuid4()),
parts=[Part(root=TextPart(text=error_msg))],
context_id=context_id,
task_id=task_id,
)
new_messages.append(error_message)
crewai_event_bus.emit(
agent_branch,
A2AResponseReceivedEvent(
response=error_msg,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="failed",
agent_role=agent_role,
),
)
return TaskStateResult(
status=TaskState.failed,
error=error_msg,
history=new_messages,
)
except A2AClientHTTPError as e:
error_msg = f"HTTP Error {e.status_code}: {e!s}"
error_message = Message(
role=Role.agent,
message_id=str(uuid.uuid4()),
parts=[Part(root=TextPart(text=error_msg))],
context_id=context_id,
task_id=task_id,
)
new_messages.append(error_message)
crewai_event_bus.emit(
agent_branch,
A2AResponseReceivedEvent(
response=error_msg,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="failed",
agent_role=agent_role,
),
)
return TaskStateResult(
status=TaskState.failed,
error=error_msg,
history=new_messages,
)

View File

@@ -1 +0,0 @@
"""Push notification update mechanism module."""

View File

@@ -1,38 +0,0 @@
"""Push notification update mechanism configuration."""
from __future__ import annotations
from a2a.types import PushNotificationAuthenticationInfo
from pydantic import AnyHttpUrl, BaseModel, Field
from crewai.a2a.updates.base import PushNotificationResultStore
class PushNotificationConfig(BaseModel):
"""Configuration for webhook-based task updates.
Attributes:
url: Callback URL where agent sends push notifications.
id: Unique identifier for this config.
token: Token to validate incoming notifications.
authentication: Auth info for agent to use when calling webhook.
timeout: Max seconds to wait for task completion.
interval: Seconds between result polling attempts.
result_store: Store for receiving push notification results.
"""
url: AnyHttpUrl = Field(description="Callback URL for push notifications")
id: str | None = Field(default=None, description="Unique config identifier")
token: str | None = Field(default=None, description="Validation token")
authentication: PushNotificationAuthenticationInfo | None = Field(
default=None, description="Auth info for agent to use when calling webhook"
)
timeout: float | None = Field(
default=300.0, gt=0, description="Max seconds to wait for task completion"
)
interval: float = Field(
default=2.0, gt=0, description="Seconds between result polling attempts"
)
result_store: PushNotificationResultStore | None = Field(
default=None, description="Result store for push notification handling"
)
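A hedged construction example (the URL and token are placeholders, and `my_store` stands for any PushNotificationResultStore implementation):

config = PushNotificationConfig(
    url="https://example.com/a2a/webhook",
    token="shared-secret",
    timeout=600.0,
    interval=5.0,
    result_store=my_store,
)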

View File

@@ -1,220 +0,0 @@
"""Push notification (webhook) update mechanism handler."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
import uuid
from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
Role,
TaskState,
TextPart,
)
from typing_extensions import Unpack
from crewai.a2a.task_helpers import (
TaskStateResult,
process_task_state,
send_message_and_get_task_id,
)
from crewai.a2a.updates.base import (
PushNotificationHandlerKwargs,
PushNotificationResultStore,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2APushNotificationRegisteredEvent,
A2APushNotificationTimeoutEvent,
A2AResponseReceivedEvent,
)
if TYPE_CHECKING:
from a2a.types import Task as A2ATask
logger = logging.getLogger(__name__)
async def _wait_for_push_result(
task_id: str,
result_store: PushNotificationResultStore,
timeout: float,
poll_interval: float,
agent_branch: Any | None = None,
) -> A2ATask | None:
"""Wait for push notification result.
Args:
task_id: Task ID to wait for.
result_store: Store to retrieve results from.
timeout: Max seconds to wait.
poll_interval: Seconds between polling attempts.
agent_branch: Agent tree branch for logging.
Returns:
Final task object, or None if the timeout elapses.
"""
task = await result_store.wait_for_result(
task_id=task_id,
timeout=timeout,
poll_interval=poll_interval,
)
if task is None:
crewai_event_bus.emit(
agent_branch,
A2APushNotificationTimeoutEvent(
task_id=task_id,
timeout_seconds=timeout,
),
)
return task
class PushNotificationHandler:
"""Push notification (webhook) based update handler."""
@staticmethod
async def execute(
client: Client,
message: Message,
new_messages: list[Message],
agent_card: AgentCard,
**kwargs: Unpack[PushNotificationHandlerKwargs],
) -> TaskStateResult:
"""Execute A2A delegation using push notifications for updates.
Args:
client: A2A client instance.
message: Message to send.
new_messages: List to collect messages.
agent_card: The agent card.
**kwargs: Push notification-specific parameters.
Returns:
TaskStateResult with status, result/error, and history.
Raises:
ValueError: If result_store or config not provided.
"""
config = kwargs.get("config")
result_store = kwargs.get("result_store")
polling_timeout = kwargs.get("polling_timeout", 300.0)
polling_interval = kwargs.get("polling_interval", 2.0)
agent_branch = kwargs.get("agent_branch")
turn_number = kwargs.get("turn_number", 0)
is_multiturn = kwargs.get("is_multiturn", False)
agent_role = kwargs.get("agent_role")
context_id = kwargs.get("context_id")
task_id = kwargs.get("task_id")
if config is None:
return TaskStateResult(
status=TaskState.failed,
error="PushNotificationConfig is required for push notification handler",
history=new_messages,
)
if result_store is None:
return TaskStateResult(
status=TaskState.failed,
error="PushNotificationResultStore is required for push notification handler",
history=new_messages,
)
try:
result_or_task_id = await send_message_and_get_task_id(
event_stream=client.send_message(message),
new_messages=new_messages,
agent_card=agent_card,
turn_number=turn_number,
is_multiturn=is_multiturn,
agent_role=agent_role,
)
if not isinstance(result_or_task_id, str):
return result_or_task_id
task_id = result_or_task_id
crewai_event_bus.emit(
agent_branch,
A2APushNotificationRegisteredEvent(
task_id=task_id,
callback_url=str(config.url),
),
)
logger.debug(
"Push notification callback for task %s configured at %s (via initial request)",
task_id,
config.url,
)
final_task = await _wait_for_push_result(
task_id=task_id,
result_store=result_store,
timeout=polling_timeout,
poll_interval=polling_interval,
agent_branch=agent_branch,
)
if final_task is None:
return TaskStateResult(
status=TaskState.failed,
error=f"Push notification timeout after {polling_timeout}s",
history=new_messages,
)
result = process_task_state(
a2a_task=final_task,
new_messages=new_messages,
agent_card=agent_card,
turn_number=turn_number,
is_multiturn=is_multiturn,
agent_role=agent_role,
)
if result:
return result
return TaskStateResult(
status=TaskState.failed,
error=f"Unexpected task state: {final_task.status.state}",
history=new_messages,
)
except A2AClientHTTPError as e:
error_msg = f"HTTP Error {e.status_code}: {e!s}"
error_message = Message(
role=Role.agent,
message_id=str(uuid.uuid4()),
parts=[Part(root=TextPart(text=error_msg))],
context_id=context_id,
task_id=task_id,
)
new_messages.append(error_message)
crewai_event_bus.emit(
agent_branch,
A2AResponseReceivedEvent(
response=error_msg,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="failed",
agent_role=agent_role,
),
)
return TaskStateResult(
status=TaskState.failed,
error=error_msg,
history=new_messages,
)

View File

@@ -1 +0,0 @@
"""Streaming update mechanism module."""

View File

@@ -1,9 +0,0 @@
"""Streaming update mechanism configuration."""
from __future__ import annotations
from pydantic import BaseModel
class StreamingConfig(BaseModel):
"""Configuration for SSE-based task updates."""

View File

@@ -1,149 +0,0 @@
"""Streaming (SSE) update mechanism handler."""
from __future__ import annotations
import uuid
from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
Role,
TaskArtifactUpdateEvent,
TaskState,
TaskStatusUpdateEvent,
TextPart,
)
from typing_extensions import Unpack
from crewai.a2a.task_helpers import (
ACTIONABLE_STATES,
TERMINAL_STATES,
TaskStateResult,
process_task_state,
)
from crewai.a2a.updates.base import StreamingHandlerKwargs
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import A2AResponseReceivedEvent
class StreamingHandler:
"""SSE streaming-based update handler."""
@staticmethod
async def execute(
client: Client,
message: Message,
new_messages: list[Message],
agent_card: AgentCard,
**kwargs: Unpack[StreamingHandlerKwargs],
) -> TaskStateResult:
"""Execute A2A delegation using SSE streaming for updates.
Args:
client: A2A client instance.
message: Message to send.
new_messages: List to collect messages.
agent_card: The agent card.
**kwargs: Streaming-specific parameters.
Returns:
TaskStateResult with status, result/error, and history.
"""
context_id = kwargs.get("context_id")
task_id = kwargs.get("task_id")
turn_number = kwargs.get("turn_number", 0)
is_multiturn = kwargs.get("is_multiturn", False)
agent_role = kwargs.get("agent_role")
result_parts: list[str] = []
final_result: TaskStateResult | None = None
event_stream = client.send_message(message)
try:
async for event in event_stream:
if isinstance(event, Message):
new_messages.append(event)
for part in event.parts:
if part.root.kind == "text":
text = part.root.text
result_parts.append(text)
elif isinstance(event, tuple):
a2a_task, update = event
if isinstance(update, TaskArtifactUpdateEvent):
artifact = update.artifact
result_parts.extend(
part.root.text
for part in artifact.parts
if part.root.kind == "text"
)
is_final_update = False
if isinstance(update, TaskStatusUpdateEvent):
is_final_update = update.final
if (
not is_final_update
and a2a_task.status.state
not in TERMINAL_STATES | ACTIONABLE_STATES
):
continue
final_result = process_task_state(
a2a_task=a2a_task,
new_messages=new_messages,
agent_card=agent_card,
turn_number=turn_number,
is_multiturn=is_multiturn,
agent_role=agent_role,
result_parts=result_parts,
)
if final_result:
break
except A2AClientHTTPError as e:
error_msg = f"HTTP Error {e.status_code}: {e!s}"
error_message = Message(
role=Role.agent,
message_id=str(uuid.uuid4()),
parts=[Part(root=TextPart(text=error_msg))],
context_id=context_id,
task_id=task_id,
)
new_messages.append(error_message)
crewai_event_bus.emit(
None,
A2AResponseReceivedEvent(
response=error_msg,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="failed",
agent_role=agent_role,
),
)
return TaskStateResult(
status=TaskState.failed,
error=error_msg,
history=new_messages,
)
finally:
aclose = getattr(event_stream, "aclose", None)
if aclose:
await aclose()
if final_result:
return final_result
return TaskStateResult(
status=TaskState.completed,
result=" ".join(result_parts) if result_parts else "",
history=new_messages,
agent_card=agent_card,
)
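A sketch of invoking the handler directly (assumes an existing `client`, `message`, and `agent_card`; TaskStateResult is a TypedDict, hence the key-style access):

result = await StreamingHandler.execute(
    client=client,
    message=message,
    new_messages=[],
    agent_card=agent_card,
    turn_number=1,
    is_multiturn=False,
)
if result["status"] == TaskState.completed:
    print(result.get("result", ""))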

View File

@@ -7,17 +7,21 @@ from collections.abc import AsyncIterator, MutableMapping
from contextlib import asynccontextmanager
from functools import lru_cache
import time
from typing import TYPE_CHECKING, Any, Literal
from typing import TYPE_CHECKING, Any
import uuid
from a2a.client import A2AClientHTTPError, Client, ClientConfig, ClientFactory
from a2a.client import Client, ClientConfig, ClientFactory
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
PushNotificationConfig as A2APushNotificationConfig,
Role,
TaskArtifactUpdateEvent,
TaskState,
TaskStatusUpdateEvent,
TextPart,
TransportProtocol,
)
from aiocache import cached # type: ignore[import-untyped]
from aiocache.serializers import PickleSerializer # type: ignore[import-untyped]
@@ -32,49 +36,24 @@ from crewai.a2a.auth.utils import (
validate_auth_against_agent_card,
)
from crewai.a2a.config import A2AConfig
from crewai.a2a.task_helpers import TaskStateResult
from crewai.a2a.types import (
HANDLER_REGISTRY,
HandlerType,
PartsDict,
PartsMetadataDict,
)
from crewai.a2a.updates import (
PollingConfig,
PushNotificationConfig,
StreamingHandler,
UpdateConfig,
)
from crewai.a2a.types import PartsDict, PartsMetadataDict
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AConversationStartedEvent,
A2ADelegationCompletedEvent,
A2ADelegationStartedEvent,
A2AMessageSentEvent,
A2AResponseReceivedEvent,
)
from crewai.types.utils import create_literals_from_strings
if TYPE_CHECKING:
from a2a.types import Message
from a2a.types import Message, Task as A2ATask
from crewai.a2a.auth.schemas import AuthScheme
def get_handler(config: UpdateConfig | None) -> HandlerType:
"""Get the handler class for a given update config.
Args:
config: Update mechanism configuration.
Returns:
Handler class for the config type, defaults to StreamingHandler.
"""
if config is None:
return StreamingHandler
return HANDLER_REGISTRY.get(type(config), StreamingHandler)
@lru_cache()
def _fetch_agent_card_cached(
endpoint: str,
@@ -82,14 +61,24 @@ def _fetch_agent_card_cached(
timeout: int,
_ttl_hash: int,
) -> AgentCard:
"""Cached sync version of fetch_agent_card."""
"""Cached version of fetch_agent_card with auth support.
Args:
endpoint: A2A agent endpoint URL
auth_hash: Hash of the auth object
timeout: Request timeout
_ttl_hash: Time-based hash for cache invalidation
Returns:
Cached AgentCard
"""
auth = _auth_store.get(auth_hash)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(
_afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)
_fetch_agent_card_async(endpoint=endpoint, auth=auth, timeout=timeout)
)
finally:
loop.close()
@@ -139,74 +128,47 @@ def fetch_agent_card(
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(
afetch_agent_card(endpoint=endpoint, auth=auth, timeout=timeout)
_fetch_agent_card_async(endpoint=endpoint, auth=auth, timeout=timeout)
)
finally:
loop.close()
async def afetch_agent_card(
endpoint: str,
auth: AuthScheme | None = None,
timeout: int = 30,
use_cache: bool = True,
) -> AgentCard:
"""Fetch AgentCard from an A2A endpoint asynchronously.
Native async implementation. Use this when running in an async context.
Args:
endpoint: A2A agent endpoint URL (AgentCard URL).
auth: Optional AuthScheme for authentication.
timeout: Request timeout in seconds.
use_cache: Whether to use caching (default True).
Returns:
AgentCard object with agent capabilities and skills.
Raises:
httpx.HTTPStatusError: If the request fails.
A2AClientHTTPError: If authentication fails.
"""
if use_cache:
if auth:
auth_data = auth.model_dump_json(
exclude={
"_access_token",
"_token_expires_at",
"_refresh_token",
"_authorization_callback",
}
)
auth_hash = hash((type(auth).__name__, auth_data))
else:
auth_hash = 0
_auth_store[auth_hash] = auth
agent_card: AgentCard = await _afetch_agent_card_cached(
endpoint, auth_hash, timeout
)
return agent_card
return await _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)
@cached(ttl=300, serializer=PickleSerializer()) # type: ignore[untyped-decorator]
async def _afetch_agent_card_cached(
async def _fetch_agent_card_async_cached(
endpoint: str,
auth_hash: int,
timeout: int,
) -> AgentCard:
"""Cached async implementation of AgentCard fetching."""
"""Cached async implementation of AgentCard fetching.
Args:
endpoint: A2A agent endpoint URL
auth_hash: Hash of the auth object
timeout: Request timeout in seconds
Returns:
Cached AgentCard object
"""
auth = _auth_store.get(auth_hash)
return await _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)
return await _fetch_agent_card_async(endpoint=endpoint, auth=auth, timeout=timeout)
async def _afetch_agent_card_impl(
async def _fetch_agent_card_async(
endpoint: str,
auth: AuthScheme | None,
timeout: int,
) -> AgentCard:
"""Internal async implementation of AgentCard fetching."""
"""Async implementation of AgentCard fetching.
Args:
endpoint: A2A agent endpoint URL
auth: Optional AuthScheme for authentication
timeout: Request timeout in seconds
Returns:
AgentCard object
"""
if "/.well-known/agent-card.json" in endpoint:
base_url = endpoint.replace("/.well-known/agent-card.json", "")
agent_card_path = "/.well-known/agent-card.json"
@@ -258,7 +220,6 @@ async def _afetch_agent_card_impl(
def execute_a2a_delegation(
endpoint: str,
transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
auth: AuthScheme | None,
timeout: int,
task_description: str,
@@ -274,16 +235,18 @@ def execute_a2a_delegation(
agent_branch: Any | None = None,
response_model: type[BaseModel] | None = None,
turn_number: int | None = None,
updates: UpdateConfig | None = None,
) -> TaskStateResult:
"""Execute a task delegation to a remote A2A agent synchronously.
) -> dict[str, Any]:
"""Execute a task delegation to a remote A2A agent with multi-turn support.
This is the sync wrapper around aexecute_a2a_delegation. For async contexts,
use aexecute_a2a_delegation directly.
Handles:
- AgentCard discovery
- Authentication setup
- Message creation and sending
- Response parsing
- Multi-turn conversations
Args:
endpoint: A2A agent endpoint URL (AgentCard URL)
transport_protocol: Optional A2A transport protocol (grpc, jsonrpc, http+json)
auth: Optional AuthScheme for authentication (Bearer, OAuth2, API Key, HTTP Basic/Digest)
timeout: Request timeout in seconds
task_description: The task to delegate
@@ -299,127 +262,22 @@ def execute_a2a_delegation(
agent_branch: Optional agent tree branch for logging
response_model: Optional Pydantic model for structured outputs
turn_number: Optional turn number for multi-turn conversations
endpoint: A2A agent endpoint URL.
auth: Optional AuthScheme for authentication.
timeout: Request timeout in seconds.
task_description: The task to delegate.
context: Optional context information.
context_id: Context ID for correlating messages/tasks.
task_id: Specific task identifier.
reference_task_ids: List of related task IDs.
metadata: Additional metadata.
extensions: Protocol extensions for custom fields.
conversation_history: Previous Message objects from conversation.
agent_id: Agent identifier for logging.
agent_role: Role of the CrewAI agent delegating the task.
agent_branch: Optional agent tree branch for logging.
response_model: Optional Pydantic model for structured outputs.
turn_number: Optional turn number for multi-turn conversations.
updates: Update mechanism config from A2AConfig.updates.
Returns:
TaskStateResult with status, result/error, history, and agent_card.
Dictionary with:
- status: "completed", "input_required", "failed", etc.
- result: Result string (if completed)
- error: Error message (if failed)
- history: List of new Message objects from this exchange
Raises:
ImportError: If a2a-sdk is not installed
"""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(
aexecute_a2a_delegation(
endpoint=endpoint,
auth=auth,
timeout=timeout,
task_description=task_description,
context=context,
context_id=context_id,
task_id=task_id,
reference_task_ids=reference_task_ids,
metadata=metadata,
extensions=extensions,
conversation_history=conversation_history,
agent_id=agent_id,
agent_role=agent_role,
agent_branch=agent_branch,
response_model=response_model,
transport_protocol=transport_protocol,
turn_number=turn_number,
updates=updates,
)
)
finally:
loop.close()
async def aexecute_a2a_delegation(
endpoint: str,
transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
auth: AuthScheme | None,
timeout: int,
task_description: str,
context: str | None = None,
context_id: str | None = None,
task_id: str | None = None,
reference_task_ids: list[str] | None = None,
metadata: dict[str, Any] | None = None,
extensions: dict[str, Any] | None = None,
conversation_history: list[Message] | None = None,
agent_id: str | None = None,
agent_role: Role | None = None,
agent_branch: Any | None = None,
response_model: type[BaseModel] | None = None,
turn_number: int | None = None,
updates: UpdateConfig | None = None,
) -> TaskStateResult:
"""Execute a task delegation to a remote A2A agent asynchronously.
Native async implementation with multi-turn support. Use this when running
in an async context (e.g., with Crew.akickoff() or agent.aexecute_task()).
Args:
endpoint: A2A agent endpoint URL
transport_protocol: Optional A2A transport protocol (grpc, jsonrpc, http+json)
auth: Optional AuthScheme for authentication
timeout: Request timeout in seconds
task_description: Task to delegate
context: Optional context
context_id: Context ID for correlation
task_id: Specific task identifier
reference_task_ids: Related task IDs
metadata: Additional metadata
extensions: Protocol extensions
conversation_history: Previous Message objects
turn_number: Current turn number
agent_branch: Agent tree branch for logging
agent_id: Agent identifier for logging
agent_role: Agent role for logging
response_model: Optional Pydantic model for structured outputs
endpoint: A2A agent endpoint URL.
auth: Optional AuthScheme for authentication.
timeout: Request timeout in seconds.
task_description: The task to delegate.
context: Optional context information.
context_id: Context ID for correlating messages/tasks.
task_id: Specific task identifier.
reference_task_ids: List of related task IDs.
metadata: Additional metadata.
extensions: Protocol extensions for custom fields.
conversation_history: Previous Message objects from conversation.
agent_id: Agent identifier for logging.
agent_role: Role of the CrewAI agent delegating the task.
agent_branch: Optional agent tree branch for logging.
response_model: Optional Pydantic model for structured outputs.
turn_number: Optional turn number for multi-turn conversations.
updates: Update mechanism config from A2AConfig.updates.
Returns:
TaskStateResult with status, result/error, history, and agent_card.
"""
if conversation_history is None:
conversation_history = []
is_multiturn = len(conversation_history) > 0
is_multiturn = bool(conversation_history and len(conversation_history) > 0)
if turn_number is None:
turn_number = len([m for m in conversation_history if m.role == Role.user]) + 1
turn_number = (
len([m for m in (conversation_history or []) if m.role == Role.user]) + 1
)
crewai_event_bus.emit(
agent_branch,
A2ADelegationStartedEvent(
@@ -431,44 +289,48 @@ async def aexecute_a2a_delegation(
),
)
result = await _aexecute_a2a_delegation_impl(
endpoint=endpoint,
auth=auth,
timeout=timeout,
task_description=task_description,
context=context,
context_id=context_id,
task_id=task_id,
reference_task_ids=reference_task_ids,
metadata=metadata,
extensions=extensions,
conversation_history=conversation_history,
is_multiturn=is_multiturn,
turn_number=turn_number,
agent_branch=agent_branch,
agent_id=agent_id,
agent_role=agent_role,
response_model=response_model,
updates=updates,
transport_protocol=transport_protocol,
)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
result = loop.run_until_complete(
_execute_a2a_delegation_async(
endpoint=endpoint,
auth=auth,
timeout=timeout,
task_description=task_description,
context=context,
context_id=context_id,
task_id=task_id,
reference_task_ids=reference_task_ids,
metadata=metadata,
extensions=extensions,
conversation_history=conversation_history or [],
is_multiturn=is_multiturn,
turn_number=turn_number,
agent_branch=agent_branch,
agent_id=agent_id,
agent_role=agent_role,
response_model=response_model,
)
)
crewai_event_bus.emit(
agent_branch,
A2ADelegationCompletedEvent(
status=result["status"],
result=result.get("result"),
error=result.get("error"),
is_multiturn=is_multiturn,
),
)
crewai_event_bus.emit(
agent_branch,
A2ADelegationCompletedEvent(
status=result["status"],
result=result.get("result"),
error=result.get("error"),
is_multiturn=is_multiturn,
),
)
return result
return result
finally:
loop.close()
async def _aexecute_a2a_delegation_impl(
async def _execute_a2a_delegation_async(
endpoint: str,
transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
auth: AuthScheme | None,
timeout: int,
task_description: str,
@@ -479,15 +341,37 @@ async def _aexecute_a2a_delegation_impl(
metadata: dict[str, Any] | None,
extensions: dict[str, Any] | None,
conversation_history: list[Message],
is_multiturn: bool,
turn_number: int,
agent_branch: Any | None,
agent_id: str | None,
agent_role: str | None,
response_model: type[BaseModel] | None,
updates: UpdateConfig | None,
) -> TaskStateResult:
"""Internal async implementation of A2A delegation."""
is_multiturn: bool = False,
turn_number: int = 1,
agent_branch: Any | None = None,
agent_id: str | None = None,
agent_role: str | None = None,
response_model: type[BaseModel] | None = None,
) -> dict[str, Any]:
"""Async implementation of A2A delegation with multi-turn support.
Args:
endpoint: A2A agent endpoint URL
auth: Optional AuthScheme for authentication
timeout: Request timeout in seconds
task_description: Task to delegate
context: Optional context
context_id: Context ID for correlation
task_id: Specific task identifier
reference_task_ids: Related task IDs
metadata: Additional metadata
extensions: Protocol extensions
conversation_history: Previous Message objects
is_multiturn: Whether this is a multi-turn conversation
turn_number: Current turn number
agent_branch: Agent tree branch for logging
agent_id: Agent identifier for logging
agent_role: Agent role for logging
response_model: Optional Pydantic model for structured outputs
Returns:
Dictionary with status, result/error, and new history
"""
if auth:
auth_data = auth.model_dump_json(
exclude={
@@ -501,7 +385,7 @@ async def _aexecute_a2a_delegation_impl(
else:
auth_hash = 0
_auth_store[auth_hash] = auth
agent_card = await _afetch_agent_card_cached(
agent_card = await _fetch_agent_card_async_cached(
endpoint=endpoint, auth_hash=auth_hash, timeout=timeout
)
@@ -562,6 +446,7 @@ async def _aexecute_a2a_delegation_impl(
extensions=extensions,
)
transport_protocol = TransportProtocol("JSONRPC")
new_messages: list[Message] = [*conversation_history, message]
crewai_event_bus.emit(
None,
@@ -573,73 +458,211 @@ async def _aexecute_a2a_delegation_impl(
),
)
handler = get_handler(updates)
use_polling = isinstance(updates, PollingConfig)
handler_kwargs: dict[str, Any] = {
"turn_number": turn_number,
"is_multiturn": is_multiturn,
"agent_role": agent_role,
"context_id": context_id,
"task_id": task_id,
"endpoint": endpoint,
"agent_branch": agent_branch,
}
if isinstance(updates, PollingConfig):
handler_kwargs.update(
{
"polling_interval": updates.interval,
"polling_timeout": updates.timeout or float(timeout),
"history_length": updates.history_length,
"max_polls": updates.max_polls,
}
)
elif isinstance(updates, PushNotificationConfig):
handler_kwargs.update(
{
"config": updates,
"result_store": updates.result_store,
"polling_timeout": updates.timeout or float(timeout),
"polling_interval": updates.interval,
}
)
push_config_for_client = (
updates if isinstance(updates, PushNotificationConfig) else None
)
use_streaming = not use_polling and push_config_for_client is None
async with _create_a2a_client(
agent_card=agent_card,
transport_protocol=transport_protocol,
timeout=timeout,
headers=headers,
streaming=use_streaming,
streaming=True,
auth=auth,
use_polling=use_polling,
push_notification_config=push_config_for_client,
) as client:
return await handler.execute(
client=client,
message=message,
new_messages=new_messages,
agent_card=agent_card,
**handler_kwargs,
)
result_parts: list[str] = []
final_result: dict[str, Any] | None = None
event_stream = client.send_message(message)
try:
async for event in event_stream:
if isinstance(event, Message):
new_messages.append(event)
for part in event.parts:
if part.root.kind == "text":
text = part.root.text
result_parts.append(text)
elif isinstance(event, tuple):
a2a_task, update = event
if isinstance(update, TaskArtifactUpdateEvent):
artifact = update.artifact
result_parts.extend(
part.root.text
for part in artifact.parts
if part.root.kind == "text"
)
is_final_update = False
if isinstance(update, TaskStatusUpdateEvent):
is_final_update = update.final
if not is_final_update and a2a_task.status.state not in [
TaskState.completed,
TaskState.input_required,
TaskState.failed,
TaskState.rejected,
TaskState.auth_required,
TaskState.canceled,
]:
continue
if a2a_task.status.state == TaskState.completed:
extracted_parts = _extract_task_result_parts(a2a_task)
result_parts.extend(extracted_parts)
if a2a_task.history:
new_messages.extend(a2a_task.history)
response_text = " ".join(result_parts) if result_parts else ""
crewai_event_bus.emit(
None,
A2AResponseReceivedEvent(
response=response_text,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="completed",
agent_role=agent_role,
),
)
final_result = {
"status": "completed",
"result": response_text,
"history": new_messages,
"agent_card": agent_card,
}
break
if a2a_task.status.state == TaskState.input_required:
if a2a_task.history:
new_messages.extend(a2a_task.history)
response_text = _extract_error_message(
a2a_task, "Additional input required"
)
if response_text and not a2a_task.history:
agent_message = Message(
role=Role.agent,
message_id=str(uuid.uuid4()),
parts=[Part(root=TextPart(text=response_text))],
context_id=a2a_task.context_id
if hasattr(a2a_task, "context_id")
else None,
task_id=a2a_task.task_id
if hasattr(a2a_task, "task_id")
else None,
)
new_messages.append(agent_message)
crewai_event_bus.emit(
None,
A2AResponseReceivedEvent(
response=response_text,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="input_required",
agent_role=agent_role,
),
)
final_result = {
"status": "input_required",
"error": response_text,
"history": new_messages,
"agent_card": agent_card,
}
break
if a2a_task.status.state in [TaskState.failed, TaskState.rejected]:
error_msg = _extract_error_message(
a2a_task, "Task failed without error message"
)
if a2a_task.history:
new_messages.extend(a2a_task.history)
final_result = {
"status": "failed",
"error": error_msg,
"history": new_messages,
}
break
if a2a_task.status.state == TaskState.auth_required:
error_msg = _extract_error_message(
a2a_task, "Authentication required"
)
final_result = {
"status": "auth_required",
"error": error_msg,
"history": new_messages,
}
break
if a2a_task.status.state == TaskState.canceled:
error_msg = _extract_error_message(
a2a_task, "Task was canceled"
)
final_result = {
"status": "canceled",
"error": error_msg,
"history": new_messages,
}
break
except Exception as e:
if isinstance(e, A2AClientHTTPError):
error_msg = f"HTTP Error {e.status_code}: {e!s}"
error_message = Message(
role=Role.agent,
message_id=str(uuid.uuid4()),
parts=[Part(root=TextPart(text=error_msg))],
context_id=context_id,
task_id=task_id,
)
new_messages.append(error_message)
crewai_event_bus.emit(
None,
A2AResponseReceivedEvent(
response=error_msg,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="failed",
agent_role=agent_role,
),
)
return {
"status": "failed",
"error": error_msg,
"history": new_messages,
}
current_exception: Exception | BaseException | None = e
while current_exception:
if hasattr(current_exception, "response"):
response = current_exception.response
if hasattr(response, "text"):
break
if current_exception and hasattr(current_exception, "__cause__"):
current_exception = current_exception.__cause__
raise
finally:
if hasattr(event_stream, "aclose"):
await event_stream.aclose()
if final_result:
return final_result
return {
"status": "completed",
"result": " ".join(result_parts) if result_parts else "",
"history": new_messages,
}
@asynccontextmanager
async def _create_a2a_client(
agent_card: AgentCard,
transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
transport_protocol: TransportProtocol,
timeout: int,
headers: MutableMapping[str, str],
streaming: bool,
auth: AuthScheme | None = None,
use_polling: bool = False,
push_notification_config: PushNotificationConfig | None = None,
) -> AsyncIterator[Client]:
"""Create and configure an A2A client.
@@ -650,8 +673,6 @@ async def _create_a2a_client(
headers: HTTP headers (already with auth applied)
streaming: Enable streaming responses
auth: Optional AuthScheme for client configuration
use_polling: Enable polling mode
push_notification_config: Optional push notification config to include in requests
Yields:
Configured A2A client instance
@@ -664,24 +685,11 @@ async def _create_a2a_client(
if auth and isinstance(auth, (HTTPDigestAuth, APIKeyAuth)):
configure_auth_client(auth, httpx_client)
push_configs: list[A2APushNotificationConfig] = []
if push_notification_config is not None:
push_configs.append(
A2APushNotificationConfig(
url=str(push_notification_config.url),
id=push_notification_config.id,
token=push_notification_config.token,
authentication=push_notification_config.authentication,
)
)
config = ClientConfig(
httpx_client=httpx_client,
supported_transports=[transport_protocol],
streaming=streaming and not use_polling,
polling=use_polling,
supported_transports=[str(transport_protocol.value)],
streaming=streaming,
accepted_output_modes=["application/json"],
push_notification_configs=push_configs,
)
factory = ClientFactory(config)
@@ -689,6 +697,66 @@ async def _create_a2a_client(
yield client
def _extract_task_result_parts(a2a_task: A2ATask) -> list[str]:
"""Extract result parts from A2A task history and artifacts.
Args:
a2a_task: A2A Task object with history and artifacts
Returns:
List of result text parts
"""
result_parts: list[str] = []
if a2a_task.history:
for history_msg in reversed(a2a_task.history):
if history_msg.role == Role.agent:
result_parts.extend(
part.root.text
for part in history_msg.parts
if part.root.kind == "text"
)
break
if a2a_task.artifacts:
result_parts.extend(
part.root.text
for artifact in a2a_task.artifacts
for part in artifact.parts
if part.root.kind == "text"
)
return result_parts
def _extract_error_message(a2a_task: A2ATask, default: str) -> str:
"""Extract error message from A2A task.
Args:
a2a_task: A2A Task object
default: Default message if no error found
Returns:
Error message string
"""
if a2a_task.status and a2a_task.status.message:
msg = a2a_task.status.message
if msg:
for part in msg.parts:
if part.root.kind == "text":
return str(part.root.text)
return str(msg)
if a2a_task.history:
for history_msg in reversed(a2a_task.history):
for part in history_msg.parts:
if part.root.kind == "text":
return str(part.root.text)
return default
def create_agent_response_model(agent_ids: tuple[str, ...]) -> type[BaseModel]:
"""Create a dynamic AgentResponse model with Literal types for agent IDs.
@@ -720,7 +788,7 @@ def create_agent_response_model(agent_ids: tuple[str, ...]) -> type[BaseModel]:
is_a2a=(
bool,
Field(
description="Set to false when the remote agent has answered your question - extract their answer and return it as your final message. Set to true ONLY if you need to ask a NEW, DIFFERENT question. NEVER repeat the same request - if the conversation history shows the agent already answered, set is_a2a=false immediately."
description="Set to true to continue the conversation by sending this message to the A2A agent and awaiting their response. Set to false ONLY when you are completely done and providing your final answer (not when asking questions)."
),
),
__base__=BaseModel,

View File

@@ -5,30 +5,25 @@ Wraps agent classes with A2A delegation capabilities.
from __future__ import annotations
import asyncio
from collections.abc import Callable, Coroutine
from collections.abc import Callable
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import wraps
from types import MethodType
from typing import TYPE_CHECKING, Any
from typing import TYPE_CHECKING, Any, cast
from a2a.types import Role, TaskState
from a2a.types import Role
from pydantic import BaseModel, ValidationError
from crewai.a2a.config import A2AConfig
from crewai.a2a.extensions.base import ExtensionRegistry
from crewai.a2a.task_helpers import TaskStateResult
from crewai.a2a.templates import (
AVAILABLE_AGENTS_TEMPLATE,
CONVERSATION_TURN_INFO_TEMPLATE,
PREVIOUS_A2A_CONVERSATION_TEMPLATE,
REMOTE_AGENT_COMPLETED_NOTICE,
UNAVAILABLE_AGENTS_NOTICE_TEMPLATE,
)
from crewai.a2a.types import AgentResponseProtocol
from crewai.a2a.utils import (
aexecute_a2a_delegation,
afetch_agent_card,
execute_a2a_delegation,
fetch_agent_card,
get_a2a_agents_and_response_model,
@@ -51,15 +46,15 @@ if TYPE_CHECKING:
def wrap_agent_with_a2a_instance(
agent: Agent, extension_registry: ExtensionRegistry | None = None
) -> None:
"""Wrap an agent instance's execute_task and aexecute_task methods with A2A support.
"""Wrap an agent instance's execute_task method with A2A support.
This function modifies the agent instance by wrapping its execute_task
and aexecute_task methods to add A2A delegation capabilities. Should only
be called when the agent has a2a configuration set.
method to add A2A delegation capabilities. Should only be called when
the agent has a2a configuration set.
Args:
agent: The agent instance to wrap.
extension_registry: Optional registry of A2A extensions.
agent: The agent instance to wrap
extension_registry: Optional registry of A2A extensions for injecting tools and custom logic
"""
if extension_registry is None:
extension_registry = ExtensionRegistry()
@@ -67,7 +62,6 @@ def wrap_agent_with_a2a_instance(
extension_registry.inject_all_tools(agent)
original_execute_task = agent.execute_task.__func__ # type: ignore[attr-defined]
original_aexecute_task = agent.aexecute_task.__func__ # type: ignore[attr-defined]
@wraps(original_execute_task)
def execute_task_with_a2a(
@@ -76,7 +70,17 @@ def wrap_agent_with_a2a_instance(
context: str | None = None,
tools: list[BaseTool] | None = None,
) -> str:
"""Execute task with A2A delegation support (sync)."""
"""Execute task with A2A delegation support.
Args:
self: The agent instance
task: The task to execute
context: Optional context for task execution
tools: Optional tools available to the agent
Returns:
Task execution result
"""
if not self.a2a:
return original_execute_task(self, task, context, tools) # type: ignore[no-any-return]
@@ -93,34 +97,7 @@ def wrap_agent_with_a2a_instance(
extension_registry=extension_registry,
)
@wraps(original_aexecute_task)
async def aexecute_task_with_a2a(
self: Agent,
task: Task,
context: str | None = None,
tools: list[BaseTool] | None = None,
) -> str:
"""Execute task with A2A delegation support (async)."""
if not self.a2a:
return await original_aexecute_task(self, task, context, tools) # type: ignore[no-any-return]
a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)
return await _aexecute_task_with_a2a(
self=self,
a2a_agents=a2a_agents,
original_fn=original_aexecute_task,
task=task,
agent_response_model=agent_response_model,
context=context,
tools=tools,
extension_registry=extension_registry,
)
object.__setattr__(agent, "execute_task", MethodType(execute_task_with_a2a, agent))
object.__setattr__(
agent, "aexecute_task", MethodType(aexecute_task_with_a2a, agent)
)
def _fetch_card_from_config(
@@ -278,7 +255,6 @@ def _augment_prompt_with_a2a(
max_turns: int | None = None,
failed_agents: dict[str, str] | None = None,
extension_registry: ExtensionRegistry | None = None,
remote_task_completed: bool = False,
) -> tuple[str, bool]:
"""Add A2A delegation instructions to prompt.
@@ -351,15 +327,12 @@ def _augment_prompt_with_a2a(
warning=warning,
)
completion_notice = ""
if remote_task_completed and conversation_history:
completion_notice = REMOTE_AGENT_COMPLETED_NOTICE
augmented_prompt = f"""{task_description}
IMPORTANT: You have the ability to delegate this task to remote A2A agents.
{agents_text}
{history_text}{turn_info}{completion_notice}
{history_text}{turn_info}
"""
@@ -373,8 +346,16 @@ IMPORTANT: You have the ability to delegate this task to remote A2A agents.
def _parse_agent_response(
raw_result: str | dict[str, Any], agent_response_model: type[BaseModel]
) -> BaseModel | str | dict[str, Any]:
"""Parse LLM output as AgentResponse or return raw agent response."""
) -> BaseModel | str:
"""Parse LLM output as AgentResponse or return raw agent response.
Args:
raw_result: Raw output from LLM
agent_response_model: The agent response model
Returns:
Parsed AgentResponse or string
"""
if agent_response_model:
try:
if isinstance(raw_result, str):
@@ -382,70 +363,71 @@ def _parse_agent_response(
if isinstance(raw_result, dict):
return agent_response_model.model_validate(raw_result)
except ValidationError:
return raw_result
return raw_result
return cast(str, raw_result)
return cast(str, raw_result)
def _handle_max_turns_exceeded(
def _handle_agent_response_and_continue(
self: Agent,
a2a_result: dict[str, Any],
agent_id: str,
agent_cards: dict[str, AgentCard] | None,
a2a_agents: list[A2AConfig],
original_task_description: str,
conversation_history: list[Message],
max_turns: int,
) -> str:
"""Handle the case when max turns is exceeded.
Shared logic for both sync and async delegation.
Returns:
Final message if found in history.
Raises:
Exception: If no final message found and max turns exceeded.
"""
if conversation_history:
for msg in reversed(conversation_history):
if msg.role == Role.agent:
text_parts = [
part.root.text for part in msg.parts if part.root.kind == "text"
]
final_message = (
" ".join(text_parts) if text_parts else "Conversation completed"
)
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="completed",
final_result=final_message,
error=None,
total_turns=max_turns,
),
)
return final_message
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="failed",
final_result=None,
error=f"Conversation exceeded maximum turns ({max_turns})",
total_turns=max_turns,
),
)
raise Exception(f"A2A conversation exceeded maximum turns ({max_turns})")
def _process_response_result(
raw_result: str,
disable_structured_output: bool,
turn_num: int,
agent_role: str,
max_turns: int,
task: Task,
original_fn: Callable[..., str],
context: str | None,
tools: list[BaseTool] | None,
agent_response_model: type[BaseModel],
) -> tuple[str | None, str | None]:
"""Process LLM response and determine next action.
"""Handle A2A result and get CrewAI agent's response.
Shared logic for both sync and async handlers.
Args:
self: The agent instance
a2a_result: Result from A2A delegation
agent_id: ID of the A2A agent
agent_cards: Pre-fetched agent cards
a2a_agents: List of A2A configurations
original_task_description: Original task description
conversation_history: Conversation history
turn_num: Current turn number
max_turns: Maximum turns allowed
task: The task being executed
original_fn: Original execute_task method
context: Optional context
tools: Optional tools
agent_response_model: Response model for parsing
Returns:
Tuple of (final_result, next_request).
Tuple of (final_result, current_request) where:
- final_result is not None if conversation should end
- current_request is the next message to send if continuing
"""
agent_cards_dict = agent_cards or {}
if "agent_card" in a2a_result and agent_id not in agent_cards_dict:
agent_cards_dict[agent_id] = a2a_result["agent_card"]
task.description, disable_structured_output = _augment_prompt_with_a2a(
a2a_agents=a2a_agents,
task_description=original_task_description,
conversation_history=conversation_history,
turn_num=turn_num,
max_turns=max_turns,
agent_cards=agent_cards_dict,
)
original_response_model = task.response_model
if disable_structured_output:
task.response_model = None
raw_result = original_fn(self, task, context, tools)
if disable_structured_output:
task.response_model = original_response_model
if disable_structured_output:
final_turn_number = turn_num + 1
result_text = str(raw_result)
@@ -455,7 +437,7 @@ def _process_response_result(
message=result_text,
turn_number=final_turn_number,
is_multiturn=True,
agent_role=agent_role,
agent_role=self.role,
),
)
crewai_event_bus.emit(
@@ -484,7 +466,7 @@ def _process_response_result(
message=str(llm_response.message),
turn_number=final_turn_number,
is_multiturn=True,
agent_role=agent_role,
agent_role=self.role,
),
)
crewai_event_bus.emit(
@@ -502,200 +484,6 @@ def _process_response_result(
return str(raw_result), None
def _prepare_agent_cards_dict(
a2a_result: TaskStateResult,
agent_id: str,
agent_cards: dict[str, AgentCard] | None,
) -> dict[str, AgentCard]:
"""Prepare agent cards dictionary from result and existing cards.
Shared logic for both sync and async response handlers.
"""
agent_cards_dict = agent_cards or {}
if "agent_card" in a2a_result and agent_id not in agent_cards_dict:
agent_cards_dict[agent_id] = a2a_result["agent_card"]
return agent_cards_dict
def _prepare_delegation_context(
self: Agent,
agent_response: AgentResponseProtocol,
task: Task,
original_task_description: str | None,
) -> tuple[
list[A2AConfig],
type[BaseModel],
str,
str,
A2AConfig,
str | None,
str | None,
dict[str, Any] | None,
dict[str, Any] | None,
list[str],
str,
int,
]:
"""Prepare delegation context from agent response and task.
Shared logic for both sync and async delegation.
Returns:
Tuple containing all the context values needed for delegation.
"""
a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)
agent_ids = tuple(config.endpoint for config in a2a_agents)
current_request = str(agent_response.message)
if hasattr(agent_response, "a2a_ids") and agent_response.a2a_ids:
agent_id = agent_response.a2a_ids[0]
else:
agent_id = agent_ids[0] if agent_ids else ""
if agent_id and agent_id not in agent_ids:
raise ValueError(
f"Unknown A2A agent ID(s): {agent_response.a2a_ids} not in {agent_ids}"
)
agent_config = next(filter(lambda x: x.endpoint == agent_id, a2a_agents))
task_config = task.config or {}
context_id = task_config.get("context_id")
task_id_config = task_config.get("task_id")
metadata = task_config.get("metadata")
extensions = task_config.get("extensions")
reference_task_ids = task_config.get("reference_task_ids", [])
if original_task_description is None:
original_task_description = task.description
max_turns = agent_config.max_turns
return (
a2a_agents,
agent_response_model,
current_request,
agent_id,
agent_config,
context_id,
task_id_config,
metadata,
extensions,
reference_task_ids,
original_task_description,
max_turns,
)
def _handle_task_completion(
a2a_result: TaskStateResult,
task: Task,
task_id_config: str | None,
reference_task_ids: list[str],
agent_config: A2AConfig,
turn_num: int,
) -> tuple[str | None, str | None, list[str]]:
"""Handle task completion state including reference task updates.
Shared logic for both sync and async delegation.
Returns:
Tuple of (result_if_trusted, updated_task_id, updated_reference_task_ids).
"""
if a2a_result["status"] == TaskState.completed:
if task_id_config is not None and task_id_config not in reference_task_ids:
reference_task_ids.append(task_id_config)
if task.config is None:
task.config = {}
task.config["reference_task_ids"] = reference_task_ids
task_id_config = None
if agent_config.trust_remote_completion_status:
result_text = a2a_result.get("result", "")
final_turn_number = turn_num + 1
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="completed",
final_result=result_text,
error=None,
total_turns=final_turn_number,
),
)
return str(result_text), task_id_config, reference_task_ids
return None, task_id_config, reference_task_ids
def _handle_agent_response_and_continue(
self: Agent,
a2a_result: TaskStateResult,
agent_id: str,
agent_cards: dict[str, AgentCard] | None,
a2a_agents: list[A2AConfig],
original_task_description: str,
conversation_history: list[Message],
turn_num: int,
max_turns: int,
task: Task,
original_fn: Callable[..., str],
context: str | None,
tools: list[BaseTool] | None,
agent_response_model: type[BaseModel],
remote_task_completed: bool = False,
) -> tuple[str | None, str | None]:
"""Handle A2A result and get CrewAI agent's response.
Args:
self: The agent instance
a2a_result: Result from A2A delegation
agent_id: ID of the A2A agent
agent_cards: Pre-fetched agent cards
a2a_agents: List of A2A configurations
original_task_description: Original task description
conversation_history: Conversation history
turn_num: Current turn number
max_turns: Maximum turns allowed
task: The task being executed
original_fn: Original execute_task method
context: Optional context
tools: Optional tools
agent_response_model: Response model for parsing
Returns:
Tuple of (final_result, current_request) where:
- final_result is not None if conversation should end
- current_request is the next message to send if continuing
"""
agent_cards_dict = _prepare_agent_cards_dict(a2a_result, agent_id, agent_cards)
task.description, disable_structured_output = _augment_prompt_with_a2a(
a2a_agents=a2a_agents,
task_description=original_task_description,
conversation_history=conversation_history,
turn_num=turn_num,
max_turns=max_turns,
agent_cards=agent_cards_dict,
remote_task_completed=remote_task_completed,
)
original_response_model = task.response_model
if disable_structured_output:
task.response_model = None
raw_result = original_fn(self, task, context, tools)
if disable_structured_output:
task.response_model = original_response_model
return _process_response_result(
raw_result=raw_result,
disable_structured_output=disable_structured_output,
turn_num=turn_num,
agent_role=self.role,
agent_response_model=agent_response_model,
)
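The (final_result, next_request) pair returned above drives the delegation loops below. A minimal runnable sketch of that caller contract, with handle_turn as a hypothetical stand-in for _handle_agent_response_and_continue:

def handle_turn(request: str, turn_num: int) -> tuple[str | None, str | None]:
    # Hypothetical: pretend the remote agent asks one clarification, then finishes.
    if turn_num == 0:
        return None, f"clarified: {request}"
    return f"done: {request}", None

def run_delegation(current_request: str, max_turns: int = 10) -> str:
    for turn_num in range(max_turns):
        final_result, next_request = handle_turn(current_request, turn_num)
        if final_result is not None:
            return final_result             # conversation finished
        if next_request is not None:
            current_request = next_request  # send the follow-up on the next turn
    raise RuntimeError(f"exceeded {max_turns} turns")

print(run_delegation("summarize the report"))  # done: clarified: summarize the report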
def _delegate_to_a2a(
self: Agent,
agent_response: AgentResponseProtocol,
@@ -726,24 +514,34 @@ def _delegate_to_a2a(
Raises:
ImportError: If a2a-sdk is not installed
"""
(
a2a_agents,
agent_response_model,
current_request,
agent_id,
agent_config,
context_id,
task_id_config,
metadata,
extensions,
reference_task_ids,
original_task_description,
max_turns,
) = _prepare_delegation_context(
self, agent_response, task, original_task_description
)
a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)
agent_ids = tuple(config.endpoint for config in a2a_agents)
current_request = str(agent_response.message)
if hasattr(agent_response, "a2a_ids") and agent_response.a2a_ids:
agent_id = agent_response.a2a_ids[0]
else:
agent_id = agent_ids[0] if agent_ids else ""
if agent_id and agent_id not in agent_ids:
raise ValueError(
f"Unknown A2A agent ID(s): {agent_response.a2a_ids} not in {agent_ids}"
)
agent_config = next(filter(lambda x: x.endpoint == agent_id, a2a_agents))
task_config = task.config or {}
context_id = task_config.get("context_id")
task_id_config = task_config.get("task_id")
metadata = task_config.get("metadata")
extensions = task_config.get("extensions")
reference_task_ids = task_config.get("reference_task_ids", [])
if original_task_description is None:
original_task_description = task.description
conversation_history: list[Message] = []
max_turns = agent_config.max_turns
try:
for turn_num in range(max_turns):
@@ -770,8 +568,6 @@ def _delegate_to_a2a(
agent_branch=agent_branch,
response_model=agent_config.response_model,
turn_number=turn_num + 1,
updates=agent_config.updates,
transport_protocol=agent_config.transport_protocol,
)
conversation_history = a2a_result.get("history", [])
@@ -783,19 +579,32 @@ def _delegate_to_a2a(
if latest_message.context_id is not None:
context_id = latest_message.context_id
if a2a_result["status"] in [TaskState.completed, TaskState.input_required]:
trusted_result, task_id_config, reference_task_ids = (
_handle_task_completion(
a2a_result,
task,
task_id_config,
reference_task_ids,
agent_config,
turn_num,
if a2a_result["status"] in ["completed", "input_required"]:
if (
a2a_result["status"] == "completed"
and agent_config.trust_remote_completion_status
):
if (
task_id_config is not None
and task_id_config not in reference_task_ids
):
reference_task_ids.append(task_id_config)
if task.config is None:
task.config = {}
task.config["reference_task_ids"] = reference_task_ids
result_text = a2a_result.get("result", "")
final_turn_number = turn_num + 1
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="completed",
final_result=result_text,
error=None,
total_turns=final_turn_number,
),
)
)
if trusted_result is not None:
return trusted_result
return cast(str, result_text)
final_result, next_request = _handle_agent_response_and_continue(
self=self,
@@ -812,7 +621,6 @@ def _delegate_to_a2a(
context=context,
tools=tools,
agent_response_model=agent_response_model,
remote_task_completed=(a2a_result["status"] == TaskState.completed),
)
if final_result is not None:
@@ -840,7 +648,6 @@ def _delegate_to_a2a(
context=context,
tools=tools,
agent_response_model=agent_response_model,
remote_task_completed=False,
)
if final_result is not None:
@@ -861,322 +668,36 @@ def _delegate_to_a2a(
)
return f"A2A delegation failed: {error_msg}"
return _handle_max_turns_exceeded(conversation_history, max_turns)
finally:
task.description = original_task_description
async def _afetch_card_from_config(
config: A2AConfig,
) -> tuple[A2AConfig, AgentCard | Exception]:
"""Fetch agent card from A2A config asynchronously."""
try:
card = await afetch_agent_card(
endpoint=config.endpoint,
auth=config.auth,
timeout=config.timeout,
)
return config, card
except Exception as e:
return config, e
async def _afetch_agent_cards_concurrently(
a2a_agents: list[A2AConfig],
) -> tuple[dict[str, AgentCard], dict[str, str]]:
"""Fetch agent cards concurrently for multiple A2A agents using asyncio."""
agent_cards: dict[str, AgentCard] = {}
failed_agents: dict[str, str] = {}
tasks = [_afetch_card_from_config(config) for config in a2a_agents]
results = await asyncio.gather(*tasks)
for config, result in results:
if isinstance(result, Exception):
if config.fail_fast:
raise RuntimeError(
f"Failed to fetch agent card from {config.endpoint}. "
f"Ensure the A2A agent is running and accessible. Error: {result}"
) from result
failed_agents[config.endpoint] = str(result)
else:
agent_cards[config.endpoint] = result
return agent_cards, failed_agents
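A minimal usage sketch for the concurrent fetch above, assuming A2AConfig can be constructed from just an endpoint (auth/timeout/fail_fast left at their defaults) and using made-up endpoints:

import asyncio

async def main() -> None:
    configs = [
        A2AConfig(endpoint="http://localhost:9999"),  # hypothetical agents
        A2AConfig(endpoint="http://localhost:8888"),
    ]
    cards, failed = await _afetch_agent_cards_concurrently(configs)
    # Unreachable agents land in `failed` (endpoint -> error string) unless fail_fast raised.
    print(sorted(cards), failed)

asyncio.run(main())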
async def _aexecute_task_with_a2a(
self: Agent,
a2a_agents: list[A2AConfig],
original_fn: Callable[..., Coroutine[Any, Any, str]],
task: Task,
agent_response_model: type[BaseModel],
context: str | None,
tools: list[BaseTool] | None,
extension_registry: ExtensionRegistry,
) -> str:
"""Async version of _execute_task_with_a2a."""
original_description: str = task.description
original_output_pydantic = task.output_pydantic
original_response_model = task.response_model
agent_cards, failed_agents = await _afetch_agent_cards_concurrently(a2a_agents)
if not agent_cards and a2a_agents and failed_agents:
unavailable_agents_text = ""
for endpoint, error in failed_agents.items():
unavailable_agents_text += f" - {endpoint}: {error}\n"
notice = UNAVAILABLE_AGENTS_NOTICE_TEMPLATE.substitute(
unavailable_agents=unavailable_agents_text
)
task.description = f"{original_description}{notice}"
try:
return await original_fn(self, task, context, tools)
finally:
task.description = original_description
task.description, _ = _augment_prompt_with_a2a(
a2a_agents=a2a_agents,
task_description=original_description,
agent_cards=agent_cards,
failed_agents=failed_agents,
extension_registry=extension_registry,
)
task.response_model = agent_response_model
try:
raw_result = await original_fn(self, task, context, tools)
agent_response = _parse_agent_response(
raw_result=raw_result, agent_response_model=agent_response_model
)
if extension_registry and isinstance(agent_response, BaseModel):
agent_response = extension_registry.process_response_with_all(
agent_response, {}
)
if isinstance(agent_response, BaseModel) and isinstance(
agent_response, AgentResponseProtocol
):
if agent_response.is_a2a:
return await _adelegate_to_a2a(
self,
agent_response=agent_response,
task=task,
original_fn=original_fn,
context=context,
tools=tools,
agent_cards=agent_cards,
original_task_description=original_description,
extension_registry=extension_registry,
)
return str(agent_response.message)
return raw_result
finally:
task.description = original_description
task.output_pydantic = original_output_pydantic
task.response_model = original_response_model
async def _ahandle_agent_response_and_continue(
self: Agent,
a2a_result: TaskStateResult,
agent_id: str,
agent_cards: dict[str, AgentCard] | None,
a2a_agents: list[A2AConfig],
original_task_description: str,
conversation_history: list[Message],
turn_num: int,
max_turns: int,
task: Task,
original_fn: Callable[..., Coroutine[Any, Any, str]],
context: str | None,
tools: list[BaseTool] | None,
agent_response_model: type[BaseModel],
remote_task_completed: bool = False,
) -> tuple[str | None, str | None]:
"""Async version of _handle_agent_response_and_continue."""
agent_cards_dict = _prepare_agent_cards_dict(a2a_result, agent_id, agent_cards)
task.description, disable_structured_output = _augment_prompt_with_a2a(
a2a_agents=a2a_agents,
task_description=original_task_description,
conversation_history=conversation_history,
turn_num=turn_num,
max_turns=max_turns,
agent_cards=agent_cards_dict,
remote_task_completed=remote_task_completed,
)
original_response_model = task.response_model
if disable_structured_output:
task.response_model = None
raw_result = await original_fn(self, task, context, tools)
if disable_structured_output:
task.response_model = original_response_model
return _process_response_result(
raw_result=raw_result,
disable_structured_output=disable_structured_output,
turn_num=turn_num,
agent_role=self.role,
agent_response_model=agent_response_model,
)
async def _adelegate_to_a2a(
self: Agent,
agent_response: AgentResponseProtocol,
task: Task,
original_fn: Callable[..., Coroutine[Any, Any, str]],
context: str | None,
tools: list[BaseTool] | None,
agent_cards: dict[str, AgentCard] | None = None,
original_task_description: str | None = None,
extension_registry: ExtensionRegistry | None = None,
) -> str:
"""Async version of _delegate_to_a2a."""
(
a2a_agents,
agent_response_model,
current_request,
agent_id,
agent_config,
context_id,
task_id_config,
metadata,
extensions,
reference_task_ids,
original_task_description,
max_turns,
) = _prepare_delegation_context(
self, agent_response, task, original_task_description
)
conversation_history: list[Message] = []
try:
for turn_num in range(max_turns):
console_formatter = getattr(crewai_event_bus, "_console", None)
agent_branch = None
if console_formatter:
agent_branch = getattr(
console_formatter, "current_agent_branch", None
) or getattr(console_formatter, "current_task_branch", None)
a2a_result = await aexecute_a2a_delegation(
endpoint=agent_config.endpoint,
auth=agent_config.auth,
timeout=agent_config.timeout,
task_description=current_request,
context_id=context_id,
task_id=task_id_config,
reference_task_ids=reference_task_ids,
metadata=metadata,
extensions=extensions,
conversation_history=conversation_history,
agent_id=agent_id,
agent_role=Role.user,
agent_branch=agent_branch,
response_model=agent_config.response_model,
turn_number=turn_num + 1,
transport_protocol=agent_config.transport_protocol,
updates=agent_config.updates,
)
conversation_history = a2a_result.get("history", [])
if conversation_history:
latest_message = conversation_history[-1]
if latest_message.task_id is not None:
task_id_config = latest_message.task_id
if latest_message.context_id is not None:
context_id = latest_message.context_id
if a2a_result["status"] in [TaskState.completed, TaskState.input_required]:
trusted_result, task_id_config, reference_task_ids = (
_handle_task_completion(
a2a_result,
task,
task_id_config,
reference_task_ids,
agent_config,
turn_num,
if conversation_history:
for msg in reversed(conversation_history):
if msg.role == Role.agent:
text_parts = [
part.root.text for part in msg.parts if part.root.kind == "text"
]
final_message = (
" ".join(text_parts) if text_parts else "Conversation completed"
)
)
if trusted_result is not None:
return trusted_result
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="completed",
final_result=final_message,
error=None,
total_turns=max_turns,
),
)
return final_message
final_result, next_request = await _ahandle_agent_response_and_continue(
self=self,
a2a_result=a2a_result,
agent_id=agent_id,
agent_cards=agent_cards,
a2a_agents=a2a_agents,
original_task_description=original_task_description,
conversation_history=conversation_history,
turn_num=turn_num,
max_turns=max_turns,
task=task,
original_fn=original_fn,
context=context,
tools=tools,
agent_response_model=agent_response_model,
remote_task_completed=(a2a_result["status"] == TaskState.completed),
)
if final_result is not None:
return final_result
if next_request is not None:
current_request = next_request
continue
error_msg = a2a_result.get("error", "Unknown error")
final_result, next_request = await _ahandle_agent_response_and_continue(
self=self,
a2a_result=a2a_result,
agent_id=agent_id,
agent_cards=agent_cards,
a2a_agents=a2a_agents,
original_task_description=original_task_description,
conversation_history=conversation_history,
turn_num=turn_num,
max_turns=max_turns,
task=task,
original_fn=original_fn,
context=context,
tools=tools,
agent_response_model=agent_response_model,
)
if final_result is not None:
return final_result
if next_request is not None:
current_request = next_request
continue
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="failed",
final_result=None,
error=error_msg,
total_turns=turn_num + 1,
),
)
return f"A2A delegation failed: {error_msg}"
return _handle_max_turns_exceeded(conversation_history, max_turns)
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="failed",
final_result=None,
error=f"Conversation exceeded maximum turns ({max_turns})",
total_turns=max_turns,
),
)
raise Exception(f"A2A conversation exceeded maximum turns ({max_turns})")
finally:
task.description = original_task_description

View File

@@ -709,17 +709,9 @@ class Agent(BaseAgent):
raw_tools: list[BaseTool] = tools or self.tools or []
parsed_tools = parse_tools(raw_tools)
use_native_tool_calling = (
hasattr(self.llm, "supports_function_calling")
and callable(getattr(self.llm, "supports_function_calling", None))
and self.llm.supports_function_calling()
and len(raw_tools) > 0
)
prompt = Prompts(
agent=self,
has_tools=len(raw_tools) > 0,
use_native_tool_calling=use_native_tool_calling,
i18n=self.i18n,
use_system_prompt=self.use_system_prompt,
system_template=self.system_template,
@@ -727,8 +719,6 @@ class Agent(BaseAgent):
response_template=self.response_template,
).task_execution()
print("prompt", prompt)
stop_words = [self.i18n.slice("observation")]
if self.response_template:

View File

@@ -236,30 +236,14 @@ def process_tool_results(agent: Agent, result: Any) -> Any:
def save_last_messages(agent: Agent) -> None:
"""Save the last messages from agent executor.
Sanitizes messages to be compatible with TaskOutput's LLMMessage type,
which only accepts 'user', 'assistant', 'system' roles and requires
content to be a string or list (not None).
Args:
agent: The agent instance.
"""
if not agent.agent_executor or not hasattr(agent.agent_executor, "messages"):
agent._last_messages = []
return
sanitized_messages = []
for msg in agent.agent_executor.messages:
role = msg.get("role", "")
# Only include messages with valid LLMMessage roles
if role not in ("user", "assistant", "system"):
continue
# Ensure content is not None (can happen with tool call assistant messages)
content = msg.get("content")
if content is None:
content = ""
sanitized_messages.append({"role": role, "content": content})
agent._last_messages = sanitized_messages
agent._last_messages = (
agent.agent_executor.messages.copy()
if agent.agent_executor and hasattr(agent.agent_executor, "messages")
else []
)
def prepare_tools(
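A standalone sketch of the sanitization rules above, applied to made-up messages:

msgs = [
    {"role": "assistant", "content": None},  # tool-call turn: None content becomes ""
    {"role": "tool", "content": "42"},       # dropped: not a valid LLMMessage role
    {"role": "user", "content": "hi"},
]
sanitized = [
    {"role": m["role"], "content": m["content"] if m["content"] is not None else ""}
    for m in msgs
    if m["role"] in ("user", "assistant", "system")
]
assert sanitized == [
    {"role": "assistant", "content": ""},
    {"role": "user", "content": "hi"},
]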

View File

@@ -26,3 +26,9 @@ ACTION_REGEX: Final[re.Pattern[str]] = re.compile(
ACTION_INPUT_ONLY_REGEX: Final[re.Pattern[str]] = re.compile(
r"\s*Action\s*\d*\s*Input\s*\d*\s*:\s*(.*)", re.DOTALL
)
# Regex to match "Action: None" or similar non-action values (None, N/A, etc.).
# Group 1 captures the non-action value; group 2 captures any text that follows it.
ACTION_NONE_REGEX: Final[re.Pattern[str]] = re.compile(
r"Action\s*\d*\s*:\s*(none|n/a|na|no action|no_action)(?:\s*[-:(]?\s*(.*))?",
re.IGNORECASE | re.DOTALL,
)
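A quick check of how the two capture groups behave (sample text is made up):

m = ACTION_NONE_REGEX.search("Thought: nothing to do.\nAction: N/A - no tool is required")
assert m is not None
assert m.group(1).lower() == "n/a"          # the non-action token
assert m.group(2) == "no tool is required"  # trailing text after the separator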

View File

@@ -10,7 +10,7 @@ from collections.abc import Callable
import logging
from typing import TYPE_CHECKING, Any, Literal, cast
from pydantic import BaseModel, GetCoreSchemaHandler, ValidationError
from pydantic import BaseModel, GetCoreSchemaHandler
from pydantic_core import CoreSchema, core_schema
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
@@ -30,7 +30,6 @@ from crewai.hooks.llm_hooks import (
)
from crewai.utilities.agent_utils import (
aget_llm_response,
convert_tools_to_openai_schema,
enforce_rpm_limit,
format_message_for_llm,
get_llm_response,
@@ -216,33 +215,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
def _invoke_loop(self) -> AgentFinish:
"""Execute agent loop until completion.
Checks if the LLM supports native function calling and uses that
approach if available, otherwise falls back to the ReAct text pattern.
Returns:
Final answer from the agent.
"""
# Check if model supports native function calling
use_native_tools = (
hasattr(self.llm, "supports_function_calling")
and callable(getattr(self.llm, "supports_function_calling", None))
and self.llm.supports_function_calling()
and self.original_tools
)
if use_native_tools:
return self._invoke_loop_native_tools()
# Fall back to ReAct text-based pattern
return self._invoke_loop_react()
def _invoke_loop_react(self) -> AgentFinish:
"""Execute agent loop using ReAct text-based pattern.
This is the traditional approach where tool definitions are embedded
in the prompt and the LLM outputs Action/Action Input text that is
parsed to execute tools.
Returns:
Final answer from the agent.
"""
@@ -272,24 +244,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
response_model=self.response_model,
executor_context=self,
)
print("--------------------------------")
print("get_llm_response answer", answer)
print("--------------------------------")
# breakpoint()
if self.response_model is not None:
try:
self.response_model.model_validate_json(answer)
formatted_answer = AgentFinish(
thought="",
output=answer,
text=answer,
)
except ValidationError:
formatted_answer = process_llm_response(
answer, self.use_stop_words
) # type: ignore[assignment]
else:
formatted_answer = process_llm_response(answer, self.use_stop_words) # type: ignore[assignment]
formatted_answer = process_llm_response(answer, self.use_stop_words) # type: ignore[assignment]
if isinstance(formatted_answer, AgentAction):
# Extract agent fingerprint if available
@@ -323,7 +278,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
)
self._invoke_step_callback(formatted_answer) # type: ignore[arg-type]
self._append_message(formatted_answer.text) # type: ignore[union-attr]
self._append_message(formatted_answer.text) # type: ignore[union-attr,attr-defined]
except OutputParserError as e:
formatted_answer = handle_output_parser_exception( # type: ignore[assignment]
@@ -365,338 +320,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self._show_logs(formatted_answer)
return formatted_answer
def _invoke_loop_native_tools(self) -> AgentFinish:
"""Execute agent loop using native function calling.
This method uses the LLM's native tool/function calling capability
instead of the text-based ReAct pattern. The LLM directly returns
structured tool calls which are executed and results fed back.
Returns:
Final answer from the agent.
"""
print("--------------------------------")
print("invoke_loop_native_tools")
print("--------------------------------")
# Convert tools to OpenAI schema format
if not self.original_tools:
# No tools available, fall back to simple LLM call
return self._invoke_loop_native_no_tools()
openai_tools, available_functions = convert_tools_to_openai_schema(
self.original_tools
)
while True:
try:
if has_reached_max_iterations(self.iterations, self.max_iter):
formatted_answer = handle_max_iterations_exceeded(
None,
printer=self._printer,
i18n=self._i18n,
messages=self.messages,
llm=self.llm,
callbacks=self.callbacks,
)
self._show_logs(formatted_answer)
return formatted_answer
enforce_rpm_limit(self.request_within_rpm_limit)
# Debug: Show messages being sent to LLM
print("--------------------------------")
print(f"Messages count: {len(self.messages)}")
for i, msg in enumerate(self.messages):
role = msg.get("role", "unknown")
content = msg.get("content", "")
if content:
preview = (
content[:200] + "..." if len(content) > 200 else content
)
else:
preview = "(no content)"
print(f" [{i}] {role}: {preview}")
print("--------------------------------")
# Call LLM with native tools
# Pass available_functions=None so the LLM returns tool_calls
# without executing them. The executor handles tool execution
# via _handle_native_tool_calls to properly manage message history.
answer = get_llm_response(
llm=self.llm,
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
tools=openai_tools,
available_functions=None,
from_task=self.task,
from_agent=self.agent,
response_model=self.response_model,
executor_context=self,
)
print("--------------------------------")
print("invoke_loop_native_tools answer", answer)
print("--------------------------------")
# print("get_llm_response answer", answer[:500] + "...")
# Check if the response is a list of tool calls
if (
isinstance(answer, list)
and answer
and self._is_tool_call_list(answer)
):
# Handle tool calls - execute tools and add results to messages
self._handle_native_tool_calls(answer, available_functions)
# Continue loop to let LLM analyze results and decide next steps
continue
# Text or other response - handle as potential final answer
if isinstance(answer, str):
# Text response - this is the final answer
formatted_answer = AgentFinish(
thought="",
output=answer,
text=answer,
)
self._invoke_step_callback(formatted_answer)
self._append_message(answer) # Save final answer to messages
self._show_logs(formatted_answer)
return formatted_answer
# Unexpected response type, treat as final answer
formatted_answer = AgentFinish(
thought="",
output=str(answer),
text=str(answer),
)
self._invoke_step_callback(formatted_answer)
self._append_message(str(answer)) # Save final answer to messages
self._show_logs(formatted_answer)
return formatted_answer
except Exception as e:
if e.__class__.__module__.startswith("litellm"):
raise e
if is_context_length_exceeded(e):
handle_context_length(
respect_context_window=self.respect_context_window,
printer=self._printer,
messages=self.messages,
llm=self.llm,
callbacks=self.callbacks,
i18n=self._i18n,
)
continue
handle_unknown_error(self._printer, e)
raise e
finally:
self.iterations += 1
def _invoke_loop_native_no_tools(self) -> AgentFinish:
"""Execute a simple LLM call when no tools are available.
Returns:
Final answer from the agent.
"""
enforce_rpm_limit(self.request_within_rpm_limit)
answer = get_llm_response(
llm=self.llm,
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
from_task=self.task,
from_agent=self.agent,
response_model=self.response_model,
executor_context=self,
)
formatted_answer = AgentFinish(
thought="",
output=str(answer),
text=str(answer),
)
self._show_logs(formatted_answer)
return formatted_answer
def _is_tool_call_list(self, response: list[Any]) -> bool:
"""Check if a response is a list of tool calls.
Args:
response: The response to check.
Returns:
True if the response appears to be a list of tool calls.
"""
if not response:
return False
first_item = response[0]
# OpenAI-style
if hasattr(first_item, "function") or (
isinstance(first_item, dict) and "function" in first_item
):
return True
# Anthropic-style
if (
hasattr(first_item, "type")
and getattr(first_item, "type", None) == "tool_use"
):
return True
if hasattr(first_item, "name") and hasattr(first_item, "input"):
return True
# Gemini-style
if hasattr(first_item, "function_call") and first_item.function_call:
return True
return False
def _handle_native_tool_calls(
self,
tool_calls: list[Any],
available_functions: dict[str, Callable[..., Any]],
) -> None:
"""Handle a single native tool call from the LLM.
Executes only the FIRST tool call and appends the result to message history.
This enables sequential tool execution with reflection after each tool,
allowing the LLM to reason about results before deciding on next steps.
Args:
tool_calls: List of tool calls from the LLM (only first is processed).
available_functions: Dict mapping function names to callables.
"""
from datetime import datetime
import json
from crewai.events import crewai_event_bus
from crewai.events.types.tool_usage_events import (
ToolUsageFinishedEvent,
ToolUsageStartedEvent,
)
if not tool_calls:
return
# Only process the FIRST tool call for sequential execution with reflection
tool_call = tool_calls[0]
# Extract tool call info - handle OpenAI-style, Anthropic-style, and Gemini-style
if hasattr(tool_call, "function"):
# OpenAI-style: has .function.name and .function.arguments
call_id = getattr(tool_call, "id", f"call_{id(tool_call)}")
func_name = tool_call.function.name
func_args = tool_call.function.arguments
elif hasattr(tool_call, "function_call") and tool_call.function_call:
# Gemini-style: has .function_call.name and .function_call.args
call_id = f"call_{id(tool_call)}"
func_name = tool_call.function_call.name
func_args = (
dict(tool_call.function_call.args)
if tool_call.function_call.args
else {}
)
elif hasattr(tool_call, "name") and hasattr(tool_call, "input"):
# Anthropic format: has .name and .input (ToolUseBlock)
call_id = getattr(tool_call, "id", f"call_{id(tool_call)}")
func_name = tool_call.name
func_args = tool_call.input # Already a dict in Anthropic
elif isinstance(tool_call, dict):
call_id = tool_call.get("id", f"call_{id(tool_call)}")
func_info = tool_call.get("function", {})
func_name = func_info.get("name", "") or tool_call.get("name", "")
func_args = func_info.get("arguments", "{}") or tool_call.get("input", {})
else:
return
# Append assistant message with single tool call
assistant_message: LLMMessage = {
"role": "assistant",
"content": None,
"tool_calls": [
{
"id": call_id,
"type": "function",
"function": {
"name": func_name,
"arguments": func_args
if isinstance(func_args, str)
else json.dumps(func_args),
},
}
],
}
self.messages.append(assistant_message)
# Parse arguments for the single tool call
if isinstance(func_args, str):
try:
args_dict = json.loads(func_args)
except json.JSONDecodeError:
args_dict = {}
else:
args_dict = func_args
# Emit tool usage started event
started_at = datetime.now()
crewai_event_bus.emit(
self,
event=ToolUsageStartedEvent(
tool_name=func_name,
tool_args=args_dict,
from_agent=self.agent,
from_task=self.task,
),
)
# Execute the tool
print(f"Using Tool: {func_name}")
result = "Tool not found"
if func_name in available_functions:
try:
tool_func = available_functions[func_name]
result = tool_func(**args_dict)
if not isinstance(result, str):
result = str(result)
except Exception as e:
result = f"Error executing tool: {e}"
# Emit tool usage finished event
crewai_event_bus.emit(
self,
event=ToolUsageFinishedEvent(
output=result,
tool_name=func_name,
tool_args=args_dict,
from_agent=self.agent,
from_task=self.task,
started_at=started_at,
finished_at=datetime.now(),
),
)
# Append tool result message
tool_message: LLMMessage = {
"role": "tool",
"tool_call_id": call_id,
"content": result,
}
self.messages.append(tool_message)
# Log the tool execution
if self.agent and self.agent.verbose:
self._printer.print(
content=f"Tool {func_name} executed with result: {result[:200]}...",
color="green",
)
# Inject post-tool reasoning prompt to enforce analysis
reasoning_prompt = self._i18n.slice("post_tool_reasoning")
reasoning_message: LLMMessage = {
"role": "user",
"content": reasoning_prompt,
}
self.messages.append(reasoning_message)
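After one pass through this method the history gains three messages; a sketch of their shape with made-up values (the reflection text stands in for the post_tool_reasoning i18n slice):

history_delta = [
    {   # assistant turn recording the single tool call
        "role": "assistant",
        "content": None,
        "tool_calls": [{
            "id": "call_1",
            "type": "function",
            "function": {"name": "search", "arguments": "{\"q\": \"crewai\"}"},
        }],
    },
    {   # tool result keyed back to the call id
        "role": "tool", "tool_call_id": "call_1", "content": "top hit: crewai docs",
    },
    {   # injected reflection prompt forcing analysis before the next step
        "role": "user", "content": "Analyze the tool result before deciding next steps.",
    },
]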
async def ainvoke(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Execute the agent asynchronously with given inputs.
@@ -746,29 +369,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
async def _ainvoke_loop(self) -> AgentFinish:
"""Execute agent loop asynchronously until completion.
Checks if the LLM supports native function calling and uses that
approach if available, otherwise falls back to the ReAct text pattern.
Returns:
Final answer from the agent.
"""
# Check if model supports native function calling
use_native_tools = (
hasattr(self.llm, "supports_function_calling")
and callable(getattr(self.llm, "supports_function_calling", None))
and self.llm.supports_function_calling()
and self.original_tools
)
if use_native_tools:
return await self._ainvoke_loop_native_tools()
# Fall back to ReAct text-based pattern
return await self._ainvoke_loop_react()
async def _ainvoke_loop_react(self) -> AgentFinish:
"""Execute agent loop asynchronously using ReAct text-based pattern.
Returns:
Final answer from the agent.
"""
@@ -798,21 +398,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
response_model=self.response_model,
executor_context=self,
)
if self.response_model is not None:
try:
self.response_model.model_validate_json(answer)
formatted_answer = AgentFinish(
thought="",
output=answer,
text=answer,
)
except ValidationError:
formatted_answer = process_llm_response(
answer, self.use_stop_words
) # type: ignore[assignment]
else:
formatted_answer = process_llm_response(answer, self.use_stop_words) # type: ignore[assignment]
formatted_answer = process_llm_response(answer, self.use_stop_words) # type: ignore[assignment]
if isinstance(formatted_answer, AgentAction):
fingerprint_context = {}
@@ -845,7 +431,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
)
self._invoke_step_callback(formatted_answer) # type: ignore[arg-type]
self._append_message(formatted_answer.text) # type: ignore[union-attr]
self._append_message(formatted_answer.text) # type: ignore[union-attr,attr-defined]
except OutputParserError as e:
formatted_answer = handle_output_parser_exception( # type: ignore[assignment]
@@ -882,139 +468,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self._show_logs(formatted_answer)
return formatted_answer
async def _ainvoke_loop_native_tools(self) -> AgentFinish:
"""Execute agent loop asynchronously using native function calling.
This method uses the LLM's native tool/function calling capability
instead of the text-based ReAct pattern.
Returns:
Final answer from the agent.
"""
# Convert tools to OpenAI schema format
if not self.original_tools:
return await self._ainvoke_loop_native_no_tools()
openai_tools, available_functions = convert_tools_to_openai_schema(
self.original_tools
)
while True:
try:
if has_reached_max_iterations(self.iterations, self.max_iter):
formatted_answer = handle_max_iterations_exceeded(
None,
printer=self._printer,
i18n=self._i18n,
messages=self.messages,
llm=self.llm,
callbacks=self.callbacks,
)
self._show_logs(formatted_answer)
return formatted_answer
enforce_rpm_limit(self.request_within_rpm_limit)
# Call LLM with native tools
# Pass available_functions=None so the LLM returns tool_calls
# without executing them. The executor handles tool execution
# via _handle_native_tool_calls to properly manage message history.
answer = await aget_llm_response(
llm=self.llm,
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
tools=openai_tools,
available_functions=None,
from_task=self.task,
from_agent=self.agent,
response_model=self.response_model,
executor_context=self,
)
print("--------------------------------")
print("native llm completion answer", answer)
print("--------------------------------")
# Check if the response is a list of tool calls
if (
isinstance(answer, list)
and answer
and self._is_tool_call_list(answer)
):
# Handle tool calls - execute tools and add results to messages
self._handle_native_tool_calls(answer, available_functions)
# Continue loop to let LLM analyze results and decide next steps
continue
# Text or other response - handle as potential final answer
if isinstance(answer, str):
# Text response - this is the final answer
formatted_answer = AgentFinish(
thought="",
output=answer,
text=answer,
)
self._invoke_step_callback(formatted_answer)
self._append_message(answer) # Save final answer to messages
self._show_logs(formatted_answer)
return formatted_answer
# Unexpected response type, treat as final answer
formatted_answer = AgentFinish(
thought="",
output=str(answer),
text=str(answer),
)
self._invoke_step_callback(formatted_answer)
self._append_message(str(answer)) # Save final answer to messages
self._show_logs(formatted_answer)
return formatted_answer
except Exception as e:
if e.__class__.__module__.startswith("litellm"):
raise e
if is_context_length_exceeded(e):
handle_context_length(
respect_context_window=self.respect_context_window,
printer=self._printer,
messages=self.messages,
llm=self.llm,
callbacks=self.callbacks,
i18n=self._i18n,
)
continue
handle_unknown_error(self._printer, e)
raise e
finally:
self.iterations += 1
async def _ainvoke_loop_native_no_tools(self) -> AgentFinish:
"""Execute a simple async LLM call when no tools are available.
Returns:
Final answer from the agent.
"""
enforce_rpm_limit(self.request_within_rpm_limit)
answer = await aget_llm_response(
llm=self.llm,
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
from_task=self.task,
from_agent=self.agent,
response_model=self.response_model,
executor_context=self,
)
formatted_answer = AgentFinish(
thought="",
output=str(answer),
text=str(answer),
)
self._show_logs(formatted_answer)
return formatted_answer
def _handle_agent_action(
self, formatted_answer: AgentAction, tool_result: ToolResult
) -> AgentAction | AgentFinish:

View File

@@ -12,6 +12,7 @@ from json_repair import repair_json # type: ignore[import-untyped]
from crewai.agents.constants import (
ACTION_INPUT_ONLY_REGEX,
ACTION_INPUT_REGEX,
ACTION_NONE_REGEX,
ACTION_REGEX,
FINAL_ANSWER_ACTION,
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
@@ -118,6 +119,34 @@ def parse(text: str) -> AgentAction | AgentFinish:
thought=thought, tool=clean_action, tool_input=safe_tool_input, text=text
)
# Check for "Action: None" or similar non-action values
# This handles cases where the LLM indicates it cannot/should not use a tool
action_none_match = ACTION_NONE_REGEX.search(text)
if action_none_match:
# Extract any additional content after "Action: None"
additional_content = action_none_match.group(2)
if additional_content:
additional_content = additional_content.strip()
# Strip a leading parenthesized remark, or a stray closing paren, left over from patterns like "Action: None (reason)"
if additional_content.startswith("(") and ")" in additional_content:
additional_content = additional_content.split(")", 1)[-1].strip()
elif additional_content.startswith(")"):
additional_content = additional_content[1:].strip()
# Build the final answer from thought and any additional content
final_answer = thought
if additional_content:
if final_answer:
final_answer = f"{final_answer}\n\n{additional_content}"
else:
final_answer = additional_content
# If we still have no content, use a generic message
if not final_answer:
final_answer = "I cannot perform this action with the available tools."
return AgentFinish(thought=thought, output=final_answer, text=text)
if not ACTION_REGEX.search(text):
raise OutputParserError(
f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{_I18N.slice('final_answer_format')}",

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.8.0"
"crewai[tools]==1.7.2"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.8.0"
"crewai[tools]==1.7.2"
]
[project.scripts]

View File

@@ -10,7 +10,7 @@ This module provides the event infrastructure that allows users to:
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from typing import TYPE_CHECKING
from crewai.events.base_event_listener import BaseEventListener
from crewai.events.depends import Depends
@@ -34,8 +34,6 @@ from crewai.events.types.flow_events import (
FlowFinishedEvent,
FlowPlotEvent,
FlowStartedEvent,
HumanFeedbackReceivedEvent,
HumanFeedbackRequestedEvent,
MethodExecutionFailedEvent,
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
@@ -147,8 +145,6 @@ __all__ = [
"FlowFinishedEvent",
"FlowPlotEvent",
"FlowStartedEvent",
"HumanFeedbackReceivedEvent",
"HumanFeedbackRequestedEvent",
"KnowledgeQueryCompletedEvent",
"KnowledgeQueryFailedEvent",
"KnowledgeQueryStartedEvent",
@@ -209,7 +205,7 @@ _AGENT_EVENT_MAPPING = {
}
def __getattr__(name: str) -> Any:
def __getattr__(name: str):
"""Lazy import for agent events to avoid circular imports."""
if name in _AGENT_EVENT_MAPPING:
import importlib
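The function above is PEP 562's module-level __getattr__; a self-contained sketch with a made-up mapping:

import importlib
from typing import Any

_AGENT_EVENT_MAPPING = {  # hypothetical contents, for illustration only
    "AgentExecutionStartedEvent": "crewai.events.types.agent_events",
}

def __getattr__(name: str) -> Any:
    if name in _AGENT_EVENT_MAPPING:
        module = importlib.import_module(_AGENT_EVENT_MAPPING[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")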

View File

@@ -13,8 +13,6 @@ from crewai.events.types.a2a_events import (
A2ADelegationCompletedEvent,
A2ADelegationStartedEvent,
A2AMessageSentEvent,
A2APollingStartedEvent,
A2APollingStatusEvent,
A2AResponseReceivedEvent,
)
from crewai.events.types.agent_events import (
@@ -39,8 +37,6 @@ from crewai.events.types.flow_events import (
FlowFinishedEvent,
FlowPausedEvent,
FlowStartedEvent,
HumanFeedbackReceivedEvent,
HumanFeedbackRequestedEvent,
MethodExecutionFailedEvent,
MethodExecutionFinishedEvent,
MethodExecutionPausedEvent,
@@ -71,6 +67,7 @@ from crewai.events.types.mcp_events import (
MCPConnectionCompletedEvent,
MCPConnectionFailedEvent,
MCPConnectionStartedEvent,
MCPToolExecutionCompletedEvent,
MCPToolExecutionFailedEvent,
MCPToolExecutionStartedEvent,
)
@@ -209,9 +206,10 @@ class EventListener(BaseEventListener):
@crewai_event_bus.on(TaskCompletedEvent)
def on_task_completed(source: Any, event: TaskCompletedEvent) -> None:
# Handle telemetry
span = self.execution_spans.pop(source, None)
span = self.execution_spans.get(source)
if span:
self._telemetry.task_ended(span, source, source.agent.crew)
self.execution_spans[source] = None
# Pass task name if it exists
task_name = get_task_name(source)
@@ -221,10 +219,11 @@ class EventListener(BaseEventListener):
@crewai_event_bus.on(TaskFailedEvent)
def on_task_failed(source: Any, event: TaskFailedEvent) -> None:
span = self.execution_spans.pop(source, None)
span = self.execution_spans.get(source)
if span:
if source.agent and source.agent.crew:
self._telemetry.task_ended(span, source, source.agent.crew)
self.execution_spans[source] = None
# Pass task name if it exists
task_name = get_task_name(source)
@@ -330,33 +329,6 @@ class EventListener(BaseEventListener):
"paused",
)
# ----------- HUMAN FEEDBACK EVENTS -----------
@crewai_event_bus.on(HumanFeedbackRequestedEvent)
def on_human_feedback_requested(
_: Any, event: HumanFeedbackRequestedEvent
) -> None:
"""Handle human feedback requested event."""
has_routing = event.emit is not None and len(event.emit) > 0
self._telemetry.human_feedback_span(
event_type="requested",
has_routing=has_routing,
num_outcomes=len(event.emit) if event.emit else 0,
)
@crewai_event_bus.on(HumanFeedbackReceivedEvent)
def on_human_feedback_received(
_: Any, event: HumanFeedbackReceivedEvent
) -> None:
"""Handle human feedback received event."""
has_routing = event.outcome is not None
self._telemetry.human_feedback_span(
event_type="received",
has_routing=has_routing,
num_outcomes=0,
feedback_provided=bool(event.feedback and event.feedback.strip()),
outcome=event.outcome,
)
# ----------- TOOL USAGE EVENTS -----------
@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_usage_started(source: Any, event: ToolUsageStartedEvent) -> None:
@@ -378,12 +350,6 @@ class EventListener(BaseEventListener):
self.formatter.handle_llm_tool_usage_finished(
event.tool_name,
)
else:
self.formatter.handle_tool_usage_finished(
event.tool_name,
event.output,
getattr(event, "run_attempts", None),
)
@crewai_event_bus.on(ToolUsageErrorEvent)
def on_tool_usage_error(source: Any, event: ToolUsageErrorEvent) -> None:
@@ -614,23 +580,6 @@ class EventListener(BaseEventListener):
event.total_turns,
)
@crewai_event_bus.on(A2APollingStartedEvent)
def on_a2a_polling_started(_: Any, event: A2APollingStartedEvent) -> None:
self.formatter.handle_a2a_polling_started(
event.task_id,
event.polling_interval,
event.endpoint,
)
@crewai_event_bus.on(A2APollingStatusEvent)
def on_a2a_polling_status(_: Any, event: A2APollingStatusEvent) -> None:
self.formatter.handle_a2a_polling_status(
event.task_id,
event.state,
event.elapsed_seconds,
event.poll_count,
)
# ----------- MCP EVENTS -----------
@crewai_event_bus.on(MCPConnectionStartedEvent)

View File

@@ -15,7 +15,7 @@ class A2AEventBase(BaseEvent):
from_task: Any | None = None
from_agent: Any | None = None
def __init__(self, **data: Any) -> None:
def __init__(self, **data):
"""Initialize A2A event, extracting task and agent metadata."""
if data.get("from_task"):
task = data["from_task"]
@@ -139,74 +139,3 @@ class A2AConversationCompletedEvent(A2AEventBase):
final_result: str | None = None
error: str | None = None
total_turns: int
class A2APollingStartedEvent(A2AEventBase):
"""Event emitted when polling mode begins for A2A delegation.
Attributes:
task_id: A2A task ID being polled
polling_interval: Seconds between poll attempts
endpoint: A2A agent endpoint URL
"""
type: str = "a2a_polling_started"
task_id: str
polling_interval: float
endpoint: str
class A2APollingStatusEvent(A2AEventBase):
"""Event emitted on each polling iteration.
Attributes:
task_id: A2A task ID being polled
state: Current task state from remote agent
elapsed_seconds: Time since polling started
poll_count: Number of polls completed
"""
type: str = "a2a_polling_status"
task_id: str
state: str
elapsed_seconds: float
poll_count: int
class A2APushNotificationRegisteredEvent(A2AEventBase):
"""Event emitted when push notification callback is registered.
Attributes:
task_id: A2A task ID for which callback is registered
callback_url: URL where agent will send push notifications
"""
type: str = "a2a_push_notification_registered"
task_id: str
callback_url: str
class A2APushNotificationReceivedEvent(A2AEventBase):
"""Event emitted when a push notification is received.
Attributes:
task_id: A2A task ID from the notification
state: Current task state from the notification
"""
type: str = "a2a_push_notification_received"
task_id: str
state: str
class A2APushNotificationTimeoutEvent(A2AEventBase):
"""Event emitted when push notification wait times out.
Attributes:
task_id: A2A task ID that timed out
timeout_seconds: Timeout duration in seconds
"""
type: str = "a2a_push_notification_timeout"
task_id: str
timeout_seconds: float
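A minimal sketch of consuming one of the events defined above, following the crewai_event_bus.on/emit pattern used elsewhere in this diff (field values are made up, and these classes exist only on the side of the diff that defines them):

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import A2APollingStatusEvent

@crewai_event_bus.on(A2APollingStatusEvent)
def on_poll(_source, event: A2APollingStatusEvent) -> None:
    print(f"poll #{event.poll_count}: {event.state} ({event.elapsed_seconds:.1f}s)")

crewai_event_bus.emit(
    None,
    A2APollingStatusEvent(task_id="1234abcd", state="working", elapsed_seconds=2.5, poll_count=3),
)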

View File

@@ -114,6 +114,7 @@ To enable tracing, do any one of these:
New streaming sessions will be created on-demand when needed.
This method exists for API compatibility with HITL callers.
"""
pass
def print_panel(
self, content: Text, title: str, style: str = "blue", is_flow: bool = False
@@ -366,32 +367,6 @@ To enable tracing, do any one of these:
self.print_panel(content, f"🔧 Tool Execution Started (#{iteration})", "yellow")
def handle_tool_usage_finished(
self,
tool_name: str,
output: str,
run_attempts: int | None = None,
) -> None:
"""Handle tool usage finished event with panel display."""
if not self.verbose:
return
iteration = self.tool_usage_counts.get(tool_name, 1)
content = Text()
content.append("Tool Completed\n", style="green bold")
content.append("Tool: ", style="white")
content.append(f"{tool_name}\n", style="green bold")
if output:
content.append("Output: ", style="white")
content.append(f"{output}\n", style="green")
self.print_panel(
content, f"✅ Tool Execution Completed (#{iteration})", "green"
)
def handle_tool_usage_error(
self,
tool_name: str,
@@ -1442,49 +1417,3 @@ To enable tracing, do any one of these:
panel = self.create_panel(content, "❌ MCP Tool Failed", "red")
self.print(panel)
self.print()
def handle_a2a_polling_started(
self,
task_id: str,
polling_interval: float,
endpoint: str,
) -> None:
"""Handle A2A polling started event with panel display."""
content = Text()
content.append("A2A Polling Started\n", style="cyan bold")
content.append("Task ID: ", style="white")
content.append(f"{task_id[:8]}...\n", style="cyan")
content.append("Interval: ", style="white")
content.append(f"{polling_interval}s\n", style="cyan")
self.print_panel(content, "⏳ A2A Polling", "cyan")
def handle_a2a_polling_status(
self,
task_id: str,
state: str,
elapsed_seconds: float,
poll_count: int,
) -> None:
"""Handle A2A polling status event with panel display."""
if state == "completed":
style = "green"
status_indicator = ""
elif state == "failed":
style = "red"
status_indicator = ""
elif state == "working":
style = "yellow"
status_indicator = ""
else:
style = "cyan"
status_indicator = ""
content = Text()
content.append(f"Poll #{poll_count}\n", style=f"{style} bold")
content.append("Status: ", style="white")
content.append(f"{status_indicator} {state}\n", style=style)
content.append("Elapsed: ", style="white")
content.append(f"{elapsed_seconds:.1f}s\n", style=style)
self.print_panel(content, f"📊 A2A Poll #{poll_count}", style)

View File

@@ -1,8 +1,6 @@
from __future__ import annotations
from collections.abc import Callable
from datetime import datetime
import json
import threading
from typing import TYPE_CHECKING, Any, Literal, cast
from uuid import uuid4
@@ -19,24 +17,16 @@ from crewai.agents.parser import (
OutputParserError,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.listeners.tracing.utils import (
is_tracing_enabled_in_context,
)
from crewai.events.types.logging_events import (
AgentLogsExecutionEvent,
AgentLogsStartedEvent,
)
from crewai.events.types.tool_usage_events import (
ToolUsageFinishedEvent,
ToolUsageStartedEvent,
)
from crewai.flow.flow import Flow, listen, or_, router, start
from crewai.hooks.llm_hooks import (
get_after_llm_call_hooks,
get_before_llm_call_hooks,
)
from crewai.utilities.agent_utils import (
convert_tools_to_openai_schema,
enforce_rpm_limit,
format_message_for_llm,
get_llm_response,
@@ -81,8 +71,6 @@ class AgentReActState(BaseModel):
current_answer: AgentAction | AgentFinish | None = Field(default=None)
is_finished: bool = Field(default=False)
ask_for_human_input: bool = Field(default=False)
use_native_tools: bool = Field(default=False)
pending_tool_calls: list[Any] = Field(default_factory=list)
class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
@@ -191,10 +179,6 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
)
)
# Native tool calling support
self._openai_tools: list[dict[str, Any]] = []
self._available_functions: dict[str, Callable[..., Any]] = {}
self._state = AgentReActState()
def _ensure_flow_initialized(self) -> None:
@@ -205,66 +189,14 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
Only the instance that actually executes via invoke() will emit events.
"""
if not self._flow_initialized:
current_tracing = is_tracing_enabled_in_context()
# Now call Flow's __init__ which will replace self._state
# with Flow's managed state. Suppress flow events since this is
# an agent executor, not a user-facing flow.
super().__init__(
suppress_flow_events=True,
tracing=current_tracing if current_tracing else None,
)
self._flow_initialized = True
def _check_native_tool_support(self) -> bool:
"""Check if LLM supports native function calling.
Returns:
True if the LLM supports native function calling and tools are available.
"""
return (
hasattr(self.llm, "supports_function_calling")
and callable(getattr(self.llm, "supports_function_calling", None))
and self.llm.supports_function_calling()
and bool(self.original_tools)
)
def _setup_native_tools(self) -> None:
"""Convert tools to OpenAI schema format for native function calling."""
if self.original_tools:
self._openai_tools, self._available_functions = (
convert_tools_to_openai_schema(self.original_tools)
)
def _is_tool_call_list(self, response: list[Any]) -> bool:
"""Check if a response is a list of tool calls.
Args:
response: The response to check.
Returns:
True if the response appears to be a list of tool calls.
"""
if not response:
return False
first_item = response[0]
# Check for OpenAI-style tool call structure
if hasattr(first_item, "function") or (
isinstance(first_item, dict) and "function" in first_item
):
return True
# Check for Anthropic-style tool call structure (ToolUseBlock)
if (
hasattr(first_item, "type")
and getattr(first_item, "type", None) == "tool_use"
):
return True
if hasattr(first_item, "name") and hasattr(first_item, "input"):
return True
# Check for Gemini-style function call (Part with function_call)
if hasattr(first_item, "function_call") and first_item.function_call:
return True
return False
@property
def use_stop_words(self) -> bool:
"""Check to determine if stop words are being used.
@@ -297,11 +229,6 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
def initialize_reasoning(self) -> Literal["initialized"]:
"""Initialize the reasoning flow and emit agent start logs."""
self._show_start_logs()
# Check for native tool support on first iteration
if self.state.iterations == 0:
self.state.use_native_tools = self._check_native_tool_support()
if self.state.use_native_tools:
self._setup_native_tools()
return "initialized"
@listen("force_final_answer")
@@ -376,69 +303,6 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
handle_unknown_error(self._printer, e)
raise
@listen("continue_reasoning_native")
def call_llm_native_tools(
self,
) -> Literal["native_tool_calls", "native_finished", "context_error"]:
"""Execute LLM call with native function calling.
Returns routing decision based on whether tool calls or final answer.
"""
try:
enforce_rpm_limit(self.request_within_rpm_limit)
# Call LLM with native tools
# Pass available_functions=None so the LLM returns tool_calls
# without executing them. The executor handles tool execution.
answer = get_llm_response(
llm=self.llm,
messages=list(self.state.messages),
callbacks=self.callbacks,
printer=self._printer,
tools=self._openai_tools,
available_functions=None,
from_task=self.task,
from_agent=self.agent,
response_model=self.response_model,
executor_context=self,
)
# Check if the response is a list of tool calls
if isinstance(answer, list) and answer and self._is_tool_call_list(answer):
# Store tool calls for sequential processing
self.state.pending_tool_calls = list(answer)
return "native_tool_calls"
# Text response - this is the final answer
if isinstance(answer, str):
self.state.current_answer = AgentFinish(
thought="",
output=answer,
text=answer,
)
self._invoke_step_callback(self.state.current_answer)
self._append_message_to_state(answer)
return "native_finished"
# Unexpected response type, treat as final answer
self.state.current_answer = AgentFinish(
thought="",
output=str(answer),
text=str(answer),
)
self._invoke_step_callback(self.state.current_answer)
self._append_message_to_state(str(answer))
return "native_finished"
except Exception as e:
if is_context_length_exceeded(e):
self._last_context_error = e
return "context_error"
if e.__class__.__module__.startswith("litellm"):
raise e
handle_unknown_error(self._printer, e)
raise
@router(call_llm_and_parse)
def route_by_answer_type(self) -> Literal["execute_tool", "agent_finished"]:
"""Route based on whether answer is AgentAction or AgentFinish."""
@@ -494,14 +358,6 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
self.state.is_finished = True
return "tool_result_is_final"
# Inject post-tool reasoning prompt to enforce analysis
reasoning_prompt = self._i18n.slice("post_tool_reasoning")
reasoning_message: LLMMessage = {
"role": "user",
"content": reasoning_prompt,
}
self.state.messages.append(reasoning_message)
return "tool_completed"
except Exception as e:
@@ -511,143 +367,6 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
self._console.print(error_text)
raise
@listen("native_tool_calls")
def execute_native_tool(self) -> Literal["native_tool_completed"]:
"""Execute a single native tool call and inject reasoning prompt.
Processes only the FIRST tool call from pending_tool_calls for
sequential execution with reflection after each tool.
"""
if not self.state.pending_tool_calls:
return "native_tool_completed"
tool_call = self.state.pending_tool_calls[0]
self.state.pending_tool_calls = [] # Clear pending calls
# Extract tool call info - handle OpenAI, Anthropic, and Gemini formats
if hasattr(tool_call, "function"):
# OpenAI format: has .function.name and .function.arguments
call_id = getattr(tool_call, "id", f"call_{id(tool_call)}")
func_name = tool_call.function.name
func_args = tool_call.function.arguments
elif hasattr(tool_call, "function_call") and tool_call.function_call:
# Gemini format: has .function_call.name and .function_call.args
call_id = f"call_{id(tool_call)}"
func_name = tool_call.function_call.name
func_args = (
dict(tool_call.function_call.args)
if tool_call.function_call.args
else {}
)
elif hasattr(tool_call, "name") and hasattr(tool_call, "input"):
# Anthropic format: has .name and .input (ToolUseBlock)
call_id = getattr(tool_call, "id", f"call_{id(tool_call)}")
func_name = tool_call.name
func_args = tool_call.input # Already a dict in Anthropic
elif isinstance(tool_call, dict):
call_id = tool_call.get("id", f"call_{id(tool_call)}")
func_info = tool_call.get("function", {})
func_name = func_info.get("name", "") or tool_call.get("name", "")
func_args = func_info.get("arguments", "{}") or tool_call.get("input", {})
else:
return "native_tool_completed"
# Append assistant message with single tool call
assistant_message: LLMMessage = {
"role": "assistant",
"content": None,
"tool_calls": [
{
"id": call_id,
"type": "function",
"function": {
"name": func_name,
"arguments": func_args
if isinstance(func_args, str)
else json.dumps(func_args),
},
}
],
}
self.state.messages.append(assistant_message)
# Parse arguments for the single tool call
if isinstance(func_args, str):
try:
args_dict = json.loads(func_args)
except json.JSONDecodeError:
args_dict = {}
else:
args_dict = func_args
# Emit tool usage started event
started_at = datetime.now()
crewai_event_bus.emit(
self,
event=ToolUsageStartedEvent(
tool_name=func_name,
tool_args=args_dict,
from_agent=self.agent,
from_task=self.task,
),
)
# Execute the tool
result = "Tool not found"
if func_name in self._available_functions:
try:
tool_func = self._available_functions[func_name]
result = tool_func(**args_dict)
if not isinstance(result, str):
result = str(result)
except Exception as e:
result = f"Error executing tool: {e}"
# Emit tool usage finished event
crewai_event_bus.emit(
self,
event=ToolUsageFinishedEvent(
output=result,
tool_name=func_name,
tool_args=args_dict,
from_agent=self.agent,
from_task=self.task,
started_at=started_at,
finished_at=datetime.now(),
),
)
# Append tool result message
tool_message: LLMMessage = {
"role": "tool",
"tool_call_id": call_id,
"content": result,
}
self.state.messages.append(tool_message)
# Log the tool execution
if self.agent and self.agent.verbose:
self._printer.print(
content=f"Tool {func_name} executed with result: {result[:200]}...",
color="green",
)
# Inject post-tool reasoning prompt to enforce analysis
reasoning_prompt = self._i18n.slice("post_tool_reasoning")
reasoning_message: LLMMessage = {
"role": "user",
"content": reasoning_prompt,
}
self.state.messages.append(reasoning_message)
return "native_tool_completed"
@router(execute_native_tool)
def increment_native_and_continue(self) -> Literal["initialized"]:
"""Increment iteration counter after native tool execution."""
self.state.iterations += 1
return "initialized"
@listen("initialized")
def continue_iteration(self) -> Literal["check_iteration"]:
"""Bridge listener that connects iteration loop back to iteration check."""
@@ -656,14 +375,10 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
@router(or_(initialize_reasoning, continue_iteration))
def check_max_iterations(
self,
) -> Literal[
"force_final_answer", "continue_reasoning", "continue_reasoning_native"
]:
) -> Literal["force_final_answer", "continue_reasoning"]:
"""Check if max iterations reached before proceeding with reasoning."""
if has_reached_max_iterations(self.state.iterations, self.max_iter):
return "force_final_answer"
if self.state.use_native_tools:
return "continue_reasoning_native"
return "continue_reasoning"
@router(execute_tool_action)
@@ -672,7 +387,7 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
self.state.iterations += 1
return "initialized"
@listen(or_("agent_finished", "tool_result_is_final", "native_finished"))
@listen(or_("agent_finished", "tool_result_is_final"))
def finalize(self) -> Literal["completed", "skipped"]:
"""Finalize execution and emit completion logs."""
if self.state.current_answer is None:
@@ -760,8 +475,6 @@ class CrewAgentExecutorFlow(Flow[AgentReActState], CrewAgentExecutorMixin):
self.state.iterations = 0
self.state.current_answer = None
self.state.is_finished = False
self.state.use_native_tools = False
self.state.pending_tool_calls = []
if "system" in self.prompt:
prompt = cast("SystemPromptResult", self.prompt)

View File

@@ -5,7 +5,6 @@ from crewai.flow.async_feedback import (
PendingFeedbackContext,
)
from crewai.flow.flow import Flow, and_, listen, or_, router, start
from crewai.flow.flow_config import flow_config
from crewai.flow.human_feedback import HumanFeedbackResult, human_feedback
from crewai.flow.persistence import persist
from crewai.flow.visualization import (
@@ -25,7 +24,6 @@ __all__ = [
"PendingFeedbackContext",
"and_",
"build_flow_structure",
"flow_config",
"human_feedback",
"listen",
"or_",

View File

@@ -1203,13 +1203,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
result = self.kickoff(inputs=inputs)
result_holder.append(result)
except Exception as e:
# HumanFeedbackPending is expected control flow, not an error
from crewai.flow.async_feedback.types import HumanFeedbackPending
if isinstance(e, HumanFeedbackPending):
result_holder.append(e)
else:
signal_error(state, e)
signal_error(state, e)
finally:
self.stream = True
signal_end(state)
@@ -1264,13 +1258,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
result = await self.kickoff_async(inputs=inputs)
result_holder.append(result)
except Exception as e:
# HumanFeedbackPending is expected control flow, not an error
from crewai.flow.async_feedback.types import HumanFeedbackPending
if isinstance(e, HumanFeedbackPending):
result_holder.append(e)
else:
signal_error(state, e, is_async=True)
signal_error(state, e, is_async=True)
finally:
self.stream = True
signal_end(state, is_async=True)
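Since HumanFeedbackPending is expected control flow rather than an error, the caller side mirrors this handling. A sketch, assuming an already-built `flow` instance; `save_for_later` is a hypothetical helper:

from crewai.flow.async_feedback.types import HumanFeedbackPending

try:
    result = flow.kickoff(inputs={"topic": "pricing"})
except HumanFeedbackPending as pending:
    # Paused, not failed: stash the context and resume once feedback arrives.
    save_for_later(pending.context.flow_id, pending.context.message)  # hypothetical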
@@ -1602,45 +1590,29 @@ class Flow(Generic[T], metaclass=FlowMeta):
return result
except Exception as e:
# Check if this is a HumanFeedbackPending exception (paused, not failed)
from crewai.flow.async_feedback.types import HumanFeedbackPending
if not self.suppress_flow_events:
# Check if this is a HumanFeedbackPending exception (paused, not failed)
from crewai.flow.async_feedback.types import HumanFeedbackPending
if isinstance(e, HumanFeedbackPending):
# Auto-save pending feedback (create default persistence if needed)
if self._persistence is None:
from crewai.flow.persistence import SQLiteFlowPersistence
if isinstance(e, HumanFeedbackPending):
# Auto-save pending feedback (create default persistence if needed)
if self._persistence is None:
from crewai.flow.persistence import SQLiteFlowPersistence
self._persistence = SQLiteFlowPersistence()
self._persistence = SQLiteFlowPersistence()
# Emit paused event (not failed)
if not self.suppress_flow_events:
future = crewai_event_bus.emit(
self,
MethodExecutionPausedEvent(
type="method_execution_paused",
method_name=method_name,
flow_name=self.name or self.__class__.__name__,
state=self._copy_and_serialize_state(),
flow_id=e.context.flow_id,
message=e.context.message,
emit=e.context.emit,
),
)
if future:
self._event_futures.append(future)
elif not self.suppress_flow_events:
# Regular failure - emit failed event
future = crewai_event_bus.emit(
self,
MethodExecutionFailedEvent(
type="method_execution_failed",
method_name=method_name,
flow_name=self.name or self.__class__.__name__,
error=e,
),
)
if future:
self._event_futures.append(future)
# Regular failure
future = crewai_event_bus.emit(
self,
MethodExecutionFailedEvent(
type="method_execution_failed",
method_name=method_name,
flow_name=self.name or self.__class__.__name__,
error=e,
),
)
if future:
self._event_futures.append(future)
raise e
def _copy_and_serialize_state(self) -> dict[str, Any]:

View File

@@ -1,39 +0,0 @@
"""Global Flow configuration.
This module provides a singleton configuration object that can be used to
customize Flow behavior at runtime.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from crewai.flow.async_feedback.types import HumanFeedbackProvider
class FlowConfig:
"""Global configuration for Flow execution.
Attributes:
hitl_provider: The human-in-the-loop feedback provider.
Defaults to None (uses console input).
Can be overridden by deployments at startup.
"""
def __init__(self) -> None:
self._hitl_provider: HumanFeedbackProvider | None = None
@property
def hitl_provider(self) -> Any:
"""Get the configured HITL provider."""
return self._hitl_provider
@hitl_provider.setter
def hitl_provider(self, provider: Any) -> None:
"""Set the HITL provider."""
self._hitl_provider = provider
# Singleton instance
flow_config = FlowConfig()
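Before its deletion in this diff, the singleton was meant to be assigned once at deployment startup so every flow picked up the same provider. A sketch of that usage; QueueFeedbackProvider is a hypothetical implementation of the provider protocol:

from crewai.flow.flow_config import flow_config  # module removed by this change

class QueueFeedbackProvider:
    def request_feedback(self, context, flow_instance):
        ...  # enqueue context.message, or raise HumanFeedbackPending for async delivery

flow_config.hitl_provider = QueueFeedbackProvider()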

View File

@@ -1,5 +1,4 @@
import inspect
from typing import Any
from pydantic import BaseModel, Field, InstanceOf, model_validator
from typing_extensions import Self
@@ -15,14 +14,14 @@ class FlowTrackable(BaseModel):
inspecting the call stack.
"""
parent_flow: InstanceOf[Flow[Any]] | None = Field(
parent_flow: InstanceOf[Flow] | None = Field(
default=None,
description="The parent flow of the instance, if it was created inside a flow.",
)
@model_validator(mode="after")
def _set_parent_flow(self) -> Self:
max_depth = 8
max_depth = 5
frame = inspect.currentframe()
try:

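The validator above walks caller frames to find an enclosing Flow; lowering max_depth from 8 to 5 simply bounds that walk. A self-contained sketch of the mechanism (the real validator may differ in matching details):

import inspect

def find_parent_flow(flow_type: type, max_depth: int = 5):
    """Sketch: look for an instance of flow_type bound as `self` in caller frames."""
    frame = inspect.currentframe()
    try:
        depth = 0
        while frame is not None and depth < max_depth:
            candidate = frame.f_locals.get("self")
            if isinstance(candidate, flow_type):
                return candidate
            frame = frame.f_back
            depth += 1
        return None
    finally:
        del frame  # frame objects create reference cycles; drop eagerly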
View File

@@ -283,18 +283,11 @@ def human_feedback(
llm=llm if isinstance(llm, str) else None,
)
# Determine effective provider:
effective_provider = provider
if effective_provider is None:
from crewai.flow.flow_config import flow_config
effective_provider = flow_config.hitl_provider
if effective_provider is not None:
# Use provider (may raise HumanFeedbackPending for async providers)
return effective_provider.request_feedback(context, flow_instance)
if provider is not None:
# Use custom provider (may raise HumanFeedbackPending)
return provider.request_feedback(context, flow_instance)
else:
# Use default console input (local development)
# Use default console input
return flow_instance._request_human_feedback(
message=message,
output=method_output,

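With the global fallback gone, a non-console provider must now be passed explicitly, and the contract it has to satisfy is just the single call shown above. A minimal synchronous sketch:

class ConsoleProvider:
    def request_feedback(self, context, flow_instance):
        # Synchronous providers return feedback directly; async providers
        # raise HumanFeedbackPending and deliver the result out of band.
        return input(f"{context.message}\n> ")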
View File

@@ -925,12 +925,12 @@ class LLM(BaseLLM):
except Exception as e:
logging.debug(f"Error checking for tool calls: {e}")
# Track token usage and log callbacks if available in streaming mode
if usage_info:
self._track_token_usage_internal(usage_info)
self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
if not tool_calls or not available_functions:
# Track token usage and log callbacks if available in streaming mode
if usage_info:
self._track_token_usage_internal(usage_info)
self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
if response_model and self.is_litellm:
instructor_instance = InternalInstructor(
content=full_response,
@@ -962,7 +962,12 @@ class LLM(BaseLLM):
if tool_result is not None:
return tool_result
# --- 10) Emit completion event and return response
# --- 10) Track token usage and log callbacks if available in streaming mode
if usage_info:
self._track_token_usage_internal(usage_info)
self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
# --- 11) Emit completion event and return response
self._handle_emit_call_events(
response=full_response,
call_type=LLMCallType.LLM_CALL,
@@ -1144,14 +1149,6 @@ class LLM(BaseLLM):
params["response_model"] = response_model
response = litellm.completion(**params)
if (
hasattr(response, "usage")
and not isinstance(response.usage, type)
and response.usage
):
usage_info = response.usage
self._track_token_usage_internal(usage_info)
except ContextWindowExceededError as e:
# Convert litellm's context window error to our own exception type
# for consistent handling in the rest of the codebase
@@ -1202,19 +1199,16 @@ class LLM(BaseLLM):
)
return text_response
# --- 6) If there are tool calls but no available functions, return the tool calls
# This allows the caller (e.g., executor) to handle tool execution
if tool_calls and not available_functions:
# --- 6) If there is no text response, no available functions, but there are tool calls, return the tool calls
if tool_calls and not available_functions and not text_response:
return tool_calls
# --- 7) Handle tool calls if present (execute when available_functions provided)
if tool_calls and available_functions:
tool_result = self._handle_tool_call(
tool_calls, available_functions, from_task, from_agent
)
if tool_result is not None:
return tool_result
# --- 7) Handle tool calls if present
tool_result = self._handle_tool_call(
tool_calls, available_functions, from_task, from_agent
)
if tool_result is not None:
return tool_result
# --- 8) If tool call handling didn't return a result, emit completion event and return text response
self._handle_emit_call_events(
response=text_response,
@@ -1279,14 +1273,6 @@ class LLM(BaseLLM):
params["response_model"] = response_model
response = await litellm.acompletion(**params)
if (
hasattr(response, "usage")
and not isinstance(response.usage, type)
and response.usage
):
usage_info = response.usage
self._track_token_usage_internal(usage_info)
except ContextWindowExceededError as e:
raise LLMContextLengthExceededError(str(e)) from e
@@ -1331,18 +1317,14 @@ class LLM(BaseLLM):
)
return text_response
# If there are tool calls but no available functions, return the tool calls
# This allows the caller (e.g., executor) to handle tool execution
if tool_calls and not available_functions:
if tool_calls and not available_functions and not text_response:
return tool_calls
# Handle tool calls if present (execute when available_functions provided)
if tool_calls and available_functions:
tool_result = self._handle_tool_call(
tool_calls, available_functions, from_task, from_agent
)
if tool_result is not None:
return tool_result
tool_result = self._handle_tool_call(
tool_calls, available_functions, from_task, from_agent
)
if tool_result is not None:
return tool_result
self._handle_emit_call_events(
response=text_response,
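Both before and after this change, the split that matters to callers is the same: pass available_functions and the call loop executes matching tools itself; omit them and raw tool calls are handed back (after this change, only when no text response was produced). A sketch, assuming `schemas` is a list of OpenAI-format tool schemas and `calculate` is a plain Python callable:

llm = LLM(model="gpt-4o-mini")

# Caller-managed: raw tool calls are returned for the executor to run.
calls = llm.call(messages, tools=schemas)

# LLM-managed: the matching callable is invoked and its result returned.
answer = llm.call(
    messages,
    tools=schemas,
    available_functions={"calculator": calculate},
)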
@@ -1377,7 +1359,6 @@ class LLM(BaseLLM):
"""
full_response = ""
chunk_count = 0
usage_info = None
accumulated_tool_args: defaultdict[int, AccumulatedToolArgs] = defaultdict(
@@ -1463,9 +1444,6 @@ class LLM(BaseLLM):
end_time=0,
)
if usage_info:
self._track_token_usage_internal(usage_info)
if accumulated_tool_args and available_functions:
# Convert accumulated tool args to ChatCompletionDeltaToolCall objects
tool_calls_list: list[ChatCompletionDeltaToolCall] = [

View File

@@ -445,7 +445,7 @@ class BaseLLM(ABC):
from_agent=from_agent,
)
return result
return str(result)
except Exception as e:
error_msg = f"Error executing function '{function_name}': {e!s}"

View File

@@ -418,7 +418,6 @@ class AnthropicCompletion(BaseLLM):
- System messages are separate from conversation messages
- Messages must alternate between user and assistant
- First message must be from user
- Tool results must be in user messages with tool_result content blocks
- When thinking is enabled, assistant messages must start with thinking blocks
Args:
@@ -432,7 +431,6 @@ class AnthropicCompletion(BaseLLM):
formatted_messages: list[LLMMessage] = []
system_message: str | None = None
pending_tool_results: list[dict[str, Any]] = []
for message in base_formatted:
role = message.get("role")
@@ -443,47 +441,16 @@ class AnthropicCompletion(BaseLLM):
system_message += f"\n\n{content}"
else:
system_message = cast(str, content)
elif role == "tool":
# Convert OpenAI-style tool message to Anthropic tool_result format
# These will be collected and added as a user message
tool_call_id = message.get("tool_call_id", "")
tool_result = {
"type": "tool_result",
"tool_use_id": tool_call_id,
"content": content if content else "",
}
pending_tool_results.append(tool_result)
elif role == "assistant":
# First, flush any pending tool results as a user message
if pending_tool_results:
formatted_messages.append(
{"role": "user", "content": pending_tool_results}
)
pending_tool_results = []
else:
role_str = role if role is not None else "user"
# Handle assistant message with tool_calls (convert to Anthropic format)
tool_calls = message.get("tool_calls", [])
if tool_calls:
assistant_content: list[dict[str, Any]] = []
for tc in tool_calls:
if isinstance(tc, dict):
func = tc.get("function", {})
tool_use = {
"type": "tool_use",
"id": tc.get("id", ""),
"name": func.get("name", ""),
"input": json.loads(func.get("arguments", "{}"))
if isinstance(func.get("arguments"), str)
else func.get("arguments", {}),
}
assistant_content.append(tool_use)
if assistant_content:
formatted_messages.append(
{"role": "assistant", "content": assistant_content}
)
elif isinstance(content, list):
formatted_messages.append({"role": "assistant", "content": content})
elif self.thinking and self.previous_thinking_blocks:
if isinstance(content, list):
formatted_messages.append({"role": role_str, "content": content})
elif (
role_str == "assistant"
and self.thinking
and self.previous_thinking_blocks
):
structured_content = cast(
list[dict[str, Any]],
[
@@ -492,34 +459,14 @@ class AnthropicCompletion(BaseLLM):
],
)
formatted_messages.append(
LLMMessage(role="assistant", content=structured_content)
LLMMessage(role=role_str, content=structured_content)
)
else:
content_str = content if content is not None else ""
formatted_messages.append(
LLMMessage(role="assistant", content=content_str)
)
else:
# User message - first flush any pending tool results
if pending_tool_results:
formatted_messages.append(
{"role": "user", "content": pending_tool_results}
)
pending_tool_results = []
role_str = role if role is not None else "user"
if isinstance(content, list):
formatted_messages.append({"role": role_str, "content": content})
else:
content_str = content if content is not None else ""
formatted_messages.append(
LLMMessage(role=role_str, content=content_str)
)
# Flush any remaining pending tool results
if pending_tool_results:
formatted_messages.append({"role": "user", "content": pending_tool_results})
# Ensure first message is from user (Anthropic requirement)
if not formatted_messages:
# If no messages, add a default user message
@@ -579,19 +526,13 @@ class AnthropicCompletion(BaseLLM):
return structured_json
# Check if Claude wants to use tools
if response.content:
if response.content and available_functions:
tool_uses = [
block for block in response.content if isinstance(block, ToolUseBlock)
]
if tool_uses:
# If no available_functions, return tool calls for executor to handle
# This allows the executor to manage tool execution with proper
# message history and post-tool reasoning prompts
if not available_functions:
return list(tool_uses)
# Handle tool use conversation flow internally
# Handle tool use conversation flow
return self._handle_tool_use_conversation(
response,
tool_uses,
@@ -755,7 +696,7 @@ class AnthropicCompletion(BaseLLM):
return structured_json
if final_message.content:
if final_message.content and available_functions:
tool_uses = [
block
for block in final_message.content
@@ -763,11 +704,7 @@ class AnthropicCompletion(BaseLLM):
]
if tool_uses:
# If no available_functions, return tool calls for executor to handle
if not available_functions:
return list(tool_uses)
# Handle tool use conversation flow internally
# Handle tool use conversation flow
return self._handle_tool_use_conversation(
final_message,
tool_uses,
@@ -996,16 +933,12 @@ class AnthropicCompletion(BaseLLM):
return structured_json
if response.content:
if response.content and available_functions:
tool_uses = [
block for block in response.content if isinstance(block, ToolUseBlock)
]
if tool_uses:
# If no available_functions, return tool calls for executor to handle
if not available_functions:
return list(tool_uses)
return await self._ahandle_tool_use_conversation(
response,
tool_uses,
@@ -1146,7 +1079,7 @@ class AnthropicCompletion(BaseLLM):
return structured_json
if final_message.content:
if final_message.content and available_functions:
tool_uses = [
block
for block in final_message.content
@@ -1154,10 +1087,6 @@ class AnthropicCompletion(BaseLLM):
]
if tool_uses:
# If no available_functions, return tool calls for executor to handle
if not available_functions:
return list(tool_uses)
return await self._ahandle_tool_use_conversation(
final_message,
tool_uses,

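The formatting rules in the docstring above are easiest to see on a concrete exchange. A sketch of the reshaping the removed branches performed, with hypothetical message content; the tool_use / tool_result dict shapes are taken verbatim from the removed code:

# OpenAI-style history: an assistant message carrying tool_calls,
# followed by a role="tool" message with the matching tool_call_id.
# Anthropic-style equivalent:
anthropic_messages = [
    {"role": "assistant", "content": [
        {"type": "tool_use", "id": "call_1", "name": "calculator",
         "input": {"expression": "15 * 8"}},
    ]},
    {"role": "user", "content": [
        {"type": "tool_result", "tool_use_id": "call_1", "content": "120"},
    ]},
]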
View File

@@ -443,7 +443,7 @@ class AzureCompletion(BaseLLM):
params["presence_penalty"] = self.presence_penalty
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
if self.stop and self.supports_stop_words():
if self.stop:
params["stop"] = self.stop
# Handle tools/functions for Azure OpenAI models
@@ -514,31 +514,10 @@ class AzureCompletion(BaseLLM):
for message in base_formatted:
role = message.get("role", "user") # Default to user if no role
# Handle None content - Azure requires string content
content = message.get("content") or ""
content = message.get("content", "")
# Handle tool role messages - keep as tool role for Azure OpenAI
if role == "tool":
tool_call_id = message.get("tool_call_id", "unknown")
azure_messages.append(
{
"role": "tool",
"tool_call_id": tool_call_id,
"content": content,
}
)
# Handle assistant messages with tool_calls
elif role == "assistant" and message.get("tool_calls"):
tool_calls = message.get("tool_calls", [])
azure_msg: LLMMessage = {
"role": "assistant",
"content": content, # Already defaulted to "" above
"tool_calls": tool_calls,
}
azure_messages.append(azure_msg)
else:
# Azure AI Inference requires both 'role' and 'content'
azure_messages.append({"role": role, "content": content})
# Azure AI Inference requires both 'role' and 'content'
azure_messages.append({"role": role, "content": content})
return azure_messages
@@ -625,11 +604,6 @@ class AzureCompletion(BaseLLM):
from_agent=from_agent,
)
# If there are tool_calls but no available_functions, return the tool_calls
# This allows the caller (e.g., executor) to handle tool execution
if message.tool_calls and not available_functions:
return list(message.tool_calls)
# Handle tool calls
if message.tool_calls and available_functions:
tool_call = message.tool_calls[0] # Handle first tool call
@@ -801,21 +775,6 @@ class AzureCompletion(BaseLLM):
from_agent=from_agent,
)
# If there are tool_calls but no available_functions, return them
# in OpenAI-compatible format for executor to handle
if tool_calls and not available_functions:
return [
{
"id": call_data.get("id", f"call_{idx}"),
"type": "function",
"function": {
"name": call_data["name"],
"arguments": call_data["arguments"],
},
}
for idx, call_data in tool_calls.items()
]
# Handle completed tool calls
if tool_calls and available_functions:
for call_data in tool_calls.values():
@@ -972,28 +931,8 @@ class AzureCompletion(BaseLLM):
return self.is_openai_model
def supports_stop_words(self) -> bool:
"""Check if the model supports stop words.
Models using the Responses API (GPT-5 family, o-series reasoning models,
computer-use-preview) do not support stop sequences.
See: https://learn.microsoft.com/en-us/azure/ai-foundry/foundry-models/concepts/models-sold-directly-by-azure
"""
model_lower = self.model.lower() if self.model else ""
if "gpt-5" in model_lower:
return False
o_series_models = ["o1", "o3", "o4", "o1-mini", "o3-mini", "o4-mini"]
responses_api_models = ["computer-use-preview"]
unsupported_stop_models = o_series_models + responses_api_models
for unsupported in unsupported_stop_models:
if unsupported in model_lower:
return False
return True
"""Check if the model supports stop words."""
return True # Most Azure models support stop sequences
def get_context_window_size(self) -> int:
"""Get the context window size for the model."""

View File

@@ -606,17 +606,6 @@ class GeminiCompletion(BaseLLM):
if response.candidates and (self.tools or available_functions):
candidate = response.candidates[0]
if candidate.content and candidate.content.parts:
# Collect function call parts
function_call_parts = [
part for part in candidate.content.parts if part.function_call
]
# If there are function calls but no available_functions,
# return them for the executor to handle (like OpenAI/Anthropic)
if function_call_parts and not available_functions:
return function_call_parts
# Otherwise execute the tools internally
for part in candidate.content.parts:
if part.function_call:
function_name = part.function_call.name
@@ -731,7 +720,7 @@ class GeminiCompletion(BaseLLM):
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | list[dict[str, Any]]:
) -> str:
"""Finalize streaming response with usage tracking, function execution, and events.
Args:
@@ -749,21 +738,6 @@ class GeminiCompletion(BaseLLM):
"""
self._track_token_usage_internal(usage_data)
# If there are function calls but no available_functions,
# return them for the executor to handle
if function_calls and not available_functions:
return [
{
"id": call_data["id"],
"function": {
"name": call_data["name"],
"arguments": json.dumps(call_data["args"]),
},
"type": "function",
}
for call_data in function_calls.values()
]
# Handle completed function calls
if function_calls and available_functions:
for call_data in function_calls.values():

View File

@@ -428,12 +428,6 @@ class OpenAICompletion(BaseLLM):
choice: Choice = response.choices[0]
message = choice.message
# If there are tool_calls but no available_functions, return the tool_calls
# This allows the caller (e.g., executor) to handle tool execution
if message.tool_calls and not available_functions:
return list(message.tool_calls)
# If there are tool_calls and available_functions, execute the tools
if message.tool_calls and available_functions:
tool_call = message.tool_calls[0]
function_name = tool_call.function.name
@@ -731,15 +725,6 @@ class OpenAICompletion(BaseLLM):
choice: Choice = response.choices[0]
message = choice.message
# If there are tool_calls but no available_functions, return the tool_calls
# This allows the caller (e.g., executor) to handle tool execution
if message.tool_calls and not available_functions:
print("--------------------------------")
print("lorenze tool_calls", list(message.tool_calls))
print("--------------------------------")
return list(message.tool_calls)
# If there are tool_calls and available_functions, execute the tools
if message.tool_calls and available_functions:
tool_call = message.tool_calls[0]
function_name = tool_call.function.name

View File

@@ -969,35 +969,3 @@ class Telemetry:
close_span(span)
self._safe_telemetry_operation(_operation)
def human_feedback_span(
self,
event_type: str,
has_routing: bool,
num_outcomes: int = 0,
feedback_provided: bool | None = None,
outcome: str | None = None,
) -> None:
"""Records human feedback feature usage.
Args:
event_type: Type of event - "requested" or "received".
has_routing: Whether emit options were configured for routing.
num_outcomes: Number of possible outcomes if routing is used.
feedback_provided: Whether user provided feedback or skipped (None if requested).
outcome: The collapsed outcome string if routing was used.
"""
def _operation() -> None:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Human Feedback")
self._add_attribute(span, "event_type", event_type)
self._add_attribute(span, "has_routing", has_routing)
self._add_attribute(span, "num_outcomes", num_outcomes)
if feedback_provided is not None:
self._add_attribute(span, "feedback_provided", feedback_provided)
if outcome is not None:
self._add_attribute(span, "outcome", outcome)
close_span(span)
self._safe_telemetry_operation(_operation)
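A call-site sketch for the removed span, assuming a Telemetry instance named `telemetry` and arguments as defined in its docstring (the outcome string is hypothetical):

telemetry.human_feedback_span(
    event_type="received",
    has_routing=True,
    num_outcomes=3,
    feedback_provided=True,
    outcome="approved",
)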

View File

@@ -11,9 +11,6 @@
"role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
"tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
"no_tools": "\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"native_tools": "\nUse available tools to gather information and complete your task.",
"native_task": "\nCurrent Task: {input}\n\nThis is VERY important to you, your job depends on it!",
"post_tool_reasoning": "PAUSE and THINK before responding.\n\nInternally consider (DO NOT output these steps):\n- What key insights did the tool provide?\n- Have I fulfilled ALL requirements from my original instructions (e.g., minimum tool calls, specific sources)?\n- Do I have enough information to fully answer the task?\n\nIF you have NOT met all requirements or need more information: Call another tool now.\n\nIF you have met all requirements and have sufficient information: Provide ONLY your final answer in the format specified by the task's expected output. Do NOT include reasoning steps, analysis sections, or meta-commentary. Just deliver the answer.",
"format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. When responding, I must use the following format:\n\n```\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n```\nThis Thought/Action/Action Input/Result can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
"final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\n```\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n```",
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nHere is the expected format I must follow:\n\n```\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n This Thought/Action/Action Input/Result process can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",

View File

@@ -108,65 +108,6 @@ def render_text_description_and_args(
return "\n".join(tool_strings)
def convert_tools_to_openai_schema(
tools: Sequence[BaseTool | CrewStructuredTool],
) -> tuple[list[dict[str, Any]], dict[str, Callable[..., Any]]]:
"""Convert CrewAI tools to OpenAI function calling format.
This function converts CrewAI BaseTool and CrewStructuredTool objects
into the OpenAI-compatible tool schema format that can be passed to
LLM providers for native function calling.
Args:
tools: List of CrewAI tool objects to convert.
Returns:
Tuple containing:
- List of OpenAI-format tool schema dictionaries
- Dict mapping tool names to their callable run() methods
Example:
>>> tools = [CalculatorTool(), SearchTool()]
>>> schemas, functions = convert_tools_to_openai_schema(tools)
>>> # schemas can be passed to llm.call(tools=schemas)
>>> # functions can be passed to llm.call(available_functions=functions)
"""
openai_tools: list[dict[str, Any]] = []
available_functions: dict[str, Callable[..., Any]] = {}
for tool in tools:
# Get the JSON schema for tool parameters
parameters: dict[str, Any] = {}
if hasattr(tool, "args_schema") and tool.args_schema is not None:
try:
parameters = tool.args_schema.model_json_schema()
# Remove title and description from schema root as they're redundant
parameters.pop("title", None)
parameters.pop("description", None)
except Exception:
parameters = {}
# Extract original description from formatted description
# BaseTool formats description as "Tool Name: ...\nTool Arguments: ...\nTool Description: {original}"
description = tool.description
if "Tool Description:" in description:
# Extract the original description after "Tool Description:"
description = description.split("Tool Description:")[-1].strip()
schema: dict[str, Any] = {
"type": "function",
"function": {
"name": tool.name,
"description": description,
"parameters": parameters,
},
}
openai_tools.append(schema)
available_functions[tool.name] = tool.run
return openai_tools, available_functions
def has_reached_max_iterations(iterations: int, max_iterations: int) -> bool:
"""Check if the maximum number of iterations has been reached.
@@ -293,13 +234,11 @@ def get_llm_response(
messages: list[LLMMessage],
callbacks: list[TokenCalcHandler],
printer: Printer,
tools: list[dict[str, Any]] | None = None,
available_functions: dict[str, Callable[..., Any]] | None = None,
from_task: Task | None = None,
from_agent: Agent | LiteAgent | None = None,
response_model: type[BaseModel] | None = None,
executor_context: CrewAgentExecutor | LiteAgent | None = None,
) -> str | Any:
) -> str:
"""Call the LLM and return the response, handling any invalid responses.
Args:
@@ -307,16 +246,13 @@ def get_llm_response(
messages: The messages to send to the LLM.
callbacks: List of callbacks for the LLM call.
printer: Printer instance for output.
tools: Optional list of tool schemas for native function calling.
available_functions: Optional dict mapping function names to callables.
from_task: Optional task context for the LLM call.
from_agent: Optional agent context for the LLM call.
response_model: Optional Pydantic model for structured outputs.
executor_context: Optional executor context for hook invocation.
Returns:
The response from the LLM as a string, or tool call results if
native function calling is used.
The response from the LLM as a string.
Raises:
Exception: If an error occurs.
@@ -331,9 +267,7 @@ def get_llm_response(
try:
answer = llm.call(
messages,
tools=tools,
callbacks=callbacks,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent, # type: ignore[arg-type]
response_model=response_model,
@@ -355,13 +289,11 @@ async def aget_llm_response(
messages: list[LLMMessage],
callbacks: list[TokenCalcHandler],
printer: Printer,
tools: list[dict[str, Any]] | None = None,
available_functions: dict[str, Callable[..., Any]] | None = None,
from_task: Task | None = None,
from_agent: Agent | LiteAgent | None = None,
response_model: type[BaseModel] | None = None,
executor_context: CrewAgentExecutor | None = None,
) -> str | Any:
) -> str:
"""Call the LLM asynchronously and return the response.
Args:
@@ -369,16 +301,13 @@ async def aget_llm_response(
messages: The messages to send to the LLM.
callbacks: List of callbacks for the LLM call.
printer: Printer instance for output.
tools: Optional list of tool schemas for native function calling.
available_functions: Optional dict mapping function names to callables.
from_task: Optional task context for the LLM call.
from_agent: Optional agent context for the LLM call.
response_model: Optional Pydantic model for structured outputs.
executor_context: Optional executor context for hook invocation.
Returns:
The response from the LLM as a string, or tool call results if
native function calling is used.
The response from the LLM as a string.
Raises:
Exception: If an error occurs.
@@ -392,9 +321,7 @@ async def aget_llm_response(
try:
answer = await llm.acall(
messages,
tools=tools,
callbacks=callbacks,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent, # type: ignore[arg-type]
response_model=response_model,

View File

@@ -22,9 +22,7 @@ class SystemPromptResult(StandardPromptResult):
user: Annotated[str, "The user prompt component"]
COMPONENTS = Literal[
"role_playing", "tools", "no_tools", "native_tools", "task", "native_task"
]
COMPONENTS = Literal["role_playing", "tools", "no_tools", "task"]
class Prompts(BaseModel):
@@ -38,10 +36,6 @@ class Prompts(BaseModel):
has_tools: bool = Field(
default=False, description="Indicates if the agent has access to tools"
)
use_native_tool_calling: bool = Field(
default=False,
description="Whether to use native function calling instead of ReAct format",
)
system_template: str | None = Field(
default=None, description="Custom system prompt template"
)
@@ -64,24 +58,12 @@ class Prompts(BaseModel):
A dictionary containing the constructed prompt(s).
"""
slices: list[COMPONENTS] = ["role_playing"]
# When using native tool calling with tools, use native_tools instructions
# When using ReAct pattern with tools, use tools instructions
# When no tools are available, use no_tools instructions
if self.has_tools:
if self.use_native_tool_calling:
slices.append("native_tools")
else:
slices.append("tools")
slices.append("tools")
else:
slices.append("no_tools")
system: str = self._build_prompt(slices)
# Use native_task for native tool calling (no "Thought:" prompt)
# Use task for ReAct pattern (includes "Thought:" prompt)
task_slice: COMPONENTS = (
"native_task" if self.use_native_tool_calling else "task"
)
slices.append(task_slice)
slices.append("task")
if (
not self.system_template
@@ -90,7 +72,7 @@ class Prompts(BaseModel):
):
return SystemPromptResult(
system=system,
user=self._build_prompt([task_slice]),
user=self._build_prompt(["task"]),
prompt=self._build_prompt(slices),
)
return StandardPromptResult(

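The slice selection removed above is a small decision table; restated as plain Python for clarity (names exactly as in the diff):

slices = ["role_playing"]
if has_tools:
    # Native function calling gets lighter instructions than the ReAct format.
    slices.append("native_tools" if use_native_tool_calling else "tools")
else:
    slices.append("no_tools")
# native_task omits the trailing "Thought:" scaffold that ReAct relies on.
slices.append("native_task" if use_native_tool_calling else "task")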
View File

@@ -1,323 +0,0 @@
from __future__ import annotations
import os
import uuid
import pytest
import pytest_asyncio
from a2a.client import ClientFactory
from a2a.types import AgentCard, Message, Part, Role, TaskState, TextPart
from crewai.a2a.updates.polling.handler import PollingHandler
from crewai.a2a.updates.streaming.handler import StreamingHandler
A2A_TEST_ENDPOINT = os.getenv("A2A_TEST_ENDPOINT", "http://localhost:9999")
@pytest_asyncio.fixture
async def a2a_client():
"""Create A2A client for test server."""
client = await ClientFactory.connect(A2A_TEST_ENDPOINT)
yield client
await client.close()
@pytest.fixture
def test_message() -> Message:
"""Create a simple test message."""
return Message(
role=Role.user,
parts=[Part(root=TextPart(text="What is 2 + 2?"))],
message_id=str(uuid.uuid4()),
)
@pytest_asyncio.fixture
async def agent_card(a2a_client) -> AgentCard:
"""Fetch the real agent card from the server."""
return await a2a_client.get_card()
class TestA2AAgentCardFetching:
"""Integration tests for agent card fetching."""
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_fetch_agent_card(self, a2a_client) -> None:
"""Test fetching an agent card from the server."""
card = await a2a_client.get_card()
assert card is not None
assert card.name == "GPT Assistant"
assert card.url is not None
assert card.capabilities is not None
assert card.capabilities.streaming is True
class TestA2APollingIntegration:
"""Integration tests for A2A polling handler."""
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_polling_completes_task(
self,
a2a_client,
test_message: Message,
agent_card: AgentCard,
) -> None:
"""Test that polling handler completes a task successfully."""
new_messages: list[Message] = []
result = await PollingHandler.execute(
client=a2a_client,
message=test_message,
new_messages=new_messages,
agent_card=agent_card,
polling_interval=0.5,
polling_timeout=30.0,
)
assert isinstance(result, dict)
assert result["status"] == TaskState.completed
assert result.get("result") is not None
assert "4" in result["result"]
class TestA2AStreamingIntegration:
"""Integration tests for A2A streaming handler."""
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_streaming_completes_task(
self,
a2a_client,
test_message: Message,
agent_card: AgentCard,
) -> None:
"""Test that streaming handler completes a task successfully."""
new_messages: list[Message] = []
result = await StreamingHandler.execute(
client=a2a_client,
message=test_message,
new_messages=new_messages,
agent_card=agent_card,
)
assert isinstance(result, dict)
assert result["status"] == TaskState.completed
assert result.get("result") is not None
class TestA2ATaskOperations:
"""Integration tests for task operations."""
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_send_message_and_get_response(
self,
a2a_client,
test_message: Message,
) -> None:
"""Test sending a message and getting a response."""
from a2a.types import Task
final_task: Task | None = None
async for event in a2a_client.send_message(test_message):
if isinstance(event, tuple) and len(event) >= 1:
task, _ = event
if isinstance(task, Task):
final_task = task
assert final_task is not None
assert final_task.id is not None
assert final_task.status is not None
assert final_task.status.state == TaskState.completed
class TestA2APushNotificationHandler:
"""Tests for push notification handler.
These tests use mocks for the result store since webhook callbacks
are incoming requests that can't be recorded with VCR.
"""
@pytest.fixture
def mock_agent_card(self) -> AgentCard:
"""Create a minimal valid agent card for testing."""
from a2a.types import AgentCapabilities
return AgentCard(
name="Test Agent",
description="Test agent for push notification tests",
url="http://localhost:9999",
version="1.0.0",
capabilities=AgentCapabilities(streaming=True, push_notifications=True),
default_input_modes=["text"],
default_output_modes=["text"],
skills=[],
)
@pytest.fixture
def mock_task(self) -> "Task":
"""Create a minimal valid task for testing."""
from a2a.types import Task, TaskStatus
return Task(
id="task-123",
context_id="ctx-123",
status=TaskStatus(state=TaskState.working),
)
@pytest.mark.asyncio
async def test_push_handler_waits_for_result(
self,
mock_agent_card: AgentCard,
mock_task,
) -> None:
"""Test that push handler waits for result from store."""
from unittest.mock import AsyncMock, MagicMock
from a2a.types import Task, TaskStatus
from pydantic import AnyHttpUrl
from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler
completed_task = Task(
id="task-123",
context_id="ctx-123",
status=TaskStatus(state=TaskState.completed),
history=[],
)
mock_store = MagicMock()
mock_store.wait_for_result = AsyncMock(return_value=completed_task)
async def mock_send_message(*args, **kwargs):
yield (mock_task, None)
mock_client = MagicMock()
mock_client.send_message = mock_send_message
config = PushNotificationConfig(
url=AnyHttpUrl("http://localhost:8080/a2a/callback"),
token="secret-token",
result_store=mock_store,
)
test_msg = Message(
role=Role.user,
parts=[Part(root=TextPart(text="What is 2+2?"))],
message_id="msg-001",
)
new_messages: list[Message] = []
result = await PushNotificationHandler.execute(
client=mock_client,
message=test_msg,
new_messages=new_messages,
agent_card=mock_agent_card,
config=config,
result_store=mock_store,
polling_timeout=30.0,
polling_interval=1.0,
)
mock_store.wait_for_result.assert_called_once_with(
task_id="task-123",
timeout=30.0,
poll_interval=1.0,
)
assert result["status"] == TaskState.completed
@pytest.mark.asyncio
async def test_push_handler_returns_failure_on_timeout(
self,
mock_agent_card: AgentCard,
) -> None:
"""Test that push handler returns failure when result store times out."""
from unittest.mock import AsyncMock, MagicMock
from a2a.types import Task, TaskStatus
from pydantic import AnyHttpUrl
from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler
mock_store = MagicMock()
mock_store.wait_for_result = AsyncMock(return_value=None)
working_task = Task(
id="task-456",
context_id="ctx-456",
status=TaskStatus(state=TaskState.working),
)
async def mock_send_message(*args, **kwargs):
yield (working_task, None)
mock_client = MagicMock()
mock_client.send_message = mock_send_message
config = PushNotificationConfig(
url=AnyHttpUrl("http://localhost:8080/a2a/callback"),
token="token",
result_store=mock_store,
)
test_msg = Message(
role=Role.user,
parts=[Part(root=TextPart(text="test"))],
message_id="msg-002",
)
new_messages: list[Message] = []
result = await PushNotificationHandler.execute(
client=mock_client,
message=test_msg,
new_messages=new_messages,
agent_card=mock_agent_card,
config=config,
result_store=mock_store,
polling_timeout=5.0,
polling_interval=0.5,
)
assert result["status"] == TaskState.failed
assert "timeout" in result.get("error", "").lower()
@pytest.mark.asyncio
async def test_push_handler_requires_config(
self,
mock_agent_card: AgentCard,
) -> None:
"""Test that push handler fails gracefully without config."""
from unittest.mock import MagicMock
from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler
mock_client = MagicMock()
test_msg = Message(
role=Role.user,
parts=[Part(root=TextPart(text="test"))],
message_id="msg-003",
)
new_messages: list[Message] = []
result = await PushNotificationHandler.execute(
client=mock_client,
message=test_msg,
new_messages=new_messages,
agent_card=mock_agent_card,
)
assert result["status"] == TaskState.failed
assert "config" in result.get("error", "").lower()

View File

@@ -360,3 +360,92 @@ def test_integration_valid_and_invalid():
# TODO: ADD TEST TO MAKE SURE ** REMOVAL DOESN'T MESS UP ANYTHING
# Tests for Action: None handling (Issue #4186)
def test_action_none_basic():
"""Test that 'Action: None' is parsed as AgentFinish."""
text = "Thought: I cannot use any tool for this.\nAction: None"
result = parser.parse(text)
assert isinstance(result, AgentFinish)
assert "I cannot use any tool for this." in result.output
def test_action_none_with_reason_in_parentheses():
"""Test 'Action: None (reason)' format."""
text = "Thought: The tool is not available.\nAction: None (direct response required)"
result = parser.parse(text)
assert isinstance(result, AgentFinish)
assert "The tool is not available." in result.output
def test_action_none_lowercase():
"""Test that 'Action: none' (lowercase) is handled."""
text = "Thought: I should respond directly.\nAction: none"
result = parser.parse(text)
assert isinstance(result, AgentFinish)
assert "I should respond directly." in result.output
def test_action_na():
"""Test that 'Action: N/A' is handled."""
text = "Thought: No action needed here.\nAction: N/A"
result = parser.parse(text)
assert isinstance(result, AgentFinish)
assert "No action needed here." in result.output
def test_action_na_lowercase():
"""Test that 'Action: n/a' (lowercase) is handled."""
text = "Thought: This requires a direct answer.\nAction: n/a"
result = parser.parse(text)
assert isinstance(result, AgentFinish)
assert "This requires a direct answer." in result.output
def test_action_none_with_dash_separator():
"""Test 'Action: None - reason' format."""
text = "Thought: I need to provide a direct response.\nAction: None - direct response"
result = parser.parse(text)
assert isinstance(result, AgentFinish)
assert "I need to provide a direct response." in result.output
def test_action_none_with_additional_content():
"""Test 'Action: None' with additional content after."""
text = "Thought: I analyzed the request.\nAction: None\nHere is my direct response to your question."
result = parser.parse(text)
assert isinstance(result, AgentFinish)
assert "I analyzed the request." in result.output
def test_action_no_action():
"""Test that 'Action: no action' is handled."""
text = "Thought: I will respond without using tools.\nAction: no action"
result = parser.parse(text)
assert isinstance(result, AgentFinish)
assert "I will respond without using tools." in result.output
def test_action_none_without_thought():
"""Test 'Action: None' without a thought prefix."""
text = "Action: None"
result = parser.parse(text)
assert isinstance(result, AgentFinish)
assert result.output == "I cannot perform this action with the available tools."
def test_action_none_preserves_original_text():
"""Test that the original text is preserved in the result."""
text = "Thought: I cannot delegate this task.\nAction: None"
result = parser.parse(text)
assert isinstance(result, AgentFinish)
assert result.text == text
def test_action_none_with_colon_separator():
"""Test 'Action: None: reason' format."""
text = "Thought: Direct response needed.\nAction: None: providing direct answer"
result = parser.parse(text)
assert isinstance(result, AgentFinish)
assert "Direct response needed." in result.output

View File

@@ -1,479 +0,0 @@
"""Integration tests for native tool calling functionality.
These tests verify that agents can use native function calling
when the LLM supports it, across multiple providers.
"""
from __future__ import annotations
import os
from typing import Any
from unittest.mock import patch, MagicMock
import pytest
from pydantic import BaseModel, Field
from crewai import Agent, Crew, Task
from crewai.llm import LLM
from crewai.tools.base_tool import BaseTool
# Check for optional provider availability
try:
import anthropic
HAS_ANTHROPIC = True
except ImportError:
HAS_ANTHROPIC = False
try:
import google.genai
HAS_GOOGLE_GENAI = True
except ImportError:
HAS_GOOGLE_GENAI = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
class CalculatorInput(BaseModel):
"""Input schema for calculator tool."""
expression: str = Field(description="Mathematical expression to evaluate")
class CalculatorTool(BaseTool):
"""A calculator tool that performs mathematical calculations."""
name: str = "calculator"
description: str = "Perform mathematical calculations. Use this for any math operations."
args_schema: type[BaseModel] = CalculatorInput
def _run(self, expression: str) -> str:
"""Execute the calculation."""
try:
# NOTE: eval() is not safe in general; tolerated here for test-only input
result = eval(expression) # noqa: S307
return f"The result of {expression} is {result}"
except Exception as e:
return f"Error calculating {expression}: {e}"
class WeatherInput(BaseModel):
"""Input schema for weather tool."""
location: str = Field(description="City name to get weather for")
class WeatherTool(BaseTool):
"""A mock weather tool for testing."""
name: str = "get_weather"
description: str = "Get the current weather for a location"
args_schema: type[BaseModel] = WeatherInput
def _run(self, location: str) -> str:
"""Get weather (mock implementation)."""
return f"The weather in {location} is sunny with a temperature of 72°F"
@pytest.fixture
def calculator_tool() -> CalculatorTool:
"""Create a calculator tool for testing."""
return CalculatorTool()
@pytest.fixture
def weather_tool() -> WeatherTool:
"""Create a weather tool for testing."""
return WeatherTool()
# =============================================================================
# OpenAI Provider Tests
# =============================================================================
class TestOpenAINativeToolCalling:
"""Tests for native tool calling with OpenAI models."""
@pytest.mark.vcr()
def test_openai_agent_with_native_tool_calling(
self, calculator_tool: CalculatorTool
) -> None:
"""Test OpenAI agent can use native tool calling."""
agent = Agent(
role="Math Assistant",
goal="Help users with mathematical calculations",
backstory="You are a helpful math assistant.",
tools=[calculator_tool],
llm=LLM(model="gpt-4o-mini"),
verbose=False,
max_iter=3,
)
task = Task(
description="Calculate what is 15 * 8",
expected_output="The result of the calculation",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
assert result is not None
assert result.raw is not None
assert "120" in str(result.raw)
def test_openai_agent_kickoff_with_tools_mocked(
self, calculator_tool: CalculatorTool
) -> None:
"""Test OpenAI agent kickoff with mocked LLM call."""
llm = LLM(model="gpt-4o-mini")
with patch.object(llm, "call", return_value="The answer is 120.") as mock_call:
agent = Agent(
role="Math Assistant",
goal="Calculate math",
backstory="You calculate.",
tools=[calculator_tool],
llm=llm,
verbose=False,
)
task = Task(
description="Calculate 15 * 8",
expected_output="Result",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
assert mock_call.called
assert result is not None
# =============================================================================
# Anthropic Provider Tests
# =============================================================================
@pytest.mark.skipif(not HAS_ANTHROPIC, reason="anthropic package not installed")
class TestAnthropicNativeToolCalling:
"""Tests for native tool calling with Anthropic models."""
@pytest.fixture(autouse=True)
def mock_anthropic_api_key(self):
"""Mock ANTHROPIC_API_KEY for tests."""
if "ANTHROPIC_API_KEY" not in os.environ:
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
yield
else:
yield
@pytest.mark.vcr()
def test_anthropic_agent_with_native_tool_calling(
self, calculator_tool: CalculatorTool
) -> None:
"""Test Anthropic agent can use native tool calling."""
agent = Agent(
role="Math Assistant",
goal="Help users with mathematical calculations",
backstory="You are a helpful math assistant.",
tools=[calculator_tool],
llm=LLM(model="anthropic/claude-3-5-haiku-20241022"),
verbose=False,
max_iter=3,
)
task = Task(
description="Calculate what is 15 * 8",
expected_output="The result of the calculation",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
assert result is not None
assert result.raw is not None
def test_anthropic_agent_kickoff_with_tools_mocked(
self, calculator_tool: CalculatorTool
) -> None:
"""Test Anthropic agent kickoff with mocked LLM call."""
llm = LLM(model="anthropic/claude-3-5-haiku-20241022")
with patch.object(llm, "call", return_value="The answer is 120.") as mock_call:
agent = Agent(
role="Math Assistant",
goal="Calculate math",
backstory="You calculate.",
tools=[calculator_tool],
llm=llm,
verbose=False,
)
task = Task(
description="Calculate 15 * 8",
expected_output="Result",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
assert mock_call.called
assert result is not None
# =============================================================================
# Google/Gemini Provider Tests
# =============================================================================
@pytest.mark.skipif(not HAS_GOOGLE_GENAI, reason="google-genai package not installed")
class TestGeminiNativeToolCalling:
"""Tests for native tool calling with Gemini models."""
@pytest.fixture(autouse=True)
def mock_google_api_key(self):
"""Mock GOOGLE_API_KEY for tests."""
with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-key"}):
yield
@pytest.mark.vcr()
def test_gemini_agent_with_native_tool_calling(
self, calculator_tool: CalculatorTool
) -> None:
"""Test Gemini agent can use native tool calling."""
agent = Agent(
role="Math Assistant",
goal="Help users with mathematical calculations",
backstory="You are a helpful math assistant.",
tools=[calculator_tool],
llm=LLM(model="gemini/gemini-2.0-flash-001"),
verbose=False,
max_iter=3,
)
task = Task(
description="Calculate what is 15 * 8",
expected_output="The result of the calculation",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
assert result is not None
assert result.raw is not None
def test_gemini_agent_kickoff_with_tools_mocked(
self, calculator_tool: CalculatorTool
) -> None:
"""Test Gemini agent kickoff with mocked LLM call."""
llm = LLM(model="gemini/gemini-2.0-flash-001")
with patch.object(llm, "call", return_value="The answer is 120.") as mock_call:
agent = Agent(
role="Math Assistant",
goal="Calculate math",
backstory="You calculate.",
tools=[calculator_tool],
llm=llm,
verbose=False,
)
task = Task(
description="Calculate 15 * 8",
expected_output="Result",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
assert mock_call.called
assert result is not None
# =============================================================================
# Azure Provider Tests
# =============================================================================
class TestAzureNativeToolCalling:
"""Tests for native tool calling with Azure OpenAI models."""
@pytest.fixture(autouse=True)
def mock_azure_env(self):
"""Mock Azure environment variables for tests."""
env_vars = {
"AZURE_API_KEY": "test-key",
"AZURE_API_BASE": "https://test.openai.azure.com",
"AZURE_API_VERSION": "2024-02-15-preview",
}
with patch.dict(os.environ, env_vars):
yield
def test_azure_agent_kickoff_with_tools_mocked(
self, calculator_tool: CalculatorTool
) -> None:
"""Test Azure agent kickoff with mocked LLM call."""
llm = LLM(
model="azure/gpt-4o-mini",
api_key="test-key",
base_url="https://test.openai.azure.com",
)
with patch.object(llm, "call", return_value="The answer is 120.") as mock_call:
agent = Agent(
role="Math Assistant",
goal="Calculate math",
backstory="You calculate.",
tools=[calculator_tool],
llm=llm,
verbose=False,
)
task = Task(
description="Calculate 15 * 8",
expected_output="Result",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
assert mock_call.called
assert result is not None
# =============================================================================
# Bedrock Provider Tests
# =============================================================================
@pytest.mark.skipif(not HAS_BOTO3, reason="boto3 package not installed")
class TestBedrockNativeToolCalling:
"""Tests for native tool calling with AWS Bedrock models."""
@pytest.fixture(autouse=True)
def mock_aws_env(self):
"""Mock AWS environment variables for tests."""
env_vars = {
"AWS_ACCESS_KEY_ID": "test-key",
"AWS_SECRET_ACCESS_KEY": "test-secret",
"AWS_REGION": "us-east-1",
}
with patch.dict(os.environ, env_vars):
yield
def test_bedrock_agent_kickoff_with_tools_mocked(
self, calculator_tool: CalculatorTool
) -> None:
"""Test Bedrock agent kickoff with mocked LLM call."""
llm = LLM(model="bedrock/anthropic.claude-3-haiku-20240307-v1:0")
with patch.object(llm, "call", return_value="The answer is 120.") as mock_call:
agent = Agent(
role="Math Assistant",
goal="Calculate math",
backstory="You calculate.",
tools=[calculator_tool],
llm=llm,
verbose=False,
)
task = Task(
description="Calculate 15 * 8",
expected_output="Result",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
assert mock_call.called
assert result is not None
# =============================================================================
# Cross-Provider Native Tool Calling Behavior Tests
# =============================================================================
class TestNativeToolCallingBehavior:
"""Tests for native tool calling behavior across providers."""
def test_supports_function_calling_check(self) -> None:
"""Test that supports_function_calling() is properly checked."""
# OpenAI should support function calling
openai_llm = LLM(model="gpt-4o-mini")
assert hasattr(openai_llm, "supports_function_calling")
assert openai_llm.supports_function_calling() is True
@pytest.mark.skipif(not HAS_ANTHROPIC, reason="anthropic package not installed")
def test_anthropic_supports_function_calling(self) -> None:
"""Test that Anthropic models support function calling."""
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
llm = LLM(model="anthropic/claude-3-5-haiku-20241022")
assert hasattr(llm, "supports_function_calling")
assert llm.supports_function_calling() is True
@pytest.mark.skipif(not HAS_GOOGLE_GENAI, reason="google-genai package not installed")
def test_gemini_supports_function_calling(self) -> None:
"""Test that Gemini models support function calling."""
# with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-key"}):
print("GOOGLE_API_KEY", os.getenv("GOOGLE_API_KEY"))
llm = LLM(model="gemini/gemini-2.5-flash")
assert hasattr(llm, "supports_function_calling")
# For Gemini, supports_function_calling() is reported via its supports_tools property
assert llm.supports_function_calling() is True
# =============================================================================
# Token Usage Tests
# =============================================================================
class TestNativeToolCallingTokenUsage:
"""Tests for token usage with native tool calling."""
@pytest.mark.vcr()
def test_openai_native_tool_calling_token_usage(
self, calculator_tool: CalculatorTool
) -> None:
"""Test token usage tracking with OpenAI native tool calling."""
agent = Agent(
role="Calculator",
goal="Perform calculations efficiently",
backstory="You calculate things.",
tools=[calculator_tool],
llm=LLM(model="gpt-4o-mini"),
verbose=False,
max_iter=3,
)
task = Task(
description="What is 100 / 4?",
expected_output="The result",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
assert result is not None
assert result.token_usage is not None
assert result.token_usage.total_tokens > 0
assert result.token_usage.successful_requests >= 1
print(f"\n[OPENAI NATIVE TOOL CALLING TOKEN USAGE]")
print(f" Prompt tokens: {result.token_usage.prompt_tokens}")
print(f" Completion tokens: {result.token_usage.completion_tokens}")
print(f" Total tokens: {result.token_usage.total_tokens}")

View File

@@ -1,44 +0,0 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"streaming":true},"defaultInputModes":["text"],"defaultOutputModes":["text"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999/","version":"1.0.0"}'
headers:
content-length:
- '1198'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:17:00 GMT
server:
- uvicorn
status:
code: 200
message: OK
version: 1

View File

@@ -1,126 +0,0 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"streaming":true},"defaultInputModes":["text"],"defaultOutputModes":["text"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999/","version":"1.0.0"}'
headers:
content-length:
- '1198'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:16:58 GMT
server:
- uvicorn
status:
code: 200
message: OK
- request:
body: '{"id":"e5ac2160-ae9b-4bf9-aad7-14bf0d53d6d9","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":[],"blocking":true},"message":{"kind":"message","messageId":"e1e63c75-3ea0-49fb-b512-5128a2476416","parts":[{"kind":"text","text":"What
is 2 + 2?"}],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '301'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999/
response:
body:
string: "data: {\"id\":\"e5ac2160-ae9b-4bf9-aad7-14bf0d53d6d9\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"b9e14c1b-734d-4d1e-864a-e6dda5231d71\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"0dd4d3af-f35d-409d-9462-01218e5641f9\"}}\r\n\r\ndata:
{\"id\":\"e5ac2160-ae9b-4bf9-aad7-14bf0d53d6d9\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"b9e14c1b-734d-4d1e-864a-e6dda5231d71\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"0dd4d3af-f35d-409d-9462-01218e5641f9\"}}\r\n\r\ndata:
{\"id\":\"e5ac2160-ae9b-4bf9-aad7-14bf0d53d6d9\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"b9e14c1b-734d-4d1e-864a-e6dda5231d71\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"54bb7ff3-f2c0-4eb3-b427-bf1c8cf90832\",\"parts\":[{\"kind\":\"text\",\"text\":\"\\n[Tool:
calculator] 2 + 2 = 4\\n2 + 2 equals 4.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"0dd4d3af-f35d-409d-9462-01218e5641f9\"}}\r\n\r\n"
headers:
Transfer-Encoding:
- chunked
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Tue, 06 Jan 2026 14:16:58 GMT
server:
- uvicorn
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"id":"cb1e4af3-d2d0-4848-96b8-7082ee6171d1","jsonrpc":"2.0","method":"tasks/get","params":{"historyLength":100,"id":"0dd4d3af-f35d-409d-9462-01218e5641f9"}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
content-length:
- '157'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999/
response:
body:
string: '{"id":"cb1e4af3-d2d0-4848-96b8-7082ee6171d1","jsonrpc":"2.0","result":{"contextId":"b9e14c1b-734d-4d1e-864a-e6dda5231d71","history":[{"contextId":"b9e14c1b-734d-4d1e-864a-e6dda5231d71","kind":"message","messageId":"e1e63c75-3ea0-49fb-b512-5128a2476416","parts":[{"kind":"text","text":"What
is 2 + 2?"}],"role":"user","taskId":"0dd4d3af-f35d-409d-9462-01218e5641f9"}],"id":"0dd4d3af-f35d-409d-9462-01218e5641f9","kind":"task","status":{"message":{"kind":"message","messageId":"54bb7ff3-f2c0-4eb3-b427-bf1c8cf90832","parts":[{"kind":"text","text":"\n[Tool:
calculator] 2 + 2 = 4\n2 + 2 equals 4."}],"role":"agent"},"state":"completed"}}}'
headers:
content-length:
- '635'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:17:00 GMT
server:
- uvicorn
status:
code: 200
message: OK
version: 1
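For orientation, here is a minimal sketch of the message/stream call recorded in the cassette above; it assumes httpx is installed and the same local A2A server is listening on port 9999:
import uuid

import httpx

payload = {
    "id": str(uuid.uuid4()),
    "jsonrpc": "2.0",
    "method": "message/stream",
    "params": {
        "configuration": {"acceptedOutputModes": [], "blocking": True},
        "message": {
            "kind": "message",
            "messageId": str(uuid.uuid4()),
            "parts": [{"kind": "text", "text": "What is 2 + 2?"}],
            "role": "user",
        },
    },
}
# Stream the SSE response; each "data:" line carries one status-update frame
# (submitted -> working -> completed), mirroring the recording above.
with httpx.stream(
    "POST",
    "http://localhost:9999/",
    json=payload,
    headers={"accept": "*/*, text/event-stream"},
) as response:
    for line in response.iter_lines():
        if line.startswith("data: "):
            print(line[len("data: "):])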

View File

@@ -1,90 +0,0 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"streaming":true},"defaultInputModes":["text"],"defaultOutputModes":["text"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999/","version":"1.0.0"}'
headers:
content-length:
- '1198'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:17:02 GMT
server:
- uvicorn
status:
code: 200
message: OK
- request:
body: '{"id":"8cf25b61-8884-4246-adce-fccb32e176ab","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":[],"blocking":true},"message":{"kind":"message","messageId":"c145297f-7331-4835-adcc-66b51de92a2b","parts":[{"kind":"text","text":"What
is 2 + 2?"}],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '301'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999/
response:
body:
string: "data: {\"id\":\"8cf25b61-8884-4246-adce-fccb32e176ab\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"30601267-ab3b-48ef-afc8-916c37a18651\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"3083d3da-4739-4f4f-a4e8-7c048ea819c1\"}}\r\n\r\ndata:
{\"id\":\"8cf25b61-8884-4246-adce-fccb32e176ab\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"30601267-ab3b-48ef-afc8-916c37a18651\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"3083d3da-4739-4f4f-a4e8-7c048ea819c1\"}}\r\n\r\ndata:
{\"id\":\"8cf25b61-8884-4246-adce-fccb32e176ab\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"30601267-ab3b-48ef-afc8-916c37a18651\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"25f81e3c-b7e8-48b5-a98a-4066f3637a13\",\"parts\":[{\"kind\":\"text\",\"text\":\"\\n[Tool:
calculator] 2 + 2 = 4\\n2 + 2 equals 4.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"3083d3da-4739-4f4f-a4e8-7c048ea819c1\"}}\r\n\r\n"
headers:
Transfer-Encoding:
- chunked
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Tue, 06 Jan 2026 14:17:02 GMT
server:
- uvicorn
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
version: 1

View File

@@ -1,90 +0,0 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"streaming":true},"defaultInputModes":["text"],"defaultOutputModes":["text"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999/","version":"1.0.0"}'
headers:
content-length:
- '1198'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:17:00 GMT
server:
- uvicorn
status:
code: 200
message: OK
- request:
body: '{"id":"3a17c6bf-8db6-45a6-8535-34c45c0c4936","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":[],"blocking":true},"message":{"kind":"message","messageId":"712558a3-6d92-4591-be8a-9dd8566dde82","parts":[{"kind":"text","text":"What
is 2 + 2?"}],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '301'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999/
response:
body:
string: "data: {\"id\":\"3a17c6bf-8db6-45a6-8535-34c45c0c4936\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"ca2fbbc9-761e-45d9-a929-0c68b1f8acbf\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"c6e88db0-36e9-4269-8b9a-ecb6dfdcf6a1\"}}\r\n\r\ndata:
{\"id\":\"3a17c6bf-8db6-45a6-8535-34c45c0c4936\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"ca2fbbc9-761e-45d9-a929-0c68b1f8acbf\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"c6e88db0-36e9-4269-8b9a-ecb6dfdcf6a1\"}}\r\n\r\ndata:
{\"id\":\"3a17c6bf-8db6-45a6-8535-34c45c0c4936\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"ca2fbbc9-761e-45d9-a929-0c68b1f8acbf\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"916324aa-fd25-4849-bceb-c4644e2fcbb0\",\"parts\":[{\"kind\":\"text\",\"text\":\"\\n[Tool:
calculator] 2 + 2 = 4\\n2 + 2 equals 4.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"c6e88db0-36e9-4269-8b9a-ecb6dfdcf6a1\"}}\r\n\r\n"
headers:
Transfer-Encoding:
- chunked
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Tue, 06 Jan 2026 14:17:00 GMT
server:
- uvicorn
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
version: 1

View File

@@ -1,113 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"user","content":"Tell me a joke."}],"model":"gpt-4o-mini","stop":[]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '90'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- async:asyncio
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 2.14.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.14
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CvErx9mbnUKFHKkhPChO93eUzKJqy\",\n \"object\":
\"chat.completion\",\n \"created\": 1767757889,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Why did the scarecrow win an award?
\\n\\nBecause he was outstanding in his field!\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 12,\n \"completion_tokens\":
18,\n \"total_tokens\": 30,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
headers:
Access-Control-Expose-Headers:
- ACCESS-CONTROL-XXX
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Wed, 07 Jan 2026 03:51:29 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
content-length:
- '887'
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '466'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '483'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,113 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"user","content":"Tell me a joke."}],"model":"gpt-4o-mini","stop":[]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '90'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 2.14.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.14
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CugAsv9iAHdiGddGDHcZWEp7ZV7cB\",\n \"object\":
\"chat.completion\",\n \"created\": 1767624522,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Why don't skeletons fight each other?
\\n\\nThey don't have the guts!\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 12,\n \"completion_tokens\":
15,\n \"total_tokens\": 27,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Mon, 05 Jan 2026 14:48:43 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
content-length:
- '874'
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '424'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1017'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,179 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"user","content":"Tell me a joke."}],"model":"gpt-4o-mini","stop":[],"stream":true,"stream_options":{"include_usage":true}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '144'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- async:asyncio
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 2.14.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.14
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"k9LESwMhk"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"Why"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"tYMBX9z8"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
did"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"X5lpC48"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Ns5pnmO"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
scare"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"cUTYl"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"crow"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"ZvHPszH"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
win"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"pLKQ5rM"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
an"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Yl8vxgvM"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
award"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"xfxd0"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"SFxdiZP3Uh"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
\n\n"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Sysruv"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"Because"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"OeZH"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
he"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"epBJpPYm"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
was"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"5Bofkug"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
outstanding"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"ylIDIBTCqSLy3tA"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
in"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"lLi2lQc4"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
his"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"fi47Jij"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
field"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Kkiyw"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"RMcUfqa93e"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null,"obfuscation":"rAtJI"}
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[],"usage":{"prompt_tokens":12,"completion_tokens":18,"total_tokens":30,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"aTyTRaiahL"}
data: [DONE]
'
headers:
Access-Control-Expose-Headers:
- ACCESS-CONTROL-XXX
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- text/event-stream; charset=utf-8
Date:
- Wed, 07 Jan 2026 04:09:13 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '243'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '645'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1
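As a reference point, a small sketch (assuming the input is the raw "data:" lines recorded above) of how a client reassembles the streamed deltas and picks up the final usage frame:
import json


def reassemble(sse_lines: list[str]) -> tuple[str, dict | None]:
    """Join delta content and return (text, usage) from an OpenAI-style SSE stream."""
    parts: list[str] = []
    usage: dict | None = None
    for line in sse_lines:
        if not line.startswith("data: ") or line == "data: [DONE]":
            continue
        chunk = json.loads(line[len("data: "):])
        for choice in chunk.get("choices", []):
            parts.append(choice.get("delta", {}).get("content") or "")
        usage = chunk.get("usage") or usage  # only the final frame carries usage
    return "".join(parts), usage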

View File

@@ -1,179 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"user","content":"Tell me a joke."}],"model":"gpt-4o-mini","stop":[],"stream":true,"stream_options":{"include_usage":true}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '144'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 2.14.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.14
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"SVnFynat2"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"Why"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"M0Y4Qurw"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
did"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"LknkzkM"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"45ePnqI"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
scare"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"DsJ1r"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"crow"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"9sXjMg0"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
win"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"UlTRXCu"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
an"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"He218dPh"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
award"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"CO1Dc"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"nHS3XxEjuW"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
\n\n"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"IhBQDR"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"Because"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"TJzX"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
he"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"AjRyStfn"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
was"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"2AZtzyA"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
outstanding"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"XfziOItr8wziIap"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
in"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"7hXp54s6"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
his"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"RPmgnK3"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
field"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"uqtNk"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Wziup4uj7N"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null,"obfuscation":"q9paY"}
data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[],"usage":{"prompt_tokens":12,"completion_tokens":18,"total_tokens":30,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"TWmOWpZx0s"}
data: [DONE]
'
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- text/event-stream; charset=utf-8
Date:
- Mon, 05 Jan 2026 14:48:44 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '227'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '645'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -515,94 +515,6 @@ def test_azure_supports_stop_words():
assert llm.supports_stop_words() == True
def test_azure_gpt5_models_do_not_support_stop_words():
"""
Test that GPT-5 family models do not support stop words.
GPT-5 models use the Responses API which doesn't support stop sequences.
See: https://learn.microsoft.com/en-us/azure/ai-foundry/foundry-models/concepts/models-sold-directly-by-azure
"""
# GPT-5 base models
gpt5_models = [
"azure/gpt-5",
"azure/gpt-5-mini",
"azure/gpt-5-nano",
"azure/gpt-5-chat",
# GPT-5.1 series
"azure/gpt-5.1",
"azure/gpt-5.1-chat",
"azure/gpt-5.1-codex",
"azure/gpt-5.1-codex-mini",
# GPT-5.2 series
"azure/gpt-5.2",
"azure/gpt-5.2-chat",
]
for model_name in gpt5_models:
llm = LLM(model=model_name)
assert llm.supports_stop_words() == False, f"Expected {model_name} to NOT support stop words"
def test_azure_o_series_models_do_not_support_stop_words():
"""
Test that o-series reasoning models do not support stop words.
"""
o_series_models = [
"azure/o1",
"azure/o1-mini",
"azure/o3",
"azure/o3-mini",
"azure/o4",
"azure/o4-mini",
]
for model_name in o_series_models:
llm = LLM(model=model_name)
assert llm.supports_stop_words() == False, f"Expected {model_name} to NOT support stop words"
def test_azure_responses_api_models_do_not_support_stop_words():
"""
Test that models using the Responses API do not support stop words.
"""
responses_api_models = [
"azure/computer-use-preview",
]
for model_name in responses_api_models:
llm = LLM(model=model_name)
assert llm.supports_stop_words() == False, f"Expected {model_name} to NOT support stop words"
def test_azure_stop_words_not_included_for_unsupported_models():
"""
Test that stop words are not included in completion params for models that don't support them.
"""
with patch.dict(os.environ, {
"AZURE_API_KEY": "test-key",
"AZURE_ENDPOINT": "https://models.inference.ai.azure.com"
}):
# Test GPT-5 model - stop should NOT be included even if set
llm_gpt5 = LLM(
model="azure/gpt-5-nano",
stop=["STOP", "END"]
)
params = llm_gpt5._prepare_completion_params(
messages=[{"role": "user", "content": "test"}]
)
assert "stop" not in params, "stop should not be included for GPT-5 models"
# Test regular model - stop SHOULD be included
llm_gpt4 = LLM(
model="azure/gpt-4",
stop=["STOP", "END"]
)
params = llm_gpt4._prepare_completion_params(
messages=[{"role": "user", "content": "test"}]
)
assert "stop" in params, "stop should be included for GPT-4 models"
assert params["stop"] == ["STOP", "END"]
def test_azure_context_window_size():
"""
Test that Azure models return correct context window sizes

View File

@@ -4500,71 +4500,6 @@ def test_crew_copy_with_memory():
pytest.fail(f"Copying crew raised an unexpected exception: {e}")
def test_sets_parent_flow_when_using_crewbase_pattern_inside_flow():
@CrewBase
class TestCrew:
agents_config = None
tasks_config = None
agents: list[BaseAgent]
tasks: list[Task]
@agent
def researcher(self) -> Agent:
return Agent(
role="Researcher",
goal="Research things",
backstory="Expert researcher",
)
@agent
def writer_agent(self) -> Agent:
return Agent(
role="Writer",
goal="Write things",
backstory="Expert writer",
)
@task
def research_task(self) -> Task:
return Task(
description="Test task for researcher",
expected_output="output",
agent=self.researcher(),
)
@task
def write_task(self) -> Task:
return Task(
description="Test task for writer",
expected_output="output",
agent=self.writer_agent(),
)
@crew
def crew(self) -> Crew:
return Crew(
agents=self.agents,
tasks=self.tasks,
process=Process.sequential,
)
captured_crew = None
class MyFlow(Flow):
@start()
def start_method(self):
nonlocal captured_crew
captured_crew = TestCrew().crew()
return captured_crew
flow = MyFlow()
flow.kickoff()
assert captured_crew is not None
assert captured_crew.parent_flow is flow
def test_sets_parent_flow_when_outside_flow(researcher, writer):
crew = Crew(
agents=[researcher, writer],

View File

@@ -877,116 +877,3 @@ def test_validate_model_in_constants():
LLM._validate_model_in_constants("anthropic.claude-future-v1:0", "bedrock")
is True
)
@pytest.mark.vcr(record_mode="once",decode_compressed_response=True)
def test_usage_info_non_streaming_with_call():
llm = LLM(model="gpt-4o-mini", is_litellm=True)
assert llm._token_usage == {
"total_tokens": 0,
"prompt_tokens": 0,
"completion_tokens": 0,
"successful_requests": 0,
"cached_prompt_tokens": 0,
}
assert llm.stream is False
with patch.object(
llm, "_handle_non_streaming_response", wraps=llm._handle_non_streaming_response
) as mock_handle:
llm.call("Tell me a joke.")
mock_handle.assert_called_once()
assert llm._token_usage["total_tokens"] > 0
assert llm._token_usage["prompt_tokens"] > 0
assert llm._token_usage["completion_tokens"] > 0
assert llm._token_usage["successful_requests"] == 1
@pytest.mark.vcr(record_mode="once",decode_compressed_response=True)
def test_usage_info_streaming_with_call():
llm = LLM(model="gpt-4o-mini", is_litellm=True, stream=True)
assert llm._token_usage == {
"total_tokens": 0,
"prompt_tokens": 0,
"completion_tokens": 0,
"successful_requests": 0,
"cached_prompt_tokens": 0,
}
assert llm.stream is True
with patch.object(
llm, "_handle_streaming_response", wraps=llm._handle_streaming_response
) as mock_handle:
llm.call("Tell me a joke.")
mock_handle.assert_called_once()
assert llm._token_usage["total_tokens"] > 0
assert llm._token_usage["prompt_tokens"] > 0
assert llm._token_usage["completion_tokens"] > 0
assert llm._token_usage["successful_requests"] == 1
@pytest.mark.asyncio
@pytest.mark.vcr(record_mode="once",decode_compressed_response=True,match_on=["method", "scheme", "host", "path", "body"])
async def test_usage_info_non_streaming_with_acall():
llm = LLM(
model="openai/gpt-4o-mini",
is_litellm=True,
stream=False,
)
# sanity check
assert llm._token_usage == {
"total_tokens": 0,
"prompt_tokens": 0,
"completion_tokens": 0,
"successful_requests": 0,
"cached_prompt_tokens": 0,
}
with patch.object(
llm, "_ahandle_non_streaming_response", wraps=llm._ahandle_non_streaming_response
) as mock_handle:
result = await llm.acall("Tell me a joke.")
mock_handle.assert_called_once()
# token usage assertions (robust)
assert llm._token_usage["successful_requests"] == 1
assert llm._token_usage["prompt_tokens"] > 0
assert llm._token_usage["completion_tokens"] > 0
assert llm._token_usage["total_tokens"] > 0
assert len(result) > 0
@pytest.mark.asyncio
@pytest.mark.vcr(record_mode="none",decode_compressed_response=True,match_on=["method", "scheme", "host", "path", "body"])
async def test_usage_info_streaming_with_acall():
llm = LLM(
model="gpt-4o-mini",
is_litellm=True,
stream=True,
)
assert llm.stream is True
assert llm._token_usage == {
"total_tokens": 0,
"prompt_tokens": 0,
"completion_tokens": 0,
"successful_requests": 0,
"cached_prompt_tokens": 0,
}
with patch.object(
llm, "_ahandle_streaming_response", wraps=llm._ahandle_streaming_response
) as mock_handle:
result = await llm.acall("Tell me a joke.")
mock_handle.assert_called_once()
assert llm._token_usage["successful_requests"] == 1
assert llm._token_usage["prompt_tokens"] > 0
assert llm._token_usage["completion_tokens"] > 0
assert llm._token_usage["total_tokens"] > 0
assert len(result) > 0

View File

@@ -1,214 +0,0 @@
"""Tests for agent utility functions."""
from __future__ import annotations
from typing import Any
import pytest
from pydantic import BaseModel, Field
from crewai.tools.base_tool import BaseTool
from crewai.utilities.agent_utils import convert_tools_to_openai_schema
class CalculatorInput(BaseModel):
"""Input schema for calculator tool."""
expression: str = Field(description="Mathematical expression to evaluate")
class CalculatorTool(BaseTool):
"""A simple calculator tool for testing."""
name: str = "calculator"
description: str = "Perform mathematical calculations"
args_schema: type[BaseModel] = CalculatorInput
def _run(self, expression: str) -> str:
"""Execute the calculation."""
try:
result = eval(expression) # noqa: S307
return str(result)
except Exception as e:
return f"Error: {e}"
class SearchInput(BaseModel):
"""Input schema for search tool."""
query: str = Field(description="Search query")
max_results: int = Field(default=10, description="Maximum number of results")
class SearchTool(BaseTool):
"""A search tool for testing."""
name: str = "web_search"
description: str = "Search the web for information"
args_schema: type[BaseModel] = SearchInput
def _run(self, query: str, max_results: int = 10) -> str:
"""Execute the search."""
return f"Search results for '{query}' (max {max_results})"
class NoSchemaTool(BaseTool):
"""A tool without an args schema for testing edge cases."""
name: str = "simple_tool"
description: str = "A simple tool with no schema"
def _run(self, **kwargs: Any) -> str:
"""Execute the tool."""
return "Simple tool executed"
class TestConvertToolsToOpenaiSchema:
"""Tests for convert_tools_to_openai_schema function."""
def test_converts_single_tool(self) -> None:
"""Test converting a single tool to OpenAI schema."""
tools = [CalculatorTool()]
schemas, functions = convert_tools_to_openai_schema(tools)
assert len(schemas) == 1
assert len(functions) == 1
schema = schemas[0]
assert schema["type"] == "function"
assert schema["function"]["name"] == "calculator"
assert schema["function"]["description"] == "Perform mathematical calculations"
assert "properties" in schema["function"]["parameters"]
assert "expression" in schema["function"]["parameters"]["properties"]
def test_converts_multiple_tools(self) -> None:
"""Test converting multiple tools to OpenAI schema."""
tools = [CalculatorTool(), SearchTool()]
schemas, functions = convert_tools_to_openai_schema(tools)
assert len(schemas) == 2
assert len(functions) == 2
# Check calculator
calc_schema = next(s for s in schemas if s["function"]["name"] == "calculator")
assert calc_schema["function"]["description"] == "Perform mathematical calculations"
# Check search
search_schema = next(s for s in schemas if s["function"]["name"] == "web_search")
assert search_schema["function"]["description"] == "Search the web for information"
assert "query" in search_schema["function"]["parameters"]["properties"]
assert "max_results" in search_schema["function"]["parameters"]["properties"]
def test_functions_dict_contains_callables(self) -> None:
"""Test that the functions dict maps names to callable run methods."""
tools = [CalculatorTool(), SearchTool()]
schemas, functions = convert_tools_to_openai_schema(tools)
assert "calculator" in functions
assert "web_search" in functions
assert callable(functions["calculator"])
assert callable(functions["web_search"])
def test_function_can_be_called(self) -> None:
"""Test that the returned function can be called."""
tools = [CalculatorTool()]
schemas, functions = convert_tools_to_openai_schema(tools)
result = functions["calculator"](expression="2 + 2")
assert result == "4"
def test_empty_tools_list(self) -> None:
"""Test with an empty tools list."""
schemas, functions = convert_tools_to_openai_schema([])
assert schemas == []
assert functions == {}
def test_schema_has_required_fields(self) -> None:
"""Test that the schema includes required fields information."""
tools = [SearchTool()]
schemas, functions = convert_tools_to_openai_schema(tools)
schema = schemas[0]
params = schema["function"]["parameters"]
# Should have required array
assert "required" in params
assert "query" in params["required"]
def test_tool_without_args_schema(self) -> None:
"""Test converting a tool that doesn't have an args_schema."""
# Create a minimal tool without args_schema
class MinimalTool(BaseTool):
name: str = "minimal"
description: str = "A minimal tool"
def _run(self) -> str:
return "done"
tools = [MinimalTool()]
schemas, functions = convert_tools_to_openai_schema(tools)
assert len(schemas) == 1
schema = schemas[0]
assert schema["function"]["name"] == "minimal"
# Parameters should be empty dict or have minimal schema
assert isinstance(schema["function"]["parameters"], dict)
def test_schema_structure_matches_openai_format(self) -> None:
"""Test that the schema structure matches OpenAI's expected format."""
tools = [CalculatorTool()]
schemas, functions = convert_tools_to_openai_schema(tools)
schema = schemas[0]
# Top level must have "type": "function"
assert schema["type"] == "function"
# Must have "function" key with nested structure
assert "function" in schema
func = schema["function"]
# Function must have name and description
assert "name" in func
assert "description" in func
assert isinstance(func["name"], str)
assert isinstance(func["description"], str)
# Parameters should be a valid JSON schema
assert "parameters" in func
params = func["parameters"]
assert isinstance(params, dict)
def test_removes_redundant_schema_fields(self) -> None:
"""Test that redundant title and description are removed from parameters."""
tools = [CalculatorTool()]
schemas, functions = convert_tools_to_openai_schema(tools)
params = schemas[0]["function"]["parameters"]
# Title should be removed as it's redundant with function name
assert "title" not in params
def test_preserves_field_descriptions(self) -> None:
"""Test that field descriptions are preserved in the schema."""
tools = [SearchTool()]
schemas, functions = convert_tools_to_openai_schema(tools)
params = schemas[0]["function"]["parameters"]
query_prop = params["properties"]["query"]
# Field description should be preserved
assert "description" in query_prop
assert query_prop["description"] == "Search query"
def test_preserves_default_values(self) -> None:
"""Test that default values are preserved in the schema."""
tools = [SearchTool()]
schemas, functions = convert_tools_to_openai_schema(tools)
params = schemas[0]["function"]["parameters"]
max_results_prop = params["properties"]["max_results"]
# Default value should be preserved
assert "default" in max_results_prop
assert max_results_prop["default"] == 10
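# For reference (illustrative, reconstructed from the assertions above), the
# calculator tool converts to roughly this OpenAI function schema:
#
# {
#     "type": "function",
#     "function": {
#         "name": "calculator",
#         "description": "Perform mathematical calculations",
#         "parameters": {
#             "type": "object",
#             "properties": {
#                 "expression": {
#                     "type": "string",
#                     "description": "Mathematical expression to evaluate",
#                 }
#             },
#             "required": ["expression"],
#         },
#     },
# }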

View File

@@ -25,8 +25,6 @@ from crewai.events.types.flow_events import (
FlowCreatedEvent,
FlowFinishedEvent,
FlowStartedEvent,
HumanFeedbackReceivedEvent,
HumanFeedbackRequestedEvent,
MethodExecutionFailedEvent,
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
@@ -47,7 +45,6 @@ from crewai.events.types.tool_usage_events import (
ToolUsageFinishedEvent,
)
from crewai.flow.flow import Flow, listen, start
from crewai.flow.human_feedback import human_feedback
from crewai.llm import LLM
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
@@ -1276,135 +1273,3 @@ def test_llm_emits_event_with_lite_agent():
assert set(all_agent_roles) == {agent.role}
assert set(all_agent_id) == {str(agent.id)}
# ----------- HUMAN FEEDBACK EVENTS -----------
@patch("builtins.input", return_value="looks good")
@patch("builtins.print")
def test_human_feedback_emits_requested_and_received_events(mock_print, mock_input):
"""Test that @human_feedback decorator emits HumanFeedbackRequested and Received events."""
requested_events = []
received_events = []
events_received = threading.Event()
@crewai_event_bus.on(HumanFeedbackRequestedEvent)
def handle_requested(source, event):
requested_events.append(event)
@crewai_event_bus.on(HumanFeedbackReceivedEvent)
def handle_received(source, event):
received_events.append(event)
events_received.set()
class TestFlow(Flow):
@start()
@human_feedback(
message="Review:",
emit=["approved", "rejected"],
llm="gpt-4o-mini",
)
def review(self):
return "test content"
flow = TestFlow()
with patch.object(flow, "_collapse_to_outcome", return_value="approved"):
flow.kickoff()
assert events_received.wait(timeout=5), (
"Timeout waiting for human feedback events"
)
assert len(requested_events) == 1
assert requested_events[0].type == "human_feedback_requested"
assert requested_events[0].emit == ["approved", "rejected"]
assert requested_events[0].message == "Review:"
assert requested_events[0].output == "test content"
assert len(received_events) == 1
assert received_events[0].type == "human_feedback_received"
assert received_events[0].feedback == "looks good"
assert received_events[0].outcome is None
assert flow.last_human_feedback is not None
assert flow.last_human_feedback.outcome == "approved"
@patch("builtins.input", return_value="feedback text")
@patch("builtins.print")
def test_human_feedback_without_routing_emits_events(mock_print, mock_input):
"""Test that @human_feedback without emit still emits events."""
requested_events = []
received_events = []
events_received = threading.Event()
@crewai_event_bus.on(HumanFeedbackRequestedEvent)
def handle_requested(source, event):
requested_events.append(event)
@crewai_event_bus.on(HumanFeedbackReceivedEvent)
def handle_received(source, event):
received_events.append(event)
events_received.set()
class SimpleFlow(Flow):
@start()
@human_feedback(message="Please review:")
def review(self):
return "content to review"
flow = SimpleFlow()
flow.kickoff()
assert events_received.wait(timeout=5), (
"Timeout waiting for human feedback events"
)
assert len(requested_events) == 1
assert requested_events[0].emit is None
assert len(received_events) == 1
assert received_events[0].feedback == "feedback text"
assert received_events[0].outcome is None
@patch("builtins.input", return_value="")
@patch("builtins.print")
def test_human_feedback_empty_feedback_emits_events(mock_print, mock_input):
"""Test that empty feedback (skipped) still emits events correctly."""
received_events = []
events_received = threading.Event()
@crewai_event_bus.on(HumanFeedbackReceivedEvent)
def handle_received(source, event):
received_events.append(event)
events_received.set()
class SkipFlow(Flow):
@start()
@human_feedback(
message="Review:",
emit=["approved", "rejected"],
llm="gpt-4o-mini",
default_outcome="rejected",
)
def review(self):
return "content"
flow = SkipFlow()
flow.kickoff()
assert events_received.wait(timeout=5), (
"Timeout waiting for human feedback events"
)
assert len(received_events) == 1
assert received_events[0].feedback == ""
assert received_events[0].outcome is None
assert flow.last_human_feedback is not None
assert flow.last_human_feedback.outcome == "rejected"

View File

@@ -1,3 +1,3 @@
"""CrewAI development tools."""
__version__ = "1.8.0"
__version__ = "1.7.2"