Compare commits


1 Commit

Author SHA1 Message Date
Devin AI
8c0da6c129 Add regression tests for issue #3828: Flow with unpickleable objects in state
- Add test_flow_with_rlock_in_state to verify Flow works with threading.RLock in state
- Add test_flow_with_nested_unpickleable_objects to verify Flow works with unpickleable objects nested in containers
- These tests ensure the issue from version 1.3.0 (TypeError: cannot pickle '_thread.RLock' object) doesn't get reintroduced
- The issue was resolved in the current main branch by removing the _copy_state() method that used copy.deepcopy()
- Tests verify that flows with memory components or other resources containing locks work correctly

Fixes #3828

Co-Authored-By: João <joao@crewai.com>
2025-11-04 10:27:39 +00:00
378 changed files with 165667 additions and 8998 deletions
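A minimal sketch of how the first regression test described in the commit message above might look (hedged: the exact flow, fixture, and assertion names in the PR may differ; `Flow` and `@start` are the public `crewai.flow` API):

```python
import threading

from crewai.flow.flow import Flow, start


class RLockFlow(Flow):
    """Flow whose unstructured state holds an object that cannot be pickled."""

    @start()
    def seed_state(self):
        # e.g. a lock held by a memory component or another shared resource
        self.state["lock"] = threading.RLock()
        return "ok"


def test_flow_with_rlock_in_state():
    # Would raise `TypeError: cannot pickle '_thread.RLock' object` if flow
    # state were still deep-copied, as in the 1.3.0 regression.
    assert RLockFlow().kickoff() == "ok"
```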

View File

@@ -5,6 +5,18 @@ on: [pull_request]
permissions:
  contents: read
env:
  OPENAI_API_KEY: fake-api-key
  PYTHONUNBUFFERED: 1
  BRAVE_API_KEY: fake-brave-key
  SNOWFLAKE_USER: fake-snowflake-user
  SNOWFLAKE_PASSWORD: fake-snowflake-password
  SNOWFLAKE_ACCOUNT: fake-snowflake-account
  SNOWFLAKE_WAREHOUSE: fake-snowflake-warehouse
  SNOWFLAKE_DATABASE: fake-snowflake-database
  SNOWFLAKE_SCHEMA: fake-snowflake-schema
  EMBEDCHAIN_DB_URI: sqlite:///test.db
jobs:
  tests:
    name: tests (${{ matrix.python-version }})
@@ -72,20 +84,26 @@ jobs:
          #   fi
          cd lib/crewai && uv run pytest \
            --block-network \
            --timeout=30 \
            -vv \
            --splits 8 \
            --group ${{ matrix.group }} \
            $DURATIONS_ARG \
            --durations=10 \
            -n auto \
            --maxfail=3
      - name: Run tool tests (group ${{ matrix.group }} of 8)
        run: |
          cd lib/crewai-tools && uv run pytest \
            --block-network \
            --timeout=30 \
            -vv \
            --splits 8 \
            --group ${{ matrix.group }} \
            --durations=10 \
            -n auto \
            --maxfail=3

View File

@@ -1,138 +0,0 @@
"""Pytest configuration for crewAI workspace."""
import os
from pathlib import Path
import tempfile
from typing import Any
import pytest
# Load .env.test for consistent test environment
# env_test_path = Path(__file__).parent / ".env.test"
# load_dotenv(env_test_path, override=True)
# load_dotenv(override=True)
@pytest.fixture(autouse=True, scope="function")
def setup_test_environment() -> None:  # type: ignore
    """Set up test environment with a temporary directory for SQLite storage."""
    with tempfile.TemporaryDirectory() as temp_dir:
        # Create the directory with proper permissions
        storage_dir = Path(temp_dir) / "crewai_test_storage"
        storage_dir.mkdir(parents=True, exist_ok=True)

        # Validate that the directory was created successfully
        if not storage_dir.exists() or not storage_dir.is_dir():
            raise RuntimeError(
                f"Failed to create test storage directory: {storage_dir}"
            )

        # Verify directory permissions
        try:
            # Try to create a test file to verify write permissions
            test_file = storage_dir / ".permissions_test"
            test_file.touch()
            test_file.unlink()
        except (OSError, IOError) as e:
            raise RuntimeError(
                f"Test storage directory {storage_dir} is not writable: {e}"
            ) from e

        os.environ["CREWAI_STORAGE_DIR"] = str(storage_dir)
        os.environ["CREWAI_TESTING"] = "true"

        yield

        os.environ.pop("CREWAI_TESTING", None)
        # Cleanup is handled automatically when tempfile context exits


HEADERS_TO_FILTER = {
    "authorization": "AUTHORIZATION-XXX",
    "content-security-policy": "CSP-FILTERED",
    "cookie": "COOKIE-XXX",
    "set-cookie": "SET-COOKIE-XXX",
    "permissions-policy": "PERMISSIONS-POLICY-XXX",
    "referrer-policy": "REFERRER-POLICY-XXX",
    "strict-transport-security": "STS-XXX",
    "x-content-type-options": "X-CONTENT-TYPE-XXX",
    "x-frame-options": "X-FRAME-OPTIONS-XXX",
    "x-permitted-cross-domain-policies": "X-PERMITTED-XXX",
    "x-request-id": "X-REQUEST-ID-XXX",
    "x-runtime": "X-RUNTIME-XXX",
    "x-xss-protection": "X-XSS-PROTECTION-XXX",
    "x-stainless-arch": "X-STAINLESS-ARCH-XXX",
    "x-stainless-os": "X-STAINLESS-OS-XXX",
    "x-stainless-read-timeout": "X-STAINLESS-READ-TIMEOUT-XXX",
    "cf-ray": "CF-RAY-XXX",
    "etag": "ETAG-XXX",
    "Strict-Transport-Security": "STS-XXX",
    "access-control-expose-headers": "ACCESS-CONTROL-XXX",
    "openai-organization": "OPENAI-ORG-XXX",
    "openai-project": "OPENAI-PROJECT-XXX",
    "x-ratelimit-limit-requests": "X-RATELIMIT-LIMIT-REQUESTS-XXX",
    "x-ratelimit-limit-tokens": "X-RATELIMIT-LIMIT-TOKENS-XXX",
    "x-ratelimit-remaining-requests": "X-RATELIMIT-REMAINING-REQUESTS-XXX",
    "x-ratelimit-remaining-tokens": "X-RATELIMIT-REMAINING-TOKENS-XXX",
    "x-ratelimit-reset-requests": "X-RATELIMIT-RESET-REQUESTS-XXX",
    "x-ratelimit-reset-tokens": "X-RATELIMIT-RESET-TOKENS-XXX",
}


def _filter_response_headers(response) -> dict[str, Any]:  # type: ignore
    """Filter sensitive headers from response before recording."""
    for header_name, replacement in HEADERS_TO_FILTER.items():
        for variant in [header_name, header_name.upper(), header_name.title()]:
            if variant in response["headers"]:
                response["headers"][variant] = [replacement]
    return response  # type: ignore


@pytest.fixture(scope="module")
def vcr_cassette_dir(request: Any) -> str:
    """Generate cassette directory path based on test module location.

    Organizes cassettes to mirror test directory structure within each package:
        lib/crewai/tests/llms/google/test_google.py -> lib/crewai/tests/cassettes/llms/google/
        lib/crewai-tools/tests/tools/test_search.py -> lib/crewai-tools/tests/cassettes/tools/
    """
    test_file = Path(request.fspath)

    # Find the package root (lib/crewai or lib/crewai-tools)
    for parent in test_file.parents:
        if parent.name in ("crewai", "crewai-tools") and parent.parent.name == "lib":
            package_root = parent
            break
    else:
        # Fallback to old behavior if we can't find package root
        package_root = test_file.parent

    tests_root = package_root / "tests"
    test_dir = test_file.parent

    # Get relative path from tests root
    if test_dir != tests_root:
        relative_path = test_dir.relative_to(tests_root)
        cassette_dir = tests_root / "cassettes" / relative_path
    else:
        cassette_dir = tests_root / "cassettes"

    cassette_dir.mkdir(parents=True, exist_ok=True)
    return str(cassette_dir)


@pytest.fixture(scope="module")
def vcr_config(vcr_cassette_dir: str) -> dict[str, Any]:
    """Configure VCR with organized cassette storage."""
    config = {
        "cassette_library_dir": vcr_cassette_dir,
        "record_mode": os.getenv("PYTEST_VCR_RECORD_MODE") or "once",
        "filter_headers": [(k, v) for k, v in HEADERS_TO_FILTER.items()],
        "before_record_response": _filter_response_headers,
    }
    if os.getenv("GITHUB_ACTIONS") == "true":
        config["record_mode"] = "none"
    return config
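
For context, a hypothetical test showing how these fixtures are consumed (the VCR pytest plugin picks up `vcr_config` and `vcr_cassette_dir` automatically for tests carrying the `vcr` marker; the endpoint below is only an illustration):

```python
import pytest
import requests


@pytest.mark.vcr
def test_request_is_recorded_and_replayed():
    # First run (record_mode "once") writes a cassette under tests/cassettes/...;
    # in CI (record_mode "none") the canned response is replayed instead.
    response = requests.get("https://api.example.com/health")
    assert response.status_code == 200
```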

View File

@@ -1200,52 +1200,6 @@ Learn how to get the most out of your LLM configuration:
)
```
</Accordion>
<Accordion title="Transport Interceptors">
CrewAI provides message interceptors for several providers, allowing you to hook into request/response cycles at the transport layer.
**Supported Providers:**
- ✅ OpenAI
- ✅ Anthropic
**Basic Usage:**
```python
import httpx
from crewai import LLM
from crewai.llms.hooks import BaseInterceptor

class CustomInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    """Custom interceptor to modify requests and responses."""

    def on_outbound(self, request: httpx.Request) -> httpx.Request:
        """Print request before sending to the LLM provider."""
        print(request)
        return request

    def on_inbound(self, response: httpx.Response) -> httpx.Response:
        """Process response after receiving from the LLM provider."""
        print(f"Status: {response.status_code}")
        print(f"Response time: {response.elapsed}")
        return response


# Use the interceptor with an LLM
llm = LLM(
    model="openai/gpt-4o",
    interceptor=CustomInterceptor()
)
```
**Important Notes:**
- Both methods must return the received object (or an object of the same type).
- Modifying received objects may result in unexpected behavior or application crashes.
- Not all providers support interceptors; check the supported providers list above.
<Info>
Interceptors operate at the transport layer. This is particularly useful for:
- Message transformation and filtering
- Debugging API interactions
</Info>
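As a further sketch (not part of the official docs), an interceptor that attaches a request header while still returning the received objects; keep the caveat above about modifying objects in mind:

```python
import httpx
from crewai.llms.hooks import BaseInterceptor


class HeaderInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    """Illustrative only: tags outbound requests, passes responses through."""

    def on_outbound(self, request: httpx.Request) -> httpx.Request:
        request.headers["x-trace-id"] = "my-trace-id"  # hypothetical header name
        return request

    def on_inbound(self, response: httpx.Response) -> httpx.Response:
        return response
```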
</Accordion>
</AccordionGroup>
## Common Issues and Solutions

View File

@@ -11,13 +11,9 @@ The [Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP)
CrewAI offers **two approaches** for MCP integration:
### 🚀 **Simple DSL Integration** (Recommended)
### Simple DSL Integration (Recommended)
Use the `mcps` field directly on agents for seamless MCP tool integration. The DSL supports both **string references** (for quick setup) and **structured configurations** (for full control).
#### String-Based References (Quick Setup)
Perfect for remote HTTPS servers and CrewAI AMP marketplace:
Use the `mcps` field directly on agents for seamless MCP tool integration:
```python
from crewai import Agent
@@ -36,46 +32,6 @@ agent = Agent(
# MCP tools are now automatically available to your agent!
```
#### Structured Configurations (Full Control)
For complete control over connection settings, tool filtering, and all transport types:
```python
from crewai import Agent
from crewai.mcp import MCPServerStdio, MCPServerHTTP, MCPServerSSE
from crewai.mcp.filters import create_static_tool_filter

agent = Agent(
    role="Advanced Research Analyst",
    goal="Research with full control over MCP connections",
    backstory="Expert researcher with advanced tool access",
    mcps=[
        # Stdio transport for local servers
        MCPServerStdio(
            command="npx",
            args=["-y", "@modelcontextprotocol/server-filesystem"],
            env={"API_KEY": "your_key"},
            tool_filter=create_static_tool_filter(
                allowed_tool_names=["read_file", "list_directory"]
            ),
            cache_tools_list=True,
        ),
        # HTTP/Streamable HTTP transport for remote servers
        MCPServerHTTP(
            url="https://api.example.com/mcp",
            headers={"Authorization": "Bearer your_token"},
            streamable=True,
            cache_tools_list=True,
        ),
        # SSE transport for real-time streaming
        MCPServerSSE(
            url="https://stream.example.com/mcp/sse",
            headers={"Authorization": "Bearer your_token"},
        ),
    ]
)
```
### 🔧 **Advanced: MCPServerAdapter** (For Complex Scenarios)
For advanced use cases requiring manual connection management, the `crewai-tools` library provides the `MCPServerAdapter` class.
@@ -112,14 +68,12 @@ uv pip install 'crewai-tools[mcp]'
## Quick Start: Simple DSL Integration
The easiest way to integrate MCP servers is using the `mcps` field on your agents. You can use either string references or structured configurations.
### Quick Start with String References
The easiest way to integrate MCP servers is using the `mcps` field on your agents:
```python
from crewai import Agent, Task, Crew
# Create agent with MCP tools using string references
# Create agent with MCP tools
research_agent = Agent(
role="Research Analyst",
goal="Find and analyze information using advanced search tools",
@@ -142,53 +96,13 @@ crew = Crew(agents=[research_agent], tasks=[research_task])
result = crew.kickoff()
```
### Quick Start with Structured Configurations
```python
from crewai import Agent, Task, Crew
from crewai.mcp import MCPServerStdio, MCPServerHTTP, MCPServerSSE
# Create agent with structured MCP configurations
research_agent = Agent(
    role="Research Analyst",
    goal="Find and analyze information using advanced search tools",
    backstory="Expert researcher with access to multiple data sources",
    mcps=[
        # Local stdio server
        MCPServerStdio(
            command="python",
            args=["local_server.py"],
            env={"API_KEY": "your_key"},
        ),
        # Remote HTTP server
        MCPServerHTTP(
            url="https://api.research.com/mcp",
            headers={"Authorization": "Bearer your_token"},
        ),
    ]
)

# Create task
research_task = Task(
    description="Research the latest developments in AI agent frameworks",
    expected_output="Comprehensive research report with citations",
    agent=research_agent
)

# Create and run crew
crew = Crew(agents=[research_agent], tasks=[research_task])
result = crew.kickoff()
```
That's it! The MCP tools are automatically discovered and available to your agent.
## MCP Reference Formats
The `mcps` field supports both **string references** (for quick setup) and **structured configurations** (for full control). You can mix both formats in the same list.
The `mcps` field supports various reference formats for maximum flexibility:
### String-Based References
#### External MCP Servers
### External MCP Servers
```python
mcps=[
@@ -203,7 +117,7 @@ mcps=[
]
```
#### CrewAI AMP Marketplace
### CrewAI AMP Marketplace
```python
mcps=[
@@ -219,166 +133,17 @@ mcps=[
]
```
### Structured Configurations
#### Stdio Transport (Local Servers)
Perfect for local MCP servers that run as processes:
```python
from crewai.mcp import MCPServerStdio
from crewai.mcp.filters import create_static_tool_filter
mcps=[
    MCPServerStdio(
        command="npx",
        args=["-y", "@modelcontextprotocol/server-filesystem"],
        env={"API_KEY": "your_key"},
        tool_filter=create_static_tool_filter(
            allowed_tool_names=["read_file", "write_file"]
        ),
        cache_tools_list=True,
    ),
    # Python-based server
    MCPServerStdio(
        command="python",
        args=["path/to/server.py"],
        env={"UV_PYTHON": "3.12", "API_KEY": "your_key"},
    ),
]
```
#### HTTP/Streamable HTTP Transport (Remote Servers)
For remote MCP servers over HTTP/HTTPS:
```python
from crewai.mcp import MCPServerHTTP
mcps=[
    # Streamable HTTP (default)
    MCPServerHTTP(
        url="https://api.example.com/mcp",
        headers={"Authorization": "Bearer your_token"},
        streamable=True,
        cache_tools_list=True,
    ),
    # Standard HTTP
    MCPServerHTTP(
        url="https://api.example.com/mcp",
        headers={"Authorization": "Bearer your_token"},
        streamable=False,
    ),
]
```
#### SSE Transport (Real-Time Streaming)
For remote servers using Server-Sent Events:
```python
from crewai.mcp import MCPServerSSE
mcps=[
    MCPServerSSE(
        url="https://stream.example.com/mcp/sse",
        headers={"Authorization": "Bearer your_token"},
        cache_tools_list=True,
    ),
]
```
### Mixed References
You can combine string references and structured configurations:
```python
from crewai.mcp import MCPServerStdio, MCPServerHTTP
mcps=[
    # String references
    "https://external-api.com/mcp",  # External server
    "https://weather.service.com/mcp#forecast",  # Specific external tool
    "crewai-amp:financial-insights",  # AMP service
    # Structured configurations
    MCPServerStdio(
        command="npx",
        args=["-y", "@modelcontextprotocol/server-filesystem"],
    ),
    MCPServerHTTP(
        url="https://api.example.com/mcp",
        headers={"Authorization": "Bearer token"},
    ),
    "crewai-amp:data-analysis#sentiment_tool"  # Specific AMP tool
]
```
### Tool Filtering
Structured configurations support advanced tool filtering:
```python
from crewai.mcp import MCPServerStdio
from crewai.mcp.filters import create_static_tool_filter, create_dynamic_tool_filter, ToolFilterContext
# Static filtering (allow/block lists)
static_filter = create_static_tool_filter(
    allowed_tool_names=["read_file", "write_file"],
    blocked_tool_names=["delete_file"],
)

# Dynamic filtering (context-aware)
def dynamic_filter(context: ToolFilterContext, tool: dict) -> bool:
    # Block dangerous tools for certain agent roles
    if context.agent.role == "Code Reviewer":
        if "delete" in tool.get("name", "").lower():
            return False
    return True

mcps=[
    MCPServerStdio(
        command="npx",
        args=["-y", "@modelcontextprotocol/server-filesystem"],
        tool_filter=static_filter,  # or dynamic_filter
    ),
]
```
## Configuration Parameters
Each transport type supports specific configuration options:
### MCPServerStdio Parameters
- **`command`** (required): Command to execute (e.g., `"python"`, `"node"`, `"npx"`, `"uvx"`)
- **`args`** (optional): List of command arguments (e.g., `["server.py"]` or `["-y", "@mcp/server"]`)
- **`env`** (optional): Dictionary of environment variables to pass to the process
- **`tool_filter`** (optional): Tool filter function for filtering available tools
- **`cache_tools_list`** (optional): Whether to cache the tool list for faster subsequent access (default: `False`)
### MCPServerHTTP Parameters
- **`url`** (required): Server URL (e.g., `"https://api.example.com/mcp"`)
- **`headers`** (optional): Dictionary of HTTP headers for authentication or other purposes
- **`streamable`** (optional): Whether to use streamable HTTP transport (default: `True`)
- **`tool_filter`** (optional): Tool filter function for filtering available tools
- **`cache_tools_list`** (optional): Whether to cache the tool list for faster subsequent access (default: `False`)
### MCPServerSSE Parameters
- **`url`** (required): Server URL (e.g., `"https://api.example.com/mcp/sse"`)
- **`headers`** (optional): Dictionary of HTTP headers for authentication or other purposes
- **`tool_filter`** (optional): Tool filter function for filtering available tools
- **`cache_tools_list`** (optional): Whether to cache the tool list for faster subsequent access (default: `False`)
### Common Parameters
All transport types support:
- **`tool_filter`**: Filter function to control which tools are available. Can be:
- `None` (default): All tools are available
- Static filter: Created with `create_static_tool_filter()` for allow/block lists
- Dynamic filter: Created with `create_dynamic_tool_filter()` for context-aware filtering
- **`cache_tools_list`**: When `True`, caches the tool list after first discovery to improve performance on subsequent connections
## Key Features
- 🔄 **Automatic Tool Discovery**: Tools are automatically discovered and integrated
@@ -387,47 +152,26 @@ All transport types support:
- 🛡️ **Error Resilience**: Graceful handling of unavailable servers
- ⏱️ **Timeout Protection**: Built-in timeouts prevent hanging connections
- 📊 **Transparent Integration**: Works seamlessly with existing CrewAI features
- 🔧 **Full Transport Support**: Stdio, HTTP/Streamable HTTP, and SSE transports
- 🎯 **Advanced Filtering**: Static and dynamic tool filtering capabilities
- 🔐 **Flexible Authentication**: Support for headers, environment variables, and query parameters
## Error Handling
The MCP DSL integration is designed to be resilient and handles failures gracefully:
The MCP DSL integration is designed to be resilient:
```python
from crewai import Agent
from crewai.mcp import MCPServerStdio, MCPServerHTTP
agent = Agent(
    role="Resilient Agent",
    goal="Continue working despite server issues",
    backstory="Agent that handles failures gracefully",
    mcps=[
        # String references
        "https://reliable-server.com/mcp",  # Will work
        "https://unreachable-server.com/mcp",  # Will be skipped gracefully
        "crewai-amp:working-service",  # Will work
        # Structured configs
        MCPServerStdio(
            command="python",
            args=["reliable_server.py"],  # Will work
        ),
        MCPServerHTTP(
            url="https://slow-server.com/mcp",  # Will timeout gracefully
        ),
        "https://slow-server.com/mcp",  # Will timeout gracefully
        "crewai-amp:working-service"  # Will work
    ]
)

# Agent will use tools from working servers and log warnings for failing ones
```
All connection errors are handled gracefully:
- **Connection failures**: Logged as warnings, agent continues with available tools
- **Timeout errors**: Connections time out after 30 seconds (configurable)
- **Authentication errors**: Logged clearly for debugging
- **Invalid configurations**: Validation errors are raised at agent creation time
## Advanced: MCPServerAdapter
For complex scenarios requiring manual connection management, use the `MCPServerAdapter` class from `crewai-tools`. Using a Python context manager (`with` statement) is the recommended approach as it automatically handles starting and stopping the connection to the MCP server.
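
A sketch of that pattern (assuming the `MCPServerAdapter` / `StdioServerParameters` usage documented by `crewai-tools`; the server path and env values are placeholders):

```python
from crewai import Agent
from crewai_tools import MCPServerAdapter
from mcp import StdioServerParameters

server_params = StdioServerParameters(
    command="python",
    args=["servers/my_mcp_server.py"],  # placeholder path
    env={"API_KEY": "your_key"},
)

# The context manager starts the connection and stops it automatically on exit.
with MCPServerAdapter(server_params) as mcp_tools:
    agent = Agent(
        role="Research Analyst",
        goal="Use MCP tools with an explicitly managed connection",
        backstory="Expert researcher",
        tools=mcp_tools,
    )
```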

View File

@@ -7,12 +7,11 @@ from pathlib import Path
from typing import Any, cast
from crewai.tools.base_tool import BaseTool, EnvVar
from crewai_tools import tools
from pydantic import BaseModel
from pydantic.json_schema import GenerateJsonSchema
from pydantic_core import PydanticOmit
from crewai_tools import tools
class SchemaGenerator(GenerateJsonSchema):
    def handle_invalid_for_json_schema(self, schema, error_info):

View File

@@ -12,7 +12,7 @@ dependencies = [
"pytube>=15.0.0",
"requests>=2.32.5",
"docker>=7.1.0",
"crewai==1.4.1",
"crewai==1.3.0",
"lancedb>=0.5.4",
"tiktoken>=0.8.0",
"beautifulsoup4>=4.13.4",

View File

@@ -287,4 +287,4 @@ __all__ = [
"ZapierActionTools",
]
__version__ = "1.4.1"
__version__ = "1.3.0"

View File

@@ -229,7 +229,6 @@ class CrewAIRagAdapter(Adapter):
continue
else:
metadata: dict[str, Any] = base_metadata.copy()
source_content = SourceContent(source_ref)
if data_type in [
DataType.PDF_FILE,
@@ -240,12 +239,13 @@ class CrewAIRagAdapter(Adapter):
DataType.XML,
DataType.MDX,
]:
if not source_content.is_url() and not source_content.path_exists():
if not os.path.isfile(source_ref):
raise FileNotFoundError(f"File does not exist: {source_ref}")
loader = data_type.get_loader()
chunker = data_type.get_chunker()
source_content = SourceContent(source_ref)
loader_result: LoaderResult = loader.load(source_content)
chunks = chunker.chunk(loader_result.content)

View File

@@ -91,7 +91,7 @@ class EmbeddingService:
"azure": "AZURE_OPENAI_API_KEY",
"amazon-bedrock": "AWS_ACCESS_KEY_ID", # or AWS_PROFILE
"cohere": "COHERE_API_KEY",
"google-generativeai": "GEMINI_API_KEY",
"google-generativeai": "GOOGLE_API_KEY",
"google-vertex": "GOOGLE_APPLICATION_CREDENTIALS",
"huggingface": "HUGGINGFACE_API_KEY",
"jina": "JINA_API_KEY",

View File

@@ -247,7 +247,7 @@ class StagehandTool(BaseTool):
if "claude" in model_str.lower() or "anthropic" in model_str.lower():
return self.model_api_key or os.getenv("ANTHROPIC_API_KEY")
if "gemini" in model_str.lower():
return self.model_api_key or os.getenv("GEMINI_API_KEY")
return self.model_api_key or os.getenv("GOOGLE_API_KEY")
# Default to trying OpenAI, then Anthropic
return (
self.model_api_key
@@ -313,7 +313,7 @@ class StagehandTool(BaseTool):
if not model_api_key:
raise ValueError(
"No appropriate API key found for model. Please set OPENAI_API_KEY, ANTHROPIC_API_KEY, or GEMINI_API_KEY"
"No appropriate API key found for model. Please set OPENAI_API_KEY, ANTHROPIC_API_KEY, or GOOGLE_API_KEY"
)
# Build the StagehandConfig with proper parameter names

View File

@@ -0,0 +1,21 @@
import pytest
def pytest_configure(config):
    """Register custom markers."""
    config.addinivalue_line("markers", "integration: mark test as an integration test")
    config.addinivalue_line("markers", "asyncio: mark test as an async test")
    # Set the asyncio loop scope through ini configuration
    config.inicfg["asyncio_mode"] = "auto"


@pytest.fixture(scope="function")
def event_loop():
    """Create an instance of the default event loop for each test case."""
    import asyncio

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    yield loop
    loop.close()
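
A hypothetical companion test (not part of the diff) showing what this configuration enables: with `asyncio_mode` forced to `auto`, a plain `async def` test is collected and run on the function-scoped loop without an explicit marker:

```python
import asyncio


async def test_runs_without_asyncio_marker():
    # Collected automatically because asyncio_mode is "auto"; executes on the
    # fresh loop created by the event_loop fixture for this test.
    await asyncio.sleep(0)
    assert asyncio.get_running_loop().is_running()
```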

View File

@@ -2,7 +2,7 @@ import json
from unittest import mock
from crewai.tools.base_tool import BaseTool, EnvVar
from crewai_tools.generate_tool_specs import ToolSpecExtractor
from generate_tool_specs import ToolSpecExtractor
from pydantic import BaseModel, Field
import pytest
@@ -61,8 +61,8 @@ def test_unwrap_schema(extractor):
@pytest.fixture
def mock_tool_extractor(extractor):
    with (
        mock.patch("crewai_tools.generate_tool_specs.dir", return_value=["MockTool"]),
        mock.patch("crewai_tools.generate_tool_specs.getattr", return_value=MockTool),
        mock.patch("generate_tool_specs.dir", return_value=["MockTool"]),
        mock.patch("generate_tool_specs.getattr", return_value=MockTool),
    ):
        extractor.extract_all_tools()
        assert len(extractor.tools_spec) == 1

View File

@@ -0,0 +1,289 @@
interactions:
- request:
body: '{"url": "https://firecrawl.dev", "includeTags": [], "excludeTags": [],
"onlyMainContent": true, "waitFor": 0, "skipTlsVerification": true, "removeBase64Images":
true, "fastMode": false, "blockAds": true, "storeInCache": true, "maxAge": 172800000,
"formats": ["markdown"], "headers": {}, "mobile": false, "proxy": "auto", "origin":
"python-sdk@4.5.0"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '350'
Content-Type:
- application/json
User-Agent:
- python-requests/2.32.5
method: POST
uri: https://api.firecrawl.dev/v2/scrape
response:
body:
string: "{\"success\":true,\"data\":{\"markdown\":\"We just raised our Series
A and shipped Firecrawl /v2 \U0001F389. [Read the blog.](https://www.firecrawl.dev/blog/firecrawl-v2-series-a-announcement)\\n\\n[2
Months Free \u2014 Annually](https://www.firecrawl.dev/pricing)\\n\\n# Turn
websites into LLM-ready data\\n\\nPower your AI apps with clean web data\\n\\nfrom
any website. [It's also open source.](https://github.com/firecrawl/firecrawl)\\n\\nScrape\\n\\nSearch\\nNew\\n\\nMap\\n\\nCrawl\\n\\nScrape\\n\\nLogo\\n\\nNavigation\\n\\nButton\\n\\nH1
Title\\n\\nDescription\\n\\nCTA Button\\n\\n\\\\[ .JSON \\\\]\\n\\n```json\\n1[\\\\\\n2
\ {\\\\\\n3 \\\"url\\\": \\\"https://example.com\\\",\\\\\\n4 \\\"markdown\\\":
\\\"# Getting Started...\\\",\\\\\\n5 \\\"json\\\": { \\\"title\\\": \\\"Guide\\\",
\\\"docs\\\": \\\"...\\\" },\\\\\\n6 \\\"screenshot\\\": \\\"https://example.com/hero.png\\\"\\\\\\n7
\ }\\\\\\n8]\\n```\\n\\nScrape Completed\\n\\nTrusted by5000+\\n\\ncompaniesof
all sizes\\n\\n![Logo 17](https://www.firecrawl.dev/assets-original/logocloud/17.png)\\n\\n![Logo
18](https://www.firecrawl.dev/assets-original/logocloud/18.png)\\n\\n![Logo
1](https://www.firecrawl.dev/assets-original/logocloud/1.png)\\n\\n![Logo
2](https://www.firecrawl.dev/assets-original/logocloud/2.png)\\n\\n![Logo
3](https://www.firecrawl.dev/assets-original/logocloud/3.png)\\n\\n![Logo
4](https://www.firecrawl.dev/assets-original/logocloud/4.png)\\n\\n![Logo
5](https://www.firecrawl.dev/assets-original/logocloud/5.png)\\n\\n![Logo
6](https://www.firecrawl.dev/assets-original/logocloud/6.png)\\n\\n![Logo
7](https://www.firecrawl.dev/assets-original/logocloud/7.png)\\n\\n![Logo
8](https://www.firecrawl.dev/assets-original/logocloud/8.png)\\n\\n![Logo
9](https://www.firecrawl.dev/assets-original/logocloud/9.png)\\n\\n![Logo
10](https://www.firecrawl.dev/assets-original/logocloud/10.png)\\n\\n![Logo
11](https://www.firecrawl.dev/assets-original/logocloud/11.png)\\n\\n![Logo
12](https://www.firecrawl.dev/assets-original/logocloud/12.png)\\n\\n![Logo
13](https://www.firecrawl.dev/assets-original/logocloud/13.png)\\n\\n![Logo
14](https://www.firecrawl.dev/assets-original/logocloud/14.png)\\n\\n![Logo
15](https://www.firecrawl.dev/assets-original/logocloud/15.png)\\n\\n![Logo
16](https://www.firecrawl.dev/assets-original/logocloud/16.png)\\n\\n![Logo
17](https://www.firecrawl.dev/assets-original/logocloud/17.png)\\n\\n![Logo
18](https://www.firecrawl.dev/assets-original/logocloud/18.png)\\n\\n![Logo
19](https://www.firecrawl.dev/assets-original/logocloud/19.png)\\n\\n![Logo
20](https://www.firecrawl.dev/assets-original/logocloud/20.png)\\n\\n![Logo
21](https://www.firecrawl.dev/assets-original/logocloud/21.png)\\n\\n![Logo
17](https://www.firecrawl.dev/assets-original/logocloud/17.png)\\n\\n![Logo
18](https://www.firecrawl.dev/assets-original/logocloud/18.png)\\n\\n![Logo
1](https://www.firecrawl.dev/assets-original/logocloud/1.png)\\n\\n![Logo
2](https://www.firecrawl.dev/assets-original/logocloud/2.png)\\n\\n![Logo
3](https://www.firecrawl.dev/assets-original/logocloud/3.png)\\n\\n![Logo
4](https://www.firecrawl.dev/assets-original/logocloud/4.png)\\n\\n![Logo
5](https://www.firecrawl.dev/assets-original/logocloud/5.png)\\n\\n![Logo
6](https://www.firecrawl.dev/assets-original/logocloud/6.png)\\n\\n![Logo
7](https://www.firecrawl.dev/assets-original/logocloud/7.png)\\n\\n![Logo
8](https://www.firecrawl.dev/assets-original/logocloud/8.png)\\n\\n![Logo
9](https://www.firecrawl.dev/assets-original/logocloud/9.png)\\n\\n![Logo
10](https://www.firecrawl.dev/assets-original/logocloud/10.png)\\n\\n![Logo
11](https://www.firecrawl.dev/assets-original/logocloud/11.png)\\n\\n![Logo
12](https://www.firecrawl.dev/assets-original/logocloud/12.png)\\n\\n![Logo
13](https://www.firecrawl.dev/assets-original/logocloud/13.png)\\n\\n![Logo
14](https://www.firecrawl.dev/assets-original/logocloud/14.png)\\n\\n![Logo
15](https://www.firecrawl.dev/assets-original/logocloud/15.png)\\n\\n![Logo
16](https://www.firecrawl.dev/assets-original/logocloud/16.png)\\n\\n![Logo
17](https://www.firecrawl.dev/assets-original/logocloud/17.png)\\n\\n![Logo
18](https://www.firecrawl.dev/assets-original/logocloud/18.png)\\n\\n![Logo
19](https://www.firecrawl.dev/assets-original/logocloud/19.png)\\n\\n![Logo
20](https://www.firecrawl.dev/assets-original/logocloud/20.png)\\n\\n![Logo
21](https://www.firecrawl.dev/assets-original/logocloud/21.png)\\n\\n\\\\[01/
07 \\\\]\\n\\n\xB7\\n\\nMain Features\\n\\n//\\n\\nDeveloper First\\n\\n//\\n\\n##
Startscraping today\\n\\nEnhance your apps with industry leading web scraping
and crawling capabilities.\\n\\nScrape\\n\\nGet llm-ready data from websites.
Markdown, JSON, screenshot, etc.\\n\\nSearch\\n\\nNew\\n\\nSearch the web
and get full content from results.\\n\\nCrawl\\n\\nCrawl all the pages on
a website and get data for each page.\\n\\nPython\\n\\nNode.js\\n\\nCurl\\n\\nCopy
code\\n\\n```python\\n1# pip install firecrawl-py\\n2from firecrawl import
Firecrawl\\n3\\n4app = Firecrawl(api_key=\\\"fc-YOUR_API_KEY\\\")\\n5\\n6#
Scrape a website:\\n7app.scrape('firecrawl.dev')\\n8\\n9\\n10\\n```\\n\\n\\\\[
.MD \\\\]\\n\\n```markdown\\n1# Firecrawl\\n2\\n3Firecrawl is a powerful web
scraping\\n4library that makes it easy to extract\\n5data from websites.\\n6\\n7##
Installation\\n8\\n9To install Firecrawl, run:\\n10\\n11\\n```\\n\\n![developer-1](https://www.firecrawl.dev/assets/developer/1.png)\\n\\n![developer-2](https://www.firecrawl.dev/assets/developer/2.png)\\n\\n![developer-3](https://www.firecrawl.dev/assets/developer/3.png)\\n\\n![developer-4](https://www.firecrawl.dev/assets/developer/4.png)\\n\\n![developer-5](https://www.firecrawl.dev/assets/developer/5.png)\\n\\n![developer-6](https://www.firecrawl.dev/assets/developer/6.png)\\n\\n![developer-7](https://www.firecrawl.dev/assets/developer/7.png)\\n\\n![developer-8](https://www.firecrawl.dev/assets/developer/8.png)\\n\\n![developer-9](https://www.firecrawl.dev/assets/developer/1.png)\\n\\n![developer-10](https://www.firecrawl.dev/assets/developer/2.png)\\n\\n![developer-11](https://www.firecrawl.dev/assets/developer/3.png)\\n\\n![developer-12](https://www.firecrawl.dev/assets/developer/4.png)\\n\\n![developer-13](https://www.firecrawl.dev/assets/developer/5.png)\\n\\n![developer-14](https://www.firecrawl.dev/assets/developer/6.png)\\n\\n![developer-15](https://www.firecrawl.dev/assets/developer/7.png)\\n\\n![developer-16](https://www.firecrawl.dev/assets/developer/8.png)\\n\\n![developer-17](https://www.firecrawl.dev/assets/developer/1.png)\\n\\n![developer-18](https://www.firecrawl.dev/assets/developer/2.png)\\n\\n![developer-19](https://www.firecrawl.dev/assets/developer/3.png)\\n\\n![developer-20](https://www.firecrawl.dev/assets/developer/4.png)\\n\\n![developer-21](https://www.firecrawl.dev/assets/developer/5.png)\\n\\n![developer-22](https://www.firecrawl.dev/assets/developer/6.png)\\n\\n![developer-23](https://www.firecrawl.dev/assets/developer/7.png)\\n\\n![developer-24](https://www.firecrawl.dev/assets/developer/8.png)\\n\\nIntegrations\\n\\n###
Use well-known tools\\n\\nAlready fully integrated with the greatest existing
tools and workflows.\\n\\n[See all integrations](https://www.firecrawl.dev/app)\\n\\n![Firecrawl
icon (blueprint)](https://www.firecrawl.dev/assets-original/developer-os-icon.png)\\n\\nmendableai/firecrawl\\n\\nPublic\\n\\nStar\\n\\n65.3K\\n\\n\\\\[python-SDK\\\\]
improvs/async\\n\\n#1337\\n\\n\xB7\\n\\nApr 18, 2025\\n\\n\xB7\\n\\n![rafaelsideguide](https://www.firecrawl.dev/_next/image?url=https%3A%2F%2Favatars.githubusercontent.com%2Fu%2F150964962%3Fv%3D4&w=48&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\nrafaelsideguide\\n\\nfeat(extract):
cost limit\\n\\n#1473\\n\\n\xB7\\n\\nApr 17, 2025\\n\\n\xB7\\n\\n![mogery](https://www.firecrawl.dev/_next/image?url=https%3A%2F%2Favatars.githubusercontent.com%2Fu%2F66118807%3Fv%3D4&w=48&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\nmogery\\n\\nfeat(scrape):
get job result from GCS, avoid Redis\\n\\n#1461\\n\\n\xB7\\n\\nApr 15, 2025\\n\\n\xB7\\n\\n![mogery](https://www.firecrawl.dev/_next/image?url=https%3A%2F%2Favatars.githubusercontent.com%2Fu%2F66118807%3Fv%3D4&w=48&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\nmogery\\n\\nExtract
v2/rerank improvs\\n\\n#1437\\n\\n\xB7\\n\\nApr 11, 2025\\n\\n\xB7\\n\\n![rafaelsideguide](https://www.firecrawl.dev/_next/image?url=https%3A%2F%2Favatars.githubusercontent.com%2Fu%2F150964962%3Fv%3D4&w=48&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\nrafaelsideguide\\n\\n![https://avatars.githubusercontent.com/u/150964962?v=4](https://www.firecrawl.dev/_next/image?url=https%3A%2F%2Favatars.githubusercontent.com%2Fu%2F150964962%3Fv%3D4&w=96&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\n![https://avatars.githubusercontent.com/u/66118807?v=4](https://www.firecrawl.dev/_next/image?url=https%3A%2F%2Favatars.githubusercontent.com%2Fu%2F66118807%3Fv%3D4&w=96&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\n+90\\n\\nOpen
Source\\n\\n### Code you can trust\\n\\nDeveloped transparently and collaboratively.
Join our community of contributors.\\n\\n[Check out our repo](https://github.com/firecrawl/firecrawl)\\n\\n\\\\[02/
07 \\\\]\\n\\n\xB7\\n\\nCore\\n\\n//\\n\\nBuilt to outperform\\n\\n//\\n\\n##
Core principles, provenperformance\\n\\nBuilt from the ground up to outperform
traditional scrapers.\\n\\nNo proxy headaches\\n\\nReliable.Covers 96% of
the web,\\n\\nincluding JS-heavy and protected pages. No proxies, no puppets,
just clean data.\\n\\nFirecrawl\\n\\n96%\\n\\n![Puppeteer icon](https://www.firecrawl.dev/assets/puppeteer.png)\\n\\nPuppeteer\\n\\n79%\\n\\ncURL\\n\\n75%\\n\\nSpeed
that feels invisible\\n\\nBlazingly fast.Delivers results in less than 1 second,
fast for real-time agents\\n\\nand dynamic apps.\\n\\nURL\\n\\nCrawl\\n\\nScrape\\n\\nfirecrawl.dev/docs\\n\\n50ms\\n\\n51ms\\n\\nfirecrawl.dev/templates\\n\\n52ms\\n\\n50ms\\n\\nfirecrawl.dev/changelog\\n\\n49ms\\n\\n52ms\\n\\nfirecrawl.dev/about\\n\\n52ms\\n\\n50ms\\n\\nfirecrawl.dev/changelog\\n\\n50ms\\n\\n52ms\\n\\nfirecrawl.dev/playground\\n\\n51ms\\n\\n49ms\\n\\n\\\\[
CTA \\\\]\\n\\n\\\\[ CRAWL \\\\]\\n\\n\\\\[ SCRAPE \\\\]\\n\\n\\\\[ CTA \\\\]\\n\\n//\\n\\nGet
started\\n\\n//\\n\\nReady to build?\\n\\nStart getting Web Data for free
and scale seamlessly as your project expands. No credit card needed.\\n\\n[Start
for free](https://www.firecrawl.dev/signin) [See our plans](https://www.firecrawl.dev/pricing)\\n\\n\\\\[03/
07 \\\\]\\n\\n\xB7\\n\\nFeatures\\n\\n//\\n\\nZero configuration\\n\\n//\\n\\n##
We handle the hard stuff\\n\\nRotating proxies, orchestration, rate limits,
js-blocked content and more.\\n\\nDocs to data\\n\\nMedia parsing.Firecrawl
can parse and output content from web hosted pdfs, docx, and more.\\n\\nhttps://example.com/docs/report.pdf\\n\\nhttps://example.com/files/brief.docx\\n\\nhttps://example.com/docs/guide.html\\n\\ndocx\\n\\nParsing...\\n\\nKnows
the moment\\n\\nSmart wait.Firecrawl intelligently waits for content to load,
making scraping faster and more reliable.\\n\\nhttps://example-spa.com\\n\\nRequest
Sent\\n\\nScrapes the real thing\\n\\nCached, when you need it.Selective caching,
you choose your caching patterns, growing web index.\\n\\n![User](https://www.firecrawl.dev/_next/image?url=%2Fassets-original%2Ffeatures%2Fcached-user.png&w=256&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\nUser\\n\\nFirecrawl\\n\\nCache\\n\\nInvisible
access\\n\\nStealth mode.Crawls the web without\\n\\nbeing blocked, mimics
real users to access protected or dynamic content.\\n\\nInteractive scraping\\n\\nActions.Click,
scroll, write, wait, press and more before extracting content.\\n\\nhttps://example.com\\n\\nNavigate\\n\\nClick\\n\\nType\\n\\nWait\\n\\nScroll\\n\\nPress\\n\\nScreenshot\\n\\nScrape\\n\\n\\\\[04/
07 \\\\]\\n\\n\xB7\\n\\nPricing\\n\\n//\\n\\nTransparent\\n\\n//\\n\\n## Flexible
pricing\\n\\nExplore transparent pricing built for real-world scraping. Start
for free, then scale as you grow.\\n\\n\U0001F1FA\U0001F1F8USD\\n\\nFree Plan\\n\\nA
lightweight way to try scraping.\\n\\nNo cost, no card, no hassle.\\n\\n500
credits\\n\\n$0123456789\\n\\none-time\\n\\nGet started\\n\\nScrape 500 pages\\n\\n2
concurrent requests\\n\\nLow rate limits\\n\\nHobby\\n\\nGreat for side projects
and small tools.\\n\\nFast, simple, no overkill.\\n\\n3,000 credits\\n\\n$01234567890123456789\\n\\n/monthly\\n\\nBilled
yearly\\n\\n2 months free\\n\\nSubscribe\\n\\nScrape 3,000 pages\\n\\n5 concurrent
requests\\n\\nBasic support\\n\\n$9 per extra 1k credits\\n\\nStandard\\n\\nMost
popular\\n\\nPerfect for scaling with less effort.\\n\\nSimple, solid, dependable.\\n\\n100,000
credits\\n\\n$01234567890123456789\\n\\n/monthly\\n\\nBilled yearly\\n\\n2
months free\\n\\nSubscribe\\n\\nScrape 100,000 pages\\n\\n50 concurrent requests\\n\\nStandard
support\\n\\n$47 per extra 35k credits\\n\\nGrowth\\n\\nBuilt for high volume
and speed.\\n\\nFirecrawl at full force.\\n\\n500,000 credits\\n\\n$012345678901234567890123456789\\n\\n/monthly\\n\\nBilled
yearly\\n\\n2 months free\\n\\nSubscribe\\n\\nScrape 500,000 pages\\n\\n100
concurrent requests\\n\\nPriority support\\n\\n$177 per extra 175k credits\\n\\nExtra
credits are available via auto-recharge packs. [Enable](https://www.firecrawl.dev/signin/signup)\\n\\nEnterprise\\n\\nPower
at your pace\\n\\nUnlimited credits. Custom RPMs.\\n\\n[Contact sales](https://fk4bvu0n5qp.typeform.com/to/Ej6oydlg)
[More details](https://www.firecrawl.dev/enterprise)\\n\\nBulk discounts\\n\\nTop
priority support\\n\\nCustom concurrency limits\\n\\nImproved stealth proxies\\n\\nSLAs\\n\\nAdvanced
security & controls\\n\\n\\\\[05/ 07 \\\\]\\n\\n\xB7\\n\\nTestimonials\\n\\n//\\n\\nCommunity\\n\\n//\\n\\n##
People love building withFirecrawl\\n\\nDiscover why developers choose
Firecrawl every day.\\n\\n[![Morgan Linton](https://www.firecrawl.dev/assets/testimonials/morgan-linton.png)Morgan
Linton@morganlinton\\\"If you're coding with AI, and haven't discovered @firecrawl\\\\_dev
yet, prepare to have your mind blown \U0001F92F\\\"](https://x.com/morganlinton/status/1839454165703204955)
[![Chris DeWeese](https://www.firecrawl.dev/assets/testimonials/chris-deweese.png)Chris
DeWeese@chrisdeweese\\\\_\\\"Started using @firecrawl\\\\_dev for a project,
I wish I used this sooner.\\\"](https://x.com/chrisdeweese_/status/1853587120406876601)
[![Alex Reibman](https://www.firecrawl.dev/assets/testimonials/alex-reibman.png)Alex
Reibman@AlexReibman\\\"Moved our internal agent's web scraping tool from Apify
to Firecrawl because it benchmarked 50x faster with AgentOps.\\\"](https://x.com/AlexReibman/status/1780299595484131836)
[![Tom - Morpho](https://www.firecrawl.dev/assets/testimonials/tom-morpho.png)Tom
- Morpho@TomReppelin\\\"I found gold today. Thank you @firecrawl\\\\_dev\\\"](https://x.com/TomReppelin/status/1844382491014201613)\\n\\n[![Morgan
Linton](https://www.firecrawl.dev/assets/testimonials/morgan-linton.png)Morgan
Linton@morganlinton\\\"If you're coding with AI, and haven't discovered @firecrawl\\\\_dev
yet, prepare to have your mind blown \U0001F92F\\\"](https://x.com/morganlinton/status/1839454165703204955)
[![Chris DeWeese](https://www.firecrawl.dev/assets/testimonials/chris-deweese.png)Chris
DeWeese@chrisdeweese\\\\_\\\"Started using @firecrawl\\\\_dev for a project,
I wish I used this sooner.\\\"](https://x.com/chrisdeweese_/status/1853587120406876601)
[![Alex Reibman](https://www.firecrawl.dev/assets/testimonials/alex-reibman.png)Alex
Reibman@AlexReibman\\\"Moved our internal agent's web scraping tool from Apify
to Firecrawl because it benchmarked 50x faster with AgentOps.\\\"](https://x.com/AlexReibman/status/1780299595484131836)
[![Tom - Morpho](https://www.firecrawl.dev/assets/testimonials/tom-morpho.png)Tom
- Morpho@TomReppelin\\\"I found gold today. Thank you @firecrawl\\\\_dev\\\"](https://x.com/TomReppelin/status/1844382491014201613)\\n\\n[![Bardia](https://www.firecrawl.dev/assets/testimonials/bardia.png)Bardia@thepericulum\\\"The
Firecrawl team ships. I wanted types for their node SDK, and less than an
hour later, I got them.\\\"](https://x.com/thepericulum/status/1781397799487078874)
[![Matt Busigin](https://www.firecrawl.dev/assets/testimonials/matt-busigin.png)Matt
Busigin@mbusigin\\\"Firecrawl is dope. Congrats guys \U0001F44F\\\"](https://x.com/mbusigin/status/1836065372010656069)
[![Sumanth](https://www.firecrawl.dev/assets/testimonials/sumanth.png)Sumanth@Sumanth\\\\_077\\\"Web
scraping will never be the same!\\\\\\\\\\n\\\\\\\\\\nFirecrawl is an open-source
framework that takes a URL, crawls it, and conver...\\\"](https://x.com/Sumanth_077/status/1940049003074478511)
[![Steven Tey](https://www.firecrawl.dev/assets/testimonials/steven-tey.png)Steven
Tey@steventey\\\"Open-source Clay alternative just dropped\\\\\\\\\\n\\\\\\\\\\nUpload
a CSV of emails and...\\\"](https://x.com/steventey/status/1932945651761098889)\\n\\n[![Bardia](https://www.firecrawl.dev/assets/testimonials/bardia.png)Bardia@thepericulum\\\"The
Firecrawl team ships. I wanted types for their node SDK, and less than an
hour later, I got them.\\\"](https://x.com/thepericulum/status/1781397799487078874)
[![Matt Busigin](https://www.firecrawl.dev/assets/testimonials/matt-busigin.png)Matt
Busigin@mbusigin\\\"Firecrawl is dope. Congrats guys \U0001F44F\\\"](https://x.com/mbusigin/status/1836065372010656069)
[![Sumanth](https://www.firecrawl.dev/assets/testimonials/sumanth.png)Sumanth@Sumanth\\\\_077\\\"Web
scraping will never be the same!\\\\\\\\\\n\\\\\\\\\\nFirecrawl is an open-source
framework that takes a URL, crawls it, and conver...\\\"](https://x.com/Sumanth_077/status/1940049003074478511)
[![Steven Tey](https://www.firecrawl.dev/assets/testimonials/steven-tey.png)Steven
Tey@steventey\\\"Open-source Clay alternative just dropped\\\\\\\\\\n\\\\\\\\\\nUpload
a CSV of emails and...\\\"](https://x.com/steventey/status/1932945651761098889)\\n\\n\\\\[06/
07 \\\\]\\n\\n\xB7\\n\\nUse Cases\\n\\n//\\n\\nUse cases\\n\\n//\\n\\n## Transform
\ web data into AI-powered solutions\\n\\nDiscover how Firecrawl customers
are getting the most out of our API.\\n\\n[View all use cases](https://docs.firecrawl.dev/use-cases/overview)\\n\\nChat
with context\\n\\nSmarter AI chats\\n\\nPower your AI assistants with real-time,
accurate web content.\\n\\n[View docs](https://docs.firecrawl.dev/introduction)\\n\\n![AI
Assistant](https://www.firecrawl.dev/assets/ai/bot.png)\\n\\nAI Assistant\\n\\nwithFirecrawl\\n\\nReal-time\xB7Updated
2 min ago\\n\\nAsk anything...\\n\\nKnow your leads\\n\\nLead enrichment\\n\\nEnhance
your sales data with\\n\\nweb information.\\n\\n[Check out Extract](https://www.firecrawl.dev/extract)\\n\\nExtracting
leads from directory...\\n\\nTech startups\\n\\nWith contact info\\n\\nDecision
makers\\n\\nFunding stage\\n\\nReady to engage\\n\\n![Emily Tran](https://www.firecrawl.dev/assets/ai/leads-1.png)\\n\\n![James
Carter](https://www.firecrawl.dev/assets/ai/leads-2.png)\\n\\n![Sophia Kim](https://www.firecrawl.dev/assets/ai/leads-3.png)\\n\\n![Michael
Rivera](https://www.firecrawl.dev/assets/ai/leads-4.png)\\n\\nKnow your leads\\n\\nMCPs\\n\\nAdd
powerful scraping to your\\n\\ncode editors.\\n\\n[Get started](https://docs.firecrawl.dev/mcp-server)\\n\\n![Claude
Code](https://www.firecrawl.dev/assets/ai/mcps-claude.png)\\n\\nClaude Code\\n\\n![Cursor](https://www.firecrawl.dev/assets/ai/mcps-cursor.png)\\n\\nCursor\\n\\n![Windsurf](https://www.firecrawl.dev/assets/ai/mcps-windsurf.png)\\n\\nWindsurf\\n\\n\u273B\\n\\nWelcome
to Claude Code!\\n\\n/help for help, /status for your current setup\\n\\n>Try
\\\"how do I log an error?\\\"\\n\\nBuild with context\\n\\nAI platforms\\n\\nLet
your customers build AI apps\\n\\nwith web data.\\n\\n[Check out Map](https://docs.firecrawl.dev/features/map)\\n\\n![Logo
1](https://www.firecrawl.dev/assets/ai/platforms-1.png)\\n\\n![Logo 2](https://www.firecrawl.dev/assets/ai/platforms-2.png)\\n\\n![Logo
4](https://www.firecrawl.dev/assets/ai/platforms-4.png)\\n\\n![Logo 3](https://www.firecrawl.dev/assets/ai/platforms-3.png)\\n\\nExtracting
text...\\n\\nNo insight missed\\n\\nDeep research\\n\\nExtract comprehensive
information for\\n\\nin-depth research.\\n\\n[Build your own with Search](https://docs.firecrawl.dev/features/search)\\n\\nDeep
research in progress...\\n\\nAcademic papers\\n\\n0 found\\n\\nNews articles\\n\\n0
found\\n\\nExpert opinions\\n\\n0 found\\n\\nResearch reports\\n\\n0 found\\n\\nIndustry
data\\n\\n0 found\\n\\nAsk anything...\\n\\n\\\\[ CTA \\\\]\\n\\n\\\\[ CRAWL
\\\\]\\n\\n\\\\[ SCRAPE \\\\]\\n\\n\\\\[ CTA \\\\]\\n\\n//\\n\\nGet started\\n\\n//\\n\\nReady
to build?\\n\\nStart getting Web Data for free and scale seamlessly as your
project expands. No credit card needed.\\n\\n[Start for free](https://www.firecrawl.dev/signin)
[See our plans](https://www.firecrawl.dev/pricing)\\n\\n\\\\[07/ 07 \\\\]\\n\\n\xB7\\n\\nFAQ\\n\\n//\\n\\nFAQ\\n\\n//\\n\\n##
Frequently askedquestions\\n\\nEverything you need to know about Firecrawl.\\n\\nGeneral\\n\\nWhat
is Firecrawl?\\n\\nWhat sites work?\\n\\nWho can benefit from using Firecrawl?\\n\\nIs
Firecrawl open-source?\\n\\nWhat is the difference between Firecrawl and other
web scrapers?\\n\\nWhat is the difference between the open-source version
and the hosted version?\\n\\nScraping & Crawling\\n\\nHow does Firecrawl handle
dynamic content on websites?\\n\\nWhy is it not crawling all the pages?\\n\\nCan
Firecrawl crawl websites without a sitemap?\\n\\nWhat formats can Firecrawl
convert web data into?\\n\\nHow does Firecrawl ensure the cleanliness of the
data?\\n\\nIs Firecrawl suitable for large-scale data scraping projects?\\n\\nDoes
it respect robots.txt?\\n\\nWhat measures does Firecrawl take to handle web
scraping challenges like rate limits and caching?\\n\\nDoes Firecrawl handle
captcha or authentication?\\n\\nAPI Related\\n\\nWhere can I find my API key?\\n\\nBilling\\n\\nIs
Firecrawl free?\\n\\nIs there a pay-per-use plan instead of monthly?\\n\\nDo
credits roll over to the next month?\\n\\nHow many credits do scraping and
crawling cost?\\n\\nDo you charge for failed requests?\\n\\nWhat payment methods
do you accept?\\n\\nFOOTER\\n\\nThe easiest way to extract\\n\\ndata from
the web\\n\\nBacked by\\n\\nY Combinator\\n\\n[Linkedin](https://www.linkedin.com/company/firecrawl)
[Github](https://github.com/firecrawl/firecrawl)\\n\\nSOC II \xB7 Type 2\\n\\nAICPA\\n\\nSOC
2\\n\\n[X (Twitter)](https://x.com/firecrawl_dev) [Discord](https://discord.gg/gSmWdAkdwd)\\n\\nProducts\\n\\n[Playground](https://www.firecrawl.dev/playground)
[Extract](https://www.firecrawl.dev/extract) [Pricing](https://www.firecrawl.dev/pricing)
[Templates](https://www.firecrawl.dev/templates) [Changelog](https://www.firecrawl.dev/changelog)\\n\\nUse
Cases\\n\\n[AI Platforms](https://docs.firecrawl.dev/use-cases/ai-platforms)
[Lead Enrichment](https://docs.firecrawl.dev/use-cases/lead-enrichment) [SEO
Platforms](https://docs.firecrawl.dev/use-cases/seo-platforms) [Deep Research](https://docs.firecrawl.dev/use-cases/deep-research)\\n\\nDocumentation\\n\\n[Getting
started](https://docs.firecrawl.dev/introduction) [API Reference](https://docs.firecrawl.dev/api-reference/introduction)
[Integrations](https://www.firecrawl.dev/app) [Examples](https://docs.firecrawl.dev/use-cases/overview)
[SDKs](https://docs.firecrawl.dev/sdks/overview)\\n\\nCompany\\n\\n[Blog](https://www.firecrawl.dev/blog)
[Careers](https://www.firecrawl.dev/careers) [Creator & OSS program](https://www.firecrawl.dev/creator-oss-program)
[Student program](https://www.firecrawl.dev/student-program)\\n\\n\xA9 2025
Firecrawl\\n\\n[Terms of Service](https://www.firecrawl.dev/terms-of-service)
[Privacy Policy](https://www.firecrawl.dev/privacy-policy) [Report Abuse](mailto:help@firecrawl.com?subject=Issue:)\\n\\n[All
systems normal](https://status.firecrawl.dev/)\\n\\nStripeM-Inner\",\"metadata\":{\"twitter:title\":\"Firecrawl
- The Web Data API for AI\",\"publisher\":\"Firecrawl\",\"ogUrl\":\"https://www.firecrawl.dev\",\"robots\":\"follow,
index\",\"title\":\"Firecrawl - The Web Data API for AI\",\"ogDescription\":\"The
web crawling, scraping, and search API for AI. Built for scale. Firecrawl
delivers the entire internet to AI agents and builders. Clean, structured,
and ready to reason with.\",\"ogImage\":\"https://www.firecrawl.dev/og.png\",\"viewport\":\"width=device-width,
initial-scale=1, maximum-scale=1, user-scalable=no\",\"og:url\":\"https://www.firecrawl.dev\",\"og:site_name\":\"Firecrawl
- The Web Data API for AI\",\"og:type\":\"website\",\"twitter:image\":\"https://www.firecrawl.dev/og.png\",\"author\":\"Firecrawl\",\"og:title\":\"Firecrawl
- The Web Data API for AI\",\"favicon\":\"https://www.firecrawl.dev/favicon.png\",\"description\":\"The
web crawling, scraping, and search API for AI. Built for scale. Firecrawl
delivers the entire internet to AI agents and builders. Clean, structured,
and ready to reason with.\",\"referrer\":\"origin-when-cross-origin\",\"twitter:site\":\"@Vercel\",\"ogSiteName\":\"Firecrawl
- The Web Data API for AI\",\"og:image\":\"https://www.firecrawl.dev/og.png\",\"twitter:card\":\"summary_large_image\",\"twitter:creator\":\"@Vercel\",\"twitter:description\":\"The
web crawling, scraping, and search API for AI. Built for scale. Firecrawl
delivers the entire internet to AI agents and builders. Clean, structured,
and ready to reason with.\",\"language\":\"en\",\"keywords\":\"Firecrawl,Markdown,Data,Mendable,Langchain\",\"creator\":\"Firecrawl\",\"ogTitle\":\"Firecrawl
- The Web Data API for AI\",\"og:description\":\"The web crawling, scraping,
and search API for AI. Built for scale. Firecrawl delivers the entire internet
to AI agents and builders. Clean, structured, and ready to reason with.\",\"scrapeId\":\"e78d8060-d581-4e5e-b25a-90cfdad48530\",\"sourceURL\":\"https://firecrawl.dev\",\"url\":\"https://www.firecrawl.dev/\",\"statusCode\":200,\"contentType\":\"text/html;
charset=utf-8\",\"proxyUsed\":\"basic\",\"cacheState\":\"hit\",\"cachedAt\":\"2025-10-29T13:09:07.713Z\",\"creditsUsed\":1}}}"
headers:
Access-Control-Allow-Origin:
- '*'
Alt-Svc:
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
Content-Length:
- '24693'
Content-Type:
- application/json; charset=utf-8
Date:
- Wed, 29 Oct 2025 14:34:03 GMT
ETag:
- W/"6075-Q1W6uMv95JKEZARbtaiPYYMojlU"
Via:
- 1.1 google
X-Powered-By:
- Express
X-Response-Time:
- 4719.998ms
status:
code: 200
message: OK
version: 1

View File

@@ -0,0 +1,937 @@
interactions:
- request:
body: '{"query": "firecrawl", "limit": 5, "scrapeOptions": {"includeTags": [],
"excludeTags": [], "onlyMainContent": true, "waitFor": 0, "skipTlsVerification":
true, "removeBase64Images": true, "fastMode": false, "blockAds": true, "storeInCache":
true, "maxAge": 14400000, "formats": ["markdown"], "mobile": false}, "origin":
"python-sdk@4.5.0"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '338'
Content-Type:
- application/json
User-Agent:
- python-requests/2.32.5
method: POST
uri: https://api.firecrawl.dev/v2/search
response:
body:
string: "{\"success\":true,\"data\":{\"web\":[{\"url\":\"https://www.firecrawl.dev/\",\"title\":\"Firecrawl
- The Web Data API for AI\",\"description\":\"The web crawling, scraping,
and search API for AI. Built for scale. Firecrawl delivers the entire internet
to AI agents and builders.\",\"position\":1,\"markdown\":\"We just raised
our Series A and shipped Firecrawl /v2 \U0001F389. [Read the blog.](https://www.firecrawl.dev/blog/firecrawl-v2-series-a-announcement)\\n\\n[2
Months Free \u2014 Annually](https://www.firecrawl.dev/pricing)\\n\\n# Turn
websites into LLM-ready data\\n\\nPower your AI apps with clean web data\\n\\nfrom
any website. [It's also open source.](https://github.com/firecrawl/firecrawl)\\n\\nScrape\\n\\nSearch\\nNew\\n\\nMap\\n\\nCrawl\\n\\nScrape\\n\\nLogo\\n\\nNavigation\\n\\nButton\\n\\nH1
Title\\n\\nDescription\\n\\nCTA Button\\n\\n\\\\[ .JSON \\\\]\\n\\n```json\\n1[\\\\\\n2
\ {\\\\\\n3 \\\"url\\\": \\\"https://example.com\\\",\\\\\\n4 \\\"markdown\\\":
\\\"# Getting Started...\\\",\\\\\\n5 \\\"json\\\": { \\\"title\\\": \\\"Guide\\\",
\\\"docs\\\": \\\"...\\\" },\\\\\\n6 \\\"screenshot\\\": \\\"https://example.com/hero.png\\\"\\\\\\n7
\ }\\\\\\n8]\\n```\\n\\nScrape Completed\\n\\nTrusted by5000+\\n\\ncompaniesof
all sizes\\n\\n![Logo 17](https://www.firecrawl.dev/assets-original/logocloud/17.png)\\n\\n![Logo
18](https://www.firecrawl.dev/assets-original/logocloud/18.png)\\n\\n![Logo
1](https://www.firecrawl.dev/assets-original/logocloud/1.png)\\n\\n![Logo
2](https://www.firecrawl.dev/assets-original/logocloud/2.png)\\n\\n![Logo
3](https://www.firecrawl.dev/assets-original/logocloud/3.png)\\n\\n![Logo
4](https://www.firecrawl.dev/assets-original/logocloud/4.png)\\n\\n![Logo
5](https://www.firecrawl.dev/assets-original/logocloud/5.png)\\n\\n![Logo
6](https://www.firecrawl.dev/assets-original/logocloud/6.png)\\n\\n![Logo
7](https://www.firecrawl.dev/assets-original/logocloud/7.png)\\n\\n![Logo
8](https://www.firecrawl.dev/assets-original/logocloud/8.png)\\n\\n![Logo
9](https://www.firecrawl.dev/assets-original/logocloud/9.png)\\n\\n![Logo
10](https://www.firecrawl.dev/assets-original/logocloud/10.png)\\n\\n![Logo
11](https://www.firecrawl.dev/assets-original/logocloud/11.png)\\n\\n![Logo
12](https://www.firecrawl.dev/assets-original/logocloud/12.png)\\n\\n![Logo
13](https://www.firecrawl.dev/assets-original/logocloud/13.png)\\n\\n![Logo
14](https://www.firecrawl.dev/assets-original/logocloud/14.png)\\n\\n![Logo
15](https://www.firecrawl.dev/assets-original/logocloud/15.png)\\n\\n![Logo
16](https://www.firecrawl.dev/assets-original/logocloud/16.png)\\n\\n![Logo
17](https://www.firecrawl.dev/assets-original/logocloud/17.png)\\n\\n![Logo
18](https://www.firecrawl.dev/assets-original/logocloud/18.png)\\n\\n![Logo
19](https://www.firecrawl.dev/assets-original/logocloud/19.png)\\n\\n![Logo
20](https://www.firecrawl.dev/assets-original/logocloud/20.png)\\n\\n![Logo
21](https://www.firecrawl.dev/assets-original/logocloud/21.png)\\n\\n![Logo
17](https://www.firecrawl.dev/assets-original/logocloud/17.png)\\n\\n![Logo
18](https://www.firecrawl.dev/assets-original/logocloud/18.png)\\n\\n![Logo
1](https://www.firecrawl.dev/assets-original/logocloud/1.png)\\n\\n![Logo
2](https://www.firecrawl.dev/assets-original/logocloud/2.png)\\n\\n![Logo
3](https://www.firecrawl.dev/assets-original/logocloud/3.png)\\n\\n![Logo
4](https://www.firecrawl.dev/assets-original/logocloud/4.png)\\n\\n![Logo
5](https://www.firecrawl.dev/assets-original/logocloud/5.png)\\n\\n![Logo
6](https://www.firecrawl.dev/assets-original/logocloud/6.png)\\n\\n![Logo
7](https://www.firecrawl.dev/assets-original/logocloud/7.png)\\n\\n![Logo
8](https://www.firecrawl.dev/assets-original/logocloud/8.png)\\n\\n![Logo
9](https://www.firecrawl.dev/assets-original/logocloud/9.png)\\n\\n![Logo
10](https://www.firecrawl.dev/assets-original/logocloud/10.png)\\n\\n![Logo
11](https://www.firecrawl.dev/assets-original/logocloud/11.png)\\n\\n![Logo
12](https://www.firecrawl.dev/assets-original/logocloud/12.png)\\n\\n![Logo
13](https://www.firecrawl.dev/assets-original/logocloud/13.png)\\n\\n![Logo
14](https://www.firecrawl.dev/assets-original/logocloud/14.png)\\n\\n![Logo
15](https://www.firecrawl.dev/assets-original/logocloud/15.png)\\n\\n![Logo
16](https://www.firecrawl.dev/assets-original/logocloud/16.png)\\n\\n![Logo
17](https://www.firecrawl.dev/assets-original/logocloud/17.png)\\n\\n![Logo
18](https://www.firecrawl.dev/assets-original/logocloud/18.png)\\n\\n![Logo
19](https://www.firecrawl.dev/assets-original/logocloud/19.png)\\n\\n![Logo
20](https://www.firecrawl.dev/assets-original/logocloud/20.png)\\n\\n![Logo
21](https://www.firecrawl.dev/assets-original/logocloud/21.png)\\n\\n\\\\[01/
07 \\\\]\\n\\n\xB7\\n\\nMain Features\\n\\n//\\n\\nDeveloper First\\n\\n//\\n\\n##
Startscraping today\\n\\nEnhance your apps with industry leading web scraping
and crawling capabilities.\\n\\nScrape\\n\\nGet llm-ready data from websites.
Markdown, JSON, screenshot, etc.\\n\\nSearch\\n\\nNew\\n\\nSearch the web
and get full content from results.\\n\\nCrawl\\n\\nCrawl all the pages on
a website and get data for each page.\\n\\nPython\\n\\nNode.js\\n\\nCurl\\n\\nCopy
code\\n\\n```python\\n1# pip install firecrawl-py\\n2from firecrawl import
Firecrawl\\n3\\n4app = Firecrawl(api_key=\\\"fc-YOUR_API_KEY\\\")\\n5\\n6#
Scrape a website:\\n7app.scrape('firecrawl.dev')\\n8\\n9\\n10\\n```\\n\\n\\\\[
.MD \\\\]\\n\\n```markdown\\n1# Firecrawl\\n2\\n3Firecrawl is a powerful web
scraping\\n4library that makes it easy to extract\\n5data from websites.\\n6\\n7##
Installation\\n8\\n9To install Firecrawl, run:\\n10\\n11\\n```\\n\\n![developer-1](https://www.firecrawl.dev/assets/developer/1.png)\\n\\n![developer-2](https://www.firecrawl.dev/assets/developer/2.png)\\n\\n![developer-3](https://www.firecrawl.dev/assets/developer/3.png)\\n\\n![developer-4](https://www.firecrawl.dev/assets/developer/4.png)\\n\\n![developer-5](https://www.firecrawl.dev/assets/developer/5.png)\\n\\n![developer-6](https://www.firecrawl.dev/assets/developer/6.png)\\n\\n![developer-7](https://www.firecrawl.dev/assets/developer/7.png)\\n\\n![developer-8](https://www.firecrawl.dev/assets/developer/8.png)\\n\\n![developer-9](https://www.firecrawl.dev/assets/developer/1.png)\\n\\n![developer-10](https://www.firecrawl.dev/assets/developer/2.png)\\n\\n![developer-11](https://www.firecrawl.dev/assets/developer/3.png)\\n\\n![developer-12](https://www.firecrawl.dev/assets/developer/4.png)\\n\\n![developer-13](https://www.firecrawl.dev/assets/developer/5.png)\\n\\n![developer-14](https://www.firecrawl.dev/assets/developer/6.png)\\n\\n![developer-15](https://www.firecrawl.dev/assets/developer/7.png)\\n\\n![developer-16](https://www.firecrawl.dev/assets/developer/8.png)\\n\\n![developer-17](https://www.firecrawl.dev/assets/developer/1.png)\\n\\n![developer-18](https://www.firecrawl.dev/assets/developer/2.png)\\n\\n![developer-19](https://www.firecrawl.dev/assets/developer/3.png)\\n\\n![developer-20](https://www.firecrawl.dev/assets/developer/4.png)\\n\\n![developer-21](https://www.firecrawl.dev/assets/developer/5.png)\\n\\n![developer-22](https://www.firecrawl.dev/assets/developer/6.png)\\n\\n![developer-23](https://www.firecrawl.dev/assets/developer/7.png)\\n\\n![developer-24](https://www.firecrawl.dev/assets/developer/8.png)\\n\\nIntegrations\\n\\n###
Use well-known tools\\n\\nAlready fully integrated with the greatest existing
tools and workflows.\\n\\n[See all integrations](https://www.firecrawl.dev/app)\\n\\n![Firecrawl
icon (blueprint)](https://www.firecrawl.dev/assets-original/developer-os-icon.png)\\n\\nmendableai/firecrawl\\n\\nPublic\\n\\nStar\\n\\n65.3K\\n\\n\\\\[python-SDK\\\\]
improvs/async\\n\\n#1337\\n\\n\xB7\\n\\nApr 18, 2025\\n\\n\xB7\\n\\n![rafaelsideguide](https://www.firecrawl.dev/_next/image?url=https%3A%2F%2Favatars.githubusercontent.com%2Fu%2F150964962%3Fv%3D4&w=48&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\nrafaelsideguide\\n\\nfeat(extract):
cost limit\\n\\n#1473\\n\\n\xB7\\n\\nApr 17, 2025\\n\\n\xB7\\n\\n![mogery](https://www.firecrawl.dev/_next/image?url=https%3A%2F%2Favatars.githubusercontent.com%2Fu%2F66118807%3Fv%3D4&w=48&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\nmogery\\n\\nfeat(scrape):
get job result from GCS, avoid Redis\\n\\n#1461\\n\\n\xB7\\n\\nApr 15, 2025\\n\\n\xB7\\n\\n![mogery](https://www.firecrawl.dev/_next/image?url=https%3A%2F%2Favatars.githubusercontent.com%2Fu%2F66118807%3Fv%3D4&w=48&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\nmogery\\n\\nExtract
v2/rerank improvs\\n\\n#1437\\n\\n\xB7\\n\\nApr 11, 2025\\n\\n\xB7\\n\\n![rafaelsideguide](https://www.firecrawl.dev/_next/image?url=https%3A%2F%2Favatars.githubusercontent.com%2Fu%2F150964962%3Fv%3D4&w=48&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\nrafaelsideguide\\n\\n![https://avatars.githubusercontent.com/u/150964962?v=4](https://www.firecrawl.dev/_next/image?url=https%3A%2F%2Favatars.githubusercontent.com%2Fu%2F150964962%3Fv%3D4&w=96&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\n![https://avatars.githubusercontent.com/u/66118807?v=4](https://www.firecrawl.dev/_next/image?url=https%3A%2F%2Favatars.githubusercontent.com%2Fu%2F66118807%3Fv%3D4&w=96&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\n+90\\n\\nOpen
Source\\n\\n### Code you can trust\\n\\nDeveloped transparently and collaboratively.
Join our community of contributors.\\n\\n[Check out our repo](https://github.com/firecrawl/firecrawl)\\n\\n\\\\[02/
07 \\\\]\\n\\n\xB7\\n\\nCore\\n\\n//\\n\\nBuilt to outperform\\n\\n//\\n\\n##
Core principles, proven performance\\n\\nBuilt from the ground up to outperform
traditional scrapers.\\n\\nNo proxy headaches\\n\\nReliable. Covers 96% of
the web,\\n\\nincluding JS-heavy and protected pages. No proxies, no puppets,
just clean data.\\n\\nFirecrawl\\n\\n96%\\n\\n![Puppeteer icon](https://www.firecrawl.dev/assets/puppeteer.png)\\n\\nPuppeteer\\n\\n79%\\n\\ncURL\\n\\n75%\\n\\nSpeed
that feels invisible\\n\\nBlazingly fast. Delivers results in less than 1 second,
fast for real-time agents\\n\\nand dynamic apps.\\n\\nURL\\n\\nCrawl\\n\\nScrape\\n\\nfirecrawl.dev/docs\\n\\n50ms\\n\\n51ms\\n\\nfirecrawl.dev/templates\\n\\n52ms\\n\\n50ms\\n\\nfirecrawl.dev/changelog\\n\\n49ms\\n\\n52ms\\n\\nfirecrawl.dev/about\\n\\n52ms\\n\\n50ms\\n\\nfirecrawl.dev/changelog\\n\\n50ms\\n\\n52ms\\n\\nfirecrawl.dev/playground\\n\\n51ms\\n\\n49ms\\n\\n\\\\[
CTA \\\\]\\n\\n\\\\[ CRAWL \\\\]\\n\\n\\\\[ SCRAPE \\\\]\\n\\n\\\\[ CTA \\\\]\\n\\n//\\n\\nGet
started\\n\\n//\\n\\nReady to build?\\n\\nStart getting Web Data for free
and scale seamlessly as your project expands. No credit card needed.\\n\\n[Start
for free](https://www.firecrawl.dev/signin) [See our plans](https://www.firecrawl.dev/pricing)\\n\\n\\\\[03/
07 \\\\]\\n\\n\xB7\\n\\nFeatures\\n\\n//\\n\\nZero configuration\\n\\n//\\n\\n##
We handle the hard stuff\\n\\nRotating proxies, orchestration, rate limits,
js-blocked content and more.\\n\\nDocs to data\\n\\nMedia parsing. Firecrawl
can parse and output content from web hosted pdfs, docx, and more.\\n\\nhttps://example.com/docs/report.pdf\\n\\nhttps://example.com/files/brief.docx\\n\\nhttps://example.com/docs/guide.html\\n\\ndocx\\n\\nParsing...\\n\\nKnows
the moment\\n\\nSmart wait. Firecrawl intelligently waits for content to load,
making scraping faster and more reliable.\\n\\nhttps://example-spa.com\\n\\nRequest
Sent\\n\\nScrapes the real thing\\n\\nCached, when you need it. Selective caching,
you choose your caching patterns, growing web index.\\n\\n![User](https://www.firecrawl.dev/_next/image?url=%2Fassets-original%2Ffeatures%2Fcached-user.png&w=256&q=75&dpl=dpl_7RqvseQXNVYetFdhTKj6RntohhL1)\\n\\nUser\\n\\nFirecrawl\\n\\nCache\\n\\nInvisible
access\\n\\nStealth mode. Crawls the web without\\n\\nbeing blocked, mimics
real users to access protected or dynamic content.\\n\\nInteractive scraping\\n\\nActions. Click,
scroll, write, wait, press and more before extracting content.\\n\\nhttps://example.com\\n\\nNavigate\\n\\nClick\\n\\nType\\n\\nWait\\n\\nScroll\\n\\nPress\\n\\nScreenshot\\n\\nScrape\\n\\n\\\\[04/
07 \\\\]\\n\\n\xB7\\n\\nPricing\\n\\n//\\n\\nTransparent\\n\\n//\\n\\n## Flexible
pricing\\n\\nExplore transparent pricing built for real-world scraping. Start
for free, then scale as you grow.\\n\\n\U0001F1FA\U0001F1F8USD\\n\\nFree Plan\\n\\nA
lightweight way to try scraping.\\n\\nNo cost, no card, no hassle.\\n\\n500
credits\\n\\n$0123456789\\n\\none-time\\n\\nGet started\\n\\nScrape 500 pages\\n\\n2
concurrent requests\\n\\nLow rate limits\\n\\nHobby\\n\\nGreat for side projects
and small tools.\\n\\nFast, simple, no overkill.\\n\\n3,000 credits\\n\\n$01234567890123456789\\n\\n/monthly\\n\\nBilled
yearly\\n\\n2 months free\\n\\nSubscribe\\n\\nScrape 3,000 pages\\n\\n5 concurrent
requests\\n\\nBasic support\\n\\n$9 per extra 1k credits\\n\\nStandard\\n\\nMost
popular\\n\\nPerfect for scaling with less effort.\\n\\nSimple, solid, dependable.\\n\\n100,000
credits\\n\\n$01234567890123456789\\n\\n/monthly\\n\\nBilled yearly\\n\\n2
months free\\n\\nSubscribe\\n\\nScrape 100,000 pages\\n\\n50 concurrent requests\\n\\nStandard
support\\n\\n$47 per extra 35k credits\\n\\nGrowth\\n\\nBuilt for high volume
and speed.\\n\\nFirecrawl at full force.\\n\\n500,000 credits\\n\\n$012345678901234567890123456789\\n\\n/monthly\\n\\nBilled
yearly\\n\\n2 months free\\n\\nSubscribe\\n\\nScrape 500,000 pages\\n\\n100
concurrent requests\\n\\nPriority support\\n\\n$177 per extra 175k credits\\n\\nExtra
credits are available via auto-recharge packs. [Enable](https://www.firecrawl.dev/signin/signup)\\n\\nEnterprise\\n\\nPower
at your pace\\n\\nUnlimited credits. Custom RPMs.\\n\\n[Contact sales](https://fk4bvu0n5qp.typeform.com/to/Ej6oydlg)
[More details](https://www.firecrawl.dev/enterprise)\\n\\nBulk discounts\\n\\nTop
priority support\\n\\nCustom concurrency limits\\n\\nImproved stealth proxies\\n\\nSLAs\\n\\nAdvanced
security & controls\\n\\n\\\\[05/ 07 \\\\]\\n\\n\xB7\\n\\nTestimonials\\n\\n//\\n\\nCommunity\\n\\n//\\n\\n##
People love building with Firecrawl\\n\\nDiscover why developers choose
Firecrawl every day.\\n\\n[![Morgan Linton](https://www.firecrawl.dev/assets/testimonials/morgan-linton.png)Morgan
Linton@morganlinton\\\"If you're coding with AI, and haven't discovered @firecrawl\\\\_dev
yet, prepare to have your mind blown \U0001F92F\\\"](https://x.com/morganlinton/status/1839454165703204955)
[![Chris DeWeese](https://www.firecrawl.dev/assets/testimonials/chris-deweese.png)Chris
DeWeese@chrisdeweese\\\\_\\\"Started using @firecrawl\\\\_dev for a project,
I wish I used this sooner.\\\"](https://x.com/chrisdeweese_/status/1853587120406876601)
[![Alex Reibman](https://www.firecrawl.dev/assets/testimonials/alex-reibman.png)Alex
Reibman@AlexReibman\\\"Moved our internal agent's web scraping tool from Apify
to Firecrawl because it benchmarked 50x faster with AgentOps.\\\"](https://x.com/AlexReibman/status/1780299595484131836)
[![Tom - Morpho](https://www.firecrawl.dev/assets/testimonials/tom-morpho.png)Tom
- Morpho@TomReppelin\\\"I found gold today. Thank you @firecrawl\\\\_dev\\\"](https://x.com/TomReppelin/status/1844382491014201613)\\n\\n[![Bardia](https://www.firecrawl.dev/assets/testimonials/bardia.png)Bardia@thepericulum\\\"The
Firecrawl team ships. I wanted types for their node SDK, and less than an
hour later, I got them.\\\"](https://x.com/thepericulum/status/1781397799487078874)
[![Matt Busigin](https://www.firecrawl.dev/assets/testimonials/matt-busigin.png)Matt
Busigin@mbusigin\\\"Firecrawl is dope. Congrats guys \U0001F44F\\\"](https://x.com/mbusigin/status/1836065372010656069)
[![Sumanth](https://www.firecrawl.dev/assets/testimonials/sumanth.png)Sumanth@Sumanth\\\\_077\\\"Web
scraping will never be the same!\\\\\\\\\\n\\\\\\\\\\nFirecrawl is an open-source
framework that takes a URL, crawls it, and conver...\\\"](https://x.com/Sumanth_077/status/1940049003074478511)
[![Steven Tey](https://www.firecrawl.dev/assets/testimonials/steven-tey.png)Steven
Tey@steventey\\\"Open-source Clay alternative just dropped\\\\\\\\\\n\\\\\\\\\\nUpload
a CSV of emails and...\\\"](https://x.com/steventey/status/1932945651761098889)\\n\\n\\\\[06/
07 \\\\]\\n\\n\xB7\\n\\nUse Cases\\n\\n//\\n\\nUse cases\\n\\n//\\n\\n## Transform
\ web data into AI-powered solutions\\n\\nDiscover how Firecrawl customers
are getting the most out of our API.\\n\\n[View all use cases](https://docs.firecrawl.dev/use-cases/overview)\\n\\nChat
with context\\n\\nSmarter AI chats\\n\\nPower your AI assistants with real-time,
accurate web content.\\n\\n[View docs](https://docs.firecrawl.dev/introduction)\\n\\n![AI
Assistant](https://www.firecrawl.dev/assets/ai/bot.png)\\n\\nAI Assistant\\n\\nwith Firecrawl\\n\\nReal-time\xB7Updated
2 min ago\\n\\nAsk anything...\\n\\nKnow your leads\\n\\nLead enrichment\\n\\nEnhance
your sales data with\\n\\nweb information.\\n\\n[Check out Extract](https://www.firecrawl.dev/extract)\\n\\nExtracting
leads from directory...\\n\\nTech startups\\n\\nWith contact info\\n\\nDecision
makers\\n\\nFunding stage\\n\\nReady to engage\\n\\n![Emily Tran](https://www.firecrawl.dev/assets/ai/leads-1.png)\\n\\n![James
Carter](https://www.firecrawl.dev/assets/ai/leads-2.png)\\n\\n![Sophia Kim](https://www.firecrawl.dev/assets/ai/leads-3.png)\\n\\n![Michael
Rivera](https://www.firecrawl.dev/assets/ai/leads-4.png)\\n\\nKnow your leads\\n\\nMCPs\\n\\nAdd
powerful scraping to your\\n\\ncode editors.\\n\\n[Get started](https://docs.firecrawl.dev/mcp-server)\\n\\n![Claude
Code](https://www.firecrawl.dev/assets/ai/mcps-claude.png)\\n\\nClaude Code\\n\\n![Cursor](https://www.firecrawl.dev/assets/ai/mcps-cursor.png)\\n\\nCursor\\n\\n![Windsurf](https://www.firecrawl.dev/assets/ai/mcps-windsurf.png)\\n\\nWindsurf\\n\\n\u273B\\n\\nWelcome
to Claude Code!\\n\\n/help for help, /status for your current setup\\n\\n>Try
\\\"how do I log an error?\\\"\\n\\nBuild with context\\n\\nAI platforms\\n\\nLet
your customers build AI apps\\n\\nwith web data.\\n\\n[Check out Map](https://docs.firecrawl.dev/features/map)\\n\\n![Logo
1](https://www.firecrawl.dev/assets/ai/platforms-1.png)\\n\\n![Logo 2](https://www.firecrawl.dev/assets/ai/platforms-2.png)\\n\\n![Logo
4](https://www.firecrawl.dev/assets/ai/platforms-4.png)\\n\\n![Logo 3](https://www.firecrawl.dev/assets/ai/platforms-3.png)\\n\\nExtracting
text...\\n\\nNo insight missed\\n\\nDeep research\\n\\nExtract comprehensive
information for\\n\\nin-depth research.\\n\\n[Build your own with Search](https://docs.firecrawl.dev/features/search)\\n\\nDeep
research in progress...\\n\\nAcademic papers\\n\\n0 found\\n\\nNews articles\\n\\n0
found\\n\\nExpert opinions\\n\\n0 found\\n\\nResearch reports\\n\\n0 found\\n\\nIndustry
data\\n\\n0 found\\n\\nAsk anything...\\n\\n\\\\[ CTA \\\\]\\n\\n\\\\[ CRAWL
\\\\]\\n\\n\\\\[ SCRAPE \\\\]\\n\\n\\\\[ CTA \\\\]\\n\\n//\\n\\nGet started\\n\\n//\\n\\nReady
to build?\\n\\nStart getting Web Data for free and scale seamlessly as your
project expands. No credit card needed.\\n\\n[Start for free](https://www.firecrawl.dev/signin)
[See our plans](https://www.firecrawl.dev/pricing)\\n\\n\\\\[07/ 07 \\\\]\\n\\n\xB7\\n\\nFAQ\\n\\n//\\n\\nFAQ\\n\\n//\\n\\n##
Frequently asked questions\\n\\nEverything you need to know about Firecrawl.\\n\\nGeneral\\n\\nWhat
is Firecrawl?\\n\\nWhat sites work?\\n\\nWho can benefit from using Firecrawl?\\n\\nIs
Firecrawl open-source?\\n\\nWhat is the difference between Firecrawl and other
web scrapers?\\n\\nWhat is the difference between the open-source version
and the hosted version?\\n\\nScraping & Crawling\\n\\nHow does Firecrawl handle
dynamic content on websites?\\n\\nWhy is it not crawling all the pages?\\n\\nCan
Firecrawl crawl websites without a sitemap?\\n\\nWhat formats can Firecrawl
convert web data into?\\n\\nHow does Firecrawl ensure the cleanliness of the
data?\\n\\nIs Firecrawl suitable for large-scale data scraping projects?\\n\\nDoes
it respect robots.txt?\\n\\nWhat measures does Firecrawl take to handle web
scraping challenges like rate limits and caching?\\n\\nDoes Firecrawl handle
captcha or authentication?\\n\\nAPI Related\\n\\nWhere can I find my API key?\\n\\nBilling\\n\\nIs
Firecrawl free?\\n\\nIs there a pay-per-use plan instead of monthly?\\n\\nDo
credits roll over to the next month?\\n\\nHow many credits do scraping and
crawling cost?\\n\\nDo you charge for failed requests?\\n\\nWhat payment methods
do you accept?\\n\\nFOOTER\\n\\nThe easiest way to extract\\n\\ndata from
the web\\n\\nBacked by\\n\\nY Combinator\\n\\n[Linkedin](https://www.linkedin.com/company/firecrawl)
[Github](https://github.com/firecrawl/firecrawl)\\n\\nSOC II \xB7 Type 2\\n\\nAICPA\\n\\nSOC
2\\n\\n[X (Twitter)](https://x.com/firecrawl_dev) [Discord](https://discord.gg/gSmWdAkdwd)\\n\\nProducts\\n\\n[Playground](https://www.firecrawl.dev/playground)
[Extract](https://www.firecrawl.dev/extract) [Pricing](https://www.firecrawl.dev/pricing)
[Templates](https://www.firecrawl.dev/templates) [Changelog](https://www.firecrawl.dev/changelog)\\n\\nUse
Cases\\n\\n[AI Platforms](https://docs.firecrawl.dev/use-cases/ai-platforms)
[Lead Enrichment](https://docs.firecrawl.dev/use-cases/lead-enrichment) [SEO
Platforms](https://docs.firecrawl.dev/use-cases/seo-platforms) [Deep Research](https://docs.firecrawl.dev/use-cases/deep-research)\\n\\nDocumentation\\n\\n[Getting
started](https://docs.firecrawl.dev/introduction) [API Reference](https://docs.firecrawl.dev/api-reference/introduction)
[Integrations](https://www.firecrawl.dev/app) [Examples](https://docs.firecrawl.dev/use-cases/overview)
[SDKs](https://docs.firecrawl.dev/sdks/overview)\\n\\nCompany\\n\\n[Blog](https://www.firecrawl.dev/blog)
[Careers](https://www.firecrawl.dev/careers) [Creator & OSS program](https://www.firecrawl.dev/creator-oss-program)
[Student program](https://www.firecrawl.dev/student-program)\\n\\n\xA9 2025
Firecrawl\\n\\n[Terms of Service](https://www.firecrawl.dev/terms-of-service)
[Privacy Policy](https://www.firecrawl.dev/privacy-policy) [Report Abuse](mailto:help@firecrawl.com?subject=Issue:)\\n\\n[All
systems normal](https://status.firecrawl.dev/)\",\"metadata\":{\"favicon\":\"https://www.firecrawl.dev/favicon.png\",\"ogUrl\":\"https://www.firecrawl.dev\",\"ogImage\":\"https://www.firecrawl.dev/og.png\",\"referrer\":\"origin-when-cross-origin\",\"ogDescription\":\"The
web crawling, scraping, and search API for AI. Built for scale. Firecrawl
delivers the entire internet to AI agents and builders. Clean, structured,
and ready to reason with.\",\"robots\":\"follow, index\",\"twitter:card\":\"summary_large_image\",\"og:site_name\":\"Firecrawl
- The Web Data API for AI\",\"twitter:title\":\"Firecrawl - The Web Data API
for AI\",\"og:image\":\"https://www.firecrawl.dev/og.png\",\"title\":\"Firecrawl
- The Web Data API for AI\",\"og:description\":\"The web crawling, scraping,
and search API for AI. Built for scale. Firecrawl delivers the entire internet
to AI agents and builders. Clean, structured, and ready to reason with.\",\"twitter:image\":\"https://www.firecrawl.dev/og.png\",\"viewport\":\"width=device-width,
initial-scale=1, maximum-scale=1, user-scalable=no\",\"ogSiteName\":\"Firecrawl
- The Web Data API for AI\",\"keywords\":\"Firecrawl,Markdown,Data,Mendable,Langchain\",\"author\":\"Firecrawl\",\"og:title\":\"Firecrawl
- The Web Data API for AI\",\"twitter:description\":\"The web crawling, scraping,
and search API for AI. Built for scale. Firecrawl delivers the entire internet
to AI agents and builders. Clean, structured, and ready to reason with.\",\"description\":\"The
web crawling, scraping, and search API for AI. Built for scale. Firecrawl
delivers the entire internet to AI agents and builders. Clean, structured,
and ready to reason with.\",\"twitter:site\":\"@Vercel\",\"og:url\":\"https://www.firecrawl.dev\",\"og:type\":\"website\",\"ogTitle\":\"Firecrawl
- The Web Data API for AI\",\"language\":\"en\",\"creator\":\"Firecrawl\",\"publisher\":\"Firecrawl\",\"twitter:creator\":\"@Vercel\",\"scrapeId\":\"57b0586f-36e8-4923-aaa2-88ff58c03999\",\"sourceURL\":\"https://www.firecrawl.dev/\",\"url\":\"https://www.firecrawl.dev/\",\"statusCode\":200,\"contentType\":\"text/html;
charset=utf-8\",\"proxyUsed\":\"basic\",\"cacheState\":\"hit\",\"cachedAt\":\"2025-10-29T13:09:07.713Z\"}},{\"url\":\"https://github.com/firecrawl/firecrawl\",\"title\":\"firecrawl/firecrawl:
The Web Data API for AI - Turn entire ... - GitHub\",\"description\":\"Firecrawl
is an API service that takes a URL, crawls it, and converts it into clean
markdown or structured data. We crawl all accessible subpages and give you
...\",\"position\":2,\"category\":\"github\",\"markdown\":\"[Skip to content](https://github.com/firecrawl/firecrawl#start-of-content)\\n\\nYou
signed in with another tab or window. [Reload](https://github.com/firecrawl/firecrawl)
to refresh your session.You signed out in another tab or window. [Reload](https://github.com/firecrawl/firecrawl)
to refresh your session.You switched accounts on another tab or window. [Reload](https://github.com/firecrawl/firecrawl)
to refresh your session.Dismiss alert\\n\\n{{ message }}\\n\\n[firecrawl](https://github.com/firecrawl)/
**[firecrawl](https://github.com/firecrawl/firecrawl)** Public\\n\\n- Couldn't
load subscription status.\\nRetry\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n### Uh
oh!\\n\\n\\n\\n\\n\\n\\n\\nThere was an error while loading. [Please reload
this page](https://github.com/firecrawl/firecrawl).\\n\\n- [Fork\\\\\\\\\\n5.1k](https://github.com/login?return_to=%2Ffirecrawl%2Ffirecrawl)\\n-
[Star\\\\\\\\\\n65.2k](https://github.com/login?return_to=%2Ffirecrawl%2Ffirecrawl)\\n\\n\\n\U0001F525
The Web Data API for AI - Turn entire websites into LLM-ready markdown or
structured data\\n\\n\\n[firecrawl.dev](https://firecrawl.dev/ \\\"https://firecrawl.dev\\\")\\n\\n###
License\\n\\n[AGPL-3.0 license](https://github.com/firecrawl/firecrawl/blob/main/LICENSE)\\n\\n[65.2k\\\\\\\\\\nstars](https://github.com/firecrawl/firecrawl/stargazers)
[5.1k\\\\\\\\\\nforks](https://github.com/firecrawl/firecrawl/forks) [Branches](https://github.com/firecrawl/firecrawl/branches)
[Tags](https://github.com/firecrawl/firecrawl/tags) [Activity](https://github.com/firecrawl/firecrawl/activity)\\n\\n[Star](https://github.com/login?return_to=%2Ffirecrawl%2Ffirecrawl)\\n\\n#
firecrawl/firecrawl\\n\\nmain\\n\\n[**887** Branches](https://github.com/firecrawl/firecrawl/branches)
[**28** Tags](https://github.com/firecrawl/firecrawl/tags)\\n\\n[Go to Branches
page](https://github.com/firecrawl/firecrawl/branches)[Go to Tags page](https://github.com/firecrawl/firecrawl/tags)\\n\\n## Folders and files\\n\\n|
Name | Name | Last commit message | Last commit date |\\n| --- | --- | ---
| --- |\\n| ## Latest commit<br>[![amplitudesxd](https://avatars.githubusercontent.com/u/62763456?v=4&size=40)](https://github.com/amplitudesxd)[amplitudesxd](https://github.com/firecrawl/firecrawl/commits?author=amplitudesxd)<br>[chore:
update last scrape rpc (](https://github.com/firecrawl/firecrawl/commit/37de2877fab4bae2de297e37bad3c9bcd49a64bc)
[#2339](https://github.com/firecrawl/firecrawl/pull/2339) [)](https://github.com/firecrawl/firecrawl/commit/37de2877fab4bae2de297e37bad3c9bcd49a64bc)<br>success<br>20
hours agoOct 27, 2025<br>[37de287](https://github.com/firecrawl/firecrawl/commit/37de2877fab4bae2de297e37bad3c9bcd49a64bc)\_\xB7\_20
hours agoOct 27, 2025<br>## History<br>[4,487 Commits](https://github.com/firecrawl/firecrawl/commits/main/)
<br>Open commit details<br>[View commit history for this file.](https://github.com/firecrawl/firecrawl/commits/main/)
|\\n| [.github](https://github.com/firecrawl/firecrawl/tree/main/.github \\\".github\\\")
| [.github](https://github.com/firecrawl/firecrawl/tree/main/.github \\\".github\\\")
| [fix(ci): temp disabled prod env tests](https://github.com/firecrawl/firecrawl/commit/42fc149c1ab738da0e15e772817774aa35273f8e
\\\"fix(ci): temp disabled prod env tests\\\") | 5 days agoOct 23, 2025 |\\n|
[apps](https://github.com/firecrawl/firecrawl/tree/main/apps \\\"apps\\\")
| [apps](https://github.com/firecrawl/firecrawl/tree/main/apps \\\"apps\\\")
| [chore: update last scrape rpc (](https://github.com/firecrawl/firecrawl/commit/37de2877fab4bae2de297e37bad3c9bcd49a64bc
\\\"chore: update last scrape rpc (#2339)\\\") [#2339](https://github.com/firecrawl/firecrawl/pull/2339)
[)](https://github.com/firecrawl/firecrawl/commit/37de2877fab4bae2de297e37bad3c9bcd49a64bc
\\\"chore: update last scrape rpc (#2339)\\\") | 20 hours agoOct 27, 2025
|\\n| [examples](https://github.com/firecrawl/firecrawl/tree/main/examples
\\\"examples\\\") | [examples](https://github.com/firecrawl/firecrawl/tree/main/examples
\\\"examples\\\") | [Merge pull request](https://github.com/firecrawl/firecrawl/commit/7ad57003b4ad8b230ba8252129e52bafa62dfae9
\\\"Merge pull request #2172 from MAVRICK-1/firecrawl-gemini-screenshot-editor
\ feat: Add Firecrawl + Gemini 2.5 Flash Image CLI Editor\\\") [#2172](https://github.com/firecrawl/firecrawl/pull/2172)
[from MAVRICK-1/firecrawl-gemini-screenshot-e\u2026](https://github.com/firecrawl/firecrawl/commit/7ad57003b4ad8b230ba8252129e52bafa62dfae9
\\\"Merge pull request #2172 from MAVRICK-1/firecrawl-gemini-screenshot-editor
\ feat: Add Firecrawl + Gemini 2.5 Flash Image CLI Editor\\\") | last monthSep
23, 2025 |\\n| [img](https://github.com/firecrawl/firecrawl/tree/main/img
\\\"img\\\") | [img](https://github.com/firecrawl/firecrawl/tree/main/img
\\\"img\\\") | [updated readme](https://github.com/firecrawl/firecrawl/commit/4f904e774831dc598681d3e998d0e5e15abcec27
\\\"updated readme\\\") | 2 months agoAug 18, 2025 |\\n| [.gitattributes](https://github.com/firecrawl/firecrawl/blob/main/.gitattributes
\\\".gitattributes\\\") | [.gitattributes](https://github.com/firecrawl/firecrawl/blob/main/.gitattributes
\\\".gitattributes\\\") | [Initial commit](https://github.com/firecrawl/firecrawl/commit/a6c2a878119321a196f720cce4195e086f1c6b46
\\\"Initial commit\\\") | last yearApr 15, 2024 |\\n| [.gitignore](https://github.com/firecrawl/firecrawl/blob/main/.gitignore
\\\".gitignore\\\") | [.gitignore](https://github.com/firecrawl/firecrawl/blob/main/.gitignore
\\\".gitignore\\\") | [Nick: init](https://github.com/firecrawl/firecrawl/commit/ab3fa4838458c8303a67dd30fdd75a16b89cc20b
\\\"Nick: init\\\") | 3 weeks agoOct 10, 2025 |\\n| [.gitmodules](https://github.com/firecrawl/firecrawl/blob/main/.gitmodules
\\\".gitmodules\\\") | [.gitmodules](https://github.com/firecrawl/firecrawl/blob/main/.gitmodules
\\\".gitmodules\\\") | [mendableai -> firecrawl](https://github.com/firecrawl/firecrawl/commit/2f3bc4e7a7b1a67a29c06df629f79402ee1aad1b
\\\"mendableai -> firecrawl\\\") | 2 months agoAug 18, 2025 |\\n| [CLAUDE.md](https://github.com/firecrawl/firecrawl/blob/main/CLAUDE.md
\\\"CLAUDE.md\\\") | [CLAUDE.md](https://github.com/firecrawl/firecrawl/blob/main/CLAUDE.md
\\\"CLAUDE.md\\\") | [add claude file](https://github.com/firecrawl/firecrawl/commit/3f0873c788823258a7d9f55d1c8772aed4e1a8de
\\\"add claude file\\\") | 2 months agoAug 6, 2025 |\\n| [CONTRIBUTING.md](https://github.com/firecrawl/firecrawl/blob/main/CONTRIBUTING.md
\\\"CONTRIBUTING.md\\\") | [CONTRIBUTING.md](https://github.com/firecrawl/firecrawl/blob/main/CONTRIBUTING.md
\\\"CONTRIBUTING.md\\\") | [Add Rust to CONTRIBUTING (](https://github.com/firecrawl/firecrawl/commit/f396cb20b54c3c2d7e64882642c5df6310a01002
\\\"Add Rust to CONTRIBUTING (#2180)\\\") [#2180](https://github.com/firecrawl/firecrawl/pull/2180)
[)](https://github.com/firecrawl/firecrawl/commit/f396cb20b54c3c2d7e64882642c5df6310a01002
\\\"Add Rust to CONTRIBUTING (#2180)\\\") | last monthSep 18, 2025 |\\n| [LICENSE](https://github.com/firecrawl/firecrawl/blob/main/LICENSE
\\\"LICENSE\\\") | [LICENSE](https://github.com/firecrawl/firecrawl/blob/main/LICENSE
\\\"LICENSE\\\") | [Update SDKs to MIT license](https://github.com/firecrawl/firecrawl/commit/afb49e21e7cff595ebad9ce0b7aba13b88f39cf8
\\\"Update SDKs to MIT license\\\") | last yearJul 8, 2024 |\\n| [README.md](https://github.com/firecrawl/firecrawl/blob/main/README.md
\\\"README.md\\\") | [README.md](https://github.com/firecrawl/firecrawl/blob/main/README.md
\\\"README.md\\\") | [Update README.md](https://github.com/firecrawl/firecrawl/commit/a21430e97818d95099bb365be711d9227bd75590
\\\"Update README.md\\\") | 3 weeks agoOct 6, 2025 |\\n| [SELF\\\\_HOST.md](https://github.com/firecrawl/firecrawl/blob/main/SELF_HOST.md
\\\"SELF_HOST.md\\\") | [SELF\\\\_HOST.md](https://github.com/firecrawl/firecrawl/blob/main/SELF_HOST.md
\\\"SELF_HOST.md\\\") | [Allow self-hosted webhook delivery to private IP
addresses (](https://github.com/firecrawl/firecrawl/commit/5756b834884d481382ce1f5674836a56b7fee33d
\\\"Allow self-hosted webhook delivery to private IP addresses (#2232)\\\")
[#2232](https://github.com/firecrawl/firecrawl/pull/2232) [)](https://github.com/firecrawl/firecrawl/commit/5756b834884d481382ce1f5674836a56b7fee33d
\\\"Allow self-hosted webhook delivery to private IP addresses (#2232)\\\")
| 27 days agoOct 1, 2025 |\\n| [docker-compose.yaml](https://github.com/firecrawl/firecrawl/blob/main/docker-compose.yaml
\\\"docker-compose.yaml\\\") | [docker-compose.yaml](https://github.com/firecrawl/firecrawl/blob/main/docker-compose.yaml
\\\"docker-compose.yaml\\\") | [Fix a self-hosted docker-compose.yaml bug
caused by a recent firecraw\u2026](https://github.com/firecrawl/firecrawl/commit/7d4100b274889977fa1ba26344532d9d8747494c
\\\"Fix a self-hosted docker-compose.yaml bug caused by a recent firecrawl
change (#2252) Add EXTRACT_WORKER_PORT to docker-compose environment\\\")
| 3 weeks agoOct 4, 2025 |\\n| View all files |\\n\\n## Repository files navigation\\n\\n###
[![](https://raw.githubusercontent.com/firecrawl/firecrawl/main/img/firecrawl_logo.png)](https://raw.githubusercontent.com/firecrawl/firecrawl/main/img/firecrawl_logo.png)\\n\\n[Permalink:
](https://github.com/firecrawl/firecrawl#----)\\n\\n[![License](https://camo.githubusercontent.com/d8ec6c81115d21c81bc26f2c80f8987a4d2a72e538b88afaa738fad5cd6289ff/68747470733a2f2f696d672e736869656c64732e696f2f6769746875622f6c6963656e73652f66697265637261776c2f66697265637261776c)](https://github.com/firecrawl/firecrawl/blob/main/LICENSE)[![Downloads](https://camo.githubusercontent.com/9d76afe428b4085c8b7103f2f4e31da110ee154ad7320bace4348d92ac0c2450/68747470733a2f2f7374617469632e706570792e746563682f62616467652f66697265637261776c2d7079)](https://pepy.tech/project/firecrawl-py)[![GitHub
Contributors](https://camo.githubusercontent.com/a9eabcb95ba00300afa51ce546660540c1f65764492cb2ba8fb67fe541c7e97f/68747470733a2f2f696d672e736869656c64732e696f2f6769746875622f636f6e7472696275746f72732f66697265637261776c2f66697265637261776c2e737667)](https://github.com/firecrawl/firecrawl/graphs/contributors)[![Visit
firecrawl.dev](https://camo.githubusercontent.com/3576b8cb0e77344c001cc8456d28c830691cb96480d4b65be90f8a4c99dead56/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f56697369742d66697265637261776c2e6465762d6f72616e6765)](https://firecrawl.dev/)\\n\\n[![Follow
on X](https://camo.githubusercontent.com/610127222e603752676f0275682f12398f8e434706861d577c1f6688d999191c/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f466f6c6c6f772532306f6e253230582d3030303030303f7374796c653d666f722d7468652d6261646765266c6f676f3d78266c6f676f436f6c6f723d7768697465)](https://twitter.com/firecrawl_dev)[![Follow
on LinkedIn](https://camo.githubusercontent.com/8741d51bb8e1c8ae576ac05e875f826bcf80e8711dcf9225935bb78d5bb03802/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f466f6c6c6f772532306f6e2532304c696e6b6564496e2d3030373742353f7374796c653d666f722d7468652d6261646765266c6f676f3d6c696e6b6564696e266c6f676f436f6c6f723d7768697465)](https://www.linkedin.com/company/104100957)[![Join
our Discord](https://camo.githubusercontent.com/886138c89a84dc2ad74d06900f364d736ccf753b2732d59fbd4106f6310f3616/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f4a6f696e2532306f7572253230446973636f72642d3538363546323f7374796c653d666f722d7468652d6261646765266c6f676f3d646973636f7264266c6f676f436f6c6f723d7768697465)](https://discord.com/invite/gSmWdAkdwd)\\n\\n#
\U0001F525 Firecrawl\\n\\n[Permalink: \U0001F525 Firecrawl](https://github.com/firecrawl/firecrawl#-firecrawl)\\n\\nEmpower
your AI apps with clean data from any website. Featuring advanced scraping,
crawling, and data extraction capabilities.\\n\\n_This repository is in development,
and we\u2019re still integrating custom modules into the mono repo. It's not
fully ready for self-hosted deployment yet, but you can run it locally._\\n\\n##
What is Firecrawl?\\n\\n[Permalink: What is Firecrawl?](https://github.com/firecrawl/firecrawl#what-is-firecrawl)\\n\\n[Firecrawl](https://firecrawl.dev/?ref=github)
is an API service that takes a URL, crawls it, and converts it into clean
markdown or structured data. We crawl all accessible subpages and give you
clean data for each. No sitemap required. Check out our [documentation](https://docs.firecrawl.dev/).\\n\\nLooking
for our MCP? Check out the [repo here](https://github.com/firecrawl/firecrawl-mcp-server).\\n\\n_Pst.
hey, you, join our stargazers :)_\\n\\n[![GitHub stars](https://camo.githubusercontent.com/11f7ce76e9f1608b3470b3a23a1db3a7d9ec083ee18f2d826862f420bac800dc/68747470733a2f2f696d672e736869656c64732e696f2f6769746875622f73746172732f66697265637261776c2f66697265637261776c2e7376673f7374796c653d736f6369616c266c6162656c3d53746172266d61784167653d32353932303030)](https://github.com/firecrawl/firecrawl)\\n\\n##
How to use it?\\n\\n[Permalink: How to use it?](https://github.com/firecrawl/firecrawl#how-to-use-it)\\n\\nWe
provide an easy-to-use API with our hosted version. You can find the playground
and documentation [here](https://firecrawl.dev/playground). You can also self-host
the backend if you'd like.\\n\\nCheck out the following resources to
get started:\\n\\n- [x] **API**: [Documentation](https://docs.firecrawl.dev/api-reference/introduction)\\n-
[x] **SDKs**: [Python](https://docs.firecrawl.dev/sdks/python), [Node](https://docs.firecrawl.dev/sdks/node)\\n-
[x] **LLM Frameworks**: [Langchain (python)](https://python.langchain.com/docs/integrations/document_loaders/firecrawl/),
[Langchain (js)](https://js.langchain.com/docs/integrations/document_loaders/web_loaders/firecrawl),
[Llama Index](https://docs.llamaindex.ai/en/latest/examples/data_connectors/WebPageDemo/#using-firecrawl-reader),
[Crew.ai](https://docs.crewai.com/), [Composio](https://composio.dev/tools/firecrawl/all),
[PraisonAI](https://docs.praison.ai/firecrawl/), [Superinterface](https://superinterface.ai/docs/assistants/functions/firecrawl),
[Vectorize](https://docs.vectorize.io/integrations/source-connectors/firecrawl)\\n-
[x] **Low-code Frameworks**: [Dify](https://dify.ai/blog/dify-ai-blog-integrated-with-firecrawl),
[Langflow](https://docs.langflow.org/), [Flowise AI](https://docs.flowiseai.com/integrations/langchain/document-loaders/firecrawl),
[Cargo](https://docs.getcargo.io/integration/firecrawl), [Pipedream](https://pipedream.com/apps/firecrawl/)\\n-
[x] **Community SDKs**: [Go](https://docs.firecrawl.dev/sdks/go), [Rust](https://docs.firecrawl.dev/sdks/rust)\\n-
[x] **Others**: [Zapier](https://zapier.com/apps/firecrawl/integrations),
[Pabbly Connect](https://www.pabbly.com/connect/integrations/firecrawl/)\\n-
[ ] Want an SDK or Integration? Let us know by opening an issue.\\n\\nTo
run locally, refer to guide [here](https://github.com/firecrawl/firecrawl/blob/main/CONTRIBUTING.md).\\n\\n###
API Key\\n\\n[Permalink: API Key](https://github.com/firecrawl/firecrawl#api-key)\\n\\nTo
use the API, you need to sign up on [Firecrawl](https://firecrawl.dev/) and
get an API key.\\n\\n### Features\\n\\n[Permalink: Features](https://github.com/firecrawl/firecrawl#features)\\n\\n-
[**Scrape**](https://github.com/firecrawl/firecrawl#scraping): scrapes a URL
and get its content in LLM-ready format (markdown, structured data via [LLM
Extract](https://github.com/firecrawl/firecrawl#llm-extraction-beta), screenshot,
html)\\n- [**Crawl**](https://github.com/firecrawl/firecrawl#crawling): scrapes
all the URLs of a web page and returns content in LLM-ready format\\n- [**Map**](https://github.com/firecrawl/firecrawl#map):
input a website and get all the website URLs - extremely fast\\n- [**Search**](https://github.com/firecrawl/firecrawl#search):
search the web and get full content from results\\n- [**Extract**](https://github.com/firecrawl/firecrawl#extract):
get structured data from single page, multiple pages or entire websites with
AI.\\n\\n### Powerful Capabilities\\n\\n[Permalink: Powerful Capabilities](https://github.com/firecrawl/firecrawl#powerful-capabilities)\\n\\n-
**LLM-ready formats**: markdown, structured data, screenshot, HTML, links,
metadata\\n- **The hard stuff**: proxies, anti-bot mechanisms, dynamic content
(js-rendered), output parsing, orchestration\\n- **Customizability**: exclude
tags, crawl behind auth walls with custom headers, max crawl depth, etc...\\n-
**Media parsing**: pdfs, docx, images\\n- **Reliability first**: designed
to get the data you need - no matter how hard it is\\n- **Actions**: click,
scroll, input, wait and more before extracting data\\n- **Batching**: scrape
thousands of URLs at the same time with a new async endpoint\\n- **Change
Tracking**: monitor and detect changes in website content over time\\n\\nYou
can find all of Firecrawl's capabilities and how to use them in our [documentation](https://docs.firecrawl.dev/)\\n\\n###
Crawling\\n\\n[Permalink: Crawling](https://github.com/firecrawl/firecrawl#crawling)\\n\\nUsed
to crawl a URL and all accessible subpages. This submits a crawl job and returns
a job ID to check the status of the crawl.\\n\\n```\\ncurl -X POST https://api.firecrawl.dev/v2/crawl
\\\\\\n -H 'Content-Type: application/json' \\\\\\n -H 'Authorization:
Bearer fc-YOUR_API_KEY' \\\\\\n -d '{\\n \\\"url\\\": \\\"https://docs.firecrawl.dev\\\",\\n
\ \\\"limit\\\": 10,\\n \\\"scrapeOptions\\\": {\\n \\\"formats\\\":
[\\\"markdown\\\", \\\"html\\\"]\\n }\\n }'\\n```\\n\\nReturns a crawl
job id and the url to check the status of the crawl.\\n\\n```\\n{\\n \\\"success\\\":
true,\\n \\\"id\\\": \\\"123-456-789\\\",\\n \\\"url\\\": \\\"https://api.firecrawl.dev/v2/crawl/123-456-789\\\"\\n}\\n```\\n\\n###
Check Crawl Job\\n\\n[Permalink: Check Crawl Job](https://github.com/firecrawl/firecrawl#check-crawl-job)\\n\\nUsed
to check the status of a crawl job and get its result.\\n\\n```\\ncurl -X
GET https://api.firecrawl.dev/v2/crawl/123-456-789 \\\\\\n -H 'Content-Type:
application/json' \\\\\\n -H 'Authorization: Bearer YOUR_API_KEY'\\n```\\n\\n```\\n{\\n
\ \\\"status\\\": \\\"completed\\\",\\n \\\"total\\\": 36,\\n \\\"creditsUsed\\\":
36,\\n \\\"expiresAt\\\": \\\"2024-00-00T00:00:00.000Z\\\",\\n \\\"data\\\":
[\\\\\\n {\\\\\\n \\\"markdown\\\": \\\"[Firecrawl Docs home page![light
logo](https://mintlify.s3-us-west-1.amazonaws.com/firecrawl/logo/light.svg)!...\\\",\\\\\\n
\ \\\"html\\\": \\\"<!DOCTYPE html><html lang=\\\\\\\"en\\\\\\\" class=\\\\\\\"js-focus-visible
lg:[--scroll-mt:9.5rem]\\\\\\\" data-js-focus-visible=\\\\\\\"\\\\\\\">...\\\",\\\\\\n
\ \\\"metadata\\\": {\\\\\\n \\\"title\\\": \\\"Build a 'Chat with
website' using Groq Llama 3 | Firecrawl\\\",\\\\\\n \\\"language\\\":
\\\"en\\\",\\\\\\n \\\"sourceURL\\\": \\\"https://docs.firecrawl.dev/learn/rag-llama3\\\",\\\\\\n
\ \\\"description\\\": \\\"Learn how to use Firecrawl, Groq Llama 3,
and Langchain to build a 'Chat with your website' bot.\\\",\\\\\\n \\\"ogLocaleAlternate\\\":
[],\\\\\\n \\\"statusCode\\\": 200\\\\\\n }\\\\\\n }\\\\\\n
\ ]\\\\\\n}\\\\\\n```\\\\\\n\\\\\\n### Scraping\\\\\\n\\\\\\n[Permalink: Scraping](https://github.com/firecrawl/firecrawl#scraping)\\\\\\n\\\\\\nUsed
to scrape a URL and get its content in the specified formats.\\\\\\n\\\\\\n```\\\\\\ncurl
-X POST https://api.firecrawl.dev/v2/scrape \\\\\\\\\\n -H 'Content-Type:
application/json' \\\\\\\\\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\\\\\\\\n
\ -d '{\\\\\\n \\\"url\\\": \\\"https://docs.firecrawl.dev\\\",\\\\\\n
\ \\\"formats\\\" : [\\\"markdown\\\", \\\"html\\\"]\\\\\\n }'\\\\\\n```\\\\\\n\\\\\\nResponse:\\\\\\n\\\\\\n```\\\\\\n{\\\\\\n
\ \\\"success\\\": true,\\\\\\n \\\"data\\\": {\\\\\\n \\\"markdown\\\":
\\\"Launch Week I is here! [See our Day 2 Release \U0001F680](https://www.firecrawl.dev/blog/launch-week-i-day-2-doubled-rate-limits)[\U0001F4A5
Get 2 months free...\\\",\\\\\\n \\\"html\\\": \\\"<!DOCTYPE html><html
lang=\\\\\\\"en\\\\\\\" class=\\\\\\\"light\\\\\\\" style=\\\\\\\"color-scheme:
light;\\\\\\\"><body class=\\\\\\\"__variable_36bd41 __variable_d7dc5d font-inter
...\\\",\\\\\\n \\\"metadata\\\": {\\\\\\n \\\"title\\\": \\\"Home
- Firecrawl\\\",\\\\\\n \\\"description\\\": \\\"Firecrawl crawls and
converts any website into clean markdown.\\\",\\\\\\n \\\"language\\\":
\\\"en\\\",\\\\\\n \\\"keywords\\\": \\\"Firecrawl,Markdown,Data,Mendable,Langchain\\\",\\\\\\n
\ \\\"robots\\\": \\\"follow, index\\\",\\\\\\n \\\"ogTitle\\\":
\\\"Firecrawl\\\",\\\\\\n \\\"ogDescription\\\": \\\"Turn any website
into LLM-ready data.\\\",\\\\\\n \\\"ogUrl\\\": \\\"https://www.firecrawl.dev/\\\",\\\\\\n
\ \\\"ogImage\\\": \\\"https://www.firecrawl.dev/og.png?123\\\",\\\\\\n
\ \\\"ogLocaleAlternate\\\": [],\\\\\\n \\\"ogSiteName\\\": \\\"Firecrawl\\\",\\\\\\n
\ \\\"sourceURL\\\": \\\"https://firecrawl.dev\\\",\\\\\\n \\\"statusCode\\\":
200\\\\\\n }\\\\\\n }\\\\\\n}\\\\\\n```\\\\\\n\\\\\\n### Map\\\\\\n\\\\\\n[Permalink:
Map](https://github.com/firecrawl/firecrawl#map)\\\\\\n\\\\\\nUsed to map
a URL and get urls of the website. This returns most links present on the
website.\\\\\\n\\\\\\n```\\\\\\ncurl -X POST https://api.firecrawl.dev/v2/map
\\\\\\\\\\n -H 'Content-Type: application/json' \\\\\\\\\\n -H 'Authorization:
Bearer YOUR_API_KEY' \\\\\\\\\\n -d '{\\\\\\n \\\"url\\\": \\\"https://firecrawl.dev\\\"\\\\\\n
\ }'\\\\\\n```\\\\\\n\\\\\\nResponse:\\\\\\n\\\\\\n```\\\\\\n{\\\\\\n \\\"success\\\":
true,\\\\\\n \\\"links\\\": [\\\\\\n { \\\"url\\\": \\\"https://firecrawl.dev\\\",
\\\"title\\\": \\\"Firecrawl\\\", \\\"description\\\": \\\"Firecrawl is a
tool that allows you to crawl a website and get the data you need.\\\" },\\\\\\n
\ { \\\"url\\\": \\\"https://www.firecrawl.dev/pricing\\\", \\\"title\\\":
\\\"Firecrawl Pricing\\\", \\\"description\\\": \\\"Firecrawl Pricing\\\"
},\\\\\\n { \\\"url\\\": \\\"https://www.firecrawl.dev/blog\\\", \\\"title\\\":
\\\"Firecrawl Blog\\\", \\\"description\\\": \\\"Firecrawl Blog\\\" },\\\\\\n
\ { \\\"url\\\": \\\"https://www.firecrawl.dev/playground\\\", \\\"title\\\":
\\\"Firecrawl Playground\\\", \\\"description\\\": \\\"Firecrawl Playground\\\"
},\\\\\\n { \\\"url\\\": \\\"https://www.firecrawl.dev/smart-crawl\\\",
\\\"title\\\": \\\"Firecrawl Smart Crawl\\\", \\\"description\\\": \\\"Firecrawl
Smart Crawl\\\" }\\\\\\n ]\\\\\\n}\\\\\\n```\\\\\\n\\\\\\n#### Map with search\\\\\\n\\\\\\n[Permalink:
Map with search](https://github.com/firecrawl/firecrawl#map-with-search)\\\\\\n\\\\\\nMap
with `search` param allows you to search for specific urls inside a website.\\\\\\n\\\\\\n```\\\\\\ncurl
-X POST https://api.firecrawl.dev/v2/map \\\\\\\\\\n -H 'Content-Type:
application/json' \\\\\\\\\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\\\\\\\\n
\ -d '{\\\\\\n \\\"url\\\": \\\"https://firecrawl.dev\\\",\\\\\\n \\\"search\\\":
\\\"docs\\\"\\\\\\n }'\\\\\\n```\\\\\\n\\\\\\nResponse will be an ordered
list from the most relevant to the least relevant.\\\\\\n\\\\\\n```\\\\\\n{\\\\\\n
\ \\\"success\\\": true,\\\\\\n \\\"links\\\": [\\\\\\n { \\\"url\\\":
\\\"https://docs.firecrawl.dev\\\", \\\"title\\\": \\\"Firecrawl Docs\\\",
\\\"description\\\": \\\"Firecrawl Docs\\\" },\\\\\\n { \\\"url\\\": \\\"https://docs.firecrawl.dev/sdks/python\\\",
\\\"title\\\": \\\"Firecrawl Python SDK\\\", \\\"description\\\": \\\"Firecrawl
Python SDK\\\" },\\\\\\n { \\\"url\\\": \\\"https://docs.firecrawl.dev/learn/rag-llama3\\\",
\\\"title\\\": \\\"Firecrawl RAG Llama 3\\\", \\\"description\\\": \\\"Firecrawl
RAG Llama 3\\\" }\\\\\\n ]\\\\\\n}\\\\\\n```\\\\\\n\\\\\\n### Search\\\\\\n\\\\\\n[Permalink:
Search](https://github.com/firecrawl/firecrawl#search)\\\\\\n\\\\\\nSearch
the web and get full content from results\\\\\\n\\\\\\nFirecrawl\u2019s search
API allows you to perform web searches and optionally scrape the search results
in one operation.\\\\\\n\\\\\\n- Choose specific output formats (markdown,
HTML, links, screenshots)\\\\\\n- Search the web with customizable parameters
(language, country, etc.)\\\\\\n- Optionally retrieve content from search
results in various formats\\\\\\n- Control the number of results and set timeouts\\\\\\n\\\\\\n```\\\\\\ncurl
-X POST https://api.firecrawl.dev/v2/search \\\\\\\\\\n -H \\\"Content-Type:
application/json\\\" \\\\\\\\\\n -H \\\"Authorization: Bearer fc-YOUR_API_KEY\\\"
\\\\\\\\\\n -d '{\\\\\\n \\\"query\\\": \\\"what is firecrawl?\\\",\\\\\\n
\ \\\"limit\\\": 5\\\\\\n }'\\\\\\n```\\\\\\n\\\\\\n#### Response\\\\\\n\\\\\\n[Permalink:
Response](https://github.com/firecrawl/firecrawl#response)\\\\\\n\\\\\\n```\\\\\\n{\\\\\\n
\ \\\"success\\\": true,\\\\\\n \\\"data\\\": [\\\\\\n {\\\\\\n \\\"url\\\":
\\\"https://firecrawl.dev\\\",\\\\\\n \\\"title\\\": \\\"Firecrawl |
Home Page\\\",\\\\\\n \\\"description\\\": \\\"Turn websites into LLM-ready
data with Firecrawl\\\"\\\\\\n },\\\\\\n {\\\\\\n \\\"url\\\":
\\\"https://docs.firecrawl.dev\\\",\\\\\\n \\\"title\\\": \\\"Documentation
| Firecrawl\\\",\\\\\\n \\\"description\\\": \\\"Learn how to use Firecrawl
in your own applications\\\"\\\\\\n }\\\\\\n ]\\\\\\n}\\\\\\n```\\\\\\n\\\\\\n####
With content scraping\\\\\\n\\\\\\n[Permalink: With content scraping](https://github.com/firecrawl/firecrawl#with-content-scraping)\\\\\\n\\\\\\n```\\\\\\ncurl
-X POST https://api.firecrawl.dev/v2/search \\\\\\\\\\n -H \\\"Content-Type:
application/json\\\" \\\\\\\\\\n -H \\\"Authorization: Bearer fc-YOUR_API_KEY\\\"
\\\\\\\\\\n -d '{\\\\\\n \\\"query\\\": \\\"what is firecrawl?\\\",\\\\\\n
\ \\\"limit\\\": 5,\\\\\\n \\\"scrapeOptions\\\": {\\\\\\n \\\"formats\\\":
[\\\"markdown\\\", \\\"links\\\"]\\\\\\n }\\\\\\n }'\\\\\\n```\\\\\\n\\\\\\n###
Extract (Beta)\\\\\\n\\\\\\n[Permalink: Extract (Beta)](https://github.com/firecrawl/firecrawl#extract-beta)\\\\\\n\\\\\\nGet
structured data from entire websites with a prompt and/or a schema.\\\\\\n\\\\\\nYou
can extract structured data from one or multiple URLs, including wildcards:\\\\\\n\\\\\\nSingle
Page:\\\\\\nExample: [https://firecrawl.dev/some-page](https://firecrawl.dev/some-page)\\\\\\n\\\\\\nMultiple
Pages / Full Domain\\\\\\nExample: [https://firecrawl.dev/](https://firecrawl.dev/)\\\\*\\\\\\n\\\\\\nWhen
you use /\\\\*, Firecrawl will automatically crawl and parse all URLs it can
discover in that domain, then extract the requested data.\\\\\\n\\\\\\n```\\\\\\ncurl
-X POST https://api.firecrawl.dev/v2/extract \\\\\\\\\\n -H 'Content-Type:
application/json' \\\\\\\\\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\\\\\\\\n
\ -d '{\\\\\\n \\\"urls\\\": [\\\\\\n \\\"https://firecrawl.dev/*\\\",\\\\\\n
\ \\\"https://docs.firecrawl.dev/\\\",\\\\\\n \\\"https://www.ycombinator.com/companies\\\"\\\\\\n
\ ],\\\\\\n \\\"prompt\\\": \\\"Extract the company mission, whether
it is open source, and whether it is in Y Combinator from the page.\\\",\\\\\\n
\ \\\"schema\\\": {\\\\\\n \\\"type\\\": \\\"object\\\",\\\\\\n
\ \\\"properties\\\": {\\\\\\n \\\"company_mission\\\": {\\\\\\n
\ \\\"type\\\": \\\"string\\\"\\\\\\n },\\\\\\n \\\"is_open_source\\\":
{\\\\\\n \\\"type\\\": \\\"boolean\\\"\\\\\\n },\\\\\\n
\ \\\"is_in_yc\\\": {\\\\\\n \\\"type\\\": \\\"boolean\\\"\\\\\\n
\ }\\\\\\n },\\\\\\n \\\"required\\\": [\\\\\\n \\\"company_mission\\\",\\\\\\n
\ \\\"is_open_source\\\",\\\\\\n \\\"is_in_yc\\\"\\\\\\n
\ ]\\\\\\n }\\\\\\n }'\\\\\\n```\\\\\\n\\\\\\n```\\\\\\n{\\\\\\n
\ \\\"success\\\": true,\\\\\\n \\\"id\\\": \\\"44aa536d-f1cb-4706-ab87-ed0386685740\\\",\\\\\\n
\ \\\"urlTrace\\\": []\\\\\\n}\\\\\\n```\\\\\\n\\\\\\nIf you are using the
SDKs, it will auto-pull the response for you:\\\\\\n\\\\\\n```\\\\\\n{\\\\\\n
\ \\\"success\\\": true,\\\\\\n \\\"data\\\": {\\\\\\n \\\"company_mission\\\":
\\\"Firecrawl is the easiest way to extract data from the web. Developers
use us to reliably convert URLs into LLM-ready markdown or structured data
with a single API call.\\\",\\\\\\n \\\"supports_sso\\\": false,\\\\\\n
\ \\\"is_open_source\\\": true,\\\\\\n \\\"is_in_yc\\\": true\\\\\\n
\ }\\\\\\n}\\\\\\n```\\\\\\n\\\\\\n### LLM Extraction (Beta)\\\\\\n\\\\\\n[Permalink:
LLM Extraction (Beta)](https://github.com/firecrawl/firecrawl#llm-extraction-beta)\\\\\\n\\\\\\nUsed
to extract structured data from scraped pages.\\\\\\n\\\\\\n```\\\\\\ncurl
-X POST https://api.firecrawl.dev/v2/scrape \\\\\\\\\\n -H 'Content-Type:
application/json' \\\\\\\\\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\\\\\\\\n
\ -d '{\\\\\\n \\\"url\\\": \\\"https://www.mendable.ai/\\\",\\\\\\n \\\"formats\\\":
[\\\\\\n {\\\\\\n \\\"type\\\": \\\"json\\\",\\\\\\n \\\"schema\\\":
{\\\\\\n \\\"type\\\": \\\"object\\\",\\\\\\n \\\"properties\\\":
{\\\\\\n \\\"company_mission\\\": { \\\"type\\\": \\\"string\\\"
},\\\\\\n \\\"supports_sso\\\": { \\\"type\\\": \\\"boolean\\\"
},\\\\\\n \\\"is_open_source\\\": { \\\"type\\\": \\\"boolean\\\"
},\\\\\\n \\\"is_in_yc\\\": { \\\"type\\\": \\\"boolean\\\" }\\\\\\n
\ }\\\\\\n }\\\\\\n }\\\\\\n ]\\\\\\n }'\\\\\\n```\\\\\\n\\\\\\n```\\\\\\n{\\\\\\n
\ \\\"success\\\": true,\\\\\\n \\\"data\\\": {\\\\\\n \\\"content\\\":
\\\"Raw Content\\\",\\\\\\n \\\"metadata\\\": {\\\\\\n \\\"title\\\":
\\\"Mendable\\\",\\\\\\n \\\"description\\\": \\\"Mendable allows you
to easily build AI chat applications. Ingest, customize, then deploy with
one line of code anywhere you want. Brought to you by SideGuide\\\",\\\\\\n
\ \\\"robots\\\": \\\"follow, index\\\",\\\\\\n \\\"ogTitle\\\":
\\\"Mendable\\\",\\\\\\n \\\"ogDescription\\\": \\\"Mendable allows you
to easily build AI chat applications. Ingest, customize, then deploy with
one line of code anywhere you want. Brought to you by SideGuide\\\",\\\\\\n
\ \\\"ogUrl\\\": \\\"https://mendable.ai/\\\",\\\\\\n \\\"ogImage\\\":
\\\"https://mendable.ai/mendable_new_og1.png\\\",\\\\\\n \\\"ogLocaleAlternate\\\":
[],\\\\\\n \\\"ogSiteName\\\": \\\"Mendable\\\",\\\\\\n \\\"sourceURL\\\":
\\\"https://mendable.ai/\\\"\\\\\\n },\\\\\\n \\\"json\\\": {\\\\\\n
\ \\\"company_mission\\\": \\\"Train a secure AI on your technical resources
that answers customer and employee questions so your team doesn't have to\\\",\\\\\\n
\ \\\"supports_sso\\\": true,\\\\\\n \\\"is_open_source\\\": false,\\\\\\n
\ \\\"is_in_yc\\\": true\\\\\\n }\\\\\\n }\\\\\\n}\\\\\\n```\\\\\\n\\\\\\n###
Extracting without a schema (New)\\\\\\n\\\\\\n[Permalink: Extracting without
a schema (New)](https://github.com/firecrawl/firecrawl#extracting-without-a-schema-new)\\\\\\n\\\\\\nYou
can now extract without a schema by just passing a `prompt` to the endpoint.
The LLM chooses the structure of the data.\\\\\\n\\\\\\n```\\\\\\ncurl -X
POST https://api.firecrawl.dev/v2/scrape \\\\\\\\\\n -H 'Content-Type:
application/json' \\\\\\\\\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\\\\\\\\n
\ -d '{\\\\\\n \\\"url\\\": \\\"https://docs.firecrawl.dev/\\\",\\\\\\n
\ \\\"formats\\\": [\\\\\\n {\\\\\\n \\\"type\\\": \\\"json\\\",\\\\\\n
\ \\\"prompt\\\": \\\"Extract the company mission from the page.\\\"\\\\\\n
\ }\\\\\\n ]\\\\\\n }'\\\\\\n```\\\\\\n\\\\\\n### Interacting
with the page with Actions (Cloud-only)\\\\\\n\\\\\\n[Permalink: Interacting
with the page with Actions (Cloud-only)](https://github.com/firecrawl/firecrawl#interacting-with-the-page-with-actions-cloud-only)\\\\\\n\\\\\\nFirecrawl
allows you to perform various actions on a web page before scraping its content.
This is particularly useful for interacting with dynamic content, navigating
through pages, or accessing content that requires user interaction.\\\\\\n\\\\\\nHere
is an example of how to use actions to navigate to google.com, search for
Firecrawl, click on the first result, and take a screenshot.\\\\\\n\\\\\\n```\\\\\\ncurl
-X POST https://api.firecrawl.dev/v2/scrape \\\\\\\\\\n -H 'Content-Type:
application/json' \\\\\\\\\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\\\\\\\\n
\ -d '{\\\\\\n \\\"url\\\": \\\"google.com\\\",\\\\\\n \\\"formats\\\":
[\\\"markdown\\\"],\\\\\\n \\\"actions\\\": [\\\\\\n {\\\"type\\\":
\\\"wait\\\", \\\"milliseconds\\\": 2000},\\\\\\n {\\\"type\\\":
\\\"click\\\", \\\"selector\\\": \\\"textarea[title=\\\\\\\"Search\\\\\\\"]\\\"},\\\\\\n
\ {\\\"type\\\": \\\"wait\\\", \\\"milliseconds\\\": 2000},\\\\\\n
\ {\\\"type\\\": \\\"write\\\", \\\"text\\\": \\\"firecrawl\\\"},\\\\\\n
\ {\\\"type\\\": \\\"wait\\\", \\\"milliseconds\\\": 2000},\\\\\\n
\ {\\\"type\\\": \\\"press\\\", \\\"key\\\": \\\"ENTER\\\"},\\\\\\n
\ {\\\"type\\\": \\\"wait\\\", \\\"milliseconds\\\": 3000},\\\\\\n
\ {\\\"type\\\": \\\"click\\\", \\\"selector\\\": \\\"h3\\\"},\\\\\\n
\ {\\\"type\\\": \\\"wait\\\", \\\"milliseconds\\\": 3000},\\\\\\n
\ {\\\"type\\\": \\\"screenshot\\\"}\\\\\\n ]\\\\\\n }'\\\\\\n```\\\\\\n\\\\\\n###
Batch Scraping Multiple URLs (New)\\\\\\n\\\\\\n[Permalink: Batch Scraping
Multiple URLs (New)](https://github.com/firecrawl/firecrawl#batch-scraping-multiple-urls-new)\\\\\\n\\\\\\nYou
can now batch scrape multiple URLs at the same time. It is very similar to
how the /crawl endpoint works. It submits a batch scrape job and returns a
job ID to check the status of the batch scrape.\\\\\\n\\\\\\n```\\\\\\ncurl
-X POST https://api.firecrawl.dev/v2/batch/scrape \\\\\\\\\\n -H 'Content-Type:
application/json' \\\\\\\\\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\\\\\\\\n
\ -d '{\\\\\\n \\\"urls\\\": [\\\"https://docs.firecrawl.dev\\\", \\\"https://docs.firecrawl.dev/sdks/overview\\\"],\\\\\\n
\ \\\"formats\\\" : [\\\"markdown\\\", \\\"html\\\"]\\\\\\n }'\\\\\\n```\\\\\\n\\\\\\n##
Using Python SDK\\\\\\n\\\\\\n[Permalink: Using Python SDK](https://github.com/firecrawl/firecrawl#using-python-sdk)\\\\\\n\\\\\\n###
Installing Python SDK\\\\\\n\\\\\\n[Permalink: Installing Python SDK](https://github.com/firecrawl/firecrawl#installing-python-sdk)\\\\\\n\\\\\\n```\\\\\\npip
install firecrawl-py\\\\\\n```\\\\\\n\\\\\\n### Crawl a website\\\\\\n\\\\\\n[Permalink:
Crawl a website](https://github.com/firecrawl/firecrawl#crawl-a-website)\\\\\\n\\\\\\n```\\\\\\nfrom
firecrawl import Firecrawl\\\\\\n\\\\\\nfirecrawl = Firecrawl(api_key=\\\"fc-YOUR_API_KEY\\\")\\\\\\n\\\\\\n#
Scrape a website (returns a Document)\\\\\\ndoc = firecrawl.scrape(\\\\\\n
\ \\\"https://firecrawl.dev\\\",\\\\\\n formats=[\\\"markdown\\\", \\\"html\\\"],\\\\\\n)\\\\\\nprint(doc.markdown)\\\\\\n\\\\\\n#
Crawl a website\\\\\\nresponse = firecrawl.crawl(\\\\\\n \\\"https://firecrawl.dev\\\",\\\\\\n
\ limit=100,\\\\\\n scrape_options={\\\"formats\\\": [\\\"markdown\\\",
\\\"html\\\"]},\\\\\\n poll_interval=30,\\\\\\n)\\\\\\nprint(response)\\\\\\n```\\\\\\n\\\\\\n###
Extracting structured data from a URL\\\\\\n\\\\\\n[Permalink: Extracting
structured data from a URL](https://github.com/firecrawl/firecrawl#extracting-structured-data-from-a-url)\\\\\\n\\\\\\nWith
LLM extraction, you can easily extract structured data from any URL. We support
pydantic schemas to make it easier for you too. Here is how to use it:\\\\\\n\\\\\\n```\\\\\\nfrom
pydantic import BaseModel, Field\\\\\\nfrom typing import List\\\\\\n\\\\\\nclass
Article(BaseModel):\\\\\\n title: str\\\\\\n points: int\\\\\\n by:
str\\\\\\n commentsURL: str\\\\\\n\\\\\\nclass TopArticles(BaseModel):\\\\\\n
\ top: List[Article] = Field(..., description=\\\"Top 5 stories\\\")\\\\\\n\\\\\\n#
Use JSON format with a Pydantic schema\\\\\\ndoc = firecrawl.scrape(\\\\\\n
\ \\\"https://news.ycombinator.com\\\",\\\\\\n formats=[{\\\"type\\\":
\\\"json\\\", \\\"schema\\\": TopArticles}],\\\\\\n)\\\\\\nprint(doc.json)\\\\\\n```\\\\\\n\\\\\\n##
Using the Node SDK\\\\\\n\\\\\\n[Permalink: Using the Node SDK](https://github.com/firecrawl/firecrawl#using-the-node-sdk)\\\\\\n\\\\\\n###
Installation\\\\\\n\\\\\\n[Permalink: Installation](https://github.com/firecrawl/firecrawl#installation)\\\\\\n\\\\\\nTo
install the Firecrawl Node SDK, you can use npm:\\\\\\n\\\\\\n```\\\\\\nnpm
install @mendable/firecrawl-js\\\\\\n```\\\\\\n\\\\\\n### Usage\\\\\\n\\\\\\n[Permalink:
Usage](https://github.com/firecrawl/firecrawl#usage)\\\\\\n\\\\\\n1. Get an
API key from [firecrawl.dev](https://firecrawl.dev/)\\\\\\n2. Set the API
key as an environment variable named `FIRECRAWL_API_KEY` or pass it as a parameter
to the `Firecrawl` class.\\\\\\n\\\\\\n```\\\\\\nimport Firecrawl from '@mendable/firecrawl-js';\\\\\\n\\\\\\nconst
firecrawl = new Firecrawl({ apiKey: 'fc-YOUR_API_KEY' });\\\\\\n\\\\\\n//
Scrape a website\\\\\\nconst doc = await firecrawl.scrape('https://firecrawl.dev',
{\\\\\\n formats: ['markdown', 'html'],\\\\\\n});\\\\\\nconsole.log(doc);\\\\\\n\\\\\\n//
Crawl a website\\\\\\nconst response = await firecrawl.crawl('https://firecrawl.dev',
{\\\\\\n limit: 100,\\\\\\n scrapeOptions: { formats: ['markdown', 'html']
},\\\\\\n});\\\\\\nconsole.log(response);\\\\\\n```\\\\\\n\\\\\\n### Extracting
structured data from a URL\\\\\\n\\\\\\n[Permalink: Extracting structured
data from a URL](https://github.com/firecrawl/firecrawl#extracting-structured-data-from-a-url-1)\\\\\\n\\\\\\nWith
LLM extraction, you can easily extract structured data from any URL. We support
zod schema to make it easier for you too. Here is how to use it:\\\\\\n\\\\\\n```\\\\\\nimport
Firecrawl from '@mendable/firecrawl-js';\\\\\\nimport { z } from 'zod';\\\\\\n\\\\\\nconst
firecrawl = new Firecrawl({ apiKey: 'fc-YOUR_API_KEY' });\\\\\\n\\\\\\n//
Define schema to extract contents into\\\\\\nconst schema = z.object({\\\\\\n
\ top: z\\\\\\n .array(\\\\\\n z.object({\\\\\\n title: z.string(),\\\\\\n
\ points: z.number(),\\\\\\n by: z.string(),\\\\\\n commentsURL:
z.string(),\\\\\\n })\\\\\\n )\\\\\\n .length(5)\\\\\\n .describe('Top
5 stories on Hacker News'),\\\\\\n});\\\\\\n\\\\\\n// Use the v2 extract API
with direct Zod schema support\\\\\\nconst extractRes = await firecrawl.extract({\\\\\\n
\ urls: ['https://news.ycombinator.com'],\\\\\\n schema,\\\\\\n prompt:
'Extract the top 5 stories',\\\\\\n});\\\\\\n\\\\\\nconsole.log(extractRes);\\\\\\n```\\\\\\n\\\\\\n##
Open Source vs Cloud Offering\\\\\\n\\\\\\n[Permalink: Open Source vs Cloud
Offering](https://github.com/firecrawl/firecrawl#open-source-vs-cloud-offering)\\\\\\n\\\\\\nFirecrawl
is open source available under the AGPL-3.0 license.\\\\\\n\\\\\\nTo deliver
the best possible product, we offer a hosted version of Firecrawl alongside
our open-source offering. The cloud solution allows us to continuously innovate
and maintain a high-quality, sustainable service for all users.\\\\\\n\\\\\\nFirecrawl
Cloud is available at [firecrawl.dev](https://firecrawl.dev/) and offers a
range of features that are not available in the open source version:\\\\\\n\\\\\\n[![Open
Source vs Cloud Offering](https://raw.githubusercontent.com/firecrawl/firecrawl/main/img/open-source-cloud.png)](https://raw.githubusercontent.com/firecrawl/firecrawl/main/img/open-source-cloud.png)\\\\\\n\\\\\\n##
Contributing\\\\\\n\\\\\\n[Permalink: Contributing](https://github.com/firecrawl/firecrawl#contributing)\\\\\\n\\\\\\nWe
love contributions! Please read our [contributing guide](https://github.com/firecrawl/firecrawl/blob/main/CONTRIBUTING.md)
before submitting a pull request. If you'd like to self-host, refer to the
[self-hosting guide](https://github.com/firecrawl/firecrawl/blob/main/SELF_HOST.md).\\\\\\n\\\\\\n_It
is the sole responsibility of the end users to respect websites' policies
when scraping, searching and crawling with Firecrawl. Users are advised to
adhere to the applicable privacy policies and terms of use of the websites
prior to initiating any scraping activities. By default, Firecrawl respects
the directives specified in the websites' robots.txt files when crawling.
By utilizing Firecrawl, you expressly agree to comply with these conditions._\\\\\\n\\\\\\n##
Contributors\\\\\\n\\\\\\n[Permalink: Contributors](https://github.com/firecrawl/firecrawl#contributors)\\\\\\n\\\\\\n[![contributors](https://camo.githubusercontent.com/e3b2e7ad4c1f76e68fc11ede158c87a0f039052f649002a1ff855d13fb9294fb/68747470733a2f2f636f6e747269622e726f636b732f696d6167653f7265706f3d66697265637261776c2f66697265637261776c)](https://github.com/firecrawl/firecrawl/graphs/contributors)\\\\\\n\\\\\\n##
License Disclaimer\\\\\\n\\\\\\n[Permalink: License Disclaimer](https://github.com/firecrawl/firecrawl#license-disclaimer)\\\\\\n\\\\\\nThis
project is primarily licensed under the GNU Affero General Public License
v3.0 (AGPL-3.0), as specified in the LICENSE file in the root directory of
this repository. However, certain components of this project are licensed
under the MIT License. Refer to the LICENSE files in these specific directories
for details.\\\\\\n\\\\\\nPlease note:\\\\\\n\\\\\\n- The AGPL-3.0 license
applies to all parts of the project unless otherwise specified.\\\\\\n- The
SDKs and some UI components are licensed under the MIT License. Refer to the
LICENSE files in these specific directories for details.\\\\\\n- When using
or contributing to this project, ensure you comply with the appropriate license
terms for the specific component you are working with.\\\\\\n\\\\\\nFor more
details on the licensing of specific components, please refer to the LICENSE
files in the respective directories or contact the project maintainers.\\\\\\n\\\\\\n[\u2191
Back to Top \u2191](https://github.com/firecrawl/firecrawl#readme-top)\\\\\\n\\\\\\n##
About\\\\\\n\\\\\\n\U0001F525 The Web Data API for AI - Turn entire websites
into LLM-ready markdown or structured data\\\\\\n\\\\\\n\\\\\\n[firecrawl.dev](https://firecrawl.dev/
\\\"https://firecrawl.dev\\\")\\\\\\n\\\\\\n### Topics\\\\\\n\\\\\\n[markdown](https://github.com/topics/markdown
\\\"Topic: markdown\\\") [crawler](https://github.com/topics/crawler \\\"Topic:
crawler\\\") [scraper](https://github.com/topics/scraper \\\"Topic: scraper\\\")
[ai](https://github.com/topics/ai \\\"Topic: ai\\\") [html-to-markdown](https://github.com/topics/html-to-markdown
\\\"Topic: html-to-markdown\\\") [web-crawler](https://github.com/topics/web-crawler
\\\"Topic: web-crawler\\\") [scraping](https://github.com/topics/scraping
\\\"Topic: scraping\\\") [web-scraper](https://github.com/topics/web-scraper
\\\"Topic: web-scraper\\\") [web-scraping](https://github.com/topics/web-scraping
\\\"Topic: web-scraping\\\") [data-extraction](https://github.com/topics/data-extraction
\\\"Topic: data-extraction\\\") [webscraping](https://github.com/topics/webscraping
\\\"Topic: webscraping\\\") [web-data-extraction](https://github.com/topics/web-data-extraction
\\\"Topic: web-data-extraction\\\") [ai-agents](https://github.com/topics/ai-agents
\\\"Topic: ai-agents\\\") [web-search](https://github.com/topics/web-search
\\\"Topic: web-search\\\") [ai-search](https://github.com/topics/ai-search
\\\"Topic: ai-search\\\") [web-data](https://github.com/topics/web-data \\\"Topic:
web-data\\\") [llm](https://github.com/topics/llm \\\"Topic: llm\\\") [ai-crawler](https://github.com/topics/ai-crawler
\\\"Topic: ai-crawler\\\") [ai-scraping](https://github.com/topics/ai-scraping
\\\"Topic: ai-scraping\\\")\\\\\\n\\\\\\n### Resources\\\\\\n\\\\\\n[Readme](https://github.com/firecrawl/firecrawl#readme-ov-file)\\\\\\n\\\\\\n###
License\\\\\\n\\\\\\n[AGPL-3.0 license](https://github.com/firecrawl/firecrawl#AGPL-3.0-1-ov-file)\\\\\\n\\\\\\n###
Contributing\\\\\\n\\\\\\n[Contributing](https://github.com/firecrawl/firecrawl#contributing-ov-file)\\\\\\n\\\\\\n###
Uh oh!\\\\\\n\\\\\\nThere was an error while loading. [Please reload this
page](https://github.com/firecrawl/firecrawl).\\\\\\n\\\\\\n[Activity](https://github.com/firecrawl/firecrawl/activity)\\\\\\n\\\\\\n[Custom
properties](https://github.com/firecrawl/firecrawl/custom-properties)\\\\\\n\\\\\\n###
Stars\\\\\\n\\\\\\n[**65.2k**\\\\\\\\\\nstars](https://github.com/firecrawl/firecrawl/stargazers)\\\\\\n\\\\\\n###
Watchers\\\\\\n\\\\\\n[**256**\\\\\\\\\\nwatching](https://github.com/firecrawl/firecrawl/watchers)\\\\\\n\\\\\\n###
Forks\\\\\\n\\\\\\n[**5.1k**\\\\\\\\\\nforks](https://github.com/firecrawl/firecrawl/forks)\\\\\\n\\\\\\n[Report
repository](https://github.com/contact/report-content?content_url=https%3A%2F%2Fgithub.com%2Ffirecrawl%2Ffirecrawl&report=firecrawl+%28user%29)\\\\\\n\\\\\\n##
[Releases\\\\ 28](https://github.com/firecrawl/firecrawl/releases)\\\\\\n\\\\\\n[v2.4.0\\\\\\\\\\nLatest\\\\\\\\\\n\\\\\\\\\\n2
weeks agoOct 13, 2025](https://github.com/firecrawl/firecrawl/releases/tag/v2.4.0)\\\\\\n\\\\\\n[\\\\+
27 releases](https://github.com/firecrawl/firecrawl/releases)\\\\\\n\\\\\\n##
[Packages\\\\ 3](https://github.com/orgs/firecrawl/packages?repo_name=firecrawl)\\\\\\n\\\\\\n-
[firecrawl](https://github.com/orgs/firecrawl/packages/container/package/firecrawl)\\\\\\n-
[playwright-service](https://github.com/orgs/firecrawl/packages/container/package/playwright-service)\\\\\\n-
[nuq-postgres](https://github.com/orgs/firecrawl/packages/container/package/nuq-postgres)\\\\\\n\\\\\\n##
[Contributors\\\\ 121](https://github.com/firecrawl/firecrawl/graphs/contributors)\\\\\\n\\\\\\n[\\\\+
107 contributors](https://github.com/firecrawl/firecrawl/graphs/contributors)\\\\\\n\\\\\\n##
Languages\\\\\\n\\\\\\n- [TypeScript73.5%](https://github.com/firecrawl/firecrawl/search?l=typescript)\\\\\\n-
[Python18.9%](https://github.com/firecrawl/firecrawl/search?l=python)\\\\\\n-
[Rust6.0%](https://github.com/firecrawl/firecrawl/search?l=rust)\\\\\\n- [Astro0.6%](https://github.com/firecrawl/firecrawl/search?l=astro)\\\\\\n-
[JavaScript0.3%](https://github.com/firecrawl/firecrawl/search?l=javascript)\\\\\\n-
[Jupyter Notebook0.2%](https://github.com/firecrawl/firecrawl/search?l=jupyter-notebook)\\\\\\n-
Other0.5%\",\"metadata\":{\"octolytics-dimension-repository_network_root_id\":\"787076358\",\"visitor-hmac\":\"163b2538b2335f7d4000a770785477e15881e1101708ca5ff1e8c021095f6ca1\",\"og:type\":\"object\",\"language\":\"en\",\"route-action\":\"disambiguate\",\"og:title\":\"GitHub
- firecrawl/firecrawl: \U0001F525 The Web Data API for AI - Turn entire websites
into LLM-ready markdown or structured data\",\"octolytics-dimension-repository_public\":\"true\",\"octolytics-dimension-repository_network_root_nwo\":\"firecrawl/firecrawl\",\"browser-errors-url\":\"https://api.github.com/_private/browser/errors\",\"browser-stats-url\":\"https://api.github.com/_private/browser/stats\",\"twitter:title\":\"GitHub
- firecrawl/firecrawl: \U0001F525 The Web Data API for AI - Turn entire websites
into LLM-ready markdown or structured data\",\"ui-target\":\"full\",\"og:image\":\"https://repository-images.githubusercontent.com/787076358/f9616c09-3701-41ef-b5a6-fdf912ffb15b\",\"google-site-verification\":\"Apib7-x98H0j5cPqHWwSMm6dNU4GmODRoqxLiDzdx9I\",\"ogSiteName\":\"GitHub\",\"route-pattern\":\"/:user_id/:repository\",\"visitor-payload\":\"eyJyZWZlcnJlciI6IiIsInJlcXVlc3RfaWQiOiJBMTVGOjE3OUI0RDo2MzdFQUREOjg3MzEwOTM6NjkwMTE4MjUiLCJ2aXNpdG9yX2lkIjoiNDkyNzk2MzExNjg5OTIxMTMwMSIsInJlZ2lvbl9lZGdlIjoiaWFkIiwicmVnaW9uX3JlbmRlciI6ImlhZCJ9\",\"og:description\":\"\U0001F525
The Web Data API for AI - Turn entire websites into LLM-ready markdown or
structured data - firecrawl/firecrawl\",\"expected-hostname\":\"github.com\",\"release\":\"66136a30a16cc69206f1249b6ba072daa2174535\",\"title\":\"GitHub
- firecrawl/firecrawl: \U0001F525 The Web Data API for AI - Turn entire websites
into LLM-ready markdown or structured data\",\"ogDescription\":\"\U0001F525
The Web Data API for AI - Turn entire websites into LLM-ready markdown or
structured data - firecrawl/firecrawl\",\"twitter:card\":\"summary_large_image\",\"fb:app_id\":\"1401488693436528\",\"color-scheme\":\"light
dark\",\"twitter:description\":\"\U0001F525 The Web Data API for AI - Turn
entire websites into LLM-ready markdown or structured data - firecrawl/firecrawl\",\"favicon\":\"https://github.githubassets.com/favicons/favicon.svg\",\"viewport\":\"width=device-width\",\"twitter:image\":\"https://repository-images.githubusercontent.com/787076358/f9616c09-3701-41ef-b5a6-fdf912ffb15b\",\"user-login\":\"\",\"description\":\"\U0001F525
The Web Data API for AI - Turn entire websites into LLM-ready markdown or
structured data - firecrawl/firecrawl\",\"octolytics-dimension-repository_nwo\":\"firecrawl/firecrawl\",\"octolytics-dimension-user_id\":\"135057108\",\"twitter:site\":\"@github\",\"og:url\":\"https://github.com/firecrawl/firecrawl\",\"octolytics-dimension-user_login\":\"firecrawl\",\"hostname\":\"github.com\",\"current-catalog-service-hash\":\"f3abb0cc802f3d7b95fc8762b94bdcb13bf39634c40c357301c4aa1d67a256fb\",\"html-safe-nonce\":\"e5653da3800db7de2c8b4a64ff6367242043a9e452608e9a6941a1c9e8346cfc\",\"apple-itunes-app\":\"app-id=1477376905,
app-argument=https://github.com/firecrawl/firecrawl\",\"turbo-cache-control\":\"no-preview\",\"og:site_name\":\"GitHub\",\"request-id\":\"A15F:179B4D:637EADD:8731093:69011825\",\"octolytics-url\":\"https://collector.github.com/github/collect\",\"octolytics-dimension-repository_is_fork\":\"false\",\"fetch-nonce\":\"v2:bc80e85b-edaf-e746-9f2c-ffdc75854b07\",\"og:image:alt\":\"\U0001F525
The Web Data API for AI - Turn entire websites into LLM-ready markdown or
structured data - firecrawl/firecrawl\",\"github-keyboard-shortcuts\":\"repository,copilot\",\"ogTitle\":\"GitHub
- firecrawl/firecrawl: \U0001F525 The Web Data API for AI - Turn entire websites
into LLM-ready markdown or structured data\",\"ogImage\":\"https://repository-images.githubusercontent.com/787076358/f9616c09-3701-41ef-b5a6-fdf912ffb15b\",\"analytics-location\":\"/<user-name>/<repo-name>\",\"route-controller\":\"files\",\"octolytics-dimension-repository_id\":\"787076358\",\"ogUrl\":\"https://github.com/firecrawl/firecrawl\",\"go-import\":\"github.com/firecrawl/firecrawl
git https://github.com/firecrawl/firecrawl.git\",\"hovercard-subject-tag\":\"repository:787076358\",\"theme-color\":\"#1e2327\",\"turbo-body-classes\":\"logged-out
env-production page-responsive\",\"scrapeId\":\"ec4d99a0-4c4f-4d1a-9fd2-08b8f891f883\",\"sourceURL\":\"https://github.com/firecrawl/firecrawl\",\"url\":\"https://github.com/firecrawl/firecrawl\",\"statusCode\":200,\"contentType\":\"text/html;
charset=utf-8\",\"proxyUsed\":\"basic\",\"cacheState\":\"hit\",\"cachedAt\":\"2025-10-28T19:23:20.106Z\"}},{\"url\":\"https://x.com/firecrawl_dev?lang=en\",\"title\":\"Firecrawl
(@firecrawl_dev) / Posts / X\",\"description\":\"Firecrawl (@firecrawl_dev)
- Posts - Turn websites into LLM-ready data. Built by @mendableai team Open
source: | X (formerly Twitter)\",\"position\":3},{\"url\":\"https://github.com/firecrawl\",\"title\":\"Firecrawl
- GitHub\",\"description\":\"Building AI applications? You need clean, structured
data from the web. Firecrawl handles the complexity of modern web scraping
so you can focus on building ...\",\"position\":4,\"category\":\"github\",\"markdown\":\"[Skip
to content](https://github.com/firecrawl#start-of-content)\\n\\nYou signed
in with another tab or window. [Reload](https://github.com/firecrawl) to refresh
your session.You signed out in another tab or window. [Reload](https://github.com/firecrawl)
to refresh your session.You switched accounts on another tab or window. [Reload](https://github.com/firecrawl)
to refresh your session.Dismiss alert\\n\\n{{ message }}\\n\\n[README.md](https://github.com/firecrawl/.github/tree/main/profile/README.md)\\n\\n#
\U0001F525 Firecrawl\\n\\n[Permalink: \U0001F525 Firecrawl](https://github.com/firecrawl#-firecrawl)\\n\\n[![Firecrawl
Logo](https://raw.githubusercontent.com/mendableai/firecrawl/main/img/firecrawl_logo.png)](https://raw.githubusercontent.com/mendableai/firecrawl/main/img/firecrawl_logo.png)\\n\\n###
Transform any website into LLM-ready data\\n\\n[Permalink: Transform any website
into LLM-ready data](https://github.com/firecrawl#transform-any-website-into-llm-ready-data)\\n\\nAdvanced
web scraping, crawling, and data extraction infrastructure for AI applications\\n\\n[![Get
Started](https://camo.githubusercontent.com/85b729c7fb201b60a98279ddc4e70268281fc6df999e110276f798cf5c050126/68747470733a2f2f696d672e736869656c64732e696f2f62616467652ff09f9a805f4765745f537461727465642d4646364233353f7374796c653d666f722d7468652d6261646765)](https://firecrawl.dev/)
[![Documentation](https://camo.githubusercontent.com/f7531cb91d3d3dcac76ac7c2ba9d36c1a74728016c64d942cfaf954f5ae4a238/68747470733a2f2f696d672e736869656c64732e696f2f62616467652ff09f939a5f446f63756d656e746174696f6e2d3441393045323f7374796c653d666f722d7468652d6261646765)](https://docs.firecrawl.dev/)
[![Discord](https://camo.githubusercontent.com/8c2d9f948c1d79b69e26d25add89a17a734b48cc6fd6fc0040f34f31c6a11774/68747470733a2f2f696d672e736869656c64732e696f2f62616467652ff09f92ac5f4a6f696e5f446973636f72642d3538363546323f7374796c653d666f722d7468652d6261646765)](https://discord.com/invite/gSmWdAkdwd)\\n\\n[![License](https://camo.githubusercontent.com/a6f4431b80529dbeaa43c3c5fbcf4649f6b4ebbeb82d5a58abeb39ca3eeca8be/68747470733a2f2f696d672e736869656c64732e696f2f6769746875622f6c6963656e73652f6d656e6461626c6561692f66697265637261776c)](https://github.com/mendableai/firecrawl/blob/main/LICENSE)[![GitHub
Stars](https://camo.githubusercontent.com/f42ce9a4d46d07baa67b74b49277e72ed33877743358deebfa774e17532eb4ff/68747470733a2f2f696d672e736869656c64732e696f2f6769746875622f73746172732f6d656e6461626c6561692f66697265637261776c3f7374796c653d736f6369616c)](https://github.com/mendableai/firecrawl/stargazers)[![Python
Downloads](https://camo.githubusercontent.com/9d76afe428b4085c8b7103f2f4e31da110ee154ad7320bace4348d92ac0c2450/68747470733a2f2f7374617469632e706570792e746563682f62616467652f66697265637261776c2d7079)](https://pepy.tech/project/firecrawl-py)[![Follow
on X](https://camo.githubusercontent.com/e32f3aece18eaab32ee100cadb592843d985aa1171a8da08ae047626363d65ae/68747470733a2f2f696d672e736869656c64732e696f2f747769747465722f666f6c6c6f772f66697265637261776c5f6465763f7374796c653d736f6369616c)](https://x.com/firecrawl_dev)\\n\\n*
* *\\n\\n## Why Firecrawl?\\n\\n[Permalink: Why Firecrawl?](https://github.com/firecrawl#why-firecrawl)\\n\\n**Building
AI applications?** You need clean, structured data from the web. Firecrawl
handles the complexity of modern web scraping so you can focus on building
great products.\\n\\n## Our Core Ecosystem\\n\\n[Permalink: Our Core Ecosystem](https://github.com/firecrawl#our-core-ecosystem)\\n\\n###
Main Repository\\n\\n[Permalink: Main Repository](https://github.com/firecrawl#main-repository)\\n\\n[![](https://camo.githubusercontent.com/97aa9741f2773cb2d192c516c7689f4e2bbab89403aa868d842487551743626a/68747470733a2f2f6769746875622d726561646d652d73746174732e76657263656c2e6170702f6170692f70696e2f3f757365726e616d653d6d656e6461626c656169267265706f3d66697265637261776c267468656d653d6c69676874)](https://github.com/mendableai/firecrawl)\\n\\n**[firecrawl](https://github.com/mendableai/firecrawl)**
\\\\- Core API & SDK\\n\\nTurn entire websites into LLM-ready markdown or
structured data. Our flagship product with 40k+ stars.\\n\\n### Cloud API\\n\\n[Permalink:
Cloud API](https://github.com/firecrawl#cloud-api)\\n\\n[![Cloud API](https://camo.githubusercontent.com/6ad9773ed98c84d54b1546ffbf8b0fbb085be0c60580ae5a1ead2d23ddf1b121/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f436c6f75645f4150492d4646364233353f7374796c653d666f722d7468652d6261646765266c6f676f3d636c6f7564266c6f676f436f6c6f723d7768697465)](https://firecrawl.dev/)\\n\\n**[Firecrawl](https://firecrawl.dev/)**
\\\\- Hosted API Service\\n\\nProduction-ready web scraping without infrastructure
management. Get your API key and start scraping in minutes with our reliable,
scalable cloud service.\\n\\n### MCP Integration\\n\\n[Permalink: MCP Integration](https://github.com/firecrawl#mcp-integration)\\n\\n[![MCP
Server](https://camo.githubusercontent.com/ea9cbd6a754e0932d17ae393ff53566bfb681492abddd08704dbe04b122dfaa1/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f4d43505f5365727665722d3441393045323f7374796c653d666f722d7468652d6261646765266c6f676f3d736572766572266c6f676f436f6c6f723d7768697465)](https://github.com/mendableai/firecrawl-mcp-server)\\n\\n**[firecrawl-mcp-server](https://github.com/mendableai/firecrawl-mcp-server)**
\\\\- Model Context Protocol Server\\n\\nAdd powerful web scraping capabilities
to Claude, Cursor, and any MCP-compatible LLM client.\\n\\n## Community &
Support\\n\\n[Permalink: Community & Support](https://github.com/firecrawl#community--support)\\n\\n[![Discord](https://camo.githubusercontent.com/62d3d35241760cf174631c4e6b5f4503c0a6b34640fd306e36a829ab5ec47b14/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f446973636f72642d3538363546323f7374796c653d666f722d7468652d6261646765266c6f676f3d646973636f7264266c6f676f436f6c6f723d7768697465)](https://discord.com/invite/gSmWdAkdwd)[![X](https://camo.githubusercontent.com/8c709aaebc7feee6050eba44984b294d9da3ace3353bd5eed8b499dd04af3c06/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f582d3030303030303f7374796c653d666f722d7468652d6261646765266c6f676f3d78266c6f676f436f6c6f723d7768697465)](https://x.com/firecrawl_dev)[![LinkedIn](https://camo.githubusercontent.com/8c0692475a5bfc1d9e7361074bdb648e567cae7b5b40ffd32adae31180b0d7b6/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f4c696e6b6564496e2d3030373742353f7374796c653d666f722d7468652d6261646765266c6f676f3d6c696e6b6564696e266c6f676f436f6c6f723d7768697465)](https://www.linkedin.com/company/104100957/)[![Discussions](https://camo.githubusercontent.com/9403fd9d6d54f5a23a79f9a8a6a256ae82159fb626710fd56c2495fff1257d62/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f4769744875625f44697363757373696f6e732d3138313731373f7374796c653d666f722d7468652d6261646765266c6f676f3d676974687562266c6f676f436f6c6f723d7768697465)](https://github.com/mendableai/firecrawl/discussions)[![Documentation](https://camo.githubusercontent.com/9d518c9da8018ae3524a2580522bd1ef591f343cc4df7983b4476e117fa70bba/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f446f63756d656e746174696f6e2d3441393045323f7374796c653d666f722d7468652d6261646765266c6f676f3d626f6f6b266c6f676f436f6c6f723d7768697465)](https://docs.firecrawl.dev/)\\n\\n##
Built By Mendable\\n\\n[Permalink: Built By Mendable](https://github.com/firecrawl#built-by-mendable)\\n\\nWe're
the team behind [Mendable.ai](https://mendable.ai/), passionate about making
web data accessible for AI applications. Firecrawl powers thousands of AI
products worldwide.\\n\\n* * *\\n\\n**Ready to build something amazing?**\\n\\n[Get
your API key](https://firecrawl.dev/) and start scraping in minutes\\n\\n\\n[Star
our main repo](https://github.com/mendableai/firecrawl) \u2022\\n[Try the
playground](https://firecrawl.dev/playground) \u2022\\n[Read the docs](https://docs.firecrawl.dev/)\\n\\n##
Pinned Loading\\n\\n1. [firecrawl](https://github.com/firecrawl/firecrawl)
firecrawlPublic\\n\\n\\n\\n\\n\\n\\n\U0001F525 The Web Data API for AI - Turn
entire websites into LLM-ready markdown or structured data\\n\\n\\n\\n\\nTypeScript[64.9k](https://github.com/firecrawl/firecrawl/stargazers)
[5.1k](https://github.com/firecrawl/firecrawl/forks)\\n\\n2. [mendable-nextjs-chatbot](https://github.com/firecrawl/mendable-nextjs-chatbot)
mendable-nextjs-chatbotPublic template\\n\\n\\n\\n\\n\\n\\nNext.js Starter
Template for building chatbots with Mendable\\n\\n\\n\\n\\nTypeScript[256](https://github.com/firecrawl/mendable-nextjs-chatbot/stargazers)
[52](https://github.com/firecrawl/mendable-nextjs-chatbot/forks)\\n\\n3. [rag-arena](https://github.com/firecrawl/rag-arena)
rag-arenaPublic\\n\\n\\n\\n\\n\\n\\nOpen-source RAG evaluation through users'
feedback\\n\\n\\n\\n\\nTypeScript[206](https://github.com/firecrawl/rag-arena/stargazers)
[32](https://github.com/firecrawl/rag-arena/forks)\\n\\n4. [QA\\\\_clustering](https://github.com/firecrawl/QA_clustering)
QA\\\\_clusteringPublic\\n\\n\\n\\n\\n\\n\\nAnalyzing chat interactions w/
LLMs to improve \U0001F99C\U0001F517 Langchain docs\\n\\n\\n\\n\\nJupyter
Notebook[80](https://github.com/firecrawl/QA_clustering/stargazers) [12](https://github.com/firecrawl/QA_clustering/forks)\\n\\n5.
[data-connectors](https://github.com/firecrawl/data-connectors) data-connectorsPublic\\n\\n\\n\\n\\n\\n\\nLLM-ready
data connectors\\n\\n\\n\\n\\nTypeScript[95](https://github.com/firecrawl/data-connectors/stargazers)
[23](https://github.com/firecrawl/data-connectors/forks)\\n\\n6. [mendable-py](https://github.com/firecrawl/mendable-py)
mendable-pyPublic\\n\\n\\n\\n\\n\\n\\nBuild Production Ready LLM Chat Apps
in Minutes\\n\\n\\n\\n\\nPython[33](https://github.com/firecrawl/mendable-py/stargazers)
[7](https://github.com/firecrawl/mendable-py/forks)\\n\\n\\n### Repositories\\n\\nLoading\\n\\nType\\n\\nAllPublicSourcesForksArchivedMirrorsTemplates\\n\\nLanguage\\n\\nAllCSSGoJavaJavaScriptJupyter
NotebookMDXPythonRustTypeScript\\n\\nSort\\n\\nLast updatedNameStars\\n\\nShowing
10 of 61 repositories\\n\\n- [firecrawl](https://github.com/firecrawl/firecrawl)\\nPublic\\n\\n\\n\\n\U0001F525
The Web Data API for AI - Turn entire websites into LLM-ready markdown or
structured data\\n\\n\\n\\n\\n\\n\\nfirecrawl/firecrawl\u2019s past year of
commit activity\\n\\n\\n\\nTypeScript[64,949](https://github.com/firecrawl/firecrawl/stargazers)AGPL-3.0\\n[5,132](https://github.com/firecrawl/firecrawl/forks)
[27](https://github.com/firecrawl/firecrawl/issues) [(2 issues need help)](https://github.com/firecrawl/firecrawl/issues?q=label%3A%22good+first+issue%22+is%3Aissue+is%3Aopen)
[85](https://github.com/firecrawl/firecrawl/pulls)\\nUpdated 2 hours agoOct
27, 2025\\n\\n- [firecrawl-docs](https://github.com/firecrawl/firecrawl-docs)\\nPublic\\n\\n\\n\\nDocumentation
for Firecrawl.\\n\\n\\n\\n\\n\\n\\nfirecrawl/firecrawl-docs\u2019s past year
of commit activity\\n\\n\\n\\nMDX[17](https://github.com/firecrawl/firecrawl-docs/stargazers)
[35](https://github.com/firecrawl/firecrawl-docs/forks) [10](https://github.com/firecrawl/firecrawl-docs/issues)
[5](https://github.com/firecrawl/firecrawl-docs/pulls)\\nUpdated 20 hours
agoOct 26, 2025\\n\\n- [open-agent-builder](https://github.com/firecrawl/open-agent-builder)\\nPublic\\n\\n\\n\\n\U0001F525
Visual workflow builder for AI agents powered by Firecrawl - drag-and-drop
web scraping pipelines with real-time execution\\n\\n\\n\\n\\n\\n\\nfirecrawl/open-agent-builder\u2019s
past year of commit activity\\n\\n\\n\\nTypeScript[1,673](https://github.com/firecrawl/open-agent-builder/stargazers)
[274](https://github.com/firecrawl/open-agent-builder/forks) [4](https://github.com/firecrawl/open-agent-builder/issues)
[2](https://github.com/firecrawl/open-agent-builder/pulls)\\nUpdated last
weekOct 20, 2025\\n\\n- [firecrawl-mcp-server](https://github.com/firecrawl/firecrawl-mcp-server)\\nPublic\\n\\n\\n\\n\U0001F525
Official Firecrawl MCP Server - Adds powerful web scraping and search to Cursor,
Claude and any other LLM clients.\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n[**Uh
oh!**](https://github.com/firecrawl/firecrawl-mcp-server/graphs/commit-activity)\\n\\n[There
was an error while loading.](https://github.com/firecrawl/firecrawl-mcp-server/graphs/commit-activity)
[Please reload this page](https://github.com/firecrawl).\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nfirecrawl/firecrawl-mcp-server\u2019s
past year of commit activity\\n\\n\\n\\nJavaScript[4,794](https://github.com/firecrawl/firecrawl-mcp-server/stargazers)MIT\\n[519](https://github.com/firecrawl/firecrawl-mcp-server/forks)
[44](https://github.com/firecrawl/firecrawl-mcp-server/issues) [17](https://github.com/firecrawl/firecrawl-mcp-server/pulls)\\nUpdated
last weekOct 19, 2025\\n\\n- [n8n-nodes-firecrawl](https://github.com/firecrawl/n8n-nodes-firecrawl)\\nPublic\\n\\n\\n\\nn8n
node to interact with Firecrawl\\n\\n\\n\\n\\n\\n\\nfirecrawl/n8n-nodes-firecrawl\u2019s
past year of commit activity\\n\\n\\n\\nTypeScript[21](https://github.com/firecrawl/n8n-nodes-firecrawl/stargazers)MIT\\n[13](https://github.com/firecrawl/n8n-nodes-firecrawl/forks)
[3](https://github.com/firecrawl/n8n-nodes-firecrawl/issues) [0](https://github.com/firecrawl/n8n-nodes-firecrawl/pulls)\\nUpdated
2 weeks agoOct 17, 2025\\n\\n- [.github](https://github.com/firecrawl/.github)\\nPublic\\n\\n\\n\\n\\nfirecrawl/.github\u2019s
past year of commit activity\\n\\n\\n\\n0\\n[1](https://github.com/firecrawl/.github/forks)
[0](https://github.com/firecrawl/.github/issues) [0](https://github.com/firecrawl/.github/pulls)\\nUpdated
2 weeks agoOct 12, 2025\\n\\n- [fire-enrich](https://github.com/firecrawl/fire-enrich)\\nPublic\\n\\n\\n\\n\U0001F525
AI-powered data enrichment tool that transforms emails into rich datasets
with company profiles, funding data, tech stacks, and more using Firecrawl
and multi-agent AI\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n[**Uh oh!**](https://github.com/firecrawl/fire-enrich/graphs/commit-activity)\\n\\n[There
was an error while loading.](https://github.com/firecrawl/fire-enrich/graphs/commit-activity)
[Please reload this page](https://github.com/firecrawl).\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nfirecrawl/fire-enrich\u2019s
past year of commit activity\\n\\n\\n\\nTypeScript[953](https://github.com/firecrawl/fire-enrich/stargazers)MIT\\n[239](https://github.com/firecrawl/fire-enrich/forks)
[12](https://github.com/firecrawl/fire-enrich/issues) [3](https://github.com/firecrawl/fire-enrich/pulls)\\nUpdated
3 weeks agoOct 8, 2025\\n\\n- [firecrawl-java-sdk](https://github.com/firecrawl/firecrawl-java-sdk)\\nPublic\\n\\n\\n\\n\\nfirecrawl/firecrawl-java-sdk\u2019s
past year of commit activity\\n\\n\\n\\nJava[11](https://github.com/firecrawl/firecrawl-java-sdk/stargazers)MIT\\n[4](https://github.com/firecrawl/firecrawl-java-sdk/forks)
[0](https://github.com/firecrawl/firecrawl-java-sdk/issues) [0](https://github.com/firecrawl/firecrawl-java-sdk/pulls)\\nUpdated
last monthSep 28, 2025\\n\\n- [open-lovable](https://github.com/firecrawl/open-lovable)\\nPublic\\n\\n\\n\\n\U0001F525
Clone and recreate any website as a modern React app in seconds\\n\\n\\n\\n\\n\\n\\nfirecrawl/open-lovable\u2019s
past year of commit activity\\n\\n\\n\\nTypeScript[21,320](https://github.com/firecrawl/open-lovable/stargazers)MIT\\n[3,986](https://github.com/firecrawl/open-lovable/forks)
[70](https://github.com/firecrawl/open-lovable/issues) [33](https://github.com/firecrawl/open-lovable/pulls)\\nUpdated
last monthSep 27, 2025\\n\\n- [mineru-api](https://github.com/firecrawl/mineru-api)\\nPublic\\n\\n\\n\\n\\nfirecrawl/mineru-api\u2019s
past year of commit activity\\n\\n\\n\\nPython[12](https://github.com/firecrawl/mineru-api/stargazers)AGPL-3.0\\n[2](https://github.com/firecrawl/mineru-api/forks)
[1](https://github.com/firecrawl/mineru-api/issues) [1](https://github.com/firecrawl/mineru-api/pulls)\\nUpdated
on Sep 26Sep 26, 2025\\n\\n\\n[View all repositories](https://github.com/orgs/firecrawl/repositories?type=all)\\n\\n[**People**](https://github.com/orgs/firecrawl/people)\\n\\n[![@alexnucci](https://avatars.githubusercontent.com/u/1919849?s=70&v=4)](https://github.com/alexnucci)[![@micahstairs](https://avatars.githubusercontent.com/u/7231485?s=70&v=4)](https://github.com/micahstairs)[![@nickscamara](https://avatars.githubusercontent.com/u/20311743?s=70&v=4)](https://github.com/nickscamara)[![@mogery](https://avatars.githubusercontent.com/u/66118807?s=70&v=4)](https://github.com/mogery)[![@developersdigest](https://avatars.githubusercontent.com/u/124798203?s=70&v=4)](https://github.com/developersdigest)\\n\\n####
Top languages\\n\\n[TypeScript](https://github.com/orgs/firecrawl/repositories?language=typescript&type=all)
[Python](https://github.com/orgs/firecrawl/repositories?language=python&type=all)
[JavaScript](https://github.com/orgs/firecrawl/repositories?language=javascript&type=all)
[Go](https://github.com/orgs/firecrawl/repositories?language=go&type=all)
[MDX](https://github.com/orgs/firecrawl/repositories?language=mdx&type=all)\\n\\n####
Most used topics\\n\\n[ai](https://github.com/search?q=topic%3Aai+org%3Afirecrawl+fork%3Atrue&type=repositories
\\\"Topic: ai\\\") [firecrawl](https://github.com/search?q=topic%3Afirecrawl+org%3Afirecrawl+fork%3Atrue&type=repositories
\\\"Topic: firecrawl\\\") [llm](https://github.com/search?q=topic%3Allm+org%3Afirecrawl+fork%3Atrue&type=repositories
\\\"Topic: llm\\\") [web-crawler](https://github.com/search?q=topic%3Aweb-crawler+org%3Afirecrawl+fork%3Atrue&type=repositories
\\\"Topic: web-crawler\\\") [web-scraping](https://github.com/search?q=topic%3Aweb-scraping+org%3Afirecrawl+fork%3Atrue&type=repositories
\\\"Topic: web-scraping\\\")\\n\\nYou can\u2019t perform that action at this
time.\",\"metadata\":{\"analytics-location\":\"/<org-login>\",\"apple-itunes-app\":\"app-id=1477376905,
app-argument=https://github.com/firecrawl\",\"twitter:card\":\"summary_large_image\",\"google-site-verification\":\"Apib7-x98H0j5cPqHWwSMm6dNU4GmODRoqxLiDzdx9I\",\"description\":\"Web
data API for AI. Firecrawl has 61 repositories available. Follow their code
on GitHub.\",\"og:image\":\"https://avatars.githubusercontent.com/u/135057108?s=280&v=4\",\"og:type\":\"profile\",\"visitor-payload\":\"eyJyZWZlcnJlciI6IiIsInJlcXVlc3RfaWQiOiI5REFEOjIzM0ExMzo4RDgzNEI6QzQ5OUNCOjY4RkYzODlGIiwidmlzaXRvcl9pZCI6IjQwMDQ0MTI5MTkxMDA5NDY1OTEiLCJyZWdpb25fZWRnZSI6ImlhZCIsInJlZ2lvbl9yZW5kZXIiOiJpYWQifQ==\",\"github-keyboard-shortcuts\":\"copilot\",\"user-login\":\"\",\"viewport\":\"width=device-width\",\"og:description\":\"Web
data API for AI. Firecrawl has 61 repositories available. Follow their code
on GitHub.\",\"turbo-cache-control\":\"no-preview\",\"fetch-nonce\":\"v2:35a2e032-0081-3f6b-595e-967e32025c6f\",\"og:url\":\"https://github.com/firecrawl\",\"title\":\"Firecrawl
\xB7 GitHub\",\"route-pattern\":\"/:user_id(.:format)\",\"route-action\":\"show\",\"octolytics-url\":\"https://collector.github.com/github/collect\",\"og:site_name\":\"GitHub\",\"twitter:title\":\"Firecrawl\",\"request-id\":\"9DAD:233A13:8D834B:C499CB:68FF389F\",\"ogSiteName\":\"GitHub\",\"fb:app_id\":\"1401488693436528\",\"language\":\"en\",\"twitter:image\":\"https://avatars.githubusercontent.com/u/135057108?s=280&v=4\",\"ogImage\":\"https://avatars.githubusercontent.com/u/135057108?s=280&v=4\",\"release\":\"c44b7f7aa5c70f3296484971978c9f4b1b473352\",\"theme-color\":\"#1e2327\",\"color-scheme\":\"light
dark\",\"html-safe-nonce\":\"2d932295da6aa360d861f16279839ab109dc4e977a51bc99204477969d7d12c6\",\"ogTitle\":\"Firecrawl\",\"hovercard-subject-tag\":\"organization:135057108\",\"twitter:description\":\"Web
data API for AI. Firecrawl has 61 repositories available. Follow their code
on GitHub.\",\"hostname\":\"github.com\",\"ogUrl\":\"https://github.com/firecrawl\",\"ogDescription\":\"Web
data API for AI. Firecrawl has 61 repositories available. Follow their code
on GitHub.\",\"route-controller\":\"profiles\",\"favicon\":\"https://github.githubassets.com/favicons/favicon.svg\",\"visitor-hmac\":\"43cf3bbb9c57a3bcf0b1857fbcabcc78c274e7082e0d28c76cd6d4651bc2a920\",\"twitter:site\":\"@github\",\"ui-target\":\"full\",\"og:title\":\"Firecrawl\",\"expected-hostname\":\"github.com\",\"og:image:alt\":\"Web
data API for AI. Firecrawl has 61 repositories available. Follow their code
on GitHub.\",\"profile:username\":\"firecrawl\",\"current-catalog-service-hash\":\"4a1c50a83cf6cc4b55b6b9c53e553e3f847c876b87fb333f71f5d05db8f1a7db\",\"turbo-body-classes\":\"logged-out
env-production page-responsive\",\"browser-stats-url\":\"https://api.github.com/_private/browser/stats\",\"browser-errors-url\":\"https://api.github.com/_private/browser/errors\",\"scrapeId\":\"65cbc300-be11-4a1a-9d20-c114fb8473a7\",\"sourceURL\":\"https://github.com/firecrawl\",\"url\":\"https://github.com/firecrawl\",\"statusCode\":200,\"contentType\":\"text/html;
charset=utf-8\",\"proxyUsed\":\"basic\",\"cacheState\":\"hit\",\"cachedAt\":\"2025-10-27T09:17:20.756Z\"}},{\"url\":\"https://www.linkedin.com/company/firecrawl\",\"title\":\"Firecrawl
| LinkedIn\",\"description\":\"Our Dify integration now uses Firecrawl /v2
endpoints Scraping is 10x faster thanks to intelligent caching, plus we've
added semantic ...\",\"position\":5}]},\"creditsUsed\":3}"
headers:
Access-Control-Allow-Origin:
- '*'
Alt-Svc:
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
Content-Length:
- '93428'
Content-Type:
- application/json; charset=utf-8
Date:
- Wed, 29 Oct 2025 14:37:39 GMT
ETag:
- W/"16cf4-kHwVbMu4CCVG2UIt6p1g/gz5M4M"
Via:
- 1.1 google
X-Powered-By:
- Express
X-Response-Time:
- 13172.495ms
status:
code: 200
message: OK
version: 1

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -4,7 +4,7 @@ from crewai_tools.tools.firecrawl_crawl_website_tool.firecrawl_crawl_website_too
FirecrawlCrawlWebsiteTool,
)
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_firecrawl_crawl_tool_integration():
tool = FirecrawlCrawlWebsiteTool(config={
"limit": 2,

View File

@@ -4,7 +4,7 @@ from crewai_tools.tools.firecrawl_scrape_website_tool.firecrawl_scrape_website_t
FirecrawlScrapeWebsiteTool,
)
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_firecrawl_scrape_tool_integration():
tool = FirecrawlScrapeWebsiteTool()
result = tool.run(url="https://firecrawl.dev")

View File

@@ -3,7 +3,7 @@ import pytest
from crewai_tools.tools.firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_firecrawl_search_tool_integration():
tool = FirecrawlSearchTool()
result = tool.run(query="firecrawl")
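
The three Firecrawl test changes above swap `@pytest.mark.vcr()` for `@pytest.mark.vcr(filter_headers=["authorization"])`, so the recorded cassettes never persist a real API key. A minimal sketch of the same idea, assuming pytest-recording forwards marker kwargs to VCR.py; the test name and cassette path below are illustrative, not taken from the repo:

```
import pytest
import vcr

# pytest-recording passes marker kwargs to VCR.py, so the Authorization
# header is scrubbed before the cassette is written to disk.
@pytest.mark.vcr(filter_headers=["authorization"])
def test_firecrawl_call_records_no_api_key():
    ...  # perform the HTTP call under test here

# Equivalent configuration when driving VCR.py directly:
scrubbed_vcr = vcr.VCR(filter_headers=["authorization"])
with scrubbed_vcr.use_cassette("cassettes/firecrawl_scrape.yaml"):
    ...  # requests made here are recorded without the Authorization header
```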

View File

@@ -48,7 +48,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = [
"crewai-tools==1.4.1",
"crewai-tools==1.3.0",
]
embeddings = [
"tiktoken~=0.8.0"

View File

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
_suppress_pydantic_deprecation_warnings()
__version__ = "1.4.1"
__version__ = "1.3.0"
_telemetry_submitted = False

View File

@@ -40,16 +40,6 @@ from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
from crewai.lite_agent import LiteAgent
from crewai.llms.base_llm import BaseLLM
from crewai.mcp import (
MCPClient,
MCPServerConfig,
MCPServerHTTP,
MCPServerSSE,
MCPServerStdio,
)
from crewai.mcp.transports.http import HTTPTransport
from crewai.mcp.transports.sse import SSETransport
from crewai.mcp.transports.stdio import StdioTransport
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.rag.embeddings.types import EmbedderConfig
from crewai.security.fingerprint import Fingerprint
@@ -118,7 +108,6 @@ class Agent(BaseAgent):
"""
_times_executed: int = PrivateAttr(default=0)
_mcp_clients: list[Any] = PrivateAttr(default_factory=list)
max_execution_time: int | None = Field(
default=None,
description="Maximum execution time for an agent to execute a task",
@@ -537,9 +526,6 @@ class Agent(BaseAgent):
self,
event=AgentExecutionCompletedEvent(agent=self, task=task, output=result),
)
self._cleanup_mcp_clients()
return result
def _execute_with_timeout(self, task_prompt: str, task: Task, timeout: int) -> Any:
@@ -663,70 +649,30 @@ class Agent(BaseAgent):
self._logger.log("error", f"Error getting platform tools: {e!s}")
return []
def get_mcp_tools(self, mcps: list[str | MCPServerConfig]) -> list[BaseTool]:
"""Convert MCP server references/configs to CrewAI tools.
Supports both string references (backwards compatible) and structured
configuration objects (MCPServerStdio, MCPServerHTTP, MCPServerSSE).
Args:
mcps: List of MCP server references (strings) or configurations.
Returns:
List of BaseTool instances from MCP servers.
"""
def get_mcp_tools(self, mcps: list[str]) -> list[BaseTool]:
"""Convert MCP server references to CrewAI tools."""
all_tools = []
clients = []
for mcp_config in mcps:
if isinstance(mcp_config, str):
tools = self._get_mcp_tools_from_string(mcp_config)
else:
tools, client = self._get_native_mcp_tools(mcp_config)
if client:
clients.append(client)
for mcp_ref in mcps:
try:
if mcp_ref.startswith("crewai-amp:"):
tools = self._get_amp_mcp_tools(mcp_ref)
elif mcp_ref.startswith("https://"):
tools = self._get_external_mcp_tools(mcp_ref)
else:
continue
all_tools.extend(tools)
all_tools.extend(tools)
self._logger.log(
"info", f"Successfully loaded {len(tools)} tools from {mcp_ref}"
)
except Exception as e:
self._logger.log("warning", f"Skipping MCP {mcp_ref} due to error: {e}")
continue
# Store clients for cleanup
self._mcp_clients.extend(clients)
return all_tools
def _cleanup_mcp_clients(self) -> None:
"""Cleanup MCP client connections after task execution."""
if not self._mcp_clients:
return
async def _disconnect_all() -> None:
for client in self._mcp_clients:
if client and hasattr(client, "connected") and client.connected:
await client.disconnect()
try:
asyncio.run(_disconnect_all())
except Exception as e:
self._logger.log("error", f"Error during MCP client cleanup: {e}")
finally:
self._mcp_clients.clear()
def _get_mcp_tools_from_string(self, mcp_ref: str) -> list[BaseTool]:
"""Get tools from legacy string-based MCP references.
This method maintains backwards compatibility with string-based
MCP references (https://... and crewai-amp:...).
Args:
mcp_ref: String reference to MCP server.
Returns:
List of BaseTool instances.
"""
if mcp_ref.startswith("crewai-amp:"):
return self._get_amp_mcp_tools(mcp_ref)
if mcp_ref.startswith("https://"):
return self._get_external_mcp_tools(mcp_ref)
return []
def _get_external_mcp_tools(self, mcp_ref: str) -> list[BaseTool]:
"""Get tools from external HTTPS MCP server with graceful error handling."""
from crewai.tools.mcp_tool_wrapper import MCPToolWrapper
@@ -785,164 +731,6 @@ class Agent(BaseAgent):
)
return []
def _get_native_mcp_tools(
self, mcp_config: MCPServerConfig
) -> tuple[list[BaseTool], Any | None]:
"""Get tools from MCP server using structured configuration.
This method creates an MCP client based on the configuration type,
connects to the server, discovers tools, applies filtering, and
returns wrapped tools along with the client instance for cleanup.
Args:
mcp_config: MCP server configuration (MCPServerStdio, MCPServerHTTP, or MCPServerSSE).
Returns:
Tuple of (list of BaseTool instances, MCPClient instance for cleanup).
"""
from crewai.tools.base_tool import BaseTool
from crewai.tools.mcp_native_tool import MCPNativeTool
if isinstance(mcp_config, MCPServerStdio):
transport = StdioTransport(
command=mcp_config.command,
args=mcp_config.args,
env=mcp_config.env,
)
server_name = f"{mcp_config.command}_{'_'.join(mcp_config.args)}"
elif isinstance(mcp_config, MCPServerHTTP):
transport = HTTPTransport(
url=mcp_config.url,
headers=mcp_config.headers,
streamable=mcp_config.streamable,
)
server_name = self._extract_server_name(mcp_config.url)
elif isinstance(mcp_config, MCPServerSSE):
transport = SSETransport(
url=mcp_config.url,
headers=mcp_config.headers,
)
server_name = self._extract_server_name(mcp_config.url)
else:
raise ValueError(f"Unsupported MCP server config type: {type(mcp_config)}")
client = MCPClient(
transport=transport,
cache_tools_list=mcp_config.cache_tools_list,
)
async def _setup_client_and_list_tools() -> list[dict[str, Any]]:
"""Async helper to connect and list tools in same event loop."""
try:
if not client.connected:
await client.connect()
tools_list = await client.list_tools()
try:
await client.disconnect()
# Small delay to allow background tasks to finish cleanup
# This helps prevent "cancel scope in different task" errors
# when asyncio.run() closes the event loop
await asyncio.sleep(0.1)
except Exception as e:
self._logger.log("error", f"Error during disconnect: {e}")
return tools_list
except Exception as e:
if client.connected:
await client.disconnect()
await asyncio.sleep(0.1)
raise RuntimeError(
f"Error during setup client and list tools: {e}"
) from e
try:
try:
asyncio.get_running_loop()
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(
asyncio.run, _setup_client_and_list_tools()
)
tools_list = future.result()
except RuntimeError:
try:
tools_list = asyncio.run(_setup_client_and_list_tools())
except RuntimeError as e:
error_msg = str(e).lower()
if "cancel scope" in error_msg or "task" in error_msg:
raise ConnectionError(
"MCP connection failed due to event loop cleanup issues. "
"This may be due to authentication errors or server unavailability."
) from e
except asyncio.CancelledError as e:
raise ConnectionError(
"MCP connection was cancelled. This may indicate an authentication "
"error or server unavailability."
) from e
if mcp_config.tool_filter:
filtered_tools = []
for tool in tools_list:
if callable(mcp_config.tool_filter):
try:
from crewai.mcp.filters import ToolFilterContext
context = ToolFilterContext(
agent=self,
server_name=server_name,
run_context=None,
)
if mcp_config.tool_filter(context, tool):
filtered_tools.append(tool)
except (TypeError, AttributeError):
if mcp_config.tool_filter(tool):
filtered_tools.append(tool)
else:
# Not callable - include tool
filtered_tools.append(tool)
tools_list = filtered_tools
tools = []
for tool_def in tools_list:
tool_name = tool_def.get("name", "")
if not tool_name:
continue
# Convert inputSchema to Pydantic model if present
args_schema = None
if tool_def.get("inputSchema"):
args_schema = self._json_schema_to_pydantic(
tool_name, tool_def["inputSchema"]
)
tool_schema = {
"description": tool_def.get("description", ""),
"args_schema": args_schema,
}
try:
native_tool = MCPNativeTool(
mcp_client=client,
tool_name=tool_name,
tool_schema=tool_schema,
server_name=server_name,
)
tools.append(native_tool)
except Exception as e:
self._logger.log("error", f"Failed to create native MCP tool: {e}")
continue
return cast(list[BaseTool], tools), client
except Exception as e:
if client.connected:
asyncio.run(client.disconnect())
raise RuntimeError(f"Failed to get native MCP tools: {e}") from e
def _get_amp_mcp_tools(self, amp_ref: str) -> list[BaseTool]:
"""Get tools from CrewAI AMP MCP marketplace."""
# Parse: "crewai-amp:mcp-name" or "crewai-amp:mcp-name#tool_name"

View File

@@ -25,7 +25,6 @@ from crewai.agents.tools_handler import ToolsHandler
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.knowledge_config import KnowledgeConfig
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.mcp.config import MCPServerConfig
from crewai.rag.embeddings.types import EmbedderConfig
from crewai.security.security_config import SecurityConfig
from crewai.tools.base_tool import BaseTool, Tool
@@ -195,7 +194,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
default=None,
description="List of applications or application/action combinations that the agent can access through CrewAI Platform. Can contain app names (e.g., 'gmail') or specific actions (e.g., 'gmail/send_email')",
)
mcps: list[str | MCPServerConfig] | None = Field(
mcps: list[str] | None = Field(
default=None,
description="List of MCP server references. Supports 'https://server.com/path' for external servers and 'crewai-amp:mcp-name' for AMP marketplace. Use '#tool_name' suffix for specific tools.",
)
@@ -254,36 +253,20 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
@field_validator("mcps")
@classmethod
def validate_mcps(
cls, mcps: list[str | MCPServerConfig] | None
) -> list[str | MCPServerConfig] | None:
"""Validate MCP server references and configurations.
Supports both string references (for backwards compatibility) and
structured configuration objects (MCPServerStdio, MCPServerHTTP, MCPServerSSE).
"""
def validate_mcps(cls, mcps: list[str] | None) -> list[str] | None:
if not mcps:
return mcps
validated_mcps = []
for mcp in mcps:
if isinstance(mcp, str):
if mcp.startswith(("https://", "crewai-amp:")):
validated_mcps.append(mcp)
else:
raise ValueError(
f"Invalid MCP reference: {mcp}. "
"String references must start with 'https://' or 'crewai-amp:'"
)
elif isinstance(mcp, (MCPServerConfig)):
if mcp.startswith(("https://", "crewai-amp:")):
validated_mcps.append(mcp)
else:
raise ValueError(
f"Invalid MCP configuration: {type(mcp)}. "
"Must be a string reference or MCPServerConfig instance."
f"Invalid MCP reference: {mcp}. Must start with 'https://' or 'crewai-amp:'"
)
return validated_mcps
return list(set(validated_mcps))
@model_validator(mode="after")
def validate_and_set_attributes(self) -> Self:
@@ -360,7 +343,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
"""Get platform tools for the specified list of applications and/or application/action combinations."""
@abstractmethod
def get_mcp_tools(self, mcps: list[str | MCPServerConfig]) -> list[BaseTool]:
def get_mcp_tools(self, mcps: list[str]) -> list[BaseTool]:
"""Get MCP tools for the specified list of MCP server references."""
def copy(self) -> Self: # type: ignore # Signature of "copy" incompatible with supertype "BaseModel"
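
With the structured configs gone, `mcps` only accepts the two string forms the validator above allows. An illustrative declaration, assuming the usual `Agent` constructor fields; the server URL, marketplace entry, and tool name are placeholders:

```
from crewai import Agent

agent = Agent(
    role="Researcher",
    goal="Answer questions with fresh web data",
    backstory="Relies on remote MCP servers for tooling",
    mcps=[
        "https://mcp.example.com/tools",        # external HTTPS server (placeholder URL)
        "crewai-amp:firecrawl#scrape_website",  # AMP marketplace entry (placeholder), single tool
    ],
)
```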

View File

@@ -214,7 +214,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
llm=self.llm,
callbacks=self.callbacks,
)
break
enforce_rpm_limit(self.request_within_rpm_limit)
@@ -227,7 +226,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
from_agent=self.agent,
response_model=self.response_model,
)
formatted_answer = process_llm_response(answer, self.use_stop_words) # type: ignore[assignment]
formatted_answer = process_llm_response(answer, self.use_stop_words)
if isinstance(formatted_answer, AgentAction):
# Extract agent fingerprint if available
@@ -259,11 +258,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
formatted_answer, tool_result
)
self._invoke_step_callback(formatted_answer) # type: ignore[arg-type]
self._append_message(formatted_answer.text) # type: ignore[union-attr,attr-defined]
self._invoke_step_callback(formatted_answer)
self._append_message(formatted_answer.text)
except OutputParserError as e:
formatted_answer = handle_output_parser_exception( # type: ignore[assignment]
except OutputParserError as e: # noqa: PERF203
formatted_answer = handle_output_parser_exception(
e=e,
messages=self.messages,
iterations=self.iterations,

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.4.1"
"crewai[tools]==1.3.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.4.1"
"crewai[tools]==1.3.0"
]
[project.scripts]

View File

@@ -16,6 +16,7 @@ from crewai.events.base_event_listener import BaseEventListener
from crewai.events.depends import Depends
from crewai.events.event_bus import crewai_event_bus
from crewai.events.handler_graph import CircularDependencyError
from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
@@ -60,14 +61,6 @@ from crewai.events.types.logging_events import (
AgentLogsExecutionEvent,
AgentLogsStartedEvent,
)
from crewai.events.types.mcp_events import (
MCPConnectionCompletedEvent,
MCPConnectionFailedEvent,
MCPConnectionStartedEvent,
MCPToolExecutionCompletedEvent,
MCPToolExecutionFailedEvent,
MCPToolExecutionStartedEvent,
)
from crewai.events.types.memory_events import (
MemoryQueryCompletedEvent,
MemoryQueryFailedEvent,
@@ -160,12 +153,6 @@ __all__ = [
"LiteAgentExecutionCompletedEvent",
"LiteAgentExecutionErrorEvent",
"LiteAgentExecutionStartedEvent",
"MCPConnectionCompletedEvent",
"MCPConnectionFailedEvent",
"MCPConnectionStartedEvent",
"MCPToolExecutionCompletedEvent",
"MCPToolExecutionFailedEvent",
"MCPToolExecutionStartedEvent",
"MemoryQueryCompletedEvent",
"MemoryQueryFailedEvent",
"MemoryQueryStartedEvent",

View File

@@ -65,14 +65,6 @@ from crewai.events.types.logging_events import (
AgentLogsExecutionEvent,
AgentLogsStartedEvent,
)
from crewai.events.types.mcp_events import (
MCPConnectionCompletedEvent,
MCPConnectionFailedEvent,
MCPConnectionStartedEvent,
MCPToolExecutionCompletedEvent,
MCPToolExecutionFailedEvent,
MCPToolExecutionStartedEvent,
)
from crewai.events.types.reasoning_events import (
AgentReasoningCompletedEvent,
AgentReasoningFailedEvent,
@@ -623,67 +615,5 @@ class EventListener(BaseEventListener):
event.total_turns,
)
# ----------- MCP EVENTS -----------
@crewai_event_bus.on(MCPConnectionStartedEvent)
def on_mcp_connection_started(source, event: MCPConnectionStartedEvent):
self.formatter.handle_mcp_connection_started(
event.server_name,
event.server_url,
event.transport_type,
event.is_reconnect,
event.connect_timeout,
)
@crewai_event_bus.on(MCPConnectionCompletedEvent)
def on_mcp_connection_completed(source, event: MCPConnectionCompletedEvent):
self.formatter.handle_mcp_connection_completed(
event.server_name,
event.server_url,
event.transport_type,
event.connection_duration_ms,
event.is_reconnect,
)
@crewai_event_bus.on(MCPConnectionFailedEvent)
def on_mcp_connection_failed(source, event: MCPConnectionFailedEvent):
self.formatter.handle_mcp_connection_failed(
event.server_name,
event.server_url,
event.transport_type,
event.error,
event.error_type,
)
@crewai_event_bus.on(MCPToolExecutionStartedEvent)
def on_mcp_tool_execution_started(source, event: MCPToolExecutionStartedEvent):
self.formatter.handle_mcp_tool_execution_started(
event.server_name,
event.tool_name,
event.tool_args,
)
@crewai_event_bus.on(MCPToolExecutionCompletedEvent)
def on_mcp_tool_execution_completed(
source, event: MCPToolExecutionCompletedEvent
):
self.formatter.handle_mcp_tool_execution_completed(
event.server_name,
event.tool_name,
event.tool_args,
event.result,
event.execution_duration_ms,
)
@crewai_event_bus.on(MCPToolExecutionFailedEvent)
def on_mcp_tool_execution_failed(source, event: MCPToolExecutionFailedEvent):
self.formatter.handle_mcp_tool_execution_failed(
event.server_name,
event.tool_name,
event.tool_args,
event.error,
event.error_type,
)
event_listener = EventListener()

View File

@@ -40,14 +40,6 @@ from crewai.events.types.llm_guardrail_events import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
from crewai.events.types.mcp_events import (
MCPConnectionCompletedEvent,
MCPConnectionFailedEvent,
MCPConnectionStartedEvent,
MCPToolExecutionCompletedEvent,
MCPToolExecutionFailedEvent,
MCPToolExecutionStartedEvent,
)
from crewai.events.types.memory_events import (
MemoryQueryCompletedEvent,
MemoryQueryFailedEvent,
@@ -123,10 +115,4 @@ EventTypes = (
| MemoryQueryFailedEvent
| MemoryRetrievalStartedEvent
| MemoryRetrievalCompletedEvent
| MCPConnectionStartedEvent
| MCPConnectionCompletedEvent
| MCPConnectionFailedEvent
| MCPToolExecutionStartedEvent
| MCPToolExecutionCompletedEvent
| MCPToolExecutionFailedEvent
)

View File

@@ -1,85 +0,0 @@
from datetime import datetime
from typing import Any
from crewai.events.base_events import BaseEvent
class MCPEvent(BaseEvent):
"""Base event for MCP operations."""
server_name: str
server_url: str | None = None
transport_type: str | None = None # "stdio", "http", "sse"
agent_id: str | None = None
agent_role: str | None = None
from_agent: Any | None = None
from_task: Any | None = None
def __init__(self, **data):
super().__init__(**data)
self._set_agent_params(data)
self._set_task_params(data)
class MCPConnectionStartedEvent(MCPEvent):
"""Event emitted when starting to connect to an MCP server."""
type: str = "mcp_connection_started"
connect_timeout: int | None = None
is_reconnect: bool = (
False # True if this is a reconnection, False for first connection
)
class MCPConnectionCompletedEvent(MCPEvent):
"""Event emitted when successfully connected to an MCP server."""
type: str = "mcp_connection_completed"
started_at: datetime | None = None
completed_at: datetime | None = None
connection_duration_ms: float | None = None
is_reconnect: bool = (
False # True if this was a reconnection, False for first connection
)
class MCPConnectionFailedEvent(MCPEvent):
"""Event emitted when connection to an MCP server fails."""
type: str = "mcp_connection_failed"
error: str
error_type: str | None = None # "timeout", "authentication", "network", etc.
started_at: datetime | None = None
failed_at: datetime | None = None
class MCPToolExecutionStartedEvent(MCPEvent):
"""Event emitted when starting to execute an MCP tool."""
type: str = "mcp_tool_execution_started"
tool_name: str
tool_args: dict[str, Any] | None = None
class MCPToolExecutionCompletedEvent(MCPEvent):
"""Event emitted when MCP tool execution completes."""
type: str = "mcp_tool_execution_completed"
tool_name: str
tool_args: dict[str, Any] | None = None
result: Any | None = None
started_at: datetime | None = None
completed_at: datetime | None = None
execution_duration_ms: float | None = None
class MCPToolExecutionFailedEvent(MCPEvent):
"""Event emitted when MCP tool execution fails."""
type: str = "mcp_tool_execution_failed"
tool_name: str
tool_args: dict[str, Any] | None = None
error: str
error_type: str | None = None # "timeout", "validation", "server_error", etc.
started_at: datetime | None = None
failed_at: datetime | None = None
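
The deleted listener hooks and event classes above paired up through the event bus. A sketch of how a custom subscriber would have consumed one of these events, using only the import paths, handler signature, and event fields shown in the removed code:

```
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.mcp_events import MCPConnectionStartedEvent

@crewai_event_bus.on(MCPConnectionStartedEvent)
def log_mcp_connection(source, event: MCPConnectionStartedEvent) -> None:
    # server_name and transport_type are fields of the MCPEvent base class above.
    print(f"Connecting to {event.server_name} via {event.transport_type}")
```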

View File

@@ -2248,203 +2248,3 @@ class ConsoleFormatter:
self.current_a2a_conversation_branch = None
self.current_a2a_turn_count = 0
# ----------- MCP EVENTS -----------
def handle_mcp_connection_started(
self,
server_name: str,
server_url: str | None = None,
transport_type: str | None = None,
is_reconnect: bool = False,
connect_timeout: int | None = None,
) -> None:
"""Handle MCP connection started event."""
if not self.verbose:
return
content = Text()
reconnect_text = " (Reconnecting)" if is_reconnect else ""
content.append(f"MCP Connection Started{reconnect_text}\n\n", style="cyan bold")
content.append("Server: ", style="white")
content.append(f"{server_name}\n", style="cyan")
if server_url:
content.append("URL: ", style="white")
content.append(f"{server_url}\n", style="cyan dim")
if transport_type:
content.append("Transport: ", style="white")
content.append(f"{transport_type}\n", style="cyan")
if connect_timeout:
content.append("Timeout: ", style="white")
content.append(f"{connect_timeout}s\n", style="cyan")
panel = self.create_panel(content, "🔌 MCP Connection", "cyan")
self.print(panel)
self.print()
def handle_mcp_connection_completed(
self,
server_name: str,
server_url: str | None = None,
transport_type: str | None = None,
connection_duration_ms: float | None = None,
is_reconnect: bool = False,
) -> None:
"""Handle MCP connection completed event."""
if not self.verbose:
return
content = Text()
reconnect_text = " (Reconnected)" if is_reconnect else ""
content.append(
f"MCP Connection Completed{reconnect_text}\n\n", style="green bold"
)
content.append("Server: ", style="white")
content.append(f"{server_name}\n", style="green")
if server_url:
content.append("URL: ", style="white")
content.append(f"{server_url}\n", style="green dim")
if transport_type:
content.append("Transport: ", style="white")
content.append(f"{transport_type}\n", style="green")
if connection_duration_ms is not None:
content.append("Duration: ", style="white")
content.append(f"{connection_duration_ms:.2f}ms\n", style="green")
panel = self.create_panel(content, "✅ MCP Connected", "green")
self.print(panel)
self.print()
def handle_mcp_connection_failed(
self,
server_name: str,
server_url: str | None = None,
transport_type: str | None = None,
error: str = "",
error_type: str | None = None,
) -> None:
"""Handle MCP connection failed event."""
if not self.verbose:
return
content = Text()
content.append("MCP Connection Failed\n\n", style="red bold")
content.append("Server: ", style="white")
content.append(f"{server_name}\n", style="red")
if server_url:
content.append("URL: ", style="white")
content.append(f"{server_url}\n", style="red dim")
if transport_type:
content.append("Transport: ", style="white")
content.append(f"{transport_type}\n", style="red")
if error_type:
content.append("Error Type: ", style="white")
content.append(f"{error_type}\n", style="red")
if error:
content.append("\nError: ", style="white bold")
error_preview = error[:500] + "..." if len(error) > 500 else error
content.append(f"{error_preview}\n", style="red")
panel = self.create_panel(content, "❌ MCP Connection Failed", "red")
self.print(panel)
self.print()
def handle_mcp_tool_execution_started(
self,
server_name: str,
tool_name: str,
tool_args: dict[str, Any] | None = None,
) -> None:
"""Handle MCP tool execution started event."""
if not self.verbose:
return
content = self.create_status_content(
"MCP Tool Execution Started",
tool_name,
"yellow",
tool_args=tool_args or {},
Server=server_name,
)
panel = self.create_panel(content, "🔧 MCP Tool", "yellow")
self.print(panel)
self.print()
def handle_mcp_tool_execution_completed(
self,
server_name: str,
tool_name: str,
tool_args: dict[str, Any] | None = None,
result: Any | None = None,
execution_duration_ms: float | None = None,
) -> None:
"""Handle MCP tool execution completed event."""
if not self.verbose:
return
content = self.create_status_content(
"MCP Tool Execution Completed",
tool_name,
"green",
tool_args=tool_args or {},
Server=server_name,
)
if execution_duration_ms is not None:
content.append("Duration: ", style="white")
content.append(f"{execution_duration_ms:.2f}ms\n", style="green")
if result is not None:
result_str = str(result)
if len(result_str) > 500:
result_str = result_str[:497] + "..."
content.append("\nResult: ", style="white bold")
content.append(f"{result_str}\n", style="green")
panel = self.create_panel(content, "✅ MCP Tool Completed", "green")
self.print(panel)
self.print()
def handle_mcp_tool_execution_failed(
self,
server_name: str,
tool_name: str,
tool_args: dict[str, Any] | None = None,
error: str = "",
error_type: str | None = None,
) -> None:
"""Handle MCP tool execution failed event."""
if not self.verbose:
return
content = self.create_status_content(
"MCP Tool Execution Failed",
tool_name,
"red",
tool_args=tool_args or {},
Server=server_name,
)
if error_type:
content.append("Error Type: ", style="white")
content.append(f"{error_type}\n", style="red")
if error:
content.append("\nError: ", style="white bold")
error_preview = error[:500] + "..." if len(error) > 500 else error
content.append(f"{error_preview}\n", style="red")
panel = self.create_panel(content, "❌ MCP Tool Failed", "red")
self.print(panel)
self.print()
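
Illustrative note (not part of the diff): a sketch of driving the handlers above directly. It assumes `formatter` is a ConsoleFormatter constructed elsewhere with verbose output enabled; the constructor is not shown in this diff, and the argument values are hypothetical.

# Hypothetical calls; argument names match the handler signatures above.
formatter.handle_mcp_connection_started(
    server_name="docs-server",
    server_url="http://localhost:8000/mcp",
    transport_type="http",
    connect_timeout=30,
)
formatter.handle_mcp_tool_execution_completed(
    server_name="docs-server",
    tool_name="search_docs",
    tool_args={"query": "flows"},
    result="3 documents found",
    execution_duration_ms=12.5,
)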

View File

@@ -15,6 +15,7 @@ import logging
from typing import (
Any,
ClassVar,
Final,
Generic,
Literal,
ParamSpec,
@@ -44,7 +45,7 @@ from crewai.events.types.flow_events import (
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
)
from crewai.flow.constants import AND_CONDITION, OR_CONDITION
from crewai.flow.visualization import build_flow_structure, render_interactive
from crewai.flow.flow_wrappers import (
FlowCondition,
FlowConditions,
@@ -57,16 +58,18 @@ from crewai.flow.flow_wrappers import (
from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.types import FlowExecutionData, FlowMethodName, PendingListenerKey
from crewai.flow.utils import (
_extract_all_methods,
_normalize_condition,
get_possible_return_constants,
is_flow_condition_dict,
is_flow_condition_list,
is_flow_method,
is_flow_method_callable,
is_flow_method_name,
is_simple_flow_condition,
_extract_all_methods,
_extract_all_methods_recursive,
_normalize_condition,
)
from crewai.flow.visualization import build_flow_structure, render_interactive
from crewai.flow.constants import AND_CONDITION, OR_CONDITION
from crewai.utilities.printer import Printer, PrinterColor
@@ -428,8 +431,6 @@ class FlowMeta(type):
possible_returns = get_possible_return_constants(attr_value)
if possible_returns:
router_paths[attr_name] = possible_returns
else:
router_paths[attr_name] = []
cls._start_methods = start_methods # type: ignore[attr-defined]
cls._listeners = listeners # type: ignore[attr-defined]
@@ -494,7 +495,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
or should_auto_collect_first_time_traces()
):
trace_listener = TraceCollectionListener()
trace_listener.setup_listeners(crewai_event_bus)
trace_listener.setup_listeners(crewai_event_bus) # type: ignore[no-untyped-call]
# Apply any additional kwargs
if kwargs:
self._initialize_state(kwargs)
@@ -600,26 +601,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
)
def _copy_state(self) -> T:
"""Create a copy of the current state.
Returns:
A copy of the current state
"""
if isinstance(self._state, BaseModel):
try:
return self._state.model_copy(deep=True)
except (TypeError, AttributeError):
try:
state_dict = self._state.model_dump()
model_class = type(self._state)
return model_class(**state_dict)
except Exception:
return self._state.model_copy(deep=False)
else:
try:
return copy.deepcopy(self._state)
except (TypeError, AttributeError):
return cast(T, self._state.copy())
return copy.deepcopy(self._state)
@property
def state(self) -> T:
@@ -944,8 +926,8 @@ class Flow(Generic[T], metaclass=FlowMeta):
trace_listener = TraceCollectionListener()
if trace_listener.batch_manager.batch_owner_type == "flow":
if trace_listener.first_time_handler.is_first_time:
trace_listener.first_time_handler.mark_events_collected()
trace_listener.first_time_handler.handle_execution_completion()
trace_listener.first_time_handler.mark_events_collected() # type: ignore[no-untyped-call]
trace_listener.first_time_handler.handle_execution_completion() # type: ignore[no-untyped-call]
else:
trace_listener.batch_manager.finalize_batch()
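
Illustrative note (not part of the diff): the removed _copy_state above wrapped copy.deepcopy in try/except precisely because deep-copying state that holds a lock fails; a minimal, self-contained reproduction of that failure mode:

import copy
import threading

state = {"memory": threading.RLock(), "count": 0}
try:
    copy.deepcopy(state)
except TypeError as exc:
    # CPython raises: cannot pickle '_thread.RLock' object
    print(f"deepcopy failed: {exc}")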

View File

@@ -21,7 +21,6 @@ P = ParamSpec("P")
R = TypeVar("R", covariant=True)
FlowMethodName = NewType("FlowMethodName", str)
FlowRouteName = NewType("FlowRouteName", str)
PendingListenerKey = NewType(
"PendingListenerKey",
Annotated[str, "nested flow conditions use 'listener_name:object_id'"],

View File

@@ -19,11 +19,11 @@ import ast
from collections import defaultdict, deque
import inspect
import textwrap
from typing import TYPE_CHECKING, Any
from typing import Any, TYPE_CHECKING
from typing_extensions import TypeIs
from crewai.flow.constants import AND_CONDITION, OR_CONDITION
from crewai.flow.constants import OR_CONDITION, AND_CONDITION
from crewai.flow.flow_wrappers import (
FlowCondition,
FlowConditions,
@@ -33,7 +33,6 @@ from crewai.flow.flow_wrappers import (
from crewai.flow.types import FlowMethodCallable, FlowMethodName
from crewai.utilities.printer import Printer
if TYPE_CHECKING:
from crewai.flow.flow import Flow
@@ -41,22 +40,6 @@ _printer = Printer()
def get_possible_return_constants(function: Any) -> list[str] | None:
"""Extract possible string return values from a function using AST parsing.
This function analyzes the source code of a router method to identify
all possible string values it might return. It handles:
- Direct string literals: return "value"
- Variable assignments: x = "value"; return x
- Dictionary lookups: d = {"k": "v"}; return d[key]
- Conditional returns: return "a" if cond else "b"
- State attributes: return self.state.attr (infers from class context)
Args:
function: The function to analyze.
Returns:
List of possible string return values, or None if analysis fails.
"""
try:
source = inspect.getsource(function)
except OSError:
@@ -99,7 +82,6 @@ def get_possible_return_constants(function: Any) -> list[str] | None:
return_values: set[str] = set()
dict_definitions: dict[str, list[str]] = {}
variable_values: dict[str, list[str]] = {}
state_attribute_values: dict[str, list[str]] = {}
def extract_string_constants(node: ast.expr) -> list[str]:
"""Recursively extract all string constants from an AST node."""
@@ -109,17 +91,6 @@ def get_possible_return_constants(function: Any) -> list[str] | None:
elif isinstance(node, ast.IfExp):
strings.extend(extract_string_constants(node.body))
strings.extend(extract_string_constants(node.orelse))
elif isinstance(node, ast.Call):
if (
isinstance(node.func, ast.Attribute)
and node.func.attr == "get"
and len(node.args) >= 2
):
default_arg = node.args[1]
if isinstance(default_arg, ast.Constant) and isinstance(
default_arg.value, str
):
strings.append(default_arg.value)
return strings
class VariableAssignmentVisitor(ast.NodeVisitor):
@@ -153,22 +124,6 @@ def get_possible_return_constants(function: Any) -> list[str] | None:
self.generic_visit(node)
def get_attribute_chain(node: ast.expr) -> str | None:
"""Extract the full attribute chain from an AST node.
Examples:
self.state.run_type -> "self.state.run_type"
x.y.z -> "x.y.z"
simple_var -> "simple_var"
"""
if isinstance(node, ast.Name):
return node.id
if isinstance(node, ast.Attribute):
base = get_attribute_chain(node.value)
if base:
return f"{base}.{node.attr}"
return None
class ReturnVisitor(ast.NodeVisitor):
def visit_Return(self, node: ast.Return) -> None:
if (
@@ -184,94 +139,21 @@ def get_possible_return_constants(function: Any) -> list[str] | None:
for v in dict_definitions[var_name_dict]:
return_values.add(v)
elif node.value:
var_name_ret = get_attribute_chain(node.value)
var_name_ret: str | None = None
if isinstance(node.value, ast.Name):
var_name_ret = node.value.id
elif isinstance(node.value, ast.Attribute):
var_name_ret = f"{node.value.value.id if isinstance(node.value.value, ast.Name) else '_'}.{node.value.attr}"
if var_name_ret and var_name_ret in variable_values:
for v in variable_values[var_name_ret]:
return_values.add(v)
elif var_name_ret and var_name_ret in state_attribute_values:
for v in state_attribute_values[var_name_ret]:
return_values.add(v)
self.generic_visit(node)
def visit_If(self, node: ast.If) -> None:
self.generic_visit(node)
# Try to get the class context to infer state attribute values
try:
if hasattr(function, "__self__"):
# Method is bound, get the class
class_obj = function.__self__.__class__
elif hasattr(function, "__qualname__") and "." in function.__qualname__:
# Method is unbound but we can try to get class from module
class_name = function.__qualname__.rsplit(".", 1)[0]
if hasattr(function, "__globals__"):
class_obj = function.__globals__.get(class_name)
else:
class_obj = None
else:
class_obj = None
if class_obj is not None:
try:
class_source = inspect.getsource(class_obj)
class_source = textwrap.dedent(class_source)
class_ast = ast.parse(class_source)
# Look for comparisons and assignments involving state attributes
class StateAttributeVisitor(ast.NodeVisitor):
def visit_Compare(self, node: ast.Compare) -> None:
"""Find comparisons like: self.state.attr == "value" """
left_attr = get_attribute_chain(node.left)
if left_attr:
for comparator in node.comparators:
if isinstance(comparator, ast.Constant) and isinstance(
comparator.value, str
):
if left_attr not in state_attribute_values:
state_attribute_values[left_attr] = []
if (
comparator.value
not in state_attribute_values[left_attr]
):
state_attribute_values[left_attr].append(
comparator.value
)
# Also check right side
for comparator in node.comparators:
right_attr = get_attribute_chain(comparator)
if (
right_attr
and isinstance(node.left, ast.Constant)
and isinstance(node.left.value, str)
):
if right_attr not in state_attribute_values:
state_attribute_values[right_attr] = []
if (
node.left.value
not in state_attribute_values[right_attr]
):
state_attribute_values[right_attr].append(
node.left.value
)
self.generic_visit(node)
StateAttributeVisitor().visit(class_ast)
except Exception as e:
_printer.print(
f"Could not analyze class context for {function.__name__}: {e}",
color="yellow",
)
except Exception as e:
_printer.print(
f"Could not introspect class for {function.__name__}: {e}",
color="yellow",
)
VariableAssignmentVisitor().visit(code_ast)
ReturnVisitor().visit(code_ast)
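
Illustrative note (not part of the diff): a hypothetical router-style method showing the return shapes the AST analysis above would still be expected to extract after this change (direct string literals, variables assigned string constants, and conditional expressions); the state attributes are made up for the example.

def route(self) -> str:
    # Assignment of string constants: handled by VariableAssignmentVisitor.
    outcome = "success" if self.state.ok else "failure"
    if self.state.retry:
        return "retry"    # direct string literal
    return outcome        # variable resolved to its string constants

# get_possible_return_constants(route) would be expected to yield
# "retry", "success", and "failure" (order not guaranteed).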

File diff suppressed because it is too large

View File

@@ -6,7 +6,6 @@
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap" rel="stylesheet">
<link rel="stylesheet" href="'{{ css_path }}'" />
<script src="https://unpkg.com/lucide@latest"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/prism.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/components/prism-python.min.js"></script>
<script src="'{{ js_path }}'"></script>
@@ -24,129 +23,93 @@
<div class="drawer-title" id="drawer-node-name">Node Details</div>
<div style="display: flex; align-items: center;">
<button class="drawer-open-ide" id="drawer-open-ide" style="display: none;">
<i data-lucide="file-code" style="width: 16px; height: 16px;"></i>
<svg viewBox="0 0 16 16" fill="none" stroke="currentColor" stroke-width="2">
<path d="M4 2 L12 2 L12 14 L4 14 Z" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M6 5 L10 5 M6 8 L10 8 M6 11 L10 11" stroke-linecap="round"/>
</svg>
Open in IDE
</button>
<button class="drawer-close" id="drawer-close">
<i data-lucide="x" style="width: 20px; height: 20px;"></i>
</button>
<button class="drawer-close" id="drawer-close">×</button>
</div>
</div>
<div class="drawer-content" id="drawer-content"></div>
</div>
<div id="info">
<div style="text-align: center;">
<div style="text-align: center; margin-bottom: 20px;">
<img src="https://cdn.prod.website-files.com/68de1ee6d7c127849807d7a6/68de1ee6d7c127849807d7ef_Logo.svg"
alt="CrewAI Logo"
style="width: 144px; height: auto;">
style="width: 120px; height: auto;">
</div>
<h3>Flow Execution</h3>
<div class="stats">
<p><strong>Nodes:</strong> '{{ dag_nodes_count }}'</p>
<p><strong>Edges:</strong> '{{ dag_edges_count }}'</p>
<p><strong>Topological Paths:</strong> '{{ execution_paths }}'</p>
</div>
<div class="legend">
<div class="legend-title">Node Types</div>
<div class="legend-item">
<div class="legend-color" style="background: '{{ CREWAI_ORANGE }}';"></div>
<span>Start Methods</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background: '{{ DARK_GRAY }}'; border: 3px solid '{{ CREWAI_ORANGE }}';"></div>
<span>Router Methods</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background: '{{ DARK_GRAY }}';"></div>
<span>Listen Methods</span>
</div>
</div>
<div class="legend">
<div class="legend-title">Edge Types</div>
<div class="legend-item">
<svg width="24" height="12" style="margin-right: 12px;">
<line x1="0" y1="6" x2="24" y2="6" stroke="'{{ CREWAI_ORANGE }}'" stroke-width="2" stroke-dasharray="5,5"/>
</svg>
<span>Router Paths</span>
</div>
<div class="legend-item">
<svg width="24" height="12" style="margin-right: 12px;" class="legend-or-line">
<line x1="0" y1="6" x2="24" y2="6" stroke="var(--edge-or-color)" stroke-width="2"/>
</svg>
<span>OR Conditions</span>
</div>
<div class="legend-item">
<svg width="24" height="12" style="margin-right: 12px;">
<line x1="0" y1="6" x2="24" y2="6" stroke="'{{ CREWAI_ORANGE }}'" stroke-width="2"/>
</svg>
<span>AND Conditions</span>
</div>
</div>
<div class="instructions">
<strong>Interactions:</strong><br>
• Drag to pan<br>
• Scroll to zoom<br><br>
<strong>IDE:</strong>
<select id="ide-selector" style="width: 100%; padding: 4px; margin-top: 4px; border-radius: 3px; border: 1px solid #e0e0e0; background: white; font-size: 12px; cursor: pointer; pointer-events: auto; position: relative; z-index: 10;">
<option value="auto">Auto-detect</option>
<option value="pycharm">PyCharm</option>
<option value="vscode">VS Code</option>
<option value="jetbrains">JetBrains (Toolbox)</option>
</select>
</div>
</div>
<!-- Custom navigation controls -->
<div class="nav-controls">
<div class="nav-button" id="theme-toggle" title="Toggle Dark Mode">
<i data-lucide="moon" style="width: 18px; height: 18px;"></i>
</div>
<div class="nav-button" id="zoom-in" title="Zoom In">
<i data-lucide="zoom-in" style="width: 18px; height: 18px;"></i>
</div>
<div class="nav-button" id="zoom-out" title="Zoom Out">
<i data-lucide="zoom-out" style="width: 18px; height: 18px;"></i>
</div>
<div class="nav-button" id="fit" title="Fit to Screen">
<i data-lucide="maximize-2" style="width: 18px; height: 18px;"></i>
</div>
<div class="nav-button" id="export-png" title="Export to PNG">
<i data-lucide="image" style="width: 18px; height: 18px;"></i>
</div>
<div class="nav-button" id="export-pdf" title="Export to PDF">
<i data-lucide="file-text" style="width: 18px; height: 18px;"></i>
</div>
<!-- <div class="nav-button" id="export-json" title="Export to JSON">
<i data-lucide="braces" style="width: 18px; height: 18px;"></i>
</div> -->
<div class="nav-button" id="theme-toggle" title="Toggle Dark Mode">🌙</div>
<div class="nav-button" id="zoom-in" title="Zoom In">+</div>
<div class="nav-button" id="zoom-out" title="Zoom Out"></div>
<div class="nav-button" id="fit" title="Fit to Screen">⊡</div>
<div class="nav-button" id="export-png" title="Export to PNG">🖼</div>
<div class="nav-button" id="export-pdf" title="Export to PDF">📄</div>
<div class="nav-button" id="export-json" title="Export to JSON">{}</div>
</div>
<div id="network-container">
<div id="network"></div>
</div>
<!-- Info panel at bottom -->
<div id="legend-panel">
<!-- Stats Section -->
<div class="legend-section">
<div class="legend-stats-row">
<div class="legend-stat-item">
<span class="stat-value">'{{ dag_nodes_count }}'</span>
<span class="stat-label">Nodes</span>
</div>
<div class="legend-stat-item">
<span class="stat-value">'{{ dag_edges_count }}'</span>
<span class="stat-label">Edges</span>
</div>
<div class="legend-stat-item">
<span class="stat-value">'{{ execution_paths }}'</span>
<span class="stat-label">Paths</span>
</div>
</div>
</div>
<!-- Node Types Section -->
<div class="legend-section">
<div class="legend-group">
<div class="legend-item-compact">
<div class="legend-color-small" style="background: var(--node-bg-start);"></div>
<span>Start</span>
</div>
<div class="legend-item-compact">
<div class="legend-color-small" style="background: var(--node-bg-router); border: 2px solid var(--node-border-start);"></div>
<span>Router</span>
</div>
<div class="legend-item-compact">
<div class="legend-color-small" style="background: var(--node-bg-listen); border: 2px solid var(--node-border-listen);"></div>
<span>Listen</span>
</div>
</div>
</div>
<!-- Edge Types Section -->
<div class="legend-section">
<div class="legend-group">
<div class="legend-item-compact">
<svg>
<line x1="0" y1="7" x2="29" y2="7" stroke="var(--edge-router-color)" stroke-width="2" stroke-dasharray="4,4"/>
</svg>
<span>Router</span>
</div>
<div class="legend-item-compact">
<svg class="legend-or-line">
<line x1="0" y1="7" x2="29" y2="7" stroke="var(--edge-or-color)" stroke-width="2"/>
</svg>
<span>OR</span>
</div>
<div class="legend-item-compact">
<svg>
<line x1="0" y1="7" x2="29" y2="7" stroke="var(--edge-router-color)" stroke-width="2"/>
</svg>
<span>AND</span>
</div>
</div>
</div>
<!-- IDE Selector Section -->
<div class="legend-section">
<div class="legend-ide-column">
<label class="legend-ide-label">IDE</label>
<select id="ide-selector" class="legend-ide-select">
<option value="auto">Auto-detect</option>
<option value="pycharm">PyCharm</option>
<option value="vscode">VS Code</option>
<option value="jetbrains">JetBrains</option>
</select>
</div>
</div>
</div>
</body>
</html>

View File

@@ -13,14 +13,6 @@
--edge-label-text: '{{ GRAY }}';
--edge-label-bg: rgba(255, 255, 255, 0.8);
--edge-or-color: #000000;
--edge-router-color: '{{ CREWAI_ORANGE }}';
--node-border-start: #C94238;
--node-border-listen: #3D3D3D;
--node-bg-start: #FF7066;
--node-bg-router: #FFFFFF;
--node-bg-listen: #FFFFFF;
--node-text-color: #FFFFFF;
--nav-button-hover: #f5f5f5;
}
[data-theme="dark"] {
@@ -38,14 +30,6 @@
--edge-label-text: #c9d1d9;
--edge-label-bg: rgba(22, 27, 34, 0.9);
--edge-or-color: #ffffff;
--edge-router-color: '{{ CREWAI_ORANGE }}';
--node-border-start: #FF5A50;
--node-border-listen: #666666;
--node-bg-start: #B33830;
--node-bg-router: #3D3D3D;
--node-bg-listen: #3D3D3D;
--node-text-color: #FFFFFF;
--nav-button-hover: #30363d;
}
@keyframes dash {
@@ -88,10 +72,12 @@ body {
position: absolute;
top: 20px;
left: 20px;
background: transparent;
background: var(--bg-secondary);
padding: 20px;
border-radius: 8px;
box-shadow: 0 4px 12px var(--shadow-strong);
max-width: 320px;
border: 1px solid var(--border-color);
z-index: 10000;
pointer-events: auto;
transition: background 0.3s ease, border-color 0.3s ease, box-shadow 0.3s ease;
@@ -139,16 +125,12 @@ h3 {
margin-right: 12px;
border-radius: 3px;
box-sizing: border-box;
transition: background 0.3s ease, border-color 0.3s ease;
}
.legend-item span {
color: var(--text-secondary);
font-size: 13px;
transition: color 0.3s ease;
}
.legend-item svg line {
transition: stroke 0.3s ease;
}
.instructions {
margin-top: 15px;
padding-top: 15px;
@@ -173,7 +155,7 @@ h3 {
bottom: 20px;
right: auto;
display: grid;
grid-template-columns: repeat(3, 40px);
grid-template-columns: repeat(4, 40px);
gap: 8px;
z-index: 10002;
pointer-events: auto;
@@ -183,187 +165,10 @@ h3 {
.nav-controls.drawer-open {
}
#legend-panel {
position: fixed;
left: 164px;
bottom: 20px;
right: 20px;
height: 92px;
background: var(--bg-secondary);
backdrop-filter: blur(12px) saturate(180%);
-webkit-backdrop-filter: blur(12px) saturate(180%);
border: 1px solid var(--border-subtle);
border-radius: 6px;
box-shadow: 0 2px 8px var(--shadow-color);
display: grid;
grid-template-columns: repeat(4, 1fr);
align-items: center;
gap: 0;
padding: 0 24px;
box-sizing: border-box;
z-index: 10001;
pointer-events: auto;
transition: background 0.3s ease, border-color 0.3s ease, box-shadow 0.3s ease, right 0.3s cubic-bezier(0.4, 0, 0.2, 1);
}
#legend-panel.drawer-open {
right: 405px;
}
.legend-section {
display: flex;
align-items: center;
justify-content: center;
min-width: 0;
width: -webkit-fill-available;
width: -moz-available;
width: stretch;
position: relative;
}
.legend-section:not(:last-child)::after {
content: '';
position: absolute;
right: 0;
top: 50%;
transform: translateY(-50%);
width: 1px;
height: 48px;
background: var(--border-color);
transition: background 0.3s ease;
}
.legend-stats-row {
display: flex;
gap: 32px;
justify-content: center;
align-items: center;
min-width: 0;
}
.legend-stat-item {
display: flex;
flex-direction: column;
align-items: center;
gap: 4px;
}
.stat-value {
font-size: 19px;
font-weight: 700;
color: var(--text-primary);
line-height: 1;
transition: color 0.3s ease;
}
.stat-label {
font-size: 8px;
font-weight: 500;
text-transform: uppercase;
color: var(--text-secondary);
letter-spacing: 0.5px;
transition: color 0.3s ease;
}
.legend-items-row {
display: flex;
gap: 16px;
align-items: center;
justify-content: center;
min-width: 0;
}
.legend-group {
display: flex;
gap: 16px;
align-items: center;
}
.legend-item-compact {
display: flex;
align-items: center;
gap: 6px;
}
.legend-item-compact span {
font-size: 12px;
font-weight: 500;
text-transform: uppercase;
color: var(--text-secondary);
letter-spacing: 0.5px;
white-space: nowrap;
font-family: inherit;
line-height: 1;
transition: color 0.3s ease;
}
.legend-color-small {
width: 17px;
height: 17px;
border-radius: 2px;
box-sizing: border-box;
flex-shrink: 0;
transition: background 0.3s ease, border-color 0.3s ease;
}
.legend-item-compact svg {
display: block;
flex-shrink: 0;
width: 29px;
height: 14px;
}
.legend-item-compact svg line {
transition: stroke 0.3s ease;
}
.legend-ide-column {
display: flex;
flex-direction: row;
gap: 8px;
align-items: center;
justify-content: center;
min-width: 0;
width: 100%;
}
.legend-ide-label {
font-size: 12px;
font-weight: 500;
text-transform: uppercase;
color: var(--text-secondary);
letter-spacing: 0.5px;
transition: color 0.3s ease;
white-space: nowrap;
}
.legend-ide-select {
width: 120px;
padding: 6px 10px;
border-radius: 4px;
border: 1px solid var(--border-subtle);
background: var(--bg-primary);
color: var(--text-primary);
font-size: 11px;
cursor: pointer;
transition: all 0.3s ease;
}
.legend-ide-select:hover {
border-color: var(--text-secondary);
}
.legend-ide-select:focus {
outline: none;
border-color: '{{ CREWAI_ORANGE }}';
}
.nav-button {
width: 40px;
height: 40px;
background: var(--bg-secondary);
backdrop-filter: blur(12px) saturate(180%);
-webkit-backdrop-filter: blur(12px) saturate(180%);
border: 1px solid var(--border-subtle);
border-radius: 6px;
display: flex;
@@ -376,12 +181,12 @@ h3 {
user-select: none;
pointer-events: auto;
position: relative;
z-index: 10002;
z-index: 10001;
transition: background 0.3s ease, border-color 0.3s ease, color 0.3s ease, box-shadow 0.3s ease;
}
.nav-button:hover {
background: var(--nav-button-hover);
background: var(--border-subtle);
}
#drawer {
@@ -393,10 +198,9 @@ h3 {
background: var(--bg-drawer);
box-shadow: -4px 0 12px var(--shadow-strong);
transition: right 0.3s cubic-bezier(0.4, 0, 0.2, 1), background 0.3s ease, box-shadow 0.3s ease;
z-index: 10003;
overflow: hidden;
transform: translateZ(0);
isolation: isolate;
z-index: 2000;
overflow-y: auto;
padding: 24px;
}
#drawer.open {
@@ -443,22 +247,17 @@ h3 {
justify-content: space-between;
align-items: center;
margin-bottom: 20px;
padding: 24px 24px 16px 24px;
padding-bottom: 16px;
border-bottom: 2px solid '{{ CREWAI_ORANGE }}';
position: relative;
z-index: 2001;
}
.drawer-title {
font-size: 15px;
font-size: 20px;
font-weight: 700;
color: var(--text-primary);
transition: color 0.3s ease;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
flex: 1;
min-width: 0;
}
.drawer-close {
@@ -470,19 +269,12 @@ h3 {
padding: 4px 8px;
line-height: 1;
transition: color 0.3s ease;
display: flex;
align-items: center;
justify-content: center;
}
.drawer-close:hover {
color: '{{ CREWAI_ORANGE }}';
}
.drawer-close i {
display: block;
}
.drawer-open-ide {
background: '{{ CREWAI_ORANGE }}';
border: none;
@@ -500,9 +292,6 @@ h3 {
position: relative;
z-index: 9999;
pointer-events: auto;
white-space: nowrap;
flex-shrink: 0;
min-width: fit-content;
}
.drawer-open-ide:hover {
@@ -516,19 +305,14 @@ h3 {
box-shadow: 0 1px 4px rgba(255, 90, 80, 0.2);
}
.drawer-open-ide svg,
.drawer-open-ide i {
.drawer-open-ide svg {
width: 14px;
height: 14px;
display: block;
}
.drawer-content {
color: '{{ DARK_GRAY }}';
line-height: 1.6;
padding: 0 24px 24px 24px;
overflow-y: auto;
height: calc(100vh - 95px);
}
.drawer-section {
@@ -544,10 +328,6 @@ h3 {
position: relative;
}
.drawer-metadata-grid:has(.drawer-section:nth-child(3):nth-last-child(1)) {
grid-template-columns: 1fr 2fr;
}
.drawer-metadata-grid::before {
content: '';
position: absolute;
@@ -639,35 +419,20 @@ h3 {
grid-column: 2;
display: flex;
flex-direction: column;
justify-content: flex-start;
align-items: flex-start;
justify-content: center;
}
.drawer-metadata-grid:has(.drawer-section:nth-child(3):nth-last-child(1))::after {
right: 66.666%;
}
.drawer-metadata-grid:has(.drawer-section:nth-child(3):nth-last-child(1))::before {
left: 33.333%;
}
.drawer-metadata-grid .drawer-section:nth-child(3):nth-last-child(1) .drawer-section-title {
align-self: flex-start;
}
.drawer-metadata-grid .drawer-section:nth-child(3):nth-last-child(1) > *:not(.drawer-section-title) {
width: 100%;
align-self: stretch;
right: 50%;
}
.drawer-section-title {
font-size: 12px;
text-transform: uppercase;
color: var(--text-secondary);
color: '{{ GRAY }}';
letter-spacing: 0.5px;
margin-bottom: 8px;
font-weight: 600;
transition: color 0.3s ease;
}
.drawer-badge {
@@ -700,44 +465,9 @@ h3 {
padding: 3px 0;
}
.drawer-metadata-grid .drawer-section .drawer-list {
display: flex;
flex-direction: column;
gap: 6px;
}
.drawer-metadata-grid .drawer-section .drawer-list li {
border-bottom: none;
padding: 0;
}
.drawer-metadata-grid .drawer-section:nth-child(3) .drawer-list li {
border-bottom: none;
padding: 0;
}
.drawer-metadata-grid .drawer-section {
overflow: visible;
}
.drawer-metadata-grid .drawer-section .condition-group,
.drawer-metadata-grid .drawer-section .trigger-group {
width: 100%;
box-sizing: border-box;
}
.drawer-metadata-grid .drawer-section .condition-children {
width: 100%;
}
.drawer-metadata-grid .drawer-section .trigger-group-items {
width: 100%;
}
.drawer-metadata-grid .drawer-section .drawer-code-link {
word-break: break-word;
overflow-wrap: break-word;
max-width: 100%;
padding: 3px 0;
}
.drawer-code {
@@ -761,7 +491,6 @@ h3 {
cursor: pointer;
transition: all 0.2s;
display: inline-block;
margin: 3px 2px;
}
.drawer-code-link:hover {

View File

@@ -3,13 +3,12 @@
from __future__ import annotations
from collections import defaultdict
from collections.abc import Iterable
import inspect
from typing import TYPE_CHECKING, Any
from crewai.flow.constants import AND_CONDITION, OR_CONDITION
from crewai.flow.flow_wrappers import FlowCondition
from crewai.flow.types import FlowMethodName, FlowRouteName
from crewai.flow.types import FlowMethodName
from crewai.flow.utils import (
is_flow_condition_dict,
is_simple_flow_condition,
@@ -198,6 +197,8 @@ def build_flow_structure(flow: Flow[Any]) -> FlowStructure:
node_metadata["type"] = "router"
router_methods.append(method_name)
node_metadata["condition_type"] = "IF"
if method_name in flow._router_paths:
node_metadata["router_paths"] = [
str(p) for p in flow._router_paths[method_name]
@@ -209,13 +210,9 @@ def build_flow_structure(flow: Flow[Any]) -> FlowStructure:
]
if hasattr(method, "__condition_type__") and method.__condition_type__:
node_metadata["trigger_condition_type"] = method.__condition_type__
if "condition_type" not in node_metadata:
node_metadata["condition_type"] = method.__condition_type__
if node_metadata.get("is_router") and "condition_type" not in node_metadata:
node_metadata["condition_type"] = "IF"
if (
hasattr(method, "__trigger_condition__")
and method.__trigger_condition__ is not None
@@ -301,9 +298,6 @@ def build_flow_structure(flow: Flow[Any]) -> FlowStructure:
nodes[method_name] = node_metadata
for listener_name, condition_data in flow._listeners.items():
if listener_name in router_methods:
continue
if is_simple_flow_condition(condition_data):
cond_type, methods = condition_data
edges.extend(
@@ -321,60 +315,6 @@ def build_flow_structure(flow: Flow[Any]) -> FlowStructure:
_create_edges_from_condition(condition_data, str(listener_name), nodes)
)
for method_name, node_metadata in nodes.items(): # type: ignore[assignment]
if node_metadata.get("is_router") and "trigger_methods" in node_metadata:
trigger_methods = node_metadata["trigger_methods"]
condition_type = node_metadata.get("trigger_condition_type", OR_CONDITION)
if "trigger_condition" in node_metadata:
edges.extend(
_create_edges_from_condition(
node_metadata["trigger_condition"], # type: ignore[arg-type]
method_name,
nodes,
)
)
else:
edges.extend(
StructureEdge(
source=trigger_method,
target=method_name,
condition_type=condition_type,
is_router_path=False,
)
for trigger_method in trigger_methods
if trigger_method in nodes
)
for router_method_name in router_methods:
if router_method_name not in flow._router_paths:
flow._router_paths[FlowMethodName(router_method_name)] = []
inferred_paths: Iterable[FlowMethodName | FlowRouteName] = set(
flow._router_paths.get(FlowMethodName(router_method_name), [])
)
for condition_data in flow._listeners.values():
trigger_strings: list[str] = []
if is_simple_flow_condition(condition_data):
_, methods = condition_data
trigger_strings = [str(m) for m in methods]
elif is_flow_condition_dict(condition_data):
trigger_strings = _extract_direct_or_triggers(condition_data)
for trigger_str in trigger_strings:
if trigger_str not in nodes:
# This is likely a router path output
inferred_paths.add(trigger_str) # type: ignore[attr-defined]
if inferred_paths:
flow._router_paths[FlowMethodName(router_method_name)] = list(
inferred_paths # type: ignore[arg-type]
)
if router_method_name in nodes:
nodes[router_method_name]["router_paths"] = list(inferred_paths)
for router_method_name in router_methods:
if router_method_name not in flow._router_paths:
continue
@@ -400,7 +340,6 @@ def build_flow_structure(flow: Flow[Any]) -> FlowStructure:
target=str(listener_name),
condition_type=None,
is_router_path=True,
router_path_label=str(path),
)
)
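
Illustrative note (not part of the diff): after this change the router-path edges produced above no longer carry a router_path_label; a hypothetical edge dict, matching the StructureEdge TypedDict in the next file, looks like:

edge = {
    "source": "route_request",    # hypothetical router method name
    "target": "handle_success",   # hypothetical listener name
    "condition_type": None,
    "is_router_path": True,
}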

View File

@@ -20,7 +20,7 @@ class CSSExtension(Extension):
Provides {% css 'path/to/file.css' %} tag syntax.
"""
tags: ClassVar[set[str]] = {"css"} # type: ignore[misc]
tags: ClassVar[set[str]] = {"css"} # type: ignore[assignment]
def parse(self, parser: Parser) -> nodes.Node:
"""Parse {% css 'styles.css' %} tag.
@@ -53,7 +53,7 @@ class JSExtension(Extension):
Provides {% js 'path/to/file.js' %} tag syntax.
"""
tags: ClassVar[set[str]] = {"js"} # type: ignore[misc]
tags: ClassVar[set[str]] = {"js"} # type: ignore[assignment]
def parse(self, parser: Parser) -> nodes.Node:
"""Parse {% js 'script.js' %} tag.
@@ -91,116 +91,6 @@ TEXT_PRIMARY = "#e6edf3"
TEXT_SECONDARY = "#7d8590"
def calculate_node_positions(
dag: FlowStructure,
) -> dict[str, dict[str, int | float]]:
"""Calculate hierarchical positions (level, x, y) for each node.
Args:
dag: FlowStructure containing nodes and edges.
Returns:
Dictionary mapping node names to their position data (level, x, y).
"""
children: dict[str, list[str]] = {name: [] for name in dag["nodes"]}
parents: dict[str, list[str]] = {name: [] for name in dag["nodes"]}
for edge in dag["edges"]:
source = edge["source"]
target = edge["target"]
if source in children and target in children:
children[source].append(target)
parents[target].append(source)
levels: dict[str, int] = {}
queue: list[tuple[str, int]] = []
for start_method in dag["start_methods"]:
if start_method in dag["nodes"]:
levels[start_method] = 0
queue.append((start_method, 0))
visited: set[str] = set()
while queue:
node, level = queue.pop(0)
if node in visited:
continue
visited.add(node)
if node not in levels or levels[node] < level:
levels[node] = level
for child in children.get(node, []):
if child not in visited:
child_level = level + 1
if child not in levels or levels[child] < child_level:
levels[child] = child_level
queue.append((child, child_level))
for name in dag["nodes"]:
if name not in levels:
levels[name] = 0
nodes_by_level: dict[int, list[str]] = {}
for node, level in levels.items():
if level not in nodes_by_level:
nodes_by_level[level] = []
nodes_by_level[level].append(node)
positions: dict[str, dict[str, int | float]] = {}
level_separation = 300 # Vertical spacing between levels
node_spacing = 400 # Horizontal spacing between nodes
parent_count: dict[str, int] = {}
for node, parent_list in parents.items():
parent_count[node] = len(parent_list)
for level, nodes_at_level in sorted(nodes_by_level.items()):
y = level * level_separation
if level == 0:
num_nodes = len(nodes_at_level)
for i, node in enumerate(nodes_at_level):
x = (i - (num_nodes - 1) / 2) * node_spacing
positions[node] = {"level": level, "x": x, "y": y}
else:
for i, node in enumerate(nodes_at_level):
parent_list = parents.get(node, [])
parent_positions: list[float] = [
positions[parent]["x"]
for parent in parent_list
if parent in positions
]
if parent_positions:
if len(parent_positions) > 1 and len(set(parent_positions)) == 1:
base_x = parent_positions[0]
avg_x = base_x + node_spacing * 0.4
else:
avg_x = sum(parent_positions) / len(parent_positions)
else:
avg_x = i * node_spacing * 0.5
positions[node] = {"level": level, "x": avg_x, "y": y}
nodes_at_level_sorted = sorted(
nodes_at_level, key=lambda n: positions[n]["x"]
)
min_spacing = node_spacing * 0.6 # Minimum horizontal distance
for i in range(len(nodes_at_level_sorted) - 1):
current_node = nodes_at_level_sorted[i]
next_node = nodes_at_level_sorted[i + 1]
current_x = positions[current_node]["x"]
next_x = positions[next_node]["x"]
if next_x - current_x < min_spacing:
positions[next_node]["x"] = current_x + min_spacing
return positions
def render_interactive(
dag: FlowStructure,
filename: str = "flow_dag.html",
@@ -220,8 +110,6 @@ def render_interactive(
Returns:
Absolute path to generated HTML file in temporary directory.
"""
node_positions = calculate_node_positions(dag)
nodes_list: list[dict[str, Any]] = []
for name, metadata in dag["nodes"].items():
node_type: str = metadata.get("type", "listen")
@@ -232,37 +120,37 @@ def render_interactive(
if node_type == "start":
color_config = {
"background": "var(--node-bg-start)",
"border": "var(--node-border-start)",
"highlight": {
"background": "var(--node-bg-start)",
"border": "var(--node-border-start)",
},
}
font_color = "var(--node-text-color)"
border_width = 3
elif node_type == "router":
color_config = {
"background": "var(--node-bg-router)",
"background": CREWAI_ORANGE,
"border": CREWAI_ORANGE,
"highlight": {
"background": "var(--node-bg-router)",
"background": CREWAI_ORANGE,
"border": CREWAI_ORANGE,
},
}
font_color = "var(--node-text-color)"
font_color = WHITE
border_width = 2
elif node_type == "router":
color_config = {
"background": DARK_GRAY,
"border": CREWAI_ORANGE,
"highlight": {
"background": DARK_GRAY,
"border": CREWAI_ORANGE,
},
}
font_color = WHITE
border_width = 3
else:
color_config = {
"background": "var(--node-bg-listen)",
"border": "var(--node-border-listen)",
"background": DARK_GRAY,
"border": DARK_GRAY,
"highlight": {
"background": "var(--node-bg-listen)",
"border": "var(--node-border-listen)",
"background": DARK_GRAY,
"border": DARK_GRAY,
},
}
font_color = "var(--node-text-color)"
border_width = 3
font_color = WHITE
border_width = 2
title_parts: list[str] = []
@@ -327,34 +215,25 @@ def render_interactive(
bg_color = color_config["background"]
border_color = color_config["border"]
position_data = node_positions.get(name, {"level": 0, "x": 0, "y": 0})
node_data: dict[str, Any] = {
"id": name,
"label": name,
"title": "".join(title_parts),
"shape": "custom",
"size": 30,
"level": position_data["level"],
"nodeStyle": {
"name": name,
"bgColor": bg_color,
"borderColor": border_color,
"borderWidth": border_width,
"fontColor": font_color,
},
"opacity": 1.0,
"glowSize": 0,
"glowColor": None,
}
# Add x,y only for graphs with 3-4 nodes
total_nodes = len(dag["nodes"])
if 3 <= total_nodes <= 4:
node_data["x"] = position_data["x"]
node_data["y"] = position_data["y"]
nodes_list.append(node_data)
nodes_list.append(
{
"id": name,
"label": name,
"title": "".join(title_parts),
"shape": "custom",
"size": 30,
"nodeStyle": {
"name": name,
"bgColor": bg_color,
"borderColor": border_color,
"borderWidth": border_width,
"fontColor": font_color,
},
"opacity": 1.0,
"glowSize": 0,
"glowColor": None,
}
)
execution_paths: int = calculate_execution_paths(dag)
@@ -367,8 +246,6 @@ def render_interactive(
if edge["is_router_path"]:
edge_color = CREWAI_ORANGE
edge_dashes = [15, 10]
if "router_path_label" in edge:
edge_label = edge["router_path_label"]
elif edge["condition_type"] == "AND":
edge_label = "AND"
edge_color = CREWAI_ORANGE
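
Illustrative note (not part of the diff): a sketch of the removed calculate_node_positions helper applied to a tiny structure; FlowStructure is a TypedDict, so plain dicts with hypothetical node names are sufficient here.

dag = {
    "nodes": {"begin": {}, "step_a": {}, "step_b": {}},
    "edges": [
        {"source": "begin", "target": "step_a", "condition_type": None, "is_router_path": False},
        {"source": "begin", "target": "step_b", "condition_type": None, "is_router_path": False},
    ],
    "start_methods": ["begin"],
}
positions = calculate_node_positions(dag)
# Expected shape: {"begin": {"level": 0, ...}, "step_a": {"level": 1, ...}, "step_b": {"level": 1, ...}}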

View File

@@ -10,7 +10,6 @@ class NodeMetadata(TypedDict, total=False):
is_router: bool
router_paths: list[str]
condition_type: str | None
trigger_condition_type: str | None
trigger_methods: list[str]
trigger_condition: dict[str, Any] | None
method_signature: dict[str, Any]
@@ -23,14 +22,13 @@ class NodeMetadata(TypedDict, total=False):
class_line_number: int
class StructureEdge(TypedDict, total=False):
class StructureEdge(TypedDict):
"""Represents a connection in the flow structure."""
source: str
target: str
condition_type: str | None
is_router_path: bool
router_path_label: str
class FlowStructure(TypedDict):

View File

@@ -1,7 +1,6 @@
import asyncio
from collections.abc import Callable
import inspect
import json
from typing import (
Any,
Literal,
@@ -59,11 +58,7 @@ from crewai.utilities.agent_utils import (
process_llm_response,
render_text_description_and_args,
)
from crewai.utilities.converter import (
Converter,
ConverterError,
generate_model_description,
)
from crewai.utilities.converter import generate_model_description
from crewai.utilities.guardrail import process_guardrail
from crewai.utilities.guardrail_types import GuardrailCallable, GuardrailType
from crewai.utilities.i18n import I18N, get_i18n
@@ -246,11 +241,7 @@ class LiteAgent(FlowTrackable, BaseModel):
"""Return the original role for compatibility with tool interfaces."""
return self.role
def kickoff(
self,
messages: str | list[LLMMessage],
response_format: type[BaseModel] | None = None,
) -> LiteAgentOutput:
def kickoff(self, messages: str | list[LLMMessage]) -> LiteAgentOutput:
"""
Execute the agent with the given messages.
@@ -258,8 +249,6 @@ class LiteAgent(FlowTrackable, BaseModel):
messages: Either a string query or a list of message dictionaries.
If a string is provided, it will be converted to a user message.
If a list is provided, each dict should have 'role' and 'content' keys.
response_format: Optional Pydantic model for structured output. If provided,
overrides self.response_format for this execution.
Returns:
LiteAgentOutput: The result of the agent execution.
@@ -280,13 +269,9 @@ class LiteAgent(FlowTrackable, BaseModel):
self.tools_results = []
# Format messages for the LLM
self._messages = self._format_messages(
messages, response_format=response_format
)
self._messages = self._format_messages(messages)
return self._execute_core(
agent_info=agent_info, response_format=response_format
)
return self._execute_core(agent_info=agent_info)
except Exception as e:
self._printer.print(
@@ -304,9 +289,7 @@ class LiteAgent(FlowTrackable, BaseModel):
)
raise e
def _execute_core(
self, agent_info: dict[str, Any], response_format: type[BaseModel] | None = None
) -> LiteAgentOutput:
def _execute_core(self, agent_info: dict[str, Any]) -> LiteAgentOutput:
# Emit event for agent execution start
crewai_event_bus.emit(
self,
@@ -320,29 +303,15 @@ class LiteAgent(FlowTrackable, BaseModel):
# Execute the agent using invoke loop
agent_finish = self._invoke_loop()
formatted_result: BaseModel | None = None
active_response_format = response_format or self.response_format
if active_response_format:
if self.response_format:
try:
model_schema = generate_model_description(active_response_format)
schema = json.dumps(model_schema, indent=2)
instructions = self.i18n.slice("formatted_task_instructions").format(
output_format=schema
)
converter = Converter(
llm=self.llm,
text=agent_finish.output,
model=active_response_format,
instructions=instructions,
)
result = converter.to_pydantic()
# Cast to BaseModel to ensure type safety
result = self.response_format.model_validate_json(agent_finish.output)
if isinstance(result, BaseModel):
formatted_result = result
except ConverterError as e:
except Exception as e:
self._printer.print(
content=f"Failed to parse output into response format after retries: {e.message}",
content=f"Failed to parse output into response format: {e!s}",
color="yellow",
)
@@ -431,14 +400,8 @@ class LiteAgent(FlowTrackable, BaseModel):
"""
return await asyncio.to_thread(self.kickoff, messages)
def _get_default_system_prompt(
self, response_format: type[BaseModel] | None = None
) -> str:
"""Get the default system prompt for the agent.
Args:
response_format: Optional response format to use instead of self.response_format
"""
def _get_default_system_prompt(self) -> str:
"""Get the default system prompt for the agent."""
base_prompt = ""
if self._parsed_tools:
# Use the prompt template for agents with tools
@@ -459,31 +422,21 @@ class LiteAgent(FlowTrackable, BaseModel):
goal=self.goal,
)
active_response_format = response_format or self.response_format
if active_response_format:
model_description = generate_model_description(active_response_format)
schema_json = json.dumps(model_description, indent=2)
# Add response format instructions if specified
if self.response_format:
schema = generate_model_description(self.response_format)
base_prompt += self.i18n.slice("lite_agent_response_format").format(
response_format=schema_json
response_format=schema
)
return base_prompt
def _format_messages(
self,
messages: str | list[LLMMessage],
response_format: type[BaseModel] | None = None,
) -> list[LLMMessage]:
"""Format messages for the LLM.
Args:
messages: Input messages to format
response_format: Optional response format to use instead of self.response_format
"""
def _format_messages(self, messages: str | list[LLMMessage]) -> list[LLMMessage]:
"""Format messages for the LLM."""
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
system_prompt = self._get_default_system_prompt(response_format=response_format)
system_prompt = self._get_default_system_prompt()
# Add system message at the beginning
formatted_messages: list[LLMMessage] = [
@@ -553,10 +506,6 @@ class LiteAgent(FlowTrackable, BaseModel):
self._append_message(formatted_answer.text, role="assistant")
except OutputParserError as e: # noqa: PERF203
self._printer.print(
content="Failed to parse LLM output. Retrying...",
color="yellow",
)
formatted_answer = handle_output_parser_exception(
e=e,
messages=self._messages,
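
Illustrative note (not part of the diff): with the per-call response_format parameter removed, _execute_core above now validates the agent's final text directly against the configured response_format; a minimal, self-contained sketch of that parsing step using plain pydantic, with a hypothetical model and output string:

from pydantic import BaseModel

class Summary(BaseModel):
    title: str
    points: list[str]

raw_output = '{"title": "Flows", "points": ["event-driven", "typed state"]}'
parsed = Summary.model_validate_json(raw_output)  # the call _execute_core now relies on
assert parsed.title == "Flows"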

View File

@@ -20,7 +20,6 @@ from typing import (
)
from dotenv import load_dotenv
import httpx
from pydantic import BaseModel, Field
from typing_extensions import Self
@@ -38,13 +37,6 @@ from crewai.events.types.tool_usage_events import (
ToolUsageStartedEvent,
)
from crewai.llms.base_llm import BaseLLM
from crewai.llms.constants import (
ANTHROPIC_MODELS,
AZURE_MODELS,
BEDROCK_MODELS,
GEMINI_MODELS,
OPENAI_MODELS,
)
from crewai.utilities import InternalInstructor
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
@@ -61,7 +53,6 @@ if TYPE_CHECKING:
from litellm.utils import supports_response_schema
from crewai.agent.core import Agent
from crewai.llms.hooks.base import BaseInterceptor
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
from crewai.utilities.types import LLMMessage
@@ -330,67 +321,19 @@ class LLM(BaseLLM):
completion_cost: float | None = None
def __new__(cls, model: str, is_litellm: bool = False, **kwargs: Any) -> LLM:
"""Factory method that routes to native SDK or falls back to LiteLLM.
Routing priority:
1. If 'provider' kwarg is present, use that provider with constants
2. If only 'model' kwarg, use constants to infer provider
3. If "/" in model name:
- Check if prefix is a native provider (openai/anthropic/azure/bedrock/gemini)
- If yes, validate model against constants
- If valid, route to native SDK; otherwise route to LiteLLM
"""
"""Factory method that routes to native SDK or falls back to LiteLLM."""
if not model or not isinstance(model, str):
raise ValueError("Model must be a non-empty string")
explicit_provider = kwargs.get("provider")
provider = model.partition("/")[0] if "/" in model else "openai"
if explicit_provider:
provider = explicit_provider
use_native = True
model_string = model
elif "/" in model:
prefix, _, model_part = model.partition("/")
provider_mapping = {
"openai": "openai",
"anthropic": "anthropic",
"claude": "anthropic",
"azure": "azure",
"azure_openai": "azure",
"google": "gemini",
"gemini": "gemini",
"bedrock": "bedrock",
"aws": "bedrock",
}
canonical_provider = provider_mapping.get(prefix.lower())
if canonical_provider and cls._validate_model_in_constants(
model_part, canonical_provider
):
provider = canonical_provider
use_native = True
model_string = model_part
else:
provider = prefix
use_native = False
model_string = model_part
else:
provider = cls._infer_provider_from_model(model)
use_native = True
model_string = model
native_class = cls._get_native_provider(provider) if use_native else None
native_class = cls._get_native_provider(provider)
if native_class and not is_litellm and provider in SUPPORTED_NATIVE_PROVIDERS:
try:
# Remove 'provider' from kwargs if it exists to avoid duplicate keyword argument
kwargs_copy = {k: v for k, v in kwargs.items() if k != 'provider'}
model_string = model.partition("/")[2] if "/" in model else model
return cast(
Self, native_class(model=model_string, provider=provider, **kwargs_copy)
Self, native_class(model=model_string, provider=provider, **kwargs)
)
except NotImplementedError:
raise
except Exception as e:
raise ImportError(f"Error importing native provider: {e}") from e
@@ -404,63 +347,6 @@ class LLM(BaseLLM):
instance.is_litellm = True
return instance
@classmethod
def _validate_model_in_constants(cls, model: str, provider: str) -> bool:
"""Validate if a model name exists in the provider's constants.
Args:
model: The model name to validate
provider: The provider to check against (canonical name)
Returns:
True if the model exists in the provider's constants, False otherwise
"""
if provider == "openai":
return model in OPENAI_MODELS
if provider == "anthropic" or provider == "claude":
return model in ANTHROPIC_MODELS
if provider == "gemini":
return model in GEMINI_MODELS
if provider == "bedrock":
return model in BEDROCK_MODELS
if provider == "azure":
# azure does not provide a list of available models, determine a better way to handle this
return True
return False
@classmethod
def _infer_provider_from_model(cls, model: str) -> str:
"""Infer the provider from the model name.
Args:
model: The model name without provider prefix
Returns:
The inferred provider name, defaults to "openai"
"""
if model in OPENAI_MODELS:
return "openai"
if model in ANTHROPIC_MODELS:
return "anthropic"
if model in GEMINI_MODELS:
return "gemini"
if model in BEDROCK_MODELS:
return "bedrock"
if model in AZURE_MODELS:
return "azure"
return "openai"
@classmethod
def _get_native_provider(cls, provider: str) -> type | None:
"""Get native provider class if available."""
@@ -517,7 +403,6 @@ class LLM(BaseLLM):
callbacks: list[Any] | None = None,
reasoning_effort: Literal["none", "low", "medium", "high"] | None = None,
stream: bool = False,
interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
**kwargs: Any,
) -> None:
"""Initialize LLM instance.
@@ -557,7 +442,6 @@ class LLM(BaseLLM):
self.additional_params = kwargs
self.is_anthropic = self._is_anthropic_model(model)
self.stream = stream
self.interceptor = interceptor
litellm.drop_params = True
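
Illustrative note (not part of the diff): the simplified routing above is pure string handling with no SDK calls; a sketch of what it resolves, mirroring the partition logic in __new__ after this change:

def split_provider(model: str) -> tuple[str, str]:
    # Mirrors the simplified routing: prefix before "/" is the provider,
    # otherwise default to "openai".
    provider = model.partition("/")[0] if "/" in model else "openai"
    name = model.partition("/")[2] if "/" in model else model
    return provider, name

assert split_provider("anthropic/claude-3-5-sonnet-latest") == ("anthropic", "claude-3-5-sonnet-latest")
assert split_provider("gpt-4o") == ("openai", "gpt-4o")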

View File

@@ -1,558 +0,0 @@
from typing import Literal, TypeAlias
OpenAIModels: TypeAlias = Literal[
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-instruct",
"gpt-3.5-turbo-instruct-0914",
"gpt-4",
"gpt-4-0125-preview",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
"gpt-4-vision-preview",
"gpt-4.1",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano",
"gpt-4.1-nano-2025-04-14",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-audio-preview",
"gpt-4o-audio-preview-2024-10-01",
"gpt-4o-audio-preview-2024-12-17",
"gpt-4o-audio-preview-2025-06-03",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-4o-mini-audio-preview",
"gpt-4o-mini-audio-preview-2024-12-17",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-4o-mini-search-preview",
"gpt-4o-mini-search-preview-2025-03-11",
"gpt-4o-mini-transcribe",
"gpt-4o-mini-tts",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-realtime-preview-2025-06-03",
"gpt-4o-search-preview",
"gpt-4o-search-preview-2025-03-11",
"gpt-4o-transcribe",
"gpt-4o-transcribe-diarize",
"gpt-5",
"gpt-5-2025-08-07",
"gpt-5-chat",
"gpt-5-chat-latest",
"gpt-5-codex",
"gpt-5-mini",
"gpt-5-mini-2025-08-07",
"gpt-5-nano",
"gpt-5-nano-2025-08-07",
"gpt-5-pro",
"gpt-5-pro-2025-10-06",
"gpt-5-search-api",
"gpt-5-search-api-2025-10-14",
"gpt-audio",
"gpt-audio-2025-08-28",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-image-1",
"gpt-image-1-mini",
"gpt-realtime",
"gpt-realtime-2025-08-28",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"o1",
"o1-preview",
"o1-2024-12-17",
"o1-mini",
"o1-mini-2024-09-12",
"o1-pro",
"o1-pro-2025-03-19",
"o3-mini",
"o3",
"o4-mini",
"whisper-1",
]
OPENAI_MODELS: list[OpenAIModels] = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-instruct",
"gpt-3.5-turbo-instruct-0914",
"gpt-4",
"gpt-4-0125-preview",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
"gpt-4-vision-preview",
"gpt-4.1",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano",
"gpt-4.1-nano-2025-04-14",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-audio-preview",
"gpt-4o-audio-preview-2024-10-01",
"gpt-4o-audio-preview-2024-12-17",
"gpt-4o-audio-preview-2025-06-03",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-4o-mini-audio-preview",
"gpt-4o-mini-audio-preview-2024-12-17",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-4o-mini-search-preview",
"gpt-4o-mini-search-preview-2025-03-11",
"gpt-4o-mini-transcribe",
"gpt-4o-mini-tts",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-realtime-preview-2025-06-03",
"gpt-4o-search-preview",
"gpt-4o-search-preview-2025-03-11",
"gpt-4o-transcribe",
"gpt-4o-transcribe-diarize",
"gpt-5",
"gpt-5-2025-08-07",
"gpt-5-chat",
"gpt-5-chat-latest",
"gpt-5-codex",
"gpt-5-mini",
"gpt-5-mini-2025-08-07",
"gpt-5-nano",
"gpt-5-nano-2025-08-07",
"gpt-5-pro",
"gpt-5-pro-2025-10-06",
"gpt-5-search-api",
"gpt-5-search-api-2025-10-14",
"gpt-audio",
"gpt-audio-2025-08-28",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-image-1",
"gpt-image-1-mini",
"gpt-realtime",
"gpt-realtime-2025-08-28",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"o1",
"o1-preview",
"o1-2024-12-17",
"o1-mini",
"o1-mini-2024-09-12",
"o1-pro",
"o1-pro-2025-03-19",
"o3-mini",
"o3",
"o4-mini",
"whisper-1",
]
AnthropicModels: TypeAlias = Literal[
"claude-3-7-sonnet-latest",
"claude-3-7-sonnet-20250219",
"claude-3-5-haiku-latest",
"claude-3-5-haiku-20241022",
"claude-haiku-4-5",
"claude-haiku-4-5-20251001",
"claude-sonnet-4-20250514",
"claude-sonnet-4-0",
"claude-4-sonnet-20250514",
"claude-sonnet-4-5",
"claude-sonnet-4-5-20250929",
"claude-3-5-sonnet-latest",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-20240620",
"claude-opus-4-0",
"claude-opus-4-20250514",
"claude-4-opus-20250514",
"claude-opus-4-1",
"claude-opus-4-1-20250805",
"claude-3-opus-latest",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-latest",
"claude-3-haiku-20240307",
]
ANTHROPIC_MODELS: list[AnthropicModels] = [
"claude-3-7-sonnet-latest",
"claude-3-7-sonnet-20250219",
"claude-3-5-haiku-latest",
"claude-3-5-haiku-20241022",
"claude-haiku-4-5",
"claude-haiku-4-5-20251001",
"claude-sonnet-4-20250514",
"claude-sonnet-4-0",
"claude-4-sonnet-20250514",
"claude-sonnet-4-5",
"claude-sonnet-4-5-20250929",
"claude-3-5-sonnet-latest",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-20240620",
"claude-opus-4-0",
"claude-opus-4-20250514",
"claude-4-opus-20250514",
"claude-opus-4-1",
"claude-opus-4-1-20250805",
"claude-3-opus-latest",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-latest",
"claude-3-haiku-20240307",
]
GeminiModels: TypeAlias = Literal[
"gemini-2.5-pro",
"gemini-2.5-pro-preview-03-25",
"gemini-2.5-pro-preview-05-06",
"gemini-2.5-pro-preview-06-05",
"gemini-2.5-flash",
"gemini-2.5-flash-preview-05-20",
"gemini-2.5-flash-preview-04-17",
"gemini-2.5-flash-image",
"gemini-2.5-flash-image-preview",
"gemini-2.5-flash-lite",
"gemini-2.5-flash-lite-preview-06-17",
"gemini-2.5-flash-preview-09-2025",
"gemini-2.5-flash-lite-preview-09-2025",
"gemini-2.5-flash-preview-tts",
"gemini-2.5-pro-preview-tts",
"gemini-2.5-computer-use-preview-10-2025",
"gemini-2.0-flash",
"gemini-2.0-flash-001",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-exp-image-generation",
"gemini-2.0-flash-lite",
"gemini-2.0-flash-lite-001",
"gemini-2.0-flash-lite-preview",
"gemini-2.0-flash-lite-preview-02-05",
"gemini-2.0-flash-preview-image-generation",
"gemini-2.0-flash-thinking-exp",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-flash-thinking-exp-1219",
"gemini-2.0-pro-exp",
"gemini-2.0-pro-exp-02-05",
"gemini-exp-1206",
"gemini-1.5-pro",
"gemini-1.5-flash",
"gemini-1.5-flash-8b",
"gemini-flash-latest",
"gemini-flash-lite-latest",
"gemini-pro-latest",
"gemini-2.0-flash-live-001",
"gemini-live-2.5-flash-preview",
"gemini-2.5-flash-live-preview",
"gemini-robotics-er-1.5-preview",
"gemini-gemma-2-27b-it",
"gemini-gemma-2-9b-it",
"gemma-3-1b-it",
"gemma-3-4b-it",
"gemma-3-12b-it",
"gemma-3-27b-it",
"gemma-3n-e2b-it",
"gemma-3n-e4b-it",
"learnlm-2.0-flash-experimental",
]
GEMINI_MODELS: list[GeminiModels] = [
"gemini-2.5-pro",
"gemini-2.5-pro-preview-03-25",
"gemini-2.5-pro-preview-05-06",
"gemini-2.5-pro-preview-06-05",
"gemini-2.5-flash",
"gemini-2.5-flash-preview-05-20",
"gemini-2.5-flash-preview-04-17",
"gemini-2.5-flash-image",
"gemini-2.5-flash-image-preview",
"gemini-2.5-flash-lite",
"gemini-2.5-flash-lite-preview-06-17",
"gemini-2.5-flash-preview-09-2025",
"gemini-2.5-flash-lite-preview-09-2025",
"gemini-2.5-flash-preview-tts",
"gemini-2.5-pro-preview-tts",
"gemini-2.5-computer-use-preview-10-2025",
"gemini-2.0-flash",
"gemini-2.0-flash-001",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-exp-image-generation",
"gemini-2.0-flash-lite",
"gemini-2.0-flash-lite-001",
"gemini-2.0-flash-lite-preview",
"gemini-2.0-flash-lite-preview-02-05",
"gemini-2.0-flash-preview-image-generation",
"gemini-2.0-flash-thinking-exp",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-flash-thinking-exp-1219",
"gemini-2.0-pro-exp",
"gemini-2.0-pro-exp-02-05",
"gemini-exp-1206",
"gemini-1.5-pro",
"gemini-1.5-flash",
"gemini-1.5-flash-8b",
"gemini-flash-latest",
"gemini-flash-lite-latest",
"gemini-pro-latest",
"gemini-2.0-flash-live-001",
"gemini-live-2.5-flash-preview",
"gemini-2.5-flash-live-preview",
"gemini-robotics-er-1.5-preview",
"gemini-gemma-2-27b-it",
"gemini-gemma-2-9b-it",
"gemma-3-1b-it",
"gemma-3-4b-it",
"gemma-3-12b-it",
"gemma-3-27b-it",
"gemma-3n-e2b-it",
"gemma-3n-e4b-it",
"learnlm-2.0-flash-experimental",
]
AzureModels: TypeAlias = Literal[
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-35-turbo",
"gpt-35-turbo-0125",
"gpt-35-turbo-1106",
"gpt-35-turbo-16k-0613",
"gpt-35-turbo-instruct-0914",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-0125-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-vision",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-mini",
"gpt-5",
"o1",
"o1-mini",
"o1-preview",
"o3-mini",
"o3",
"o4-mini",
]
AZURE_MODELS: list[AzureModels] = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-35-turbo",
"gpt-35-turbo-0125",
"gpt-35-turbo-1106",
"gpt-35-turbo-16k-0613",
"gpt-35-turbo-instruct-0914",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-0125-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-vision",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-mini",
"gpt-5",
"o1",
"o1-mini",
"o1-preview",
"o3-mini",
"o3",
"o4-mini",
]
BedrockModels: TypeAlias = Literal[
"ai21.jamba-1-5-large-v1:0",
"ai21.jamba-1-5-mini-v1:0",
"amazon.nova-lite-v1:0",
"amazon.nova-lite-v1:0:24k",
"amazon.nova-lite-v1:0:300k",
"amazon.nova-micro-v1:0",
"amazon.nova-micro-v1:0:128k",
"amazon.nova-micro-v1:0:24k",
"amazon.nova-premier-v1:0",
"amazon.nova-premier-v1:0:1000k",
"amazon.nova-premier-v1:0:20k",
"amazon.nova-premier-v1:0:8k",
"amazon.nova-premier-v1:0:mm",
"amazon.nova-pro-v1:0",
"amazon.nova-pro-v1:0:24k",
"amazon.nova-pro-v1:0:300k",
"amazon.titan-text-express-v1",
"amazon.titan-text-express-v1:0:8k",
"amazon.titan-text-lite-v1",
"amazon.titan-text-lite-v1:0:4k",
"amazon.titan-tg1-large",
"anthropic.claude-3-5-haiku-20241022-v1:0",
"anthropic.claude-3-5-sonnet-20240620-v1:0",
"anthropic.claude-3-5-sonnet-20241022-v2:0",
"anthropic.claude-3-7-sonnet-20250219-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0:200k",
"anthropic.claude-3-haiku-20240307-v1:0:48k",
"anthropic.claude-3-opus-20240229-v1:0",
"anthropic.claude-3-opus-20240229-v1:0:12k",
"anthropic.claude-3-opus-20240229-v1:0:200k",
"anthropic.claude-3-opus-20240229-v1:0:28k",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-sonnet-20240229-v1:0:200k",
"anthropic.claude-3-sonnet-20240229-v1:0:28k",
"anthropic.claude-haiku-4-5-20251001-v1:0",
"anthropic.claude-instant-v1:2:100k",
"anthropic.claude-opus-4-1-20250805-v1:0",
"anthropic.claude-opus-4-20250514-v1:0",
"anthropic.claude-sonnet-4-20250514-v1:0",
"anthropic.claude-sonnet-4-5-20250929-v1:0",
"anthropic.claude-v2:0:100k",
"anthropic.claude-v2:0:18k",
"anthropic.claude-v2:1:18k",
"anthropic.claude-v2:1:200k",
"cohere.command-r-plus-v1:0",
"cohere.command-r-v1:0",
"cohere.rerank-v3-5:0",
"deepseek.r1-v1:0",
"meta.llama3-1-70b-instruct-v1:0",
"meta.llama3-1-8b-instruct-v1:0",
"meta.llama3-2-11b-instruct-v1:0",
"meta.llama3-2-1b-instruct-v1:0",
"meta.llama3-2-3b-instruct-v1:0",
"meta.llama3-2-90b-instruct-v1:0",
"meta.llama3-3-70b-instruct-v1:0",
"meta.llama3-70b-instruct-v1:0",
"meta.llama3-8b-instruct-v1:0",
"meta.llama4-maverick-17b-instruct-v1:0",
"meta.llama4-scout-17b-instruct-v1:0",
"mistral.mistral-7b-instruct-v0:2",
"mistral.mistral-large-2402-v1:0",
"mistral.mistral-small-2402-v1:0",
"mistral.mixtral-8x7b-instruct-v0:1",
"mistral.pixtral-large-2502-v1:0",
"openai.gpt-oss-120b-1:0",
"openai.gpt-oss-20b-1:0",
"qwen.qwen3-32b-v1:0",
"qwen.qwen3-coder-30b-a3b-v1:0",
"twelvelabs.pegasus-1-2-v1:0",
]
BEDROCK_MODELS: list[BedrockModels] = [
"ai21.jamba-1-5-large-v1:0",
"ai21.jamba-1-5-mini-v1:0",
"amazon.nova-lite-v1:0",
"amazon.nova-lite-v1:0:24k",
"amazon.nova-lite-v1:0:300k",
"amazon.nova-micro-v1:0",
"amazon.nova-micro-v1:0:128k",
"amazon.nova-micro-v1:0:24k",
"amazon.nova-premier-v1:0",
"amazon.nova-premier-v1:0:1000k",
"amazon.nova-premier-v1:0:20k",
"amazon.nova-premier-v1:0:8k",
"amazon.nova-premier-v1:0:mm",
"amazon.nova-pro-v1:0",
"amazon.nova-pro-v1:0:24k",
"amazon.nova-pro-v1:0:300k",
"amazon.titan-text-express-v1",
"amazon.titan-text-express-v1:0:8k",
"amazon.titan-text-lite-v1",
"amazon.titan-text-lite-v1:0:4k",
"amazon.titan-tg1-large",
"anthropic.claude-3-5-haiku-20241022-v1:0",
"anthropic.claude-3-5-sonnet-20240620-v1:0",
"anthropic.claude-3-5-sonnet-20241022-v2:0",
"anthropic.claude-3-7-sonnet-20250219-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0:200k",
"anthropic.claude-3-haiku-20240307-v1:0:48k",
"anthropic.claude-3-opus-20240229-v1:0",
"anthropic.claude-3-opus-20240229-v1:0:12k",
"anthropic.claude-3-opus-20240229-v1:0:200k",
"anthropic.claude-3-opus-20240229-v1:0:28k",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-sonnet-20240229-v1:0:200k",
"anthropic.claude-3-sonnet-20240229-v1:0:28k",
"anthropic.claude-haiku-4-5-20251001-v1:0",
"anthropic.claude-instant-v1:2:100k",
"anthropic.claude-opus-4-1-20250805-v1:0",
"anthropic.claude-opus-4-20250514-v1:0",
"anthropic.claude-sonnet-4-20250514-v1:0",
"anthropic.claude-sonnet-4-5-20250929-v1:0",
"anthropic.claude-v2:0:100k",
"anthropic.claude-v2:0:18k",
"anthropic.claude-v2:1:18k",
"anthropic.claude-v2:1:200k",
"cohere.command-r-plus-v1:0",
"cohere.command-r-v1:0",
"cohere.rerank-v3-5:0",
"deepseek.r1-v1:0",
"meta.llama3-1-70b-instruct-v1:0",
"meta.llama3-1-8b-instruct-v1:0",
"meta.llama3-2-11b-instruct-v1:0",
"meta.llama3-2-1b-instruct-v1:0",
"meta.llama3-2-3b-instruct-v1:0",
"meta.llama3-2-90b-instruct-v1:0",
"meta.llama3-3-70b-instruct-v1:0",
"meta.llama3-70b-instruct-v1:0",
"meta.llama3-8b-instruct-v1:0",
"meta.llama4-maverick-17b-instruct-v1:0",
"meta.llama4-scout-17b-instruct-v1:0",
"mistral.mistral-7b-instruct-v0:2",
"mistral.mistral-large-2402-v1:0",
"mistral.mistral-small-2402-v1:0",
"mistral.mixtral-8x7b-instruct-v0:1",
"mistral.pixtral-large-2502-v1:0",
"openai.gpt-oss-120b-1:0",
"openai.gpt-oss-20b-1:0",
"qwen.qwen3-32b-v1:0",
"qwen.qwen3-coder-30b-a3b-v1:0",
"twelvelabs.pegasus-1-2-v1:0",
]
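The `TypeAlias` literals above give static checkers an exhaustive model vocabulary, while the parallel runtime lists expose the same names for runtime checks. A minimal sketch of how such constants can back a validation helper, with the lists trimmed to two entries so the snippet stands alone:

```python
# Minimal, self-contained sketch of validating a model id against a provider
# list. The real constants live in the module shown above; they are trimmed
# here so the example runs on its own.
from typing import Literal, TypeAlias

OpenAIModels: TypeAlias = Literal["gpt-4o", "gpt-4o-mini"]
OPENAI_MODELS: list[OpenAIModels] = ["gpt-4o", "gpt-4o-mini"]


def is_known_openai_model(model: str) -> bool:
    """Return True when the string matches a known OpenAI model id."""
    return model in OPENAI_MODELS


print(is_known_openai_model("gpt-4o"))       # True
print(is_known_openai_model("gpt-4o-nano"))  # False
```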

View File

@@ -1,6 +0,0 @@
"""Interceptor contracts for crewai"""
from crewai.llms.hooks.base import BaseInterceptor
__all__ = ["BaseInterceptor"]

View File

@@ -1,133 +0,0 @@
"""Base classes for LLM transport interceptors.
This module provides abstract base classes for intercepting and modifying
outbound and inbound messages at the transport level.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Generic, TypeVar
from pydantic_core import core_schema
if TYPE_CHECKING:
from pydantic import GetCoreSchemaHandler
from pydantic_core import CoreSchema
T = TypeVar("T")
U = TypeVar("U")
class BaseInterceptor(ABC, Generic[T, U]):
"""Abstract base class for intercepting transport-level messages.
Provides hooks to intercept and modify outbound and inbound messages
at the transport layer.
Type parameters:
T: Outbound message type (e.g., httpx.Request)
U: Inbound message type (e.g., httpx.Response)
Example:
>>> import httpx
>>> class CustomInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
... def on_outbound(self, message: httpx.Request) -> httpx.Request:
... message.headers["X-Custom-Header"] = "value"
... return message
...
... def on_inbound(self, message: httpx.Response) -> httpx.Response:
... print(f"Status: {message.status_code}")
... return message
"""
@abstractmethod
def on_outbound(self, message: T) -> T:
"""Intercept outbound message before sending.
Args:
message: Outbound message object.
Returns:
Modified message object.
"""
...
@abstractmethod
def on_inbound(self, message: U) -> U:
"""Intercept inbound message after receiving.
Args:
message: Inbound message object.
Returns:
Modified message object.
"""
...
async def aon_outbound(self, message: T) -> T:
"""Async version of on_outbound.
Args:
message: Outbound message object.
Returns:
Modified message object.
"""
raise NotImplementedError
async def aon_inbound(self, message: U) -> U:
"""Async version of on_inbound.
Args:
message: Inbound message object.
Returns:
Modified message object.
"""
raise NotImplementedError
@classmethod
def __get_pydantic_core_schema__(
cls, _source_type: Any, _handler: GetCoreSchemaHandler
) -> CoreSchema:
"""Generate Pydantic core schema for BaseInterceptor.
This allows the generic BaseInterceptor to be used in Pydantic models
without requiring arbitrary_types_allowed=True. The schema validates
that the value is an instance of BaseInterceptor.
Args:
_source_type: The source type being validated (unused).
_handler: Handler for generating schemas (unused).
Returns:
A Pydantic core schema that validates BaseInterceptor instances.
"""
return core_schema.no_info_plain_validator_function(
_validate_interceptor,
serialization=core_schema.plain_serializer_function_ser_schema(
lambda x: x, return_schema=core_schema.any_schema()
),
)
def _validate_interceptor(value: Any) -> BaseInterceptor[T, U]:
"""Validate that the value is a BaseInterceptor instance.
Args:
value: The value to validate.
Returns:
The validated BaseInterceptor instance.
Raises:
ValueError: If the value is not a BaseInterceptor instance.
"""
if not isinstance(value, BaseInterceptor):
raise ValueError(
f"Expected BaseInterceptor instance, got {type(value).__name__}"
)
return value
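
Taken together, the removed base class makes the sync hooks mandatory and leaves the async hooks as optional overrides. A minimal sketch of a concrete interceptor against that API; the header name and log line are illustrative, and the import path refers to the module shown above as it existed before this change:

```python
# Sketch of a concrete interceptor built on the removed base class. The sync
# hooks are required; the async hooks simply delegate to them here.
from __future__ import annotations

import httpx

from crewai.llms.hooks.base import BaseInterceptor  # module removed by this change


class TracingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    def on_outbound(self, message: httpx.Request) -> httpx.Request:
        # Illustrative header; any request mutation goes here.
        message.headers["X-Trace-Id"] = "example-trace-id"
        return message

    def on_inbound(self, message: httpx.Response) -> httpx.Response:
        print(f"response status: {message.status_code}")
        return message

    async def aon_outbound(self, message: httpx.Request) -> httpx.Request:
        return self.on_outbound(message)

    async def aon_inbound(self, message: httpx.Response) -> httpx.Response:
        return self.on_inbound(message)
```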

View File

@@ -1,123 +0,0 @@
"""HTTP transport implementations for LLM request/response interception.
This module provides internal transport classes that integrate with BaseInterceptor
to enable request/response modification at the transport level.
"""
from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING, TypedDict
from httpx import (
AsyncHTTPTransport as _AsyncHTTPTransport,
HTTPTransport as _HTTPTransport,
)
from typing_extensions import NotRequired, Unpack
if TYPE_CHECKING:
from ssl import SSLContext
from httpx import Limits, Request, Response
from httpx._types import CertTypes, ProxyTypes
from crewai.llms.hooks.base import BaseInterceptor
class HTTPTransportKwargs(TypedDict, total=False):
"""Typed dictionary for httpx.HTTPTransport initialization parameters.
These parameters configure the underlying HTTP transport behavior including
SSL verification, proxies, connection limits, and low-level socket options.
"""
verify: bool | str | SSLContext
cert: NotRequired[CertTypes]
trust_env: bool
http1: bool
http2: bool
limits: Limits
proxy: NotRequired[ProxyTypes]
uds: NotRequired[str]
local_address: NotRequired[str]
retries: int
socket_options: NotRequired[
Iterable[
tuple[int, int, int]
| tuple[int, int, bytes | bytearray]
| tuple[int, int, None, int]
]
]
class HTTPTransport(_HTTPTransport):
"""HTTP transport that uses an interceptor for request/response modification.
This transport is used internally when a user provides a BaseInterceptor.
Users should not instantiate this class directly - instead, pass an interceptor
to the LLM client and this transport will be created automatically.
"""
def __init__(
self,
interceptor: BaseInterceptor[Request, Response],
**kwargs: Unpack[HTTPTransportKwargs],
) -> None:
"""Initialize transport with interceptor.
Args:
interceptor: HTTP interceptor for modifying raw request/response objects.
**kwargs: HTTPTransport configuration parameters (verify, cert, proxy, etc.).
"""
super().__init__(**kwargs)
self.interceptor = interceptor
def handle_request(self, request: Request) -> Response:
"""Handle request with interception.
Args:
request: The HTTP request to handle.
Returns:
The HTTP response.
"""
request = self.interceptor.on_outbound(request)
response = super().handle_request(request)
return self.interceptor.on_inbound(response)
class AsyncHTTPTransport(_AsyncHTTPTransport):
"""Async HTTP transport that uses an interceptor for request/response modification.
This transport is used internally when a user provides a BaseInterceptor.
Users should not instantiate this class directly - instead, pass an interceptor
to the LLM client and this transport will be created automatically.
"""
def __init__(
self,
interceptor: BaseInterceptor[Request, Response],
**kwargs: Unpack[HTTPTransportKwargs],
) -> None:
"""Initialize async transport with interceptor.
Args:
interceptor: HTTP interceptor for modifying raw request/response objects.
**kwargs: HTTPTransport configuration parameters (verify, cert, proxy, etc.).
"""
super().__init__(**kwargs)
self.interceptor = interceptor
async def handle_async_request(self, request: Request) -> Response:
"""Handle async request with interception.
Args:
request: The HTTP request to handle.
Returns:
The HTTP response.
"""
request = await self.interceptor.aon_outbound(request)
response = await super().handle_async_request(request)
return await self.interceptor.aon_inbound(response)
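
These transports are normally constructed for you when an interceptor is handed to an LLM client, but for illustration the sync variant can be wired into an `httpx.Client` directly. A sketch continuing from the previous example (`TracingInterceptor` is the class defined there; the import path refers to the pre-change module above):

```python
# Illustration only: wiring the interceptor-aware transport into httpx by hand.
# In normal use the LLM client creates this transport when `interceptor=` is passed.
import httpx

from crewai.llms.hooks.transport import HTTPTransport  # module removed by this change

transport = HTTPTransport(interceptor=TracingInterceptor(), retries=2)
client = httpx.Client(transport=transport)

response = client.get("https://example.com")
print(response.status_code)
```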

View File

@@ -1,15 +1,15 @@
from __future__ import annotations
import json
import logging
import os
from typing import TYPE_CHECKING, Any, cast
from typing import Any, cast
from pydantic import BaseModel
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM
from crewai.llms.hooks.transport import HTTPTransport
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
@@ -17,14 +17,10 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.llms.hooks.base import BaseInterceptor
try:
from anthropic import Anthropic
from anthropic.types import Message
from anthropic.types.tool_use_block import ToolUseBlock
import httpx
except ImportError:
raise ImportError(
'Anthropic native provider not available, to install: uv add "crewai[anthropic]"'
@@ -51,8 +47,7 @@ class AnthropicCompletion(BaseLLM):
stop_sequences: list[str] | None = None,
stream: bool = False,
client_params: dict[str, Any] | None = None,
interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
**kwargs: Any,
**kwargs,
):
"""Initialize Anthropic chat completion client.
@@ -68,7 +63,6 @@ class AnthropicCompletion(BaseLLM):
stop_sequences: Stop sequences (Anthropic uses stop_sequences, not stop)
stream: Enable streaming responses
client_params: Additional parameters for the Anthropic client
interceptor: HTTP interceptor for modifying requests/responses at transport level.
**kwargs: Additional parameters
"""
super().__init__(
@@ -76,7 +70,6 @@ class AnthropicCompletion(BaseLLM):
)
# Client params
self.interceptor = interceptor
self.client_params = client_params
self.base_url = base_url
self.timeout = timeout
@@ -94,30 +87,6 @@ class AnthropicCompletion(BaseLLM):
self.is_claude_3 = "claude-3" in model.lower()
self.supports_tools = self.is_claude_3 # Claude 3+ supports tool use
@property
def stop(self) -> list[str]:
"""Get stop sequences sent to the API."""
return self.stop_sequences
@stop.setter
def stop(self, value: list[str] | str | None) -> None:
"""Set stop sequences.
Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
are properly sent to the Anthropic API.
Args:
value: Stop sequences as a list, single string, or None
"""
if value is None:
self.stop_sequences = []
elif isinstance(value, str):
self.stop_sequences = [value]
elif isinstance(value, list):
self.stop_sequences = value
else:
self.stop_sequences = []
def _get_client_params(self) -> dict[str, Any]:
"""Get client parameters."""
@@ -133,11 +102,6 @@ class AnthropicCompletion(BaseLLM):
"max_retries": self.max_retries,
}
if self.interceptor:
transport = HTTPTransport(interceptor=self.interceptor)
http_client = httpx.Client(transport=transport)
client_params["http_client"] = http_client # type: ignore[assignment]
if self.client_params:
client_params.update(self.client_params)
@@ -146,7 +110,7 @@ class AnthropicCompletion(BaseLLM):
def call(
self,
messages: str | list[LLMMessage],
tools: list[dict[str, Any]] | None = None,
tools: list[dict] | None = None,
callbacks: list[Any] | None = None,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
@@ -169,7 +133,7 @@ class AnthropicCompletion(BaseLLM):
try:
# Emit call started event
self._emit_call_started_event(
messages=messages,
messages=messages, # type: ignore[arg-type]
tools=tools,
callbacks=callbacks,
available_functions=available_functions,
@@ -179,7 +143,7 @@ class AnthropicCompletion(BaseLLM):
# Format messages for Anthropic
formatted_messages, system_message = self._format_messages_for_anthropic(
messages
messages # type: ignore[arg-type]
)
# Prepare completion parameters
@@ -217,7 +181,7 @@ class AnthropicCompletion(BaseLLM):
self,
messages: list[LLMMessage],
system_message: str | None = None,
tools: list[dict[str, Any]] | None = None,
tools: list[dict] | None = None,
) -> dict[str, Any]:
"""Prepare parameters for Anthropic messages API.
@@ -254,9 +218,7 @@ class AnthropicCompletion(BaseLLM):
return params
def _convert_tools_for_interference(
self, tools: list[dict[str, Any]]
) -> list[dict[str, Any]]:
def _convert_tools_for_interference(self, tools: list[dict]) -> list[dict]:
"""Convert CrewAI tool format to Anthropic tool use format."""
anthropic_tools = []
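
For context, the interceptor support removed from this provider (and from the OpenAI provider further down) amounted to building an `httpx.Client` around the interceptor-aware transport and handing it to the SDK. A rough sketch of that pre-change wiring, under the assumption (suggested by the removed lines) that the Anthropic SDK client accepts an `http_client` argument:

```python
# Rough sketch of the wiring removed above: an httpx.Client built around the
# interceptor-aware transport is passed to the Anthropic SDK as `http_client`.
import httpx
from anthropic import Anthropic

from crewai.llms.hooks.transport import HTTPTransport  # pre-change module


def build_anthropic_client(interceptor, api_key: str) -> Anthropic:
    http_client = httpx.Client(transport=HTTPTransport(interceptor=interceptor))
    return Anthropic(api_key=api_key, http_client=http_client)
```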

View File

@@ -3,7 +3,7 @@ from __future__ import annotations
import json
import logging
import os
from typing import TYPE_CHECKING, Any
from typing import Any, TYPE_CHECKING
from pydantic import BaseModel
@@ -13,25 +13,23 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
)
from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.llms.hooks.base import BaseInterceptor
from crewai.tools.base_tool import BaseTool
try:
from azure.ai.inference import (
from azure.ai.inference import ( # type: ignore[import-not-found]
ChatCompletionsClient,
)
from azure.ai.inference.models import (
from azure.ai.inference.models import ( # type: ignore[import-not-found]
ChatCompletions,
ChatCompletionsToolCall,
StreamingChatCompletionsUpdate,
)
from azure.core.credentials import (
from azure.core.credentials import ( # type: ignore[import-not-found]
AzureKeyCredential,
)
from azure.core.exceptions import (
from azure.core.exceptions import ( # type: ignore[import-not-found]
HttpResponseError,
)
@@ -66,8 +64,7 @@ class AzureCompletion(BaseLLM):
max_tokens: int | None = None,
stop: list[str] | None = None,
stream: bool = False,
interceptor: BaseInterceptor[Any, Any] | None = None,
**kwargs: Any,
**kwargs,
):
"""Initialize Azure AI Inference chat completion client.
@@ -85,15 +82,8 @@ class AzureCompletion(BaseLLM):
max_tokens: Maximum tokens in response
stop: Stop sequences
stream: Enable streaming responses
interceptor: HTTP interceptor (not yet supported for Azure).
**kwargs: Additional parameters
"""
if interceptor is not None:
raise NotImplementedError(
"HTTP interceptors are not yet supported for Azure AI Inference provider. "
"Interceptors are currently supported for OpenAI and Anthropic providers only."
)
super().__init__(
model=model, temperature=temperature, stop=stop or [], **kwargs
)
@@ -131,7 +121,7 @@ class AzureCompletion(BaseLLM):
if self.api_version:
client_kwargs["api_version"] = self.api_version
self.client = ChatCompletionsClient(**client_kwargs) # type: ignore[arg-type]
self.client = ChatCompletionsClient(**client_kwargs)
self.top_p = top_p
self.frequency_penalty = frequency_penalty
@@ -259,7 +249,7 @@ class AzureCompletion(BaseLLM):
def _prepare_completion_params(
self,
messages: list[LLMMessage],
tools: list[dict[str, Any]] | None = None,
tools: list[dict] | None = None,
response_model: type[BaseModel] | None = None,
) -> dict[str, Any]:
"""Prepare parameters for Azure AI Inference chat completion.
@@ -312,9 +302,7 @@ class AzureCompletion(BaseLLM):
return params
def _convert_tools_for_interference(
self, tools: list[dict[str, Any]]
) -> list[dict[str, Any]]:
def _convert_tools_for_interference(self, tools: list[dict]) -> list[dict]:
"""Convert CrewAI tool format to Azure OpenAI function calling format."""
from crewai.llms.providers.utils.common import safe_tool_conversion

View File

@@ -30,8 +30,6 @@ if TYPE_CHECKING:
ToolTypeDef,
)
from crewai.llms.hooks.base import BaseInterceptor
try:
from boto3.session import Session
@@ -159,9 +157,8 @@ class BedrockCompletion(BaseLLM):
guardrail_config: dict[str, Any] | None = None,
additional_model_request_fields: dict[str, Any] | None = None,
additional_model_response_field_paths: list[str] | None = None,
interceptor: BaseInterceptor[Any, Any] | None = None,
**kwargs: Any,
) -> None:
**kwargs,
):
"""Initialize AWS Bedrock completion client.
Args:
@@ -179,15 +176,8 @@ class BedrockCompletion(BaseLLM):
guardrail_config: Guardrail configuration for content filtering
additional_model_request_fields: Model-specific request parameters
additional_model_response_field_paths: Custom response field paths
interceptor: HTTP interceptor (not yet supported for Bedrock).
**kwargs: Additional parameters
"""
if interceptor is not None:
raise NotImplementedError(
"HTTP interceptors are not yet supported for AWS Bedrock provider. "
"Interceptors are currently supported for OpenAI and Anthropic providers only."
)
# Extract provider from kwargs to avoid duplicate argument
kwargs.pop("provider", None)
@@ -243,30 +233,6 @@ class BedrockCompletion(BaseLLM):
# Handle inference profiles for newer models
self.model_id = model
@property
def stop(self) -> list[str]:
"""Get stop sequences sent to the API."""
return list(self.stop_sequences)
@stop.setter
def stop(self, value: Sequence[str] | str | None) -> None:
"""Set stop sequences.
Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
are properly sent to the Bedrock API.
Args:
value: Stop sequences as a Sequence, single string, or None
"""
if value is None:
self.stop_sequences = []
elif isinstance(value, str):
self.stop_sequences = [value]
elif isinstance(value, Sequence):
self.stop_sequences = list(value)
else:
self.stop_sequences = []
def call(
self,
messages: str | list[LLMMessage],
@@ -281,7 +247,7 @@ class BedrockCompletion(BaseLLM):
try:
# Emit call started event
self._emit_call_started_event(
messages=messages,
messages=messages, # type: ignore[arg-type]
tools=tools,
callbacks=callbacks,
available_functions=available_functions,
@@ -774,9 +740,7 @@ class BedrockCompletion(BaseLLM):
return converse_messages, system_message
@staticmethod
def _format_tools_for_converse(
tools: list[dict[str, Any]],
) -> list[ConverseToolTypeDef]:
def _format_tools_for_converse(tools: list[dict]) -> list[ConverseToolTypeDef]:
"""Convert CrewAI tools to Converse API format following AWS specification."""
from crewai.llms.providers.utils.common import safe_tool_conversion
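
The `stop` properties removed in this hunk and in the Anthropic and Gemini hunks all perform the same normalization before the value reaches the provider API. A small standalone sketch of that behaviour:

```python
# Standalone sketch of the normalization performed by the removed `stop` setters:
# None -> [], a single string -> [string], any other sequence -> list(sequence).
from collections.abc import Sequence


def normalize_stop(value: Sequence[str] | str | None) -> list[str]:
    if value is None:
        return []
    if isinstance(value, str):
        return [value]
    if isinstance(value, Sequence):
        return list(value)
    return []


assert normalize_stop(None) == []
assert normalize_stop("\nObservation:") == ["\nObservation:"]
assert normalize_stop(("a", "b")) == ["a", "b"]
```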

View File

@@ -1,3 +1,4 @@
import json
import logging
import os
from typing import Any, cast
@@ -6,7 +7,6 @@ from pydantic import BaseModel
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM
from crewai.llms.hooks.base import BaseInterceptor
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
@@ -45,14 +45,13 @@ class GeminiCompletion(BaseLLM):
stream: bool = False,
safety_settings: dict[str, Any] | None = None,
client_params: dict[str, Any] | None = None,
interceptor: BaseInterceptor[Any, Any] | None = None,
**kwargs: Any,
**kwargs,
):
"""Initialize Google Gemini chat completion client.
Args:
model: Gemini model name (e.g., 'gemini-2.0-flash-001', 'gemini-1.5-pro')
api_key: Google API key (defaults to GEMINI_API_KEY or GEMINI_API_KEY env var)
api_key: Google API key (defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var)
project: Google Cloud project ID (for Vertex AI)
location: Google Cloud location (for Vertex AI, defaults to 'us-central1')
temperature: Sampling temperature (0-2)
@@ -64,15 +63,8 @@ class GeminiCompletion(BaseLLM):
safety_settings: Safety filter settings
client_params: Additional parameters to pass to the Google Gen AI Client constructor.
Supports parameters like http_options, credentials, debug_config, etc.
interceptor: HTTP interceptor (not yet supported for Gemini).
**kwargs: Additional parameters
"""
if interceptor is not None:
raise NotImplementedError(
"HTTP interceptors are not yet supported for Google Gemini provider. "
"Interceptors are currently supported for OpenAI and Anthropic providers only."
)
super().__init__(
model=model, temperature=temperature, stop=stop_sequences or [], **kwargs
)
@@ -82,7 +74,7 @@ class GeminiCompletion(BaseLLM):
# Get API configuration with environment variable fallbacks
self.api_key = (
api_key or os.getenv("GEMINI_API_KEY") or os.getenv("GEMINI_API_KEY")
api_key or os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
)
self.project = project or os.getenv("GOOGLE_CLOUD_PROJECT")
self.location = location or os.getenv("GOOGLE_CLOUD_LOCATION") or "us-central1"
@@ -104,31 +96,7 @@ class GeminiCompletion(BaseLLM):
self.is_gemini_1_5 = "gemini-1.5" in model.lower()
self.supports_tools = self.is_gemini_1_5 or self.is_gemini_2
@property
def stop(self) -> list[str]:
"""Get stop sequences sent to the API."""
return self.stop_sequences
@stop.setter
def stop(self, value: list[str] | str | None) -> None:
"""Set stop sequences.
Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
are properly sent to the Gemini API.
Args:
value: Stop sequences as a list, single string, or None
"""
if value is None:
self.stop_sequences = []
elif isinstance(value, str):
self.stop_sequences = [value]
elif isinstance(value, list):
self.stop_sequences = value
else:
self.stop_sequences = []
def _initialize_client(self, use_vertexai: bool = False) -> genai.Client: # type: ignore[no-any-unimported]
def _initialize_client(self, use_vertexai: bool = False) -> genai.Client:
"""Initialize the Google Gen AI client with proper parameter handling.
Args:
@@ -165,7 +133,7 @@ class GeminiCompletion(BaseLLM):
return genai.Client(**client_params)
except Exception as e:
raise ValueError(
"Either GEMINI_API_KEY/GEMINI_API_KEY (for Gemini API) or "
"Either GOOGLE_API_KEY/GEMINI_API_KEY (for Gemini API) or "
"GOOGLE_CLOUD_PROJECT (for Vertex AI) must be set"
) from e
@@ -203,7 +171,7 @@ class GeminiCompletion(BaseLLM):
def call(
self,
messages: str | list[LLMMessage],
tools: list[dict[str, Any]] | None = None,
tools: list[dict] | None = None,
callbacks: list[Any] | None = None,
available_functions: dict[str, Any] | None = None,
from_task: Any | None = None,
@@ -225,7 +193,7 @@ class GeminiCompletion(BaseLLM):
"""
try:
self._emit_call_started_event(
messages=messages,
messages=messages, # type: ignore[arg-type]
tools=tools,
callbacks=callbacks,
available_functions=available_functions,
@@ -235,7 +203,7 @@ class GeminiCompletion(BaseLLM):
self.tools = tools
formatted_content, system_instruction = self._format_messages_for_gemini(
messages
messages # type: ignore[arg-type]
)
config = self._prepare_generation_config(
@@ -277,10 +245,10 @@ class GeminiCompletion(BaseLLM):
)
raise
def _prepare_generation_config( # type: ignore[no-any-unimported]
def _prepare_generation_config(
self,
system_instruction: str | None = None,
tools: list[dict[str, Any]] | None = None,
tools: list[dict] | None = None,
response_model: type[BaseModel] | None = None,
) -> types.GenerateContentConfig:
"""Prepare generation config for Google Gemini API.
@@ -329,9 +297,7 @@ class GeminiCompletion(BaseLLM):
return types.GenerateContentConfig(**config_params)
def _convert_tools_for_interference( # type: ignore[no-any-unimported]
self, tools: list[dict[str, Any]]
) -> list[types.Tool]:
def _convert_tools_for_interference(self, tools: list[dict]) -> list[types.Tool]:
"""Convert CrewAI tool format to Gemini function declaration format."""
gemini_tools = []
@@ -354,7 +320,7 @@ class GeminiCompletion(BaseLLM):
return gemini_tools
def _format_messages_for_gemini( # type: ignore[no-any-unimported]
def _format_messages_for_gemini(
self, messages: str | list[LLMMessage]
) -> tuple[list[types.Content], str | None]:
"""Format messages for Gemini API.
@@ -398,7 +364,7 @@ class GeminiCompletion(BaseLLM):
return contents, system_instruction
def _handle_completion( # type: ignore[no-any-unimported]
def _handle_completion(
self,
contents: list[types.Content],
system_instruction: str | None,
@@ -465,7 +431,7 @@ class GeminiCompletion(BaseLLM):
return content
def _handle_streaming_completion( # type: ignore[no-any-unimported]
def _handle_streaming_completion(
self,
contents: list[types.Content],
config: types.GenerateContentConfig,
@@ -594,9 +560,8 @@ class GeminiCompletion(BaseLLM):
}
return {"total_tokens": 0}
def _convert_contents_to_dict( # type: ignore[no-any-unimported]
self,
contents: list[types.Content],
def _convert_contents_to_dict(
self, contents: list[types.Content]
) -> list[dict[str, str]]:
"""Convert contents to dict format."""
return [
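
The credential handling in this hunk resolves an explicit key first, then environment variables, with a separate fallback chain for Vertex AI. A compact sketch of that post-change resolution order:

```python
# Sketch of the post-change credential resolution shown above: explicit value,
# then GOOGLE_API_KEY, then GEMINI_API_KEY; Vertex AI settings fall back to
# GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION (default "us-central1").
import os


def resolve_gemini_credentials(
    api_key: str | None = None,
    project: str | None = None,
    location: str | None = None,
) -> dict[str, str | None]:
    return {
        "api_key": api_key or os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY"),
        "project": project or os.getenv("GOOGLE_CLOUD_PROJECT"),
        "location": location or os.getenv("GOOGLE_CLOUD_LOCATION") or "us-central1",
    }


print(resolve_gemini_credentials(api_key="explicit-key"))
```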

View File

@@ -6,7 +6,6 @@ import logging
import os
from typing import TYPE_CHECKING, Any
import httpx
from openai import APIConnectionError, NotFoundError, OpenAI
from openai.types.chat import ChatCompletion, ChatCompletionChunk
from openai.types.chat.chat_completion import Choice
@@ -15,7 +14,6 @@ from pydantic import BaseModel
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM
from crewai.llms.hooks.transport import HTTPTransport
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
@@ -25,7 +23,6 @@ from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.agent.core import Agent
from crewai.llms.hooks.base import BaseInterceptor
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
@@ -62,7 +59,6 @@ class OpenAICompletion(BaseLLM):
top_logprobs: int | None = None,
reasoning_effort: str | None = None,
provider: str | None = None,
interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
**kwargs: Any,
) -> None:
"""Initialize OpenAI chat completion client."""
@@ -70,7 +66,6 @@ class OpenAICompletion(BaseLLM):
if provider is None:
provider = kwargs.pop("provider", "openai")
self.interceptor = interceptor
# Client configuration attributes
self.organization = organization
self.project = project
@@ -93,11 +88,6 @@ class OpenAICompletion(BaseLLM):
)
client_config = self._get_client_params()
if self.interceptor:
transport = HTTPTransport(interceptor=self.interceptor)
http_client = httpx.Client(transport=transport)
client_config["http_client"] = http_client
self.client = OpenAI(**client_config)
# Completion parameters

View File

@@ -1,37 +0,0 @@
"""MCP (Model Context Protocol) client support for CrewAI agents.
This module provides native MCP client functionality, allowing CrewAI agents
to connect to any MCP-compliant server using various transport types.
"""
from crewai.mcp.client import MCPClient
from crewai.mcp.config import (
MCPServerConfig,
MCPServerHTTP,
MCPServerSSE,
MCPServerStdio,
)
from crewai.mcp.filters import (
StaticToolFilter,
ToolFilter,
ToolFilterContext,
create_dynamic_tool_filter,
create_static_tool_filter,
)
from crewai.mcp.transports.base import BaseTransport, TransportType
__all__ = [
"BaseTransport",
"MCPClient",
"MCPServerConfig",
"MCPServerHTTP",
"MCPServerSSE",
"MCPServerStdio",
"StaticToolFilter",
"ToolFilter",
"ToolFilterContext",
"TransportType",
"create_dynamic_tool_filter",
"create_static_tool_filter",
]

View File

@@ -1,742 +0,0 @@
"""MCP client with session management for CrewAI agents."""
import asyncio
from collections.abc import Callable
from contextlib import AsyncExitStack
from datetime import datetime
import logging
import time
from typing import Any
from typing_extensions import Self
# BaseExceptionGroup is available in Python 3.11+
try:
from builtins import BaseExceptionGroup
except ImportError:
# Fallback for Python < 3.11 (shouldn't happen in practice)
BaseExceptionGroup = Exception
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.mcp_events import (
MCPConnectionCompletedEvent,
MCPConnectionFailedEvent,
MCPConnectionStartedEvent,
MCPToolExecutionCompletedEvent,
MCPToolExecutionFailedEvent,
MCPToolExecutionStartedEvent,
)
from crewai.mcp.transports.base import BaseTransport
from crewai.mcp.transports.http import HTTPTransport
from crewai.mcp.transports.sse import SSETransport
from crewai.mcp.transports.stdio import StdioTransport
# MCP Connection timeout constants (in seconds)
MCP_CONNECTION_TIMEOUT = 30 # Increased for slow servers
MCP_TOOL_EXECUTION_TIMEOUT = 30
MCP_DISCOVERY_TIMEOUT = 30 # Increased for slow servers
MCP_MAX_RETRIES = 3
# Simple in-memory cache for MCP tool schemas (duration: 5 minutes)
_mcp_schema_cache: dict[str, tuple[dict[str, Any], float]] = {}
_cache_ttl = 300 # 5 minutes
class MCPClient:
"""MCP client with session management.
This client manages connections to MCP servers and provides a high-level
interface for interacting with MCP tools, prompts, and resources.
Example:
```python
transport = StdioTransport(command="python", args=["server.py"])
client = MCPClient(transport)
async with client:
tools = await client.list_tools()
result = await client.call_tool("tool_name", {"arg": "value"})
```
"""
def __init__(
self,
transport: BaseTransport,
connect_timeout: int = MCP_CONNECTION_TIMEOUT,
execution_timeout: int = MCP_TOOL_EXECUTION_TIMEOUT,
discovery_timeout: int = MCP_DISCOVERY_TIMEOUT,
max_retries: int = MCP_MAX_RETRIES,
cache_tools_list: bool = False,
logger: logging.Logger | None = None,
) -> None:
"""Initialize MCP client.
Args:
transport: Transport instance for MCP server connection.
connect_timeout: Connection timeout in seconds.
execution_timeout: Tool execution timeout in seconds.
discovery_timeout: Tool discovery timeout in seconds.
max_retries: Maximum retry attempts for operations.
cache_tools_list: Whether to cache tool list results.
logger: Optional logger instance.
"""
self.transport = transport
self.connect_timeout = connect_timeout
self.execution_timeout = execution_timeout
self.discovery_timeout = discovery_timeout
self.max_retries = max_retries
self.cache_tools_list = cache_tools_list
# self._logger = logger or logging.getLogger(__name__)
self._session: Any = None
self._initialized = False
self._exit_stack = AsyncExitStack()
self._was_connected = False
@property
def connected(self) -> bool:
"""Check if client is connected to server."""
return self.transport.connected and self._initialized
@property
def session(self) -> Any:
"""Get the MCP session."""
if self._session is None:
raise RuntimeError("Client not connected. Call connect() first.")
return self._session
def _get_server_info(self) -> tuple[str, str | None, str | None]:
"""Get server information for events.
Returns:
Tuple of (server_name, server_url, transport_type).
"""
if isinstance(self.transport, StdioTransport):
server_name = f"{self.transport.command} {' '.join(self.transport.args)}"
server_url = None
transport_type = self.transport.transport_type.value
elif isinstance(self.transport, HTTPTransport):
server_name = self.transport.url
server_url = self.transport.url
transport_type = self.transport.transport_type.value
elif isinstance(self.transport, SSETransport):
server_name = self.transport.url
server_url = self.transport.url
transport_type = self.transport.transport_type.value
else:
server_name = "Unknown MCP Server"
server_url = None
transport_type = (
self.transport.transport_type.value
if hasattr(self.transport, "transport_type")
else None
)
return server_name, server_url, transport_type
async def connect(self) -> Self:
"""Connect to MCP server and initialize session.
Returns:
Self for method chaining.
Raises:
ConnectionError: If connection fails.
ImportError: If MCP SDK not available.
"""
if self.connected:
return self
# Get server info for events
server_name, server_url, transport_type = self._get_server_info()
is_reconnect = self._was_connected
# Emit connection started event
started_at = datetime.now()
crewai_event_bus.emit(
self,
MCPConnectionStartedEvent(
server_name=server_name,
server_url=server_url,
transport_type=transport_type,
is_reconnect=is_reconnect,
connect_timeout=self.connect_timeout,
),
)
try:
from mcp import ClientSession
# Use AsyncExitStack to manage transport and session contexts together
# This ensures they're in the same async scope and prevents cancel scope errors
# Always enter transport context via exit stack (it handles already-connected state)
await self._exit_stack.enter_async_context(self.transport)
# Create ClientSession with transport streams
self._session = ClientSession(
self.transport.read_stream,
self.transport.write_stream,
)
# Enter the session's async context manager via exit stack
await self._exit_stack.enter_async_context(self._session)
# Initialize the session (required by MCP protocol)
try:
await asyncio.wait_for(
self._session.initialize(),
timeout=self.connect_timeout,
)
except asyncio.CancelledError:
# If initialization was cancelled (e.g., event loop closing),
# cleanup and re-raise - don't suppress cancellation
await self._cleanup_on_error()
raise
except BaseExceptionGroup as eg:
# Handle exception groups from anyio task groups
# Extract the actual meaningful error (not GeneratorExit)
actual_error = None
for exc in eg.exceptions:
if isinstance(exc, Exception) and not isinstance(
exc, GeneratorExit
):
# Check if it's an HTTP error (like 401)
error_msg = str(exc).lower()
if "401" in error_msg or "unauthorized" in error_msg:
actual_error = exc
break
if "cancel scope" not in error_msg and "task" not in error_msg:
actual_error = exc
break
await self._cleanup_on_error()
if actual_error:
raise ConnectionError(
f"Failed to connect to MCP server: {actual_error}"
) from actual_error
raise ConnectionError(f"Failed to connect to MCP server: {eg}") from eg
self._initialized = True
self._was_connected = True
completed_at = datetime.now()
connection_duration_ms = (completed_at - started_at).total_seconds() * 1000
crewai_event_bus.emit(
self,
MCPConnectionCompletedEvent(
server_name=server_name,
server_url=server_url,
transport_type=transport_type,
started_at=started_at,
completed_at=completed_at,
connection_duration_ms=connection_duration_ms,
is_reconnect=is_reconnect,
),
)
return self
except ImportError as e:
await self._cleanup_on_error()
error_msg = (
"MCP library not available. Please install with: pip install mcp"
)
self._emit_connection_failed(
server_name,
server_url,
transport_type,
error_msg,
"import_error",
started_at,
)
raise ImportError(error_msg) from e
except asyncio.TimeoutError as e:
await self._cleanup_on_error()
error_msg = f"MCP connection timed out after {self.connect_timeout} seconds. The server may be slow or unreachable."
self._emit_connection_failed(
server_name,
server_url,
transport_type,
error_msg,
"timeout",
started_at,
)
raise ConnectionError(error_msg) from e
except asyncio.CancelledError:
# Re-raise cancellation - don't suppress it
await self._cleanup_on_error()
self._emit_connection_failed(
server_name,
server_url,
transport_type,
"Connection cancelled",
"cancelled",
started_at,
)
raise
except BaseExceptionGroup as eg:
# Handle exception groups from anyio task groups at outer level
actual_error = None
for exc in eg.exceptions:
if isinstance(exc, Exception) and not isinstance(exc, GeneratorExit):
error_msg = str(exc).lower()
if "401" in error_msg or "unauthorized" in error_msg:
actual_error = exc
break
if "cancel scope" not in error_msg and "task" not in error_msg:
actual_error = exc
break
await self._cleanup_on_error()
error_type = (
"authentication"
if actual_error
and (
"401" in str(actual_error).lower()
or "unauthorized" in str(actual_error).lower()
)
else "network"
)
error_msg = str(actual_error) if actual_error else str(eg)
self._emit_connection_failed(
server_name,
server_url,
transport_type,
error_msg,
error_type,
started_at,
)
if actual_error:
raise ConnectionError(
f"Failed to connect to MCP server: {actual_error}"
) from actual_error
raise ConnectionError(f"Failed to connect to MCP server: {eg}") from eg
except Exception as e:
await self._cleanup_on_error()
error_type = (
"authentication"
if "401" in str(e).lower() or "unauthorized" in str(e).lower()
else "network"
)
self._emit_connection_failed(
server_name, server_url, transport_type, str(e), error_type, started_at
)
raise ConnectionError(f"Failed to connect to MCP server: {e}") from e
def _emit_connection_failed(
self,
server_name: str,
server_url: str | None,
transport_type: str | None,
error: str,
error_type: str,
started_at: datetime,
) -> None:
"""Emit connection failed event."""
failed_at = datetime.now()
crewai_event_bus.emit(
self,
MCPConnectionFailedEvent(
server_name=server_name,
server_url=server_url,
transport_type=transport_type,
error=error,
error_type=error_type,
started_at=started_at,
failed_at=failed_at,
),
)
async def _cleanup_on_error(self) -> None:
"""Cleanup resources when an error occurs during connection."""
try:
await self._exit_stack.aclose()
except Exception as e:
# Cleanup failed - surface the failure as a RuntimeError for the caller
raise RuntimeError(f"Error during MCP client cleanup: {e}") from e
finally:
self._session = None
self._initialized = False
self._exit_stack = AsyncExitStack()
async def disconnect(self) -> None:
"""Disconnect from MCP server and cleanup resources."""
if not self.connected:
return
try:
await self._exit_stack.aclose()
except Exception as e:
raise RuntimeError(f"Error during MCP client disconnect: {e}") from e
finally:
self._session = None
self._initialized = False
self._exit_stack = AsyncExitStack()
async def list_tools(self, use_cache: bool | None = None) -> list[dict[str, Any]]:
"""List available tools from MCP server.
Args:
use_cache: Whether to use cached results. If None, uses
client's cache_tools_list setting.
Returns:
List of tool definitions with name, description, and inputSchema.
"""
if not self.connected:
await self.connect()
# Check cache if enabled
use_cache = use_cache if use_cache is not None else self.cache_tools_list
if use_cache:
cache_key = self._get_cache_key("tools")
if cache_key in _mcp_schema_cache:
cached_data, cache_time = _mcp_schema_cache[cache_key]
if time.time() - cache_time < _cache_ttl:
# Logger removed - return cached data
return cached_data
# List tools with timeout and retries
tools = await self._retry_operation(
self._list_tools_impl,
timeout=self.discovery_timeout,
)
# Cache results if enabled
if use_cache:
cache_key = self._get_cache_key("tools")
_mcp_schema_cache[cache_key] = (tools, time.time())
return tools
async def _list_tools_impl(self) -> list[dict[str, Any]]:
"""Internal implementation of list_tools."""
tools_result = await asyncio.wait_for(
self.session.list_tools(),
timeout=self.discovery_timeout,
)
return [
{
"name": tool.name,
"description": getattr(tool, "description", ""),
"inputSchema": getattr(tool, "inputSchema", {}),
}
for tool in tools_result.tools
]
async def call_tool(
self, tool_name: str, arguments: dict[str, Any] | None = None
) -> Any:
"""Call a tool on the MCP server.
Args:
tool_name: Name of the tool to call.
arguments: Tool arguments.
Returns:
Tool execution result.
"""
if not self.connected:
await self.connect()
arguments = arguments or {}
cleaned_arguments = self._clean_tool_arguments(arguments)
# Get server info for events
server_name, server_url, transport_type = self._get_server_info()
# Emit tool execution started event
started_at = datetime.now()
crewai_event_bus.emit(
self,
MCPToolExecutionStartedEvent(
server_name=server_name,
server_url=server_url,
transport_type=transport_type,
tool_name=tool_name,
tool_args=cleaned_arguments,
),
)
try:
result = await self._retry_operation(
lambda: self._call_tool_impl(tool_name, cleaned_arguments),
timeout=self.execution_timeout,
)
completed_at = datetime.now()
execution_duration_ms = (completed_at - started_at).total_seconds() * 1000
crewai_event_bus.emit(
self,
MCPToolExecutionCompletedEvent(
server_name=server_name,
server_url=server_url,
transport_type=transport_type,
tool_name=tool_name,
tool_args=cleaned_arguments,
result=result,
started_at=started_at,
completed_at=completed_at,
execution_duration_ms=execution_duration_ms,
),
)
return result
except Exception as e:
failed_at = datetime.now()
error_type = (
"timeout"
if isinstance(e, (asyncio.TimeoutError, ConnectionError))
and "timeout" in str(e).lower()
else "server_error"
)
crewai_event_bus.emit(
self,
MCPToolExecutionFailedEvent(
server_name=server_name,
server_url=server_url,
transport_type=transport_type,
tool_name=tool_name,
tool_args=cleaned_arguments,
error=str(e),
error_type=error_type,
started_at=started_at,
failed_at=failed_at,
),
)
raise
def _clean_tool_arguments(self, arguments: dict[str, Any]) -> dict[str, Any]:
"""Clean tool arguments by removing None values and fixing formats.
Args:
arguments: Raw tool arguments.
Returns:
Cleaned arguments ready for MCP server.
"""
cleaned = {}
for key, value in arguments.items():
# Skip None values
if value is None:
continue
# Fix sources array format: convert ["web"] to [{"type": "web"}]
if key == "sources" and isinstance(value, list):
fixed_sources = []
for item in value:
if isinstance(item, str):
# Convert string to object format
fixed_sources.append({"type": item})
elif isinstance(item, dict):
# Already in correct format
fixed_sources.append(item)
else:
# Keep as is if unknown format
fixed_sources.append(item)
if fixed_sources:
cleaned[key] = fixed_sources
continue
# Recursively clean nested dictionaries
if isinstance(value, dict):
nested_cleaned = self._clean_tool_arguments(value)
if nested_cleaned: # Only add if not empty
cleaned[key] = nested_cleaned
elif isinstance(value, list):
# Clean list items
cleaned_list = []
for item in value:
if isinstance(item, dict):
cleaned_item = self._clean_tool_arguments(item)
if cleaned_item:
cleaned_list.append(cleaned_item)
elif item is not None:
cleaned_list.append(item)
if cleaned_list:
cleaned[key] = cleaned_list
else:
# Keep primitive values
cleaned[key] = value
return cleaned
async def _call_tool_impl(self, tool_name: str, arguments: dict[str, Any]) -> Any:
"""Internal implementation of call_tool."""
result = await asyncio.wait_for(
self.session.call_tool(tool_name, arguments),
timeout=self.execution_timeout,
)
# Extract result content
if hasattr(result, "content") and result.content:
if isinstance(result.content, list) and len(result.content) > 0:
content_item = result.content[0]
if hasattr(content_item, "text"):
return str(content_item.text)
return str(content_item)
return str(result.content)
return str(result)
async def list_prompts(self) -> list[dict[str, Any]]:
"""List available prompts from MCP server.
Returns:
List of prompt definitions.
"""
if not self.connected:
await self.connect()
return await self._retry_operation(
self._list_prompts_impl,
timeout=self.discovery_timeout,
)
async def _list_prompts_impl(self) -> list[dict[str, Any]]:
"""Internal implementation of list_prompts."""
prompts_result = await asyncio.wait_for(
self.session.list_prompts(),
timeout=self.discovery_timeout,
)
return [
{
"name": prompt.name,
"description": getattr(prompt, "description", ""),
"arguments": getattr(prompt, "arguments", []),
}
for prompt in prompts_result.prompts
]
async def get_prompt(
self, prompt_name: str, arguments: dict[str, Any] | None = None
) -> dict[str, Any]:
"""Get a prompt from the MCP server.
Args:
prompt_name: Name of the prompt to get.
arguments: Optional prompt arguments.
Returns:
Prompt content and metadata.
"""
if not self.connected:
await self.connect()
arguments = arguments or {}
return await self._retry_operation(
lambda: self._get_prompt_impl(prompt_name, arguments),
timeout=self.execution_timeout,
)
async def _get_prompt_impl(
self, prompt_name: str, arguments: dict[str, Any]
) -> dict[str, Any]:
"""Internal implementation of get_prompt."""
result = await asyncio.wait_for(
self.session.get_prompt(prompt_name, arguments),
timeout=self.execution_timeout,
)
return {
"name": prompt_name,
"messages": [
{
"role": msg.role,
"content": msg.content,
}
for msg in result.messages
],
"arguments": arguments,
}
async def _retry_operation(
self,
operation: Callable[[], Any],
timeout: int | None = None,
) -> Any:
"""Retry an operation with exponential backoff.
Args:
operation: Async operation to retry.
timeout: Operation timeout in seconds.
Returns:
Operation result.
"""
last_error = None
timeout = timeout or self.execution_timeout
for attempt in range(self.max_retries):
try:
if timeout:
return await asyncio.wait_for(operation(), timeout=timeout)
return await operation()
except asyncio.TimeoutError as e: # noqa: PERF203
last_error = f"Operation timed out after {timeout} seconds"
if attempt < self.max_retries - 1:
wait_time = 2**attempt
await asyncio.sleep(wait_time)
else:
raise ConnectionError(last_error) from e
except Exception as e:
error_str = str(e).lower()
# Classify errors as retryable or non-retryable
if "authentication" in error_str or "unauthorized" in error_str:
raise ConnectionError(f"Authentication failed: {e}") from e
if "not found" in error_str:
raise ValueError(f"Resource not found: {e}") from e
# Retryable errors
last_error = str(e)
if attempt < self.max_retries - 1:
wait_time = 2**attempt
await asyncio.sleep(wait_time)
else:
raise ConnectionError(
f"Operation failed after {self.max_retries} attempts: {last_error}"
) from e
raise ConnectionError(f"Operation failed: {last_error}")
def _get_cache_key(self, resource_type: str) -> str:
"""Generate cache key for resource.
Args:
resource_type: Type of resource (e.g., "tools", "prompts").
Returns:
Cache key string.
"""
# Use transport type and URL/command as cache key
if isinstance(self.transport, StdioTransport):
key = f"stdio:{self.transport.command}:{':'.join(self.transport.args)}"
elif isinstance(self.transport, HTTPTransport):
key = f"http:{self.transport.url}"
elif isinstance(self.transport, SSETransport):
key = f"sse:{self.transport.url}"
else:
key = f"{self.transport.transport_type}:unknown"
return f"mcp:{key}:{resource_type}"
async def __aenter__(self) -> Self:
"""Async context manager entry."""
return await self.connect()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: Any,
) -> None:
"""Async context manager exit."""
await self.disconnect()
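
A slightly fuller version of the docstring example above, runnable with `asyncio.run`. This is a sketch against the tree before this change (the modules are removed here); the server command and tool name are placeholders:

```python
# Sketch: end-to-end use of the removed client with a stdio server.
# "server.py" and "tool_name" are placeholders for a real MCP server and tool.
import asyncio

from crewai.mcp import MCPClient
from crewai.mcp.transports.stdio import StdioTransport


async def main() -> None:
    transport = StdioTransport(command="python", args=["server.py"])
    client = MCPClient(transport, cache_tools_list=True)
    async with client:
        tools = await client.list_tools()
        print([tool["name"] for tool in tools])
        result = await client.call_tool("tool_name", {"arg": "value"})
        print(result)


asyncio.run(main())
```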

View File

@@ -1,124 +0,0 @@
"""MCP server configuration models for CrewAI agents.
This module provides Pydantic models for configuring MCP servers with
various transport types, similar to OpenAI's Agents SDK.
"""
from pydantic import BaseModel, Field
from crewai.mcp.filters import ToolFilter
class MCPServerStdio(BaseModel):
"""Stdio MCP server configuration.
This configuration is used for connecting to local MCP servers
that run as processes and communicate via standard input/output.
Example:
```python
mcp_server = MCPServerStdio(
command="python",
args=["path/to/server.py"],
env={"API_KEY": "..."},
tool_filter=create_static_tool_filter(
allowed_tool_names=["read_file", "write_file"]
),
)
```
"""
command: str = Field(
...,
description="Command to execute (e.g., 'python', 'node', 'npx', 'uvx').",
)
args: list[str] = Field(
default_factory=list,
description="Command arguments (e.g., ['server.py'] or ['-y', '@mcp/server']).",
)
env: dict[str, str] | None = Field(
default=None,
description="Environment variables to pass to the process.",
)
tool_filter: ToolFilter | None = Field(
default=None,
description="Optional tool filter for filtering available tools.",
)
cache_tools_list: bool = Field(
default=False,
description="Whether to cache the tool list for faster subsequent access.",
)
class MCPServerHTTP(BaseModel):
"""HTTP/Streamable HTTP MCP server configuration.
This configuration is used for connecting to remote MCP servers
over HTTP/HTTPS using streamable HTTP transport.
Example:
```python
mcp_server = MCPServerHTTP(
url="https://api.example.com/mcp",
headers={"Authorization": "Bearer ..."},
cache_tools_list=True,
)
```
"""
url: str = Field(
..., description="Server URL (e.g., 'https://api.example.com/mcp')."
)
headers: dict[str, str] | None = Field(
default=None,
description="Optional HTTP headers for authentication or other purposes.",
)
streamable: bool = Field(
default=True,
description="Whether to use streamable HTTP transport (default: True).",
)
tool_filter: ToolFilter | None = Field(
default=None,
description="Optional tool filter for filtering available tools.",
)
cache_tools_list: bool = Field(
default=False,
description="Whether to cache the tool list for faster subsequent access.",
)
class MCPServerSSE(BaseModel):
"""Server-Sent Events (SSE) MCP server configuration.
This configuration is used for connecting to remote MCP servers
using Server-Sent Events for real-time streaming communication.
Example:
```python
mcp_server = MCPServerSSE(
url="https://api.example.com/mcp/sse",
headers={"Authorization": "Bearer ..."},
)
```
"""
url: str = Field(
...,
description="Server URL (e.g., 'https://api.example.com/mcp/sse').",
)
headers: dict[str, str] | None = Field(
default=None,
description="Optional HTTP headers for authentication or other purposes.",
)
tool_filter: ToolFilter | None = Field(
default=None,
description="Optional tool filter for filtering available tools.",
)
cache_tools_list: bool = Field(
default=False,
description="Whether to cache the tool list for faster subsequent access.",
)
# Type alias for all MCP server configurations
MCPServerConfig = MCPServerStdio | MCPServerHTTP | MCPServerSSE
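
Because `MCPServerConfig` is a plain union of the three Pydantic models, consuming code can dispatch on the concrete type. A small sketch using only the fields defined above; the imports follow the package's public exports in the pre-change tree:

```python
# Sketch: dispatching on the MCPServerConfig union using only fields defined above.
from crewai.mcp import MCPServerConfig, MCPServerHTTP, MCPServerSSE, MCPServerStdio


def describe(config: MCPServerConfig) -> str:
    if isinstance(config, MCPServerStdio):
        return f"stdio: {config.command} {' '.join(config.args)}"
    if isinstance(config, MCPServerHTTP):
        return f"http: {config.url} (streamable={config.streamable})"
    if isinstance(config, MCPServerSSE):
        return f"sse: {config.url}"
    raise TypeError(f"Unsupported config type: {type(config).__name__}")


print(describe(MCPServerStdio(command="python", args=["server.py"])))
print(describe(MCPServerHTTP(url="https://api.example.com/mcp")))
```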

View File

@@ -1,166 +0,0 @@
"""Tool filtering support for MCP servers.
This module provides utilities for filtering tools from MCP servers,
including static allow/block lists and dynamic context-aware filtering.
"""
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel, Field
if TYPE_CHECKING:
pass
class ToolFilterContext(BaseModel):
"""Context for dynamic tool filtering.
This context is passed to dynamic tool filters to provide
information about the agent, run context, and server.
"""
agent: Any = Field(..., description="The agent requesting tools.")
server_name: str = Field(..., description="Name of the MCP server.")
run_context: dict[str, Any] | None = Field(
default=None,
description="Optional run context for additional filtering logic.",
)
# Type alias for tool filter functions
ToolFilter = (
Callable[[ToolFilterContext, dict[str, Any]], bool]
| Callable[[dict[str, Any]], bool]
)
class StaticToolFilter:
"""Static tool filter with allow/block lists.
This filter provides simple allow/block list filtering based on
tool names. Useful for restricting which tools are available
from an MCP server.
Example:
```python
filter = StaticToolFilter(
allowed_tool_names=["read_file", "write_file"],
blocked_tool_names=["delete_file"],
)
```
"""
def __init__(
self,
allowed_tool_names: list[str] | None = None,
blocked_tool_names: list[str] | None = None,
) -> None:
"""Initialize static tool filter.
Args:
allowed_tool_names: List of tool names to allow. If None,
all tools are allowed (unless blocked).
blocked_tool_names: List of tool names to block. Blocked tools
take precedence over allowed tools.
"""
self.allowed_tool_names = set(allowed_tool_names or [])
self.blocked_tool_names = set(blocked_tool_names or [])
def __call__(self, tool: dict[str, Any]) -> bool:
"""Filter tool based on allow/block lists.
Args:
tool: Tool definition dictionary with at least 'name' key.
Returns:
True if tool should be included, False otherwise.
"""
tool_name = tool.get("name", "")
# Blocked tools take precedence
if self.blocked_tool_names and tool_name in self.blocked_tool_names:
return False
# If allow list exists, tool must be in it
if self.allowed_tool_names:
return tool_name in self.allowed_tool_names
# No restrictions - allow all
return True
def create_static_tool_filter(
allowed_tool_names: list[str] | None = None,
blocked_tool_names: list[str] | None = None,
) -> Callable[[dict[str, Any]], bool]:
"""Create a static tool filter function.
This is a convenience function for creating static tool filters
with allow/block lists.
Args:
allowed_tool_names: List of tool names to allow. If None,
all tools are allowed (unless blocked).
blocked_tool_names: List of tool names to block. Blocked tools
take precedence over allowed tools.
Returns:
Tool filter function that returns True for allowed tools.
Example:
```python
filter_fn = create_static_tool_filter(
allowed_tool_names=["read_file", "write_file"],
blocked_tool_names=["delete_file"],
)
# Use in MCPServerStdio
mcp_server = MCPServerStdio(
command="npx",
args=["-y", "@modelcontextprotocol/server-filesystem"],
tool_filter=filter_fn,
)
```
"""
return StaticToolFilter(
allowed_tool_names=allowed_tool_names,
blocked_tool_names=blocked_tool_names,
)
def create_dynamic_tool_filter(
filter_func: Callable[[ToolFilterContext, dict[str, Any]], bool],
) -> Callable[[ToolFilterContext, dict[str, Any]], bool]:
"""Create a dynamic tool filter function.
This function wraps a dynamic filter function that has access
to the tool filter context (agent, server, run context).
Args:
filter_func: Function that takes (context, tool) and returns bool.
Returns:
Tool filter function that can be used with MCP server configs.
Example:
```python
async def context_aware_filter(
context: ToolFilterContext, tool: dict[str, Any]
) -> bool:
# Block dangerous tools for code reviewers
if context.agent.role == "Code Reviewer":
if tool["name"].startswith("danger_"):
return False
return True
filter_fn = create_dynamic_tool_filter(context_aware_filter)
mcp_server = MCPServerStdio(
command="python", args=["server.py"], tool_filter=filter_fn
)
```
"""
return filter_func
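Because the ToolFilter alias above accepts either a single-argument callable or a (context, tool) callable, a caller has to dispatch on the filter's signature. A hedged sketch of such a dispatcher, assuming the ToolFilter and ToolFilterContext definitions above are in scope; apply_tool_filter is an invented helper, not part of this module:

```python
import inspect
from typing import Any


def apply_tool_filter(
    tool_filter: ToolFilter,
    tool: dict[str, Any],
    context: ToolFilterContext | None = None,
) -> bool:
    """Invoke a filter with whichever of the two supported signatures it declares."""
    params = inspect.signature(tool_filter).parameters
    if len(params) >= 2:
        if context is None:
            raise ValueError("This filter requires a ToolFilterContext.")
        return tool_filter(context, tool)  # type: ignore[call-arg]
    return tool_filter(tool)  # type: ignore[call-arg]
```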

View File

@@ -1,15 +0,0 @@
"""MCP transport implementations for various connection types."""
from crewai.mcp.transports.base import BaseTransport, TransportType
from crewai.mcp.transports.http import HTTPTransport
from crewai.mcp.transports.sse import SSETransport
from crewai.mcp.transports.stdio import StdioTransport
__all__ = [
"BaseTransport",
"HTTPTransport",
"SSETransport",
"StdioTransport",
"TransportType",
]

View File

@@ -1,125 +0,0 @@
"""Base transport interface for MCP connections."""
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, Protocol
from typing_extensions import Self
class TransportType(str, Enum):
"""MCP transport types."""
STDIO = "stdio"
HTTP = "http"
STREAMABLE_HTTP = "streamable-http"
SSE = "sse"
class ReadStream(Protocol):
"""Protocol for read streams."""
async def read(self, n: int = -1) -> bytes:
"""Read bytes from stream."""
...
class WriteStream(Protocol):
"""Protocol for write streams."""
async def write(self, data: bytes) -> None:
"""Write bytes to stream."""
...
class BaseTransport(ABC):
"""Base class for MCP transport implementations.
This abstract base class defines the interface that all transport
implementations must follow. Transports handle the low-level communication
with MCP servers.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the transport.
Args:
**kwargs: Transport-specific configuration options.
"""
self._read_stream: ReadStream | None = None
self._write_stream: WriteStream | None = None
self._connected = False
@property
@abstractmethod
def transport_type(self) -> TransportType:
"""Return the transport type."""
...
@property
def connected(self) -> bool:
"""Check if transport is connected."""
return self._connected
@property
def read_stream(self) -> ReadStream:
"""Get the read stream."""
if self._read_stream is None:
raise RuntimeError("Transport not connected. Call connect() first.")
return self._read_stream
@property
def write_stream(self) -> WriteStream:
"""Get the write stream."""
if self._write_stream is None:
raise RuntimeError("Transport not connected. Call connect() first.")
return self._write_stream
@abstractmethod
async def connect(self) -> Self:
"""Establish connection to MCP server.
Returns:
Self for method chaining.
Raises:
ConnectionError: If connection fails.
"""
...
@abstractmethod
async def disconnect(self) -> None:
"""Close connection to MCP server."""
...
@abstractmethod
async def __aenter__(self) -> Self:
"""Async context manager entry."""
...
@abstractmethod
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: Any,
) -> None:
"""Async context manager exit."""
...
def _set_streams(self, read: ReadStream, write: WriteStream) -> None:
"""Set the read and write streams.
Args:
read: Read stream.
write: Write stream.
"""
self._read_stream = read
self._write_stream = write
self._connected = True
def _clear_streams(self) -> None:
"""Clear the read and write streams."""
self._read_stream = None
self._write_stream = None
self._connected = False
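A hedged sketch of a minimal concrete subclass, showing the surface every transport has to implement; EchoTransport and _InMemoryStream are invented for illustration and do not exist in the codebase:

```python
class _InMemoryStream:
    """Toy stream satisfying the ReadStream/WriteStream protocols (illustrative only)."""

    def __init__(self) -> None:
        self._buffer = bytearray()

    async def read(self, n: int = -1) -> bytes:
        data = bytes(self._buffer if n < 0 else self._buffer[:n])
        del self._buffer[: len(data)]
        return data

    async def write(self, data: bytes) -> None:
        self._buffer.extend(data)


class EchoTransport(BaseTransport):
    """Minimal concrete transport wiring two in-memory streams together."""

    @property
    def transport_type(self) -> TransportType:
        return TransportType.STDIO  # arbitrary choice for the example

    async def connect(self) -> "EchoTransport":
        self._set_streams(read=_InMemoryStream(), write=_InMemoryStream())
        return self

    async def disconnect(self) -> None:
        self._clear_streams()

    async def __aenter__(self) -> "EchoTransport":
        return await self.connect()

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        await self.disconnect()
```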

View File

@@ -1,174 +0,0 @@
"""HTTP and Streamable HTTP transport for MCP servers."""
import asyncio
from typing import Any
from typing_extensions import Self
# BaseExceptionGroup is available in Python 3.11+
try:
from builtins import BaseExceptionGroup
except ImportError:
# Fallback for Python < 3.11 (shouldn't happen in practice)
BaseExceptionGroup = Exception
from crewai.mcp.transports.base import BaseTransport, TransportType
class HTTPTransport(BaseTransport):
"""HTTP/Streamable HTTP transport for connecting to remote MCP servers.
This transport connects to MCP servers over HTTP/HTTPS using the
streamable HTTP client from the MCP SDK.
Example:
```python
transport = HTTPTransport(
url="https://api.example.com/mcp",
headers={"Authorization": "Bearer ..."}
)
async with transport:
# Use transport...
```
"""
def __init__(
self,
url: str,
headers: dict[str, str] | None = None,
streamable: bool = True,
**kwargs: Any,
) -> None:
"""Initialize HTTP transport.
Args:
url: Server URL (e.g., "https://api.example.com/mcp").
headers: Optional HTTP headers.
streamable: Whether to use streamable HTTP (default: True).
**kwargs: Additional transport options.
"""
super().__init__(**kwargs)
self.url = url
self.headers = headers or {}
self.streamable = streamable
self._transport_context: Any = None
@property
def transport_type(self) -> TransportType:
"""Return the transport type."""
return TransportType.STREAMABLE_HTTP if self.streamable else TransportType.HTTP
async def connect(self) -> Self:
"""Establish HTTP connection to MCP server.
Returns:
Self for method chaining.
Raises:
ConnectionError: If connection fails.
ImportError: If MCP SDK not available.
"""
if self._connected:
return self
try:
from mcp.client.streamable_http import streamablehttp_client
self._transport_context = streamablehttp_client(
self.url,
headers=self.headers if self.headers else None,
terminate_on_close=True,
)
try:
read, write, _ = await asyncio.wait_for(
self._transport_context.__aenter__(), timeout=30.0
)
except asyncio.TimeoutError as e:
self._transport_context = None
raise ConnectionError(
"Transport context entry timed out after 30 seconds. "
"Server may be slow or unreachable."
) from e
except Exception as e:
self._transport_context = None
raise ConnectionError(f"Failed to enter transport context: {e}") from e
self._set_streams(read=read, write=write)
return self
except ImportError as e:
raise ImportError(
"MCP library not available. Please install with: pip install mcp"
) from e
except Exception as e:
self._clear_streams()
if self._transport_context is not None:
self._transport_context = None
raise ConnectionError(f"Failed to connect to MCP server: {e}") from e
async def disconnect(self) -> None:
"""Close HTTP connection."""
if not self._connected:
return
try:
# Clear streams first
self._clear_streams()
# await self._exit_stack.aclose()
# Exit transport context - this will clean up background tasks
# Give a small delay to allow background tasks to complete
if self._transport_context is not None:
try:
# Wait a tiny bit for any pending operations
await asyncio.sleep(0.1)
await self._transport_context.__aexit__(None, None, None)
except (RuntimeError, asyncio.CancelledError) as e:
# Ignore "exit cancel scope in different task" errors and cancellation
# These happen when asyncio.run() closes the event loop
# while background tasks are still running
error_msg = str(e).lower()
if "cancel scope" not in error_msg and "task" not in error_msg:
# Only suppress cancel scope/task errors, re-raise others
if isinstance(e, RuntimeError):
raise
# For CancelledError, just suppress it
except BaseExceptionGroup as eg:
# Handle exception groups from anyio task groups
# Suppress if they contain cancel scope errors
should_suppress = False
for exc in eg.exceptions:
error_msg = str(exc).lower()
if "cancel scope" in error_msg or "task" in error_msg:
should_suppress = True
break
if not should_suppress:
raise
except Exception as e:
raise RuntimeError(
f"Error during HTTP transport disconnect: {e}"
) from e
self._connected = False
except Exception as e:
# Log but don't raise - cleanup should be best effort
import logging
logger = logging.getLogger(__name__)
logger.warning(f"Error during HTTP transport disconnect: {e}")
async def __aenter__(self) -> Self:
"""Async context manager entry."""
return await self.connect()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: Any,
) -> None:
"""Async context manager exit."""
await self.disconnect()

View File

@@ -1,113 +0,0 @@
"""Server-Sent Events (SSE) transport for MCP servers."""
from typing import Any
from typing_extensions import Self
from crewai.mcp.transports.base import BaseTransport, TransportType
class SSETransport(BaseTransport):
"""SSE transport for connecting to remote MCP servers.
This transport connects to MCP servers using Server-Sent Events (SSE)
for real-time streaming communication.
Example:
```python
transport = SSETransport(
url="https://api.example.com/mcp/sse",
headers={"Authorization": "Bearer ..."}
)
async with transport:
# Use transport...
```
"""
def __init__(
self,
url: str,
headers: dict[str, str] | None = None,
**kwargs: Any,
) -> None:
"""Initialize SSE transport.
Args:
url: Server URL (e.g., "https://api.example.com/mcp/sse").
headers: Optional HTTP headers.
**kwargs: Additional transport options.
"""
super().__init__(**kwargs)
self.url = url
self.headers = headers or {}
self._transport_context: Any = None
@property
def transport_type(self) -> TransportType:
"""Return the transport type."""
return TransportType.SSE
async def connect(self) -> Self:
"""Establish SSE connection to MCP server.
Returns:
Self for method chaining.
Raises:
ConnectionError: If connection fails.
ImportError: If MCP SDK not available.
"""
if self._connected:
return self
try:
from mcp.client.sse import sse_client
self._transport_context = sse_client(
self.url,
headers=self.headers if self.headers else None,
terminate_on_close=True,
)
read, write = await self._transport_context.__aenter__()
self._set_streams(read=read, write=write)
return self
except ImportError as e:
raise ImportError(
"MCP library not available. Please install with: pip install mcp"
) from e
except Exception as e:
self._clear_streams()
raise ConnectionError(f"Failed to connect to SSE MCP server: {e}") from e
async def disconnect(self) -> None:
"""Close SSE connection."""
if not self._connected:
return
try:
self._clear_streams()
if self._transport_context is not None:
await self._transport_context.__aexit__(None, None, None)
except Exception as e:
import logging
logger = logging.getLogger(__name__)
logger.warning(f"Error during SSE transport disconnect: {e}")
async def __aenter__(self) -> Self:
"""Async context manager entry."""
return await self.connect()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: Any,
) -> None:
"""Async context manager exit."""
await self.disconnect()

View File

@@ -1,153 +0,0 @@
"""Stdio transport for MCP servers running as local processes."""
import asyncio
import os
import subprocess
from typing import Any
from typing_extensions import Self
from crewai.mcp.transports.base import BaseTransport, TransportType
class StdioTransport(BaseTransport):
"""Stdio transport for connecting to local MCP servers.
This transport connects to MCP servers running as local processes,
communicating via standard input/output streams. Supports Python,
Node.js, and other command-line servers.
Example:
```python
transport = StdioTransport(
command="python",
args=["path/to/server.py"],
env={"API_KEY": "..."}
)
async with transport:
# Use transport...
```
"""
def __init__(
self,
command: str,
args: list[str] | None = None,
env: dict[str, str] | None = None,
**kwargs: Any,
) -> None:
"""Initialize stdio transport.
Args:
command: Command to execute (e.g., "python", "node", "npx").
args: Command arguments (e.g., ["server.py"] or ["-y", "@mcp/server"]).
env: Environment variables to pass to the process.
**kwargs: Additional transport options.
"""
super().__init__(**kwargs)
self.command = command
self.args = args or []
self.env = env or {}
self._process: subprocess.Popen[bytes] | None = None
self._transport_context: Any = None
@property
def transport_type(self) -> TransportType:
"""Return the transport type."""
return TransportType.STDIO
async def connect(self) -> Self:
"""Start the MCP server process and establish connection.
Returns:
Self for method chaining.
Raises:
ConnectionError: If process fails to start.
ImportError: If MCP SDK not available.
"""
if self._connected:
return self
try:
from mcp import StdioServerParameters
from mcp.client.stdio import stdio_client
process_env = os.environ.copy()
process_env.update(self.env)
server_params = StdioServerParameters(
command=self.command,
args=self.args,
env=process_env if process_env else None,
)
self._transport_context = stdio_client(server_params)
try:
read, write = await self._transport_context.__aenter__()
except Exception as e:
import traceback
traceback.print_exc()
self._transport_context = None
raise ConnectionError(
f"Failed to enter stdio transport context: {e}"
) from e
self._set_streams(read=read, write=write)
return self
except ImportError as e:
raise ImportError(
"MCP library not available. Please install with: pip install mcp"
) from e
except Exception as e:
self._clear_streams()
if self._transport_context is not None:
self._transport_context = None
raise ConnectionError(f"Failed to start MCP server process: {e}") from e
async def disconnect(self) -> None:
"""Terminate the MCP server process and close connection."""
if not self._connected:
return
try:
self._clear_streams()
if self._transport_context is not None:
await self._transport_context.__aexit__(None, None, None)
if self._process is not None:
try:
self._process.terminate()
try:
await asyncio.wait_for(self._process.wait(), timeout=5.0)
except asyncio.TimeoutError:
self._process.kill()
await self._process.wait()
# except ProcessLookupError:
# pass
finally:
self._process = None
except Exception as e:
# Log but don't raise - cleanup should be best effort
import logging
logger = logging.getLogger(__name__)
logger.warning(f"Error during stdio transport disconnect: {e}")
async def __aenter__(self) -> Self:
"""Async context manager entry."""
return await self.connect()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: Any,
) -> None:
"""Async context manager exit."""
await self.disconnect()

View File

@@ -1,75 +1,21 @@
"""Utility functions for the crewai project module."""
from collections.abc import Callable
from functools import wraps
from typing import Any, ParamSpec, TypeVar, cast
from pydantic import BaseModel
from crewai.agents.cache.cache_handler import CacheHandler
from functools import lru_cache
from typing import ParamSpec, TypeVar, cast
P = ParamSpec("P")
R = TypeVar("R")
cache = CacheHandler()
def _make_hashable(arg: Any) -> Any:
"""Convert argument to hashable form for caching.
Args:
arg: The argument to convert.
Returns:
Hashable representation of the argument.
"""
if isinstance(arg, BaseModel):
return arg.model_dump_json()
if isinstance(arg, dict):
return tuple(sorted((k, _make_hashable(v)) for k, v in arg.items()))
if isinstance(arg, list):
return tuple(_make_hashable(item) for item in arg)
if hasattr(arg, "__dict__"):
return ("__instance__", id(arg))
return arg
def memoize(meth: Callable[P, R]) -> Callable[P, R]:
"""Memoize a method by caching its results based on arguments.
Handles Pydantic BaseModel instances by converting them to JSON strings
before hashing for cache lookup.
Args:
meth: The method to memoize.
Returns:
A memoized version of the method that caches results.
"""
@wraps(meth)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
"""Wrapper that converts arguments to hashable form before caching.
Args:
*args: Positional arguments to the memoized method.
**kwargs: Keyword arguments to the memoized method.
Returns:
The result of the memoized method call.
"""
hashable_args = tuple(_make_hashable(arg) for arg in args)
hashable_kwargs = tuple(
sorted((k, _make_hashable(v)) for k, v in kwargs.items())
)
cache_key = str((hashable_args, hashable_kwargs))
cached_result: R | None = cache.read(tool=meth.__name__, input=cache_key)
if cached_result is not None:
return cached_result
result = meth(*args, **kwargs)
cache.add(tool=meth.__name__, input=cache_key, output=result)
return result
return cast(Callable[P, R], wrapper)
return cast(Callable[P, R], lru_cache(typed=True)(meth))
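Per the hunk header (-75/+21 lines), the CacheHandler-backed memoize and its _make_hashable helper are replaced by a thin functools.lru_cache(typed=True) wrapper. A small hedged sketch of the behavioral difference: lru_cache keys on the arguments themselves, so they must be hashable.

```python
from functools import lru_cache


@lru_cache(typed=True)
def double(value: int | float) -> int | float:
    # typed=True keeps double(1) and double(1.0) as separate cache entries
    return value * 2


double(1)    # computed, cached under the int key
double(1)    # served from the cache
double(1.0)  # computed again, cached under the float key

# Unlike the removed _make_hashable-based memoize, lru_cache raises TypeError
# when a call passes an unhashable argument such as a dict or a list.
```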

View File

@@ -67,44 +67,31 @@ def _prepare_documents_for_chromadb(
ids: list[str] = []
texts: list[str] = []
metadatas: list[Mapping[str, str | int | float | bool]] = []
seen_ids: dict[str, int] = {}
try:
for doc in documents:
if "doc_id" in doc:
doc_id = str(doc["doc_id"])
else:
metadata = doc.get("metadata")
if metadata and isinstance(metadata, dict) and "doc_id" in metadata:
doc_id = str(metadata["doc_id"])
else:
content_for_hash = doc["content"]
if metadata:
metadata_str = json.dumps(metadata, sort_keys=True)
content_for_hash = f"{content_for_hash}|{metadata_str}"
doc_id = hashlib.sha256(content_for_hash.encode()).hexdigest()
for doc in documents:
if "doc_id" in doc:
ids.append(doc["doc_id"])
else:
content_for_hash = doc["content"]
metadata = doc.get("metadata")
if metadata:
if isinstance(metadata, list):
processed_metadata = metadata[0] if metadata and metadata[0] else {}
else:
processed_metadata = metadata
else:
processed_metadata = {}
metadata_str = json.dumps(metadata, sort_keys=True)
content_for_hash = f"{content_for_hash}|{metadata_str}"
if doc_id in seen_ids:
idx = seen_ids[doc_id]
texts[idx] = doc["content"]
metadatas[idx] = processed_metadata
content_hash = hashlib.blake2b(
content_for_hash.encode(), digest_size=32
).hexdigest()
ids.append(content_hash)
texts.append(doc["content"])
metadata = doc.get("metadata")
if metadata:
if isinstance(metadata, list):
metadatas.append(metadata[0] if metadata and metadata[0] else {})
else:
idx = len(ids)
ids.append(doc_id)
texts.append(doc["content"])
metadatas.append(processed_metadata)
seen_ids[doc_id] = idx
except Exception as e:
raise ValueError(f"Error preparing documents for ChromaDB: {e}") from e
metadatas.append(metadata)
else:
metadatas.append({})
return PreparedDocuments(ids, texts, metadatas)
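One side of the hunk above derives a document id by hashing the content together with its canonically serialized metadata (blake2b, 32-byte digest) whenever no explicit doc_id is given. A hedged standalone sketch of that derivation; content_based_id is an invented name, not part of the module:

```python
import hashlib
import json


def content_based_id(content: str, metadata: dict | None = None) -> str:
    payload = content
    if metadata:
        payload = f"{content}|{json.dumps(metadata, sort_keys=True)}"
    return hashlib.blake2b(payload.encode(), digest_size=32).hexdigest()


# Identical content and metadata always map to the same id, so re-ingesting the
# same document overwrites the existing entry instead of duplicating it.
assert content_based_id("hello", {"a": 1}) == content_based_id("hello", {"a": 1})
assert content_based_id("hello", {"a": 1}) != content_based_id("hello", {"a": 2})
```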

View File

@@ -21,7 +21,7 @@ class GenerativeAiProvider(BaseEmbeddingsProvider[GoogleGenerativeAiEmbeddingFun
validation_alias="EMBEDDINGS_GOOGLE_GENERATIVE_AI_MODEL_NAME",
)
api_key: str = Field(
description="Google API key", validation_alias="EMBEDDINGS_GEMINI_API_KEY"
description="Google API key", validation_alias="EMBEDDINGS_GOOGLE_API_KEY"
)
task_type: str = Field(
default="RETRIEVAL_DOCUMENT",

View File

@@ -525,11 +525,7 @@ class Task(BaseModel):
tools=tools,
)
if not self._guardrails and not self._guardrail:
pydantic_output, json_output = self._export_output(result)
else:
pydantic_output, json_output = None, None
pydantic_output, json_output = self._export_output(result)
task_output = TaskOutput(
name=self.name or self.description,
description=self.description,

View File

@@ -1,162 +0,0 @@
"""Native MCP tool wrapper for CrewAI agents.
This module provides a tool wrapper that reuses existing MCP client sessions
for better performance and connection management.
"""
import asyncio
from typing import Any
from crewai.tools import BaseTool
class MCPNativeTool(BaseTool):
"""Native MCP tool that reuses client sessions.
This tool wrapper is used when agents connect to MCP servers using
structured configurations. It reuses existing client sessions for
better performance and proper connection lifecycle management.
Unlike MCPToolWrapper which connects on-demand, this tool uses
a shared MCP client instance that maintains a persistent connection.
"""
def __init__(
self,
mcp_client: Any,
tool_name: str,
tool_schema: dict[str, Any],
server_name: str,
) -> None:
"""Initialize native MCP tool.
Args:
mcp_client: MCPClient instance with active session.
tool_name: Original name of the tool on the MCP server.
tool_schema: Schema information for the tool.
server_name: Name of the MCP server for prefixing.
"""
# Create tool name with server prefix to avoid conflicts
prefixed_name = f"{server_name}_{tool_name}"
# Handle args_schema properly - BaseTool expects a BaseModel subclass
args_schema = tool_schema.get("args_schema")
# Only pass args_schema if it's provided
kwargs = {
"name": prefixed_name,
"description": tool_schema.get(
"description", f"Tool {tool_name} from {server_name}"
),
}
if args_schema is not None:
kwargs["args_schema"] = args_schema
super().__init__(**kwargs)
# Set instance attributes after super().__init__
self._mcp_client = mcp_client
self._original_tool_name = tool_name
self._server_name = server_name
# self._logger = logging.getLogger(__name__)
@property
def mcp_client(self) -> Any:
"""Get the MCP client instance."""
return self._mcp_client
@property
def original_tool_name(self) -> str:
"""Get the original tool name."""
return self._original_tool_name
@property
def server_name(self) -> str:
"""Get the server name."""
return self._server_name
def _run(self, **kwargs) -> str:
"""Execute tool using the MCP client session.
Args:
**kwargs: Arguments to pass to the MCP tool.
Returns:
Result from the MCP tool execution.
"""
try:
try:
asyncio.get_running_loop()
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
coro = self._run_async(**kwargs)
future = executor.submit(asyncio.run, coro)
return future.result()
except RuntimeError:
return asyncio.run(self._run_async(**kwargs))
except Exception as e:
raise RuntimeError(
f"Error executing MCP tool {self.original_tool_name}: {e!s}"
) from e
async def _run_async(self, **kwargs) -> str:
"""Async implementation of tool execution.
Args:
**kwargs: Arguments to pass to the MCP tool.
Returns:
Result from the MCP tool execution.
"""
# Always reconnect on-demand: asyncio.run() creates a new event loop per call,
# and all MCP transport context managers (stdio, streamablehttp_client, sse_client)
# use anyio.create_task_group(), which cannot span different event loops.
if self._mcp_client.connected:
await self._mcp_client.disconnect()
await self._mcp_client.connect()
try:
result = await self._mcp_client.call_tool(self.original_tool_name, kwargs)
except Exception as e:
error_str = str(e).lower()
if (
"not connected" in error_str
or "connection" in error_str
or "send" in error_str
):
await self._mcp_client.disconnect()
await self._mcp_client.connect()
# Retry the call
result = await self._mcp_client.call_tool(
self.original_tool_name, kwargs
)
else:
raise
finally:
# Always disconnect after tool call to ensure clean context manager lifecycle
# This prevents "exit cancel scope in different task" errors
# All transport context managers must be exited in the same event loop they were entered in
await self._mcp_client.disconnect()
# Extract result content
if isinstance(result, str):
return result
# Handle various result formats
if hasattr(result, "content") and result.content:
if isinstance(result.content, list) and len(result.content) > 0:
content_item = result.content[0]
if hasattr(content_item, "text"):
return str(content_item.text)
return str(content_item)
return str(result.content)
return str(result)
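The _run method above bridges synchronous tool execution into the async MCP client: if an event loop is already running in the current thread, it submits asyncio.run to a worker thread; otherwise it calls asyncio.run directly. A hedged standalone sketch of that pattern (run_blocking is an invented helper, not part of this class):

```python
import asyncio
import concurrent.futures
from collections.abc import Coroutine
from typing import Any, TypeVar

T = TypeVar("T")


def run_blocking(coro: Coroutine[Any, Any, T]) -> T:
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No loop running in this thread: safe to create one here.
        return asyncio.run(coro)
    # A loop is already running (e.g. inside an async framework); run the
    # coroutine on a fresh loop in a worker thread to avoid nesting loops.
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        return executor.submit(asyncio.run, coro).result()
```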

View File

@@ -22,12 +22,12 @@
"summarize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
"summary": "This is a summary of our conversation so far:\n{merged_summary}",
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
"formatted_task_instructions": "Ensure your final answer strictly adheres to the following OpenAPI schema: {output_format}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.",
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
"feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary.",
"lite_agent_system_prompt_with_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
"lite_agent_system_prompt_without_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"lite_agent_response_format": "Ensure your final answer strictly adheres to the following OpenAPI schema: {response_format}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.",
"lite_agent_response_format": "\nIMPORTANT: Your final answer MUST contain all the information requested in the following format: {response_format}\n\nIMPORTANT: Ensure the final output does not include any code block markers like ```json or ```python.",
"knowledge_search_query": "The original query is: {task_prompt}.",
"knowledge_search_query_system_prompt": "Your goal is to rewrite the user query so that it is optimized for retrieval from a vector database. Consider how the query will be used to find relevant documents, and aim to make it more specific and context-aware. \n\n Do not include any other text than the rewritten query, especially any preamble or postamble and only add expected output format if its relevant to the rewritten query. \n\n Focus on the key words of the intended task and to retrieve the most relevant information. \n\n There will be some extra context provided that might need to be removed such as expected_output formats structured_outputs and other instructions."
},

View File

@@ -127,7 +127,7 @@ def handle_max_iterations_exceeded(
messages: list[LLMMessage],
llm: LLM | BaseLLM,
callbacks: list[TokenCalcHandler],
) -> AgentFinish:
) -> AgentAction | AgentFinish:
"""Handles the case when the maximum number of iterations is exceeded. Performs one more LLM call to get the final answer.
Args:
@@ -139,7 +139,7 @@ def handle_max_iterations_exceeded(
callbacks: List of callbacks for the LLM call.
Returns:
AgentFinish with the final answer after exceeding max iterations.
The final formatted answer after exceeding max iterations.
"""
printer.print(
content="Maximum iterations reached. Requesting final answer.",
@@ -157,7 +157,7 @@ def handle_max_iterations_exceeded(
# Perform one more LLM call to get the final answer
answer = llm.call(
messages,
messages, # type: ignore[arg-type]
callbacks=callbacks,
)
@@ -168,16 +168,8 @@ def handle_max_iterations_exceeded(
)
raise ValueError("Invalid response from LLM call - None or empty.")
formatted = format_answer(answer=answer)
# If format_answer returned an AgentAction, convert it to AgentFinish
if isinstance(formatted, AgentFinish):
return formatted
return AgentFinish(
thought=formatted.thought,
output=formatted.text,
text=formatted.text,
)
# Return the formatted answer, regardless of its type
return format_answer(answer=answer)
def format_message_for_llm(
@@ -257,10 +249,10 @@ def get_llm_response(
"""
try:
answer = llm.call(
messages,
messages, # type: ignore[arg-type]
callbacks=callbacks,
from_task=from_task,
from_agent=from_agent, # type: ignore[arg-type]
from_agent=from_agent,
response_model=response_model,
)
except Exception as e:
@@ -302,8 +294,8 @@ def handle_agent_action_core(
formatted_answer: AgentAction,
tool_result: ToolResult,
messages: list[LLMMessage] | None = None,
step_callback: Callable | None = None, # type: ignore[type-arg]
show_logs: Callable | None = None, # type: ignore[type-arg]
step_callback: Callable | None = None,
show_logs: Callable | None = None,
) -> AgentAction | AgentFinish:
"""Core logic for handling agent actions and tool results.
@@ -489,7 +481,7 @@ def summarize_messages(
),
]
summary = llm.call(
messages,
messages, # type: ignore[arg-type]
callbacks=callbacks,
)
summarized_contents.append({"content": str(summary)})

View File

@@ -10,9 +10,9 @@ from pydantic import BaseModel, ValidationError
from typing_extensions import Unpack
from crewai.agents.agent_builder.utilities.base_output_converter import OutputConverter
from crewai.utilities.i18n import get_i18n
from crewai.utilities.internal_instructor import InternalInstructor
from crewai.utilities.printer import Printer
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
if TYPE_CHECKING:
@@ -22,7 +22,6 @@ if TYPE_CHECKING:
from crewai.llms.base_llm import BaseLLM
_JSON_PATTERN: Final[re.Pattern[str]] = re.compile(r"({.*})", re.DOTALL)
_I18N = get_i18n()
class ConverterError(Exception):
@@ -88,7 +87,8 @@ class Converter(OutputConverter):
result = self.model.model_validate(result)
elif isinstance(result, str):
try:
result = self.model.model_validate_json(result)
parsed = json.loads(result)
result = self.model.model_validate(parsed)
except Exception as parse_err:
raise ConverterError(
f"Failed to convert partial JSON result into Pydantic: {parse_err}"
@@ -172,16 +172,6 @@ def convert_to_model(
model = output_pydantic or output_json
if model is None:
return result
if converter_cls:
return convert_with_instructions(
result=result,
model=model,
is_json_output=bool(output_json),
agent=agent,
converter_cls=converter_cls,
)
try:
escaped_result = json.dumps(json.loads(result, strict=False))
return validate_model(
@@ -261,7 +251,7 @@ def handle_partial_json(
except json.JSONDecodeError:
pass
except ValidationError:
raise
pass
except Exception as e:
Printer().print(
content=f"Unexpected error during partial JSON handling: {type(e).__name__}: {e}. Attempting alternative conversion method.",
@@ -345,26 +335,25 @@ def get_conversion_instructions(
Returns:
"""
instructions = ""
instructions = "Please convert the following text into valid JSON."
if (
llm
and not isinstance(llm, str)
and hasattr(llm, "supports_function_calling")
and llm.supports_function_calling()
):
schema_dict = generate_model_description(model)
schema = json.dumps(schema_dict, indent=2)
formatted_task_instructions = _I18N.slice("formatted_task_instructions").format(
output_format=schema
model_schema = PydanticSchemaParser(model=model).get_schema()
instructions += (
f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
f"Use this format exactly:\n```json\n{model_schema}\n```"
)
instructions += formatted_task_instructions
else:
model_description = generate_model_description(model)
schema_json = json.dumps(model_description, indent=2)
formatted_task_instructions = _I18N.slice("formatted_task_instructions").format(
output_format=schema_json
schema_json = json.dumps(model_description["json_schema"]["schema"], indent=2)
instructions += (
f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
f"Use this format exactly:\n```json\n{schema_json}\n```"
)
instructions += formatted_task_instructions
return instructions

View File

@@ -147,7 +147,7 @@ def test_custom_llm():
assert agent.llm.model == "gpt-4"
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execution():
agent = Agent(
role="test role",
@@ -166,7 +166,7 @@ def test_agent_execution():
assert output == "1 + 1 is 2"
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execution_with_tools():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -211,7 +211,7 @@ def test_agent_execution_with_tools():
assert received_events[0].tool_args == {"first_number": 3, "second_number": 4}
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_logging_tool_usage():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -245,7 +245,7 @@ def test_logging_tool_usage():
assert agent.tools_handler.last_used_tool.arguments == tool_usage.arguments
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_cache_hitting():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -330,7 +330,7 @@ def test_cache_hitting():
assert received_events[0].from_cache
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_disabling_cache_for_agent():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -394,7 +394,7 @@ def test_disabling_cache_for_agent():
read.assert_not_called()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execution_with_specific_tools():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -417,7 +417,7 @@ def test_agent_execution_with_specific_tools():
assert output == "The result of the multiplication is 12."
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_powered_by_new_o_model_family_that_allows_skipping_tool():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -443,7 +443,7 @@ def test_agent_powered_by_new_o_model_family_that_allows_skipping_tool():
assert output == "12"
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_powered_by_new_o_model_family_that_uses_tool():
@tool
def comapny_customer_data() -> str:
@@ -469,7 +469,7 @@ def test_agent_powered_by_new_o_model_family_that_uses_tool():
assert output == "42"
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_custom_max_iterations():
@tool
def get_final_answer() -> float:
@@ -508,50 +508,10 @@ def test_agent_custom_max_iterations():
assert isinstance(result, str)
assert len(result) > 0
assert call_count > 0
# With max_iter=1, expect 2 calls:
# - Call 1: iteration 0
# - Call 2: iteration 1 (max reached, handle_max_iterations_exceeded called, then loop breaks)
assert call_count == 2
assert call_count == 3
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.timeout(30)
def test_agent_max_iterations_stops_loop():
"""Test that agent execution terminates when max_iter is reached."""
@tool
def get_data(step: str) -> str:
"""Get data for a step. Always returns data requiring more steps."""
return f"Data for {step}: incomplete, need to query more steps."
agent = Agent(
role="data collector",
goal="collect data using the get_data tool",
backstory="You must use the get_data tool extensively",
max_iter=2,
allow_delegation=False,
)
task = Task(
description="Use get_data tool for step1, step2, step3, step4, step5, step6, step7, step8, step9, and step10. Do NOT stop until you've called it for ALL steps.",
expected_output="A summary of all data collected",
)
result = agent.execute_task(
task=task,
tools=[get_data],
)
assert result is not None
assert isinstance(result, str)
assert agent.agent_executor.iterations <= agent.max_iter + 2, (
f"Agent ran {agent.agent_executor.iterations} iterations "
f"but should stop around {agent.max_iter + 1}. "
)
@pytest.mark.vcr()
def test_agent_repeated_tool_usage(capsys):
"""Test that agents handle repeated tool usage appropriately.
@@ -600,7 +560,7 @@ def test_agent_repeated_tool_usage(capsys):
)
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys):
@tool
def get_final_answer(anything: str) -> float:
@@ -643,7 +603,7 @@ def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys):
)
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_moved_on_after_max_iterations():
@tool
def get_final_answer() -> float:
@@ -670,7 +630,7 @@ def test_agent_moved_on_after_max_iterations():
assert output == "42"
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_respect_the_max_rpm_set(capsys):
@tool
def get_final_answer() -> float:
@@ -704,7 +664,7 @@ def test_agent_respect_the_max_rpm_set(capsys):
moveon.assert_called()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
from unittest.mock import patch
@@ -742,7 +702,7 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
moveon.assert_not_called()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_without_max_rpm_respects_crew_rpm(capsys):
from unittest.mock import patch
@@ -802,7 +762,7 @@ def test_agent_without_max_rpm_respects_crew_rpm(capsys):
moveon.assert_called_once()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_error_on_parsing_tool(capsys):
from unittest.mock import patch
@@ -845,7 +805,7 @@ def test_agent_error_on_parsing_tool(capsys):
assert "Error on parsing tool." in captured.out
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_remembers_output_format_after_using_tools_too_many_times():
from unittest.mock import patch
@@ -880,7 +840,7 @@ def test_agent_remembers_output_format_after_using_tools_too_many_times():
remember_format.assert_called()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_use_specific_tasks_output_as_context(capsys):
agent1 = Agent(role="test role", goal="test goal", backstory="test backstory")
agent2 = Agent(role="test role2", goal="test goal2", backstory="test backstory2")
@@ -907,7 +867,7 @@ def test_agent_use_specific_tasks_output_as_context(capsys):
assert "hi" in result.raw.lower() or "hello" in result.raw.lower()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_step_callback():
class StepCallback:
def callback(self, step):
@@ -941,7 +901,7 @@ def test_agent_step_callback():
callback.assert_called()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_function_calling_llm():
from crewai.llm import LLM
llm = LLM(model="gpt-4o", is_litellm=True)
@@ -988,7 +948,7 @@ def test_agent_function_calling_llm():
mock_original_tool_calling.assert_called()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
from crewai.tools import BaseTool
@@ -1018,7 +978,7 @@ def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
assert result.raw == "Howdy!"
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_tool_usage_information_is_appended_to_agent():
from crewai.tools import BaseTool
@@ -1073,7 +1033,7 @@ def test_agent_definition_based_on_dict():
# test for human input
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_human_input():
# Agent configuration
config = {
@@ -1221,7 +1181,7 @@ Thought:<|eot_id|>
assert mock_format_prompt.return_value == expected_prompt
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_allow_crewai_trigger_context():
from crewai import Crew
@@ -1242,7 +1202,7 @@ def test_task_allow_crewai_trigger_context():
assert "Trigger Payload: Important context data" in prompt
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_without_allow_crewai_trigger_context():
from crewai import Crew
@@ -1265,7 +1225,7 @@ def test_task_without_allow_crewai_trigger_context():
assert "Important context data" not in prompt
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_allow_crewai_trigger_context_no_payload():
from crewai import Crew
@@ -1287,7 +1247,7 @@ def test_task_allow_crewai_trigger_context_no_payload():
assert "Trigger Payload:" not in prompt
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_do_not_allow_crewai_trigger_context_for_first_task_hierarchical():
from crewai import Crew
@@ -1316,7 +1276,7 @@ def test_do_not_allow_crewai_trigger_context_for_first_task_hierarchical():
assert "Trigger Payload: Initial context data" not in first_prompt
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_first_task_auto_inject_trigger():
from crewai import Crew
@@ -1349,7 +1309,7 @@ def test_first_task_auto_inject_trigger():
assert "Trigger Payload:" not in second_prompt
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_ensure_first_task_allow_crewai_trigger_context_is_false_does_not_inject():
from crewai import Crew
@@ -1554,7 +1514,7 @@ def test_agent_with_additional_kwargs():
assert agent.llm.frequency_penalty == 0.1
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call():
llm = LLM(model="gpt-3.5-turbo")
messages = [{"role": "user", "content": "Say 'Hello, World!'"}]
@@ -1563,7 +1523,7 @@ def test_llm_call():
assert "Hello, World!" in response
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_with_error():
llm = LLM(model="non-existent-model")
messages = [{"role": "user", "content": "This should fail"}]
@@ -1572,7 +1532,7 @@ def test_llm_call_with_error():
llm.call(messages)
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_handle_context_length_exceeds_limit():
# Import necessary modules
from crewai.utilities.agent_utils import handle_context_length
@@ -1625,7 +1585,7 @@ def test_handle_context_length_exceeds_limit():
mock_summarize.assert_called_once()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_handle_context_length_exceeds_limit_cli_no():
agent = Agent(
role="test role",
@@ -1700,7 +1660,7 @@ def test_agent_with_all_llm_attributes():
assert agent.llm.api_key == "sk-your-api-key-here"
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_with_all_attributes():
llm = LLM(
model="gpt-3.5-turbo",
@@ -1717,7 +1677,7 @@ def test_llm_call_with_all_attributes():
assert "STOP" not in response
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_ollama_llama3():
agent = Agent(
role="test role",
@@ -1738,7 +1698,7 @@ def test_agent_with_ollama_llama3():
assert "Llama3" in response or "AI" in response or "language model" in response
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_with_ollama_llama3():
llm = LLM(
model="ollama/llama3.2:3b",
@@ -1757,7 +1717,7 @@ def test_llm_call_with_ollama_llama3():
assert "Llama3" in response or "AI" in response or "language model" in response
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execute_task_basic():
agent = Agent(
role="test role",
@@ -1776,7 +1736,7 @@ def test_agent_execute_task_basic():
assert "4" in result
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execute_task_with_context():
agent = Agent(
role="test role",
@@ -1798,7 +1758,7 @@ def test_agent_execute_task_with_context():
assert "fox" in result.lower() and "dog" in result.lower()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execute_task_with_tool():
@tool
def dummy_tool(query: str) -> str:
@@ -1823,7 +1783,7 @@ def test_agent_execute_task_with_tool():
assert "Dummy result for: test query" in result
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execute_task_with_custom_llm():
agent = Agent(
role="test role",
@@ -1844,7 +1804,7 @@ def test_agent_execute_task_with_custom_llm():
)
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execute_task_with_ollama():
agent = Agent(
role="test role",
@@ -1864,7 +1824,7 @@ def test_agent_execute_task_with_ollama():
assert "AI" in result or "artificial intelligence" in result.lower()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_knowledge_sources():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
@@ -1896,7 +1856,7 @@ def test_agent_with_knowledge_sources():
assert "red" in result.raw.lower()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
@@ -1944,7 +1904,7 @@ def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold():
)
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold_default():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
@@ -1993,7 +1953,7 @@ def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold_defau
)
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_knowledge_sources_extensive_role():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
@@ -2029,7 +1989,7 @@ def test_agent_with_knowledge_sources_extensive_role():
assert "red" in result.raw.lower()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_knowledge_sources_works_with_copy():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
@@ -2068,7 +2028,7 @@ def test_agent_with_knowledge_sources_works_with_copy():
assert isinstance(agent_copy.llm, BaseLLM)
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_knowledge_sources_generate_search_query():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
@@ -2157,14 +2117,15 @@ def test_agent_with_only_crewai_knowledge():
goal="Provide information based on knowledge sources",
backstory="You have access to specific knowledge sources.",
llm=LLM(
model="gpt-4o-mini",
model="openrouter/openai/gpt-4o-mini",
api_key=os.getenv("OPENROUTER_API_KEY"),
),
)
# Create a task that requires the agent to use the knowledge
task = Task(
description="What is Vidit's favorite color?",
expected_output="Vidit's favorite color.",
expected_output="Vidit's favorclearite color.",
agent=agent,
)
@@ -2202,7 +2163,7 @@ def test_agent_knowledege_with_crewai_knowledge():
crew_knowledge.query.assert_called_once()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_litellm_auth_error_handling():
"""Test that LiteLLM authentication errors are handled correctly and not retried."""
from litellm import AuthenticationError as LiteLLMAuthenticationError
@@ -2331,7 +2292,7 @@ def test_litellm_anthropic_error_handling():
mock_llm_call.assert_called_once()
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_get_knowledge_search_query():
"""Test that _get_knowledge_search_query calls the LLM with the correct prompts."""
from crewai.utilities.i18n import I18N

View File

@@ -1,9 +1,18 @@
"""Test A2A wrapper is only applied when a2a is passed to Agent."""
from unittest.mock import patch
import pytest
from crewai import Agent
from crewai.a2a.config import A2AConfig
import a2a # noqa: F401
try:
import a2a # noqa: F401
A2A_SDK_INSTALLED = True
except ImportError:
A2A_SDK_INSTALLED = False
def test_agent_without_a2a_has_no_wrapper():
@@ -18,6 +27,10 @@ def test_agent_without_a2a_has_no_wrapper():
assert callable(agent.execute_task)
@pytest.mark.skipif(
True,
reason="Requires a2a-sdk to be installed. This test verifies wrapper is applied when a2a is set.",
)
def test_agent_with_a2a_has_wrapper():
"""Verify that agents with a2a get the wrapper applied."""
a2a_config = A2AConfig(
@@ -36,6 +49,7 @@ def test_agent_with_a2a_has_wrapper():
assert callable(agent.execute_task)
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_agent_with_a2a_creates_successfully():
"""Verify that creating an agent with a2a succeeds and applies wrapper."""
a2a_config = A2AConfig(
@@ -75,6 +89,7 @@ def test_multiple_agents_without_a2a():
assert callable(agent2.execute_task)
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_wrapper_is_applied_differently_per_instance():
"""Verify that agents with and without a2a have different execute_task methods."""
agent_without_a2a = Agent(

View File

@@ -70,7 +70,7 @@ class ResearchResult(BaseModel):
sources: list[str] = Field(description="List of sources used")
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.parametrize("verbose", [True, False])
def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose):
"""Test that LiteAgent is created with the correct parameters when Agent.kickoff() is called."""
@@ -130,7 +130,7 @@ def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose):
assert created_lite_agent["response_format"] == TestResponse
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_lite_agent_with_tools():
"""Test that Agent can use tools."""
# Create a LiteAgent with tools
@@ -174,7 +174,7 @@ def test_lite_agent_with_tools():
assert event.tool_name == "search_web"
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_lite_agent_structured_output():
"""Test that Agent can return a simple structured output."""
@@ -217,7 +217,7 @@ def test_lite_agent_structured_output():
return result
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_lite_agent_returns_usage_metrics():
"""Test that LiteAgent returns usage metrics."""
llm = LLM(model="gpt-4o-mini")
@@ -238,7 +238,7 @@ def test_lite_agent_returns_usage_metrics():
assert result.usage_metrics["total_tokens"] > 0
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_lite_agent_returns_usage_metrics_async():
"""Test that LiteAgent returns usage metrics when run asynchronously."""
@@ -261,6 +261,25 @@ async def test_lite_agent_returns_usage_metrics_async():
assert result.usage_metrics["total_tokens"] > 0
class TestFlow(Flow):
"""A test flow that creates and runs an agent."""
def __init__(self, llm, tools):
self.llm = llm
self.tools = tools
super().__init__()
@start()
def start(self):
agent = Agent(
role="Test Agent",
goal="Test Goal",
backstory="Test Backstory",
llm=self.llm,
tools=self.tools,
)
return agent.kickoff("Test query")
def verify_agent_parent_flow(result, agent, flow):
"""Verify that both the result and agent have the correct parent flow."""
@@ -314,7 +333,7 @@ def test_sets_parent_flow_when_inside_flow():
assert captured_agent.parent_flow is flow
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_guardrail_is_called_using_string():
guardrail_events = defaultdict(list)
from crewai.events.event_types import (
@@ -363,12 +382,12 @@ def test_guardrail_is_called_using_string():
assert not guardrail_events["completed"][0].success
assert guardrail_events["completed"][1].success
assert (
"top 10 best Brazilian soccer players" in result.raw or
"Brazilian players" in result.raw
"Here are the top 10 best soccer players in the world, focusing exclusively on Brazilian players"
in result.raw
)
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_guardrail_is_called_using_callable():
guardrail_events = defaultdict(list)
from crewai.events.event_types import (
@@ -414,7 +433,7 @@ def test_guardrail_is_called_using_callable():
assert "Pelé - Santos, 1958" in result.raw
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_guardrail_reached_attempt_limit():
guardrail_events = defaultdict(list)
from crewai.events.event_types import (
@@ -468,7 +487,7 @@ def test_guardrail_reached_attempt_limit():
assert not guardrail_events["completed"][2].success
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_output_when_guardrail_returns_base_model():
class Player(BaseModel):
name: str
@@ -559,7 +578,7 @@ def test_lite_agent_with_custom_llm_and_guardrails():
assert result2.raw == "Modified by guardrail"
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_lite_agent_with_invalid_llm():
"""Test that LiteAgent raises proper error when create_llm returns None."""
with patch("crewai.lite_agent.create_llm", return_value=None):
@@ -575,7 +594,7 @@ def test_lite_agent_with_invalid_llm():
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
@patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get")
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_kickoff_with_platform_tools(mock_get):
"""Test that Agent.kickoff() properly integrates platform tools with LiteAgent"""
mock_response = Mock()
@@ -617,7 +636,7 @@ def test_agent_kickoff_with_platform_tools(mock_get):
@patch.dict("os.environ", {"EXA_API_KEY": "test_exa_key"})
@patch("crewai.agent.Agent._get_external_mcp_tools")
@pytest.mark.vcr()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_kickoff_with_mcp_tools(mock_get_mcp_tools):
"""Test that Agent.kickoff() properly integrates MCP tools with LiteAgent"""
# Setup mock MCP tools - create a proper BaseTool instance
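The recurring change in this file swaps the bare @pytest.mark.vcr() marker for @pytest.mark.vcr(filter_headers=["authorization"]), so the recorded cassettes never persist the OpenAI bearer token. A minimal sketch of that pattern under pytest-recording (which forwards the marker kwargs to VCR.py); the httpbin.org endpoint is only illustrative and not part of this repository's tests:

import pytest
import requests


@pytest.mark.vcr(filter_headers=["authorization"])
def test_authorization_header_is_not_recorded():
    # On the first run VCR records the HTTP exchange to a YAML cassette;
    # filter_headers drops the Authorization header before the cassette is
    # written, so later replays work without a real secret.
    response = requests.get(
        "https://httpbin.org/headers",
        headers={"Authorization": "Bearer not-a-real-token"},
    )
    assert response.status_code == 200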

View File

@@ -0,0 +1,237 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. An agent
created for testing purposes\nYour personal goal is: Complete test tasks successfully\n\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "Complete this task successfully"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '583'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFNNb9swDL3nVxA6J0U+HKTNbd0woMAOw7Bu6LbCUCXa1iqLgkgnzYr8
98FKWqdbB+wiQHx81OMj9TgCUM6qNSjTaDFt9JNL+TZ7N/dfrusPN01NyV6vPk3f/mrl5vLrXI17
Bt39RCNPrDNDbfQojsIBNgm1YF91tlrOl+fzxXKWgZYs+p5WR5kUNGldcJP5dF5MpqvJ7PzIbsgZ
ZLWG7yMAgMd89jqDxQe1hun4KdIis65RrZ+TAFQi30eUZnYsOogaD6ChIBiy9M8NdXUja7iCQFsw
OkDtNgga6l4/6MBbTAA/wnsXtIc3+b6Gjx41I8REG2cRWoStkwakQeCIxlXOgEXRzjNQgvzigwBV
OUU038OOOgiIFhr0MdPHoIOFK9g67wEDdwlBCI7OIjgB7oxB5qrzfpeznxRokIZS3wwk5EiB8ey0
54RVx7r3PXTenwA6BBLdzy27fXtE9s/+eqpjojv+g6oqFxw3ZULNFHovWSiqjO5HALd5jt2L0aiY
qI1SCt1jfu7i4lBODdszgEVxBIVE+yE+KxbjV8qVR79PFkEZbRq0A3XYGt1ZRyfA6KTpv9W8VvvQ
uAv1/5QfAGMwCtoyJrTOvOx4SEvYf65/pT2bnAUrxrRxBktxmPpBWKx05w8rr3jHgm1ZuVBjiskd
9r6K5aLQy0LjxcKo0X70GwAA//8DAMz2wVUFBAAA
headers:
CF-RAY:
- 95f93ea9af627e0b-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 15 Jul 2025 12:25:54 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=GRZmZLrjW5ZRHNmUJa4ccrMcy20D1rmeqK6Ptlv0mRY-1752582354-1.0.1.1-xKd_yga48Eedech5TRlThlEpDgsB2whxkWHlCyAGOVMqMcvH1Ju9FdXYbuQ9NdUQcVxPLgiGM35lYhqSLVQiXDyK01dnyp2Gvm560FBN9DY;
path=/; expires=Tue, 15-Jul-25 12:55:54 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=MYqswpSR7sqr4kGp6qZVkaL7HDYwMiww49PeN9QBP.A-1752582354973-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '4047'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '4440'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999885'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_5704c0f206a927ddc12aa1a19b612a75
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are an expert evaluator
assessing how well an AI agent''s output aligns with its assigned task goal.\n\nScore
the agent''s goal alignment on a scale from 0-10 where:\n- 0: Complete misalignment,
agent did not understand or attempt the task goal\n- 5: Partial alignment, agent
attempted the task but missed key requirements\n- 10: Perfect alignment, agent
fully satisfied all task requirements\n\nConsider:\n1. Did the agent correctly
interpret the task goal?\n2. Did the final output directly address the requirements?\n3.
Did the agent focus on relevant aspects of the task?\n4. Did the agent provide
all requested information or deliverables?\n\nReturn your evaluation as JSON
with fields ''score'' (number) and ''feedback'' (string).\n"}, {"role": "user",
"content": "\nAgent role: Test Agent\nAgent goal: Complete test tasks successfully\n\n\nAgent''s
final output:\nPlease provide me with the specific details or context of the
task you need help with, and I will ensure to complete it successfully and provide
a thorough response.\n\nEvaluate how well the agent''s output aligns with the
assigned task goal.\n"}], "model": "gpt-4o-mini", "stop": []}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1196'
content-type:
- application/json
cookie:
- __cf_bm=GRZmZLrjW5ZRHNmUJa4ccrMcy20D1rmeqK6Ptlv0mRY-1752582354-1.0.1.1-xKd_yga48Eedech5TRlThlEpDgsB2whxkWHlCyAGOVMqMcvH1Ju9FdXYbuQ9NdUQcVxPLgiGM35lYhqSLVQiXDyK01dnyp2Gvm560FBN9DY;
_cfuvid=MYqswpSR7sqr4kGp6qZVkaL7HDYwMiww49PeN9QBP.A-1752582354973-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xUy27bQAy8+yuIPdtGbMdN4FvbSxM0QIsEKNA6MJhdSmK82hWWVFwj8L8XKz/k
9AH0ogOHnOFjVq8DAMPOLMDYCtXWjR990O+TT7dfZs/v5OtFy/ef7++mxfu7j83t/cONGeaK+PRM
Vo9VYxvrxpNyDHvYJkKlzDq5mk/n19PZfN4BdXTkc1nZ6OgyjmoOPJpeTC9HF1ejyfWhuopsScwC
fgwAAF67b+4zOPppFnAxPEZqEsGSzOKUBGBS9DliUIRFMagZ9qCNQSl0rb8uA8DSiI2JlmYB0+E+
UBC5J7TrHFuah4oASwoKjh2EqOCojkE0oRIgWE+YoA2OUhZzHEqIBWhFoChrKCP6IWwqthWwgEY4
bItASbRLEpDWWhIpWu+3Y7gJooRuCKyAsiYHRUxQx0TgSJG9DIGDY4ua5RA82nVW5cDKqPxCWYhC
iSXBhrU69TOGbxV7ysxSxY0Awoa951AGkq69/do67QLZk8vBJsUXdgQYtoBWW/SQSJoYpFPq2Ptp
MLjTttC51DFXVIPjRFb9drw0y7A7v0uiohXM3git92cAhhAVs7c6RzwekN3JAz6WTYpP8lupKTiw
VKtEKDHke4vGxnTobgDw2HmtfWMf06RYN7rSuKZObjo7eM30Fu/R6yOoUdH38dnkCLzhWx1ud+ZW
Y9FW5PrS3trYOo5nwOBs6j+7+Rv3fnIO5f/Q94C11Ci5VZPIsX07cZ+WKP8B/pV22nLXsBFKL2xp
pUwpX8JRga3fv0sjW1GqVwWHklKTuHuc+ZKD3eAXAAAA//8DADksFsafBAAA
headers:
CF-RAY:
- 95f93ec73a1c7e0b-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 15 Jul 2025 12:25:57 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1544'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '1546'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999732'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_44930ba12ad8d1e3f0beed1d5e3d8b0c
status:
code: 200
message: OK
version: 1
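The string: !!binary | blocks in the cassette above are the gzip-compressed JSON bodies returned by the API (the recorded headers show Content-Encoding: gzip), stored as base64 inside the YAML. A small sketch, assuming PyYAML and a hypothetical local cassette path, for decoding one of these bodies by hand when inspecting a recording:

import gzip
import json

import yaml

# "cassette.yaml" is a placeholder; substitute the cassette file to inspect.
with open("cassette.yaml") as f:
    cassette = yaml.safe_load(f)

# yaml.safe_load turns the !!binary tag into bytes; the body was recorded
# with Content-Encoding: gzip, so decompress before parsing the JSON.
raw = cassette["interactions"][0]["response"]["body"]["string"]
body = json.loads(gzip.decompress(raw).decode("utf-8"))
print(body["choices"][0]["message"]["content"])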

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,224 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. An agent
created for testing purposes\nYour personal goal is: Complete test tasks successfully\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Test task description\n\nThis is the expected criteria
for your final answer: Expected test output\nyou MUST return the actual complete
content as the final answer, not a summary.\n\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '879'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFTBbhtHDL3rK4g5rwRbtaNYt9RoEaNoUaBODm0DgZnh7jKe5WyHXDmO
4X8vZiRLcupDLwvsPPLxPQ45jzMAx8GtwfkezQ9jnP9oeLv98N5+vfl9+4v89Mf76+XV7XDz8Yc/
r39T15SM9PkLeXvOWvg0jJGMk+xgnwmNCuv56nJ5+XZ1tbqswJACxZLWjTa/SPOBhefLs+XF/Gw1
P3+7z+4Te1K3hr9mAACP9Vt0SqCvbg1nzfPJQKrYkVsfggBcTrGcOFRlNRRzzRH0SYykSr8BSffg
UaDjLQFCV2QDit5TBvhbfmbBCO/q/xpue1ZgBesJ6OtI3iiAkRqkycbJGrjv2ffgk5S6CqkFhECG
HClAIPWZx9Kkgtz3aJVq37vChXoH2qcpBogp3UHkO1rAbU/QViW7Os8hLD5OgQBjBCFfOpEfgKVN
ecBSpoFAQxK1jMbSgY+Y2R6aWjJTT6K8JSHVBlACYOgpk3gCS4DyADqS55YpQDdxoMhCuoCbgwKf
tpSB0PeAJdaKseKpOsn0z8SZBhJrgESnXERY8S0JRsxWulkoilkKkDJ0JJQx8jcKi13DX3pWyuWm
FPDQN8jU7mW3KRfdSaj2r5ZLMEmgXOYg7K5OlcQYI1Cs4vSFavSVmLWnsDgdnEztpFiGV6YYTwAU
SVYbXkf20x55OgxpTN2Y02f9LtW1LKz9JhNqkjKQaml0FX2aAXyqyzC9mG835jSMtrF0R7Xc+Zvz
HZ877uARvXqzBy0ZxuP58nLVvMK32Q2rnqyT8+h7CsfU4+7hFDidALMT1/9V8xr3zjlL93/oj4D3
NBqFzZgpsH/p+BiW6Utd0dfDDl2ugl2ZK/a0MaZcbiJQi1PcPRxOH9Ro2LQsHeUxc309yk3Onmb/
AgAA//8DAAbYfvVABQAA
headers:
CF-RAY:
- 95f9c7ffa8331b11-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 15 Jul 2025 13:59:38 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=J_xe1AP.B5P6D2GVMCesyioeS5E9DnYT34rbwQUefFc-1752587978-1.0.1.1-5Dflk5cAj6YCsOSVbCFWWSpXpw_mXsczIdzWzs2h2OwDL01HQbduE5LAToy67sfjFjHeeO4xRrqPLUQpySy2QqyHXbI_fzX4UAt3.UdwHxU;
path=/; expires=Tue, 15-Jul-25 14:29:38 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=0rTD8RMpxBQQy42jzmum16_eoRtWNfaZMG_TJkhGS7I-1752587978437-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '2623'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '2626'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999813'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_ccc347e91010713379c920aa0efd1f4f
status:
code: 200
message: OK
- request:
body: '{"trace_id": "b0237c14-8cd1-4453-920d-608a63d4b7ef", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "crew", "flow_name": null, "crewai_version": "1.0.0b2", "privacy_level":
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-10-18T15:21:00.300365+00:00"},
"ephemeral_trace_id": "b0237c14-8cd1-4453-920d-608a63d4b7ef"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '490'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.0.0b2
X-Crewai-Organization-Id:
- 60577da1-895c-4675-8135-62e9010bdcf3
X-Crewai-Version:
- 1.0.0b2
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
response:
body:
string: '{"id":"703e1e1b-7cca-4cc6-9d03-95d5ab7461e2","ephemeral_trace_id":"b0237c14-8cd1-4453-920d-608a63d4b7ef","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.0.0b2","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.0.0b2","privacy_level":"standard"},"created_at":"2025-10-18T15:21:01.551Z","updated_at":"2025-10-18T15:21:01.551Z","access_code":"TRACE-91322fd9f9","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '519'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 18 Oct 2025 15:21:01 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"9e9becfaa0607314159093ffcadb0713"
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 7dd520cd-8e74-4648-968b-90b1dc2e81d8
x-runtime:
- '0.099253'
x-xss-protection:
- 1; mode=block
status:
code: 201
message: Created
version: 1

View File

@@ -0,0 +1,470 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
cookie:
- __cf_bm=ePO5hy0kEoADCuKcboFy1iS1qckCE5KCpifQaXnlomM-1754508545-1.0.1.1-ieWfjcdIxQIXGfaMizvmgTvZPRFehqDXliegaOT7EO.kt7KSSFGmNDcC35_D9hOhE.fJ5K302uX0snQF3nLaapds2dqgGbNcsyFPOKNvAdI;
_cfuvid=NaXWifUGChHp6Ap1mvfMrNzmO4HdzddrqXkSR9T.hYo-1754508545647-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLBbpwwEL3zFSOflwqykE25RZWiRuq9hzZCEzOAE+NxbbPbJtp/rwyb
hU1bqRck5s17fm9mXhMAoRpRgZA9BjlYnX7K6dq/jNkXaV9uyx/9vfx617Ed9mP+uRObyODHJ5Lh
jfVB8mA1BcVmhqUjDBRV811ZlNlNWVxPwMAN6UjrbEgLTgdlVHqVXRVptkvzmxO7ZyXJiwq+JQAA
r9M3+jQN/RQVZJu3ykDeY0eiOjcBCMc6VgR6r3xAE8RmASWbQGayfg+GDyDRQKf2BAhdtA1o/IEc
wHdzpwxquJ3+K+hJa4YDO92sBR21o8cYyoxarwA0hgPGoUxRHk7I8Wxec2cdP/p3VNEqo3xfO0LP
Jhr1ga2Y0GMC8DANabzILazjwYY68DNNz+XlbtYTy25W6PYEBg6oV/XdabSXenVDAZX2qzELibKn
ZqEuO8GxUbwCklXqP938TXtOrkz3P/ILICXZQE1tHTVKXiZe2hzF0/1X23nKk2Hhye2VpDoocnET
DbU46vmghP/lAw11q0xHzjo1X1Vr622BZYH0cStFckx+AwAA//8DAMHQtj5jAwAA
headers:
CF-RAY:
- 96b0f0f0ac9e7ad9-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Aug 2025 19:29:07 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '653'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '667'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999830'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_3f500b79ab1a400ea9e26d0f12e890bb
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '797'
content-type:
- application/json
cookie:
- __cf_bm=f59gEPi_nA3TTxtjbKaSQpvkTwezaAqOvqfxiGzRnVQ-1754508546-1.0.1.1-JrSaytxVIQSVE00I.vyGj7d4HJbbMV6R9fWPJbkDKu0Y8ueMRzTwTUnfz0YzP5nsZX5oxoE6WlmFxOuz0rRuq9YhZZsO_TbaFBOFk1jGK9U;
_cfuvid=3D66v3.J_RcVoYy9dlF.jHwq1zTIm842xynZxzSy1Wc-1754508546352-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '200.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLBatwwEL37K6Y622V3Y8eJb6FQ2kIPoS0EmmAm8tirRNYISc52Cfvv
RfZm7bQp9GLwvHlP783McwIgVCMqEHKLQfZWZx/WdB42Nw9Xtux3dPMNr89/fPm+f7zct9dfRRoZ
fP9AMryw3kvuraag2EywdISBouq6LPJidVHk5Qj03JCOtM6GLOesV0Zlm9Umz1Zltr44sresJHlR
wc8EAOB5/EafpqFfooJV+lLpyXvsSFSnJgDhWMeKQO+VD2iCSGdQsglkRuufwfAOJBro1BMBQhdt
Axq/Iwdwaz4qgxquxv8KPpHWnMKOnW7eLSUdtYPHGMsMWi8ANIYDxrGMYe6OyOFkX3NnHd/7P6ii
VUb5be0IPZto1Qe2YkQPCcDdOKbhVXJhHfc21IEfaXxuXZSTnpi3s0SPYOCAelEvN+kbenVDAZX2
i0ELiXJLzUydt4JDo3gBJIvUf7t5S3tKrkz3P/IzICXZQE1tHTVKvk48tzmKx/uvttOUR8PCk3tS
kuqgyMVNNNTioKeTEn7vA/V1q0xHzjo13VVr67Mcixzp8kyK5JD8BgAA//8DAB06pnJlAwAA
headers:
CF-RAY:
- 96b0f0f54d6aeb2c-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Aug 2025 19:29:08 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '809'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '823'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999827'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_806f7071fb664da48953f5b216b56d9a
status:
code: 200
message: OK
- request:
body: '{"trace_id": "eb9e0ee1-15ed-4044-b84b-f17e493a1e28", "execution_type":
"crew", "execution_context": {"crew_fingerprint": null, "crew_name": "crew",
"flow_name": "Unknown Flow", "crewai_version": "0.152.0", "privacy_level": "standard"},
"execution_metadata": {"expected_duration_estimate": 300, "agent_count": 0,
"task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-08-06T19:30:52.210701+00:00"}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '413'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.152.0
X-Crewai-Organization-Id:
- d3a3d10c-35db-423f-a7a4-c026030ba64d
X-Crewai-Version:
- 0.152.0
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
response:
body:
string: "<!DOCTYPE html>\n<html>\n<head>\n <title>The page you were looking
for doesn't exist (404)</title>\n <meta name=\"viewport\" content=\"width=device-width,initial-scale=1\">\n
\ <style>\n .rails-default-error-page {\n background-color: #EFEFEF;\n
\ color: #2E2F30;\n text-align: center;\n font-family: arial, sans-serif;\n
\ margin: 0;\n }\n\n .rails-default-error-page div.dialog {\n width:
95%;\n max-width: 33em;\n margin: 4em auto 0;\n }\n\n .rails-default-error-page
div.dialog > div {\n border: 1px solid #CCC;\n border-right-color: #999;\n
\ border-left-color: #999;\n border-bottom-color: #BBB;\n border-top:
#B00100 solid 4px;\n border-top-left-radius: 9px;\n border-top-right-radius:
9px;\n background-color: white;\n padding: 7px 12% 0;\n box-shadow:
0 3px 8px rgba(50, 50, 50, 0.17);\n }\n\n .rails-default-error-page h1 {\n
\ font-size: 100%;\n color: #730E15;\n line-height: 1.5em;\n }\n\n
\ .rails-default-error-page div.dialog > p {\n margin: 0 0 1em;\n padding:
1em;\n background-color: #F7F7F7;\n border: 1px solid #CCC;\n border-right-color:
#999;\n border-left-color: #999;\n border-bottom-color: #999;\n border-bottom-left-radius:
4px;\n border-bottom-right-radius: 4px;\n border-top-color: #DADADA;\n
\ color: #666;\n box-shadow: 0 3px 8px rgba(50, 50, 50, 0.17);\n }\n
\ </style>\n</head>\n\n<body class=\"rails-default-error-page\">\n <!-- This
file lives in public/404.html -->\n <div class=\"dialog\">\n <div>\n <h1>The
page you were looking for doesn't exist.</h1>\n <p>You may have mistyped
the address or the page may have moved.</p>\n </div>\n <p>If you are
the application owner check the logs for more information.</p>\n </div>\n</body>\n</html>\n"
headers:
Connection:
- keep-alive
Content-Length:
- '1722'
Content-Type:
- text/html; charset=UTF-8
Date:
- Wed, 06 Aug 2025 19:30:52 GMT
strict-transport-security:
- max-age=63072000; includeSubDomains
x-request-id:
- bec0cf39-af9c-4955-b600-607187a7b10b
x-runtime:
- '0.005352'
status:
code: 404
message: Not Found
- request:
body: '{"version": "0.152.0", "batch_id": "eb9e0ee1-15ed-4044-b84b-f17e493a1e28",
"user_context": {"user_id": "anonymous", "organization_id": "", "session_id":
"e7e7a716-e64b-490b-96db-5c5367042114", "trace_id": "54e95e1f-cd41-4ece-9e5e-21984d635e6a"},
"execution_metadata": {"crew_name": "crew", "execution_start": "2025-08-06T19:30:52.209750+00:00",
"crewai_version": "0.152.0"}, "events": [{"event_id": "98b2a833-63fc-457c-a2e0-6ce228a8214c",
"timestamp": "2025-08-06T19:30:52.328066+00:00", "type": "crew_kickoff_started",
"event_data": {"timestamp": "2025-08-06T19:30:52.209750+00:00", "type": "crew_kickoff_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"crew_name": "crew", "crew": null, "inputs": null}}, {"event_id": "4abf563c-d35f-4a09-867d-75c1c54b3fed",
"timestamp": "2025-08-06T19:30:52.328113+00:00", "type": "crew_kickoff_started",
"event_data": {"timestamp": "2025-08-06T19:30:52.209750+00:00", "type": "crew_kickoff_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"crew_name": "crew", "crew": null, "inputs": null}}, {"event_id": "60bdc932-6b56-4f1d-bcc2-5b3b57c8dc94",
"timestamp": "2025-08-06T19:30:52.330079+00:00", "type": "task_started", "event_data":
{"task_description": "Say hello to the world", "task_name": null, "context":
"", "agent": "Test Agent"}}, {"event_id": "97761b9f-d132-47e7-8857-5fdda8c80b65",
"timestamp": "2025-08-06T19:30:52.330089+00:00", "type": "task_started", "event_data":
{"task_description": "Say hello to the world", "task_name": null, "context":
"", "agent": "Test Agent"}}, {"event_id": "cdaa47c1-448f-476e-9761-14a25f26c481",
"timestamp": "2025-08-06T19:30:52.330477+00:00", "type": "agent_execution_started",
"event_data": {"serialization_error": "Circular reference detected (id repeated)",
"object_type": "AgentExecutionStartedEvent"}}, {"event_id": "7aa43738-3903-44cf-8416-d47542469537",
"timestamp": "2025-08-06T19:30:52.330612+00:00", "type": "agent_execution_started",
"event_data": {"serialization_error": "Circular reference detected (id repeated)",
"object_type": "AgentExecutionStartedEvent"}}, {"event_id": "6eb42795-be95-4f1c-b70f-385c59483e43",
"timestamp": "2025-08-06T19:30:52.330751+00:00", "type": "llm_call_started",
"event_data": {"timestamp": "2025-08-06T19:30:52.330725+00:00", "type": "llm_call_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "1bfe4b49-ba6a-464d-9b6a-ca2eb8e965d8", "agent_id":
"5d6dbe70-71fc-42e2-ba0d-61b460542dad", "agent_role": "Test Agent", "model":
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\n..."}, {"role": "user", "content": "\nCurrent Task:
Say hello to the world\n\nThis is the expected criteria for your final answer:
hello world\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is ..."}], "tools": null, "callbacks": ["<crewai.utilities.token_counter_callback.TokenCalcHandler
object at 0x11bd10a10>"], "available_functions": null}}, {"event_id": "679e3211-ef91-45c0-9d4a-e5118e653dbd",
"timestamp": "2025-08-06T19:30:52.330798+00:00", "type": "llm_call_started",
"event_data": {"timestamp": "2025-08-06T19:30:52.330725+00:00", "type": "llm_call_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "1bfe4b49-ba6a-464d-9b6a-ca2eb8e965d8", "agent_id":
"5d6dbe70-71fc-42e2-ba0d-61b460542dad", "agent_role": "Test Agent", "model":
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\n..."}, {"role": "user", "content": "\nCurrent Task:
Say hello to the world\n\nThis is the expected criteria for your final answer:
hello world\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is ..."}], "tools": null, "callbacks": ["<crewai.utilities.token_counter_callback.TokenCalcHandler
object at 0x11bd10a10>"], "available_functions": null}}, {"event_id": "911c67ea-125b-4adf-87a5-4a9265575f93",
"timestamp": "2025-08-06T19:30:52.335757+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-08-06T19:30:52.335728+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "1bfe4b49-ba6a-464d-9b6a-ca2eb8e965d8", "agent_id":
"5d6dbe70-71fc-42e2-ba0d-61b460542dad", "agent_role": "Test Agent", "messages":
[{"role": "system", "content": "You are Test Agent. Test backstory\nYour personal
goal is: Test goal\nTo give my best complete final answer to the task respond
using the exact following format:\n\nThought: I now can give a great answer\n..."},
{"role": "user", "content": "\nCurrent Task: Say hello to the world\n\nThis
is the expected criteria for your final answer: hello world\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is ..."}], "response": "I now can give a great answer \nFinal Answer: hello
world", "call_type": "<LLMCallType.LLM_CALL: ''llm_call''>", "response_cost":
3.255e-05, "model": "gpt-4o-mini"}}, {"event_id": "1c93586f-82b9-4999-adda-78c8010b59f6",
"timestamp": "2025-08-06T19:30:52.335800+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-08-06T19:30:52.335728+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "1bfe4b49-ba6a-464d-9b6a-ca2eb8e965d8", "agent_id":
"5d6dbe70-71fc-42e2-ba0d-61b460542dad", "agent_role": "Test Agent", "messages":
[{"role": "system", "content": "You are Test Agent. Test backstory\nYour personal
goal is: Test goal\nTo give my best complete final answer to the task respond
using the exact following format:\n\nThought: I now can give a great answer\n..."},
{"role": "user", "content": "\nCurrent Task: Say hello to the world\n\nThis
is the expected criteria for your final answer: hello world\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is ..."}], "response": "I now can give a great answer \nFinal Answer: hello
world", "call_type": "<LLMCallType.LLM_CALL: ''llm_call''>", "response_cost":
3.255e-05, "model": "gpt-4o-mini"}}, {"event_id": "eb9d53af-ce39-4241-ab93-e40545a1ee78",
"timestamp": "2025-08-06T19:30:52.335904+00:00", "type": "agent_execution_completed",
"event_data": {"serialization_error": "Circular reference detected (id repeated)",
"object_type": "AgentExecutionCompletedEvent"}}, {"event_id": "dc71f5f8-5762-4e44-ac60-aa20b033c9f9",
"timestamp": "2025-08-06T19:30:52.335989+00:00", "type": "agent_execution_completed",
"event_data": {"serialization_error": "Circular reference detected (id repeated)",
"object_type": "AgentExecutionCompletedEvent"}}, {"event_id": "84da8fb8-9247-4718-bc85-a69033c9261f",
"timestamp": "2025-08-06T19:30:52.336082+00:00", "type": "task_completed", "event_data":
{"serialization_error": "Circular reference detected (id repeated)", "object_type":
"TaskCompletedEvent"}}, {"event_id": "c1a23877-2b87-40be-98a1-a3b2630c8657",
"timestamp": "2025-08-06T19:30:52.336107+00:00", "type": "task_completed", "event_data":
{"serialization_error": "Circular reference detected (id repeated)", "object_type":
"TaskCompletedEvent"}}, {"event_id": "c77587d7-68d6-4600-b98a-74fe58af41fc",
"timestamp": "2025-08-06T19:30:52.337164+00:00", "type": "crew_kickoff_completed",
"event_data": {"timestamp": "2025-08-06T19:30:52.337145+00:00", "type": "crew_kickoff_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"crew_name": "crew", "crew": null, "output": {"description": "Say hello to the
world", "name": null, "expected_output": "hello world", "summary": "Say hello
to the world...", "raw": "hello world", "pydantic": null, "json_dict": null,
"agent": "Test Agent", "output_format": "raw"}, "total_tokens": 170}}]}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '8300'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.152.0
X-Crewai-Organization-Id:
- d3a3d10c-35db-423f-a7a4-c026030ba64d
X-Crewai-Version:
- 0.152.0
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing
response:
body:
string: "<!DOCTYPE html>\n<html>\n<head>\n <title>The page you were looking
for doesn't exist (404)</title>\n <meta name=\"viewport\" content=\"width=device-width,initial-scale=1\">\n
\ <style>\n .rails-default-error-page {\n background-color: #EFEFEF;\n
\ color: #2E2F30;\n text-align: center;\n font-family: arial, sans-serif;\n
\ margin: 0;\n }\n\n .rails-default-error-page div.dialog {\n width:
95%;\n max-width: 33em;\n margin: 4em auto 0;\n }\n\n .rails-default-error-page
div.dialog > div {\n border: 1px solid #CCC;\n border-right-color: #999;\n
\ border-left-color: #999;\n border-bottom-color: #BBB;\n border-top:
#B00100 solid 4px;\n border-top-left-radius: 9px;\n border-top-right-radius:
9px;\n background-color: white;\n padding: 7px 12% 0;\n box-shadow:
0 3px 8px rgba(50, 50, 50, 0.17);\n }\n\n .rails-default-error-page h1 {\n
\ font-size: 100%;\n color: #730E15;\n line-height: 1.5em;\n }\n\n
\ .rails-default-error-page div.dialog > p {\n margin: 0 0 1em;\n padding:
1em;\n background-color: #F7F7F7;\n border: 1px solid #CCC;\n border-right-color:
#999;\n border-left-color: #999;\n border-bottom-color: #999;\n border-bottom-left-radius:
4px;\n border-bottom-right-radius: 4px;\n border-top-color: #DADADA;\n
\ color: #666;\n box-shadow: 0 3px 8px rgba(50, 50, 50, 0.17);\n }\n
\ </style>\n</head>\n\n<body class=\"rails-default-error-page\">\n <!-- This
file lives in public/404.html -->\n <div class=\"dialog\">\n <div>\n <h1>The
page you were looking for doesn't exist.</h1>\n <p>You may have mistyped
the address or the page may have moved.</p>\n </div>\n <p>If you are
the application owner check the logs for more information.</p>\n </div>\n</body>\n</html>\n"
headers:
Connection:
- keep-alive
Content-Length:
- '1722'
Content-Type:
- text/html; charset=UTF-8
Date:
- Wed, 06 Aug 2025 19:30:52 GMT
strict-transport-security:
- max-age=63072000; includeSubDomains
x-request-id:
- 78674bcb-6c8a-4eaf-8577-5cb27cac4089
x-runtime:
- '0.006009'
status:
code: 404
message: Not Found
version: 1

View File

@@ -0,0 +1,558 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
cookie:
- __cf_bm=ePO5hy0kEoADCuKcboFy1iS1qckCE5KCpifQaXnlomM-1754508545-1.0.1.1-ieWfjcdIxQIXGfaMizvmgTvZPRFehqDXliegaOT7EO.kt7KSSFGmNDcC35_D9hOhE.fJ5K302uX0snQF3nLaapds2dqgGbNcsyFPOKNvAdI;
_cfuvid=NaXWifUGChHp6Ap1mvfMrNzmO4HdzddrqXkSR9T.hYo-1754508545647-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJNa9wwEL37Vww6x8Wbtbu7voXQQC+lh0Jb2mAm0thWI0tCkuOWsP+9
SN6svf2AXAyeN+/pvZl5zgCYFKwGxnsMfLAqv93Q22mP+O7z9OlwUIW4LT7S+LX68nj3YWJXkWEe
fhAPL6w33AxWUZBGzzB3hIGi6mZXlVWxr6oiAYMRpCKtsyEvTT5ILfPr4rrMi12+2Z/YvZGcPKvh
WwYA8Jy+0acW9JPVkLRSZSDvsSNWn5sAmDMqVhh6L31AHdjVAnKjA+lk/T1oMwFHDZ18IkDoom1A
7SdyAN/1ndSo4Cb919CTUgYm45RYCzpqR48xlB6VWgGotQkYh5Ki3J+Q49m8Mp115sH/QWWt1NL3
jSP0RkejPhjLEnrMAO7TkMaL3Mw6M9jQBPNI6blNtZv12LKbFbo9gcEEVKv67jTaS71GUECp/GrM
jCPvSSzUZSc4CmlWQLZK/bebf2nPyaXuXiO/AJyTDSQa60hIfpl4aXMUT/d/becpJ8PMk3uSnJog
ycVNCGpxVPNBMf/LBxqaVuqOnHVyvqrWNtsSqxLpsOUsO2a/AQAA//8DAD59q5pjAwAA
headers:
CF-RAY:
- 96b0f1059ae17ad9-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Aug 2025 19:29:10 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '521'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '537'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999827'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_c94c2a416aee4c93bae1f801c8ae3e72
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '797'
content-type:
- application/json
cookie:
- __cf_bm=f59gEPi_nA3TTxtjbKaSQpvkTwezaAqOvqfxiGzRnVQ-1754508546-1.0.1.1-JrSaytxVIQSVE00I.vyGj7d4HJbbMV6R9fWPJbkDKu0Y8ueMRzTwTUnfz0YzP5nsZX5oxoE6WlmFxOuz0rRuq9YhZZsO_TbaFBOFk1jGK9U;
_cfuvid=3D66v3.J_RcVoYy9dlF.jHwq1zTIm842xynZxzSy1Wc-1754508546352-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '200.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJNa9wwEL37Vww6x2U/7GzqW2j6BYVSaE5tMBN5bKuVNYok7zaE/e9F
2u3a2ybQi8Hz5j29NzNPGYBQjahAyB6DHKzO3yzpcnf74f3nh6+rx/bT9saa2+u343D55eGmFBeR
wfc/SIY/rFeSB6spKDYHWDrCQFF1uSmLcnFVlosEDNyQjrTOhrzgfFBG5avFqsgXm3x5dWT3rCR5
UcG3DADgKX2jT9PQL1FB0kqVgbzHjkR1agIQjnWsCPRe+YAmiIsJlGwCmWT9IxjegUQDndoSIHTR
NqDxO3IA3807ZVDDdfqvoCetGXbsdDMXdNSOHmMoM2o9A9AYDhiHkqLcHZH9ybzmzjq+939RRauM
8n3tCD2baNQHtiKh+wzgLg1pPMstrOPBhjrwT0rPLcvNQU9Mu5mh6yMYOKCe1TfH0Z7r1Q0FVNrP
xiwkyp6aiTrtBMdG8QzIZqn/dfOc9iG5Mt3/yE+AlGQDNbV11Ch5nnhqcxRP96W205STYeHJbZWk
OihycRMNtTjqw0EJ/+gDDXWrTEfOOnW4qtbW6wLLAun1Wopsn/0GAAD//wMASJr3q2MDAAA=
headers:
CF-RAY:
- 96b0f109ae7aeb2c-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Aug 2025 19:29:11 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '499'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '511'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999830'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_dece4be9f37c4d64b324ab36d1ed9cf4
status:
code: 200
message: OK
- request:
body: '{"trace_id": "ff5ac8a9-dec2-4b73-8928-3dd06d12051f", "execution_type":
"crew", "execution_context": {"crew_fingerprint": null, "crew_name": "crew",
"flow_name": "Unknown Flow", "crewai_version": "0.152.0", "privacy_level": "standard"},
"execution_metadata": {"expected_duration_estimate": 300, "agent_count": 0,
"task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-08-06T19:30:51.727534+00:00"}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '413'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.152.0
X-Crewai-Organization-Id:
- d3a3d10c-35db-423f-a7a4-c026030ba64d
X-Crewai-Version:
- 0.152.0
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
response:
body:
string: "<!DOCTYPE html>\n<html>\n<head>\n <title>The page you were looking
for doesn't exist (404)</title>\n <meta name=\"viewport\" content=\"width=device-width,initial-scale=1\">\n
\ <style>\n .rails-default-error-page {\n background-color: #EFEFEF;\n
\ color: #2E2F30;\n text-align: center;\n font-family: arial, sans-serif;\n
\ margin: 0;\n }\n\n .rails-default-error-page div.dialog {\n width:
95%;\n max-width: 33em;\n margin: 4em auto 0;\n }\n\n .rails-default-error-page
div.dialog > div {\n border: 1px solid #CCC;\n border-right-color: #999;\n
\ border-left-color: #999;\n border-bottom-color: #BBB;\n border-top:
#B00100 solid 4px;\n border-top-left-radius: 9px;\n border-top-right-radius:
9px;\n background-color: white;\n padding: 7px 12% 0;\n box-shadow:
0 3px 8px rgba(50, 50, 50, 0.17);\n }\n\n .rails-default-error-page h1 {\n
\ font-size: 100%;\n color: #730E15;\n line-height: 1.5em;\n }\n\n
\ .rails-default-error-page div.dialog > p {\n margin: 0 0 1em;\n padding:
1em;\n background-color: #F7F7F7;\n border: 1px solid #CCC;\n border-right-color:
#999;\n border-left-color: #999;\n border-bottom-color: #999;\n border-bottom-left-radius:
4px;\n border-bottom-right-radius: 4px;\n border-top-color: #DADADA;\n
\ color: #666;\n box-shadow: 0 3px 8px rgba(50, 50, 50, 0.17);\n }\n
\ </style>\n</head>\n\n<body class=\"rails-default-error-page\">\n <!-- This
file lives in public/404.html -->\n <div class=\"dialog\">\n <div>\n <h1>The
page you were looking for doesn't exist.</h1>\n <p>You may have mistyped
the address or the page may have moved.</p>\n </div>\n <p>If you are
the application owner check the logs for more information.</p>\n </div>\n</body>\n</html>\n"
headers:
Connection:
- keep-alive
Content-Length:
- '1722'
Content-Type:
- text/html; charset=UTF-8
Date:
- Wed, 06 Aug 2025 19:30:51 GMT
strict-transport-security:
- max-age=63072000; includeSubDomains
x-request-id:
- 0b6a5ff5-789e-4c0d-a10b-316fecc0e905
x-runtime:
- '0.005528'
status:
code: 404
message: Not Found
- request:
body: '{"version": "0.152.0", "batch_id": "ff5ac8a9-dec2-4b73-8928-3dd06d12051f",
"user_context": {"user_id": "anonymous", "organization_id": "", "session_id":
"aabc00e7-d423-4385-8b83-0468c03ae47b", "trace_id": "0a0586da-135c-4080-a352-dbe47bb2ac86"},
"execution_metadata": {"crew_name": "crew", "execution_start": "2025-08-06T19:30:51.726805+00:00",
"crewai_version": "0.152.0"}, "events": [{"event_id": "211eb90d-fb76-4ee5-bee7-62cc2f1d9aa8",
"timestamp": "2025-08-06T19:30:51.842887+00:00", "type": "crew_kickoff_started",
"event_data": {"timestamp": "2025-08-06T19:30:51.726805+00:00", "type": "crew_kickoff_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"crew_name": "crew", "crew": null, "inputs": null}}, {"event_id": "713e4dbd-887f-4481-a6c8-554b637848e2",
"timestamp": "2025-08-06T19:30:51.842982+00:00", "type": "crew_kickoff_started",
"event_data": {"timestamp": "2025-08-06T19:30:51.726805+00:00", "type": "crew_kickoff_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"crew_name": "crew", "crew": null, "inputs": null}}, {"event_id": "b920108c-c6fe-40d7-baa3-29c23d76a8e1",
"timestamp": "2025-08-06T19:30:51.844489+00:00", "type": "task_started", "event_data":
{"task_description": "Say hello to the world", "task_name": null, "context":
"", "agent": "Test Agent"}}, {"event_id": "96180117-d060-49ab-8327-712f230653f2",
"timestamp": "2025-08-06T19:30:51.844512+00:00", "type": "task_started", "event_data":
{"task_description": "Say hello to the world", "task_name": null, "context":
"", "agent": "Test Agent"}}, {"event_id": "82baa39d-d1ae-44f8-8f35-40646fdec793",
"timestamp": "2025-08-06T19:30:51.845195+00:00", "type": "agent_execution_started",
"event_data": {"serialization_error": "Circular reference detected (id repeated)",
"object_type": "AgentExecutionStartedEvent"}}, {"event_id": "c34d2e12-6671-4593-a45d-8742704f6ace",
"timestamp": "2025-08-06T19:30:51.845868+00:00", "type": "agent_execution_started",
"event_data": {"serialization_error": "Circular reference detected (id repeated)",
"object_type": "AgentExecutionStartedEvent"}}, {"event_id": "87d12818-f0b4-46d0-8ecc-e46afaf8eddb",
"timestamp": "2025-08-06T19:30:51.846100+00:00", "type": "llm_call_started",
"event_data": {"timestamp": "2025-08-06T19:30:51.846006+00:00", "type": "llm_call_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "7f026c34-8c77-4710-8ecb-9d4c830b9eb4", "agent_id":
"bd02dc4e-982e-481c-9358-2b4a7ac73831", "agent_role": "Test Agent", "model":
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\n..."}, {"role": "user", "content": "\nCurrent Task:
Say hello to the world\n\nThis is the expected criteria for your final answer:
hello world\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is ..."}], "tools": null, "callbacks": ["<crewai.utilities.token_counter_callback.TokenCalcHandler
object at 0x11bd13470>"], "available_functions": null}}, {"event_id": "bbfd4480-87aa-4a56-b988-2dcc9e142c20",
"timestamp": "2025-08-06T19:30:51.846155+00:00", "type": "llm_call_started",
"event_data": {"timestamp": "2025-08-06T19:30:51.846006+00:00", "type": "llm_call_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "7f026c34-8c77-4710-8ecb-9d4c830b9eb4", "agent_id":
"bd02dc4e-982e-481c-9358-2b4a7ac73831", "agent_role": "Test Agent", "model":
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\n..."}, {"role": "user", "content": "\nCurrent Task:
Say hello to the world\n\nThis is the expected criteria for your final answer:
hello world\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is ..."}], "tools": null, "callbacks": ["<crewai.utilities.token_counter_callback.TokenCalcHandler
object at 0x11bd13470>"], "available_functions": null}}, {"event_id": "25a17ec7-b2ee-4eeb-bdf5-27efffed961c",
"timestamp": "2025-08-06T19:30:52.018207+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-08-06T19:30:52.017914+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "7f026c34-8c77-4710-8ecb-9d4c830b9eb4", "agent_id":
"bd02dc4e-982e-481c-9358-2b4a7ac73831", "agent_role": "Test Agent", "messages":
[{"role": "system", "content": "You are Test Agent. Test backstory\nYour personal
goal is: Test goal\nTo give my best complete final answer to the task respond
using the exact following format:\n\nThought: I now can give a great answer\n..."},
{"role": "user", "content": "\nCurrent Task: Say hello to the world\n\nThis
is the expected criteria for your final answer: hello world\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is ..."}], "response": "I now can give a great answer \nFinal Answer: hello
world", "call_type": "<LLMCallType.LLM_CALL: ''llm_call''>", "response_cost":
3.135e-05, "model": "gpt-4o-mini"}}, {"event_id": "0ccb9b70-c5ad-4f7f-b3ee-ecfd62c2d7cc",
"timestamp": "2025-08-06T19:30:52.018273+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-08-06T19:30:52.017914+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "7f026c34-8c77-4710-8ecb-9d4c830b9eb4", "agent_id":
"bd02dc4e-982e-481c-9358-2b4a7ac73831", "agent_role": "Test Agent", "messages":
[{"role": "system", "content": "You are Test Agent. Test backstory\nYour personal
goal is: Test goal\nTo give my best complete final answer to the task respond
using the exact following format:\n\nThought: I now can give a great answer\n..."},
{"role": "user", "content": "\nCurrent Task: Say hello to the world\n\nThis
is the expected criteria for your final answer: hello world\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is ..."}], "response": "I now can give a great answer \nFinal Answer: hello
world", "call_type": "<LLMCallType.LLM_CALL: ''llm_call''>", "response_cost":
3.135e-05, "model": "gpt-4o-mini"}}, {"event_id": "d7f4440b-8f9f-4e29-a946-6d10f4bdfc3c",
"timestamp": "2025-08-06T19:30:52.018559+00:00", "type": "agent_execution_completed",
"event_data": {"serialization_error": "Circular reference detected (id repeated)",
"object_type": "AgentExecutionCompletedEvent"}}, {"event_id": "072195c3-54df-4cba-9068-b9a25bbb8d7c",
"timestamp": "2025-08-06T19:30:52.018669+00:00", "type": "agent_execution_completed",
"event_data": {"serialization_error": "Circular reference detected (id repeated)",
"object_type": "AgentExecutionCompletedEvent"}}, {"event_id": "0b6f9e85-32c9-4c62-9049-6890953e2143",
"timestamp": "2025-08-06T19:30:52.018838+00:00", "type": "task_completed", "event_data":
{"serialization_error": "Circular reference detected (id repeated)", "object_type":
"TaskCompletedEvent"}}, {"event_id": "5ff20fcb-ec10-40ac-bb90-9568aa4eb1de",
"timestamp": "2025-08-06T19:30:52.018867+00:00", "type": "task_completed", "event_data":
{"serialization_error": "Circular reference detected (id repeated)", "object_type":
"TaskCompletedEvent"}}, {"event_id": "c5a36300-3911-4d75-a660-d133a7a4be94",
"timestamp": "2025-08-06T19:30:52.020135+00:00", "type": "crew_kickoff_completed",
"event_data": {"timestamp": "2025-08-06T19:30:52.020115+00:00", "type": "crew_kickoff_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"crew_name": "crew", "crew": null, "output": {"description": "Say hello to the
world", "name": null, "expected_output": "hello world", "summary": "Say hello
to the world...", "raw": "hello world", "pydantic": null, "json_dict": null,
"agent": "Test Agent", "output_format": "raw"}, "total_tokens": 170}}]}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '8300'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.152.0
X-Crewai-Organization-Id:
- d3a3d10c-35db-423f-a7a4-c026030ba64d
X-Crewai-Version:
- 0.152.0
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing
response:
body:
string: "<!DOCTYPE html>\n<html>\n<head>\n <title>The page you were looking
for doesn't exist (404)</title>\n <meta name=\"viewport\" content=\"width=device-width,initial-scale=1\">\n
\ <style>\n .rails-default-error-page {\n background-color: #EFEFEF;\n
\ color: #2E2F30;\n text-align: center;\n font-family: arial, sans-serif;\n
\ margin: 0;\n }\n\n .rails-default-error-page div.dialog {\n width:
95%;\n max-width: 33em;\n margin: 4em auto 0;\n }\n\n .rails-default-error-page
div.dialog > div {\n border: 1px solid #CCC;\n border-right-color: #999;\n
\ border-left-color: #999;\n border-bottom-color: #BBB;\n border-top:
#B00100 solid 4px;\n border-top-left-radius: 9px;\n border-top-right-radius:
9px;\n background-color: white;\n padding: 7px 12% 0;\n box-shadow:
0 3px 8px rgba(50, 50, 50, 0.17);\n }\n\n .rails-default-error-page h1 {\n
\ font-size: 100%;\n color: #730E15;\n line-height: 1.5em;\n }\n\n
\ .rails-default-error-page div.dialog > p {\n margin: 0 0 1em;\n padding:
1em;\n background-color: #F7F7F7;\n border: 1px solid #CCC;\n border-right-color:
#999;\n border-left-color: #999;\n border-bottom-color: #999;\n border-bottom-left-radius:
4px;\n border-bottom-right-radius: 4px;\n border-top-color: #DADADA;\n
\ color: #666;\n box-shadow: 0 3px 8px rgba(50, 50, 50, 0.17);\n }\n
\ </style>\n</head>\n\n<body class=\"rails-default-error-page\">\n <!-- This
file lives in public/404.html -->\n <div class=\"dialog\">\n <div>\n <h1>The
page you were looking for doesn't exist.</h1>\n <p>You may have mistyped
the address or the page may have moved.</p>\n </div>\n <p>If you are
the application owner check the logs for more information.</p>\n </div>\n</body>\n</html>\n"
headers:
Connection:
- keep-alive
Content-Length:
- '1722'
Content-Type:
- text/html; charset=UTF-8
Date:
- Wed, 06 Aug 2025 19:30:52 GMT
strict-transport-security:
- max-age=63072000; includeSubDomains
x-request-id:
- 9edcdee4-f720-431e-9d6d-2dbc1a7bb8fe
x-runtime:
- '0.005504'
status:
code: 404
message: Not Found
- request:
body: '{"status": "failed", "failure_reason": "Error sending events to backend"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '73'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.0.0a2
X-Crewai-Version:
- 1.0.0a2
method: PATCH
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches/None
response:
body:
string: '{"error":"bad_credentials","message":"Bad credentials"}'
headers:
Connection:
- keep-alive
Content-Length:
- '55'
Content-Type:
- application/json; charset=utf-8
Date:
- Thu, 02 Oct 2025 22:35:43 GMT
cache-control:
- no-cache
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- c8e70a94-a6bf-4629-85d8-f0ae7b0cf8e6
x-runtime:
- '0.090999'
x-xss-protection:
- 1; mode=block
status:
code: 401
message: Unauthorized
version: 1
View File
@@ -0,0 +1,470 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
cookie:
- _cfuvid=NaXWifUGChHp6Ap1mvfMrNzmO4HdzddrqXkSR9T.hYo-1754508545647-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLBbtswDL37Kzid4yFx46bxbVixtsfssB22wlAl2lEri5okJ+uK/Psg
OY3dtQV2MWA+vqf3SD5lAExJVgETWx5EZ3X++SrcY/Hrcec3l+SKP5frm/16Yx92m6/f9mwWGXR3
jyI8sz4K6qzGoMgMsHDIA0bVxaq8WCzPyrN5AjqSqCOttSFfUt4po/JiXizz+SpfXBzZW1ICPavg
RwYA8JS+0aeR+JtVkLRSpUPveYusOjUBMEc6Vhj3XvnATWCzERRkAppk/QYM7UFwA63aIXBoo23g
xu/RAfw0X5ThGj6l/wquUWuawXdyWn6YSjpses9jLNNrPQG4MRR4HEsKc3tEDif7mlrr6M7/Q2WN
Mspva4fck4lWfSDLEnrIAG7TmPoXyZl11NlQB3rA9NyiXA16bNzOFD2CgQLXk/qqmL2hV0sMXGk/
GTQTXGxRjtRxK7yXiiZANkn92s1b2kNyZdr/kR8BIdAGlLV1KJV4mXhscxiP972205STYebR7ZTA
Oih0cRMSG97r4aSYf/QBu7pRpkVnnRruqrF1eT7nzTmW5Zplh+wvAAAA//8DAGKunMhlAwAA
headers:
CF-RAY:
- 980b99a73c1c22c6-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 17 Sep 2025 21:12:11 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=Ahwkw3J9CDiluZudRgDmybz4FO07eXLz2MQDtkgfct4-1758143531-1.0.1.1-_3e8agfTZW.FPpRMLb1A2nET4OHQEGKNZeGeWT8LIiuSi8R2HWsGsJyueUyzYBYnfHqsfBUO16K1.TkEo2XiqVCaIi6pymeeQxwtXFF1wj8;
path=/; expires=Wed, 17-Sep-25 21:42:11 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=iHqLoc_2sNQLMyzfGCLtGol8vf1Y44xirzQJUuUF_TI-1758143531242-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '419'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '609'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999827'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999830'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_ece5f999e09e4c189d38e5bc08b2fad9
status:
code: 200
message: OK
- request:
body: '{"trace_id": "0bcd1cf5-5a2e-49d5-8140-f0466ad7b7ae", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "crew", "flow_name": null, "crewai_version": "1.0.0a2", "privacy_level":
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
1, "task_count": 1, "flow_method_count": 0, "execution_started_at": "2025-10-02T22:35:43.236443+00:00"},
"ephemeral_trace_id": "0bcd1cf5-5a2e-49d5-8140-f0466ad7b7ae"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '490'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.0.0a2
X-Crewai-Version:
- 1.0.0a2
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
response:
body:
string: '{"id":"4b03b659-8866-4245-8fd2-3a5263f4f893","ephemeral_trace_id":"0bcd1cf5-5a2e-49d5-8140-f0466ad7b7ae","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.0.0a2","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.0.0a2","privacy_level":"standard"},"created_at":"2025-10-02T22:35:43.372Z","updated_at":"2025-10-02T22:35:43.372Z","access_code":"TRACE-a6b7c862fc","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '519'
Content-Type:
- application/json; charset=utf-8
Date:
- Thu, 02 Oct 2025 22:35:43 GMT
cache-control:
- max-age=0, private, must-revalidate
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"3cd49b89c6bedfc5139cbdd350c30e4a"
permissions-policy:
- camera=(), microphone=(self), geolocation=()
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- ce2e7707-99da-4486-a7ca-11e12284d7a6
x-runtime:
- '0.030681'
x-xss-protection:
- 1; mode=block
status:
code: 201
message: Created
- request:
body: '{"events": [{"event_id": "f328f1d8-6067-4dc0-9f54-f40bd23381b9", "timestamp":
"2025-10-02T22:35:43.233706+00:00", "type": "crew_kickoff_started", "event_data":
{"timestamp": "2025-10-02T22:35:43.232688+00:00", "type": "crew_kickoff_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name":
"crew", "crew": null, "inputs": null}}, {"event_id": "a1323913-eb51-422c-b9b1-a02cebeb2fb4",
"timestamp": "2025-10-02T22:35:43.234420+00:00", "type": "task_started", "event_data":
{"task_description": "Say hello to the world", "expected_output": "hello world",
"task_name": "Say hello to the world", "context": "", "agent_role": "Test Agent",
"task_id": "e5063490-e2ae-47a6-a205-af4a91288e63"}}, {"event_id": "50a8abcd-bcdc-4dfa-97c2-259bf8affc88",
"timestamp": "2025-10-02T22:35:43.234639+00:00", "type": "agent_execution_started",
"event_data": {"agent_role": "Test Agent", "agent_goal": "Test goal", "agent_backstory":
"Test backstory"}}, {"event_id": "2c481296-a5e4-4a54-8dbc-d41ce102134b", "timestamp":
"2025-10-02T22:35:43.234694+00:00", "type": "llm_call_started", "event_data":
{"timestamp": "2025-10-02T22:35:43.234676+00:00", "type": "llm_call_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": "e5063490-e2ae-47a6-a205-af4a91288e63", "task_name": "Say hello to
the world", "agent_id": "65e264bb-8025-4730-a8a1-8d0a5a7a32ac", "agent_role":
"Test Agent", "from_task": null, "from_agent": null, "model": "gpt-4o-mini",
"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "tools": null, "callbacks":
["<crewai.utilities.token_counter_callback.TokenCalcHandler object at 0x36307baa0>"],
"available_functions": null}}, {"event_id": "bc04a066-3672-4406-9d65-818f9c68b670",
"timestamp": "2025-10-02T22:35:43.235725+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-10-02T22:35:43.235708+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": "e5063490-e2ae-47a6-a205-af4a91288e63", "task_name": "Say hello to
the world", "agent_id": "65e264bb-8025-4730-a8a1-8d0a5a7a32ac", "agent_role":
"Test Agent", "from_task": null, "from_agent": null, "messages": [{"role": "system",
"content": "You are Test Agent. Test backstory\nYour personal goal is: Test
goal\nTo give my best complete final answer to the task respond using the exact
following format:\n\nThought: I now can give a great answer\nFinal Answer: Your
final answer must be the great and the most complete as possible, it must be
outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role":
"user", "content": "\nCurrent Task: Say hello to the world\n\nThis is the expected
criteria for your final answer: hello world\nyou MUST return the actual complete
content as the final answer, not a summary.\n\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\n\nThought:"}], "response": "I now can give a great answer \nFinal Answer:
Hello, World!", "call_type": "<LLMCallType.LLM_CALL: ''llm_call''>", "model":
"gpt-4o-mini"}}, {"event_id": "32a554bd-7338-49b0-869a-8cbc1a9283b0", "timestamp":
"2025-10-02T22:35:43.235801+00:00", "type": "agent_execution_completed", "event_data":
{"agent_role": "Test Agent", "agent_goal": "Test goal", "agent_backstory": "Test
backstory"}}, {"event_id": "029b9923-7455-4edc-9219-8d568d344165", "timestamp":
"2025-10-02T22:35:43.235834+00:00", "type": "task_completed", "event_data":
{"task_description": "Say hello to the world", "task_name": "Say hello to the
world", "task_id": "e5063490-e2ae-47a6-a205-af4a91288e63", "output_raw": "Hello,
World!", "output_format": "OutputFormat.RAW", "agent_role": "Test Agent"}},
{"event_id": "004091a7-6ee3-498c-b18d-91285f7d14c9", "timestamp": "2025-10-02T22:35:43.236399+00:00",
"type": "crew_kickoff_completed", "event_data": {"timestamp": "2025-10-02T22:35:43.236386+00:00",
"type": "crew_kickoff_completed", "source_fingerprint": null, "source_type":
null, "fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id":
null, "agent_role": null, "crew_name": "crew", "crew": null, "output": {"description":
"Say hello to the world", "name": "Say hello to the world", "expected_output":
"hello world", "summary": "Say hello to the world...", "raw": "Hello, World!",
"pydantic": null, "json_dict": null, "agent": "Test Agent", "output_format":
"raw"}, "total_tokens": 172}}], "batch_metadata": {"events_count": 8, "batch_sequence":
1, "is_final_batch": false}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '5366'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.0.0a2
X-Crewai-Version:
- 1.0.0a2
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/0bcd1cf5-5a2e-49d5-8140-f0466ad7b7ae/events
response:
body:
string: '{"events_created":8,"ephemeral_trace_batch_id":"4b03b659-8866-4245-8fd2-3a5263f4f893"}'
headers:
Connection:
- keep-alive
Content-Length:
- '86'
Content-Type:
- application/json; charset=utf-8
Date:
- Thu, 02 Oct 2025 22:35:43 GMT
cache-control:
- max-age=0, private, must-revalidate
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"a8c7c5e3ef539604da1e89ad3d686230"
permissions-policy:
- camera=(), microphone=(self), geolocation=()
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 9431879b-bb0c-437c-bc43-f1fb8397e56e
x-runtime:
- '0.067705'
x-xss-protection:
- 1; mode=block
status:
code: 200
message: OK
- request:
body: '{"status": "completed", "duration_ms": 325, "final_event_count": 0}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '67'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.0.0a2
X-Crewai-Version:
- 1.0.0a2
method: PATCH
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/0bcd1cf5-5a2e-49d5-8140-f0466ad7b7ae/finalize
response:
body:
string: '{"id":"4b03b659-8866-4245-8fd2-3a5263f4f893","ephemeral_trace_id":"0bcd1cf5-5a2e-49d5-8140-f0466ad7b7ae","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"completed","duration_ms":325,"crewai_version":"1.0.0a2","total_events":0,"execution_context":{"crew_name":"crew","flow_name":null,"privacy_level":"standard","crewai_version":"1.0.0a2","crew_fingerprint":null},"created_at":"2025-10-02T22:35:43.372Z","updated_at":"2025-10-02T22:35:43.724Z","access_code":"TRACE-a6b7c862fc","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '520'
Content-Type:
- application/json; charset=utf-8
Date:
- Thu, 02 Oct 2025 22:35:43 GMT
cache-control:
- max-age=0, private, must-revalidate
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"0a3640b7c549a0ed48c01459623ff153"
permissions-policy:
- camera=(), microphone=(self), geolocation=()
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 5bf816aa-7226-4c61-a29f-69d31af0d964
x-runtime:
- '0.030651'
x-xss-protection:
- 1; mode=block
status:
code: 200
message: OK
version: 1
View File
@@ -0,0 +1,128 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJNj9MwEL3nV4x8blBSmrabG0ICViqCCydYRbPOJDHreIztbIFV/zty
0m1SPiQukTJv3vN7M/OUAAhVixKE7DDI3ur09dvgfa8P/FO+e/9wa/aHb4cPH9viEw5yK1aRwfdf
SYZn1gvJvdUUFJsJlo4wUFTNd8U+32zybD0CPdekI621Id1w2iuj0nW23qTZLs33Z3bHSpIXJXxO
AACexm/0aWr6LkrIVs+VnrzHlkR5aQIQjnWsCPRe+YAmiNUMSjaBzGj9FgwfQaKBVj0SILTRNqDx
R3IAX8wbZVDDq/G/hI60Zjiy0/VS0FEzeIyhzKD1AkBjOGAcyhjl7oycLuY1t9bxvf+NKhpllO8q
R+jZRKM+sBUjekoA7sYhDVe5hXXc21AFfqDxubzYTXpi3s0CfXkGAwfUi/ruPNprvaqmgEr7xZiF
RNlRPVPnneBQK14AySL1n27+pj0lV6b9H/kZkJJsoLqyjmolrxPPbY7i6f6r7TLl0bDw5B6VpCoo
cnETNTU46OmghP/hA/VVo0xLzjo1XVVjq2KbYbOlorgRySn5BQAA//8DALxsmCBjAwAA
headers:
CF-RAY:
- 980ba79a4ab5f555-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 17 Sep 2025 21:21:42 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=aMMf0fLckKHz0BLW_2lATxD.7R61uYo1ZVW8aeFbruA-1758144102-1.0.1.1-6EKM3UxpdczoiQ6VpPpqqVnY7ftnXndFRWE4vyTzVcy.CQ4N539D97Wh8Ye9EUAvpUuukhW.r5MznkXq4tPXgCCmEv44RvVz2GBAz_e31h8;
path=/; expires=Wed, 17-Sep-25 21:51:42 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=VqrtvU8.QdEHc4.1XXUVmccaCcoj_CiNfI2zhKJoGRs-1758144102566-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '308'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '620'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999827'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999830'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_fa896433021140238115972280c05651
status:
code: 200
message: OK
version: 1
View File
@@ -0,0 +1,127 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Test task\n\nThis
is the expected criteria for your final answer: test output\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '812'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLLbtswELzrKxY8W4WV+JHoVgR95NJD4UvaBgJDrSS2FJclV3bSwP9e
kHYsuU2BXghwZ2c4s8vnDEDoWpQgVCdZ9c7kNx+4v7vb7jafnrrPX25/vtObX48f1Q31m+uFmEUG
PXxHxS+sN4p6Z5A12QOsPErGqFqsl1fF4nJdzBPQU40m0lrH+YLyXludX8wvFvl8nRdXR3ZHWmEQ
JXzNAACe0xl92hofRQlJK1V6DEG2KMpTE4DwZGJFyBB0YGlZzEZQkWW0yfotWNqBkhZavUWQ0Ebb
IG3YoQf4Zt9rKw28TfcSNhgYaGA3nAl6bIYgYyg7GDMBpLXEMg4lRbk/IvuTeUOt8/QQ/qCKRlsd
usqjDGSj0cDkREL3GcB9GtJwlls4T73jiukHpueK5eKgJ8bdTNDLI8jE0kzqq/XsFb2qRpbahMmY
hZKqw3qkjjuRQ61pAmST1H+7eU37kFzb9n/kR0ApdIx15TzWWp0nHts8xq/7r7bTlJNhEdBvtcKK
Nfq4iRobOZjD/kV4Cox91WjbondeH35V46rlai6bFS6X1yLbZ78BAAD//wMAZdfoWWMDAAA=
headers:
CF-RAY:
- 980b9e0c5fa516a0-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 17 Sep 2025 21:15:11 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=w6UZxbAZgYg9EFkKPfrSbMK97MB4jfs7YyvcEmgkvak-1758143711-1.0.1.1-j7YC1nvoMKxYK0T.5G2XDF6TXUCPu_HUs4YO9v65r3NHQFIcOaHbQXX4vqabSgynL2tZy23pbZgD8Cdmxhdw9dp4zkAXhU.imP43_pw4dSE;
path=/; expires=Wed, 17-Sep-25 21:45:11 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=ij9Q8tB7sj2GczANlJ7gbXVjj6hMhz1iVb6oGHuRYu8-1758143711202-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '462'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '665'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999830'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999830'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_04536db97c8c4768a200e38c1368c176
status:
code: 200
message: OK
version: 1
View File
@@ -0,0 +1,390 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJNj9MwEL3nV4x8blC/0nRzA8QKuCBxQFrBKnLtSeLF8Vi2s11Y9b8j
O90m5UPiEinz5j2/NzPPGQBTklXARMeD6K3O33588/nm/d27T5sv6wdp1OGu/9l1T9tbWzQlW0QG
HR5QhBfWK0G91RgUmREWDnnAqLoqi/1uv19vVgnoSaKOtNaGfEt5r4zK18v1Nl+W+Wp/ZnekBHpW
wdcMAOA5faNPI/GJVbBcvFR69J63yKpLEwBzpGOFce+VD9wEtphAQSagSdY/gKEjCG6gVY8IHNpo
G7jxR3QA38ytMlzD6/RfQYdaExzJaTkXdNgMnsdQZtB6BnBjKPA4lBTl/oycLuY1tdbRwf9GZY0y
yne1Q+7JRKM+kGUJPWUA92lIw1VuZh31NtSBvmN6blWUox6bdjNDN2cwUOB6Vi/Po73WqyUGrrSf
jZkJLjqUE3XaCR+kohmQzVL/6eZv2mNyZdr/kZ8AIdAGlLV1KJW4Tjy1OYyn+6+2y5STYebRPSqB
dVDo4iYkNnzQ40Ex/8MH7OtGmRaddWq8qsbWxW7Jmx0WxQ3LTtkvAAAA//8DAIkIBqtjAwAA
headers:
CF-RAY:
- 983f8c061b6ec487-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 24 Sep 2025 04:30:32 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=JDjpnzx5y8PJaJDQcCeX6MeBt8BOGuL79pd.ca5mqvE-1758688232-1.0.1.1-5VN5hj5LzEZFfkotBaZ_dbUITo_YB7RLsFOlQc.0MdSZOsz7WhNkH.s7H700L12Yi8nHGW44BgIwCF3uWx1w4PRBqrb1IVH3FkeV.QwCTaA;
path=/; expires=Wed, 24-Sep-25 05:00:32 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=b5n8BZZDRtHA4TrxQ1RDeEdtQBzhstjP6u21LYM8L94-1758688232142-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '535'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '562'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999827'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999830'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_af61ab9d53bf400baf30c5bc5a7e2102
status:
code: 200
message: OK
- request:
body: null
headers:
Connection:
- close
Host:
- api.scarf.sh
User-Agent:
- CrewAI-Python/0.193.2
method: GET
uri: https://api.scarf.sh/v2/packages/CrewAI/crewai/docs/00f2dad1-8334-4a39-934e-003b2e1146db
response:
body:
string: ''
headers:
Connection:
- close
Date:
- Wed, 24 Sep 2025 04:47:59 GMT
Strict-Transport-Security:
- max-age=15724800; includeSubDomains
Transfer-Encoding:
- chunked
x-scarf-request-id:
- 4158376f-cb1c-46fe-a14c-dee366b955e2
status:
code: 401
message: Unauthorized
- request:
body: '{"trace_id": "06e1250e-6d88-4c64-abe5-deabde573ae1", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "crew", "flow_name": null, "crewai_version": "0.193.2", "privacy_level":
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-09-24T04:50:23.219835+00:00"}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '428'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.193.2
X-Crewai-Organization-Id:
- d3a3d10c-35db-423f-a7a4-c026030ba64d
X-Crewai-Version:
- 0.193.2
method: POST
uri: http://localhost:3000/crewai_plus/api/v1/tracing/batches
response:
body:
string: '{"error":"bad_credentials","message":"Bad credentials"}'
headers:
Content-Length:
- '55'
cache-control:
- no-cache
content-security-policy:
- 'default-src ''self'' *.crewai.com crewai.com; script-src ''self'' ''unsafe-inline''
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com
https://run.pstmn.io https://share.descript.com/; style-src ''self'' ''unsafe-inline''
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self''
data: *.crewai.com crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net; font-src ''self'' data: *.crewai.com crewai.com;
connect-src ''self'' *.crewai.com crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ ws://localhost:3036
wss://localhost:3036; frame-src ''self'' *.crewai.com crewai.com https://connect.useparagon.com/
https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/
https://www.youtube.com https://share.descript.com'
content-type:
- application/json; charset=utf-8
permissions-policy:
- camera=(), microphone=(self), geolocation=()
referrer-policy:
- strict-origin-when-cross-origin
server-timing:
- cache_read.active_support;dur=0.37, sql.active_record;dur=30.81, cache_generate.active_support;dur=29.14,
cache_write.active_support;dur=0.14, cache_read_multi.active_support;dur=0.19,
start_processing.action_controller;dur=0.00, process_action.action_controller;dur=2.74
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 2420790e-9669-4235-851c-468185b6ef40
x-runtime:
- '0.102516'
x-xss-protection:
- 1; mode=block
status:
code: 401
message: Unauthorized
- request:
body: '{"status": "failed", "failure_reason": "Error sending events to backend"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '73'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.193.2
X-Crewai-Organization-Id:
- d3a3d10c-35db-423f-a7a4-c026030ba64d
X-Crewai-Version:
- 0.193.2
method: PATCH
uri: http://localhost:3000/crewai_plus/api/v1/tracing/batches/None
response:
body:
string: '{"error":"bad_credentials","message":"Bad credentials"}'
headers:
Content-Length:
- '55'
cache-control:
- no-cache
content-security-policy:
- 'default-src ''self'' *.crewai.com crewai.com; script-src ''self'' ''unsafe-inline''
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com
https://run.pstmn.io https://share.descript.com/; style-src ''self'' ''unsafe-inline''
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self''
data: *.crewai.com crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net; font-src ''self'' data: *.crewai.com crewai.com;
connect-src ''self'' *.crewai.com crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ ws://localhost:3036
wss://localhost:3036; frame-src ''self'' *.crewai.com crewai.com https://connect.useparagon.com/
https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/
https://www.youtube.com https://share.descript.com'
content-type:
- application/json; charset=utf-8
permissions-policy:
- camera=(), microphone=(self), geolocation=()
referrer-policy:
- strict-origin-when-cross-origin
server-timing:
- cache_read.active_support;dur=0.06, sql.active_record;dur=3.86, cache_generate.active_support;dur=4.28,
cache_write.active_support;dur=0.15, cache_read_multi.active_support;dur=0.12,
start_processing.action_controller;dur=0.00, process_action.action_controller;dur=1.70
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 1750d141-c48f-47f1-b8b4-130195437d22
x-runtime:
- '0.043849'
x-xss-protection:
- 1; mode=block
status:
code: 401
message: Unauthorized
- request:
body: '{"trace_id": "e7ec4d48-cd70-436b-932e-45b2252284ec", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "crew", "flow_name": null, "crewai_version": "1.0.0a2", "privacy_level":
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-10-02T22:35:42.329267+00:00"}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '428'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.0.0a2
X-Crewai-Version:
- 1.0.0a2
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
response:
body:
string: '{"error":"bad_credentials","message":"Bad credentials"}'
headers:
Connection:
- keep-alive
Content-Length:
- '55'
Content-Type:
- application/json; charset=utf-8
Date:
- Thu, 02 Oct 2025 22:35:42 GMT
cache-control:
- no-cache
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 9db7bedc-a65b-4dca-ad3a-34b70101a37a
x-runtime:
- '0.029103'
x-xss-protection:
- 1; mode=block
status:
code: 401
message: Unauthorized
version: 1
View File
@@ -0,0 +1,450 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
cookie:
- __cf_bm=ePO5hy0kEoADCuKcboFy1iS1qckCE5KCpifQaXnlomM-1754508545-1.0.1.1-ieWfjcdIxQIXGfaMizvmgTvZPRFehqDXliegaOT7EO.kt7KSSFGmNDcC35_D9hOhE.fJ5K302uX0snQF3nLaapds2dqgGbNcsyFPOKNvAdI;
_cfuvid=NaXWifUGChHp6Ap1mvfMrNzmO4HdzddrqXkSR9T.hYo-1754508545647-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLBbpwwEL3zFSOfl2oJ0N1ySypV6aG3ntpGaGIGcGI8lm2yjaL998qw
Wdi2kXJBYt685/dm5iUBEKoRFQjZY5CD1ennjD6ON/5Hrr4dysxu8/72++NNd/twXT4PYhMZfP9A
MryyPkgerKag2MywdISBomq2K4tyuy+L/QQM3JCOtM6GtOB0UEalV9urIt3u0mx/YvesJHlRwc8E
AOBl+kafpqHfooLt5rUykPfYkajOTQDCsY4Vgd4rH9AEsVlAySaQmax/BcMHkGigU08ECF20DWj8
gRzAL/NFGdRwPf1X0JPWDAd2ulkLOmpHjzGUGbVeAWgMB4xDmaLcnZDj2bzmzjq+939RRauM8n3t
CD2baNQHtmJCjwnA3TSk8SK3sI4HG+rAjzQ9l5W7WU8su1mh+QkMHFCv6rvTaC/16oYCKu1XYxYS
ZU/NQl12gmOjeAUkq9T/uvmf9pxcme498gsgJdlATW0dNUpeJl7aHMXTfavtPOXJsPDknpSkOihy
cRMNtTjq+aCEf/aBhrpVpiNnnZqvqrV1XmBZIH3KpUiOyR8AAAD//wMAErrW9WMDAAA=
headers:
CF-RAY:
- 96b0f0fb5c067ad9-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Aug 2025 19:29:09 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '628'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '657'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999827'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_a0daca00035c423daf0e9df208720180
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '797'
content-type:
- application/json
cookie:
- __cf_bm=f59gEPi_nA3TTxtjbKaSQpvkTwezaAqOvqfxiGzRnVQ-1754508546-1.0.1.1-JrSaytxVIQSVE00I.vyGj7d4HJbbMV6R9fWPJbkDKu0Y8ueMRzTwTUnfz0YzP5nsZX5oxoE6WlmFxOuz0rRuq9YhZZsO_TbaFBOFk1jGK9U;
_cfuvid=3D66v3.J_RcVoYy9dlF.jHwq1zTIm842xynZxzSy1Wc-1754508546352-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '200.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLLjtswDLz7Kwid4yLZ2HXiW7FA0R567KXtwmAk2tZWlgSJTlos8u+F
nGzs9AH0YsAczmiG5EsGILQSNQjZI8vBm/xxQ2+Pnz89n8Lj7gvvP3BArNb94cClqsQqMdzhmSS/
st5IN3hDrJ29wDIQMiXVTVUW5XpXFvsJGJwik2id57xw+aCtzh/WD0W+rvLN7srunZYURQ1fMwCA
l+mbfFpFP0QN69VrZaAYsSNR35oARHAmVQTGqCOjZbGaQeksk52sfwTrTiDRQqePBAhdsg1o44kC
wDf7Xls08G76r6EnYxycXDBqKRioHSOmUHY0ZgGgtY4xDWWK8nRFzjfzxnU+uEP8jSpabXXsm0AY
nU1GIzsvJvScATxNQxrvcgsf3OC5Yfedpuc2ZXXRE/NuFuj2CrJjNIt6dR3tvV6jiFGbuBizkCh7
UjN13gmOSrsFkC1S/+nmb9qX5Np2/yM/A1KSZ1KND6S0vE88twVKp/uvttuUJ8MiUjhqSQ1rCmkT
iloczeWgRPwZmYam1baj4IO+XFXrm22BZYG030qRnbNfAAAA//8DAOX6h6tjAwAA
headers:
CF-RAY:
- 96b0f101793aeb2c-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Aug 2025 19:29:09 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '541'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '557'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999827'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_df70f95325b14817a692f23cf9cca880
status:
code: 200
message: OK
- request:
body: '{"trace_id": "2487456d-e03a-4eae-92a1-e9779e8f06a1", "execution_type":
"crew", "execution_context": {"crew_fingerprint": null, "crew_name": "Unknown
Crew", "flow_name": "Unknown Flow", "crewai_version": "0.152.0", "privacy_level":
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-08-06T19:30:52.475039+00:00"}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '421'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.152.0
X-Crewai-Organization-Id:
- d3a3d10c-35db-423f-a7a4-c026030ba64d
X-Crewai-Version:
- 0.152.0
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
response:
body:
string: "<!DOCTYPE html>\n<html>\n<head>\n <title>The page you were looking
for doesn't exist (404)</title>\n <meta name=\"viewport\" content=\"width=device-width,initial-scale=1\">\n
\ <style>\n .rails-default-error-page {\n background-color: #EFEFEF;\n
\ color: #2E2F30;\n text-align: center;\n font-family: arial, sans-serif;\n
\ margin: 0;\n }\n\n .rails-default-error-page div.dialog {\n width:
95%;\n max-width: 33em;\n margin: 4em auto 0;\n }\n\n .rails-default-error-page
div.dialog > div {\n border: 1px solid #CCC;\n border-right-color: #999;\n
\ border-left-color: #999;\n border-bottom-color: #BBB;\n border-top:
#B00100 solid 4px;\n border-top-left-radius: 9px;\n border-top-right-radius:
9px;\n background-color: white;\n padding: 7px 12% 0;\n box-shadow:
0 3px 8px rgba(50, 50, 50, 0.17);\n }\n\n .rails-default-error-page h1 {\n
\ font-size: 100%;\n color: #730E15;\n line-height: 1.5em;\n }\n\n
\ .rails-default-error-page div.dialog > p {\n margin: 0 0 1em;\n padding:
1em;\n background-color: #F7F7F7;\n border: 1px solid #CCC;\n border-right-color:
#999;\n border-left-color: #999;\n border-bottom-color: #999;\n border-bottom-left-radius:
4px;\n border-bottom-right-radius: 4px;\n border-top-color: #DADADA;\n
\ color: #666;\n box-shadow: 0 3px 8px rgba(50, 50, 50, 0.17);\n }\n
\ </style>\n</head>\n\n<body class=\"rails-default-error-page\">\n <!-- This
file lives in public/404.html -->\n <div class=\"dialog\">\n <div>\n <h1>The
page you were looking for doesn't exist.</h1>\n <p>You may have mistyped
the address or the page may have moved.</p>\n </div>\n <p>If you are
the application owner check the logs for more information.</p>\n </div>\n</body>\n</html>\n"
headers:
Connection:
- keep-alive
Content-Length:
- '1722'
Content-Type:
- text/html; charset=UTF-8
Date:
- Wed, 06 Aug 2025 19:30:52 GMT
strict-transport-security:
- max-age=63072000; includeSubDomains
x-request-id:
- 3640ddcd-56a3-48cc-9a5a-110ebe34ac80
x-runtime:
- '0.007004'
status:
code: 404
message: Not Found
- request:
body: '{"version": "0.152.0", "batch_id": "2487456d-e03a-4eae-92a1-e9779e8f06a1",
"user_context": {"user_id": "anonymous", "organization_id": "", "session_id":
"57ab4cf7-915a-4d4e-b01d-3791f418dd39", "trace_id": "c780f111-40df-4c4b-ae88-b09c0f7e3276"},
"execution_metadata": {"crew_name": "Unknown Crew", "crewai_version": "0.152.0"},
"events": [{"event_id": "55b93497-f7f7-4c2e-baf9-eeec358cf90f", "timestamp":
"2025-08-06T19:30:52.588780+00:00", "type": "llm_call_started", "event_data":
{"timestamp": "2025-08-06T19:30:52.473897+00:00", "type": "llm_call_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "020f12f8-4bcf-4e49-9bf9-d8fee70eaf6a", "agent_id":
"126a2971-a630-405c-a0d9-72d2c46e8074", "agent_role": "Test Agent", "model":
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\n..."}, {"role": "user", "content": "\nCurrent Task:
Say hello to the world\n\nThis is the expected criteria for your final answer:
hello world\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is ..."}], "tools": null, "callbacks": ["<crewai.utilities.token_counter_callback.TokenCalcHandler
object at 0x11c5860f0>"], "available_functions": null}}, {"event_id": "967e03f5-fc65-477f-a1ba-4614acbd0527",
"timestamp": "2025-08-06T19:30:52.588932+00:00", "type": "llm_call_started",
"event_data": {"timestamp": "2025-08-06T19:30:52.473897+00:00", "type": "llm_call_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "020f12f8-4bcf-4e49-9bf9-d8fee70eaf6a", "agent_id":
"126a2971-a630-405c-a0d9-72d2c46e8074", "agent_role": "Test Agent", "model":
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\n..."}, {"role": "user", "content": "\nCurrent Task:
Say hello to the world\n\nThis is the expected criteria for your final answer:
hello world\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is ..."}], "tools": null, "callbacks": ["<crewai.utilities.token_counter_callback.TokenCalcHandler
object at 0x11c5860f0>"], "available_functions": null}}, {"event_id": "f67f33c0-f9ac-450e-987e-bb48880b9b69",
"timestamp": "2025-08-06T19:30:52.597813+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-08-06T19:30:52.597748+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "020f12f8-4bcf-4e49-9bf9-d8fee70eaf6a", "agent_id":
"126a2971-a630-405c-a0d9-72d2c46e8074", "agent_role": "Test Agent", "messages":
[{"role": "system", "content": "You are Test Agent. Test backstory\nYour personal
goal is: Test goal\nTo give my best complete final answer to the task respond
using the exact following format:\n\nThought: I now can give a great answer\n..."},
{"role": "user", "content": "\nCurrent Task: Say hello to the world\n\nThis
is the expected criteria for your final answer: hello world\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is ..."}], "response": "I now can give a great answer \nFinal Answer: hello
world", "call_type": "<LLMCallType.LLM_CALL: ''llm_call''>", "response_cost":
3.135e-05, "model": "gpt-4o-mini"}}, {"event_id": "0868106f-9491-4214-9674-4f9c5621875b",
"timestamp": "2025-08-06T19:30:52.597885+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-08-06T19:30:52.597748+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "020f12f8-4bcf-4e49-9bf9-d8fee70eaf6a", "agent_id":
"126a2971-a630-405c-a0d9-72d2c46e8074", "agent_role": "Test Agent", "messages":
[{"role": "system", "content": "You are Test Agent. Test backstory\nYour personal
goal is: Test goal\nTo give my best complete final answer to the task respond
using the exact following format:\n\nThought: I now can give a great answer\n..."},
{"role": "user", "content": "\nCurrent Task: Say hello to the world\n\nThis
is the expected criteria for your final answer: hello world\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is ..."}], "response": "I now can give a great answer \nFinal Answer: hello
world", "call_type": "<LLMCallType.LLM_CALL: ''llm_call''>", "response_cost":
3.135e-05, "model": "gpt-4o-mini"}}, {"event_id": "4b68fa53-1829-4201-bfa2-9364dbd8fc51",
"timestamp": "2025-08-06T19:30:52.598054+00:00", "type": "agent_execution_completed",
"event_data": {"serialization_error": "Circular reference detected (id repeated)",
"object_type": "AgentExecutionCompletedEvent"}}, {"event_id": "264a2a9a-c234-45f2-8021-5e4b1b7c4c98",
"timestamp": "2025-08-06T19:30:52.598224+00:00", "type": "agent_execution_completed",
"event_data": {"serialization_error": "Circular reference detected (id repeated)",
"object_type": "AgentExecutionCompletedEvent"}}, {"event_id": "b8b7f244-e2c0-4cac-9411-0efab7b9936a",
"timestamp": "2025-08-06T19:30:52.598372+00:00", "type": "task_completed", "event_data":
{"serialization_error": "Circular reference detected (id repeated)", "object_type":
"TaskCompletedEvent"}}, {"event_id": "c15489a3-31af-48bb-8f32-cb3c3f46f7b9",
"timestamp": "2025-08-06T19:30:52.598416+00:00", "type": "task_completed", "event_data":
{"serialization_error": "Circular reference detected (id repeated)", "object_type":
"TaskCompletedEvent"}}, {"event_id": "6806c6bd-1399-4261-aa23-5b6166c2ab31",
"timestamp": "2025-08-06T19:30:52.601018+00:00", "type": "crew_kickoff_completed",
"event_data": {"timestamp": "2025-08-06T19:30:52.600994+00:00", "type": "crew_kickoff_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"crew_name": "crew", "crew": null, "output": {"description": "Say hello to the
world", "name": null, "expected_output": "hello world", "summary": "Say hello
to the world...", "raw": "hello world", "pydantic": null, "json_dict": null,
"agent": "Test Agent", "output_format": "raw"}, "total_tokens": 170}}]}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '6503'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.152.0
X-Crewai-Organization-Id:
- d3a3d10c-35db-423f-a7a4-c026030ba64d
X-Crewai-Version:
- 0.152.0
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing
response:
body:
string: "<!DOCTYPE html>\n<html>\n<head>\n <title>The page you were looking
for doesn't exist (404)</title>\n <meta name=\"viewport\" content=\"width=device-width,initial-scale=1\">\n
\ <style>\n .rails-default-error-page {\n background-color: #EFEFEF;\n
\ color: #2E2F30;\n text-align: center;\n font-family: arial, sans-serif;\n
\ margin: 0;\n }\n\n .rails-default-error-page div.dialog {\n width:
95%;\n max-width: 33em;\n margin: 4em auto 0;\n }\n\n .rails-default-error-page
div.dialog > div {\n border: 1px solid #CCC;\n border-right-color: #999;\n
\ border-left-color: #999;\n border-bottom-color: #BBB;\n border-top:
#B00100 solid 4px;\n border-top-left-radius: 9px;\n border-top-right-radius:
9px;\n background-color: white;\n padding: 7px 12% 0;\n box-shadow:
0 3px 8px rgba(50, 50, 50, 0.17);\n }\n\n .rails-default-error-page h1 {\n
\ font-size: 100%;\n color: #730E15;\n line-height: 1.5em;\n }\n\n
\ .rails-default-error-page div.dialog > p {\n margin: 0 0 1em;\n padding:
1em;\n background-color: #F7F7F7;\n border: 1px solid #CCC;\n border-right-color:
#999;\n border-left-color: #999;\n border-bottom-color: #999;\n border-bottom-left-radius:
4px;\n border-bottom-right-radius: 4px;\n border-top-color: #DADADA;\n
\ color: #666;\n box-shadow: 0 3px 8px rgba(50, 50, 50, 0.17);\n }\n
\ </style>\n</head>\n\n<body class=\"rails-default-error-page\">\n <!-- This
file lives in public/404.html -->\n <div class=\"dialog\">\n <div>\n <h1>The
page you were looking for doesn't exist.</h1>\n <p>You may have mistyped
the address or the page may have moved.</p>\n </div>\n <p>If you are
the application owner check the logs for more information.</p>\n </div>\n</body>\n</html>\n"
headers:
Connection:
- keep-alive
Content-Length:
- '1722'
Content-Type:
- text/html; charset=UTF-8
Date:
- Wed, 06 Aug 2025 19:30:52 GMT
strict-transport-security:
- max-age=63072000; includeSubDomains
x-request-id:
- 50885aa8-a8e2-4a5a-87f7-6d0e2f2f80f9
x-runtime:
- '0.006464'
status:
code: 404
message: Not Found
version: 1


@@ -0,0 +1,433 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJdb9QwEHzPr1j8nKDkmvTavBUQnw8goQoJqKKts8kZHK9lOy1Q3X9H
Tq6XFIrES6Ts7IxndvcuARCqFTUIucMgB6uz5wWduvz8g3nx/tnny3d4MRabX6flW9O9qj6KNDL4
+hvJcM96KnmwmoJiM8PSEQaKqsW2Kqv8rCqrCRi4JR1pvQ1ZydmgjMo2+abM8m1WnB3YO1aSvKjh
SwIAcDd9o0/T0g9RQ57eVwbyHnsS9bEJQDjWsSLQe+UDmiDSBZRsApnJ+hswfAsSDfTqhgChj7YB
jb8lB/DVvFQGNVxM/zW8Jq05hU/sdPtkLemoGz3GWGbUegWgMRwwjmUKc3VA9kf7mnvr+Nr/QRWd
MsrvGkfo2USrPrAVE7pPAK6mMY0PkgvreLChCfydpueKajvriWU7a/QABg6oV/XtJn1Er2kpoNJ+
NWghUe6oXajLVnBsFa+AZJX6bzePac/Jlen/R34BpCQbqG2so1bJh4mXNkfxeP/VdpzyZFh4cjdK
UhMUubiJljoc9XxSwv/0gYamU6YnZ52a76qzzUmJVYl0fiJFsk9+AwAA//8DABLzfQllAwAA
headers:
CF-RAY:
- 96b0f0e62d177ad9-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Aug 2025 19:29:05 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=ePO5hy0kEoADCuKcboFy1iS1qckCE5KCpifQaXnlomM-1754508545-1.0.1.1-ieWfjcdIxQIXGfaMizvmgTvZPRFehqDXliegaOT7EO.kt7KSSFGmNDcC35_D9hOhE.fJ5K302uX0snQF3nLaapds2dqgGbNcsyFPOKNvAdI;
path=/; expires=Wed, 06-Aug-25 19:59:05 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=NaXWifUGChHp6Ap1mvfMrNzmO4HdzddrqXkSR9T.hYo-1754508545647-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '526'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '568'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999827'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_0e70b38c85e144d289fbdf89082cf16e
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '797'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '200.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJdj9MwEHzPr1j8nKC0TejRN4TE3YmTQIAEEpyiPWeTujheYztX0Kn/
HTnpNbkPJF4iZWdnPLO7dwmAULXYgJBbDLKzOnu7oFdu//nLr8v8Ij//1jdXq93Xjx8+7a7OzXuR
Rgbf7EiGe9ZLyZ3VFBSbEZaOMFBUXazLoszPyqIcgI5r0pHW2pAVnHXKqGyZL4ssX2eLsyN7y0qS
Fxv4ngAA3A3f6NPU9FtsIE/vKx15jy2JzakJQDjWsSLQe+UDmiDSCZRsApnB+iUY3oNEA626JUBo
o21A4/fkAH6Yd8qghjfD/wYuSGtOYc9O1y/mko6a3mOMZXqtZwAawwHjWIYw10fkcLKvubWOb/wj
qmiUUX5bOULPJlr1ga0Y0EMCcD2MqX+QXFjHnQ1V4J80PLco16OemLYzR49g4IB6Vl8v02f0qpoC
Ku1ngxYS5ZbqiTptBfta8QxIZqmfunlOe0yuTPs/8hMgJdlAdWUd1Uo+TDy1OYrH+6+205QHw8KT
u1WSqqDIxU3U1GCvx5MS/o8P1FWNMi0569R4V42tVgWWBdLrlRTJIfkLAAD//wMAE4F9LmUDAAA=
headers:
CF-RAY:
- 96b0f0eadf69eb2c-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Aug 2025 19:29:06 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=f59gEPi_nA3TTxtjbKaSQpvkTwezaAqOvqfxiGzRnVQ-1754508546-1.0.1.1-JrSaytxVIQSVE00I.vyGj7d4HJbbMV6R9fWPJbkDKu0Y8ueMRzTwTUnfz0YzP5nsZX5oxoE6WlmFxOuz0rRuq9YhZZsO_TbaFBOFk1jGK9U;
path=/; expires=Wed, 06-Aug-25 19:59:06 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=3D66v3.J_RcVoYy9dlF.jHwq1zTIm842xynZxzSy1Wc-1754508546352-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '504'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '527'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999827'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999830'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_32abf5c6f27e42579bc84b0bfcc9c4b4
status:
code: 200
message: OK
- request:
body: '{"trace_id": "e3677f76-4763-4f55-94b2-f38707f353c3", "execution_type":
"crew", "execution_context": {"crew_fingerprint": null, "crew_name": "crew",
"flow_name": "Unknown Flow", "crewai_version": "0.152.0", "privacy_level": "standard"},
"execution_metadata": {"expected_duration_estimate": 300, "agent_count": 0,
"task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-08-06T19:30:52.778875+00:00"}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '413'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.152.0
X-Crewai-Organization-Id:
- d3a3d10c-35db-423f-a7a4-c026030ba64d
X-Crewai-Version:
- 0.152.0
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
response:
body:
string: "<!DOCTYPE html>\n<html>\n<head>\n <title>The page you were looking
for doesn't exist (404)</title>\n <meta name=\"viewport\" content=\"width=device-width,initial-scale=1\">\n
\ <style>\n .rails-default-error-page {\n background-color: #EFEFEF;\n
\ color: #2E2F30;\n text-align: center;\n font-family: arial, sans-serif;\n
\ margin: 0;\n }\n\n .rails-default-error-page div.dialog {\n width:
95%;\n max-width: 33em;\n margin: 4em auto 0;\n }\n\n .rails-default-error-page
div.dialog > div {\n border: 1px solid #CCC;\n border-right-color: #999;\n
\ border-left-color: #999;\n border-bottom-color: #BBB;\n border-top:
#B00100 solid 4px;\n border-top-left-radius: 9px;\n border-top-right-radius:
9px;\n background-color: white;\n padding: 7px 12% 0;\n box-shadow:
0 3px 8px rgba(50, 50, 50, 0.17);\n }\n\n .rails-default-error-page h1 {\n
\ font-size: 100%;\n color: #730E15;\n line-height: 1.5em;\n }\n\n
\ .rails-default-error-page div.dialog > p {\n margin: 0 0 1em;\n padding:
1em;\n background-color: #F7F7F7;\n border: 1px solid #CCC;\n border-right-color:
#999;\n border-left-color: #999;\n border-bottom-color: #999;\n border-bottom-left-radius:
4px;\n border-bottom-right-radius: 4px;\n border-top-color: #DADADA;\n
\ color: #666;\n box-shadow: 0 3px 8px rgba(50, 50, 50, 0.17);\n }\n
\ </style>\n</head>\n\n<body class=\"rails-default-error-page\">\n <!-- This
file lives in public/404.html -->\n <div class=\"dialog\">\n <div>\n <h1>The
page you were looking for doesn't exist.</h1>\n <p>You may have mistyped
the address or the page may have moved.</p>\n </div>\n <p>If you are
the application owner check the logs for more information.</p>\n </div>\n</body>\n</html>\n"
headers:
Connection:
- keep-alive
Content-Length:
- '1722'
Content-Type:
- text/html; charset=UTF-8
Date:
- Wed, 06 Aug 2025 19:30:52 GMT
strict-transport-security:
- max-age=63072000; includeSubDomains
x-request-id:
- 8c9c1556-d30f-4736-b62c-5a41150e859f
x-runtime:
- '0.005329'
status:
code: 404
message: Not Found
- request:
body: '{"version": "0.152.0", "batch_id": "e3677f76-4763-4f55-94b2-f38707f353c3",
"user_context": {"user_id": "anonymous", "organization_id": "", "session_id":
"eb96086e-c3b3-4757-a118-328be61c9aad", "trace_id": "90245ff6-bd46-4e0e-83da-b12edd241b0e"},
"execution_metadata": {"crew_name": "crew", "execution_start": "2025-08-06T19:30:52.777333+00:00",
"crewai_version": "0.152.0"}, "events": [{"event_id": "d5c81b9a-b8a9-4638-ab50-aa91792b95c8",
"timestamp": "2025-08-06T19:30:52.909777+00:00", "type": "crew_kickoff_started",
"event_data": {"timestamp": "2025-08-06T19:30:52.777333+00:00", "type": "crew_kickoff_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"crew_name": "crew", "crew": null, "inputs": null}}, {"event_id": "a5bd314d-f9eb-471f-b3c4-e176e6ec62f9",
"timestamp": "2025-08-06T19:30:52.911914+00:00", "type": "task_started", "event_data":
{"task_description": "Say hello to the world", "task_name": null, "context":
"", "agent": "Test Agent"}}, {"event_id": "ce0e41d9-90a9-4585-8dc3-c04ee02232bc",
"timestamp": "2025-08-06T19:30:52.912403+00:00", "type": "agent_execution_started",
"event_data": {"serialization_error": "Circular reference detected (id repeated)",
"object_type": "AgentExecutionStartedEvent"}}, {"event_id": "d7c3546e-fe60-4c8a-9e4a-a510fa631a8b",
"timestamp": "2025-08-06T19:30:52.912693+00:00", "type": "llm_call_started",
"event_data": {"timestamp": "2025-08-06T19:30:52.912657+00:00", "type": "llm_call_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "e4abe414-b25d-44ea-8a0d-4998d7e55ed3", "agent_id":
"91a39492-d0c8-4994-b8b4-acdd256a2e96", "agent_role": "Test Agent", "model":
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\n..."}, {"role": "user", "content": "\nCurrent Task:
Say hello to the world\n\nThis is the expected criteria for your final answer:
hello world\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is ..."}], "tools": null, "callbacks": ["<crewai.utilities.token_counter_callback.TokenCalcHandler
object at 0x11c5b5e50>"], "available_functions": null}}, {"event_id": "6ed9e994-3e66-4653-97e0-a9e8e8c1d978",
"timestamp": "2025-08-06T19:30:52.919664+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-08-06T19:30:52.919623+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_name": null, "task_id": "e4abe414-b25d-44ea-8a0d-4998d7e55ed3", "agent_id":
"91a39492-d0c8-4994-b8b4-acdd256a2e96", "agent_role": "Test Agent", "messages":
[{"role": "system", "content": "You are Test Agent. Test backstory\nYour personal
goal is: Test goal\nTo give my best complete final answer to the task respond
using the exact following format:\n\nThought: I now can give a great answer\n..."},
{"role": "user", "content": "\nCurrent Task: Say hello to the world\n\nThis
is the expected criteria for your final answer: hello world\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is ..."}], "response": "I now can give a great answer \nFinal Answer: Hello,
World!", "call_type": "<LLMCallType.LLM_CALL: ''llm_call''>", "response_cost":
3.255e-05, "model": "gpt-4o-mini"}}, {"event_id": "2f03d7fe-2faf-4d6b-a9e1-d3cb9e87ef10",
"timestamp": "2025-08-06T19:30:52.919798+00:00", "type": "agent_execution_completed",
"event_data": {"serialization_error": "Circular reference detected (id repeated)",
"object_type": "AgentExecutionCompletedEvent"}}, {"event_id": "c2ed7fa8-0361-406f-8a0f-4cf0f580dbee",
"timestamp": "2025-08-06T19:30:52.919953+00:00", "type": "task_completed", "event_data":
{"serialization_error": "Circular reference detected (id repeated)", "object_type":
"TaskCompletedEvent"}}, {"event_id": "727d1ea2-4f7b-4d12-b491-42a27f3c3123",
"timestamp": "2025-08-06T19:30:52.921547+00:00", "type": "crew_kickoff_completed",
"event_data": {"timestamp": "2025-08-06T19:30:52.921522+00:00", "type": "crew_kickoff_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"crew_name": "crew", "crew": null, "output": {"description": "Say hello to the
world", "name": null, "expected_output": "hello world", "summary": "Say hello
to the world...", "raw": "Hello, World!", "pydantic": null, "json_dict": null,
"agent": "Test Agent", "output_format": "raw"}, "total_tokens": 172}}]}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '4656'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.152.0
X-Crewai-Organization-Id:
- d3a3d10c-35db-423f-a7a4-c026030ba64d
X-Crewai-Version:
- 0.152.0
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing
response:
body:
string: "<!DOCTYPE html>\n<html>\n<head>\n <title>The page you were looking
for doesn't exist (404)</title>\n <meta name=\"viewport\" content=\"width=device-width,initial-scale=1\">\n
\ <style>\n .rails-default-error-page {\n background-color: #EFEFEF;\n
\ color: #2E2F30;\n text-align: center;\n font-family: arial, sans-serif;\n
\ margin: 0;\n }\n\n .rails-default-error-page div.dialog {\n width:
95%;\n max-width: 33em;\n margin: 4em auto 0;\n }\n\n .rails-default-error-page
div.dialog > div {\n border: 1px solid #CCC;\n border-right-color: #999;\n
\ border-left-color: #999;\n border-bottom-color: #BBB;\n border-top:
#B00100 solid 4px;\n border-top-left-radius: 9px;\n border-top-right-radius:
9px;\n background-color: white;\n padding: 7px 12% 0;\n box-shadow:
0 3px 8px rgba(50, 50, 50, 0.17);\n }\n\n .rails-default-error-page h1 {\n
\ font-size: 100%;\n color: #730E15;\n line-height: 1.5em;\n }\n\n
\ .rails-default-error-page div.dialog > p {\n margin: 0 0 1em;\n padding:
1em;\n background-color: #F7F7F7;\n border: 1px solid #CCC;\n border-right-color:
#999;\n border-left-color: #999;\n border-bottom-color: #999;\n border-bottom-left-radius:
4px;\n border-bottom-right-radius: 4px;\n border-top-color: #DADADA;\n
\ color: #666;\n box-shadow: 0 3px 8px rgba(50, 50, 50, 0.17);\n }\n
\ </style>\n</head>\n\n<body class=\"rails-default-error-page\">\n <!-- This
file lives in public/404.html -->\n <div class=\"dialog\">\n <div>\n <h1>The
page you were looking for doesn't exist.</h1>\n <p>You may have mistyped
the address or the page may have moved.</p>\n </div>\n <p>If you are
the application owner check the logs for more information.</p>\n </div>\n</body>\n</html>\n"
headers:
Connection:
- keep-alive
Content-Length:
- '1722'
Content-Type:
- text/html; charset=UTF-8
Date:
- Wed, 06 Aug 2025 19:30:53 GMT
strict-transport-security:
- max-age=63072000; includeSubDomains
x-request-id:
- 3b16d4bb-ba79-4a32-a776-26bbdf8d0a68
x-runtime:
- '0.005566'
status:
code: 404
message: Not Found
version: 1


@@ -0,0 +1,126 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJdi9swEHz3r1j0HBc7ZydXvx1HC0evhT6UUtrDKNLa1lXWqpJ8aTjy
34vsXOz0A/pi8M7OaGZ3nxMApiSrgImOB9Fbnd4Why/v79HeePeD37/Zfdx8evf5+rY4fNjdPbJV
ZNDuEUV4Yb0S1FuNQZGZYOGQB4yq+bYsr7J1nhcj0JNEHWmtDWlBaa+MStfZukizbZpfn9gdKYGe
VfA1AQB4Hr/Rp5H4k1WQrV4qPXrPW2TVuQmAOdKxwrj3ygduAlvNoCAT0IzW78DQHgQ30KonBA5t
tA3c+D06gG/mrTJcw834X0GHWhPsyWm5FHTYDJ7HUGbQegFwYyjwOJQxysMJOZ7Na2qto53/jcoa
ZZTvaofck4lGfSDLRvSYADyMQxoucjPrqLehDvQdx+fycjvpsXk3C/TqBAYKXC/q29NoL/VqiYEr
7RdjZoKLDuVMnXfCB6loASSL1H+6+Zv2lFyZ9n/kZ0AItAFlbR1KJS4Tz20O4+n+q+085dEw8+ie
lMA6KHRxExIbPujpoJg/+IB93SjTorNOTVfV2LrcZLzZYFm+Zskx+QUAAP//AwB1vYZ+YwMAAA==
headers:
CF-RAY:
- 96fc9f29dea3cf1f-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 15 Aug 2025 23:55:15 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=oA9oTa3cE0ZaEUDRf0hCpnarSAQKzrVUhl6qDS4j09w-1755302115-1.0.1.1-gUUDl4ZqvBQkg7244DTwOmSiDUT2z_AiQu0P1xUaABjaufSpZuIlI5G0H7OSnW.ldypvpxjj45NGWesJ62M_2U7r20tHz_gMmDFw6D5ZiNc;
path=/; expires=Sat, 16-Aug-25 00:25:15 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=ICenEGMmOE5jaOjwD30bAOwrF8.XRbSIKTBl1EyWs0o-1755302115700-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '735'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '753'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999830'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_212fde9d945a462ba0d89ea856131dce
status:
code: 200
message: OK
version: 1


@@ -0,0 +1,125 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '820'
content-type:
- application/json
cookie:
- _cfuvid=NaXWifUGChHp6Ap1mvfMrNzmO4HdzddrqXkSR9T.hYo-1754508545647-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJdj9MwEHzPr1j5uUFp6DUhbycE4g6eKEic4BS59ibx4Xgt27mCTv3v
yEmvSfmQeImUnZ3xzO4+JQBMSVYBEx0Porc6fZ3nH2725cc3O1Pevnv/+e5OPFx/2mW7wd5+YavI
oP0DivDMeiGotxqDIjPBwiEPGFXXxdVmm623r7Yj0JNEHWmtDemG0jzLN2lWptn2ROxICfSsgq8J
AMDT+I0WjcQfrIJs9Vzp0XveIqvOTQDMkY4Vxr1XPnAT2GoGBZmAZnR9A4YOILiBVj0icGijY+DG
H9ABfDNvleEarsf/CjrUmuBATsuloMNm8DzmMYPWC4AbQ4HHeYxR7k/I8WxeU2sd7f1vVNYoo3xX
O+SeTDTqA1k2oscE4H4c0nCRm1lHvQ11oO84Pre+KiY9Nq9lgb48gYEC14t6cRrtpV4tMXCl/WLM
THDRoZyp8074IBUtgGSR+k83f9OekivT/o/8DAiBNqCsrUOpxGXiuc1hvNp/tZ2nPBpmHt2jElgH
hS5uQmLDBz0dFPM/fcC+bpRp0VmnpqtqbJ0VZbHGnMuSJcfkFwAAAP//AwBXeOIeXgMAAA==
headers:
CF-RAY:
- 96b9d31b89dc1684-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 07 Aug 2025 21:21:37 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=nQuY9yOahy.xg.aHkxwJgC5gyX5c9Xjbhp3Y7GMX4Ek-1754601697-1.0.1.1-_K22zHDSq5PrNEgK7qwpgcjPitPpgoT54GksNiq6j.aSPasbC7UakO3AYT59smUo5j14NY_OrHkDhm.eGIdpUTpnoJZK7MfR7X8Z96FITGs;
path=/; expires=Thu, 07-Aug-25 21:51:37 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '424'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '438'
x-ratelimit-limit-project-tokens:
- '30000000'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-project-tokens:
- '29999828'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999828'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_6dbb2a6c9a864480b8d1584ae0f51fad
status:
code: 200
message: OK
version: 1


@@ -0,0 +1,123 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
cookie:
- __cf_bm=oA9oTa3cE0ZaEUDRf0hCpnarSAQKzrVUhl6qDS4j09w-1755302115-1.0.1.1-gUUDl4ZqvBQkg7244DTwOmSiDUT2z_AiQu0P1xUaABjaufSpZuIlI5G0H7OSnW.ldypvpxjj45NGWesJ62M_2U7r20tHz_gMmDFw6D5ZiNc;
_cfuvid=ICenEGMmOE5jaOjwD30bAOwrF8.XRbSIKTBl1EyWs0o-1755302115700-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLLbtswELzrKxY8W4VkW3ajW1GgQA7JJUGLog0EmlxJrCkuQ1J2g8D/
XlByLLkPoBcB2tkZzuzuawLAlGQlMNHyIDqr04/rl6/3z+6hu8vq/cOX5936cRPE3i8Px88rtogM
2v1AEd5Y7wR1VmNQZEZYOOQBo2q+LYpVtszzYgA6kqgjrbEhXVPaKaPSZbZcp9k2zd+f2S0pgZ6V
8C0BAHgdvtGnkfiTlZAt3iodes8bZOWlCYA50rHCuPfKB24CW0ygIBPQDNZvwdARBDfQqAMChyba
Bm78ER3Ad/NJGa7hw/BfQotaExzJaTkXdFj3nsdQptd6BnBjKPA4lCHK0xk5Xcxraqyjnf+Nympl
lG8rh9yTiUZ9IMsG9JQAPA1D6q9yM+uos6EKtMfhubzYjnps2s0MXZ3BQIHrWX17Hu21XiUxcKX9
bMxMcNGinKjTTngvFc2AZJb6Tzd/0x6TK9P8j/wECIE2oKysQ6nEdeKpzWE83X+1XaY8GGYe3UEJ
rIJCFzchsea9Hg+K+RcfsKtqZRp01qnxqmpbFZuM1xssihuWnJJfAAAA//8DAFSowWRjAwAA
headers:
CF-RAY:
- 96fc9f301bf7cf1f-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 15 Aug 2025 23:55:16 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '685'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '711'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999827'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_3f0ec42447374a76a22a4cdb9f336279
status:
code: 200
message: OK
version: 1


@@ -1,335 +0,0 @@
interactions:
- request:
body: '{"trace_id": "e9702504-ee01-4414-a97d-4a31b21ea508", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "crew", "flow_name": null, "crewai_version": "1.4.1", "privacy_level":
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-08T07:11:54.711549+00:00"},
"ephemeral_trace_id": "e9702504-ee01-4414-a97d-4a31b21ea508"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '488'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- 73c2b193-f579-422c-84c7-76a39a1da77f
X-Crewai-Version:
- 1.4.1
authorization:
- AUTHORIZATION-XXX
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
response:
body:
string: '{"id":"e03b258c-9817-48ed-bfb0-0fb904658f86","ephemeral_trace_id":"e9702504-ee01-4414-a97d-4a31b21ea508","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.4.1","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.4.1","privacy_level":"standard"},"created_at":"2025-11-08T07:11:55.047Z","updated_at":"2025-11-08T07:11:55.047Z","access_code":"TRACE-3c6490d729","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '515'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 08 Nov 2025 07:11:55 GMT
cache-control:
- no-store
content-security-policy:
- CSP-FILTERED
etag:
- ETAG-XXX
expires:
- '0'
permissions-policy:
- PERMISSIONS-POLICY-XXX
pragma:
- no-cache
referrer-policy:
- REFERRER-POLICY-XXX
strict-transport-security:
- STS-XXX
vary:
- Accept
x-content-type-options:
- X-CONTENT-TYPE-XXX
x-frame-options:
- X-FRAME-OPTIONS-XXX
x-permitted-cross-domain-policies:
- X-PERMITTED-XXX
x-request-id:
- X-REQUEST-ID-XXX
x-runtime:
- X-RUNTIME-XXX
x-xss-protection:
- X-XSS-PROTECTION-XXX
status:
code: 201
message: Created
- request:
body: !!binary |
CuctCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSvi0KEgoQY3Jld2FpLnRl
bGVtZXRyeRKXCAoQ5OwTi+Kgn9LxY8G/8cbfzRIIbhlHSeX/ZToqDENyZXcgQ3JlYXRlZDABOVhB
A+iM9nUYQXA2GeiM9nUYShkKDmNyZXdhaV92ZXJzaW9uEgcKBTEuNC4xShsKDnB5dGhvbl92ZXJz
aW9uEgkKBzMuMTIuMTBKLgoIY3Jld19rZXkSIgogZDg5OGE0ZTcyZWE2OGU2NTJhMzNjNjRlOTNl
Y2M5OWNKMQoHY3Jld19pZBImCiRmYjMxMGU2YS05MTcwLTQzNmUtODU4OC00OGQ0NTljZDQ1NjJK
OgoQY3Jld19maW5nZXJwcmludBImCiQ3NjRjZTZjYi0zMmQxLTRhYmQtYWVmOC00M2YxODU3ZDkw
NDRKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNy
ZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgBSjsKG2Ny
ZXdfZmluZ2VycHJpbnRfY3JlYXRlZF9hdBIcChoyMDI1LTExLTA4VDAyOjExOjU0LjIzMDA0N0rM
AgoLY3Jld19hZ2VudHMSvAIKuQJbeyJrZXkiOiAiNGEyNWIwMDEyYWU0M2M5MGRkNWNmZDMxMTA5
OGY3OTUiLCAiaWQiOiAiN2QyMzMwMTEtNDVhOC00MGIwLTk5NWYtNGUyNmYyZjVlY2I1IiwgInJv
bGUiOiAiVGVzdCBBZ2VudCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyNSwgIm1h
eF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8i
LCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAiYWxsb3dfY29kZV9leGVjdXRpb24/Ijog
ZmFsc2UsICJtYXhfcmV0cnlfbGltaXQiOiAyLCAidG9vbHNfbmFtZXMiOiBbXX1dSv8BCgpjcmV3
X3Rhc2tzEvABCu0BW3sia2V5IjogImIyNGYyNWQzODhhNGE0MWY5N2IwMzY2NTYyOGQyMzRlIiwg
ImlkIjogIjI5N2JkYTc4LWMxYzktNDRmOS1iMTA0LWM3ZGRhYzkwMmRlOSIsICJhc3luY19leGVj
dXRpb24/IjogZmFsc2UsICJodW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAiVGVz
dCBBZ2VudCIsICJhZ2VudF9rZXkiOiAiNGEyNWIwMDEyYWU0M2M5MGRkNWNmZDMxMTA5OGY3OTUi
LCAidG9vbHNfbmFtZXMiOiBbXX1degIYAYUBAAEAABKcBAoQiNvSWaQbLRbGpp/kBq4pHxII3acE
dlrt1OwqDFRhc2sgQ3JlYXRlZDABOYgkReiM9nUYQQhXR+iM9nUYSi4KCGNyZXdfa2V5EiIKIGQ4
OThhNGU3MmVhNjhlNjUyYTMzYzY0ZTkzZWNjOTljSjEKB2NyZXdfaWQSJgokZmIzMTBlNmEtOTE3
MC00MzZlLTg1ODgtNDhkNDU5Y2Q0NTYySjoKEGNyZXdfZmluZ2VycHJpbnQSJgokNzY0Y2U2Y2It
MzJkMS00YWJkLWFlZjgtNDNmMTg1N2Q5MDQ0Si4KCHRhc2tfa2V5EiIKIGIyNGYyNWQzODhhNGE0
MWY5N2IwMzY2NTYyOGQyMzRlSjEKB3Rhc2tfaWQSJgokMjk3YmRhNzgtYzFjOS00NGY5LWIxMDQt
YzdkZGFjOTAyZGU5SjoKEHRhc2tfZmluZ2VycHJpbnQSJgokMWNlNTY0N2YtNTVkZS00YzQwLWE5
NGQtOTZmZGRjMjYzOWZjSjsKG3Rhc2tfZmluZ2VycHJpbnRfY3JlYXRlZF9hdBIcChoyMDI1LTEx
LTA4VDAyOjExOjU0LjIyOTgxOEo7ChFhZ2VudF9maW5nZXJwcmludBImCiQ1MmFkZmU2Yy0xZGIy
LTQ0MjYtYmMxNS1lMWExNDFlMWNmOGVKGgoKYWdlbnRfcm9sZRIMCgpUZXN0IEFnZW50egIYAYUB
AAEAABLhAwoQA0G2MzGBgIAlMTYgJWRMxhIIpYnGpKGS0TAqDlRhc2sgRXhlY3V0aW9uMAE5GPtH
6Iz2dRhBQPCi6Iz2dRhKLgoIY3Jld19rZXkSIgogZDg5OGE0ZTcyZWE2OGU2NTJhMzNjNjRlOTNl
Y2M5OWNKMQoHY3Jld19pZBImCiRmYjMxMGU2YS05MTcwLTQzNmUtODU4OC00OGQ0NTljZDQ1NjJK
OgoQY3Jld19maW5nZXJwcmludBImCiQ3NjRjZTZjYi0zMmQxLTRhYmQtYWVmOC00M2YxODU3ZDkw
NDRKLgoIdGFza19rZXkSIgogYjI0ZjI1ZDM4OGE0YTQxZjk3YjAzNjY1NjI4ZDIzNGVKMQoHdGFz
a19pZBImCiQyOTdiZGE3OC1jMWM5LTQ0ZjktYjEwNC1jN2RkYWM5MDJkZTlKOwoRYWdlbnRfZmlu
Z2VycHJpbnQSJgokNTJhZGZlNmMtMWRiMi00NDI2LWJjMTUtZTFhMTQxZTFjZjhlShoKCmFnZW50
X3JvbGUSDAoKVGVzdCBBZ2VudEo6ChB0YXNrX2ZpbmdlcnByaW50EiYKJDFjZTU2NDdmLTU1ZGUt
NGM0MC1hOTRkLTk2ZmRkYzI2MzlmY3oCGAGFAQABAAASpwgKEBoj6ZRqMncKbcghjbnAWUcSCPB6
e/eWFevXKgxDcmV3IENyZWF0ZWQwATlotlvsjPZ1GEGIYnbsjPZ1GEoZCg5jcmV3YWlfdmVyc2lv
bhIHCgUxLjQuMUobCg5weXRob25fdmVyc2lvbhIJCgczLjEyLjEwSi4KCGNyZXdfa2V5EiIKIDdh
OTk2YjBmMjM1ZTNjNWVkZTI5MjAxOGM4OTViMjBlSjEKB2NyZXdfaWQSJgokODE4MGRlYjAtN2Ez
NC00YjdmLThlYTEtZmNmNzVlMzg2MmU3SjoKEGNyZXdfZmluZ2VycHJpbnQSJgokYmVkYzY1ODgt
NzRiZi00MDc1LThjMGEtZGIyYTQ5M2Q4YmRkShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFs
ShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19u
dW1iZXJfb2ZfYWdlbnRzEgIYAUo7ChtjcmV3X2ZpbmdlcnByaW50X2NyZWF0ZWRfYXQSHAoaMjAy
NS0xMS0wOFQwMjoxMTo1NC4zMDUzMTRK1AIKC2NyZXdfYWdlbnRzEsQCCsECW3sia2V5IjogIjIw
YjQ2ZjFhMDM3ZjE3MmZjZWZkMmNiMDMwMmNjZjgzIiwgImlkIjogIjE1MzYyNjhlLWJjYmYtNDVk
ZC04OTk5LWQ0N2IzYmYxOTg2ZiIsICJyb2xlIjogIlJlc2VhcmNoIEFzc2lzdGFudCIsICJ2ZXJi
b3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyNSwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25f
Y2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6
IGZhbHNlLCAiYWxsb3dfY29kZV9leGVjdXRpb24/IjogZmFsc2UsICJtYXhfcmV0cnlfbGltaXQi
OiAyLCAidG9vbHNfbmFtZXMiOiBbXX1dSocCCgpjcmV3X3Rhc2tzEvgBCvUBW3sia2V5IjogIjYx
MmRjNjlhMjEwOWFhYzkwZWY3MTFjOTExYTFlZjZkIiwgImlkIjogImZkY2IxMGE3LTRiMDUtNDVj
MS04YjQzLTM2MTM2M2ZhNWU0MSIsICJhc3luY19leGVjdXRpb24/IjogZmFsc2UsICJodW1hbl9p
bnB1dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAiUmVzZWFyY2ggQXNzaXN0YW50IiwgImFnZW50
X2tleSI6ICIyMGI0NmYxYTAzN2YxNzJmY2VmZDJjYjAzMDJjY2Y4MyIsICJ0b29sc19uYW1lcyI6
IFtdfV16AhgBhQEAAQAAEqQEChDE5eZ8gyOi+JWLOqBc8BN7Egifc/dK/JRbMCoMVGFzayBDcmVh
dGVkMAE5wMib7Iz2dRhB2Lid7Iz2dRhKLgoIY3Jld19rZXkSIgogN2E5OTZiMGYyMzVlM2M1ZWRl
MjkyMDE4Yzg5NWIyMGVKMQoHY3Jld19pZBImCiQ4MTgwZGViMC03YTM0LTRiN2YtOGVhMS1mY2Y3
NWUzODYyZTdKOgoQY3Jld19maW5nZXJwcmludBImCiRiZWRjNjU4OC03NGJmLTQwNzUtOGMwYS1k
YjJhNDkzZDhiZGRKLgoIdGFza19rZXkSIgogNjEyZGM2OWEyMTA5YWFjOTBlZjcxMWM5MTFhMWVm
NmRKMQoHdGFza19pZBImCiRmZGNiMTBhNy00YjA1LTQ1YzEtOGI0My0zNjEzNjNmYTVlNDFKOgoQ
dGFza19maW5nZXJwcmludBImCiQzYTVhY2RiZi0yOTQxLTQ2ZTItODlhMC1hNzQ2YTcyM2QyNmFK
OwobdGFza19maW5nZXJwcmludF9jcmVhdGVkX2F0EhwKGjIwMjUtMTEtMDhUMDI6MTE6NTQuMzA1
MjczSjsKEWFnZW50X2ZpbmdlcnByaW50EiYKJDg2Yzg2NTAwLWI5ODEtNDIzMC04ZGE4LTA3MzE5
MDk4Njc5NUoiCgphZ2VudF9yb2xlEhQKElJlc2VhcmNoIEFzc2lzdGFudHoCGAGFAQABAAAS6QMK
EIGjazYO3GmE6OQW3Ik4NNcSCHormvhvVOoQKg5UYXNrIEV4ZWN1dGlvbjABOaBonuyM9nUYQUBr
teyM9nUYSi4KCGNyZXdfa2V5EiIKIDdhOTk2YjBmMjM1ZTNjNWVkZTI5MjAxOGM4OTViMjBlSjEK
B2NyZXdfaWQSJgokODE4MGRlYjAtN2EzNC00YjdmLThlYTEtZmNmNzVlMzg2MmU3SjoKEGNyZXdf
ZmluZ2VycHJpbnQSJgokYmVkYzY1ODgtNzRiZi00MDc1LThjMGEtZGIyYTQ5M2Q4YmRkSi4KCHRh
c2tfa2V5EiIKIDYxMmRjNjlhMjEwOWFhYzkwZWY3MTFjOTExYTFlZjZkSjEKB3Rhc2tfaWQSJgok
ZmRjYjEwYTctNGIwNS00NWMxLThiNDMtMzYxMzYzZmE1ZTQxSjsKEWFnZW50X2ZpbmdlcnByaW50
EiYKJDg2Yzg2NTAwLWI5ODEtNDIzMC04ZGE4LTA3MzE5MDk4Njc5NUoiCgphZ2VudF9yb2xlEhQK
ElJlc2VhcmNoIEFzc2lzdGFudEo6ChB0YXNrX2ZpbmdlcnByaW50EiYKJDNhNWFjZGJmLTI5NDEt
NDZlMi04OWEwLWE3NDZhNzIzZDI2YXoCGAGFAQABAAASpggKEFLXUh5Z1WIsgOQ3KtCkrs4SCLNn
HEHKdDaoKgxDcmV3IENyZWF0ZWQwATlwQ0PyjPZ1GEE4tmLyjPZ1GEoZCg5jcmV3YWlfdmVyc2lv
bhIHCgUxLjQuMUobCg5weXRob25fdmVyc2lvbhIJCgczLjEyLjEwSi4KCGNyZXdfa2V5EiIKIGYy
MjdkMTEyMjI3MjU2OWNhYzIxNzdmZWM2NTVkNGY5SjEKB2NyZXdfaWQSJgokYmExMTgyZGUtNmJi
Ni00ZGI3LWEwZWUtYjNhY2FlNjcxMWNjSjoKEGNyZXdfZmluZ2VycHJpbnQSJgokY2U3OTBjYTIt
YTEyZi00MzljLTlkMWMtMmFmN2FhODA5ZWZmShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFs
ShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19u
dW1iZXJfb2ZfYWdlbnRzEgIYAUo7ChtjcmV3X2ZpbmdlcnByaW50X2NyZWF0ZWRfYXQSHAoaMjAy
NS0xMS0wOFQwMjoxMTo1NC40MDQxMzFK0wIKC2NyZXdfYWdlbnRzEsMCCsACW3sia2V5IjogIjAw
ODM5OTZmNDBiOGMzNGU5NThkOGQzY2QxYWRhOTc0IiwgImlkIjogIjRmMmQxZDg4LTIyYmItNDNj
YS04YjEzLTVkODZiZmJhMjNiYyIsICJyb2xlIjogIlJlc2VhcmNoIEFzc2lzdGFudCIsICJ2ZXJi
b3NlPyI6IHRydWUsICJtYXhfaXRlciI6IDI1LCAibWF4X3JwbSI6IG51bGwsICJmdW5jdGlvbl9j
YWxsaW5nX2xsbSI6ICIiLCAibGxtIjogImdwdC00byIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/Ijog
ZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6
IDIsICJ0b29sc19uYW1lcyI6IFtdfV1KhwIKCmNyZXdfdGFza3MS+AEK9QFbeyJrZXkiOiAiMmVh
MzQ5ZTllMmQ1ZmZiNThjN2FjZGRhMGEzNzNjYTkiLCAiaWQiOiAiMzBjOGQ3MzktNDNiOS00NzI4
LTg2ZDItZDgwMDY5Zjk3MWQ0IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lu
cHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJSZXNlYXJjaCBBc3Npc3RhbnQiLCAiYWdlbnRf
a2V5IjogIjAwODM5OTZmNDBiOGMzNGU5NThkOGQzY2QxYWRhOTc0IiwgInRvb2xzX25hbWVzIjog
W119XXoCGAGFAQABAAASpAQKEHwnB3P/jEz2b/dR7U+PphMSCFj487heRbtfKgxUYXNrIENyZWF0
ZWQwATkIkajyjPZ1GEEgh6nyjPZ1GEouCghjcmV3X2tleRIiCiBmMjI3ZDExMjIyNzI1NjljYWMy
MTc3ZmVjNjU1ZDRmOUoxCgdjcmV3X2lkEiYKJGJhMTE4MmRlLTZiYjYtNGRiNy1hMGVlLWIzYWNh
ZTY3MTFjY0o6ChBjcmV3X2ZpbmdlcnByaW50EiYKJGNlNzkwY2EyLWExMmYtNDM5Yy05ZDFjLTJh
ZjdhYTgwOWVmZkouCgh0YXNrX2tleRIiCiAyZWEzNDllOWUyZDVmZmI1OGM3YWNkZGEwYTM3M2Nh
OUoxCgd0YXNrX2lkEiYKJDMwYzhkNzM5LTQzYjktNDcyOC04NmQyLWQ4MDA2OWY5NzFkNEo6ChB0
YXNrX2ZpbmdlcnByaW50EiYKJDIzMjBlNGYzLTE3ZTItNGIyNS05MjI1LTlkYTE4ZDBlMTIyN0o7
Cht0YXNrX2ZpbmdlcnByaW50X2NyZWF0ZWRfYXQSHAoaMjAyNS0xMS0wOFQwMjoxMTo1NC40MDQw
ODlKOwoRYWdlbnRfZmluZ2VycHJpbnQSJgokZmY3YzBlODktOWY3NC00NTMwLTljZDktMzNkMjg0
NDdmODg4SiIKCmFnZW50X3JvbGUSFAoSUmVzZWFyY2ggQXNzaXN0YW50egIYAYUBAAEAAA==
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '5866'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.38.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Sat, 08 Nov 2025 07:11:55 GMT
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Assistant. You
are a helpful research assistant.\nYour personal goal is: Find information about
the population of Tokyo\nTo give my best complete final answer to the task respond
using the exact following format:\n\nThought: I now can give a great answer\nFinal
Answer: Your final answer must be the great and the most complete as possible,
it must be outcome described.\n\nI MUST use these formats, my job depends on
it!"},{"role":"user","content":"\nCurrent Task: Find information about the population
of Tokyo\n\nThis is the expected criteria for your final answer: The population
of Tokyo is 10 million\nyou MUST return the actual complete content as the final
answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '900'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xUTW/jRgy9+1cQuuxFNmzH+fItLdBiixYoggD92F0Y9IiSuB4N1SEVb7DIfy9m
ZMdOdgv0YsB8fE/vcYbzdQJQcFWsoXAtmut6P/3x7z9/X1jDzQ/bq/v36Jpf/7nf3d9H/aXiv4oy
MWT7mZwdWTMnXe/JWMIIu0holFQX11fLy5vL28VlBjqpyCda09t0JdPlfLmazm+m86sDsRV2pMUa
PkwAAL7m32QxVPSlWMO8PFY6UsWGivVLE0ARxadKgaqshsGK8gQ6CUYhu35oZWhaW8N7CLIHhwEa
fiRAaJJ1wKB7igAfw08c0MNd/r+Gh5agl37wmMKC1PAguycpYd+ya4EVJFAqW0uwl+irdwqdqI0s
GRQcG5OWQF8cUaWwmEPH3ie5nqT3NIM7PUp4NFIDfET2uPUEFRqWGfo5zziOBuAuEh5dUEjngaqk
B/Q3sii9eFbAUAGbgg4xyhAqDg30kWpyNsTkq0UFfBNSHinCxfUboyV0uEt8tuzodc7u+E3DABgJ
gcNpLDP4g609VL4xyabk65wzUiKPDhYXLw4iKVcUTGfw0LKe+41Ue3J2yP5OQQ1tUMi5OvwsERov
W/TpKJ5K2AXZB6gl5rmQkyAdO+BQ+4GCoxLc4G2I6EG5CVyzw1xOo8yi7bAd+cEohuwCPdQcUl9u
c9J1FB3Nzu9jpHpQTOsQBu/PAAxBLMvkTfh0QJ5f7r6Xpo+y1TfUoubA2m4ioUpI91xN+iKjzxOA
T3nHhldrU/RRut42JjvKn1tcX4x6xWmrz9Dl6oCaGPoTsLy9Lr8juKnIkL2erWnh0LVUnainncah
YjkDJmexv7XzPe0xOofm/8ifAOeoN6o2faSK3evIp7ZI6dX7r7aXMWfDhVJ8ZEcbY4rpKCqqcfDj
g1Tokxp1m5pDQ7GPPL5Kdb/ZLlar5fJ2gTfF5HnyLwAAAP//AwDuELD3ngUAAA==
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 08 Nov 2025 07:11:57 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '2312'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '2336'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-requests:
- '10000'
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-project-requests:
- '9999'
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-project-requests:
- 6ms
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1


@@ -1,129 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Research Assistant. You
are a helpful research assistant.\nYour personal goal is: Find information about
the population of Tokyo\nTo give my best complete final answer to the task respond
using the exact following format:\n\nThought: I now can give a great answer\nFinal
Answer: Your final answer must be the great and the most complete as possible,
it must be outcome described.\n\nI MUST use these formats, my job depends on
it!"},{"role":"user","content":"\nCurrent Task: Find information about the population
of Tokyo\n\nThis is the expected criteria for your final answer: The population
of Tokyo is 10 million\nyou MUST return the actual complete content as the final
answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '906'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFRRj9NIDH7vr7Dm5V7SatNt2dI3dAcCJBASi+4Ehyp34iSGyXg042y3
oP3vaJLdpnCLdC+RZj5/tr8v9nyfARiuzBaMbVFtF9z8z4//vLtcr/5eH76Vr6tXz68Thvert7fN
Xx+ePTVFZsj+C1l9YC2sdMGRsvgRtpFQKWctr54s15v10/JqADqpyGVaE3S+WpTzjj3PlxfL9fxi
NS9X9/RW2FIyW/g0AwD4Pnxzo76iW7OFi+LhpqOUsCGzPQUBmCgu3xhMiZOiV1NMoBWv5Ifer1vp
m1a38Aq8HMCih4ZvCBCaLADQpwPFf/0L9ujg2XDawnVLECT0DrNgkBqu5etRgBNgCFFuuUMld4Ry
BR07l4PYg7YEy0tIgSyjgwPGKgFGwgIOLds286VW8mDFJ64oUjWQrESaqljW4wJeyoFuKBbANRyl
P1EGAnnlSPfxb0ijBHGcTnW8dX1F6dGOilyAKRWgcvCpAPQV3LBz2FB6kHGgpBQ9BIyaO8t3IVJN
VvtIxXieHMrG7KXXc0NUoFyfToEkOFoM1tbc9KPg8mLyL0GSjpQ7SrlDqqCWUa0lrxHdYMwf6bzu
vleohBJ4USCfhxTTKHsfBbNd3YM7iv7Xn5G48VyzRa/uCA5jQ3FxPkmR6j5hHmffO3cGoPeiQw/D
DH++R+5OU+ukCVH26ReqqdlzaneRMInPE5pUghnQuxnA52E7+p8G3oQoXdCdylcaypVXl2M+M23l
GVqu71EVRTcBy82meCThriJFdulswYxF21I1UadtxL5iOQNmZ7L/285juUfp7Jv/k34CrKWgVO1C
pIrtz5KnsEj51fpd2MnmoWGTKN6wpZ0yxfwrKqqxd+NTYtIxKXW7mn1DMUQe35M67FZ2uVmX9ebJ
0szuZj8AAAD//wMA850i/F4FAAA=
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 08 Nov 2025 07:11:59 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1472'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1532'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-project-tokens:
- '149999800'
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1


@@ -0,0 +1,449 @@
interactions:
- request:
body: !!binary |
CuMOCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSug4KEgoQY3Jld2FpLnRl
bGVtZXRyeRKSDAoQK+dPhrB8w3HKFlxX60XzYRIIk5aB+A8oCWQqDENyZXcgQ3JlYXRlZDABObix
K+HWrwgYQcBiMeHWrwgYShoKDmNyZXdhaV92ZXJzaW9uEggKBjAuODAuMEoaCg5weXRob25fdmVy
c2lvbhIICgYzLjExLjdKLgoIY3Jld19rZXkSIgogZjM0NmE5YWQ2ZDczMDYzZTA2NzdiMTdjZTlj
NTAxNzdKMQoHY3Jld19pZBImCiQ3NjRjZWM1YS04NzkxLTRmN2MtOWY0MC1hNTMzMzJmOTk3YzBK
HAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNyZXdf
bnVtYmVyX29mX3Rhc2tzEgIYAkobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgCSqwFCgtjcmV3
X2FnZW50cxKcBQqZBVt7ImtleSI6ICI3M2MzNDljOTNjMTYzYjVkNGRmOThhNjRmYWMxYzQzMCIs
ICJpZCI6ICJjZDgwYjlhNy1hN2QzLTQzNTQtYjUyOC1jMzAyODA0MjA3YzgiLCAicm9sZSI6ICJ7
dG9waWN9IFNlbmlvciBEYXRhIFJlc2VhcmNoZXJcbiIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4
X2l0ZXIiOiAyMCwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwg
ImxsbSI6ICJncHQtNG8iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAiYWxsb3dfY29k
ZV9leGVjdXRpb24/IjogZmFsc2UsICJtYXhfcmV0cnlfbGltaXQiOiAyLCAidG9vbHNfbmFtZXMi
OiBbXX0sIHsia2V5IjogImJiMDY4Mzc3YzE2NDFiZTZkN2Q5N2E1MTY1OWRiNjEzIiwgImlkIjog
ImJmZjc3YmUyLWU4MjQtNGEyOS1hZTFlLTQyMWFjMzc2MjY2YyIsICJyb2xlIjogInt0b3BpY30g
UmVwb3J0aW5nIEFuYWx5c3RcbiIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyMCwg
Im1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQt
NG8iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAiYWxsb3dfY29kZV9leGVjdXRpb24/
IjogZmFsc2UsICJtYXhfcmV0cnlfbGltaXQiOiAyLCAidG9vbHNfbmFtZXMiOiBbXX1dSpMECgpj
cmV3X3Rhc2tzEoQECoEEW3sia2V5IjogIjZhZmM0YjM5NjI1OWZiYjc2ODFmNTZjNzc1NWNjOTM3
IiwgImlkIjogIjRmNTFlYzM2LTVlMDctNGU4Ni1iYzIxLWU1MTQ0Mzg2YmIyYSIsICJhc3luY19l
eGVjdXRpb24/IjogZmFsc2UsICJodW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAi
e3RvcGljfSBTZW5pb3IgRGF0YSBSZXNlYXJjaGVyXG4iLCAiYWdlbnRfa2V5IjogIjczYzM0OWM5
M2MxNjNiNWQ0ZGY5OGE2NGZhYzFjNDMwIiwgInRvb2xzX25hbWVzIjogW119LCB7ImtleSI6ICJi
MTdiMTg4ZGJmMTRmOTNhOThlNWI5NWFhZDM2NzU3NyIsICJpZCI6ICIwMGJmZDY5ZC03OWZiLTRj
MjctYTM0Yi02NzBkZWJlMzU0NWYiLCAiYXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiaHVtYW5f
aW5wdXQ/IjogZmFsc2UsICJhZ2VudF9yb2xlIjogInt0b3BpY30gUmVwb3J0aW5nIEFuYWx5c3Rc
biIsICJhZ2VudF9rZXkiOiAiYmIwNjgzNzdjMTY0MWJlNmQ3ZDk3YTUxNjU5ZGI2MTMiLCAidG9v
bHNfbmFtZXMiOiBbXX1degIYAYUBAAEAABKOAgoQsN5cQC9ZzBr2B0OKBR2WCxII3ULL7Wk965Yq
DFRhc2sgQ3JlYXRlZDABOWB9ROHWrwgYQfg0ReHWrwgYSi4KCGNyZXdfa2V5EiIKIGYzNDZhOWFk
NmQ3MzA2M2UwNjc3YjE3Y2U5YzUwMTc3SjEKB2NyZXdfaWQSJgokNzY0Y2VjNWEtODc5MS00Zjdj
LTlmNDAtYTUzMzMyZjk5N2MwSi4KCHRhc2tfa2V5EiIKIDZhZmM0YjM5NjI1OWZiYjc2ODFmNTZj
Nzc1NWNjOTM3SjEKB3Rhc2tfaWQSJgokNGY1MWVjMzYtNWUwNy00ZTg2LWJjMjEtZTUxNDQzODZi
YjJhegIYAYUBAAEAAA==
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '1894'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.27.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Sun, 17 Nov 2024 07:09:57 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are LLMs Senior Data Researcher\n.
You''re a seasoned researcher with a knack for uncovering the latest developments
in LLMs. Known for your ability to find the most relevant information and present
it in a clear and concise manner.\n\nYour personal goal is: Uncover cutting-edge
developments in LLMs\n\nTo give my best complete final answer to the task use
the exact following format:\n\nThought: I now can give a great answer\nFinal
Answer: Your final answer must be the great and the most complete as possible,
it must be outcome described.\n\nI MUST use these formats, my job depends on
it!"}, {"role": "user", "content": "\nCurrent Task: Conduct a thorough research
about LLMs Make sure you find any interesting and relevant information given
the current year is 2024.\n\n\nThis is the expect criteria for your final answer:
A list with 10 bullet points of the most relevant information about LLMs\n\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
"stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1235'
content-type:
- application/json
cookie:
- __cf_bm=08pKRcLhS1PDw0mYfL2jz19ac6M.T31GoiMuI5DlX6w-1731827382-1.0.1.1-UfOLu3AaIUuXP1sGzdV6oggJ1q7iMTC46t08FDhYVrKcW5YmD4CbifudOJiSgx8h0JLTwZdgk.aG05S0eAO_PQ;
_cfuvid=74kaPOoAcp8YRSA0XocQ1FFNksu9V0_KiWdQfo7wQuQ-1731827382509-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA2RXwW4cOQ69z1cQffFMUG04iWeS8a2xmcn2wkYMr4MFdnNhS6wqTlRSjSh1pz0/
vyBVbfdmL4atkijq8b1H+q8fAFbsVzewciMWN81hvfn8OP75+fDu3+Ufh08fNuHvzPt//sHxbvzX
08dVpyfS7g9y5XTq0qVpDlQ4xfbZZcJCGvX1u7ev37959/bXa/swJU9Bjw1zWV+n9ZurN9frq/fr
q1+Wg2NiR7K6gf/8AADwl/3UFKOnb6sbuOpOKxOJ4ECrm+dNAKucgq6sUISlYCyr7uWjS7FQtKwf
x1SHsdzAFmI6gMMIA+8JEAZNHTDKgTLAl/g7Rwywsb9v4Ev8El9fwqtXn2aKm+2FwMf7x/U1PFAg
FPKvXt1A+wR5WWo7OjiM7Eboay4jZeBpzmlPApbUt1IxQI2esmbtOQ6A0cNAkTIqruBwxh0HLkxy
CdsCqe8pC3BUsPUedK5mdMcODH7eczl2FsalkTJFR8ARMsmcotjV04yZPJQEXATmTJ4ciaQsHUz4
VdNgBQNIhGJhDFBSCtCnDLsqHHVdOiBfHRY7pvd52lNIM2W5VMDeKGAfUxoCKWA0cWS4UyYoXNsI
yoIO2g4IWKMbNauRTpuNNh30yemlA6QIE+VBfw0Yh4oDfYfegcsIUw2Fp+QxfAff48jSglrp55z0
2aCF6ACr59QewhMOJCCskTBSqhKOHVAcMbqGjgDOc2BnVdJyQM8UvEDgrwR7pTM8s1FeompGeoTj
YCC9VZA2sYw5zewuBP4WsHpShNpvHeyO8LyhAxbwJDzEVsA5c8pc+IlAsKdytKuojOz0+SkK+4VL
luXt7R1UFZCS6UJgVzkUDaSATxpmxDz1NUCqZa5L6jtGZc6caY+BYtFIhDkw5ZdKGLDSgacpRSl6
qfIZZORerzhg9nLiIe8CwWYLnuaQjhPFYnBcKxx3VPBCTFBwn2ldMrI+9zFjlD7liTL8+On+8Sd4
e3mlSOkBGFEfWHLy1ZGHT/eP+rmDXUKxTKjv2bFmbwEtuXnOCd1IAmXEAiGp/FUgtRhmhqEUURkH
ggk5luXsyMMIM2VNCKMjpdcCAtA3R8HwLppzaFouKF8bnprrjlya1HrmNNeAGZoJmsjQoaeJnaJF
mN0IvtJJr2mmuEZn1J1TYHc06H5W6G5PxTCdwQeWwmG5fuP3mqcoYg/kFAhcljTTlrk/P1HIjZH/
rCQwotpkUIDOha45tRKCTBgC5Q6mlOkM7O/4oSImP5AGUc83yaZaIKaCu3A09HOa2BR/hm8H9G1e
ZK4e0RBgU3dj/ZkiIfVK9eVhO62d7timx9O9htkvitnvHGn9WOOzf9zyxIU8fMCCC73G5MUq0+vm
0jbbBXYiLCc8GhH3dDJ6r66Bu6DbXZWSJn5qCWqwyE4fcm4kJzT0hZHI2749SrHYQuVkY0sNVDlG
J2WFZpBSq0pzzBMGMpNjDPxEHjj6KiWzOri5VaABg+EzkTffEHJq64bRO8Xot8VQdJPJUDtILKrg
z2JupWZuiQhRVNXzELlnh7EATfOIwq32S6TNtmvYpTxgXFARIFEWsIyavdk6Zm9p9xknOqT8tRVC
jaw8Z+IWBjiXaizN8o+XcKdUtH4XmQQwE6BPs9nBnNEVY6CBsNk+dwa1KeU1iwtJ6Hn9pZV0reIt
56X5aekl1Xzi1nvFTS3hgYWUj/c5TXOB3+LAkShzHBS3jZyEsRiCyWcRpu+0SekpejllKJM2Qu3+
AgguV2dN+qRXQ0jfOfGTnmgvWBx9IRDHfQo6irR2YngrS3p2wHGuZbl6scahsqfGp5LgqM3OTuog
kUlqKNJBGauctckqSyWUgssYtkwpKbYRxTRBGYSyytKQ+1WR28ZCwzIEGU82dVCyk4cHQg2q6N1+
Zy7cpgqr9DIicRzCEXZkfXsJSn6J+dAwSyENShBrqGmvL82EYV14ohcDO/PyDvacbXo7a/IcgaeJ
suhISXHPOUVNeWGIXm6c0682ObUGo6OVDlMxsxtP7laF8oWo5VFmm+Es4Zex0TSx2T7fv2D3+krB
e6ChBp3MjvDhxSfM+T+mPeWWFhxSDv6gr1W85pyGrJaqKSzd2sw3NBLlFtRkijnV6F+mif8d0kwL
c+a9jaVCrubnofRsNHGUo7GRhM79TAB50lroRDNUzN7gODVhii7VjEMraEz7RhKOCsdzOVtTbLHn
xBo1Ux/INW91tagLrK0VLWpb7o4eSqboW/8eqU12quCAeaD/a2g/qih+UiWmfplpdTYIPIxlKSdn
GHI6tIz7UK2gdtF4ztMG4rlL6p7FqDCcD0vn/+Dk/wIAAP//jJdBDsIgEEX3nIKwdqO20csYgjBU
IgKB6cJF726AprSxJm7nD5/5LMg80GMSma/caO1cnxZisn4I0d/TrC91bZxJD57v9i7TUUIfWFEn
QumtkNm4gS1WvwSO/gkuG57Ol+rHGgs2ta/4RylDj8I24Xo+HnYMuQIUxqYV3DGZ1zPVjjYSLAv7
SiCr2N/j7HnX6MYN/9g3QUoICIpncDJyG7m1Rcis/KtteeYyMEvvhPDi2rgBYoim4qoOvOul7jsF
AhiZyAcAAP//AwCidyXxtw8AAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8e3de5de7b6c6217-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 17 Nov 2024 07:10:00 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '6026'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999713'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_553f04a622d026a28dd3c5da55568fcd
status:
code: 200
message: OK
- request:
body: !!binary |
Cs4CCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSpQIKEgoQY3Jld2FpLnRl
bGVtZXRyeRKOAgoQIn+FuHJydyMnR3y/Qfb8GBII2zXFs4gynEgqDFRhc2sgQ3JlYXRlZDABOShs
9ljYrwgYQQiP+FjYrwgYSi4KCGNyZXdfa2V5EiIKIGYzNDZhOWFkNmQ3MzA2M2UwNjc3YjE3Y2U5
YzUwMTc3SjEKB2NyZXdfaWQSJgokNzY0Y2VjNWEtODc5MS00ZjdjLTlmNDAtYTUzMzMyZjk5N2Mw
Si4KCHRhc2tfa2V5EiIKIGIxN2IxODhkYmYxNGY5M2E5OGU1Yjk1YWFkMzY3NTc3SjEKB3Rhc2tf
aWQSJgokMDBiZmQ2OWQtNzlmYi00YzI3LWEzNGItNjcwZGViZTM1NDVmegIYAYUBAAEAAA==
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '337'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.27.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Sun, 17 Nov 2024 07:10:01 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are LLMs Reporting Analyst\n.
You''re a meticulous analyst with a keen eye for detail. You''re known for your
ability to turn complex data into clear and concise reports, making it easy
for others to understand and act on the information you provide.\nYour personal
goal is: Create detailed reports based on LLMs data analysis and research findings\n\nTo
give my best complete final answer to the task use the exact following format:\n\nThought:
I now can give a great answer\nFinal Answer: Your final answer must be the great
and the most complete as possible, it must be outcome described.\n\nI MUST use
these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent
Task: Review the context you got and expand each topic into a full section for
a report. Make sure the report is detailed and contains any and all relevant
information.\n\n\nThis is the expect criteria for your final answer: A fully
fledge reports with the mains topics, each with a full section of information.
Formatted as markdown without ''```''\n\nyou MUST return the actual complete
content as the final answer, not a summary.\n\nThis is the context you''re working
with:\n1. **OpenAI''s GPT-4 Released**: OpenAI released GPT-4, which further
improves contextual understanding and generation capabilities. It offers increased
accuracy, creativity, and coherence in responses compared to its predecessors,
making it an essential tool for businesses, educators, and developers.\n\n2.
**Google''s Gemini Model**: In 2024, Google launched the Gemini model, focusing
on merging language understanding with multimodal capabilities. This model can
process text, audio, and images simultaneously, enhancing its applications in
fields like voice assistants and image captioning.\n\n3. **Anthropic''s Claude**:
Claude, by Anthropic, is designed to prioritize safety and ethical considerations
in LLM usage. It''s built to minimize harmful outputs and biases prevalent in
earlier language models, demonstrating a shift towards responsible AI deployment.\n\n4.
**Meta''s Open Pre-trained Transformer (OPT) 3.0**: Meta has introduced OPT
3.0, boasting efficient training approaches that lower computational costs while
maintaining high performance. The model excels in translation tasks and has
become a popular choice for academic research due to its open-access policy.\n\n5.
**Language Model Distillation Advances**: Recent advances in model distillation
techniques have allowed developers to deploy smaller, more efficient language
models on edge devices without notably compromising performance, expanding the
accessibility and application of LLMs in mobile and IoT devices.\n\n6. **Fine-Tuning
with Limited Data**: Methods for fine-tuning LLMs with limited data have improved,
enabling customization for niche applications without the need for vast datasets.
This development has opened doors to using LLMs in specialized industries, like
legal and medical sectors.\n\n7. **Ethical and Transparent AI Use**: 2024 has
seen a significant emphasis on ethical AI, with organizations establishing standardized
frameworks for LLM transparency and accountability. More companies are adopting
practices like AI model cards to disclose model capabilities, limitations, and
data sources.\n\n8. **The Rise of Prompt Engineering**: As models become more
advanced, prompt engineering has emerged as a crucial technique for optimizing
model outputs. This involves designing specific input prompts that guide LLMs
to yield desired results, thus enhancing usability in content creation and customer
service.\n\n9. **Integration with Augmented Reality**: Language models in 2024
are increasingly being integrated with AR technologies to provide real-time
language translation, virtual assistants in immersive environments, and interactive
educational tools, enriching the user''s experience with contextualized AI assistance.\n\n10.
**Regulatory Developments**: Governments worldwide are progressing towards formalizing
regulations around LLM usage, focusing on data privacy, security, and ethical
concerns. These developments aim to safeguard users while encouraging innovation
in AI technology.\n\nThese points reflect the cutting-edge advancements and
trends in the field of large language models (LLMs) as of 2024, highlighting
their growing influence and the increasing focus on ethical and practical deployment.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
"stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '4624'
content-type:
- application/json
cookie:
- __cf_bm=08pKRcLhS1PDw0mYfL2jz19ac6M.T31GoiMuI5DlX6w-1731827382-1.0.1.1-UfOLu3AaIUuXP1sGzdV6oggJ1q7iMTC46t08FDhYVrKcW5YmD4CbifudOJiSgx8h0JLTwZdgk.aG05S0eAO_PQ;
_cfuvid=74kaPOoAcp8YRSA0XocQ1FFNksu9V0_KiWdQfo7wQuQ-1731827382509-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA3RXS28bRxK+51cUlIMTYEjIlvOAbso6zhIrI4KjXSyyuRS7izO16umeVHWTovPn
F9U9Q5GG92JY0696fI/iX18BXLG/uoUrN2B24xRWd/98HA75/u//0u3z4V3+DftPP+82P/3+799/
vrm56uxE2v6XXF5OrV0ap0CZU2zLTggz2a2vf7h5/eObH95eX9eFMXkKdqyf8uptWr25fvN2df3j
6vr7+eCQ2JFe3cJ/vgIA+Kv+ayFGT89Xt1CvqV9GUsWerm5PmwCuJAX7coWqrBljvupeFl2KmWKN
+nFIpR/yLWwgpgM4jNDzngCht9ABox5I/oh/xPccMcBd/fvWPnwNH2lKkiFFuPN7jI5GilmBI9yj
9AT3GPuCPcEHy1bhm/v7D/otrMCyrVd8Da/X8OtE8W7zSuGXh8fVW/hIgVDJ24ZNrHu7eQ9wzJJ8
ceTb5g5GlCeOPSAo95F37DBmoDgs8Vg4YQmklh0yuSGmkPrjGn4qHLxdkCJwVpiEPDlSTaLdHFHa
7UgUhHYcyYPDCbccODPVZGs5n3PBACV6Eqt3vRKjByGdUlSCniIJGjTW8A86Ao+TpP2pZC4UTzAQ
90MmewWdK4Lu2Nmi1IpAhRPvOR+7evmWciYBlwYSio4smjzQ6VEFzsvDpGt4HEgJ8LxZA+4JRvQ0
54oROHrWiaLiNhDklAKgk6QKexRORUHJ5SS6hvdJYFuUI6nSqV5z9RVc0ZxGEusbCTrLHvIghrml
AB6MOduUtaa0Z6mVPAFX17CJQL64WrzOUlKSPSmgHVnS8S3SXRKYSDRFDPyJPARCiRz7DpBrV9pV
SWrvPO0ppMm+j0kIKPbY218zR1qK8y4ySFh9T6HfPWwAQ0gHrQ/XKyRti2bAaQrcYq4vRcxFMLxg
cZJkOLPHMuqTrmdGvFnDLyn1gYwRNHLkxh9b/nyh4rmDgCW6gby90/giNAlp7TDCxCkSib2kmSbb
NZaQeUzeKL25JMSxdqs3rMb+JdxLaB84D5dEyGnJCIwNHWDxnBpOecSeFJTtWYyUioZjtySxFcIn
hUgH6CWVWNO421xU0LDLOvP3jOnhCFj6BmXrzK7EijIMnI+QdrA3HT1DU2dR7hsS8kBjy8SOtlSO
lojJuNBA0c8cRpeX74GeQR1Fo4I1dp/CvsLHKjoFAo8ZDQ2jruHOe27hWL5NX9LBasSxVcWKaDuq
BDWdsUJMSZXPRQZdBcsS49IwW7UQseQ0mtUswD3Tmw48jSlqnltKz5mimsqX3K4zQJBn7ICMqBk5
Wklb804kNtZZMXdMwZ/gerOGu2icnti9UvhbwOLJ1tr/uhN5PGyPL1u7GVAuiaUATjizwwA68M6K
fUDxVi5OtvKpghd3lI81KMpD3e1SVPZzojoDx9MU0nGs9K2wCdWOPnMB9F6qbNkljiSq4WVAGXcl
QCp5KrMmbRmruh0GdkNTzC1RNOEKgWLfOlRDne9+Kb5WRu2SKzq7TBPxalmzTrPp7N2mQdEoMVbl
EtYn7eaKglLjspAjbw02wTHcLqV4ydryOGc1m/Zv7Pi5JqHJnU7k2NAJQoH22PxSDR+5zgHmPKDF
Daa2A2HIg0OhDgL1GGaEJJmSWYwRZCxxecIqRmL2wtEwVcE3R1uVpLaYFSYUHFOJeQHV2zV8oIyv
tBo/PAitsmB130fBqEYvEvjm14fHb+FmfW3HlgMPj/bFcBRJW02NeLTbsWOrTr2pUpbykPxCozxg
BjF3qHlMJWOjrtEHo1frf7jMZiJ8MruxeMyELmSKnp1NPRNKZlcCSqhMy5ZAwOaFWFs8WxQc2JNO
QugBfaq60LiPnkZ2hhdCcQP4YsZc80oTxVVTh+YxSxC2YL4MFF0qUhXYpRBwa72y5l6MIDtJY8VT
H9LWDHh5dO6pzRzN2GfpbFm+0tnw6lNVirwpWuzNqHMCz3sSJQgc+8Ka6411YKqgvHSq8xmuWtWi
A6ZgzsKuQnC3scGvzX2L+//fGe8knXWYAYStJPQkIBh7Mq5MdTSxapv8zlNbLTTJSee+W382z8I7
yybMnZwnYLXdH8kZziZJvcxa34Ly5ydqhPxnIRvCrEs2bvla2Usq62gyIx0M3A/heIbky5zV1IV8
b+dNqL888PkEMeXPPLQaXhpZ6RzNNuwUnesVl7abGw4Wl03zLbet8cLqtkmPF48bxF4UxxTr0sdM
gyZJO3P9c2dOk/2yMNQZM+2cEIZV5pG+OECZpcnssrSnyhohTUUcrVzzvqoeFPcsqbqbruE307WL
KUZLffkLXbjQzqZrghN78ORYOcXVgsg2MczRclQb6ZvcxvrLAuXYnSTVjDum0abqPQ3swmzoFgU2
X1h5YcvJLLoJfSvwDMzv1/CeI60eSzxNZvc8sg0D7zCjbXscLthu5bHfMqtczoSwFbp2tV4S5kvq
QGMQLTEk90QWnI0YjG3cbgMCf8JFryI7m6deCjbbiN2ailH+z8JyOYrYI2Zxs3idmnIEGqd0MGLO
XlW9kaMvmqW1rFWyGlKt3VKo+WeKsX5AqQJlfa3XWUsbkv4HAAD//4xZO4/jNhDu91cQWyWAYuSQ
PdxtaSRXbBcsEKQKDFoaycRJokBSdlzsfw++mSEl2T4gpS2Jj+HMfA8u5ZOs631gJYEXXZApW+68
WLKWEOZIFAa3RleFDvTYmEoX4Qw0RxuCwxbSdcLKwFpj9LVjxiYMNIOScJX9my6qMq2PSci7G0d/
1oCi0a4CLIgP6F4FifMo0Nkx3xbC9Qtrp3UAcx592ZlvCs74kHF2sgHpsn8zf0XKiXQlG1hmsPjG
PNu+vSJwBe73b9plaxApTejjNWtb7C6jLPOaMnktfM/WNfhBzolbnhd35nc/THZkPoxK8xeFUBY9
wjU4KG2wA118+L5kTg62qRlrGLRi3ftI5f+lRVRSF5nhYHFcIHLUpec69C6G2WisGzDoEVaDSQHq
kM98jsiJ43XV+GrI1Qc2QqTEW0FjERClfyeqdRl6/Otl3moPZorDdLLRCU7gYJh89WRFDXnVr67z
Af0oUUx5/rPtXSOppm0XkaAxzkG0MxcQZBJOKuQBkeOUNnRPR5uXxPsqUPEO9PGt+TP4YUrm29g5
Va3qBBUrRcrTRD+dOBalBNC7GEYwAC0DCMAOFDpk60ZuFBTmvEPCDMIx5ORVBjCJb4jTHAmV24Ib
pznphLHSA2UTbXbN0leUxGRVoECK8dBtAsW5L50vlwmv+Ui1FxBJYUai216UI2fM2Jk55pLgTqza
TySGtok7/bZGskrsCwxVDAxxnpIg+sZLWdk4ihH7Nzk88cYyVmpAFiOIsw+0xdbXNXXI5HDpRpUQ
Ue4sSbNIzog7usBzpa1auODWXfJ8CmYkWjTqKzwk9TT8qCsX24Aa807sFhScXL35ACT4Y1s+DvKx
+Wn//vNWluP8wMGpYUXfBj8mBoE7d2PHS7hhNyuZUD1wxSQNBgqMnmtao5ZLjgmeZvMM+e59H1eG
AEkpXUcKvOojpQu07f6dh8FBLS2tBAZHGyCGOdZoRcERn/S2mxVnlNMHu9YNgFwqX9okFSegG9Os
8pN3wnwUv3fm7xOlExuKm11pe4zGh62FYabeJjZjKmyp9sOReaAQnFv8z9IympO/aDWzI0jyQeM6
l8R14DpjJ2LNKLOp/SuOtJt7+IxX84c4IPwK3tjHB/ySS0LqXcOwqMHKdP5MYVT6ISYZhrj40Deq
5EEXHVQvUKLMvYI7iB/xPBbHI1DPHASYBxSbgjuz6RypnkMxmgUtciLo8MVFqMMMzsH0w7bUzVYA
gJODj5dAiUQ+ozd4xbLF/VhRGymPrXkhLaZsaiNoGIRI5Dto4M2hdsFfJJ4ThbNFuVTK7jYMtQGl
5yAtRPLerOBZpuAT1clM87FnEJAdVmay59zcL1ZacpwjcpGpPPNGwP/Fh3S65taVmw3Cx3h53Sni
xXkYWC2kex0ngxEYE+tCqAyeuWRxEfmAZe5k9y5YzJbxzuyj5GBn2czygxtJRSCJgVV4w8Ztqh5Y
K5o10v5vbkxQPG6cJdbMBPU4mqVM7vjt4wuIysSTnXLI2xnuBzbYxNpO9MgGW1+DBWrnaHELN859
r/9/lHu13ndT8Meoz8v/rRtdPB1AW/2IO7SY/PTMTz+ejPmH7+/mzZXcswDiIfnvNGLAry8vMt7z
cmO4PP306fNv+jj5ZPvVk9fXL9WDIQ8NARHj6hLwuba4Eli+XW4M2ZlfPXhabfx+QY/Gls27sfs/
wy8P6pqmRM0Bl2yu3m56eS0Q7lR/9FoJNC/4OV5jouHQurGjMAUn15rtdHj5XLefXxqy9Pz08fQf
AAAA//8DAMIhqlTfHQAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8e3de605dca06217-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 17 Nov 2024 07:10:10 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '9951'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29998873'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 2ms
x-request-id:
- req_52a4f98f9c08fbaa1724634d237f3245
status:
code: 200
message: OK
version: 1

File diff suppressed because one or more lines are too long


@@ -0,0 +1,480 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this\n tool non-stop.\n\nIMPORTANT: Use the following format
in your response:\n\n```\nThought: you should always think about what to do\nAction:
the action to take, only one name of [get_final_answer], just the name, exactly
as it''s written.\nAction Input: the input to the action, just a simple JSON
object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n```\n\nOnce all necessary information is gathered,
return the following format:\n\n```\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question\n```"}, {"role": "user",
"content": "\nCurrent Task: The final answer is 42. But don''t give it yet,
instead keep using the `get_final_answer` tool.\n\nThis is the expected criteria
for your final answer: The final answer\nyou MUST return the actual complete
content as the final answer, not a summary.\n\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream":
false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1455'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4yTTW/bMAyG7/4VhM5x4XiJ0/o29NQOA7bLdtgKQ5FpW4ssahK9rgjy3wfZaezs
A9jFBz58KfIlfUwAhK5FCUJ1klXvTHr/uLlvdt+bw15+ePxcH7K8eC7W608f36nb92IVFbT/hopf
VTeKemeQNdkJK4+SMVZd77a3RZFt8u0IeqrRRFnrON1Q2mur0zzLN2m2S9e3Z3VHWmEQJXxJAACO
4zf2aWv8KUrIVq+RHkOQLYrykgQgPJkYETIEHVhaFqsZKrKMdmz9AUJHg6khxrQdaAjmBYaAwB0C
ExlgglZyhx568gjaNuR7GQeFhvyY12grDUgbntHfAHy1b1XkJbTI1QirCc4MHqwbuITjCWDZm8dm
CDL6YwdjFkBaSzw+O7rydCaniw+GWudpH36TikZbHbrKowxk48yByYmRnhKAp9Hv4cpC4Tz1jium
A47P5XfrqZ6Y17ykZ8jE0szxN/l5S9f1qhpZahMWGxNKqg7rWTqvVw61pgVIFlP/2c3fak+Ta9v+
T/kZKIWOsa6cx1qr64nnNI/xL/hX2sXlsWER0P/QCivW6OMmamzkYKbbFOElMPbxXFr0zuvpQBtX
bYtMNgVut3ciOSW/AAAA//8DABaZ0EiuAwAA
headers:
CF-RAY:
- 983ce5296d26239d-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 23 Sep 2025 20:47:05 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=1fs_tWXSjOXLvWmDDleCPs6zqeoMCE9WMzw34UrJEY0-1758660425-1.0.1.1-yN.usYgsw3jmDue61Z30KB.SQOEVjuZCOMFqPwf22cZ9TvM1FzFJFR5PZPyS.uYDZAWJMX29SzSPw_PcDk7dbHVSGM.ubbhoxn1Y18nRqrI;
path=/; expires=Tue, 23-Sep-25 21:17:05 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=yrBvDYdy4HQeXpy__ld4uITFc6g85yQ2XUMU0NQ.v7Y-1758660425881-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '509'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '618'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999680'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999680'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_eca26fd131fc445a8c9b54b5b6b57f15
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this\n tool non-stop.\n\nIMPORTANT: Use the following format
in your response:\n\n```\nThought: you should always think about what to do\nAction:
the action to take, only one name of [get_final_answer], just the name, exactly
as it''s written.\nAction Input: the input to the action, just a simple JSON
object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n```\n\nOnce all necessary information is gathered,
return the following format:\n\n```\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question\n```"}, {"role": "user",
"content": "\nCurrent Task: The final answer is 42. But don''t give it yet,
instead keep using the `get_final_answer` tool.\n\nThis is the expected criteria
for your final answer: The final answer\nyou MUST return the actual complete
content as the final answer, not a summary.\n\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\n\nThought:"}, {"role": "assistant", "content": "I should continuously
use the tool to gather more information for the final answer. \nAction: get_final_answer \nAction
Input: {} \nObservation: 42"}, {"role": "assistant", "content": "I should continuously
use the tool to gather more information for the final answer. \nAction: get_final_answer \nAction
Input: {} \nObservation: 42\nNow it''s time you MUST give your absolute best
final answer. You''ll ignore all previous instructions, stop using any tools,
and just return your absolute BEST Final answer."}], "model": "gpt-4o-mini",
"stop": ["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2005'
content-type:
- application/json
cookie:
- __cf_bm=1fs_tWXSjOXLvWmDDleCPs6zqeoMCE9WMzw34UrJEY0-1758660425-1.0.1.1-yN.usYgsw3jmDue61Z30KB.SQOEVjuZCOMFqPwf22cZ9TvM1FzFJFR5PZPyS.uYDZAWJMX29SzSPw_PcDk7dbHVSGM.ubbhoxn1Y18nRqrI;
_cfuvid=yrBvDYdy4HQeXpy__ld4uITFc6g85yQ2XUMU0NQ.v7Y-1758660425881-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLBbtswDL37KwSd48HxHCf1begaYDu2uy2Frci0rFWmBEluOxT590Fy
GrtdB+wigHx8T3wkXxJCqGxpRSjvmeeDUen19+Ja3H0Vt/nt/mafQ1bcCKHuzOPzEbd0FRj6+Au4
f2V94nowCrzUOMHcAvMQVNfbza4ssyIvIzDoFlSgCePTQqeDRJnmWV6k2TZd787sXksOjlbkZ0II
IS/xDX1iC8+0ItnqNTOAc0wArS5FhFCrVchQ5px0nqGnqxnkGj1gbL1pmgP+6PUoel+RbwT1E3kI
j++BdBKZIgzdE9gD7mP0JUYVKfIDNk2zlLXQjY4FazgqtQAYovYsjCYauj8jp4sFpYWx+ujeUWkn
Ubq+tsCcxtCu89rQiJ4SQu7jqMY37qmxejC+9voB4nefr4pJj84bmtH17gx67Zma88U6X32gV7fg
mVRuMWzKGe+hnanzZtjYSr0AkoXrv7v5SHtyLlH8j/wMcA7GQ1sbC63kbx3PZRbCAf+r7DLl2DB1
YB8lh9pLsGETLXRsVNNZUffbeRjqTqIAa6ycbqsz9abMWFfCZnNFk1PyBwAA//8DAFrI5iJpAwAA
headers:
CF-RAY:
- 983ce52deb75239d-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 23 Sep 2025 20:47:06 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '542'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '645'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999560'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999560'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_0b91fc424913433f92a2635ee229ae15
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this\n tool non-stop.\n\nIMPORTANT: Use the following format
in your response:\n\n```\nThought: you should always think about what to do\nAction:
the action to take, only one name of [get_final_answer], just the name, exactly
as it''s written.\nAction Input: the input to the action, just a simple JSON
object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n```\n\nOnce all necessary information is gathered,
return the following format:\n\n```\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question\n```"}, {"role": "user",
"content": "\nCurrent Task: The final answer is 42. But don''t give it yet,
instead keep using the `get_final_answer` tool.\n\nThis is the expected criteria
for your final answer: The final answer\nyou MUST return the actual complete
content as the final answer, not a summary.\n\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\n\nThought:"}, {"role": "assistant", "content": "I should continuously
use the tool to gather more information for the final answer. \nAction: get_final_answer \nAction
Input: {} \nObservation: 42"}, {"role": "assistant", "content": "I should continuously
use the tool to gather more information for the final answer. \nAction: get_final_answer \nAction
Input: {} \nObservation: 42\nNow it''s time you MUST give your absolute best
final answer. You''ll ignore all previous instructions, stop using any tools,
and just return your absolute BEST Final answer."}], "model": "gpt-4o-mini",
"stop": ["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2005'
content-type:
- application/json
cookie:
- __cf_bm=1fs_tWXSjOXLvWmDDleCPs6zqeoMCE9WMzw34UrJEY0-1758660425-1.0.1.1-yN.usYgsw3jmDue61Z30KB.SQOEVjuZCOMFqPwf22cZ9TvM1FzFJFR5PZPyS.uYDZAWJMX29SzSPw_PcDk7dbHVSGM.ubbhoxn1Y18nRqrI;
_cfuvid=yrBvDYdy4HQeXpy__ld4uITFc6g85yQ2XUMU0NQ.v7Y-1758660425881-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLBbtswDL37KwSd48FxHTfxbSgwoFsxYFtPXQpblWlbqywKEr1sKPLv
g+w0dtcO2EUA+fie+Eg+RYxxVfOCcdkJkr3V8dXH7Ko1X24On/zuNvu8vdHZ1299epe0+R3yVWDg
ww+Q9Mx6J7G3GkihmWDpQBAE1fXlZpvnSZbmI9BjDTrQWktxhnGvjIrTJM3i5DJeb0/sDpUEzwv2
PWKMsafxDX2aGn7xgiWr50wP3osWeHEuYow71CHDhffKkzDEVzMo0RCYsfWqqvbmtsOh7ahg18zg
gT2GhzpgjTJCM2H8AdzefBij92NUsCzdm6qqlrIOmsGLYM0MWi8AYQySCKMZDd2fkOPZgsbWOnzw
f1F5o4zyXelAeDShXU9o+YgeI8bux1ENL9xz67C3VBI+wvjdxS6b9Pi8oRldb08gIQk957N1unpD
r6yBhNJ+MWwuheygnqnzZsRQK1wA0cL1627e0p6cK9P+j/wMSAmWoC6tg1rJl47nMgfhgP9Vdp7y
2DD34H4qCSUpcGETNTRi0NNZcf/bE/Rlo0wLzjo13VZjy02eiCaHzWbHo2P0BwAA//8DAG1a2r5p
AwAA
headers:
CF-RAY:
- 983ce5328a31239d-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 23 Sep 2025 20:47:07 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '418'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '435'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999560'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999560'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_7353c84c469e47edb87bca11e7eef26c
status:
code: 200
message: OK
- request:
body: '{"trace_id": "4a5d3ea4-8a22-44c3-9dee-9b18f60844a5", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "0.193.2",
"privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate":
300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at":
"2025-09-24T05:27:26.071046+00:00"}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '436'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.193.2
X-Crewai-Organization-Id:
- d3a3d10c-35db-423f-a7a4-c026030ba64d
X-Crewai-Version:
- 0.193.2
method: POST
uri: http://localhost:3000/crewai_plus/api/v1/tracing/batches
response:
body:
string: '{"id":"29f0c8c3-5f4d-44c4-8039-c396f56c331c","trace_id":"4a5d3ea4-8a22-44c3-9dee-9b18f60844a5","execution_type":"crew","crew_name":"Unknown
Crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"0.193.2","privacy_level":"standard","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"Unknown
Crew","flow_name":null,"crewai_version":"0.193.2","privacy_level":"standard"},"created_at":"2025-09-24T05:27:26.748Z","updated_at":"2025-09-24T05:27:26.748Z"}'
headers:
Content-Length:
- '496'
cache-control:
- max-age=0, private, must-revalidate
content-security-policy:
- 'default-src ''self'' *.crewai.com crewai.com; script-src ''self'' ''unsafe-inline''
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com
https://run.pstmn.io https://share.descript.com/; style-src ''self'' ''unsafe-inline''
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self''
data: *.crewai.com crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net; font-src ''self'' data: *.crewai.com crewai.com;
connect-src ''self'' *.crewai.com crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ ws://localhost:3036
wss://localhost:3036; frame-src ''self'' *.crewai.com crewai.com https://connect.useparagon.com/
https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/
https://www.youtube.com https://share.descript.com'
content-type:
- application/json; charset=utf-8
etag:
- W/"15b0f995f6a15e4200edfb1225bf94cc"
permissions-policy:
- camera=(), microphone=(self), geolocation=()
referrer-policy:
- strict-origin-when-cross-origin
server-timing:
- cache_read.active_support;dur=0.04, sql.active_record;dur=23.95, cache_generate.active_support;dur=2.46,
cache_write.active_support;dur=0.11, cache_read_multi.active_support;dur=0.08,
start_processing.action_controller;dur=0.00, instantiation.active_record;dur=0.28,
feature_operation.flipper;dur=0.03, start_transaction.active_record;dur=0.01,
transaction.active_record;dur=25.78, process_action.action_controller;dur=673.72
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 827aec6a-c65c-4cc7-9d2a-2d28e541824f
x-runtime:
- '0.699809'
x-xss-protection:
- 1; mode=block
status:
code: 201
message: Created
version: 1
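
The system prompts recorded in the cassette above instruct the model to reply in a Thought / Action / Action Input / Observation layout (stopping at "\nObservation:") or to finish with "Final Answer:". As a rough, hedged illustration of that recorded format only — this is not CrewAI's actual output parser, and the function name and regular expressions below are assumptions — a minimal parse of such a reply could look like this:

```python
# Illustrative sketch only: splitting a Thought/Action/Final Answer reply in the
# format shown in the recorded prompts above. Not the library's real parser.
import json
import re


def parse_react_reply(text: str) -> tuple[str | None, dict | None, str | None]:
    """Return (action_name, action_input, final_answer) from a model reply."""
    final = re.search(r"Final Answer:\s*(.*)", text, re.DOTALL)
    if final:
        # The reply finished the task; no tool call to execute.
        return None, None, final.group(1).strip()
    action = re.search(r"Action:\s*(.+)", text)
    action_input = re.search(r"Action Input:\s*(\{.*\})", text, re.DOTALL)
    name = action.group(1).strip() if action else None
    args = json.loads(action_input.group(1)) if action_input else None
    return name, args, None


# Example based on the reply recorded above, which ends with
# "Action: get_final_answer \nAction Input: {}".
name, args, answer = parse_react_reply(
    "Thought: I should keep using the tool.\nAction: get_final_answer \nAction Input: {}"
)
assert name == "get_final_answer" and args == {} and answer is None
```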

File diff suppressed because it is too large


@@ -0,0 +1,187 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Calculate 2 +
2\n\nThis is the expect criteria for your final answer: The result of the calculation\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '833'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.59.6
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.59.6
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AoJqi2nPubKHXLut6gkvISe0PizvR\",\n \"object\":
\"chat.completion\",\n \"created\": 1736556064,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: The result of the calculation 2 + 2 is 4.\",\n \"refusal\": null\n
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
\ ],\n \"usage\": {\n \"prompt_tokens\": 161,\n \"completion_tokens\":
25,\n \"total_tokens\": 186,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_bd83329f63\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 9000dbe81c55bf7f-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 11 Jan 2025 00:41:05 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=LCNQO7gfz6xDjDqEOZ7ha3jDwPnDlsjsmJyScVf4UUw-1736556065-1.0.1.1-2ZcyBDpLvmxy7UOdCrLd6falFapRDuAu6WcVrlOXN0QIgZiDVYD0bCFWGCKeeE.6UjPHoPY6QdlEZZx8.0Pggw;
path=/; expires=Sat, 11-Jan-25 01:11:05 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=cRATWhxkeoeSGFg3z7_5BrHO3JDsmDX2Ior2i7bNF4M-1736556065175-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1060'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999810'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_463fbd324e01320dc253008f919713bd
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"trace_id": "110f149f-af21-4861-b208-2a568e0ec690", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "0.193.2",
"privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate":
300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at":
"2025-09-23T20:49:30.660760+00:00"}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '436'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.193.2
X-Crewai-Version:
- 0.193.2
method: POST
uri: http://localhost:3000/crewai_plus/api/v1/tracing/batches
response:
body:
string: '{"error":"bad_credentials","message":"Bad credentials"}'
headers:
Content-Length:
- '55'
cache-control:
- no-cache
content-security-policy:
- 'default-src ''self'' *.crewai.com crewai.com; script-src ''self'' ''unsafe-inline''
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com
https://run.pstmn.io https://share.descript.com/; style-src ''self'' ''unsafe-inline''
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self''
data: *.crewai.com crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net; font-src ''self'' data: *.crewai.com crewai.com;
connect-src ''self'' *.crewai.com crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ ws://localhost:3036
wss://localhost:3036; frame-src ''self'' *.crewai.com crewai.com https://connect.useparagon.com/
https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/
https://www.youtube.com https://share.descript.com'
content-type:
- application/json; charset=utf-8
permissions-policy:
- camera=(), microphone=(self), geolocation=()
referrer-policy:
- strict-origin-when-cross-origin
server-timing:
- cache_read.active_support;dur=0.04, cache_fetch_hit.active_support;dur=0.00,
cache_read_multi.active_support;dur=0.06, start_processing.action_controller;dur=0.00,
process_action.action_controller;dur=1.86
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- efa34d51-cac4-408f-95cc-b0f933badd75
x-runtime:
- '0.021535'
x-xss-protection:
- 1; mode=block
status:
code: 401
message: Unauthorized
version: 1
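
The blocks above follow a VCR-style cassette layout: a list of recorded request/response pairs under `interactions:` closed by `version: 1`. As a hedged sketch of how such a recording can be replayed offline — assuming the `vcrpy` package and a hypothetical cassette file containing the 401 interaction shown above; the file path, test name, and payload are illustrative, not taken from this diff:

```python
# Illustrative sketch only: replaying a VCR-style cassette like the ones above.
# The cassette path, test name, and payload are hypothetical examples.
import json

import requests
import vcr


@vcr.use_cassette("cassettes/example_trace_batch.yaml", record_mode="none")
def test_tracing_batch_replays_recorded_401():
    # record_mode="none" forbids new recordings, so no real network call is
    # made; the response is served from the matching recorded interaction.
    resp = requests.post(
        "http://localhost:3000/crewai_plus/api/v1/tracing/batches",
        data=json.dumps({"trace_id": "110f149f-af21-4861-b208-2a568e0ec690"}),
        headers={"Content-Type": "application/json"},
    )
    assert resp.status_code == 401
    assert resp.json()["error"] == "bad_credentials"
```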

Some files were not shown because too many files have changed in this diff