mirror of
https://github.com/crewAIInc/crewAI.git
synced 2025-12-16 12:28:30 +00:00
Compare commits
16 Commits
gl/chore/d
...
gl/feat/py
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
81850350e8 | ||
|
|
b2c278ed22 | ||
|
|
f6aed9798b | ||
|
|
40a2d387a1 | ||
|
|
6f36d7003b | ||
|
|
7404d8f198 | ||
|
|
138b9af274 | ||
|
|
9e5906c52f | ||
|
|
fc521839e4 | ||
|
|
a5e0803f20 | ||
|
|
c4279b0339 | ||
|
|
965aa48ea1 | ||
|
|
e4cc9a664c | ||
|
|
7e6171d5bc | ||
|
|
61ad1fb112 | ||
|
|
54710a8711 |
@@ -1200,6 +1200,52 @@ Learn how to get the most out of your LLM configuration:
|
||||
)
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Transport Interceptors">
|
||||
CrewAI provides message interceptors for several providers, allowing you to hook into request/response cycles at the transport layer.
|
||||
|
||||
**Supported Providers:**
|
||||
- ✅ OpenAI
|
||||
- ✅ Anthropic
|
||||
|
||||
**Basic Usage:**
|
||||
```python
|
||||
import httpx
|
||||
from crewai import LLM
|
||||
from crewai.llms.hooks import BaseInterceptor
|
||||
|
||||
class CustomInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
"""Custom interceptor to modify requests and responses."""
|
||||
|
||||
def on_outbound(self, request: httpx.Request) -> httpx.Request:
|
||||
"""Print request before sending to the LLM provider."""
|
||||
print(request)
|
||||
return request
|
||||
|
||||
def on_inbound(self, response: httpx.Response) -> httpx.Response:
|
||||
"""Process response after receiving from the LLM provider."""
|
||||
print(f"Status: {response.status_code}")
|
||||
print(f"Response time: {response.elapsed}")
|
||||
return response
|
||||
|
||||
# Use the interceptor with an LLM
|
||||
llm = LLM(
|
||||
model="openai/gpt-4o",
|
||||
interceptor=CustomInterceptor()
|
||||
)
|
||||
```
|
||||
|
||||
**Important Notes:**
|
||||
- Both methods must return the received object or type of object.
|
||||
- Modifying received objects may result in unexpected behavior or application crashes.
|
||||
- Not all providers support interceptors - check the supported providers list above
|
||||
|
||||
<Info>
|
||||
Interceptors operate at the transport layer. This is particularly useful for:
|
||||
- Message transformation and filtering
|
||||
- Debugging API interactions
|
||||
</Info>
|
||||
</Accordion>
|
||||
</AccordionGroup>
|
||||
|
||||
## Common Issues and Solutions
|
||||
|
||||
@@ -11,9 +11,13 @@ The [Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP)
|
||||
|
||||
CrewAI offers **two approaches** for MCP integration:
|
||||
|
||||
### Simple DSL Integration** (Recommended)
|
||||
### 🚀 **Simple DSL Integration** (Recommended)
|
||||
|
||||
Use the `mcps` field directly on agents for seamless MCP tool integration:
|
||||
Use the `mcps` field directly on agents for seamless MCP tool integration. The DSL supports both **string references** (for quick setup) and **structured configurations** (for full control).
|
||||
|
||||
#### String-Based References (Quick Setup)
|
||||
|
||||
Perfect for remote HTTPS servers and CrewAI AMP marketplace:
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
@@ -32,6 +36,46 @@ agent = Agent(
|
||||
# MCP tools are now automatically available to your agent!
|
||||
```
|
||||
|
||||
#### Structured Configurations (Full Control)
|
||||
|
||||
For complete control over connection settings, tool filtering, and all transport types:
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
from crewai.mcp import MCPServerStdio, MCPServerHTTP, MCPServerSSE
|
||||
from crewai.mcp.filters import create_static_tool_filter
|
||||
|
||||
agent = Agent(
|
||||
role="Advanced Research Analyst",
|
||||
goal="Research with full control over MCP connections",
|
||||
backstory="Expert researcher with advanced tool access",
|
||||
mcps=[
|
||||
# Stdio transport for local servers
|
||||
MCPServerStdio(
|
||||
command="npx",
|
||||
args=["-y", "@modelcontextprotocol/server-filesystem"],
|
||||
env={"API_KEY": "your_key"},
|
||||
tool_filter=create_static_tool_filter(
|
||||
allowed_tool_names=["read_file", "list_directory"]
|
||||
),
|
||||
cache_tools_list=True,
|
||||
),
|
||||
# HTTP/Streamable HTTP transport for remote servers
|
||||
MCPServerHTTP(
|
||||
url="https://api.example.com/mcp",
|
||||
headers={"Authorization": "Bearer your_token"},
|
||||
streamable=True,
|
||||
cache_tools_list=True,
|
||||
),
|
||||
# SSE transport for real-time streaming
|
||||
MCPServerSSE(
|
||||
url="https://stream.example.com/mcp/sse",
|
||||
headers={"Authorization": "Bearer your_token"},
|
||||
),
|
||||
]
|
||||
)
|
||||
```
|
||||
|
||||
### 🔧 **Advanced: MCPServerAdapter** (For Complex Scenarios)
|
||||
|
||||
For advanced use cases requiring manual connection management, the `crewai-tools` library provides the `MCPServerAdapter` class.
|
||||
@@ -68,12 +112,14 @@ uv pip install 'crewai-tools[mcp]'
|
||||
|
||||
## Quick Start: Simple DSL Integration
|
||||
|
||||
The easiest way to integrate MCP servers is using the `mcps` field on your agents:
|
||||
The easiest way to integrate MCP servers is using the `mcps` field on your agents. You can use either string references or structured configurations.
|
||||
|
||||
### Quick Start with String References
|
||||
|
||||
```python
|
||||
from crewai import Agent, Task, Crew
|
||||
|
||||
# Create agent with MCP tools
|
||||
# Create agent with MCP tools using string references
|
||||
research_agent = Agent(
|
||||
role="Research Analyst",
|
||||
goal="Find and analyze information using advanced search tools",
|
||||
@@ -96,13 +142,53 @@ crew = Crew(agents=[research_agent], tasks=[research_task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
### Quick Start with Structured Configurations
|
||||
|
||||
```python
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai.mcp import MCPServerStdio, MCPServerHTTP, MCPServerSSE
|
||||
|
||||
# Create agent with structured MCP configurations
|
||||
research_agent = Agent(
|
||||
role="Research Analyst",
|
||||
goal="Find and analyze information using advanced search tools",
|
||||
backstory="Expert researcher with access to multiple data sources",
|
||||
mcps=[
|
||||
# Local stdio server
|
||||
MCPServerStdio(
|
||||
command="python",
|
||||
args=["local_server.py"],
|
||||
env={"API_KEY": "your_key"},
|
||||
),
|
||||
# Remote HTTP server
|
||||
MCPServerHTTP(
|
||||
url="https://api.research.com/mcp",
|
||||
headers={"Authorization": "Bearer your_token"},
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
# Create task
|
||||
research_task = Task(
|
||||
description="Research the latest developments in AI agent frameworks",
|
||||
expected_output="Comprehensive research report with citations",
|
||||
agent=research_agent
|
||||
)
|
||||
|
||||
# Create and run crew
|
||||
crew = Crew(agents=[research_agent], tasks=[research_task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
That's it! The MCP tools are automatically discovered and available to your agent.
|
||||
|
||||
## MCP Reference Formats
|
||||
|
||||
The `mcps` field supports various reference formats for maximum flexibility:
|
||||
The `mcps` field supports both **string references** (for quick setup) and **structured configurations** (for full control). You can mix both formats in the same list.
|
||||
|
||||
### External MCP Servers
|
||||
### String-Based References
|
||||
|
||||
#### External MCP Servers
|
||||
|
||||
```python
|
||||
mcps=[
|
||||
@@ -117,7 +203,7 @@ mcps=[
|
||||
]
|
||||
```
|
||||
|
||||
### CrewAI AMP Marketplace
|
||||
#### CrewAI AMP Marketplace
|
||||
|
||||
```python
|
||||
mcps=[
|
||||
@@ -133,17 +219,166 @@ mcps=[
|
||||
]
|
||||
```
|
||||
|
||||
### Mixed References
|
||||
### Structured Configurations
|
||||
|
||||
#### Stdio Transport (Local Servers)
|
||||
|
||||
Perfect for local MCP servers that run as processes:
|
||||
|
||||
```python
|
||||
from crewai.mcp import MCPServerStdio
|
||||
from crewai.mcp.filters import create_static_tool_filter
|
||||
|
||||
mcps=[
|
||||
"https://external-api.com/mcp", # External server
|
||||
"https://weather.service.com/mcp#forecast", # Specific external tool
|
||||
"crewai-amp:financial-insights", # AMP service
|
||||
"crewai-amp:data-analysis#sentiment_tool" # Specific AMP tool
|
||||
MCPServerStdio(
|
||||
command="npx",
|
||||
args=["-y", "@modelcontextprotocol/server-filesystem"],
|
||||
env={"API_KEY": "your_key"},
|
||||
tool_filter=create_static_tool_filter(
|
||||
allowed_tool_names=["read_file", "write_file"]
|
||||
),
|
||||
cache_tools_list=True,
|
||||
),
|
||||
# Python-based server
|
||||
MCPServerStdio(
|
||||
command="python",
|
||||
args=["path/to/server.py"],
|
||||
env={"UV_PYTHON": "3.12", "API_KEY": "your_key"},
|
||||
),
|
||||
]
|
||||
```
|
||||
|
||||
#### HTTP/Streamable HTTP Transport (Remote Servers)
|
||||
|
||||
For remote MCP servers over HTTP/HTTPS:
|
||||
|
||||
```python
|
||||
from crewai.mcp import MCPServerHTTP
|
||||
|
||||
mcps=[
|
||||
# Streamable HTTP (default)
|
||||
MCPServerHTTP(
|
||||
url="https://api.example.com/mcp",
|
||||
headers={"Authorization": "Bearer your_token"},
|
||||
streamable=True,
|
||||
cache_tools_list=True,
|
||||
),
|
||||
# Standard HTTP
|
||||
MCPServerHTTP(
|
||||
url="https://api.example.com/mcp",
|
||||
headers={"Authorization": "Bearer your_token"},
|
||||
streamable=False,
|
||||
),
|
||||
]
|
||||
```
|
||||
|
||||
#### SSE Transport (Real-Time Streaming)
|
||||
|
||||
For remote servers using Server-Sent Events:
|
||||
|
||||
```python
|
||||
from crewai.mcp import MCPServerSSE
|
||||
|
||||
mcps=[
|
||||
MCPServerSSE(
|
||||
url="https://stream.example.com/mcp/sse",
|
||||
headers={"Authorization": "Bearer your_token"},
|
||||
cache_tools_list=True,
|
||||
),
|
||||
]
|
||||
```
|
||||
|
||||
### Mixed References
|
||||
|
||||
You can combine string references and structured configurations:
|
||||
|
||||
```python
|
||||
from crewai.mcp import MCPServerStdio, MCPServerHTTP
|
||||
|
||||
mcps=[
|
||||
# String references
|
||||
"https://external-api.com/mcp", # External server
|
||||
"crewai-amp:financial-insights", # AMP service
|
||||
|
||||
# Structured configurations
|
||||
MCPServerStdio(
|
||||
command="npx",
|
||||
args=["-y", "@modelcontextprotocol/server-filesystem"],
|
||||
),
|
||||
MCPServerHTTP(
|
||||
url="https://api.example.com/mcp",
|
||||
headers={"Authorization": "Bearer token"},
|
||||
),
|
||||
]
|
||||
```
|
||||
|
||||
### Tool Filtering
|
||||
|
||||
Structured configurations support advanced tool filtering:
|
||||
|
||||
```python
|
||||
from crewai.mcp import MCPServerStdio
|
||||
from crewai.mcp.filters import create_static_tool_filter, create_dynamic_tool_filter, ToolFilterContext
|
||||
|
||||
# Static filtering (allow/block lists)
|
||||
static_filter = create_static_tool_filter(
|
||||
allowed_tool_names=["read_file", "write_file"],
|
||||
blocked_tool_names=["delete_file"],
|
||||
)
|
||||
|
||||
# Dynamic filtering (context-aware)
|
||||
def dynamic_filter(context: ToolFilterContext, tool: dict) -> bool:
|
||||
# Block dangerous tools for certain agent roles
|
||||
if context.agent.role == "Code Reviewer":
|
||||
if "delete" in tool.get("name", "").lower():
|
||||
return False
|
||||
return True
|
||||
|
||||
mcps=[
|
||||
MCPServerStdio(
|
||||
command="npx",
|
||||
args=["-y", "@modelcontextprotocol/server-filesystem"],
|
||||
tool_filter=static_filter, # or dynamic_filter
|
||||
),
|
||||
]
|
||||
```
|
||||
|
||||
## Configuration Parameters
|
||||
|
||||
Each transport type supports specific configuration options:
|
||||
|
||||
### MCPServerStdio Parameters
|
||||
|
||||
- **`command`** (required): Command to execute (e.g., `"python"`, `"node"`, `"npx"`, `"uvx"`)
|
||||
- **`args`** (optional): List of command arguments (e.g., `["server.py"]` or `["-y", "@mcp/server"]`)
|
||||
- **`env`** (optional): Dictionary of environment variables to pass to the process
|
||||
- **`tool_filter`** (optional): Tool filter function for filtering available tools
|
||||
- **`cache_tools_list`** (optional): Whether to cache the tool list for faster subsequent access (default: `False`)
|
||||
|
||||
### MCPServerHTTP Parameters
|
||||
|
||||
- **`url`** (required): Server URL (e.g., `"https://api.example.com/mcp"`)
|
||||
- **`headers`** (optional): Dictionary of HTTP headers for authentication or other purposes
|
||||
- **`streamable`** (optional): Whether to use streamable HTTP transport (default: `True`)
|
||||
- **`tool_filter`** (optional): Tool filter function for filtering available tools
|
||||
- **`cache_tools_list`** (optional): Whether to cache the tool list for faster subsequent access (default: `False`)
|
||||
|
||||
### MCPServerSSE Parameters
|
||||
|
||||
- **`url`** (required): Server URL (e.g., `"https://api.example.com/mcp/sse"`)
|
||||
- **`headers`** (optional): Dictionary of HTTP headers for authentication or other purposes
|
||||
- **`tool_filter`** (optional): Tool filter function for filtering available tools
|
||||
- **`cache_tools_list`** (optional): Whether to cache the tool list for faster subsequent access (default: `False`)
|
||||
|
||||
### Common Parameters
|
||||
|
||||
All transport types support:
|
||||
- **`tool_filter`**: Filter function to control which tools are available. Can be:
|
||||
- `None` (default): All tools are available
|
||||
- Static filter: Created with `create_static_tool_filter()` for allow/block lists
|
||||
- Dynamic filter: Created with `create_dynamic_tool_filter()` for context-aware filtering
|
||||
- **`cache_tools_list`**: When `True`, caches the tool list after first discovery to improve performance on subsequent connections
|
||||
|
||||
## Key Features
|
||||
|
||||
- 🔄 **Automatic Tool Discovery**: Tools are automatically discovered and integrated
|
||||
@@ -152,26 +387,47 @@ mcps=[
|
||||
- 🛡️ **Error Resilience**: Graceful handling of unavailable servers
|
||||
- ⏱️ **Timeout Protection**: Built-in timeouts prevent hanging connections
|
||||
- 📊 **Transparent Integration**: Works seamlessly with existing CrewAI features
|
||||
- 🔧 **Full Transport Support**: Stdio, HTTP/Streamable HTTP, and SSE transports
|
||||
- 🎯 **Advanced Filtering**: Static and dynamic tool filtering capabilities
|
||||
- 🔐 **Flexible Authentication**: Support for headers, environment variables, and query parameters
|
||||
|
||||
## Error Handling
|
||||
|
||||
The MCP DSL integration is designed to be resilient:
|
||||
The MCP DSL integration is designed to be resilient and handles failures gracefully:
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
from crewai.mcp import MCPServerStdio, MCPServerHTTP
|
||||
|
||||
agent = Agent(
|
||||
role="Resilient Agent",
|
||||
goal="Continue working despite server issues",
|
||||
backstory="Agent that handles failures gracefully",
|
||||
mcps=[
|
||||
# String references
|
||||
"https://reliable-server.com/mcp", # Will work
|
||||
"https://unreachable-server.com/mcp", # Will be skipped gracefully
|
||||
"https://slow-server.com/mcp", # Will timeout gracefully
|
||||
"crewai-amp:working-service" # Will work
|
||||
"crewai-amp:working-service", # Will work
|
||||
|
||||
# Structured configs
|
||||
MCPServerStdio(
|
||||
command="python",
|
||||
args=["reliable_server.py"], # Will work
|
||||
),
|
||||
MCPServerHTTP(
|
||||
url="https://slow-server.com/mcp", # Will timeout gracefully
|
||||
),
|
||||
]
|
||||
)
|
||||
# Agent will use tools from working servers and log warnings for failing ones
|
||||
```
|
||||
|
||||
All connection errors are handled gracefully:
|
||||
- **Connection failures**: Logged as warnings, agent continues with available tools
|
||||
- **Timeout errors**: Connections timeout after 30 seconds (configurable)
|
||||
- **Authentication errors**: Logged clearly for debugging
|
||||
- **Invalid configurations**: Validation errors are raised at agent creation time
|
||||
|
||||
## Advanced: MCPServerAdapter
|
||||
|
||||
For complex scenarios requiring manual connection management, use the `MCPServerAdapter` class from `crewai-tools`. Using a Python context manager (`with` statement) is the recommended approach as it automatically handles starting and stopping the connection to the MCP server.
|
||||
|
||||
@@ -40,6 +40,16 @@ from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
|
||||
from crewai.lite_agent import LiteAgent
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.mcp import (
|
||||
MCPClient,
|
||||
MCPServerConfig,
|
||||
MCPServerHTTP,
|
||||
MCPServerSSE,
|
||||
MCPServerStdio,
|
||||
)
|
||||
from crewai.mcp.transports.http import HTTPTransport
|
||||
from crewai.mcp.transports.sse import SSETransport
|
||||
from crewai.mcp.transports.stdio import StdioTransport
|
||||
from crewai.memory.contextual.contextual_memory import ContextualMemory
|
||||
from crewai.rag.embeddings.types import EmbedderConfig
|
||||
from crewai.security.fingerprint import Fingerprint
|
||||
@@ -108,6 +118,7 @@ class Agent(BaseAgent):
|
||||
"""
|
||||
|
||||
_times_executed: int = PrivateAttr(default=0)
|
||||
_mcp_clients: list[Any] = PrivateAttr(default_factory=list)
|
||||
max_execution_time: int | None = Field(
|
||||
default=None,
|
||||
description="Maximum execution time for an agent to execute a task",
|
||||
@@ -526,6 +537,9 @@ class Agent(BaseAgent):
|
||||
self,
|
||||
event=AgentExecutionCompletedEvent(agent=self, task=task, output=result),
|
||||
)
|
||||
|
||||
self._cleanup_mcp_clients()
|
||||
|
||||
return result
|
||||
|
||||
def _execute_with_timeout(self, task_prompt: str, task: Task, timeout: int) -> Any:
|
||||
@@ -604,22 +618,22 @@ class Agent(BaseAgent):
|
||||
response_template=self.response_template,
|
||||
).task_execution()
|
||||
|
||||
stop_words = [self.i18n.slice("observation")]
|
||||
stop_sequences = [self.i18n.slice("observation")]
|
||||
|
||||
if self.response_template:
|
||||
stop_words.append(
|
||||
stop_sequences.append(
|
||||
self.response_template.split("{{ .Response }}")[1].strip()
|
||||
)
|
||||
|
||||
self.agent_executor = CrewAgentExecutor(
|
||||
llm=self.llm,
|
||||
llm=self.llm, # type: ignore[arg-type]
|
||||
task=task, # type: ignore[arg-type]
|
||||
agent=self,
|
||||
crew=self.crew,
|
||||
tools=parsed_tools,
|
||||
prompt=prompt,
|
||||
original_tools=raw_tools,
|
||||
stop_words=stop_words,
|
||||
stop_sequences=stop_sequences,
|
||||
max_iter=self.max_iter,
|
||||
tools_handler=self.tools_handler,
|
||||
tools_names=get_tool_names(parsed_tools),
|
||||
@@ -649,30 +663,70 @@ class Agent(BaseAgent):
|
||||
self._logger.log("error", f"Error getting platform tools: {e!s}")
|
||||
return []
|
||||
|
||||
def get_mcp_tools(self, mcps: list[str]) -> list[BaseTool]:
|
||||
"""Convert MCP server references to CrewAI tools."""
|
||||
def get_mcp_tools(self, mcps: list[str | MCPServerConfig]) -> list[BaseTool]:
|
||||
"""Convert MCP server references/configs to CrewAI tools.
|
||||
|
||||
Supports both string references (backwards compatible) and structured
|
||||
configuration objects (MCPServerStdio, MCPServerHTTP, MCPServerSSE).
|
||||
|
||||
Args:
|
||||
mcps: List of MCP server references (strings) or configurations.
|
||||
|
||||
Returns:
|
||||
List of BaseTool instances from MCP servers.
|
||||
"""
|
||||
all_tools = []
|
||||
clients = []
|
||||
|
||||
for mcp_ref in mcps:
|
||||
try:
|
||||
if mcp_ref.startswith("crewai-amp:"):
|
||||
tools = self._get_amp_mcp_tools(mcp_ref)
|
||||
elif mcp_ref.startswith("https://"):
|
||||
tools = self._get_external_mcp_tools(mcp_ref)
|
||||
else:
|
||||
continue
|
||||
for mcp_config in mcps:
|
||||
if isinstance(mcp_config, str):
|
||||
tools = self._get_mcp_tools_from_string(mcp_config)
|
||||
else:
|
||||
tools, client = self._get_native_mcp_tools(mcp_config)
|
||||
if client:
|
||||
clients.append(client)
|
||||
|
||||
all_tools.extend(tools)
|
||||
self._logger.log(
|
||||
"info", f"Successfully loaded {len(tools)} tools from {mcp_ref}"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self._logger.log("warning", f"Skipping MCP {mcp_ref} due to error: {e}")
|
||||
continue
|
||||
all_tools.extend(tools)
|
||||
|
||||
# Store clients for cleanup
|
||||
self._mcp_clients.extend(clients)
|
||||
return all_tools
|
||||
|
||||
def _cleanup_mcp_clients(self) -> None:
|
||||
"""Cleanup MCP client connections after task execution."""
|
||||
if not self._mcp_clients:
|
||||
return
|
||||
|
||||
async def _disconnect_all() -> None:
|
||||
for client in self._mcp_clients:
|
||||
if client and hasattr(client, "connected") and client.connected:
|
||||
await client.disconnect()
|
||||
|
||||
try:
|
||||
asyncio.run(_disconnect_all())
|
||||
except Exception as e:
|
||||
self._logger.log("error", f"Error during MCP client cleanup: {e}")
|
||||
finally:
|
||||
self._mcp_clients.clear()
|
||||
|
||||
def _get_mcp_tools_from_string(self, mcp_ref: str) -> list[BaseTool]:
|
||||
"""Get tools from legacy string-based MCP references.
|
||||
|
||||
This method maintains backwards compatibility with string-based
|
||||
MCP references (https://... and crewai-amp:...).
|
||||
|
||||
Args:
|
||||
mcp_ref: String reference to MCP server.
|
||||
|
||||
Returns:
|
||||
List of BaseTool instances.
|
||||
"""
|
||||
if mcp_ref.startswith("crewai-amp:"):
|
||||
return self._get_amp_mcp_tools(mcp_ref)
|
||||
if mcp_ref.startswith("https://"):
|
||||
return self._get_external_mcp_tools(mcp_ref)
|
||||
return []
|
||||
|
||||
def _get_external_mcp_tools(self, mcp_ref: str) -> list[BaseTool]:
|
||||
"""Get tools from external HTTPS MCP server with graceful error handling."""
|
||||
from crewai.tools.mcp_tool_wrapper import MCPToolWrapper
|
||||
@@ -731,6 +785,164 @@ class Agent(BaseAgent):
|
||||
)
|
||||
return []
|
||||
|
||||
def _get_native_mcp_tools(
|
||||
self, mcp_config: MCPServerConfig
|
||||
) -> tuple[list[BaseTool], Any | None]:
|
||||
"""Get tools from MCP server using structured configuration.
|
||||
|
||||
This method creates an MCP client based on the configuration type,
|
||||
connects to the server, discovers tools, applies filtering, and
|
||||
returns wrapped tools along with the client instance for cleanup.
|
||||
|
||||
Args:
|
||||
mcp_config: MCP server configuration (MCPServerStdio, MCPServerHTTP, or MCPServerSSE).
|
||||
|
||||
Returns:
|
||||
Tuple of (list of BaseTool instances, MCPClient instance for cleanup).
|
||||
"""
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
from crewai.tools.mcp_native_tool import MCPNativeTool
|
||||
|
||||
if isinstance(mcp_config, MCPServerStdio):
|
||||
transport = StdioTransport(
|
||||
command=mcp_config.command,
|
||||
args=mcp_config.args,
|
||||
env=mcp_config.env,
|
||||
)
|
||||
server_name = f"{mcp_config.command}_{'_'.join(mcp_config.args)}"
|
||||
elif isinstance(mcp_config, MCPServerHTTP):
|
||||
transport = HTTPTransport(
|
||||
url=mcp_config.url,
|
||||
headers=mcp_config.headers,
|
||||
streamable=mcp_config.streamable,
|
||||
)
|
||||
server_name = self._extract_server_name(mcp_config.url)
|
||||
elif isinstance(mcp_config, MCPServerSSE):
|
||||
transport = SSETransport(
|
||||
url=mcp_config.url,
|
||||
headers=mcp_config.headers,
|
||||
)
|
||||
server_name = self._extract_server_name(mcp_config.url)
|
||||
else:
|
||||
raise ValueError(f"Unsupported MCP server config type: {type(mcp_config)}")
|
||||
|
||||
client = MCPClient(
|
||||
transport=transport,
|
||||
cache_tools_list=mcp_config.cache_tools_list,
|
||||
)
|
||||
|
||||
async def _setup_client_and_list_tools() -> list[dict[str, Any]]:
|
||||
"""Async helper to connect and list tools in same event loop."""
|
||||
|
||||
try:
|
||||
if not client.connected:
|
||||
await client.connect()
|
||||
|
||||
tools_list = await client.list_tools()
|
||||
|
||||
try:
|
||||
await client.disconnect()
|
||||
# Small delay to allow background tasks to finish cleanup
|
||||
# This helps prevent "cancel scope in different task" errors
|
||||
# when asyncio.run() closes the event loop
|
||||
await asyncio.sleep(0.1)
|
||||
except Exception as e:
|
||||
self._logger.log("error", f"Error during disconnect: {e}")
|
||||
|
||||
return tools_list
|
||||
except Exception as e:
|
||||
if client.connected:
|
||||
await client.disconnect()
|
||||
await asyncio.sleep(0.1)
|
||||
raise RuntimeError(
|
||||
f"Error during setup client and list tools: {e}"
|
||||
) from e
|
||||
|
||||
try:
|
||||
try:
|
||||
asyncio.get_running_loop()
|
||||
import concurrent.futures
|
||||
|
||||
with concurrent.futures.ThreadPoolExecutor() as executor:
|
||||
future = executor.submit(
|
||||
asyncio.run, _setup_client_and_list_tools()
|
||||
)
|
||||
tools_list = future.result()
|
||||
except RuntimeError:
|
||||
try:
|
||||
tools_list = asyncio.run(_setup_client_and_list_tools())
|
||||
except RuntimeError as e:
|
||||
error_msg = str(e).lower()
|
||||
if "cancel scope" in error_msg or "task" in error_msg:
|
||||
raise ConnectionError(
|
||||
"MCP connection failed due to event loop cleanup issues. "
|
||||
"This may be due to authentication errors or server unavailability."
|
||||
) from e
|
||||
except asyncio.CancelledError as e:
|
||||
raise ConnectionError(
|
||||
"MCP connection was cancelled. This may indicate an authentication "
|
||||
"error or server unavailability."
|
||||
) from e
|
||||
|
||||
if mcp_config.tool_filter:
|
||||
filtered_tools = []
|
||||
for tool in tools_list:
|
||||
if callable(mcp_config.tool_filter):
|
||||
try:
|
||||
from crewai.mcp.filters import ToolFilterContext
|
||||
|
||||
context = ToolFilterContext(
|
||||
agent=self,
|
||||
server_name=server_name,
|
||||
run_context=None,
|
||||
)
|
||||
if mcp_config.tool_filter(context, tool):
|
||||
filtered_tools.append(tool)
|
||||
except (TypeError, AttributeError):
|
||||
if mcp_config.tool_filter(tool):
|
||||
filtered_tools.append(tool)
|
||||
else:
|
||||
# Not callable - include tool
|
||||
filtered_tools.append(tool)
|
||||
tools_list = filtered_tools
|
||||
|
||||
tools = []
|
||||
for tool_def in tools_list:
|
||||
tool_name = tool_def.get("name", "")
|
||||
if not tool_name:
|
||||
continue
|
||||
|
||||
# Convert inputSchema to Pydantic model if present
|
||||
args_schema = None
|
||||
if tool_def.get("inputSchema"):
|
||||
args_schema = self._json_schema_to_pydantic(
|
||||
tool_name, tool_def["inputSchema"]
|
||||
)
|
||||
|
||||
tool_schema = {
|
||||
"description": tool_def.get("description", ""),
|
||||
"args_schema": args_schema,
|
||||
}
|
||||
|
||||
try:
|
||||
native_tool = MCPNativeTool(
|
||||
mcp_client=client,
|
||||
tool_name=tool_name,
|
||||
tool_schema=tool_schema,
|
||||
server_name=server_name,
|
||||
)
|
||||
tools.append(native_tool)
|
||||
except Exception as e:
|
||||
self._logger.log("error", f"Failed to create native MCP tool: {e}")
|
||||
continue
|
||||
|
||||
return cast(list[BaseTool], tools), client
|
||||
except Exception as e:
|
||||
if client.connected:
|
||||
asyncio.run(client.disconnect())
|
||||
|
||||
raise RuntimeError(f"Failed to get native MCP tools: {e}") from e
|
||||
|
||||
def _get_amp_mcp_tools(self, amp_ref: str) -> list[BaseTool]:
|
||||
"""Get tools from CrewAI AMP MCP marketplace."""
|
||||
# Parse: "crewai-amp:mcp-name" or "crewai-amp:mcp-name#tool_name"
|
||||
@@ -762,7 +974,9 @@ class Agent(BaseAgent):
|
||||
path = parsed.path.replace("/", "_").strip("_")
|
||||
return f"{domain}_{path}" if path else domain
|
||||
|
||||
def _get_mcp_tool_schemas(self, server_params: dict) -> dict[str, dict]:
|
||||
def _get_mcp_tool_schemas(
|
||||
self, server_params: dict[str, Any]
|
||||
) -> dict[str, dict[str, Any]] | Any:
|
||||
"""Get tool schemas from MCP server for wrapper creation with caching."""
|
||||
server_url = server_params["url"]
|
||||
|
||||
@@ -794,7 +1008,7 @@ class Agent(BaseAgent):
|
||||
|
||||
async def _get_mcp_tool_schemas_async(
|
||||
self, server_params: dict[str, Any]
|
||||
) -> dict[str, dict]:
|
||||
) -> dict[str, dict[str, Any]]:
|
||||
"""Async implementation of MCP tool schema retrieval with timeouts and retries."""
|
||||
server_url = server_params["url"]
|
||||
return await self._retry_mcp_discovery(
|
||||
@@ -802,7 +1016,7 @@ class Agent(BaseAgent):
|
||||
)
|
||||
|
||||
async def _retry_mcp_discovery(
|
||||
self, operation_func, server_url: str
|
||||
self, operation_func: Any, server_url: str
|
||||
) -> dict[str, dict[str, Any]]:
|
||||
"""Retry MCP discovery operation with exponential backoff, avoiding try-except in loop."""
|
||||
last_error = None
|
||||
@@ -833,7 +1047,7 @@ class Agent(BaseAgent):
|
||||
|
||||
@staticmethod
|
||||
async def _attempt_mcp_discovery(
|
||||
operation_func, server_url: str
|
||||
operation_func: Any, server_url: str
|
||||
) -> tuple[dict[str, dict[str, Any]] | None, str, bool]:
|
||||
"""Attempt single MCP discovery operation and return (result, error_message, should_retry)."""
|
||||
try:
|
||||
@@ -937,13 +1151,13 @@ class Agent(BaseAgent):
|
||||
Field(..., description=field_description),
|
||||
)
|
||||
else:
|
||||
field_definitions[field_name] = (
|
||||
field_definitions[field_name] = ( # type: ignore[assignment]
|
||||
field_type | None,
|
||||
Field(default=None, description=field_description),
|
||||
)
|
||||
|
||||
model_name = f"{tool_name.replace('-', '_').replace(' ', '_')}Schema"
|
||||
return create_model(model_name, **field_definitions)
|
||||
return create_model(model_name, **field_definitions) # type: ignore[no-any-return,call-overload]
|
||||
|
||||
def _json_type_to_python(self, field_schema: dict[str, Any]) -> type:
|
||||
"""Convert JSON Schema type to Python type.
|
||||
@@ -963,12 +1177,12 @@ class Agent(BaseAgent):
|
||||
if "const" in option:
|
||||
types.append(str)
|
||||
else:
|
||||
types.append(self._json_type_to_python(option))
|
||||
types.append(self._json_type_to_python(option)) # type: ignore[arg-type]
|
||||
unique_types = list(set(types))
|
||||
if len(unique_types) > 1:
|
||||
result = unique_types[0]
|
||||
for t in unique_types[1:]:
|
||||
result = result | t
|
||||
result = result | t # type: ignore[assignment]
|
||||
return result
|
||||
return unique_types[0]
|
||||
|
||||
@@ -981,10 +1195,10 @@ class Agent(BaseAgent):
|
||||
"object": dict,
|
||||
}
|
||||
|
||||
return type_mapping.get(json_type, Any)
|
||||
return type_mapping.get(json_type, Any) # type: ignore[arg-type]
|
||||
|
||||
@staticmethod
|
||||
def _fetch_amp_mcp_servers(mcp_name: str) -> list[dict]:
|
||||
def _fetch_amp_mcp_servers(mcp_name: str) -> list[dict[str, Any]]:
|
||||
"""Fetch MCP server configurations from CrewAI AMP API."""
|
||||
# TODO: Implement AMP API call to "integrations/mcps" endpoint
|
||||
# Should return list of server configs with URLs
|
||||
@@ -1223,7 +1437,7 @@ class Agent(BaseAgent):
|
||||
goal=self.goal,
|
||||
backstory=self.backstory,
|
||||
llm=self.llm,
|
||||
tools=self.tools or [],
|
||||
tools=self.tools,
|
||||
max_iterations=self.max_iter,
|
||||
max_execution_time=self.max_execution_time,
|
||||
respect_context_window=self.respect_context_window,
|
||||
|
||||
@@ -25,6 +25,7 @@ from crewai.agents.tools_handler import ToolsHandler
|
||||
from crewai.knowledge.knowledge import Knowledge
|
||||
from crewai.knowledge.knowledge_config import KnowledgeConfig
|
||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
from crewai.mcp.config import MCPServerConfig
|
||||
from crewai.rag.embeddings.types import EmbedderConfig
|
||||
from crewai.security.security_config import SecurityConfig
|
||||
from crewai.tools.base_tool import BaseTool, Tool
|
||||
@@ -136,7 +137,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
default=False,
|
||||
description="Enable agent to delegate and ask questions among each other.",
|
||||
)
|
||||
tools: list[BaseTool] | None = Field(
|
||||
tools: list[BaseTool] = Field(
|
||||
default_factory=list, description="Tools at agents' disposal"
|
||||
)
|
||||
max_iter: int = Field(
|
||||
@@ -194,7 +195,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
default=None,
|
||||
description="List of applications or application/action combinations that the agent can access through CrewAI Platform. Can contain app names (e.g., 'gmail') or specific actions (e.g., 'gmail/send_email')",
|
||||
)
|
||||
mcps: list[str] | None = Field(
|
||||
mcps: list[str | MCPServerConfig] | None = Field(
|
||||
default=None,
|
||||
description="List of MCP server references. Supports 'https://server.com/path' for external servers and 'crewai-amp:mcp-name' for AMP marketplace. Use '#tool_name' suffix for specific tools.",
|
||||
)
|
||||
@@ -253,20 +254,36 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
|
||||
@field_validator("mcps")
|
||||
@classmethod
|
||||
def validate_mcps(cls, mcps: list[str] | None) -> list[str] | None:
|
||||
def validate_mcps(
|
||||
cls, mcps: list[str | MCPServerConfig] | None
|
||||
) -> list[str | MCPServerConfig] | None:
|
||||
"""Validate MCP server references and configurations.
|
||||
|
||||
Supports both string references (for backwards compatibility) and
|
||||
structured configuration objects (MCPServerStdio, MCPServerHTTP, MCPServerSSE).
|
||||
"""
|
||||
if not mcps:
|
||||
return mcps
|
||||
|
||||
validated_mcps = []
|
||||
for mcp in mcps:
|
||||
if mcp.startswith(("https://", "crewai-amp:")):
|
||||
if isinstance(mcp, str):
|
||||
if mcp.startswith(("https://", "crewai-amp:")):
|
||||
validated_mcps.append(mcp)
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Invalid MCP reference: {mcp}. "
|
||||
"String references must start with 'https://' or 'crewai-amp:'"
|
||||
)
|
||||
|
||||
elif isinstance(mcp, (MCPServerConfig)):
|
||||
validated_mcps.append(mcp)
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Invalid MCP reference: {mcp}. Must start with 'https://' or 'crewai-amp:'"
|
||||
f"Invalid MCP configuration: {type(mcp)}. "
|
||||
"Must be a string reference or MCPServerConfig instance."
|
||||
)
|
||||
|
||||
return list(set(validated_mcps))
|
||||
return validated_mcps
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_and_set_attributes(self) -> Self:
|
||||
@@ -343,7 +360,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
"""Get platform tools for the specified list of applications and/or application/action combinations."""
|
||||
|
||||
@abstractmethod
|
||||
def get_mcp_tools(self, mcps: list[str]) -> list[BaseTool]:
|
||||
def get_mcp_tools(self, mcps: list[str | MCPServerConfig]) -> list[BaseTool]:
|
||||
"""Get MCP tools for the specified list of MCP server references."""
|
||||
|
||||
def copy(self) -> Self: # type: ignore # Signature of "copy" incompatible with supertype "BaseModel"
|
||||
|
||||
@@ -73,7 +73,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
max_iter: int,
|
||||
tools: list[CrewStructuredTool],
|
||||
tools_names: str,
|
||||
stop_words: list[str],
|
||||
stop_sequences: list[str],
|
||||
tools_description: str,
|
||||
tools_handler: ToolsHandler,
|
||||
step_callback: Any = None,
|
||||
@@ -95,7 +95,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
max_iter: Maximum iterations.
|
||||
tools: Available tools.
|
||||
tools_names: Tool names string.
|
||||
stop_words: Stop word list.
|
||||
stop_sequences: Stop sequences list for halting generation.
|
||||
tools_description: Tool descriptions.
|
||||
tools_handler: Tool handler instance.
|
||||
step_callback: Optional step callback.
|
||||
@@ -114,7 +114,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
self.prompt = prompt
|
||||
self.tools = tools
|
||||
self.tools_names = tools_names
|
||||
self.stop = stop_words
|
||||
self.max_iter = max_iter
|
||||
self.callbacks = callbacks or []
|
||||
self._printer: Printer = Printer()
|
||||
@@ -131,15 +130,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
self.iterations = 0
|
||||
self.log_error_after = 3
|
||||
if self.llm:
|
||||
# This may be mutating the shared llm object and needs further evaluation
|
||||
existing_stop = getattr(self.llm, "stop", [])
|
||||
self.llm.stop = list(
|
||||
set(
|
||||
existing_stop + self.stop
|
||||
if isinstance(existing_stop, list)
|
||||
else self.stop
|
||||
)
|
||||
)
|
||||
self.llm.stop_sequences.extend(stop_sequences)
|
||||
|
||||
@property
|
||||
def use_stop_words(self) -> bool:
|
||||
@@ -148,7 +139,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
Returns:
|
||||
bool: True if tool should be used or not.
|
||||
"""
|
||||
return self.llm.supports_stop_words() if self.llm else False
|
||||
return self.llm.supports_stop_words if self.llm else False
|
||||
|
||||
def invoke(self, inputs: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Execute the agent with given inputs.
|
||||
|
||||
@@ -16,7 +16,6 @@ from crewai.events.base_event_listener import BaseEventListener
|
||||
from crewai.events.depends import Depends
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.handler_graph import CircularDependencyError
|
||||
|
||||
from crewai.events.types.crew_events import (
|
||||
CrewKickoffCompletedEvent,
|
||||
CrewKickoffFailedEvent,
|
||||
@@ -61,6 +60,14 @@ from crewai.events.types.logging_events import (
|
||||
AgentLogsExecutionEvent,
|
||||
AgentLogsStartedEvent,
|
||||
)
|
||||
from crewai.events.types.mcp_events import (
|
||||
MCPConnectionCompletedEvent,
|
||||
MCPConnectionFailedEvent,
|
||||
MCPConnectionStartedEvent,
|
||||
MCPToolExecutionCompletedEvent,
|
||||
MCPToolExecutionFailedEvent,
|
||||
MCPToolExecutionStartedEvent,
|
||||
)
|
||||
from crewai.events.types.memory_events import (
|
||||
MemoryQueryCompletedEvent,
|
||||
MemoryQueryFailedEvent,
|
||||
@@ -153,6 +160,12 @@ __all__ = [
|
||||
"LiteAgentExecutionCompletedEvent",
|
||||
"LiteAgentExecutionErrorEvent",
|
||||
"LiteAgentExecutionStartedEvent",
|
||||
"MCPConnectionCompletedEvent",
|
||||
"MCPConnectionFailedEvent",
|
||||
"MCPConnectionStartedEvent",
|
||||
"MCPToolExecutionCompletedEvent",
|
||||
"MCPToolExecutionFailedEvent",
|
||||
"MCPToolExecutionStartedEvent",
|
||||
"MemoryQueryCompletedEvent",
|
||||
"MemoryQueryFailedEvent",
|
||||
"MemoryQueryStartedEvent",
|
||||
|
||||
@@ -65,6 +65,14 @@ from crewai.events.types.logging_events import (
|
||||
AgentLogsExecutionEvent,
|
||||
AgentLogsStartedEvent,
|
||||
)
|
||||
from crewai.events.types.mcp_events import (
|
||||
MCPConnectionCompletedEvent,
|
||||
MCPConnectionFailedEvent,
|
||||
MCPConnectionStartedEvent,
|
||||
MCPToolExecutionCompletedEvent,
|
||||
MCPToolExecutionFailedEvent,
|
||||
MCPToolExecutionStartedEvent,
|
||||
)
|
||||
from crewai.events.types.reasoning_events import (
|
||||
AgentReasoningCompletedEvent,
|
||||
AgentReasoningFailedEvent,
|
||||
@@ -615,5 +623,67 @@ class EventListener(BaseEventListener):
|
||||
event.total_turns,
|
||||
)
|
||||
|
||||
# ----------- MCP EVENTS -----------
|
||||
|
||||
@crewai_event_bus.on(MCPConnectionStartedEvent)
|
||||
def on_mcp_connection_started(source, event: MCPConnectionStartedEvent):
|
||||
self.formatter.handle_mcp_connection_started(
|
||||
event.server_name,
|
||||
event.server_url,
|
||||
event.transport_type,
|
||||
event.is_reconnect,
|
||||
event.connect_timeout,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MCPConnectionCompletedEvent)
|
||||
def on_mcp_connection_completed(source, event: MCPConnectionCompletedEvent):
|
||||
self.formatter.handle_mcp_connection_completed(
|
||||
event.server_name,
|
||||
event.server_url,
|
||||
event.transport_type,
|
||||
event.connection_duration_ms,
|
||||
event.is_reconnect,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MCPConnectionFailedEvent)
|
||||
def on_mcp_connection_failed(source, event: MCPConnectionFailedEvent):
|
||||
self.formatter.handle_mcp_connection_failed(
|
||||
event.server_name,
|
||||
event.server_url,
|
||||
event.transport_type,
|
||||
event.error,
|
||||
event.error_type,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MCPToolExecutionStartedEvent)
|
||||
def on_mcp_tool_execution_started(source, event: MCPToolExecutionStartedEvent):
|
||||
self.formatter.handle_mcp_tool_execution_started(
|
||||
event.server_name,
|
||||
event.tool_name,
|
||||
event.tool_args,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MCPToolExecutionCompletedEvent)
|
||||
def on_mcp_tool_execution_completed(
|
||||
source, event: MCPToolExecutionCompletedEvent
|
||||
):
|
||||
self.formatter.handle_mcp_tool_execution_completed(
|
||||
event.server_name,
|
||||
event.tool_name,
|
||||
event.tool_args,
|
||||
event.result,
|
||||
event.execution_duration_ms,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MCPToolExecutionFailedEvent)
|
||||
def on_mcp_tool_execution_failed(source, event: MCPToolExecutionFailedEvent):
|
||||
self.formatter.handle_mcp_tool_execution_failed(
|
||||
event.server_name,
|
||||
event.tool_name,
|
||||
event.tool_args,
|
||||
event.error,
|
||||
event.error_type,
|
||||
)
|
||||
|
||||
|
||||
event_listener = EventListener()
|
||||
|
||||
@@ -40,6 +40,14 @@ from crewai.events.types.llm_guardrail_events import (
|
||||
LLMGuardrailCompletedEvent,
|
||||
LLMGuardrailStartedEvent,
|
||||
)
|
||||
from crewai.events.types.mcp_events import (
|
||||
MCPConnectionCompletedEvent,
|
||||
MCPConnectionFailedEvent,
|
||||
MCPConnectionStartedEvent,
|
||||
MCPToolExecutionCompletedEvent,
|
||||
MCPToolExecutionFailedEvent,
|
||||
MCPToolExecutionStartedEvent,
|
||||
)
|
||||
from crewai.events.types.memory_events import (
|
||||
MemoryQueryCompletedEvent,
|
||||
MemoryQueryFailedEvent,
|
||||
@@ -115,4 +123,10 @@ EventTypes = (
|
||||
| MemoryQueryFailedEvent
|
||||
| MemoryRetrievalStartedEvent
|
||||
| MemoryRetrievalCompletedEvent
|
||||
| MCPConnectionStartedEvent
|
||||
| MCPConnectionCompletedEvent
|
||||
| MCPConnectionFailedEvent
|
||||
| MCPToolExecutionStartedEvent
|
||||
| MCPToolExecutionCompletedEvent
|
||||
| MCPToolExecutionFailedEvent
|
||||
)
|
||||
|
||||
85
lib/crewai/src/crewai/events/types/mcp_events.py
Normal file
85
lib/crewai/src/crewai/events/types/mcp_events.py
Normal file
@@ -0,0 +1,85 @@
|
||||
from datetime import datetime
|
||||
from typing import Any
|
||||
|
||||
from crewai.events.base_events import BaseEvent
|
||||
|
||||
|
||||
class MCPEvent(BaseEvent):
|
||||
"""Base event for MCP operations."""
|
||||
|
||||
server_name: str
|
||||
server_url: str | None = None
|
||||
transport_type: str | None = None # "stdio", "http", "sse"
|
||||
agent_id: str | None = None
|
||||
agent_role: str | None = None
|
||||
from_agent: Any | None = None
|
||||
from_task: Any | None = None
|
||||
|
||||
def __init__(self, **data):
|
||||
super().__init__(**data)
|
||||
self._set_agent_params(data)
|
||||
self._set_task_params(data)
|
||||
|
||||
|
||||
class MCPConnectionStartedEvent(MCPEvent):
|
||||
"""Event emitted when starting to connect to an MCP server."""
|
||||
|
||||
type: str = "mcp_connection_started"
|
||||
connect_timeout: int | None = None
|
||||
is_reconnect: bool = (
|
||||
False # True if this is a reconnection, False for first connection
|
||||
)
|
||||
|
||||
|
||||
class MCPConnectionCompletedEvent(MCPEvent):
|
||||
"""Event emitted when successfully connected to an MCP server."""
|
||||
|
||||
type: str = "mcp_connection_completed"
|
||||
started_at: datetime | None = None
|
||||
completed_at: datetime | None = None
|
||||
connection_duration_ms: float | None = None
|
||||
is_reconnect: bool = (
|
||||
False # True if this was a reconnection, False for first connection
|
||||
)
|
||||
|
||||
|
||||
class MCPConnectionFailedEvent(MCPEvent):
|
||||
"""Event emitted when connection to an MCP server fails."""
|
||||
|
||||
type: str = "mcp_connection_failed"
|
||||
error: str
|
||||
error_type: str | None = None # "timeout", "authentication", "network", etc.
|
||||
started_at: datetime | None = None
|
||||
failed_at: datetime | None = None
|
||||
|
||||
|
||||
class MCPToolExecutionStartedEvent(MCPEvent):
|
||||
"""Event emitted when starting to execute an MCP tool."""
|
||||
|
||||
type: str = "mcp_tool_execution_started"
|
||||
tool_name: str
|
||||
tool_args: dict[str, Any] | None = None
|
||||
|
||||
|
||||
class MCPToolExecutionCompletedEvent(MCPEvent):
|
||||
"""Event emitted when MCP tool execution completes."""
|
||||
|
||||
type: str = "mcp_tool_execution_completed"
|
||||
tool_name: str
|
||||
tool_args: dict[str, Any] | None = None
|
||||
result: Any | None = None
|
||||
started_at: datetime | None = None
|
||||
completed_at: datetime | None = None
|
||||
execution_duration_ms: float | None = None
|
||||
|
||||
|
||||
class MCPToolExecutionFailedEvent(MCPEvent):
|
||||
"""Event emitted when MCP tool execution fails."""
|
||||
|
||||
type: str = "mcp_tool_execution_failed"
|
||||
tool_name: str
|
||||
tool_args: dict[str, Any] | None = None
|
||||
error: str
|
||||
error_type: str | None = None # "timeout", "validation", "server_error", etc.
|
||||
started_at: datetime | None = None
|
||||
failed_at: datetime | None = None
|
||||
@@ -2248,3 +2248,203 @@ class ConsoleFormatter:
|
||||
|
||||
self.current_a2a_conversation_branch = None
|
||||
self.current_a2a_turn_count = 0
|
||||
|
||||
# ----------- MCP EVENTS -----------
|
||||
|
||||
def handle_mcp_connection_started(
|
||||
self,
|
||||
server_name: str,
|
||||
server_url: str | None = None,
|
||||
transport_type: str | None = None,
|
||||
is_reconnect: bool = False,
|
||||
connect_timeout: int | None = None,
|
||||
) -> None:
|
||||
"""Handle MCP connection started event."""
|
||||
if not self.verbose:
|
||||
return
|
||||
|
||||
content = Text()
|
||||
reconnect_text = " (Reconnecting)" if is_reconnect else ""
|
||||
content.append(f"MCP Connection Started{reconnect_text}\n\n", style="cyan bold")
|
||||
content.append("Server: ", style="white")
|
||||
content.append(f"{server_name}\n", style="cyan")
|
||||
|
||||
if server_url:
|
||||
content.append("URL: ", style="white")
|
||||
content.append(f"{server_url}\n", style="cyan dim")
|
||||
|
||||
if transport_type:
|
||||
content.append("Transport: ", style="white")
|
||||
content.append(f"{transport_type}\n", style="cyan")
|
||||
|
||||
if connect_timeout:
|
||||
content.append("Timeout: ", style="white")
|
||||
content.append(f"{connect_timeout}s\n", style="cyan")
|
||||
|
||||
panel = self.create_panel(content, "🔌 MCP Connection", "cyan")
|
||||
self.print(panel)
|
||||
self.print()
|
||||
|
||||
def handle_mcp_connection_completed(
|
||||
self,
|
||||
server_name: str,
|
||||
server_url: str | None = None,
|
||||
transport_type: str | None = None,
|
||||
connection_duration_ms: float | None = None,
|
||||
is_reconnect: bool = False,
|
||||
) -> None:
|
||||
"""Handle MCP connection completed event."""
|
||||
if not self.verbose:
|
||||
return
|
||||
|
||||
content = Text()
|
||||
reconnect_text = " (Reconnected)" if is_reconnect else ""
|
||||
content.append(
|
||||
f"MCP Connection Completed{reconnect_text}\n\n", style="green bold"
|
||||
)
|
||||
content.append("Server: ", style="white")
|
||||
content.append(f"{server_name}\n", style="green")
|
||||
|
||||
if server_url:
|
||||
content.append("URL: ", style="white")
|
||||
content.append(f"{server_url}\n", style="green dim")
|
||||
|
||||
if transport_type:
|
||||
content.append("Transport: ", style="white")
|
||||
content.append(f"{transport_type}\n", style="green")
|
||||
|
||||
if connection_duration_ms is not None:
|
||||
content.append("Duration: ", style="white")
|
||||
content.append(f"{connection_duration_ms:.2f}ms\n", style="green")
|
||||
|
||||
panel = self.create_panel(content, "✅ MCP Connected", "green")
|
||||
self.print(panel)
|
||||
self.print()
|
||||
|
||||
def handle_mcp_connection_failed(
|
||||
self,
|
||||
server_name: str,
|
||||
server_url: str | None = None,
|
||||
transport_type: str | None = None,
|
||||
error: str = "",
|
||||
error_type: str | None = None,
|
||||
) -> None:
|
||||
"""Handle MCP connection failed event."""
|
||||
if not self.verbose:
|
||||
return
|
||||
|
||||
content = Text()
|
||||
content.append("MCP Connection Failed\n\n", style="red bold")
|
||||
content.append("Server: ", style="white")
|
||||
content.append(f"{server_name}\n", style="red")
|
||||
|
||||
if server_url:
|
||||
content.append("URL: ", style="white")
|
||||
content.append(f"{server_url}\n", style="red dim")
|
||||
|
||||
if transport_type:
|
||||
content.append("Transport: ", style="white")
|
||||
content.append(f"{transport_type}\n", style="red")
|
||||
|
||||
if error_type:
|
||||
content.append("Error Type: ", style="white")
|
||||
content.append(f"{error_type}\n", style="red")
|
||||
|
||||
if error:
|
||||
content.append("\nError: ", style="white bold")
|
||||
error_preview = error[:500] + "..." if len(error) > 500 else error
|
||||
content.append(f"{error_preview}\n", style="red")
|
||||
|
||||
panel = self.create_panel(content, "❌ MCP Connection Failed", "red")
|
||||
self.print(panel)
|
||||
self.print()
|
||||
|
||||
def handle_mcp_tool_execution_started(
|
||||
self,
|
||||
server_name: str,
|
||||
tool_name: str,
|
||||
tool_args: dict[str, Any] | None = None,
|
||||
) -> None:
|
||||
"""Handle MCP tool execution started event."""
|
||||
if not self.verbose:
|
||||
return
|
||||
|
||||
content = self.create_status_content(
|
||||
"MCP Tool Execution Started",
|
||||
tool_name,
|
||||
"yellow",
|
||||
tool_args=tool_args or {},
|
||||
Server=server_name,
|
||||
)
|
||||
|
||||
panel = self.create_panel(content, "🔧 MCP Tool", "yellow")
|
||||
self.print(panel)
|
||||
self.print()
|
||||
|
||||
def handle_mcp_tool_execution_completed(
|
||||
self,
|
||||
server_name: str,
|
||||
tool_name: str,
|
||||
tool_args: dict[str, Any] | None = None,
|
||||
result: Any | None = None,
|
||||
execution_duration_ms: float | None = None,
|
||||
) -> None:
|
||||
"""Handle MCP tool execution completed event."""
|
||||
if not self.verbose:
|
||||
return
|
||||
|
||||
content = self.create_status_content(
|
||||
"MCP Tool Execution Completed",
|
||||
tool_name,
|
||||
"green",
|
||||
tool_args=tool_args or {},
|
||||
Server=server_name,
|
||||
)
|
||||
|
||||
if execution_duration_ms is not None:
|
||||
content.append("Duration: ", style="white")
|
||||
content.append(f"{execution_duration_ms:.2f}ms\n", style="green")
|
||||
|
||||
if result is not None:
|
||||
result_str = str(result)
|
||||
if len(result_str) > 500:
|
||||
result_str = result_str[:497] + "..."
|
||||
content.append("\nResult: ", style="white bold")
|
||||
content.append(f"{result_str}\n", style="green")
|
||||
|
||||
panel = self.create_panel(content, "✅ MCP Tool Completed", "green")
|
||||
self.print(panel)
|
||||
self.print()
|
||||
|
||||
def handle_mcp_tool_execution_failed(
|
||||
self,
|
||||
server_name: str,
|
||||
tool_name: str,
|
||||
tool_args: dict[str, Any] | None = None,
|
||||
error: str = "",
|
||||
error_type: str | None = None,
|
||||
) -> None:
|
||||
"""Handle MCP tool execution failed event."""
|
||||
if not self.verbose:
|
||||
return
|
||||
|
||||
content = self.create_status_content(
|
||||
"MCP Tool Execution Failed",
|
||||
tool_name,
|
||||
"red",
|
||||
tool_args=tool_args or {},
|
||||
Server=server_name,
|
||||
)
|
||||
|
||||
if error_type:
|
||||
content.append("Error Type: ", style="white")
|
||||
content.append(f"{error_type}\n", style="red")
|
||||
|
||||
if error:
|
||||
content.append("\nError: ", style="white bold")
|
||||
error_preview = error[:500] + "..." if len(error) > 500 else error
|
||||
content.append(f"{error_preview}\n", style="red")
|
||||
|
||||
panel = self.create_panel(content, "❌ MCP Tool Failed", "red")
|
||||
self.print(panel)
|
||||
self.print()
|
||||
|
||||
@@ -15,7 +15,6 @@ import logging
|
||||
from typing import (
|
||||
Any,
|
||||
ClassVar,
|
||||
Final,
|
||||
Generic,
|
||||
Literal,
|
||||
ParamSpec,
|
||||
@@ -45,7 +44,7 @@ from crewai.events.types.flow_events import (
|
||||
MethodExecutionFinishedEvent,
|
||||
MethodExecutionStartedEvent,
|
||||
)
|
||||
from crewai.flow.visualization import build_flow_structure, render_interactive
|
||||
from crewai.flow.constants import AND_CONDITION, OR_CONDITION
|
||||
from crewai.flow.flow_wrappers import (
|
||||
FlowCondition,
|
||||
FlowConditions,
|
||||
@@ -58,18 +57,16 @@ from crewai.flow.flow_wrappers import (
|
||||
from crewai.flow.persistence.base import FlowPersistence
|
||||
from crewai.flow.types import FlowExecutionData, FlowMethodName, PendingListenerKey
|
||||
from crewai.flow.utils import (
|
||||
_extract_all_methods,
|
||||
_normalize_condition,
|
||||
get_possible_return_constants,
|
||||
is_flow_condition_dict,
|
||||
is_flow_condition_list,
|
||||
is_flow_method,
|
||||
is_flow_method_callable,
|
||||
is_flow_method_name,
|
||||
is_simple_flow_condition,
|
||||
_extract_all_methods,
|
||||
_extract_all_methods_recursive,
|
||||
_normalize_condition,
|
||||
)
|
||||
from crewai.flow.constants import AND_CONDITION, OR_CONDITION
|
||||
from crewai.flow.visualization import build_flow_structure, render_interactive
|
||||
from crewai.utilities.printer import Printer, PrinterColor
|
||||
|
||||
|
||||
@@ -431,6 +428,8 @@ class FlowMeta(type):
|
||||
possible_returns = get_possible_return_constants(attr_value)
|
||||
if possible_returns:
|
||||
router_paths[attr_name] = possible_returns
|
||||
else:
|
||||
router_paths[attr_name] = []
|
||||
|
||||
cls._start_methods = start_methods # type: ignore[attr-defined]
|
||||
cls._listeners = listeners # type: ignore[attr-defined]
|
||||
@@ -495,7 +494,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
or should_auto_collect_first_time_traces()
|
||||
):
|
||||
trace_listener = TraceCollectionListener()
|
||||
trace_listener.setup_listeners(crewai_event_bus) # type: ignore[no-untyped-call]
|
||||
trace_listener.setup_listeners(crewai_event_bus)
|
||||
# Apply any additional kwargs
|
||||
if kwargs:
|
||||
self._initialize_state(kwargs)
|
||||
@@ -601,7 +600,26 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
)
|
||||
|
||||
def _copy_state(self) -> T:
|
||||
return copy.deepcopy(self._state)
|
||||
"""Create a copy of the current state.
|
||||
|
||||
Returns:
|
||||
A copy of the current state
|
||||
"""
|
||||
if isinstance(self._state, BaseModel):
|
||||
try:
|
||||
return self._state.model_copy(deep=True)
|
||||
except (TypeError, AttributeError):
|
||||
try:
|
||||
state_dict = self._state.model_dump()
|
||||
model_class = type(self._state)
|
||||
return model_class(**state_dict)
|
||||
except Exception:
|
||||
return self._state.model_copy(deep=False)
|
||||
else:
|
||||
try:
|
||||
return copy.deepcopy(self._state)
|
||||
except (TypeError, AttributeError):
|
||||
return cast(T, self._state.copy())
|
||||
|
||||
@property
|
||||
def state(self) -> T:
|
||||
@@ -926,8 +944,8 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
trace_listener = TraceCollectionListener()
|
||||
if trace_listener.batch_manager.batch_owner_type == "flow":
|
||||
if trace_listener.first_time_handler.is_first_time:
|
||||
trace_listener.first_time_handler.mark_events_collected() # type: ignore[no-untyped-call]
|
||||
trace_listener.first_time_handler.handle_execution_completion() # type: ignore[no-untyped-call]
|
||||
trace_listener.first_time_handler.mark_events_collected()
|
||||
trace_listener.first_time_handler.handle_execution_completion()
|
||||
else:
|
||||
trace_listener.batch_manager.finalize_batch()
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ P = ParamSpec("P")
|
||||
R = TypeVar("R", covariant=True)
|
||||
|
||||
FlowMethodName = NewType("FlowMethodName", str)
|
||||
FlowRouteName = NewType("FlowRouteName", str)
|
||||
PendingListenerKey = NewType(
|
||||
"PendingListenerKey",
|
||||
Annotated[str, "nested flow conditions use 'listener_name:object_id'"],
|
||||
|
||||
@@ -19,11 +19,11 @@ import ast
|
||||
from collections import defaultdict, deque
|
||||
import inspect
|
||||
import textwrap
|
||||
from typing import Any, TYPE_CHECKING
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from typing_extensions import TypeIs
|
||||
|
||||
from crewai.flow.constants import OR_CONDITION, AND_CONDITION
|
||||
from crewai.flow.constants import AND_CONDITION, OR_CONDITION
|
||||
from crewai.flow.flow_wrappers import (
|
||||
FlowCondition,
|
||||
FlowConditions,
|
||||
@@ -33,6 +33,7 @@ from crewai.flow.flow_wrappers import (
|
||||
from crewai.flow.types import FlowMethodCallable, FlowMethodName
|
||||
from crewai.utilities.printer import Printer
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.flow.flow import Flow
|
||||
|
||||
@@ -40,6 +41,22 @@ _printer = Printer()
|
||||
|
||||
|
||||
def get_possible_return_constants(function: Any) -> list[str] | None:
|
||||
"""Extract possible string return values from a function using AST parsing.
|
||||
|
||||
This function analyzes the source code of a router method to identify
|
||||
all possible string values it might return. It handles:
|
||||
- Direct string literals: return "value"
|
||||
- Variable assignments: x = "value"; return x
|
||||
- Dictionary lookups: d = {"k": "v"}; return d[key]
|
||||
- Conditional returns: return "a" if cond else "b"
|
||||
- State attributes: return self.state.attr (infers from class context)
|
||||
|
||||
Args:
|
||||
function: The function to analyze.
|
||||
|
||||
Returns:
|
||||
List of possible string return values, or None if analysis fails.
|
||||
"""
|
||||
try:
|
||||
source = inspect.getsource(function)
|
||||
except OSError:
|
||||
@@ -82,6 +99,7 @@ def get_possible_return_constants(function: Any) -> list[str] | None:
|
||||
return_values: set[str] = set()
|
||||
dict_definitions: dict[str, list[str]] = {}
|
||||
variable_values: dict[str, list[str]] = {}
|
||||
state_attribute_values: dict[str, list[str]] = {}
|
||||
|
||||
def extract_string_constants(node: ast.expr) -> list[str]:
|
||||
"""Recursively extract all string constants from an AST node."""
|
||||
@@ -91,6 +109,17 @@ def get_possible_return_constants(function: Any) -> list[str] | None:
|
||||
elif isinstance(node, ast.IfExp):
|
||||
strings.extend(extract_string_constants(node.body))
|
||||
strings.extend(extract_string_constants(node.orelse))
|
||||
elif isinstance(node, ast.Call):
|
||||
if (
|
||||
isinstance(node.func, ast.Attribute)
|
||||
and node.func.attr == "get"
|
||||
and len(node.args) >= 2
|
||||
):
|
||||
default_arg = node.args[1]
|
||||
if isinstance(default_arg, ast.Constant) and isinstance(
|
||||
default_arg.value, str
|
||||
):
|
||||
strings.append(default_arg.value)
|
||||
return strings
|
||||
|
||||
class VariableAssignmentVisitor(ast.NodeVisitor):
|
||||
@@ -124,6 +153,22 @@ def get_possible_return_constants(function: Any) -> list[str] | None:
|
||||
|
||||
self.generic_visit(node)
|
||||
|
||||
def get_attribute_chain(node: ast.expr) -> str | None:
|
||||
"""Extract the full attribute chain from an AST node.
|
||||
|
||||
Examples:
|
||||
self.state.run_type -> "self.state.run_type"
|
||||
x.y.z -> "x.y.z"
|
||||
simple_var -> "simple_var"
|
||||
"""
|
||||
if isinstance(node, ast.Name):
|
||||
return node.id
|
||||
if isinstance(node, ast.Attribute):
|
||||
base = get_attribute_chain(node.value)
|
||||
if base:
|
||||
return f"{base}.{node.attr}"
|
||||
return None
|
||||
|
||||
class ReturnVisitor(ast.NodeVisitor):
|
||||
def visit_Return(self, node: ast.Return) -> None:
|
||||
if (
|
||||
@@ -139,21 +184,94 @@ def get_possible_return_constants(function: Any) -> list[str] | None:
|
||||
for v in dict_definitions[var_name_dict]:
|
||||
return_values.add(v)
|
||||
elif node.value:
|
||||
var_name_ret: str | None = None
|
||||
if isinstance(node.value, ast.Name):
|
||||
var_name_ret = node.value.id
|
||||
elif isinstance(node.value, ast.Attribute):
|
||||
var_name_ret = f"{node.value.value.id if isinstance(node.value.value, ast.Name) else '_'}.{node.value.attr}"
|
||||
var_name_ret = get_attribute_chain(node.value)
|
||||
|
||||
if var_name_ret and var_name_ret in variable_values:
|
||||
for v in variable_values[var_name_ret]:
|
||||
return_values.add(v)
|
||||
elif var_name_ret and var_name_ret in state_attribute_values:
|
||||
for v in state_attribute_values[var_name_ret]:
|
||||
return_values.add(v)
|
||||
|
||||
self.generic_visit(node)
|
||||
|
||||
def visit_If(self, node: ast.If) -> None:
|
||||
self.generic_visit(node)
|
||||
|
||||
# Try to get the class context to infer state attribute values
|
||||
try:
|
||||
if hasattr(function, "__self__"):
|
||||
# Method is bound, get the class
|
||||
class_obj = function.__self__.__class__
|
||||
elif hasattr(function, "__qualname__") and "." in function.__qualname__:
|
||||
# Method is unbound but we can try to get class from module
|
||||
class_name = function.__qualname__.rsplit(".", 1)[0]
|
||||
if hasattr(function, "__globals__"):
|
||||
class_obj = function.__globals__.get(class_name)
|
||||
else:
|
||||
class_obj = None
|
||||
else:
|
||||
class_obj = None
|
||||
|
||||
if class_obj is not None:
|
||||
try:
|
||||
class_source = inspect.getsource(class_obj)
|
||||
class_source = textwrap.dedent(class_source)
|
||||
class_ast = ast.parse(class_source)
|
||||
|
||||
# Look for comparisons and assignments involving state attributes
|
||||
class StateAttributeVisitor(ast.NodeVisitor):
|
||||
def visit_Compare(self, node: ast.Compare) -> None:
|
||||
"""Find comparisons like: self.state.attr == "value" """
|
||||
left_attr = get_attribute_chain(node.left)
|
||||
|
||||
if left_attr:
|
||||
for comparator in node.comparators:
|
||||
if isinstance(comparator, ast.Constant) and isinstance(
|
||||
comparator.value, str
|
||||
):
|
||||
if left_attr not in state_attribute_values:
|
||||
state_attribute_values[left_attr] = []
|
||||
if (
|
||||
comparator.value
|
||||
not in state_attribute_values[left_attr]
|
||||
):
|
||||
state_attribute_values[left_attr].append(
|
||||
comparator.value
|
||||
)
|
||||
|
||||
# Also check right side
|
||||
for comparator in node.comparators:
|
||||
right_attr = get_attribute_chain(comparator)
|
||||
if (
|
||||
right_attr
|
||||
and isinstance(node.left, ast.Constant)
|
||||
and isinstance(node.left.value, str)
|
||||
):
|
||||
if right_attr not in state_attribute_values:
|
||||
state_attribute_values[right_attr] = []
|
||||
if (
|
||||
node.left.value
|
||||
not in state_attribute_values[right_attr]
|
||||
):
|
||||
state_attribute_values[right_attr].append(
|
||||
node.left.value
|
||||
)
|
||||
|
||||
self.generic_visit(node)
|
||||
|
||||
StateAttributeVisitor().visit(class_ast)
|
||||
except Exception as e:
|
||||
_printer.print(
|
||||
f"Could not analyze class context for {function.__name__}: {e}",
|
||||
color="yellow",
|
||||
)
|
||||
except Exception as e:
|
||||
_printer.print(
|
||||
f"Could not introspect class for {function.__name__}: {e}",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
VariableAssignmentVisitor().visit(code_ast)
|
||||
ReturnVisitor().visit(code_ast)
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -6,6 +6,7 @@
|
||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
||||
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap" rel="stylesheet">
|
||||
<link rel="stylesheet" href="'{{ css_path }}'" />
|
||||
<script src="https://unpkg.com/lucide@latest"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/prism.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/components/prism-python.min.js"></script>
|
||||
<script src="'{{ js_path }}'"></script>
|
||||
@@ -23,93 +24,129 @@
|
||||
<div class="drawer-title" id="drawer-node-name">Node Details</div>
|
||||
<div style="display: flex; align-items: center;">
|
||||
<button class="drawer-open-ide" id="drawer-open-ide" style="display: none;">
|
||||
<svg viewBox="0 0 16 16" fill="none" stroke="currentColor" stroke-width="2">
|
||||
<path d="M4 2 L12 2 L12 14 L4 14 Z" stroke-linecap="round" stroke-linejoin="round"/>
|
||||
<path d="M6 5 L10 5 M6 8 L10 8 M6 11 L10 11" stroke-linecap="round"/>
|
||||
</svg>
|
||||
<i data-lucide="file-code" style="width: 16px; height: 16px;"></i>
|
||||
Open in IDE
|
||||
</button>
|
||||
<button class="drawer-close" id="drawer-close">×</button>
|
||||
<button class="drawer-close" id="drawer-close">
|
||||
<i data-lucide="x" style="width: 20px; height: 20px;"></i>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="drawer-content" id="drawer-content"></div>
|
||||
</div>
|
||||
|
||||
<div id="info">
|
||||
<div style="text-align: center; margin-bottom: 20px;">
|
||||
<div style="text-align: center;">
|
||||
<img src="https://cdn.prod.website-files.com/68de1ee6d7c127849807d7a6/68de1ee6d7c127849807d7ef_Logo.svg"
|
||||
alt="CrewAI Logo"
|
||||
style="width: 120px; height: auto;">
|
||||
</div>
|
||||
<h3>Flow Execution</h3>
|
||||
<div class="stats">
|
||||
<p><strong>Nodes:</strong> '{{ dag_nodes_count }}'</p>
|
||||
<p><strong>Edges:</strong> '{{ dag_edges_count }}'</p>
|
||||
<p><strong>Topological Paths:</strong> '{{ execution_paths }}'</p>
|
||||
</div>
|
||||
<div class="legend">
|
||||
<div class="legend-title">Node Types</div>
|
||||
<div class="legend-item">
|
||||
<div class="legend-color" style="background: '{{ CREWAI_ORANGE }}';"></div>
|
||||
<span>Start Methods</span>
|
||||
</div>
|
||||
<div class="legend-item">
|
||||
<div class="legend-color" style="background: '{{ DARK_GRAY }}'; border: 3px solid '{{ CREWAI_ORANGE }}';"></div>
|
||||
<span>Router Methods</span>
|
||||
</div>
|
||||
<div class="legend-item">
|
||||
<div class="legend-color" style="background: '{{ DARK_GRAY }}';"></div>
|
||||
<span>Listen Methods</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="legend">
|
||||
<div class="legend-title">Edge Types</div>
|
||||
<div class="legend-item">
|
||||
<svg width="24" height="12" style="margin-right: 12px;">
|
||||
<line x1="0" y1="6" x2="24" y2="6" stroke="'{{ CREWAI_ORANGE }}'" stroke-width="2" stroke-dasharray="5,5"/>
|
||||
</svg>
|
||||
<span>Router Paths</span>
|
||||
</div>
|
||||
<div class="legend-item">
|
||||
<svg width="24" height="12" style="margin-right: 12px;" class="legend-or-line">
|
||||
<line x1="0" y1="6" x2="24" y2="6" stroke="var(--edge-or-color)" stroke-width="2"/>
|
||||
</svg>
|
||||
<span>OR Conditions</span>
|
||||
</div>
|
||||
<div class="legend-item">
|
||||
<svg width="24" height="12" style="margin-right: 12px;">
|
||||
<line x1="0" y1="6" x2="24" y2="6" stroke="'{{ CREWAI_ORANGE }}'" stroke-width="2"/>
|
||||
</svg>
|
||||
<span>AND Conditions</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="instructions">
|
||||
<strong>Interactions:</strong><br>
|
||||
• Drag to pan<br>
|
||||
• Scroll to zoom<br><br>
|
||||
<strong>IDE:</strong>
|
||||
<select id="ide-selector" style="width: 100%; padding: 4px; margin-top: 4px; border-radius: 3px; border: 1px solid #e0e0e0; background: white; font-size: 12px; cursor: pointer; pointer-events: auto; position: relative; z-index: 10;">
|
||||
<option value="auto">Auto-detect</option>
|
||||
<option value="pycharm">PyCharm</option>
|
||||
<option value="vscode">VS Code</option>
|
||||
<option value="jetbrains">JetBrains (Toolbox)</option>
|
||||
</select>
|
||||
style="width: 144px; height: auto;">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
<!-- Custom navigation controls -->
|
||||
<div class="nav-controls">
|
||||
<div class="nav-button" id="theme-toggle" title="Toggle Dark Mode">🌙</div>
|
||||
<div class="nav-button" id="zoom-in" title="Zoom In">+</div>
|
||||
<div class="nav-button" id="zoom-out" title="Zoom Out">−</div>
|
||||
<div class="nav-button" id="fit" title="Fit to Screen">⊡</div>
|
||||
<div class="nav-button" id="export-png" title="Export to PNG">🖼</div>
|
||||
<div class="nav-button" id="export-pdf" title="Export to PDF">📄</div>
|
||||
<div class="nav-button" id="export-json" title="Export to JSON">{}</div>
|
||||
<div class="nav-button" id="theme-toggle" title="Toggle Dark Mode">
|
||||
<i data-lucide="moon" style="width: 18px; height: 18px;"></i>
|
||||
</div>
|
||||
<div class="nav-button" id="zoom-in" title="Zoom In">
|
||||
<i data-lucide="zoom-in" style="width: 18px; height: 18px;"></i>
|
||||
</div>
|
||||
<div class="nav-button" id="zoom-out" title="Zoom Out">
|
||||
<i data-lucide="zoom-out" style="width: 18px; height: 18px;"></i>
|
||||
</div>
|
||||
<div class="nav-button" id="fit" title="Fit to Screen">
|
||||
<i data-lucide="maximize-2" style="width: 18px; height: 18px;"></i>
|
||||
</div>
|
||||
<div class="nav-button" id="export-png" title="Export to PNG">
|
||||
<i data-lucide="image" style="width: 18px; height: 18px;"></i>
|
||||
</div>
|
||||
<div class="nav-button" id="export-pdf" title="Export to PDF">
|
||||
<i data-lucide="file-text" style="width: 18px; height: 18px;"></i>
|
||||
</div>
|
||||
<!-- <div class="nav-button" id="export-json" title="Export to JSON">
|
||||
<i data-lucide="braces" style="width: 18px; height: 18px;"></i>
|
||||
</div> -->
|
||||
</div>
|
||||
|
||||
<div id="network-container">
|
||||
<div id="network"></div>
|
||||
</div>
|
||||
|
||||
<!-- Info panel at bottom -->
|
||||
<div id="legend-panel">
|
||||
<!-- Stats Section -->
|
||||
<div class="legend-section">
|
||||
<div class="legend-stats-row">
|
||||
<div class="legend-stat-item">
|
||||
<span class="stat-value">'{{ dag_nodes_count }}'</span>
|
||||
<span class="stat-label">Nodes</span>
|
||||
</div>
|
||||
<div class="legend-stat-item">
|
||||
<span class="stat-value">'{{ dag_edges_count }}'</span>
|
||||
<span class="stat-label">Edges</span>
|
||||
</div>
|
||||
<div class="legend-stat-item">
|
||||
<span class="stat-value">'{{ execution_paths }}'</span>
|
||||
<span class="stat-label">Paths</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Node Types Section -->
|
||||
<div class="legend-section">
|
||||
<div class="legend-group">
|
||||
<div class="legend-item-compact">
|
||||
<div class="legend-color-small" style="background: var(--node-bg-start);"></div>
|
||||
<span>Start</span>
|
||||
</div>
|
||||
<div class="legend-item-compact">
|
||||
<div class="legend-color-small" style="background: var(--node-bg-router); border: 2px solid var(--node-border-start);"></div>
|
||||
<span>Router</span>
|
||||
</div>
|
||||
<div class="legend-item-compact">
|
||||
<div class="legend-color-small" style="background: var(--node-bg-listen); border: 2px solid var(--node-border-listen);"></div>
|
||||
<span>Listen</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Edge Types Section -->
|
||||
<div class="legend-section">
|
||||
<div class="legend-group">
|
||||
<div class="legend-item-compact">
|
||||
<svg>
|
||||
<line x1="0" y1="7" x2="29" y2="7" stroke="var(--edge-router-color)" stroke-width="2" stroke-dasharray="4,4"/>
|
||||
</svg>
|
||||
<span>Router</span>
|
||||
</div>
|
||||
<div class="legend-item-compact">
|
||||
<svg class="legend-or-line">
|
||||
<line x1="0" y1="7" x2="29" y2="7" stroke="var(--edge-or-color)" stroke-width="2"/>
|
||||
</svg>
|
||||
<span>OR</span>
|
||||
</div>
|
||||
<div class="legend-item-compact">
|
||||
<svg>
|
||||
<line x1="0" y1="7" x2="29" y2="7" stroke="var(--edge-router-color)" stroke-width="2"/>
|
||||
</svg>
|
||||
<span>AND</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- IDE Selector Section -->
|
||||
<div class="legend-section">
|
||||
<div class="legend-ide-column">
|
||||
<label class="legend-ide-label">IDE</label>
|
||||
<select id="ide-selector" class="legend-ide-select">
|
||||
<option value="auto">Auto-detect</option>
|
||||
<option value="pycharm">PyCharm</option>
|
||||
<option value="vscode">VS Code</option>
|
||||
<option value="jetbrains">JetBrains</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -13,6 +13,14 @@
|
||||
--edge-label-text: '{{ GRAY }}';
|
||||
--edge-label-bg: rgba(255, 255, 255, 0.8);
|
||||
--edge-or-color: #000000;
|
||||
--edge-router-color: '{{ CREWAI_ORANGE }}';
|
||||
--node-border-start: #C94238;
|
||||
--node-border-listen: #3D3D3D;
|
||||
--node-bg-start: #FF7066;
|
||||
--node-bg-router: #FFFFFF;
|
||||
--node-bg-listen: #FFFFFF;
|
||||
--node-text-color: #FFFFFF;
|
||||
--nav-button-hover: #f5f5f5;
|
||||
}
|
||||
|
||||
[data-theme="dark"] {
|
||||
@@ -30,6 +38,14 @@
|
||||
--edge-label-text: #c9d1d9;
|
||||
--edge-label-bg: rgba(22, 27, 34, 0.9);
|
||||
--edge-or-color: #ffffff;
|
||||
--edge-router-color: '{{ CREWAI_ORANGE }}';
|
||||
--node-border-start: #FF5A50;
|
||||
--node-border-listen: #666666;
|
||||
--node-bg-start: #B33830;
|
||||
--node-bg-router: #3D3D3D;
|
||||
--node-bg-listen: #3D3D3D;
|
||||
--node-text-color: #FFFFFF;
|
||||
--nav-button-hover: #30363d;
|
||||
}
|
||||
|
||||
@keyframes dash {
|
||||
@@ -72,12 +88,10 @@ body {
|
||||
position: absolute;
|
||||
top: 20px;
|
||||
left: 20px;
|
||||
background: var(--bg-secondary);
|
||||
background: transparent;
|
||||
padding: 20px;
|
||||
border-radius: 8px;
|
||||
box-shadow: 0 4px 12px var(--shadow-strong);
|
||||
max-width: 320px;
|
||||
border: 1px solid var(--border-color);
|
||||
z-index: 10000;
|
||||
pointer-events: auto;
|
||||
transition: background 0.3s ease, border-color 0.3s ease, box-shadow 0.3s ease;
|
||||
@@ -125,12 +139,16 @@ h3 {
|
||||
margin-right: 12px;
|
||||
border-radius: 3px;
|
||||
box-sizing: border-box;
|
||||
transition: background 0.3s ease, border-color 0.3s ease;
|
||||
}
|
||||
.legend-item span {
|
||||
color: var(--text-secondary);
|
||||
font-size: 13px;
|
||||
transition: color 0.3s ease;
|
||||
}
|
||||
.legend-item svg line {
|
||||
transition: stroke 0.3s ease;
|
||||
}
|
||||
.instructions {
|
||||
margin-top: 15px;
|
||||
padding-top: 15px;
|
||||
@@ -155,7 +173,7 @@ h3 {
|
||||
bottom: 20px;
|
||||
right: auto;
|
||||
display: grid;
|
||||
grid-template-columns: repeat(4, 40px);
|
||||
grid-template-columns: repeat(3, 40px);
|
||||
gap: 8px;
|
||||
z-index: 10002;
|
||||
pointer-events: auto;
|
||||
@@ -165,10 +183,187 @@ h3 {
|
||||
.nav-controls.drawer-open {
|
||||
}
|
||||
|
||||
#legend-panel {
|
||||
position: fixed;
|
||||
left: 164px;
|
||||
bottom: 20px;
|
||||
right: 20px;
|
||||
height: 92px;
|
||||
background: var(--bg-secondary);
|
||||
backdrop-filter: blur(12px) saturate(180%);
|
||||
-webkit-backdrop-filter: blur(12px) saturate(180%);
|
||||
border: 1px solid var(--border-subtle);
|
||||
border-radius: 6px;
|
||||
box-shadow: 0 2px 8px var(--shadow-color);
|
||||
display: grid;
|
||||
grid-template-columns: repeat(4, 1fr);
|
||||
align-items: center;
|
||||
gap: 0;
|
||||
padding: 0 24px;
|
||||
box-sizing: border-box;
|
||||
z-index: 10001;
|
||||
pointer-events: auto;
|
||||
transition: background 0.3s ease, border-color 0.3s ease, box-shadow 0.3s ease, right 0.3s cubic-bezier(0.4, 0, 0.2, 1);
|
||||
}
|
||||
|
||||
#legend-panel.drawer-open {
|
||||
right: 405px;
|
||||
}
|
||||
|
||||
.legend-section {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
min-width: 0;
|
||||
width: -webkit-fill-available;
|
||||
width: -moz-available;
|
||||
width: stretch;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.legend-section:not(:last-child)::after {
|
||||
content: '';
|
||||
position: absolute;
|
||||
right: 0;
|
||||
top: 50%;
|
||||
transform: translateY(-50%);
|
||||
width: 1px;
|
||||
height: 48px;
|
||||
background: var(--border-color);
|
||||
transition: background 0.3s ease;
|
||||
}
|
||||
|
||||
.legend-stats-row {
|
||||
display: flex;
|
||||
gap: 32px;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
min-width: 0;
|
||||
}
|
||||
|
||||
.legend-stat-item {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
gap: 4px;
|
||||
}
|
||||
|
||||
.stat-value {
|
||||
font-size: 19px;
|
||||
font-weight: 700;
|
||||
color: var(--text-primary);
|
||||
line-height: 1;
|
||||
transition: color 0.3s ease;
|
||||
}
|
||||
|
||||
.stat-label {
|
||||
font-size: 8px;
|
||||
font-weight: 500;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-secondary);
|
||||
letter-spacing: 0.5px;
|
||||
transition: color 0.3s ease;
|
||||
}
|
||||
|
||||
.legend-items-row {
|
||||
display: flex;
|
||||
gap: 16px;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
min-width: 0;
|
||||
}
|
||||
|
||||
.legend-group {
|
||||
display: flex;
|
||||
gap: 16px;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.legend-item-compact {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 6px;
|
||||
}
|
||||
|
||||
.legend-item-compact span {
|
||||
font-size: 12px;
|
||||
font-weight: 500;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-secondary);
|
||||
letter-spacing: 0.5px;
|
||||
white-space: nowrap;
|
||||
font-family: inherit;
|
||||
line-height: 1;
|
||||
transition: color 0.3s ease;
|
||||
}
|
||||
|
||||
.legend-color-small {
|
||||
width: 17px;
|
||||
height: 17px;
|
||||
border-radius: 2px;
|
||||
box-sizing: border-box;
|
||||
flex-shrink: 0;
|
||||
transition: background 0.3s ease, border-color 0.3s ease;
|
||||
}
|
||||
|
||||
.legend-item-compact svg {
|
||||
display: block;
|
||||
flex-shrink: 0;
|
||||
width: 29px;
|
||||
height: 14px;
|
||||
}
|
||||
|
||||
.legend-item-compact svg line {
|
||||
transition: stroke 0.3s ease;
|
||||
}
|
||||
|
||||
.legend-ide-column {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
gap: 8px;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
min-width: 0;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.legend-ide-label {
|
||||
font-size: 12px;
|
||||
font-weight: 500;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-secondary);
|
||||
letter-spacing: 0.5px;
|
||||
transition: color 0.3s ease;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.legend-ide-select {
|
||||
width: 120px;
|
||||
padding: 6px 10px;
|
||||
border-radius: 4px;
|
||||
border: 1px solid var(--border-subtle);
|
||||
background: var(--bg-primary);
|
||||
color: var(--text-primary);
|
||||
font-size: 11px;
|
||||
cursor: pointer;
|
||||
transition: all 0.3s ease;
|
||||
}
|
||||
|
||||
.legend-ide-select:hover {
|
||||
border-color: var(--text-secondary);
|
||||
}
|
||||
|
||||
.legend-ide-select:focus {
|
||||
outline: none;
|
||||
border-color: '{{ CREWAI_ORANGE }}';
|
||||
}
|
||||
|
||||
.nav-button {
|
||||
width: 40px;
|
||||
height: 40px;
|
||||
background: var(--bg-secondary);
|
||||
backdrop-filter: blur(12px) saturate(180%);
|
||||
-webkit-backdrop-filter: blur(12px) saturate(180%);
|
||||
border: 1px solid var(--border-subtle);
|
||||
border-radius: 6px;
|
||||
display: flex;
|
||||
@@ -181,12 +376,12 @@ h3 {
|
||||
user-select: none;
|
||||
pointer-events: auto;
|
||||
position: relative;
|
||||
z-index: 10001;
|
||||
z-index: 10002;
|
||||
transition: background 0.3s ease, border-color 0.3s ease, color 0.3s ease, box-shadow 0.3s ease;
|
||||
}
|
||||
|
||||
.nav-button:hover {
|
||||
background: var(--border-subtle);
|
||||
background: var(--nav-button-hover);
|
||||
}
|
||||
|
||||
#drawer {
|
||||
@@ -198,9 +393,10 @@ h3 {
|
||||
background: var(--bg-drawer);
|
||||
box-shadow: -4px 0 12px var(--shadow-strong);
|
||||
transition: right 0.3s cubic-bezier(0.4, 0, 0.2, 1), background 0.3s ease, box-shadow 0.3s ease;
|
||||
z-index: 2000;
|
||||
overflow-y: auto;
|
||||
padding: 24px;
|
||||
z-index: 10003;
|
||||
overflow: hidden;
|
||||
transform: translateZ(0);
|
||||
isolation: isolate;
|
||||
}
|
||||
|
||||
#drawer.open {
|
||||
@@ -247,17 +443,22 @@ h3 {
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
margin-bottom: 20px;
|
||||
padding-bottom: 16px;
|
||||
padding: 24px 24px 16px 24px;
|
||||
border-bottom: 2px solid '{{ CREWAI_ORANGE }}';
|
||||
position: relative;
|
||||
z-index: 2001;
|
||||
}
|
||||
|
||||
.drawer-title {
|
||||
font-size: 20px;
|
||||
font-size: 15px;
|
||||
font-weight: 700;
|
||||
color: var(--text-primary);
|
||||
transition: color 0.3s ease;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
flex: 1;
|
||||
min-width: 0;
|
||||
}
|
||||
|
||||
.drawer-close {
|
||||
@@ -269,12 +470,19 @@ h3 {
|
||||
padding: 4px 8px;
|
||||
line-height: 1;
|
||||
transition: color 0.3s ease;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
}
|
||||
|
||||
.drawer-close:hover {
|
||||
color: '{{ CREWAI_ORANGE }}';
|
||||
}
|
||||
|
||||
.drawer-close i {
|
||||
display: block;
|
||||
}
|
||||
|
||||
.drawer-open-ide {
|
||||
background: '{{ CREWAI_ORANGE }}';
|
||||
border: none;
|
||||
@@ -292,6 +500,9 @@ h3 {
|
||||
position: relative;
|
||||
z-index: 9999;
|
||||
pointer-events: auto;
|
||||
white-space: nowrap;
|
||||
flex-shrink: 0;
|
||||
min-width: fit-content;
|
||||
}
|
||||
|
||||
.drawer-open-ide:hover {
|
||||
@@ -305,14 +516,19 @@ h3 {
|
||||
box-shadow: 0 1px 4px rgba(255, 90, 80, 0.2);
|
||||
}
|
||||
|
||||
.drawer-open-ide svg {
|
||||
.drawer-open-ide svg,
|
||||
.drawer-open-ide i {
|
||||
width: 14px;
|
||||
height: 14px;
|
||||
display: block;
|
||||
}
|
||||
|
||||
.drawer-content {
|
||||
color: '{{ DARK_GRAY }}';
|
||||
line-height: 1.6;
|
||||
padding: 0 24px 24px 24px;
|
||||
overflow-y: auto;
|
||||
height: calc(100vh - 95px);
|
||||
}
|
||||
|
||||
.drawer-section {
|
||||
@@ -328,6 +544,10 @@ h3 {
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid:has(.drawer-section:nth-child(3):nth-last-child(1)) {
|
||||
grid-template-columns: 1fr 2fr;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid::before {
|
||||
content: '';
|
||||
position: absolute;
|
||||
@@ -419,20 +639,35 @@ h3 {
|
||||
grid-column: 2;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
justify-content: center;
|
||||
justify-content: flex-start;
|
||||
align-items: flex-start;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid:has(.drawer-section:nth-child(3):nth-last-child(1))::after {
|
||||
right: 50%;
|
||||
right: 66.666%;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid:has(.drawer-section:nth-child(3):nth-last-child(1))::before {
|
||||
left: 33.333%;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid .drawer-section:nth-child(3):nth-last-child(1) .drawer-section-title {
|
||||
align-self: flex-start;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid .drawer-section:nth-child(3):nth-last-child(1) > *:not(.drawer-section-title) {
|
||||
width: 100%;
|
||||
align-self: stretch;
|
||||
}
|
||||
|
||||
.drawer-section-title {
|
||||
font-size: 12px;
|
||||
text-transform: uppercase;
|
||||
color: '{{ GRAY }}';
|
||||
color: var(--text-secondary);
|
||||
letter-spacing: 0.5px;
|
||||
margin-bottom: 8px;
|
||||
font-weight: 600;
|
||||
transition: color 0.3s ease;
|
||||
}
|
||||
|
||||
.drawer-badge {
|
||||
@@ -465,9 +700,44 @@ h3 {
|
||||
padding: 3px 0;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid .drawer-section .drawer-list {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 6px;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid .drawer-section .drawer-list li {
|
||||
border-bottom: none;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid .drawer-section:nth-child(3) .drawer-list li {
|
||||
border-bottom: none;
|
||||
padding: 3px 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid .drawer-section {
|
||||
overflow: visible;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid .drawer-section .condition-group,
|
||||
.drawer-metadata-grid .drawer-section .trigger-group {
|
||||
width: 100%;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid .drawer-section .condition-children {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid .drawer-section .trigger-group-items {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.drawer-metadata-grid .drawer-section .drawer-code-link {
|
||||
word-break: break-word;
|
||||
overflow-wrap: break-word;
|
||||
max-width: 100%;
|
||||
}
|
||||
|
||||
.drawer-code {
|
||||
@@ -491,6 +761,7 @@ h3 {
|
||||
cursor: pointer;
|
||||
transition: all 0.2s;
|
||||
display: inline-block;
|
||||
margin: 3px 2px;
|
||||
}
|
||||
|
||||
.drawer-code-link:hover {
|
||||
|
||||
@@ -3,12 +3,13 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections import defaultdict
|
||||
from collections.abc import Iterable
|
||||
import inspect
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from crewai.flow.constants import AND_CONDITION, OR_CONDITION
|
||||
from crewai.flow.flow_wrappers import FlowCondition
|
||||
from crewai.flow.types import FlowMethodName
|
||||
from crewai.flow.types import FlowMethodName, FlowRouteName
|
||||
from crewai.flow.utils import (
|
||||
is_flow_condition_dict,
|
||||
is_simple_flow_condition,
|
||||
@@ -197,8 +198,6 @@ def build_flow_structure(flow: Flow[Any]) -> FlowStructure:
|
||||
node_metadata["type"] = "router"
|
||||
router_methods.append(method_name)
|
||||
|
||||
node_metadata["condition_type"] = "IF"
|
||||
|
||||
if method_name in flow._router_paths:
|
||||
node_metadata["router_paths"] = [
|
||||
str(p) for p in flow._router_paths[method_name]
|
||||
@@ -210,9 +209,13 @@ def build_flow_structure(flow: Flow[Any]) -> FlowStructure:
|
||||
]
|
||||
|
||||
if hasattr(method, "__condition_type__") and method.__condition_type__:
|
||||
node_metadata["trigger_condition_type"] = method.__condition_type__
|
||||
if "condition_type" not in node_metadata:
|
||||
node_metadata["condition_type"] = method.__condition_type__
|
||||
|
||||
if node_metadata.get("is_router") and "condition_type" not in node_metadata:
|
||||
node_metadata["condition_type"] = "IF"
|
||||
|
||||
if (
|
||||
hasattr(method, "__trigger_condition__")
|
||||
and method.__trigger_condition__ is not None
|
||||
@@ -298,6 +301,9 @@ def build_flow_structure(flow: Flow[Any]) -> FlowStructure:
|
||||
nodes[method_name] = node_metadata
|
||||
|
||||
for listener_name, condition_data in flow._listeners.items():
|
||||
if listener_name in router_methods:
|
||||
continue
|
||||
|
||||
if is_simple_flow_condition(condition_data):
|
||||
cond_type, methods = condition_data
|
||||
edges.extend(
|
||||
@@ -315,6 +321,60 @@ def build_flow_structure(flow: Flow[Any]) -> FlowStructure:
|
||||
_create_edges_from_condition(condition_data, str(listener_name), nodes)
|
||||
)
|
||||
|
||||
for method_name, node_metadata in nodes.items(): # type: ignore[assignment]
|
||||
if node_metadata.get("is_router") and "trigger_methods" in node_metadata:
|
||||
trigger_methods = node_metadata["trigger_methods"]
|
||||
condition_type = node_metadata.get("trigger_condition_type", OR_CONDITION)
|
||||
|
||||
if "trigger_condition" in node_metadata:
|
||||
edges.extend(
|
||||
_create_edges_from_condition(
|
||||
node_metadata["trigger_condition"], # type: ignore[arg-type]
|
||||
method_name,
|
||||
nodes,
|
||||
)
|
||||
)
|
||||
else:
|
||||
edges.extend(
|
||||
StructureEdge(
|
||||
source=trigger_method,
|
||||
target=method_name,
|
||||
condition_type=condition_type,
|
||||
is_router_path=False,
|
||||
)
|
||||
for trigger_method in trigger_methods
|
||||
if trigger_method in nodes
|
||||
)
|
||||
|
||||
for router_method_name in router_methods:
|
||||
if router_method_name not in flow._router_paths:
|
||||
flow._router_paths[FlowMethodName(router_method_name)] = []
|
||||
|
||||
inferred_paths: Iterable[FlowMethodName | FlowRouteName] = set(
|
||||
flow._router_paths.get(FlowMethodName(router_method_name), [])
|
||||
)
|
||||
|
||||
for condition_data in flow._listeners.values():
|
||||
trigger_strings: list[str] = []
|
||||
|
||||
if is_simple_flow_condition(condition_data):
|
||||
_, methods = condition_data
|
||||
trigger_strings = [str(m) for m in methods]
|
||||
elif is_flow_condition_dict(condition_data):
|
||||
trigger_strings = _extract_direct_or_triggers(condition_data)
|
||||
|
||||
for trigger_str in trigger_strings:
|
||||
if trigger_str not in nodes:
|
||||
# This is likely a router path output
|
||||
inferred_paths.add(trigger_str) # type: ignore[attr-defined]
|
||||
|
||||
if inferred_paths:
|
||||
flow._router_paths[FlowMethodName(router_method_name)] = list(
|
||||
inferred_paths # type: ignore[arg-type]
|
||||
)
|
||||
if router_method_name in nodes:
|
||||
nodes[router_method_name]["router_paths"] = list(inferred_paths)
|
||||
|
||||
for router_method_name in router_methods:
|
||||
if router_method_name not in flow._router_paths:
|
||||
continue
|
||||
@@ -340,6 +400,7 @@ def build_flow_structure(flow: Flow[Any]) -> FlowStructure:
|
||||
target=str(listener_name),
|
||||
condition_type=None,
|
||||
is_router_path=True,
|
||||
router_path_label=str(path),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ class CSSExtension(Extension):
|
||||
Provides {% css 'path/to/file.css' %} tag syntax.
|
||||
"""
|
||||
|
||||
tags: ClassVar[set[str]] = {"css"} # type: ignore[assignment]
|
||||
tags: ClassVar[set[str]] = {"css"} # type: ignore[misc]
|
||||
|
||||
def parse(self, parser: Parser) -> nodes.Node:
|
||||
"""Parse {% css 'styles.css' %} tag.
|
||||
@@ -53,7 +53,7 @@ class JSExtension(Extension):
|
||||
Provides {% js 'path/to/file.js' %} tag syntax.
|
||||
"""
|
||||
|
||||
tags: ClassVar[set[str]] = {"js"} # type: ignore[assignment]
|
||||
tags: ClassVar[set[str]] = {"js"} # type: ignore[misc]
|
||||
|
||||
def parse(self, parser: Parser) -> nodes.Node:
|
||||
"""Parse {% js 'script.js' %} tag.
|
||||
@@ -91,6 +91,116 @@ TEXT_PRIMARY = "#e6edf3"
|
||||
TEXT_SECONDARY = "#7d8590"
|
||||
|
||||
|
||||
def calculate_node_positions(
|
||||
dag: FlowStructure,
|
||||
) -> dict[str, dict[str, int | float]]:
|
||||
"""Calculate hierarchical positions (level, x, y) for each node.
|
||||
|
||||
Args:
|
||||
dag: FlowStructure containing nodes and edges.
|
||||
|
||||
Returns:
|
||||
Dictionary mapping node names to their position data (level, x, y).
|
||||
"""
|
||||
children: dict[str, list[str]] = {name: [] for name in dag["nodes"]}
|
||||
parents: dict[str, list[str]] = {name: [] for name in dag["nodes"]}
|
||||
|
||||
for edge in dag["edges"]:
|
||||
source = edge["source"]
|
||||
target = edge["target"]
|
||||
if source in children and target in children:
|
||||
children[source].append(target)
|
||||
parents[target].append(source)
|
||||
|
||||
levels: dict[str, int] = {}
|
||||
queue: list[tuple[str, int]] = []
|
||||
|
||||
for start_method in dag["start_methods"]:
|
||||
if start_method in dag["nodes"]:
|
||||
levels[start_method] = 0
|
||||
queue.append((start_method, 0))
|
||||
|
||||
visited: set[str] = set()
|
||||
while queue:
|
||||
node, level = queue.pop(0)
|
||||
if node in visited:
|
||||
continue
|
||||
visited.add(node)
|
||||
|
||||
if node not in levels or levels[node] < level:
|
||||
levels[node] = level
|
||||
|
||||
for child in children.get(node, []):
|
||||
if child not in visited:
|
||||
child_level = level + 1
|
||||
if child not in levels or levels[child] < child_level:
|
||||
levels[child] = child_level
|
||||
queue.append((child, child_level))
|
||||
|
||||
for name in dag["nodes"]:
|
||||
if name not in levels:
|
||||
levels[name] = 0
|
||||
|
||||
nodes_by_level: dict[int, list[str]] = {}
|
||||
for node, level in levels.items():
|
||||
if level not in nodes_by_level:
|
||||
nodes_by_level[level] = []
|
||||
nodes_by_level[level].append(node)
|
||||
|
||||
positions: dict[str, dict[str, int | float]] = {}
|
||||
level_separation = 300 # Vertical spacing between levels
|
||||
node_spacing = 400 # Horizontal spacing between nodes
|
||||
|
||||
parent_count: dict[str, int] = {}
|
||||
for node, parent_list in parents.items():
|
||||
parent_count[node] = len(parent_list)
|
||||
|
||||
for level, nodes_at_level in sorted(nodes_by_level.items()):
|
||||
y = level * level_separation
|
||||
|
||||
if level == 0:
|
||||
num_nodes = len(nodes_at_level)
|
||||
for i, node in enumerate(nodes_at_level):
|
||||
x = (i - (num_nodes - 1) / 2) * node_spacing
|
||||
positions[node] = {"level": level, "x": x, "y": y}
|
||||
else:
|
||||
for i, node in enumerate(nodes_at_level):
|
||||
parent_list = parents.get(node, [])
|
||||
parent_positions: list[float] = [
|
||||
positions[parent]["x"]
|
||||
for parent in parent_list
|
||||
if parent in positions
|
||||
]
|
||||
|
||||
if parent_positions:
|
||||
if len(parent_positions) > 1 and len(set(parent_positions)) == 1:
|
||||
base_x = parent_positions[0]
|
||||
avg_x = base_x + node_spacing * 0.4
|
||||
else:
|
||||
avg_x = sum(parent_positions) / len(parent_positions)
|
||||
else:
|
||||
avg_x = i * node_spacing * 0.5
|
||||
|
||||
positions[node] = {"level": level, "x": avg_x, "y": y}
|
||||
|
||||
nodes_at_level_sorted = sorted(
|
||||
nodes_at_level, key=lambda n: positions[n]["x"]
|
||||
)
|
||||
min_spacing = node_spacing * 0.6 # Minimum horizontal distance
|
||||
|
||||
for i in range(len(nodes_at_level_sorted) - 1):
|
||||
current_node = nodes_at_level_sorted[i]
|
||||
next_node = nodes_at_level_sorted[i + 1]
|
||||
|
||||
current_x = positions[current_node]["x"]
|
||||
next_x = positions[next_node]["x"]
|
||||
|
||||
if next_x - current_x < min_spacing:
|
||||
positions[next_node]["x"] = current_x + min_spacing
|
||||
|
||||
return positions
|
||||
|
||||
|
||||
def render_interactive(
|
||||
dag: FlowStructure,
|
||||
filename: str = "flow_dag.html",
|
||||
@@ -110,6 +220,8 @@ def render_interactive(
|
||||
Returns:
|
||||
Absolute path to generated HTML file in temporary directory.
|
||||
"""
|
||||
node_positions = calculate_node_positions(dag)
|
||||
|
||||
nodes_list: list[dict[str, Any]] = []
|
||||
for name, metadata in dag["nodes"].items():
|
||||
node_type: str = metadata.get("type", "listen")
|
||||
@@ -120,37 +232,37 @@ def render_interactive(
|
||||
|
||||
if node_type == "start":
|
||||
color_config = {
|
||||
"background": CREWAI_ORANGE,
|
||||
"border": CREWAI_ORANGE,
|
||||
"background": "var(--node-bg-start)",
|
||||
"border": "var(--node-border-start)",
|
||||
"highlight": {
|
||||
"background": CREWAI_ORANGE,
|
||||
"border": CREWAI_ORANGE,
|
||||
"background": "var(--node-bg-start)",
|
||||
"border": "var(--node-border-start)",
|
||||
},
|
||||
}
|
||||
font_color = WHITE
|
||||
border_width = 2
|
||||
font_color = "var(--node-text-color)"
|
||||
border_width = 3
|
||||
elif node_type == "router":
|
||||
color_config = {
|
||||
"background": DARK_GRAY,
|
||||
"background": "var(--node-bg-router)",
|
||||
"border": CREWAI_ORANGE,
|
||||
"highlight": {
|
||||
"background": DARK_GRAY,
|
||||
"background": "var(--node-bg-router)",
|
||||
"border": CREWAI_ORANGE,
|
||||
},
|
||||
}
|
||||
font_color = WHITE
|
||||
font_color = "var(--node-text-color)"
|
||||
border_width = 3
|
||||
else:
|
||||
color_config = {
|
||||
"background": DARK_GRAY,
|
||||
"border": DARK_GRAY,
|
||||
"background": "var(--node-bg-listen)",
|
||||
"border": "var(--node-border-listen)",
|
||||
"highlight": {
|
||||
"background": DARK_GRAY,
|
||||
"border": DARK_GRAY,
|
||||
"background": "var(--node-bg-listen)",
|
||||
"border": "var(--node-border-listen)",
|
||||
},
|
||||
}
|
||||
font_color = WHITE
|
||||
border_width = 2
|
||||
font_color = "var(--node-text-color)"
|
||||
border_width = 3
|
||||
|
||||
title_parts: list[str] = []
|
||||
|
||||
@@ -215,25 +327,34 @@ def render_interactive(
|
||||
bg_color = color_config["background"]
|
||||
border_color = color_config["border"]
|
||||
|
||||
nodes_list.append(
|
||||
{
|
||||
"id": name,
|
||||
"label": name,
|
||||
"title": "".join(title_parts),
|
||||
"shape": "custom",
|
||||
"size": 30,
|
||||
"nodeStyle": {
|
||||
"name": name,
|
||||
"bgColor": bg_color,
|
||||
"borderColor": border_color,
|
||||
"borderWidth": border_width,
|
||||
"fontColor": font_color,
|
||||
},
|
||||
"opacity": 1.0,
|
||||
"glowSize": 0,
|
||||
"glowColor": None,
|
||||
}
|
||||
)
|
||||
position_data = node_positions.get(name, {"level": 0, "x": 0, "y": 0})
|
||||
|
||||
node_data: dict[str, Any] = {
|
||||
"id": name,
|
||||
"label": name,
|
||||
"title": "".join(title_parts),
|
||||
"shape": "custom",
|
||||
"size": 30,
|
||||
"level": position_data["level"],
|
||||
"nodeStyle": {
|
||||
"name": name,
|
||||
"bgColor": bg_color,
|
||||
"borderColor": border_color,
|
||||
"borderWidth": border_width,
|
||||
"fontColor": font_color,
|
||||
},
|
||||
"opacity": 1.0,
|
||||
"glowSize": 0,
|
||||
"glowColor": None,
|
||||
}
|
||||
|
||||
# Add x,y only for graphs with 3-4 nodes
|
||||
total_nodes = len(dag["nodes"])
|
||||
if 3 <= total_nodes <= 4:
|
||||
node_data["x"] = position_data["x"]
|
||||
node_data["y"] = position_data["y"]
|
||||
|
||||
nodes_list.append(node_data)
|
||||
|
||||
execution_paths: int = calculate_execution_paths(dag)
|
||||
|
||||
@@ -246,6 +367,8 @@ def render_interactive(
|
||||
if edge["is_router_path"]:
|
||||
edge_color = CREWAI_ORANGE
|
||||
edge_dashes = [15, 10]
|
||||
if "router_path_label" in edge:
|
||||
edge_label = edge["router_path_label"]
|
||||
elif edge["condition_type"] == "AND":
|
||||
edge_label = "AND"
|
||||
edge_color = CREWAI_ORANGE
|
||||
|
||||
@@ -10,6 +10,7 @@ class NodeMetadata(TypedDict, total=False):
|
||||
is_router: bool
|
||||
router_paths: list[str]
|
||||
condition_type: str | None
|
||||
trigger_condition_type: str | None
|
||||
trigger_methods: list[str]
|
||||
trigger_condition: dict[str, Any] | None
|
||||
method_signature: dict[str, Any]
|
||||
@@ -22,13 +23,14 @@ class NodeMetadata(TypedDict, total=False):
|
||||
class_line_number: int
|
||||
|
||||
|
||||
class StructureEdge(TypedDict):
|
||||
class StructureEdge(TypedDict, total=False):
|
||||
"""Represents a connection in the flow structure."""
|
||||
|
||||
source: str
|
||||
target: str
|
||||
condition_type: str | None
|
||||
is_router_path: bool
|
||||
router_path_label: str
|
||||
|
||||
|
||||
class FlowStructure(TypedDict):
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import asyncio
|
||||
from collections.abc import Callable
|
||||
import inspect
|
||||
import json
|
||||
from typing import (
|
||||
Any,
|
||||
Literal,
|
||||
@@ -58,7 +59,11 @@ from crewai.utilities.agent_utils import (
|
||||
process_llm_response,
|
||||
render_text_description_and_args,
|
||||
)
|
||||
from crewai.utilities.converter import generate_model_description
|
||||
from crewai.utilities.converter import (
|
||||
Converter,
|
||||
ConverterError,
|
||||
generate_model_description,
|
||||
)
|
||||
from crewai.utilities.guardrail import process_guardrail
|
||||
from crewai.utilities.guardrail_types import GuardrailCallable, GuardrailType
|
||||
from crewai.utilities.i18n import I18N, get_i18n
|
||||
@@ -241,7 +246,11 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
"""Return the original role for compatibility with tool interfaces."""
|
||||
return self.role
|
||||
|
||||
def kickoff(self, messages: str | list[LLMMessage]) -> LiteAgentOutput:
|
||||
def kickoff(
|
||||
self,
|
||||
messages: str | list[LLMMessage],
|
||||
response_format: type[BaseModel] | None = None,
|
||||
) -> LiteAgentOutput:
|
||||
"""
|
||||
Execute the agent with the given messages.
|
||||
|
||||
@@ -249,6 +258,8 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
messages: Either a string query or a list of message dictionaries.
|
||||
If a string is provided, it will be converted to a user message.
|
||||
If a list is provided, each dict should have 'role' and 'content' keys.
|
||||
response_format: Optional Pydantic model for structured output. If provided,
|
||||
overrides self.response_format for this execution.
|
||||
|
||||
Returns:
|
||||
LiteAgentOutput: The result of the agent execution.
|
||||
@@ -269,9 +280,13 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
self.tools_results = []
|
||||
|
||||
# Format messages for the LLM
|
||||
self._messages = self._format_messages(messages)
|
||||
self._messages = self._format_messages(
|
||||
messages, response_format=response_format
|
||||
)
|
||||
|
||||
return self._execute_core(agent_info=agent_info)
|
||||
return self._execute_core(
|
||||
agent_info=agent_info, response_format=response_format
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self._printer.print(
|
||||
@@ -289,7 +304,9 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
)
|
||||
raise e
|
||||
|
||||
def _execute_core(self, agent_info: dict[str, Any]) -> LiteAgentOutput:
|
||||
def _execute_core(
|
||||
self, agent_info: dict[str, Any], response_format: type[BaseModel] | None = None
|
||||
) -> LiteAgentOutput:
|
||||
# Emit event for agent execution start
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
@@ -303,15 +320,29 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
# Execute the agent using invoke loop
|
||||
agent_finish = self._invoke_loop()
|
||||
formatted_result: BaseModel | None = None
|
||||
if self.response_format:
|
||||
|
||||
active_response_format = response_format or self.response_format
|
||||
if active_response_format:
|
||||
try:
|
||||
# Cast to BaseModel to ensure type safety
|
||||
result = self.response_format.model_validate_json(agent_finish.output)
|
||||
model_schema = generate_model_description(active_response_format)
|
||||
schema = json.dumps(model_schema, indent=2)
|
||||
instructions = self.i18n.slice("formatted_task_instructions").format(
|
||||
output_format=schema
|
||||
)
|
||||
|
||||
converter = Converter(
|
||||
llm=self.llm,
|
||||
text=agent_finish.output,
|
||||
model=active_response_format,
|
||||
instructions=instructions,
|
||||
)
|
||||
|
||||
result = converter.to_pydantic()
|
||||
if isinstance(result, BaseModel):
|
||||
formatted_result = result
|
||||
except Exception as e:
|
||||
except ConverterError as e:
|
||||
self._printer.print(
|
||||
content=f"Failed to parse output into response format: {e!s}",
|
||||
content=f"Failed to parse output into response format after retries: {e.message}",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
@@ -400,8 +431,14 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
"""
|
||||
return await asyncio.to_thread(self.kickoff, messages)
|
||||
|
||||
def _get_default_system_prompt(self) -> str:
|
||||
"""Get the default system prompt for the agent."""
|
||||
def _get_default_system_prompt(
|
||||
self, response_format: type[BaseModel] | None = None
|
||||
) -> str:
|
||||
"""Get the default system prompt for the agent.
|
||||
|
||||
Args:
|
||||
response_format: Optional response format to use instead of self.response_format
|
||||
"""
|
||||
base_prompt = ""
|
||||
if self._parsed_tools:
|
||||
# Use the prompt template for agents with tools
|
||||
@@ -422,21 +459,31 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
goal=self.goal,
|
||||
)
|
||||
|
||||
# Add response format instructions if specified
|
||||
if self.response_format:
|
||||
schema = generate_model_description(self.response_format)
|
||||
active_response_format = response_format or self.response_format
|
||||
if active_response_format:
|
||||
model_description = generate_model_description(active_response_format)
|
||||
schema_json = json.dumps(model_description, indent=2)
|
||||
base_prompt += self.i18n.slice("lite_agent_response_format").format(
|
||||
response_format=schema
|
||||
response_format=schema_json
|
||||
)
|
||||
|
||||
return base_prompt
|
||||
|
||||
def _format_messages(self, messages: str | list[LLMMessage]) -> list[LLMMessage]:
|
||||
"""Format messages for the LLM."""
|
||||
def _format_messages(
|
||||
self,
|
||||
messages: str | list[LLMMessage],
|
||||
response_format: type[BaseModel] | None = None,
|
||||
) -> list[LLMMessage]:
|
||||
"""Format messages for the LLM.
|
||||
|
||||
Args:
|
||||
messages: Input messages to format
|
||||
response_format: Optional response format to use instead of self.response_format
|
||||
"""
|
||||
if isinstance(messages, str):
|
||||
messages = [{"role": "user", "content": messages}]
|
||||
|
||||
system_prompt = self._get_default_system_prompt()
|
||||
system_prompt = self._get_default_system_prompt(response_format=response_format)
|
||||
|
||||
# Add system message at the beginning
|
||||
formatted_messages: list[LLMMessage] = [
|
||||
@@ -506,6 +553,10 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
|
||||
self._append_message(formatted_answer.text, role="assistant")
|
||||
except OutputParserError as e: # noqa: PERF203
|
||||
self._printer.print(
|
||||
content="Failed to parse LLM output. Retrying...",
|
||||
color="yellow",
|
||||
)
|
||||
formatted_answer = handle_output_parser_exception(
|
||||
e=e,
|
||||
messages=self._messages,
|
||||
|
||||
@@ -20,7 +20,7 @@ from typing import (
|
||||
)
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from pydantic import BaseModel, Field
|
||||
from pydantic import BaseModel, Field, model_validator
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
@@ -318,7 +318,138 @@ class AccumulatedToolArgs(BaseModel):
|
||||
|
||||
|
||||
class LLM(BaseLLM):
|
||||
completion_cost: float | None = None
|
||||
completion_cost: float | None = Field(
|
||||
default=None, description="The completion cost of the LLM."
|
||||
)
|
||||
top_p: float | None = Field(
|
||||
default=None, description="Sampling probability threshold."
|
||||
)
|
||||
n: int | None = Field(
|
||||
default=None, description="Number of completions to generate."
|
||||
)
|
||||
max_completion_tokens: int | None = Field(
|
||||
default=None,
|
||||
description="Maximum number of tokens to generate in the completion.",
|
||||
)
|
||||
max_tokens: int | None = Field(
|
||||
default=None,
|
||||
description="Maximum number of tokens allowed in the prompt + completion.",
|
||||
)
|
||||
presence_penalty: float | None = Field(
|
||||
default=None, description="Penalty on the presence penalty."
|
||||
)
|
||||
frequency_penalty: float | None = Field(
|
||||
default=None, description="Penalty on the frequency penalty."
|
||||
)
|
||||
logit_bias: dict[int, float] | None = Field(
|
||||
default=None,
|
||||
description="Modifies the likelihood of specified tokens appearing in the completion.",
|
||||
)
|
||||
response_format: type[BaseModel] | None = Field(
|
||||
default=None,
|
||||
description="Pydantic model class for structured response parsing.",
|
||||
)
|
||||
seed: int | None = Field(
|
||||
default=None,
|
||||
description="Random seed for reproducibility.",
|
||||
)
|
||||
logprobs: int | None = Field(
|
||||
default=None,
|
||||
description="Number of top logprobs to return.",
|
||||
)
|
||||
top_logprobs: int | None = Field(
|
||||
default=None,
|
||||
description="Number of top logprobs to return.",
|
||||
)
|
||||
api_base: str | None = Field(
|
||||
default=None,
|
||||
description="Base URL for the API endpoint.",
|
||||
)
|
||||
api_version: str | None = Field(
|
||||
default=None,
|
||||
description="API version to use.",
|
||||
)
|
||||
callbacks: list[Any] = Field(
|
||||
default_factory=list,
|
||||
description="List of callback handlers for LLM events.",
|
||||
)
|
||||
reasoning_effort: Literal["none", "low", "medium", "high"] | None = Field(
|
||||
default=None,
|
||||
description="Level of reasoning effort for the LLM.",
|
||||
)
|
||||
context_window_size: int = Field(
|
||||
default=0,
|
||||
description="The context window size of the LLM.",
|
||||
)
|
||||
is_anthropic: bool = Field(
|
||||
default=False,
|
||||
description="Indicates if the model is from Anthropic provider.",
|
||||
)
|
||||
supports_function_calling: bool = Field(
|
||||
default=False,
|
||||
description="Indicates if the model supports function calling.",
|
||||
)
|
||||
supports_stop_words: bool = Field(
|
||||
default=False,
|
||||
description="Indicates if the model supports stop words.",
|
||||
)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def initialize_client(self) -> Self:
|
||||
self.is_anthropic = any(
|
||||
prefix in self.model.lower() for prefix in ANTHROPIC_PREFIXES
|
||||
)
|
||||
try:
|
||||
provider = self._get_custom_llm_provider()
|
||||
self.supports_function_calling = litellm.utils.supports_function_calling(
|
||||
self.model, custom_llm_provider=provider
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to check function calling support: {e!s}")
|
||||
self.supports_function_calling = False
|
||||
try:
|
||||
params = get_supported_openai_params(model=self.model)
|
||||
self.supports_stop_words = params is not None and "stop" in params
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to get supported params: {e!s}")
|
||||
self.supports_stop_words = False
|
||||
|
||||
with suppress_warnings():
|
||||
callback_types = [type(callback) for callback in self.callbacks]
|
||||
for callback in litellm.success_callback[:]:
|
||||
if type(callback) in callback_types:
|
||||
litellm.success_callback.remove(callback)
|
||||
|
||||
for callback in litellm._async_success_callback[:]:
|
||||
if type(callback) in callback_types:
|
||||
litellm._async_success_callback.remove(callback)
|
||||
|
||||
litellm.callbacks = self.callbacks
|
||||
|
||||
with suppress_warnings():
|
||||
success_callbacks_str = os.environ.get("LITELLM_SUCCESS_CALLBACKS", "")
|
||||
success_callbacks: list[str | Callable[..., Any] | CustomLogger] = []
|
||||
if success_callbacks_str:
|
||||
success_callbacks = [
|
||||
cb.strip() for cb in success_callbacks_str.split(",") if cb.strip()
|
||||
]
|
||||
|
||||
failure_callbacks_str = os.environ.get("LITELLM_FAILURE_CALLBACKS", "")
|
||||
if failure_callbacks_str:
|
||||
failure_callbacks: list[str | Callable[..., Any] | CustomLogger] = [
|
||||
cb.strip() for cb in failure_callbacks_str.split(",") if cb.strip()
|
||||
]
|
||||
|
||||
litellm.success_callback = success_callbacks
|
||||
litellm.failure_callback = failure_callbacks
|
||||
return self
|
||||
|
||||
# @computed_field
|
||||
# @property
|
||||
# def is_anthropic(self) -> bool:
|
||||
# """Determine if the model is from Anthropic provider."""
|
||||
# anthropic_prefixes = ("anthropic/", "claude-", "claude/")
|
||||
# return any(prefix in self.model.lower() for prefix in anthropic_prefixes)
|
||||
|
||||
def __new__(cls, model: str, is_litellm: bool = False, **kwargs: Any) -> LLM:
|
||||
"""Factory method that routes to native SDK or falls back to LiteLLM."""
|
||||
@@ -334,6 +465,8 @@ class LLM(BaseLLM):
|
||||
return cast(
|
||||
Self, native_class(model=model_string, provider=provider, **kwargs)
|
||||
)
|
||||
except NotImplementedError:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise ImportError(f"Error importing native provider: {e}") from e
|
||||
|
||||
@@ -379,96 +512,6 @@ class LLM(BaseLLM):
|
||||
|
||||
return None
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: str,
|
||||
timeout: float | int | None = None,
|
||||
temperature: float | None = None,
|
||||
top_p: float | None = None,
|
||||
n: int | None = None,
|
||||
stop: str | list[str] | None = None,
|
||||
max_completion_tokens: int | None = None,
|
||||
max_tokens: int | float | None = None,
|
||||
presence_penalty: float | None = None,
|
||||
frequency_penalty: float | None = None,
|
||||
logit_bias: dict[int, float] | None = None,
|
||||
response_format: type[BaseModel] | None = None,
|
||||
seed: int | None = None,
|
||||
logprobs: int | None = None,
|
||||
top_logprobs: int | None = None,
|
||||
base_url: str | None = None,
|
||||
api_base: str | None = None,
|
||||
api_version: str | None = None,
|
||||
api_key: str | None = None,
|
||||
callbacks: list[Any] | None = None,
|
||||
reasoning_effort: Literal["none", "low", "medium", "high"] | None = None,
|
||||
stream: bool = False,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize LLM instance.
|
||||
|
||||
Note: This __init__ method is only called for fallback instances.
|
||||
Native provider instances handle their own initialization in their respective classes.
|
||||
"""
|
||||
super().__init__(
|
||||
model=model,
|
||||
temperature=temperature,
|
||||
api_key=api_key,
|
||||
base_url=base_url,
|
||||
timeout=timeout,
|
||||
**kwargs,
|
||||
)
|
||||
self.model = model
|
||||
self.timeout = timeout
|
||||
self.temperature = temperature
|
||||
self.top_p = top_p
|
||||
self.n = n
|
||||
self.max_completion_tokens = max_completion_tokens
|
||||
self.max_tokens = max_tokens
|
||||
self.presence_penalty = presence_penalty
|
||||
self.frequency_penalty = frequency_penalty
|
||||
self.logit_bias = logit_bias
|
||||
self.response_format = response_format
|
||||
self.seed = seed
|
||||
self.logprobs = logprobs
|
||||
self.top_logprobs = top_logprobs
|
||||
self.base_url = base_url
|
||||
self.api_base = api_base
|
||||
self.api_version = api_version
|
||||
self.api_key = api_key
|
||||
self.callbacks = callbacks
|
||||
self.context_window_size = 0
|
||||
self.reasoning_effort = reasoning_effort
|
||||
self.additional_params = kwargs
|
||||
self.is_anthropic = self._is_anthropic_model(model)
|
||||
self.stream = stream
|
||||
|
||||
litellm.drop_params = True
|
||||
|
||||
# Normalize self.stop to always be a list[str]
|
||||
if stop is None:
|
||||
self.stop: list[str] = []
|
||||
elif isinstance(stop, str):
|
||||
self.stop = [stop]
|
||||
else:
|
||||
self.stop = stop
|
||||
|
||||
self.set_callbacks(callbacks or [])
|
||||
self.set_env_callbacks()
|
||||
|
||||
@staticmethod
|
||||
def _is_anthropic_model(model: str) -> bool:
|
||||
"""Determine if the model is from Anthropic provider.
|
||||
|
||||
Args:
|
||||
model: The model identifier string.
|
||||
|
||||
Returns:
|
||||
bool: True if the model is from Anthropic, False otherwise.
|
||||
"""
|
||||
anthropic_prefixes = ("anthropic/", "claude-", "claude/")
|
||||
return any(prefix in model.lower() for prefix in anthropic_prefixes)
|
||||
|
||||
def _prepare_completion_params(
|
||||
self,
|
||||
messages: str | list[LLMMessage],
|
||||
@@ -1182,8 +1225,6 @@ class LLM(BaseLLM):
|
||||
message["role"] = msg_role
|
||||
# --- 5) Set up callbacks if provided
|
||||
with suppress_warnings():
|
||||
if callbacks and len(callbacks) > 0:
|
||||
self.set_callbacks(callbacks)
|
||||
try:
|
||||
# --- 6) Prepare parameters for the completion call
|
||||
params = self._prepare_completion_params(messages, tools)
|
||||
@@ -1372,24 +1413,6 @@ class LLM(BaseLLM):
|
||||
"Please remove response_format or use a supported model."
|
||||
)
|
||||
|
||||
def supports_function_calling(self) -> bool:
|
||||
try:
|
||||
provider = self._get_custom_llm_provider()
|
||||
return litellm.utils.supports_function_calling(
|
||||
self.model, custom_llm_provider=provider
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to check function calling support: {e!s}")
|
||||
return False
|
||||
|
||||
def supports_stop_words(self) -> bool:
|
||||
try:
|
||||
params = get_supported_openai_params(model=self.model)
|
||||
return params is not None and "stop" in params
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to get supported params: {e!s}")
|
||||
return False
|
||||
|
||||
def get_context_window_size(self) -> int:
|
||||
"""
|
||||
Returns the context window size, using 75% of the maximum to avoid
|
||||
@@ -1419,60 +1442,6 @@ class LLM(BaseLLM):
|
||||
self.context_window_size = int(value * CONTEXT_WINDOW_USAGE_RATIO)
|
||||
return self.context_window_size
|
||||
|
||||
@staticmethod
|
||||
def set_callbacks(callbacks: list[Any]) -> None:
|
||||
"""
|
||||
Attempt to keep a single set of callbacks in litellm by removing old
|
||||
duplicates and adding new ones.
|
||||
"""
|
||||
with suppress_warnings():
|
||||
callback_types = [type(callback) for callback in callbacks]
|
||||
for callback in litellm.success_callback[:]:
|
||||
if type(callback) in callback_types:
|
||||
litellm.success_callback.remove(callback)
|
||||
|
||||
for callback in litellm._async_success_callback[:]:
|
||||
if type(callback) in callback_types:
|
||||
litellm._async_success_callback.remove(callback)
|
||||
|
||||
litellm.callbacks = callbacks
|
||||
|
||||
@staticmethod
|
||||
def set_env_callbacks() -> None:
|
||||
"""Sets the success and failure callbacks for the LiteLLM library from environment variables.
|
||||
|
||||
This method reads the `LITELLM_SUCCESS_CALLBACKS` and `LITELLM_FAILURE_CALLBACKS`
|
||||
environment variables, which should contain comma-separated lists of callback names.
|
||||
It then assigns these lists to `litellm.success_callback` and `litellm.failure_callback`,
|
||||
respectively.
|
||||
|
||||
If the environment variables are not set or are empty, the corresponding callback lists
|
||||
will be set to empty lists.
|
||||
|
||||
Examples:
|
||||
LITELLM_SUCCESS_CALLBACKS="langfuse,langsmith"
|
||||
LITELLM_FAILURE_CALLBACKS="langfuse"
|
||||
|
||||
This will set `litellm.success_callback` to ["langfuse", "langsmith"] and
|
||||
`litellm.failure_callback` to ["langfuse"].
|
||||
"""
|
||||
with suppress_warnings():
|
||||
success_callbacks_str = os.environ.get("LITELLM_SUCCESS_CALLBACKS", "")
|
||||
success_callbacks: list[str | Callable[..., Any] | CustomLogger] = []
|
||||
if success_callbacks_str:
|
||||
success_callbacks = [
|
||||
cb.strip() for cb in success_callbacks_str.split(",") if cb.strip()
|
||||
]
|
||||
|
||||
failure_callbacks_str = os.environ.get("LITELLM_FAILURE_CALLBACKS", "")
|
||||
if failure_callbacks_str:
|
||||
failure_callbacks: list[str | Callable[..., Any] | CustomLogger] = [
|
||||
cb.strip() for cb in failure_callbacks_str.split(",") if cb.strip()
|
||||
]
|
||||
|
||||
litellm.success_callback = success_callbacks
|
||||
litellm.failure_callback = failure_callbacks
|
||||
|
||||
def __copy__(self) -> LLM:
|
||||
"""Create a shallow copy of the LLM instance."""
|
||||
# Filter out parameters that are already explicitly passed to avoid conflicts
|
||||
@@ -1533,7 +1502,7 @@ class LLM(BaseLLM):
|
||||
**filtered_params,
|
||||
)
|
||||
|
||||
def __deepcopy__(self, memo: dict[int, Any] | None) -> LLM:
|
||||
def __deepcopy__(self, memo: dict[int, Any] | None) -> LLM: # type: ignore[override]
|
||||
"""Create a deep copy of the LLM instance."""
|
||||
import copy
|
||||
|
||||
|
||||
@@ -13,8 +13,9 @@ import logging
|
||||
import re
|
||||
from typing import TYPE_CHECKING, Any, Final
|
||||
|
||||
from pydantic import BaseModel
|
||||
from pydantic import AliasChoices, BaseModel, Field, PrivateAttr, field_validator
|
||||
|
||||
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.llm_events import (
|
||||
LLMCallCompletedEvent,
|
||||
@@ -28,6 +29,7 @@ from crewai.events.types.tool_usage_events import (
|
||||
ToolUsageFinishedEvent,
|
||||
ToolUsageStartedEvent,
|
||||
)
|
||||
from crewai.llms.hooks import BaseInterceptor
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
|
||||
|
||||
@@ -43,7 +45,7 @@ DEFAULT_SUPPORTS_STOP_WORDS: Final[bool] = True
|
||||
_JSON_EXTRACTION_PATTERN: Final[re.Pattern[str]] = re.compile(r"\{.*}", re.DOTALL)
|
||||
|
||||
|
||||
class BaseLLM(ABC):
|
||||
class BaseLLM(BaseModel, ABC):
|
||||
"""Abstract base class for LLM implementations.
|
||||
|
||||
This class defines the interface that all LLM implementations must follow.
|
||||
@@ -55,70 +57,105 @@ class BaseLLM(ABC):
|
||||
implement proper validation for input parameters and provide clear error
|
||||
messages when things go wrong.
|
||||
|
||||
|
||||
Attributes:
|
||||
model: The model identifier/name.
|
||||
temperature: Optional temperature setting for response generation.
|
||||
stop: A list of stop sequences that the LLM should use to stop generation.
|
||||
additional_params: Additional provider-specific parameters.
|
||||
"""
|
||||
|
||||
is_litellm: bool = False
|
||||
provider: str | re.Pattern[str] = Field(
|
||||
default="openai", description="The provider of the LLM."
|
||||
)
|
||||
model: str = Field(description="The model identifier/name.")
|
||||
temperature: float | None = Field(
|
||||
default=None, ge=0, le=2, description="Temperature for response generation."
|
||||
)
|
||||
api_key: str | None = Field(default=None, description="API key for authentication.")
|
||||
base_url: str | None = Field(default=None, description="Base URL for API calls.")
|
||||
timeout: float | None = Field(default=None, description="Timeout for API calls.")
|
||||
max_retries: int = Field(
|
||||
default=2, description="Maximum number of API requests to make."
|
||||
)
|
||||
max_tokens: int | None = Field(
|
||||
default=None, description="Maximum tokens for response generation."
|
||||
)
|
||||
stream: bool | None = Field(default=False, description="Stream the API requests.")
|
||||
client: Any = Field(description="Underlying LLM client instance.")
|
||||
interceptor: BaseInterceptor[Any, Any] | None = Field(
|
||||
default=None,
|
||||
description="An optional HTTPX interceptor for modifying requests/responses.",
|
||||
)
|
||||
client_params: dict[str, Any] = Field(
|
||||
default_factory=dict,
|
||||
description="Additional parameters for the underlying LLM client.",
|
||||
)
|
||||
supports_stop_words: bool = Field(
|
||||
default=DEFAULT_SUPPORTS_STOP_WORDS,
|
||||
description="Whether or not to support stop words.",
|
||||
)
|
||||
stop_sequences: list[str] = Field(
|
||||
default_factory=list,
|
||||
validation_alias=AliasChoices("stop_sequences", "stop"),
|
||||
description="Stop sequences for generation (synchronized with stop).",
|
||||
)
|
||||
is_litellm: bool = Field(
|
||||
default=False, description="Is this LLM implementation in litellm?"
|
||||
)
|
||||
additional_params: dict[str, Any] = Field(
|
||||
default_factory=dict,
|
||||
description="Additional parameters for LLM calls.",
|
||||
)
|
||||
_token_usage: TokenProcess = PrivateAttr(default_factory=TokenProcess)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: str,
|
||||
temperature: float | None = None,
|
||||
api_key: str | None = None,
|
||||
base_url: str | None = None,
|
||||
provider: str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize the BaseLLM with default attributes.
|
||||
@field_validator("provider", mode="before")
|
||||
@classmethod
|
||||
def extract_provider_from_model(
|
||||
cls, v: str | re.Pattern[str] | None, info: Any
|
||||
) -> str | re.Pattern[str]:
|
||||
"""Extract provider from model string if not explicitly provided.
|
||||
|
||||
Args:
|
||||
model: The model identifier/name.
|
||||
temperature: Optional temperature setting for response generation.
|
||||
stop: Optional list of stop sequences for generation.
|
||||
**kwargs: Additional provider-specific parameters.
|
||||
v: Provided provider value (can be str, Pattern, or None)
|
||||
info: Validation info containing other field values
|
||||
|
||||
Returns:
|
||||
Provider name (str) or Pattern
|
||||
"""
|
||||
if not model:
|
||||
raise ValueError("Model name is required and cannot be empty")
|
||||
# If provider explicitly provided, validate and return it
|
||||
if v is not None:
|
||||
if not isinstance(v, (str, re.Pattern)):
|
||||
raise ValueError(f"Provider must be str or Pattern, got {type(v)}")
|
||||
return v
|
||||
|
||||
self.model = model
|
||||
self.temperature = temperature
|
||||
self.api_key = api_key
|
||||
self.base_url = base_url
|
||||
# Store additional parameters for provider-specific use
|
||||
self.additional_params = kwargs
|
||||
self._provider = provider or "openai"
|
||||
model: str = info.data.get("model", "")
|
||||
if "/" in model:
|
||||
return model.partition("/")[0]
|
||||
return "openai"
|
||||
|
||||
stop = kwargs.pop("stop", None)
|
||||
if stop is None:
|
||||
self.stop: list[str] = []
|
||||
elif isinstance(stop, str):
|
||||
self.stop = [stop]
|
||||
elif isinstance(stop, list):
|
||||
self.stop = stop
|
||||
else:
|
||||
self.stop = []
|
||||
@field_validator("stop_sequences", mode="before")
|
||||
@classmethod
|
||||
def normalize_stop_sequences(
|
||||
cls, v: str | list[str] | set[str] | None
|
||||
) -> list[str]:
|
||||
"""Validate and normalize stop sequences.
|
||||
|
||||
self._token_usage = {
|
||||
"total_tokens": 0,
|
||||
"prompt_tokens": 0,
|
||||
"completion_tokens": 0,
|
||||
"successful_requests": 0,
|
||||
"cached_prompt_tokens": 0,
|
||||
}
|
||||
Converts string to list and handles None values.
|
||||
AliasChoices handles accepting both 'stop' and 'stop_sequences' parameter names.
|
||||
"""
|
||||
if v is None:
|
||||
return []
|
||||
if isinstance(v, str):
|
||||
return [v]
|
||||
if isinstance(v, set):
|
||||
return list(v)
|
||||
if isinstance(v, list):
|
||||
return v
|
||||
return []
|
||||
|
||||
@property
|
||||
def provider(self) -> str:
|
||||
"""Get the provider of the LLM."""
|
||||
return self._provider
|
||||
|
||||
@provider.setter
|
||||
def provider(self, value: str) -> None:
|
||||
"""Set the provider of the LLM."""
|
||||
self._provider = value
|
||||
def stop(self) -> list[str]:
|
||||
"""Alias for stop_sequences to maintain backward compatibility."""
|
||||
return self.stop_sequences
|
||||
|
||||
@abstractmethod
|
||||
def call(
|
||||
@@ -171,14 +208,6 @@ class BaseLLM(ABC):
|
||||
"""
|
||||
return tools
|
||||
|
||||
def supports_stop_words(self) -> bool:
|
||||
"""Check if the LLM supports stop words.
|
||||
|
||||
Returns:
|
||||
True if the LLM supports stop words, False otherwise.
|
||||
"""
|
||||
return DEFAULT_SUPPORTS_STOP_WORDS
|
||||
|
||||
def _supports_stop_words_implementation(self) -> bool:
|
||||
"""Check if stop words are configured for this LLM instance.
|
||||
|
||||
@@ -506,7 +535,7 @@ class BaseLLM(ABC):
|
||||
"""
|
||||
if "/" in model:
|
||||
return model.partition("/")[0]
|
||||
return "openai" # Default provider
|
||||
return "openai"
|
||||
|
||||
def _track_token_usage_internal(self, usage_data: dict[str, Any]) -> None:
|
||||
"""Track token usage internally in the LLM instance.
|
||||
@@ -535,11 +564,11 @@ class BaseLLM(ABC):
|
||||
or 0
|
||||
)
|
||||
|
||||
self._token_usage["prompt_tokens"] += prompt_tokens
|
||||
self._token_usage["completion_tokens"] += completion_tokens
|
||||
self._token_usage["total_tokens"] += prompt_tokens + completion_tokens
|
||||
self._token_usage["successful_requests"] += 1
|
||||
self._token_usage["cached_prompt_tokens"] += cached_tokens
|
||||
self._token_usage.prompt_tokens += prompt_tokens
|
||||
self._token_usage.completion_tokens += completion_tokens
|
||||
self._token_usage.total_tokens += prompt_tokens + completion_tokens
|
||||
self._token_usage.successful_requests += 1
|
||||
self._token_usage.cached_prompt_tokens += cached_tokens
|
||||
|
||||
def get_token_usage_summary(self) -> UsageMetrics:
|
||||
"""Get summary of token usage for this LLM instance.
|
||||
@@ -547,4 +576,10 @@ class BaseLLM(ABC):
|
||||
Returns:
|
||||
Dictionary with token usage totals
|
||||
"""
|
||||
return UsageMetrics(**self._token_usage)
|
||||
return UsageMetrics(
|
||||
prompt_tokens=self._token_usage.prompt_tokens,
|
||||
completion_tokens=self._token_usage.completion_tokens,
|
||||
total_tokens=self._token_usage.total_tokens,
|
||||
successful_requests=self._token_usage.successful_requests,
|
||||
cached_prompt_tokens=self._token_usage.cached_prompt_tokens,
|
||||
)
|
||||
|
||||
6
lib/crewai/src/crewai/llms/hooks/__init__.py
Normal file
6
lib/crewai/src/crewai/llms/hooks/__init__.py
Normal file
@@ -0,0 +1,6 @@
|
||||
"""Interceptor contracts for crewai"""
|
||||
|
||||
from crewai.llms.hooks.base import BaseInterceptor
|
||||
|
||||
|
||||
__all__ = ["BaseInterceptor"]
|
||||
133
lib/crewai/src/crewai/llms/hooks/base.py
Normal file
133
lib/crewai/src/crewai/llms/hooks/base.py
Normal file
@@ -0,0 +1,133 @@
|
||||
"""Base classes for LLM transport interceptors.
|
||||
|
||||
This module provides abstract base classes for intercepting and modifying
|
||||
outbound and inbound messages at the transport level.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import TYPE_CHECKING, Any, Generic, TypeVar
|
||||
|
||||
from pydantic_core import core_schema
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pydantic import GetCoreSchemaHandler
|
||||
from pydantic_core import CoreSchema
|
||||
|
||||
|
||||
T = TypeVar("T")
|
||||
U = TypeVar("U")
|
||||
|
||||
|
||||
class BaseInterceptor(ABC, Generic[T, U]):
|
||||
"""Abstract base class for intercepting transport-level messages.
|
||||
|
||||
Provides hooks to intercept and modify outbound and inbound messages
|
||||
at the transport layer.
|
||||
|
||||
Type parameters:
|
||||
T: Outbound message type (e.g., httpx.Request)
|
||||
U: Inbound message type (e.g., httpx.Response)
|
||||
|
||||
Example:
|
||||
>>> import httpx
|
||||
>>> class CustomInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
... def on_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
... message.headers["X-Custom-Header"] = "value"
|
||||
... return message
|
||||
...
|
||||
... def on_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
... print(f"Status: {message.status_code}")
|
||||
... return message
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def on_outbound(self, message: T) -> T:
|
||||
"""Intercept outbound message before sending.
|
||||
|
||||
Args:
|
||||
message: Outbound message object.
|
||||
|
||||
Returns:
|
||||
Modified message object.
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
def on_inbound(self, message: U) -> U:
|
||||
"""Intercept inbound message after receiving.
|
||||
|
||||
Args:
|
||||
message: Inbound message object.
|
||||
|
||||
Returns:
|
||||
Modified message object.
|
||||
"""
|
||||
...
|
||||
|
||||
async def aon_outbound(self, message: T) -> T:
|
||||
"""Async version of on_outbound.
|
||||
|
||||
Args:
|
||||
message: Outbound message object.
|
||||
|
||||
Returns:
|
||||
Modified message object.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
async def aon_inbound(self, message: U) -> U:
|
||||
"""Async version of on_inbound.
|
||||
|
||||
Args:
|
||||
message: Inbound message object.
|
||||
|
||||
Returns:
|
||||
Modified message object.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_core_schema__(
|
||||
cls, _source_type: Any, _handler: GetCoreSchemaHandler
|
||||
) -> CoreSchema:
|
||||
"""Generate Pydantic core schema for BaseInterceptor.
|
||||
|
||||
This allows the generic BaseInterceptor to be used in Pydantic models
|
||||
without requiring arbitrary_types_allowed=True. The schema validates
|
||||
that the value is an instance of BaseInterceptor.
|
||||
|
||||
Args:
|
||||
_source_type: The source type being validated (unused).
|
||||
_handler: Handler for generating schemas (unused).
|
||||
|
||||
Returns:
|
||||
A Pydantic core schema that validates BaseInterceptor instances.
|
||||
"""
|
||||
return core_schema.no_info_plain_validator_function(
|
||||
_validate_interceptor,
|
||||
serialization=core_schema.plain_serializer_function_ser_schema(
|
||||
lambda x: x, return_schema=core_schema.any_schema()
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def _validate_interceptor(value: Any) -> BaseInterceptor[T, U]:
|
||||
"""Validate that the value is a BaseInterceptor instance.
|
||||
|
||||
Args:
|
||||
value: The value to validate.
|
||||
|
||||
Returns:
|
||||
The validated BaseInterceptor instance.
|
||||
|
||||
Raises:
|
||||
ValueError: If the value is not a BaseInterceptor instance.
|
||||
"""
|
||||
if not isinstance(value, BaseInterceptor):
|
||||
raise ValueError(
|
||||
f"Expected BaseInterceptor instance, got {type(value).__name__}"
|
||||
)
|
||||
return value
|
||||
123
lib/crewai/src/crewai/llms/hooks/transport.py
Normal file
123
lib/crewai/src/crewai/llms/hooks/transport.py
Normal file
@@ -0,0 +1,123 @@
|
||||
"""HTTP transport implementations for LLM request/response interception.
|
||||
|
||||
This module provides internal transport classes that integrate with BaseInterceptor
|
||||
to enable request/response modification at the transport level.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterable
|
||||
from typing import TYPE_CHECKING, TypedDict
|
||||
|
||||
from httpx import (
|
||||
AsyncHTTPTransport as _AsyncHTTPTransport,
|
||||
HTTPTransport as _HTTPTransport,
|
||||
)
|
||||
from typing_extensions import NotRequired, Unpack
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ssl import SSLContext
|
||||
|
||||
from httpx import Limits, Request, Response
|
||||
from httpx._types import CertTypes, ProxyTypes
|
||||
|
||||
from crewai.llms.hooks.base import BaseInterceptor
|
||||
|
||||
|
||||
class HTTPTransportKwargs(TypedDict, total=False):
|
||||
"""Typed dictionary for httpx.HTTPTransport initialization parameters.
|
||||
|
||||
These parameters configure the underlying HTTP transport behavior including
|
||||
SSL verification, proxies, connection limits, and low-level socket options.
|
||||
"""
|
||||
|
||||
verify: bool | str | SSLContext
|
||||
cert: NotRequired[CertTypes]
|
||||
trust_env: bool
|
||||
http1: bool
|
||||
http2: bool
|
||||
limits: Limits
|
||||
proxy: NotRequired[ProxyTypes]
|
||||
uds: NotRequired[str]
|
||||
local_address: NotRequired[str]
|
||||
retries: int
|
||||
socket_options: NotRequired[
|
||||
Iterable[
|
||||
tuple[int, int, int]
|
||||
| tuple[int, int, bytes | bytearray]
|
||||
| tuple[int, int, None, int]
|
||||
]
|
||||
]
|
||||
|
||||
|
||||
class HTTPTransport(_HTTPTransport):
|
||||
"""HTTP transport that uses an interceptor for request/response modification.
|
||||
|
||||
This transport is used internally when a user provides a BaseInterceptor.
|
||||
Users should not instantiate this class directly - instead, pass an interceptor
|
||||
to the LLM client and this transport will be created automatically.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
interceptor: BaseInterceptor[Request, Response],
|
||||
**kwargs: Unpack[HTTPTransportKwargs],
|
||||
) -> None:
|
||||
"""Initialize transport with interceptor.
|
||||
|
||||
Args:
|
||||
interceptor: HTTP interceptor for modifying raw request/response objects.
|
||||
**kwargs: HTTPTransport configuration parameters (verify, cert, proxy, etc.).
|
||||
"""
|
||||
super().__init__(**kwargs)
|
||||
self.interceptor = interceptor
|
||||
|
||||
def handle_request(self, request: Request) -> Response:
|
||||
"""Handle request with interception.
|
||||
|
||||
Args:
|
||||
request: The HTTP request to handle.
|
||||
|
||||
Returns:
|
||||
The HTTP response.
|
||||
"""
|
||||
request = self.interceptor.on_outbound(request)
|
||||
response = super().handle_request(request)
|
||||
return self.interceptor.on_inbound(response)
|
||||
|
||||
|
||||
class AsyncHTTPTransport(_AsyncHTTPTransport):
|
||||
"""Async HTTP transport that uses an interceptor for request/response modification.
|
||||
|
||||
This transport is used internally when a user provides a BaseInterceptor.
|
||||
Users should not instantiate this class directly - instead, pass an interceptor
|
||||
to the LLM client and this transport will be created automatically.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
interceptor: BaseInterceptor[Request, Response],
|
||||
**kwargs: Unpack[HTTPTransportKwargs],
|
||||
) -> None:
|
||||
"""Initialize async transport with interceptor.
|
||||
|
||||
Args:
|
||||
interceptor: HTTP interceptor for modifying raw request/response objects.
|
||||
**kwargs: HTTPTransport configuration parameters (verify, cert, proxy, etc.).
|
||||
"""
|
||||
super().__init__(**kwargs)
|
||||
self.interceptor = interceptor
|
||||
|
||||
async def handle_async_request(self, request: Request) -> Response:
|
||||
"""Handle async request with interception.
|
||||
|
||||
Args:
|
||||
request: The HTTP request to handle.
|
||||
|
||||
Returns:
|
||||
The HTTP response.
|
||||
"""
|
||||
request = await self.interceptor.aon_outbound(request)
|
||||
response = await super().handle_async_request(request)
|
||||
return await self.interceptor.aon_inbound(response)
|
||||
@@ -1,15 +1,18 @@
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from typing import Any, cast
|
||||
from typing import TYPE_CHECKING, Any, cast
|
||||
|
||||
from pydantic import BaseModel
|
||||
from pydantic import BaseModel, Field, PrivateAttr, computed_field, model_validator
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.events.types.llm_events import LLMCallType
|
||||
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.llms.hooks.transport import HTTPTransport
|
||||
from crewai.llms.providers.utils.common import safe_tool_conversion
|
||||
from crewai.utilities.agent_utils import is_context_length_exceeded
|
||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
LLMContextLengthExceededError,
|
||||
@@ -17,16 +20,34 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
from crewai.utilities.types import LLMMessage
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agent import Agent
|
||||
from crewai.task import Task
|
||||
|
||||
try:
|
||||
from anthropic import Anthropic
|
||||
from anthropic.types import Message
|
||||
from anthropic.types.tool_use_block import ToolUseBlock
|
||||
import httpx
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
'Anthropic native provider not available, to install: uv add "crewai[anthropic]"'
|
||||
) from None
|
||||
|
||||
|
||||
ANTHROPIC_CONTEXT_WINDOWS: dict[str, int] = {
|
||||
"claude-3-5-sonnet": 200000,
|
||||
"claude-3-5-haiku": 200000,
|
||||
"claude-3-opus": 200000,
|
||||
"claude-3-sonnet": 200000,
|
||||
"claude-3-haiku": 200000,
|
||||
"claude-3-7-sonnet": 200000,
|
||||
"claude-2.1": 200000,
|
||||
"claude-2": 100000,
|
||||
"claude-instant": 100000,
|
||||
}
|
||||
|
||||
|
||||
class AnthropicCompletion(BaseLLM):
|
||||
"""Anthropic native completion implementation.
|
||||
|
||||
@@ -34,87 +55,78 @@ class AnthropicCompletion(BaseLLM):
|
||||
offering native tool use, streaming support, and proper message formatting.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: str = "claude-3-5-sonnet-20241022",
|
||||
api_key: str | None = None,
|
||||
base_url: str | None = None,
|
||||
timeout: float | None = None,
|
||||
max_retries: int = 2,
|
||||
temperature: float | None = None,
|
||||
max_tokens: int = 4096, # Required for Anthropic
|
||||
top_p: float | None = None,
|
||||
stop_sequences: list[str] | None = None,
|
||||
stream: bool = False,
|
||||
client_params: dict[str, Any] | None = None,
|
||||
**kwargs,
|
||||
):
|
||||
"""Initialize Anthropic chat completion client.
|
||||
model: str = Field(
|
||||
default="claude-3-5-sonnet-20241022",
|
||||
description="Anthropic model name (e.g., 'claude-3-5-sonnet-20241022')",
|
||||
)
|
||||
max_tokens: int = Field(
|
||||
default=4096,
|
||||
description="Maximum number of allowed tokens in response.",
|
||||
)
|
||||
top_p: float | None = Field(
|
||||
default=None,
|
||||
description="Nucleus sampling parameter.",
|
||||
)
|
||||
_client: Anthropic = PrivateAttr(
|
||||
default_factory=Anthropic,
|
||||
)
|
||||
|
||||
Args:
|
||||
model: Anthropic model name (e.g., 'claude-3-5-sonnet-20241022')
|
||||
api_key: Anthropic API key (defaults to ANTHROPIC_API_KEY env var)
|
||||
base_url: Custom base URL for Anthropic API
|
||||
timeout: Request timeout in seconds
|
||||
max_retries: Maximum number of retries
|
||||
temperature: Sampling temperature (0-1)
|
||||
max_tokens: Maximum tokens in response (required for Anthropic)
|
||||
top_p: Nucleus sampling parameter
|
||||
stop_sequences: Stop sequences (Anthropic uses stop_sequences, not stop)
|
||||
stream: Enable streaming responses
|
||||
client_params: Additional parameters for the Anthropic client
|
||||
**kwargs: Additional parameters
|
||||
@model_validator(mode="after")
|
||||
def initialize_client(self) -> Self:
|
||||
"""Initialize the Anthropic client after Pydantic validation.
|
||||
|
||||
This runs after all field validation is complete, ensuring that:
|
||||
- All BaseLLM fields are set (model, temperature, stop_sequences, etc.)
|
||||
- Field validators have run (stop_sequences is normalized to set[str])
|
||||
- API key and other configuration is ready
|
||||
"""
|
||||
super().__init__(
|
||||
model=model, temperature=temperature, stop=stop_sequences or [], **kwargs
|
||||
)
|
||||
|
||||
# Client params
|
||||
self.client_params = client_params
|
||||
self.base_url = base_url
|
||||
self.timeout = timeout
|
||||
self.max_retries = max_retries
|
||||
|
||||
self.client = Anthropic(**self._get_client_params())
|
||||
|
||||
# Store completion parameters
|
||||
self.max_tokens = max_tokens
|
||||
self.top_p = top_p
|
||||
self.stream = stream
|
||||
self.stop_sequences = stop_sequences or []
|
||||
|
||||
# Model-specific settings
|
||||
self.is_claude_3 = "claude-3" in model.lower()
|
||||
self.supports_tools = self.is_claude_3 # Claude 3+ supports tool use
|
||||
|
||||
def _get_client_params(self) -> dict[str, Any]:
|
||||
"""Get client parameters."""
|
||||
|
||||
if self.api_key is None:
|
||||
self.api_key = os.getenv("ANTHROPIC_API_KEY")
|
||||
if self.api_key is None:
|
||||
raise ValueError("ANTHROPIC_API_KEY is required")
|
||||
|
||||
client_params = {
|
||||
"api_key": self.api_key,
|
||||
"base_url": self.base_url,
|
||||
"timeout": self.timeout,
|
||||
"max_retries": self.max_retries,
|
||||
}
|
||||
params = self.model_dump(
|
||||
include={"api_key", "base_url", "timeout", "max_retries"},
|
||||
exclude_none=True,
|
||||
)
|
||||
|
||||
if self.interceptor:
|
||||
transport = HTTPTransport(interceptor=self.interceptor)
|
||||
http_client = httpx.Client(transport=transport)
|
||||
params["http_client"] = http_client
|
||||
|
||||
if self.client_params:
|
||||
client_params.update(self.client_params)
|
||||
params.update(self.client_params)
|
||||
|
||||
return client_params
|
||||
self._client = Anthropic(**params)
|
||||
return self
|
||||
|
||||
@computed_field # type: ignore[prop-decorator]
|
||||
@property
|
||||
def is_claude_3(self) -> bool:
|
||||
"""Check if the model is Claude 3 or higher."""
|
||||
return "claude-3" in self.model.lower()
|
||||
|
||||
@computed_field # type: ignore[prop-decorator]
|
||||
@property
|
||||
def supports_tools(self) -> bool:
|
||||
"""Check if the model supports tool use."""
|
||||
return self.is_claude_3
|
||||
|
||||
@computed_field # type: ignore[prop-decorator]
|
||||
@property
|
||||
def supports_function_calling(self) -> bool:
|
||||
"""Check if the model supports function calling."""
|
||||
return self.supports_tools
|
||||
|
||||
def call(
|
||||
self,
|
||||
messages: str | list[LLMMessage],
|
||||
tools: list[dict] | None = None,
|
||||
tools: list[dict[str, Any]] | None = None,
|
||||
callbacks: list[Any] | None = None,
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
from_task: Task | None = None,
|
||||
from_agent: Agent | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | Any:
|
||||
"""Call Anthropic messages API.
|
||||
@@ -133,7 +145,7 @@ class AnthropicCompletion(BaseLLM):
|
||||
try:
|
||||
# Emit call started event
|
||||
self._emit_call_started_event(
|
||||
messages=messages, # type: ignore[arg-type]
|
||||
messages=messages,
|
||||
tools=tools,
|
||||
callbacks=callbacks,
|
||||
available_functions=available_functions,
|
||||
@@ -143,7 +155,7 @@ class AnthropicCompletion(BaseLLM):
|
||||
|
||||
# Format messages for Anthropic
|
||||
formatted_messages, system_message = self._format_messages_for_anthropic(
|
||||
messages # type: ignore[arg-type]
|
||||
messages
|
||||
)
|
||||
|
||||
# Prepare completion parameters
|
||||
@@ -181,7 +193,7 @@ class AnthropicCompletion(BaseLLM):
|
||||
self,
|
||||
messages: list[LLMMessage],
|
||||
system_message: str | None = None,
|
||||
tools: list[dict] | None = None,
|
||||
tools: list[dict[str, Any]] | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Prepare parameters for Anthropic messages API.
|
||||
|
||||
@@ -193,32 +205,30 @@ class AnthropicCompletion(BaseLLM):
|
||||
Returns:
|
||||
Parameters dictionary for Anthropic API
|
||||
"""
|
||||
params = {
|
||||
"model": self.model,
|
||||
"messages": messages,
|
||||
"max_tokens": self.max_tokens,
|
||||
"stream": self.stream,
|
||||
}
|
||||
|
||||
params = self.model_dump(
|
||||
include={
|
||||
"model",
|
||||
"max_tokens",
|
||||
"stream",
|
||||
"temperature",
|
||||
"top_p",
|
||||
"stop_sequences",
|
||||
},
|
||||
)
|
||||
params["messages"] = messages
|
||||
# Add system message if present
|
||||
if system_message:
|
||||
params["system"] = system_message
|
||||
|
||||
# Add optional parameters if set
|
||||
if self.temperature is not None:
|
||||
params["temperature"] = self.temperature
|
||||
if self.top_p is not None:
|
||||
params["top_p"] = self.top_p
|
||||
if self.stop_sequences:
|
||||
params["stop_sequences"] = self.stop_sequences
|
||||
|
||||
# Handle tools for Claude 3+
|
||||
if tools and self.supports_tools:
|
||||
params["tools"] = self._convert_tools_for_interference(tools)
|
||||
|
||||
return params
|
||||
|
||||
def _convert_tools_for_interference(self, tools: list[dict]) -> list[dict]:
|
||||
def _convert_tools_for_interference(
|
||||
self, tools: list[dict[str, Any]]
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Convert CrewAI tool format to Anthropic tool use format."""
|
||||
anthropic_tools = []
|
||||
|
||||
@@ -228,8 +238,6 @@ class AnthropicCompletion(BaseLLM):
|
||||
continue
|
||||
|
||||
try:
|
||||
from crewai.llms.providers.utils.common import safe_tool_conversion
|
||||
|
||||
name, description, parameters = safe_tool_conversion(tool, "Anthropic")
|
||||
except (ImportError, KeyError, ValueError) as e:
|
||||
logging.error(f"Error converting tool to Anthropic format: {e}")
|
||||
@@ -303,8 +311,8 @@ class AnthropicCompletion(BaseLLM):
|
||||
self,
|
||||
params: dict[str, Any],
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
from_task: Task | None = None,
|
||||
from_agent: Agent | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | Any:
|
||||
"""Handle non-streaming message completion."""
|
||||
@@ -319,7 +327,7 @@ class AnthropicCompletion(BaseLLM):
|
||||
params["tool_choice"] = {"type": "tool", "name": "structured_output"}
|
||||
|
||||
try:
|
||||
response: Message = self.client.messages.create(**params)
|
||||
response: Message = self._client.messages.create(**params)
|
||||
|
||||
except Exception as e:
|
||||
if is_context_length_exceeded(e):
|
||||
@@ -391,8 +399,8 @@ class AnthropicCompletion(BaseLLM):
|
||||
self,
|
||||
params: dict[str, Any],
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
from_task: Task | None = None,
|
||||
from_agent: Agent | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str:
|
||||
"""Handle streaming message completion."""
|
||||
@@ -413,7 +421,7 @@ class AnthropicCompletion(BaseLLM):
|
||||
stream_params = {k: v for k, v in params.items() if k != "stream"}
|
||||
|
||||
# Make streaming API call
|
||||
with self.client.messages.stream(**stream_params) as stream:
|
||||
with self._client.messages.stream(**stream_params) as stream:
|
||||
for event in stream:
|
||||
if hasattr(event, "delta") and hasattr(event.delta, "text"):
|
||||
text_delta = event.delta.text
|
||||
@@ -487,8 +495,8 @@ class AnthropicCompletion(BaseLLM):
|
||||
tool_uses: list[ToolUseBlock],
|
||||
params: dict[str, Any],
|
||||
available_functions: dict[str, Any],
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
from_task: Task | None = None,
|
||||
from_agent: Agent | None = None,
|
||||
) -> str:
|
||||
"""Handle the complete tool use conversation flow.
|
||||
|
||||
@@ -541,7 +549,7 @@ class AnthropicCompletion(BaseLLM):
|
||||
|
||||
try:
|
||||
# Send tool results back to Claude for final response
|
||||
final_response: Message = self.client.messages.create(**follow_up_params)
|
||||
final_response: Message = self._client.messages.create(**follow_up_params)
|
||||
|
||||
# Track token usage for follow-up call
|
||||
follow_up_usage = self._extract_anthropic_token_usage(final_response)
|
||||
@@ -588,48 +596,24 @@ class AnthropicCompletion(BaseLLM):
|
||||
return tool_results[0]["content"]
|
||||
raise e
|
||||
|
||||
def supports_function_calling(self) -> bool:
|
||||
"""Check if the model supports function calling."""
|
||||
return self.supports_tools
|
||||
|
||||
def supports_stop_words(self) -> bool:
|
||||
"""Check if the model supports stop words."""
|
||||
return True # All Claude models support stop sequences
|
||||
|
||||
def get_context_window_size(self) -> int:
|
||||
"""Get the context window size for the model."""
|
||||
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO
|
||||
|
||||
# Context window sizes for Anthropic models
|
||||
context_windows = {
|
||||
"claude-3-5-sonnet": 200000,
|
||||
"claude-3-5-haiku": 200000,
|
||||
"claude-3-opus": 200000,
|
||||
"claude-3-sonnet": 200000,
|
||||
"claude-3-haiku": 200000,
|
||||
"claude-3-7-sonnet": 200000,
|
||||
"claude-2.1": 200000,
|
||||
"claude-2": 100000,
|
||||
"claude-instant": 100000,
|
||||
}
|
||||
|
||||
# Find the best match for the model name
|
||||
for model_prefix, size in context_windows.items():
|
||||
for model_prefix, size in ANTHROPIC_CONTEXT_WINDOWS.items():
|
||||
if self.model.startswith(model_prefix):
|
||||
return int(size * CONTEXT_WINDOW_USAGE_RATIO)
|
||||
|
||||
# Default context window size for Claude models
|
||||
return int(200000 * CONTEXT_WINDOW_USAGE_RATIO)
|
||||
|
||||
def _extract_anthropic_token_usage(self, response: Message) -> dict[str, Any]:
|
||||
@staticmethod
|
||||
def _extract_anthropic_token_usage(response: Message) -> dict[str, Any]:
|
||||
"""Extract token usage from Anthropic response."""
|
||||
if hasattr(response, "usage") and response.usage:
|
||||
if response.usage:
|
||||
usage = response.usage
|
||||
input_tokens = getattr(usage, "input_tokens", 0)
|
||||
output_tokens = getattr(usage, "output_tokens", 0)
|
||||
return {
|
||||
"input_tokens": input_tokens,
|
||||
"output_tokens": output_tokens,
|
||||
"total_tokens": input_tokens + output_tokens,
|
||||
"input_tokens": usage.input_tokens,
|
||||
"output_tokens": usage.output_tokens,
|
||||
"total_tokens": usage.input_tokens + usage.output_tokens,
|
||||
}
|
||||
return {"total_tokens": 0}
|
||||
|
||||
@@ -3,7 +3,7 @@ from __future__ import annotations
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from typing import Any, TYPE_CHECKING
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
@@ -13,23 +13,25 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
)
|
||||
from crewai.utilities.types import LLMMessage
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.llms.hooks.base import BaseInterceptor
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
|
||||
|
||||
try:
|
||||
from azure.ai.inference import ( # type: ignore[import-not-found]
|
||||
from azure.ai.inference import (
|
||||
ChatCompletionsClient,
|
||||
)
|
||||
from azure.ai.inference.models import ( # type: ignore[import-not-found]
|
||||
from azure.ai.inference.models import (
|
||||
ChatCompletions,
|
||||
ChatCompletionsToolCall,
|
||||
StreamingChatCompletionsUpdate,
|
||||
)
|
||||
from azure.core.credentials import ( # type: ignore[import-not-found]
|
||||
from azure.core.credentials import (
|
||||
AzureKeyCredential,
|
||||
)
|
||||
from azure.core.exceptions import ( # type: ignore[import-not-found]
|
||||
from azure.core.exceptions import (
|
||||
HttpResponseError,
|
||||
)
|
||||
|
||||
@@ -64,7 +66,8 @@ class AzureCompletion(BaseLLM):
|
||||
max_tokens: int | None = None,
|
||||
stop: list[str] | None = None,
|
||||
stream: bool = False,
|
||||
**kwargs,
|
||||
interceptor: BaseInterceptor[Any, Any] | None = None,
|
||||
**kwargs: Any,
|
||||
):
|
||||
"""Initialize Azure AI Inference chat completion client.
|
||||
|
||||
@@ -82,8 +85,15 @@ class AzureCompletion(BaseLLM):
|
||||
max_tokens: Maximum tokens in response
|
||||
stop: Stop sequences
|
||||
stream: Enable streaming responses
|
||||
interceptor: HTTP interceptor (not yet supported for Azure).
|
||||
**kwargs: Additional parameters
|
||||
"""
|
||||
if interceptor is not None:
|
||||
raise NotImplementedError(
|
||||
"HTTP interceptors are not yet supported for Azure AI Inference provider. "
|
||||
"Interceptors are currently supported for OpenAI and Anthropic providers only."
|
||||
)
|
||||
|
||||
super().__init__(
|
||||
model=model, temperature=temperature, stop=stop or [], **kwargs
|
||||
)
|
||||
@@ -121,7 +131,7 @@ class AzureCompletion(BaseLLM):
|
||||
if self.api_version:
|
||||
client_kwargs["api_version"] = self.api_version
|
||||
|
||||
self.client = ChatCompletionsClient(**client_kwargs)
|
||||
self.client = ChatCompletionsClient(**client_kwargs) # type: ignore[arg-type]
|
||||
|
||||
self.top_p = top_p
|
||||
self.frequency_penalty = frequency_penalty
|
||||
@@ -249,7 +259,7 @@ class AzureCompletion(BaseLLM):
|
||||
def _prepare_completion_params(
|
||||
self,
|
||||
messages: list[LLMMessage],
|
||||
tools: list[dict] | None = None,
|
||||
tools: list[dict[str, Any]] | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Prepare parameters for Azure AI Inference chat completion.
|
||||
@@ -302,7 +312,9 @@ class AzureCompletion(BaseLLM):
|
||||
|
||||
return params
|
||||
|
||||
def _convert_tools_for_interference(self, tools: list[dict]) -> list[dict]:
|
||||
def _convert_tools_for_interference(
|
||||
self, tools: list[dict[str, Any]]
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Convert CrewAI tool format to Azure OpenAI function calling format."""
|
||||
|
||||
from crewai.llms.providers.utils.common import safe_tool_conversion
|
||||
|
||||
@@ -30,6 +30,8 @@ if TYPE_CHECKING:
|
||||
ToolTypeDef,
|
||||
)
|
||||
|
||||
from crewai.llms.hooks.base import BaseInterceptor
|
||||
|
||||
|
||||
try:
|
||||
from boto3.session import Session
|
||||
@@ -157,8 +159,9 @@ class BedrockCompletion(BaseLLM):
|
||||
guardrail_config: dict[str, Any] | None = None,
|
||||
additional_model_request_fields: dict[str, Any] | None = None,
|
||||
additional_model_response_field_paths: list[str] | None = None,
|
||||
**kwargs,
|
||||
):
|
||||
interceptor: BaseInterceptor[Any, Any] | None = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize AWS Bedrock completion client.
|
||||
|
||||
Args:
|
||||
@@ -176,8 +179,15 @@ class BedrockCompletion(BaseLLM):
|
||||
guardrail_config: Guardrail configuration for content filtering
|
||||
additional_model_request_fields: Model-specific request parameters
|
||||
additional_model_response_field_paths: Custom response field paths
|
||||
interceptor: HTTP interceptor (not yet supported for Bedrock).
|
||||
**kwargs: Additional parameters
|
||||
"""
|
||||
if interceptor is not None:
|
||||
raise NotImplementedError(
|
||||
"HTTP interceptors are not yet supported for AWS Bedrock provider. "
|
||||
"Interceptors are currently supported for OpenAI and Anthropic providers only."
|
||||
)
|
||||
|
||||
# Extract provider from kwargs to avoid duplicate argument
|
||||
kwargs.pop("provider", None)
|
||||
|
||||
@@ -233,6 +243,30 @@ class BedrockCompletion(BaseLLM):
|
||||
# Handle inference profiles for newer models
|
||||
self.model_id = model
|
||||
|
||||
@property
|
||||
def stop(self) -> list[str]:
|
||||
"""Get stop sequences sent to the API."""
|
||||
return list(self.stop_sequences)
|
||||
|
||||
@stop.setter
|
||||
def stop(self, value: Sequence[str] | str | None) -> None:
|
||||
"""Set stop sequences.
|
||||
|
||||
Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
|
||||
are properly sent to the Bedrock API.
|
||||
|
||||
Args:
|
||||
value: Stop sequences as a Sequence, single string, or None
|
||||
"""
|
||||
if value is None:
|
||||
self.stop_sequences = []
|
||||
elif isinstance(value, str):
|
||||
self.stop_sequences = [value]
|
||||
elif isinstance(value, Sequence):
|
||||
self.stop_sequences = list(value)
|
||||
else:
|
||||
self.stop_sequences = []
|
||||
|
||||
def call(
|
||||
self,
|
||||
messages: str | list[LLMMessage],
|
||||
@@ -247,7 +281,7 @@ class BedrockCompletion(BaseLLM):
|
||||
try:
|
||||
# Emit call started event
|
||||
self._emit_call_started_event(
|
||||
messages=messages, # type: ignore[arg-type]
|
||||
messages=messages,
|
||||
tools=tools,
|
||||
callbacks=callbacks,
|
||||
available_functions=available_functions,
|
||||
@@ -740,7 +774,9 @@ class BedrockCompletion(BaseLLM):
|
||||
return converse_messages, system_message
|
||||
|
||||
@staticmethod
|
||||
def _format_tools_for_converse(tools: list[dict]) -> list[ConverseToolTypeDef]:
|
||||
def _format_tools_for_converse(
|
||||
tools: list[dict[str, Any]],
|
||||
) -> list[ConverseToolTypeDef]:
|
||||
"""Convert CrewAI tools to Converse API format following AWS specification."""
|
||||
from crewai.llms.providers.utils.common import safe_tool_conversion
|
||||
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from typing import Any, cast
|
||||
from __future__ import annotations
|
||||
|
||||
from pydantic import BaseModel
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any, cast
|
||||
|
||||
from pydantic import BaseModel, Field, PrivateAttr, computed_field, model_validator
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.events.types.llm_events import LLMCallType
|
||||
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.utilities.agent_utils import is_context_length_exceeded
|
||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
@@ -14,6 +16,11 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
from crewai.utilities.types import LLMMessage
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agent import Agent
|
||||
from crewai.task import Task
|
||||
|
||||
|
||||
try:
|
||||
from google import genai # type: ignore[import-untyped]
|
||||
from google.genai import types # type: ignore[import-untyped]
|
||||
@@ -24,6 +31,27 @@ except ImportError:
|
||||
) from None
|
||||
|
||||
|
||||
GEMINI_CONTEXT_WINDOWS: dict[str, int] = {
|
||||
"gemini-2.0-flash": 1048576, # 1M tokens
|
||||
"gemini-2.0-flash-thinking": 32768,
|
||||
"gemini-2.0-flash-lite": 1048576,
|
||||
"gemini-2.5-flash": 1048576,
|
||||
"gemini-2.5-pro": 1048576,
|
||||
"gemini-1.5-pro": 2097152, # 2M tokens
|
||||
"gemini-1.5-flash": 1048576,
|
||||
"gemini-1.5-flash-8b": 1048576,
|
||||
"gemini-1.0-pro": 32768,
|
||||
"gemma-3-1b": 32000,
|
||||
"gemma-3-4b": 128000,
|
||||
"gemma-3-12b": 128000,
|
||||
"gemma-3-27b": 128000,
|
||||
}
|
||||
|
||||
# Context window validation constraints
|
||||
MIN_CONTEXT_WINDOW: int = 1024
|
||||
MAX_CONTEXT_WINDOW: int = 2097152
|
||||
|
||||
|
||||
class GeminiCompletion(BaseLLM):
|
||||
"""Google Gemini native completion implementation.
|
||||
|
||||
@@ -31,72 +59,166 @@ class GeminiCompletion(BaseLLM):
|
||||
offering native function calling, streaming support, and proper Gemini formatting.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: str = "gemini-2.0-flash-001",
|
||||
api_key: str | None = None,
|
||||
project: str | None = None,
|
||||
location: str | None = None,
|
||||
temperature: float | None = None,
|
||||
top_p: float | None = None,
|
||||
top_k: int | None = None,
|
||||
max_output_tokens: int | None = None,
|
||||
stop_sequences: list[str] | None = None,
|
||||
stream: bool = False,
|
||||
safety_settings: dict[str, Any] | None = None,
|
||||
client_params: dict[str, Any] | None = None,
|
||||
**kwargs,
|
||||
):
|
||||
"""Initialize Google Gemini chat completion client.
|
||||
model: str = Field(
|
||||
default="gemini-2.0-flash-001",
|
||||
description="Gemini model name (e.g., 'gemini-2.0-flash-001', 'gemini-1.5-pro')",
|
||||
)
|
||||
project: str | None = Field(
|
||||
default=None,
|
||||
description="Google Cloud project ID (for Vertex AI)",
|
||||
)
|
||||
location: str = Field(
|
||||
default="us-central1",
|
||||
description="Google Cloud location (for Vertex AI)",
|
||||
)
|
||||
top_p: float | None = Field(
|
||||
default=None,
|
||||
description="Nucleus sampling parameter",
|
||||
)
|
||||
top_k: int | None = Field(
|
||||
default=None,
|
||||
description="Top-k sampling parameter",
|
||||
)
|
||||
max_output_tokens: int | None = Field(
|
||||
default=None,
|
||||
description="Maximum tokens in response",
|
||||
)
|
||||
safety_settings: dict[str, Any] | None = Field(
|
||||
default=None,
|
||||
description="Safety filter settings",
|
||||
)
|
||||
_client: genai.Client = PrivateAttr( # type: ignore[no-any-unimported]
|
||||
default_factory=genai.Client,
|
||||
)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def initialize_client(self) -> Self:
|
||||
"""Initialize the Anthropic client after Pydantic validation.
|
||||
|
||||
This runs after all field validation is complete, ensuring that:
|
||||
- All BaseLLM fields are set (model, temperature, stop_sequences, etc.)
|
||||
- Field validators have run (stop_sequences is normalized to set[str])
|
||||
- API key and other configuration is ready
|
||||
"""
|
||||
self._client = genai.Client(**self._get_client_params())
|
||||
return self
|
||||
|
||||
# def __init__(
|
||||
# self,
|
||||
# model: str = "gemini-2.0-flash-001",
|
||||
# api_key: str | None = None,
|
||||
# project: str | None = None,
|
||||
# location: str | None = None,
|
||||
# temperature: float | None = None,
|
||||
# top_p: float | None = None,
|
||||
# top_k: int | None = None,
|
||||
# max_output_tokens: int | None = None,
|
||||
# stop_sequences: list[str] | None = None,
|
||||
# stream: bool = False,
|
||||
# safety_settings: dict[str, Any] | None = None,
|
||||
# client_params: dict[str, Any] | None = None,
|
||||
# interceptor: BaseInterceptor[Any, Any] | None = None,
|
||||
# **kwargs: Any,
|
||||
# # ):
|
||||
# """Initialize Google Gemini chat completion client.
|
||||
#
|
||||
# Args:
|
||||
# model: Gemini model name (e.g., 'gemini-2.0-flash-001', 'gemini-1.5-pro')
|
||||
# api_key: Google API key (defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var)
|
||||
# project: Google Cloud project ID (for Vertex AI)
|
||||
# location: Google Cloud location (for Vertex AI, defaults to 'us-central1')
|
||||
# temperature: Sampling temperature (0-2)
|
||||
# top_p: Nucleus sampling parameter
|
||||
# top_k: Top-k sampling parameter
|
||||
# max_output_tokens: Maximum tokens in response
|
||||
# stop_sequences: Stop sequences
|
||||
# stream: Enable streaming responses
|
||||
# safety_settings: Safety filter settings
|
||||
# client_params: Additional parameters to pass to the Google Gen AI Client constructor.
|
||||
# Supports parameters like http_options, credentials, debug_config, etc.
|
||||
# interceptor: HTTP interceptor (not yet supported for Gemini).
|
||||
# **kwargs: Additional parameters
|
||||
# """
|
||||
# if interceptor is not None:
|
||||
# raise NotImplementedError(
|
||||
# "HTTP interceptors are not yet supported for Google Gemini provider. "
|
||||
# "Interceptors are currently supported for OpenAI and Anthropic providers only."
|
||||
# )
|
||||
#
|
||||
# super().__init__(
|
||||
# model=model, temperature=temperature, stop=stop_sequences or [], **kwargs
|
||||
# )
|
||||
#
|
||||
# # Store client params for later use
|
||||
# self.client_params = client_params or {}
|
||||
#
|
||||
# # Get API configuration with environment variable fallbacks
|
||||
# self.api_key = (
|
||||
# api_key or os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
|
||||
# )
|
||||
# self.project = project or os.getenv("GOOGLE_CLOUD_PROJECT")
|
||||
# self.location = location or os.getenv("GOOGLE_CLOUD_LOCATION") or "us-central1"
|
||||
#
|
||||
# use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true"
|
||||
#
|
||||
# self.client = self._initialize_client(use_vertexai)
|
||||
#
|
||||
# # Store completion parameters
|
||||
# self.top_p = top_p
|
||||
# self.top_k = top_k
|
||||
# self.max_output_tokens = max_output_tokens
|
||||
# self.stream = stream
|
||||
# self.safety_settings = safety_settings or {}
|
||||
# self.stop_sequences = stop_sequences or []
|
||||
#
|
||||
# # Model-specific settings
|
||||
# self.is_gemini_2 = "gemini-2" in model.lower()
|
||||
# self.is_gemini_1_5 = "gemini-1.5" in model.lower()
|
||||
# self.supports_tools = self.is_gemini_1_5 or self.is_gemini_2
|
||||
|
||||
@computed_field # type: ignore[prop-decorator]
|
||||
@property
|
||||
def is_gemini_2(self) -> bool:
|
||||
"""Check if the model is Gemini 2.x."""
|
||||
return "gemini-2" in self.model.lower()
|
||||
|
||||
@computed_field # type: ignore[prop-decorator]
|
||||
@property
|
||||
def is_gemini_1_5(self) -> bool:
|
||||
"""Check if the model is Gemini 1.5.x."""
|
||||
return "gemini-1.5" in self.model.lower()
|
||||
|
||||
@computed_field # type: ignore[prop-decorator]
|
||||
@property
|
||||
def supports_tools(self) -> bool:
|
||||
"""Check if the model supports tool/function calling."""
|
||||
return self.is_gemini_1_5 or self.is_gemini_2
|
||||
|
||||
@property
|
||||
def stop(self) -> list[str]:
|
||||
"""Get stop sequences sent to the API."""
|
||||
return self.stop_sequences
|
||||
|
||||
@stop.setter
|
||||
def stop(self, value: list[str] | str | None) -> None:
|
||||
"""Set stop sequences.
|
||||
|
||||
Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
|
||||
are properly sent to the Gemini API.
|
||||
|
||||
Args:
|
||||
model: Gemini model name (e.g., 'gemini-2.0-flash-001', 'gemini-1.5-pro')
|
||||
api_key: Google API key (defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var)
|
||||
project: Google Cloud project ID (for Vertex AI)
|
||||
location: Google Cloud location (for Vertex AI, defaults to 'us-central1')
|
||||
temperature: Sampling temperature (0-2)
|
||||
top_p: Nucleus sampling parameter
|
||||
top_k: Top-k sampling parameter
|
||||
max_output_tokens: Maximum tokens in response
|
||||
stop_sequences: Stop sequences
|
||||
stream: Enable streaming responses
|
||||
safety_settings: Safety filter settings
|
||||
client_params: Additional parameters to pass to the Google Gen AI Client constructor.
|
||||
Supports parameters like http_options, credentials, debug_config, etc.
|
||||
**kwargs: Additional parameters
|
||||
value: Stop sequences as a list, single string, or None
|
||||
"""
|
||||
super().__init__(
|
||||
model=model, temperature=temperature, stop=stop_sequences or [], **kwargs
|
||||
)
|
||||
if value is None:
|
||||
self.stop_sequences = []
|
||||
elif isinstance(value, str):
|
||||
self.stop_sequences = [value]
|
||||
elif isinstance(value, list):
|
||||
self.stop_sequences = value
|
||||
else:
|
||||
self.stop_sequences = []
|
||||
|
||||
# Store client params for later use
|
||||
self.client_params = client_params or {}
|
||||
|
||||
# Get API configuration with environment variable fallbacks
|
||||
self.api_key = (
|
||||
api_key or os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
|
||||
)
|
||||
self.project = project or os.getenv("GOOGLE_CLOUD_PROJECT")
|
||||
self.location = location or os.getenv("GOOGLE_CLOUD_LOCATION") or "us-central1"
|
||||
|
||||
use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true"
|
||||
|
||||
self.client = self._initialize_client(use_vertexai)
|
||||
|
||||
# Store completion parameters
|
||||
self.top_p = top_p
|
||||
self.top_k = top_k
|
||||
self.max_output_tokens = max_output_tokens
|
||||
self.stream = stream
|
||||
self.safety_settings = safety_settings or {}
|
||||
self.stop_sequences = stop_sequences or []
|
||||
|
||||
# Model-specific settings
|
||||
self.is_gemini_2 = "gemini-2" in model.lower()
|
||||
self.is_gemini_1_5 = "gemini-1.5" in model.lower()
|
||||
self.supports_tools = self.is_gemini_1_5 or self.is_gemini_2
|
||||
|
||||
def _initialize_client(self, use_vertexai: bool = False) -> genai.Client:
|
||||
def _initialize_client(self, use_vertexai: bool = False) -> genai.Client: # type: ignore[no-any-unimported]
|
||||
"""Initialize the Google Gen AI client with proper parameter handling.
|
||||
|
||||
Args:
|
||||
@@ -110,6 +232,12 @@ class GeminiCompletion(BaseLLM):
|
||||
if self.client_params:
|
||||
client_params.update(self.client_params)
|
||||
|
||||
if self.interceptor:
|
||||
raise NotImplementedError(
|
||||
"HTTP interceptors are not yet supported for Google Gemini provider. "
|
||||
"Interceptors are currently supported for OpenAI and Anthropic providers only."
|
||||
)
|
||||
|
||||
if use_vertexai or self.project:
|
||||
client_params.update(
|
||||
{
|
||||
@@ -149,7 +277,7 @@ class GeminiCompletion(BaseLLM):
|
||||
|
||||
if (
|
||||
hasattr(self, "client")
|
||||
and hasattr(self.client, "vertexai")
|
||||
and hasattr(self._client, "vertexai")
|
||||
and self.client.vertexai
|
||||
):
|
||||
# Vertex AI configuration
|
||||
@@ -171,11 +299,11 @@ class GeminiCompletion(BaseLLM):
|
||||
def call(
|
||||
self,
|
||||
messages: str | list[LLMMessage],
|
||||
tools: list[dict] | None = None,
|
||||
tools: list[dict[str, Any]] | None = None,
|
||||
callbacks: list[Any] | None = None,
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
from_task: Task | None = None,
|
||||
from_agent: Agent | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | Any:
|
||||
"""Call Google Gemini generate content API.
|
||||
@@ -193,7 +321,7 @@ class GeminiCompletion(BaseLLM):
|
||||
"""
|
||||
try:
|
||||
self._emit_call_started_event(
|
||||
messages=messages, # type: ignore[arg-type]
|
||||
messages=messages,
|
||||
tools=tools,
|
||||
callbacks=callbacks,
|
||||
available_functions=available_functions,
|
||||
@@ -203,7 +331,7 @@ class GeminiCompletion(BaseLLM):
|
||||
self.tools = tools
|
||||
|
||||
formatted_content, system_instruction = self._format_messages_for_gemini(
|
||||
messages # type: ignore[arg-type]
|
||||
messages
|
||||
)
|
||||
|
||||
config = self._prepare_generation_config(
|
||||
@@ -245,10 +373,10 @@ class GeminiCompletion(BaseLLM):
|
||||
)
|
||||
raise
|
||||
|
||||
def _prepare_generation_config(
|
||||
def _prepare_generation_config( # type: ignore[no-any-unimported]
|
||||
self,
|
||||
system_instruction: str | None = None,
|
||||
tools: list[dict] | None = None,
|
||||
tools: list[dict[str, Any]] | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> types.GenerateContentConfig:
|
||||
"""Prepare generation config for Google Gemini API.
|
||||
@@ -262,7 +390,16 @@ class GeminiCompletion(BaseLLM):
|
||||
GenerateContentConfig object for Gemini API
|
||||
"""
|
||||
self.tools = tools
|
||||
config_params = {}
|
||||
config_params = self.model_dump(
|
||||
include={
|
||||
"temperature",
|
||||
"top_p",
|
||||
"top_k",
|
||||
"max_output_tokens",
|
||||
"stop_sequences",
|
||||
"safety_settings",
|
||||
}
|
||||
)
|
||||
|
||||
# Add system instruction if present
|
||||
if system_instruction:
|
||||
@@ -272,18 +409,6 @@ class GeminiCompletion(BaseLLM):
|
||||
)
|
||||
config_params["system_instruction"] = system_content
|
||||
|
||||
# Add generation config parameters
|
||||
if self.temperature is not None:
|
||||
config_params["temperature"] = self.temperature
|
||||
if self.top_p is not None:
|
||||
config_params["top_p"] = self.top_p
|
||||
if self.top_k is not None:
|
||||
config_params["top_k"] = self.top_k
|
||||
if self.max_output_tokens is not None:
|
||||
config_params["max_output_tokens"] = self.max_output_tokens
|
||||
if self.stop_sequences:
|
||||
config_params["stop_sequences"] = self.stop_sequences
|
||||
|
||||
if response_model:
|
||||
config_params["response_mime_type"] = "application/json"
|
||||
config_params["response_schema"] = response_model.model_json_schema()
|
||||
@@ -292,12 +417,11 @@ class GeminiCompletion(BaseLLM):
|
||||
if tools and self.supports_tools:
|
||||
config_params["tools"] = self._convert_tools_for_interference(tools)
|
||||
|
||||
if self.safety_settings:
|
||||
config_params["safety_settings"] = self.safety_settings
|
||||
|
||||
return types.GenerateContentConfig(**config_params)
|
||||
|
||||
def _convert_tools_for_interference(self, tools: list[dict]) -> list[types.Tool]:
|
||||
def _convert_tools_for_interference( # type: ignore[no-any-unimported]
|
||||
self, tools: list[dict[str, Any]]
|
||||
) -> list[types.Tool]:
|
||||
"""Convert CrewAI tool format to Gemini function declaration format."""
|
||||
gemini_tools = []
|
||||
|
||||
@@ -320,7 +444,7 @@ class GeminiCompletion(BaseLLM):
|
||||
|
||||
return gemini_tools
|
||||
|
||||
def _format_messages_for_gemini(
|
||||
def _format_messages_for_gemini( # type: ignore[no-any-unimported]
|
||||
self, messages: str | list[LLMMessage]
|
||||
) -> tuple[list[types.Content], str | None]:
|
||||
"""Format messages for Gemini API.
|
||||
@@ -364,14 +488,14 @@ class GeminiCompletion(BaseLLM):
|
||||
|
||||
return contents, system_instruction
|
||||
|
||||
def _handle_completion(
|
||||
def _handle_completion( # type: ignore[no-any-unimported]
|
||||
self,
|
||||
contents: list[types.Content],
|
||||
system_instruction: str | None,
|
||||
config: types.GenerateContentConfig,
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
from_task: Task | None = None,
|
||||
from_agent: Agent | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | Any:
|
||||
"""Handle non-streaming content generation."""
|
||||
@@ -382,7 +506,7 @@ class GeminiCompletion(BaseLLM):
|
||||
}
|
||||
|
||||
try:
|
||||
response = self.client.models.generate_content(**api_params)
|
||||
response = self._client.models.generate_content(**api_params)
|
||||
|
||||
usage = self._extract_token_usage(response)
|
||||
except Exception as e:
|
||||
@@ -431,13 +555,13 @@ class GeminiCompletion(BaseLLM):
|
||||
|
||||
return content
|
||||
|
||||
def _handle_streaming_completion(
|
||||
def _handle_streaming_completion( # type: ignore[no-any-unimported]
|
||||
self,
|
||||
contents: list[types.Content],
|
||||
config: types.GenerateContentConfig,
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
from_task: Task | None = None,
|
||||
from_agent: Agent | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str:
|
||||
"""Handle streaming content generation."""
|
||||
@@ -450,7 +574,7 @@ class GeminiCompletion(BaseLLM):
|
||||
"config": config,
|
||||
}
|
||||
|
||||
for chunk in self.client.models.generate_content_stream(**api_params):
|
||||
for chunk in self._client.models.generate_content_stream(**api_params):
|
||||
if hasattr(chunk, "text") and chunk.text:
|
||||
full_response += chunk.text
|
||||
self._emit_stream_chunk_event(
|
||||
@@ -503,52 +627,30 @@ class GeminiCompletion(BaseLLM):
|
||||
|
||||
return full_response
|
||||
|
||||
@computed_field # type: ignore[prop-decorator]
|
||||
@property
|
||||
def supports_function_calling(self) -> bool:
|
||||
"""Check if the model supports function calling."""
|
||||
return self.supports_tools
|
||||
|
||||
def supports_stop_words(self) -> bool:
|
||||
"""Check if the model supports stop words."""
|
||||
return True
|
||||
|
||||
def get_context_window_size(self) -> int:
|
||||
"""Get the context window size for the model."""
|
||||
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
|
||||
|
||||
min_context = 1024
|
||||
max_context = 2097152
|
||||
|
||||
for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
|
||||
if value < min_context or value > max_context:
|
||||
if value < MIN_CONTEXT_WINDOW or value > MAX_CONTEXT_WINDOW:
|
||||
raise ValueError(
|
||||
f"Context window for {key} must be between {min_context} and {max_context}"
|
||||
f"Context window for {key} must be between {MIN_CONTEXT_WINDOW} and {MAX_CONTEXT_WINDOW}"
|
||||
)
|
||||
|
||||
context_windows = {
|
||||
"gemini-2.0-flash": 1048576, # 1M tokens
|
||||
"gemini-2.0-flash-thinking": 32768,
|
||||
"gemini-2.0-flash-lite": 1048576,
|
||||
"gemini-2.5-flash": 1048576,
|
||||
"gemini-2.5-pro": 1048576,
|
||||
"gemini-1.5-pro": 2097152, # 2M tokens
|
||||
"gemini-1.5-flash": 1048576,
|
||||
"gemini-1.5-flash-8b": 1048576,
|
||||
"gemini-1.0-pro": 32768,
|
||||
"gemma-3-1b": 32000,
|
||||
"gemma-3-4b": 128000,
|
||||
"gemma-3-12b": 128000,
|
||||
"gemma-3-27b": 128000,
|
||||
}
|
||||
|
||||
# Find the best match for the model name
|
||||
for model_prefix, size in context_windows.items():
|
||||
for model_prefix, size in GEMINI_CONTEXT_WINDOWS.items():
|
||||
if self.model.startswith(model_prefix):
|
||||
return int(size * CONTEXT_WINDOW_USAGE_RATIO)
|
||||
|
||||
# Default context window size for Gemini models
|
||||
return int(1048576 * CONTEXT_WINDOW_USAGE_RATIO) # 1M tokens
|
||||
|
||||
def _extract_token_usage(self, response: dict[str, Any]) -> dict[str, Any]:
|
||||
@staticmethod
|
||||
def _extract_token_usage(response: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Extract token usage from Gemini response."""
|
||||
if hasattr(response, "usage_metadata"):
|
||||
usage = response.usage_metadata
|
||||
@@ -560,8 +662,9 @@ class GeminiCompletion(BaseLLM):
|
||||
}
|
||||
return {"total_tokens": 0}
|
||||
|
||||
def _convert_contents_to_dict(
|
||||
self, contents: list[types.Content]
|
||||
@staticmethod
|
||||
def _convert_contents_to_dict( # type: ignore[no-any-unimported]
|
||||
contents: list[types.Content],
|
||||
) -> list[dict[str, str]]:
|
||||
"""Convert contents to dict format."""
|
||||
return [
|
||||
|
||||
@@ -4,16 +4,25 @@ from collections.abc import Iterator
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from typing import TYPE_CHECKING, Any
|
||||
from typing import TYPE_CHECKING, Any, Final
|
||||
|
||||
import httpx
|
||||
from openai import APIConnectionError, NotFoundError, OpenAI
|
||||
from openai.types.chat import ChatCompletion, ChatCompletionChunk
|
||||
from openai.types.chat.chat_completion import Choice
|
||||
from openai.types.chat.chat_completion_chunk import ChoiceDelta
|
||||
from pydantic import BaseModel
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
Field,
|
||||
PrivateAttr,
|
||||
model_validator,
|
||||
)
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.events.types.llm_events import LLMCallType
|
||||
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.llms.hooks.transport import HTTPTransport
|
||||
from crewai.utilities.agent_utils import is_context_length_exceeded
|
||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
LLMContextLengthExceededError,
|
||||
@@ -27,6 +36,24 @@ if TYPE_CHECKING:
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
|
||||
|
||||
OPENAI_CONTEXT_WINDOWS: dict[str, int] = {
|
||||
"gpt-4": 8192,
|
||||
"gpt-4o": 128000,
|
||||
"gpt-4o-mini": 200000,
|
||||
"gpt-4-turbo": 128000,
|
||||
"gpt-4.1": 1047576,
|
||||
"gpt-4.1-mini-2025-04-14": 1047576,
|
||||
"gpt-4.1-nano-2025-04-14": 1047576,
|
||||
"o1-preview": 128000,
|
||||
"o1-mini": 128000,
|
||||
"o3-mini": 200000,
|
||||
"o4-mini": 200000,
|
||||
}
|
||||
|
||||
MIN_CONTEXT_WINDOW: Final[int] = 1024
|
||||
MAX_CONTEXT_WINDOW: Final[int] = 2097152
|
||||
|
||||
|
||||
class OpenAICompletion(BaseLLM):
|
||||
"""OpenAI native completion implementation.
|
||||
|
||||
@@ -34,105 +61,125 @@ class OpenAICompletion(BaseLLM):
|
||||
offering native structured outputs, function calling, and streaming support.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: str = "gpt-4o",
|
||||
api_key: str | None = None,
|
||||
base_url: str | None = None,
|
||||
organization: str | None = None,
|
||||
project: str | None = None,
|
||||
timeout: float | None = None,
|
||||
max_retries: int = 2,
|
||||
default_headers: dict[str, str] | None = None,
|
||||
default_query: dict[str, Any] | None = None,
|
||||
client_params: dict[str, Any] | None = None,
|
||||
temperature: float | None = None,
|
||||
top_p: float | None = None,
|
||||
frequency_penalty: float | None = None,
|
||||
presence_penalty: float | None = None,
|
||||
max_tokens: int | None = None,
|
||||
max_completion_tokens: int | None = None,
|
||||
seed: int | None = None,
|
||||
stream: bool = False,
|
||||
response_format: dict[str, Any] | type[BaseModel] | None = None,
|
||||
logprobs: bool | None = None,
|
||||
top_logprobs: int | None = None,
|
||||
reasoning_effort: str | None = None,
|
||||
provider: str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize OpenAI chat completion client."""
|
||||
model: str = Field(
|
||||
default="gpt-4o",
|
||||
description="OpenAI model name (e.g., 'gpt-4o')",
|
||||
)
|
||||
organization: str | None = Field(
|
||||
default=None,
|
||||
description="Name of the OpenAI organization",
|
||||
)
|
||||
project: str | None = Field(
|
||||
default=None,
|
||||
description="Name of the OpenAI project",
|
||||
)
|
||||
api_base: str | None = Field(
|
||||
default=os.getenv("OPENAI_BASE_URL"),
|
||||
description="Base URL for OpenAI API",
|
||||
)
|
||||
default_headers: dict[str, str] | None = Field(
|
||||
default=None,
|
||||
description="Default headers for OpenAI API requests",
|
||||
)
|
||||
default_query: dict[str, Any] | None = Field(
|
||||
default=None,
|
||||
description="Default query parameters for OpenAI API requests",
|
||||
)
|
||||
top_p: float | None = Field(
|
||||
default=None,
|
||||
description="Top-p sampling parameter",
|
||||
)
|
||||
frequency_penalty: float | None = Field(
|
||||
default=None,
|
||||
description="Frequency penalty parameter",
|
||||
)
|
||||
presence_penalty: float | None = Field(
|
||||
default=None,
|
||||
description="Presence penalty parameter",
|
||||
)
|
||||
max_completion_tokens: int | None = Field(
|
||||
default=None,
|
||||
description="Maximum tokens for completion",
|
||||
)
|
||||
seed: int | None = Field(
|
||||
default=None,
|
||||
description="Random seed for reproducibility",
|
||||
)
|
||||
response_format: dict[str, Any] | type[BaseModel] | None = Field(
|
||||
default=None,
|
||||
description="Response format for structured output",
|
||||
)
|
||||
logprobs: bool | None = Field(
|
||||
default=None,
|
||||
description="Whether to include log probabilities",
|
||||
)
|
||||
top_logprobs: int | None = Field(
|
||||
default=None,
|
||||
description="Number of top log probabilities to return",
|
||||
)
|
||||
reasoning_effort: str | None = Field(
|
||||
default=None,
|
||||
description="Reasoning effort level for o1 models",
|
||||
)
|
||||
supports_function_calling: bool = Field(
|
||||
default=True,
|
||||
description="Whether the model supports function calling",
|
||||
)
|
||||
is_o1_model: bool = Field(
|
||||
default=False,
|
||||
description="Whether the model is an o1 model",
|
||||
)
|
||||
is_gpt4_model: bool = Field(
|
||||
default=False,
|
||||
description="Whether the model is a GPT-4 model",
|
||||
)
|
||||
_client: OpenAI = PrivateAttr(
|
||||
default_factory=OpenAI,
|
||||
)
|
||||
|
||||
if provider is None:
|
||||
provider = kwargs.pop("provider", "openai")
|
||||
|
||||
# Client configuration attributes
|
||||
self.organization = organization
|
||||
self.project = project
|
||||
self.max_retries = max_retries
|
||||
self.default_headers = default_headers
|
||||
self.default_query = default_query
|
||||
self.client_params = client_params
|
||||
self.timeout = timeout
|
||||
self.base_url = base_url
|
||||
self.api_base = kwargs.pop("api_base", None)
|
||||
|
||||
super().__init__(
|
||||
model=model,
|
||||
temperature=temperature,
|
||||
api_key=api_key or os.getenv("OPENAI_API_KEY"),
|
||||
base_url=base_url,
|
||||
timeout=timeout,
|
||||
provider=provider,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
client_config = self._get_client_params()
|
||||
self.client = OpenAI(**client_config)
|
||||
|
||||
# Completion parameters
|
||||
self.top_p = top_p
|
||||
self.frequency_penalty = frequency_penalty
|
||||
self.presence_penalty = presence_penalty
|
||||
self.max_tokens = max_tokens
|
||||
self.max_completion_tokens = max_completion_tokens
|
||||
self.seed = seed
|
||||
self.stream = stream
|
||||
self.response_format = response_format
|
||||
self.logprobs = logprobs
|
||||
self.top_logprobs = top_logprobs
|
||||
self.reasoning_effort = reasoning_effort
|
||||
self.is_o1_model = "o1" in model.lower()
|
||||
self.is_gpt4_model = "gpt-4" in model.lower()
|
||||
|
||||
def _get_client_params(self) -> dict[str, Any]:
|
||||
"""Get OpenAI client parameters."""
|
||||
@model_validator(mode="after")
|
||||
def initialize_client(self) -> Self:
|
||||
"""Initialize the Anthropic client after Pydantic validation.
|
||||
|
||||
This runs after all field validation is complete, ensuring that:
|
||||
- All BaseLLM fields are set (model, temperature, stop_sequences, etc.)
|
||||
- Field validators have run (stop_sequences is normalized to set[str])
|
||||
- API key and other configuration is ready
|
||||
"""
|
||||
if self.api_key is None:
|
||||
self.api_key = os.getenv("OPENAI_API_KEY")
|
||||
if self.api_key is None:
|
||||
raise ValueError("OPENAI_API_KEY is required")
|
||||
|
||||
base_params = {
|
||||
"api_key": self.api_key,
|
||||
"organization": self.organization,
|
||||
"project": self.project,
|
||||
"base_url": self.base_url
|
||||
or self.api_base
|
||||
or os.getenv("OPENAI_BASE_URL")
|
||||
or None,
|
||||
"timeout": self.timeout,
|
||||
"max_retries": self.max_retries,
|
||||
"default_headers": self.default_headers,
|
||||
"default_query": self.default_query,
|
||||
}
|
||||
self.is_o1_model = "o1" in self.model.lower()
|
||||
self.supports_function_calling = not self.is_o1_model
|
||||
self.is_gpt4_model = "gpt-4" in self.model.lower()
|
||||
self.supports_stop_words = not self.is_o1_model
|
||||
|
||||
client_params = {k: v for k, v in base_params.items() if v is not None}
|
||||
params = self.model_dump(
|
||||
include={
|
||||
"api_key",
|
||||
"organization",
|
||||
"project",
|
||||
"base_url",
|
||||
"timeout",
|
||||
"max_retries",
|
||||
"default_headers",
|
||||
"default_query",
|
||||
},
|
||||
exclude_none=True,
|
||||
)
|
||||
|
||||
if self.interceptor:
|
||||
transport = HTTPTransport(interceptor=self.interceptor)
|
||||
http_client = httpx.Client(transport=transport)
|
||||
params["http_client"] = http_client
|
||||
|
||||
if self.client_params:
|
||||
client_params.update(self.client_params)
|
||||
params.update(self.client_params)
|
||||
|
||||
return client_params
|
||||
self._client = OpenAI(**params)
|
||||
return self
|
||||
|
||||
def call(
|
||||
self,
|
||||
@@ -203,38 +250,26 @@ class OpenAICompletion(BaseLLM):
|
||||
self, messages: list[LLMMessage], tools: list[dict[str, BaseTool]] | None = None
|
||||
) -> dict[str, Any]:
|
||||
"""Prepare parameters for OpenAI chat completion."""
|
||||
params: dict[str, Any] = {
|
||||
"model": self.model,
|
||||
"messages": messages,
|
||||
}
|
||||
if self.stream:
|
||||
params["stream"] = self.stream
|
||||
|
||||
params = self.model_dump(
|
||||
include={
|
||||
"model",
|
||||
"stream",
|
||||
"temperature",
|
||||
"top_p",
|
||||
"frequency_penalty",
|
||||
"presence_penalty",
|
||||
"max_completion_tokens",
|
||||
"max_tokens",
|
||||
"seed",
|
||||
"logprobs",
|
||||
"top_logprobs",
|
||||
"reasoning_effort",
|
||||
},
|
||||
exclude_none=True,
|
||||
)
|
||||
params["messages"] = messages
|
||||
params.update(self.additional_params)
|
||||
|
||||
if self.temperature is not None:
|
||||
params["temperature"] = self.temperature
|
||||
if self.top_p is not None:
|
||||
params["top_p"] = self.top_p
|
||||
if self.frequency_penalty is not None:
|
||||
params["frequency_penalty"] = self.frequency_penalty
|
||||
if self.presence_penalty is not None:
|
||||
params["presence_penalty"] = self.presence_penalty
|
||||
if self.max_completion_tokens is not None:
|
||||
params["max_completion_tokens"] = self.max_completion_tokens
|
||||
elif self.max_tokens is not None:
|
||||
params["max_tokens"] = self.max_tokens
|
||||
if self.seed is not None:
|
||||
params["seed"] = self.seed
|
||||
if self.logprobs is not None:
|
||||
params["logprobs"] = self.logprobs
|
||||
if self.top_logprobs is not None:
|
||||
params["top_logprobs"] = self.top_logprobs
|
||||
|
||||
# Handle o1 model specific parameters
|
||||
if self.is_o1_model and self.reasoning_effort:
|
||||
params["reasoning_effort"] = self.reasoning_effort
|
||||
|
||||
if tools:
|
||||
params["tools"] = self._convert_tools_for_interference(tools)
|
||||
params["tool_choice"] = "auto"
|
||||
@@ -286,14 +321,14 @@ class OpenAICompletion(BaseLLM):
|
||||
self,
|
||||
params: dict[str, Any],
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
from_task: Task | None = None,
|
||||
from_agent: Agent | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | Any:
|
||||
"""Handle non-streaming chat completion."""
|
||||
try:
|
||||
if response_model:
|
||||
parsed_response = self.client.beta.chat.completions.parse(
|
||||
parsed_response = self._client.beta.chat.completions.parse(
|
||||
**params,
|
||||
response_format=response_model,
|
||||
)
|
||||
@@ -317,7 +352,7 @@ class OpenAICompletion(BaseLLM):
|
||||
)
|
||||
return structured_json
|
||||
|
||||
response: ChatCompletion = self.client.chat.completions.create(**params)
|
||||
response: ChatCompletion = self._client.chat.completions.create(**params)
|
||||
|
||||
usage = self._extract_openai_token_usage(response)
|
||||
|
||||
@@ -409,8 +444,8 @@ class OpenAICompletion(BaseLLM):
|
||||
self,
|
||||
params: dict[str, Any],
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
from_task: Task | None = None,
|
||||
from_agent: Agent | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str:
|
||||
"""Handle streaming chat completion."""
|
||||
@@ -419,7 +454,7 @@ class OpenAICompletion(BaseLLM):
|
||||
|
||||
if response_model:
|
||||
completion_stream: Iterator[ChatCompletionChunk] = (
|
||||
self.client.chat.completions.create(**params)
|
||||
self._client.chat.completions.create(**params)
|
||||
)
|
||||
|
||||
accumulated_content = ""
|
||||
@@ -462,7 +497,7 @@ class OpenAICompletion(BaseLLM):
|
||||
)
|
||||
return accumulated_content
|
||||
|
||||
stream: Iterator[ChatCompletionChunk] = self.client.chat.completions.create(
|
||||
stream: Iterator[ChatCompletionChunk] = self._client.chat.completions.create(
|
||||
**params
|
||||
)
|
||||
|
||||
@@ -540,58 +575,31 @@ class OpenAICompletion(BaseLLM):
|
||||
|
||||
return full_response
|
||||
|
||||
def supports_function_calling(self) -> bool:
|
||||
"""Check if the model supports function calling."""
|
||||
return not self.is_o1_model
|
||||
|
||||
def supports_stop_words(self) -> bool:
|
||||
"""Check if the model supports stop words."""
|
||||
return not self.is_o1_model
|
||||
|
||||
def get_context_window_size(self) -> int:
|
||||
"""Get the context window size for the model."""
|
||||
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
|
||||
|
||||
min_context = 1024
|
||||
max_context = 2097152
|
||||
|
||||
for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
|
||||
if value < min_context or value > max_context:
|
||||
if value < MIN_CONTEXT_WINDOW or value > MAX_CONTEXT_WINDOW:
|
||||
raise ValueError(
|
||||
f"Context window for {key} must be between {min_context} and {max_context}"
|
||||
f"Context window for {key} must be between {MIN_CONTEXT_WINDOW} and {MAX_CONTEXT_WINDOW}"
|
||||
)
|
||||
|
||||
# Context window sizes for OpenAI models
|
||||
context_windows = {
|
||||
"gpt-4": 8192,
|
||||
"gpt-4o": 128000,
|
||||
"gpt-4o-mini": 200000,
|
||||
"gpt-4-turbo": 128000,
|
||||
"gpt-4.1": 1047576,
|
||||
"gpt-4.1-mini-2025-04-14": 1047576,
|
||||
"gpt-4.1-nano-2025-04-14": 1047576,
|
||||
"o1-preview": 128000,
|
||||
"o1-mini": 128000,
|
||||
"o3-mini": 200000,
|
||||
"o4-mini": 200000,
|
||||
}
|
||||
|
||||
# Find the best match for the model name
|
||||
for model_prefix, size in context_windows.items():
|
||||
for model_prefix, size in OPENAI_CONTEXT_WINDOWS.items():
|
||||
if self.model.startswith(model_prefix):
|
||||
return int(size * CONTEXT_WINDOW_USAGE_RATIO)
|
||||
|
||||
# Default context window size
|
||||
return int(8192 * CONTEXT_WINDOW_USAGE_RATIO)
|
||||
|
||||
def _extract_openai_token_usage(self, response: ChatCompletion) -> dict[str, Any]:
|
||||
@staticmethod
|
||||
def _extract_openai_token_usage(response: ChatCompletion) -> dict[str, Any]:
|
||||
"""Extract token usage from OpenAI ChatCompletion response."""
|
||||
if hasattr(response, "usage") and response.usage:
|
||||
if response.usage:
|
||||
usage = response.usage
|
||||
return {
|
||||
"prompt_tokens": getattr(usage, "prompt_tokens", 0),
|
||||
"completion_tokens": getattr(usage, "completion_tokens", 0),
|
||||
"total_tokens": getattr(usage, "total_tokens", 0),
|
||||
"prompt_tokens": usage.prompt_tokens,
|
||||
"completion_tokens": usage.completion_tokens,
|
||||
"total_tokens": usage.total_tokens,
|
||||
}
|
||||
return {"total_tokens": 0}
|
||||
|
||||
|
||||
37
lib/crewai/src/crewai/mcp/__init__.py
Normal file
37
lib/crewai/src/crewai/mcp/__init__.py
Normal file
@@ -0,0 +1,37 @@
|
||||
"""MCP (Model Context Protocol) client support for CrewAI agents.
|
||||
|
||||
This module provides native MCP client functionality, allowing CrewAI agents
|
||||
to connect to any MCP-compliant server using various transport types.
|
||||
"""
|
||||
|
||||
from crewai.mcp.client import MCPClient
|
||||
from crewai.mcp.config import (
|
||||
MCPServerConfig,
|
||||
MCPServerHTTP,
|
||||
MCPServerSSE,
|
||||
MCPServerStdio,
|
||||
)
|
||||
from crewai.mcp.filters import (
|
||||
StaticToolFilter,
|
||||
ToolFilter,
|
||||
ToolFilterContext,
|
||||
create_dynamic_tool_filter,
|
||||
create_static_tool_filter,
|
||||
)
|
||||
from crewai.mcp.transports.base import BaseTransport, TransportType
|
||||
|
||||
|
||||
__all__ = [
|
||||
"BaseTransport",
|
||||
"MCPClient",
|
||||
"MCPServerConfig",
|
||||
"MCPServerHTTP",
|
||||
"MCPServerSSE",
|
||||
"MCPServerStdio",
|
||||
"StaticToolFilter",
|
||||
"ToolFilter",
|
||||
"ToolFilterContext",
|
||||
"TransportType",
|
||||
"create_dynamic_tool_filter",
|
||||
"create_static_tool_filter",
|
||||
]
|
||||
742
lib/crewai/src/crewai/mcp/client.py
Normal file
742
lib/crewai/src/crewai/mcp/client.py
Normal file
@@ -0,0 +1,742 @@
|
||||
"""MCP client with session management for CrewAI agents."""
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Callable
|
||||
from contextlib import AsyncExitStack
|
||||
from datetime import datetime
|
||||
import logging
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
from typing_extensions import Self
|
||||
|
||||
|
||||
# BaseExceptionGroup is available in Python 3.11+
|
||||
try:
|
||||
from builtins import BaseExceptionGroup
|
||||
except ImportError:
|
||||
# Fallback for Python < 3.11 (shouldn't happen in practice)
|
||||
BaseExceptionGroup = Exception
|
||||
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.mcp_events import (
|
||||
MCPConnectionCompletedEvent,
|
||||
MCPConnectionFailedEvent,
|
||||
MCPConnectionStartedEvent,
|
||||
MCPToolExecutionCompletedEvent,
|
||||
MCPToolExecutionFailedEvent,
|
||||
MCPToolExecutionStartedEvent,
|
||||
)
|
||||
from crewai.mcp.transports.base import BaseTransport
|
||||
from crewai.mcp.transports.http import HTTPTransport
|
||||
from crewai.mcp.transports.sse import SSETransport
|
||||
from crewai.mcp.transports.stdio import StdioTransport
|
||||
|
||||
|
||||
# MCP Connection timeout constants (in seconds)
|
||||
MCP_CONNECTION_TIMEOUT = 30 # Increased for slow servers
|
||||
MCP_TOOL_EXECUTION_TIMEOUT = 30
|
||||
MCP_DISCOVERY_TIMEOUT = 30 # Increased for slow servers
|
||||
MCP_MAX_RETRIES = 3
|
||||
|
||||
# Simple in-memory cache for MCP tool schemas (duration: 5 minutes)
|
||||
_mcp_schema_cache: dict[str, tuple[dict[str, Any], float]] = {}
|
||||
_cache_ttl = 300 # 5 minutes
|
||||
|
||||
|
||||
class MCPClient:
|
||||
"""MCP client with session management.
|
||||
|
||||
This client manages connections to MCP servers and provides a high-level
|
||||
interface for interacting with MCP tools, prompts, and resources.
|
||||
|
||||
Example:
|
||||
```python
|
||||
transport = StdioTransport(command="python", args=["server.py"])
|
||||
client = MCPClient(transport)
|
||||
async with client:
|
||||
tools = await client.list_tools()
|
||||
result = await client.call_tool("tool_name", {"arg": "value"})
|
||||
```
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
transport: BaseTransport,
|
||||
connect_timeout: int = MCP_CONNECTION_TIMEOUT,
|
||||
execution_timeout: int = MCP_TOOL_EXECUTION_TIMEOUT,
|
||||
discovery_timeout: int = MCP_DISCOVERY_TIMEOUT,
|
||||
max_retries: int = MCP_MAX_RETRIES,
|
||||
cache_tools_list: bool = False,
|
||||
logger: logging.Logger | None = None,
|
||||
) -> None:
|
||||
"""Initialize MCP client.
|
||||
|
||||
Args:
|
||||
transport: Transport instance for MCP server connection.
|
||||
connect_timeout: Connection timeout in seconds.
|
||||
execution_timeout: Tool execution timeout in seconds.
|
||||
discovery_timeout: Tool discovery timeout in seconds.
|
||||
max_retries: Maximum retry attempts for operations.
|
||||
cache_tools_list: Whether to cache tool list results.
|
||||
logger: Optional logger instance.
|
||||
"""
|
||||
self.transport = transport
|
||||
self.connect_timeout = connect_timeout
|
||||
self.execution_timeout = execution_timeout
|
||||
self.discovery_timeout = discovery_timeout
|
||||
self.max_retries = max_retries
|
||||
self.cache_tools_list = cache_tools_list
|
||||
# self._logger = logger or logging.getLogger(__name__)
|
||||
self._session: Any = None
|
||||
self._initialized = False
|
||||
self._exit_stack = AsyncExitStack()
|
||||
self._was_connected = False
|
||||
|
||||
@property
|
||||
def connected(self) -> bool:
|
||||
"""Check if client is connected to server."""
|
||||
return self.transport.connected and self._initialized
|
||||
|
||||
@property
|
||||
def session(self) -> Any:
|
||||
"""Get the MCP session."""
|
||||
if self._session is None:
|
||||
raise RuntimeError("Client not connected. Call connect() first.")
|
||||
return self._session
|
||||
|
||||
def _get_server_info(self) -> tuple[str, str | None, str | None]:
|
||||
"""Get server information for events.
|
||||
|
||||
Returns:
|
||||
Tuple of (server_name, server_url, transport_type).
|
||||
"""
|
||||
if isinstance(self.transport, StdioTransport):
|
||||
server_name = f"{self.transport.command} {' '.join(self.transport.args)}"
|
||||
server_url = None
|
||||
transport_type = self.transport.transport_type.value
|
||||
elif isinstance(self.transport, HTTPTransport):
|
||||
server_name = self.transport.url
|
||||
server_url = self.transport.url
|
||||
transport_type = self.transport.transport_type.value
|
||||
elif isinstance(self.transport, SSETransport):
|
||||
server_name = self.transport.url
|
||||
server_url = self.transport.url
|
||||
transport_type = self.transport.transport_type.value
|
||||
else:
|
||||
server_name = "Unknown MCP Server"
|
||||
server_url = None
|
||||
transport_type = (
|
||||
self.transport.transport_type.value
|
||||
if hasattr(self.transport, "transport_type")
|
||||
else None
|
||||
)
|
||||
|
||||
return server_name, server_url, transport_type
|
||||
|
||||
async def connect(self) -> Self:
|
||||
"""Connect to MCP server and initialize session.
|
||||
|
||||
Returns:
|
||||
Self for method chaining.
|
||||
|
||||
Raises:
|
||||
ConnectionError: If connection fails.
|
||||
ImportError: If MCP SDK not available.
|
||||
"""
|
||||
if self.connected:
|
||||
return self
|
||||
|
||||
# Get server info for events
|
||||
server_name, server_url, transport_type = self._get_server_info()
|
||||
is_reconnect = self._was_connected
|
||||
|
||||
# Emit connection started event
|
||||
started_at = datetime.now()
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
MCPConnectionStartedEvent(
|
||||
server_name=server_name,
|
||||
server_url=server_url,
|
||||
transport_type=transport_type,
|
||||
is_reconnect=is_reconnect,
|
||||
connect_timeout=self.connect_timeout,
|
||||
),
|
||||
)
|
||||
|
||||
try:
|
||||
from mcp import ClientSession
|
||||
|
||||
# Use AsyncExitStack to manage transport and session contexts together
|
||||
# This ensures they're in the same async scope and prevents cancel scope errors
|
||||
# Always enter transport context via exit stack (it handles already-connected state)
|
||||
await self._exit_stack.enter_async_context(self.transport)
|
||||
|
||||
# Create ClientSession with transport streams
|
||||
self._session = ClientSession(
|
||||
self.transport.read_stream,
|
||||
self.transport.write_stream,
|
||||
)
|
||||
|
||||
# Enter the session's async context manager via exit stack
|
||||
await self._exit_stack.enter_async_context(self._session)
|
||||
|
||||
# Initialize the session (required by MCP protocol)
|
||||
try:
|
||||
await asyncio.wait_for(
|
||||
self._session.initialize(),
|
||||
timeout=self.connect_timeout,
|
||||
)
|
||||
except asyncio.CancelledError:
|
||||
# If initialization was cancelled (e.g., event loop closing),
|
||||
# cleanup and re-raise - don't suppress cancellation
|
||||
await self._cleanup_on_error()
|
||||
raise
|
||||
except BaseExceptionGroup as eg:
|
||||
# Handle exception groups from anyio task groups
|
||||
# Extract the actual meaningful error (not GeneratorExit)
|
||||
actual_error = None
|
||||
for exc in eg.exceptions:
|
||||
if isinstance(exc, Exception) and not isinstance(
|
||||
exc, GeneratorExit
|
||||
):
|
||||
# Check if it's an HTTP error (like 401)
|
||||
error_msg = str(exc).lower()
|
||||
if "401" in error_msg or "unauthorized" in error_msg:
|
||||
actual_error = exc
|
||||
break
|
||||
if "cancel scope" not in error_msg and "task" not in error_msg:
|
||||
actual_error = exc
|
||||
break
|
||||
|
||||
await self._cleanup_on_error()
|
||||
if actual_error:
|
||||
raise ConnectionError(
|
||||
f"Failed to connect to MCP server: {actual_error}"
|
||||
) from actual_error
|
||||
raise ConnectionError(f"Failed to connect to MCP server: {eg}") from eg
|
||||
|
||||
self._initialized = True
|
||||
self._was_connected = True
|
||||
|
||||
completed_at = datetime.now()
|
||||
connection_duration_ms = (completed_at - started_at).total_seconds() * 1000
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
MCPConnectionCompletedEvent(
|
||||
server_name=server_name,
|
||||
server_url=server_url,
|
||||
transport_type=transport_type,
|
||||
started_at=started_at,
|
||||
completed_at=completed_at,
|
||||
connection_duration_ms=connection_duration_ms,
|
||||
is_reconnect=is_reconnect,
|
||||
),
|
||||
)
|
||||
|
||||
return self
|
||||
except ImportError as e:
|
||||
await self._cleanup_on_error()
|
||||
error_msg = (
|
||||
"MCP library not available. Please install with: pip install mcp"
|
||||
)
|
||||
self._emit_connection_failed(
|
||||
server_name,
|
||||
server_url,
|
||||
transport_type,
|
||||
error_msg,
|
||||
"import_error",
|
||||
started_at,
|
||||
)
|
||||
raise ImportError(error_msg) from e
|
||||
except asyncio.TimeoutError as e:
|
||||
await self._cleanup_on_error()
|
||||
error_msg = f"MCP connection timed out after {self.connect_timeout} seconds. The server may be slow or unreachable."
|
||||
self._emit_connection_failed(
|
||||
server_name,
|
||||
server_url,
|
||||
transport_type,
|
||||
error_msg,
|
||||
"timeout",
|
||||
started_at,
|
||||
)
|
||||
raise ConnectionError(error_msg) from e
|
||||
except asyncio.CancelledError:
|
||||
# Re-raise cancellation - don't suppress it
|
||||
await self._cleanup_on_error()
|
||||
self._emit_connection_failed(
|
||||
server_name,
|
||||
server_url,
|
||||
transport_type,
|
||||
"Connection cancelled",
|
||||
"cancelled",
|
||||
started_at,
|
||||
)
|
||||
raise
|
||||
except BaseExceptionGroup as eg:
|
||||
# Handle exception groups from anyio task groups at outer level
|
||||
actual_error = None
|
||||
for exc in eg.exceptions:
|
||||
if isinstance(exc, Exception) and not isinstance(exc, GeneratorExit):
|
||||
error_msg = str(exc).lower()
|
||||
if "401" in error_msg or "unauthorized" in error_msg:
|
||||
actual_error = exc
|
||||
break
|
||||
if "cancel scope" not in error_msg and "task" not in error_msg:
|
||||
actual_error = exc
|
||||
break
|
||||
|
||||
await self._cleanup_on_error()
|
||||
error_type = (
|
||||
"authentication"
|
||||
if actual_error
|
||||
and (
|
||||
"401" in str(actual_error).lower()
|
||||
or "unauthorized" in str(actual_error).lower()
|
||||
)
|
||||
else "network"
|
||||
)
|
||||
error_msg = str(actual_error) if actual_error else str(eg)
|
||||
self._emit_connection_failed(
|
||||
server_name,
|
||||
server_url,
|
||||
transport_type,
|
||||
error_msg,
|
||||
error_type,
|
||||
started_at,
|
||||
)
|
||||
if actual_error:
|
||||
raise ConnectionError(
|
||||
f"Failed to connect to MCP server: {actual_error}"
|
||||
) from actual_error
|
||||
raise ConnectionError(f"Failed to connect to MCP server: {eg}") from eg
|
||||
except Exception as e:
|
||||
await self._cleanup_on_error()
|
||||
error_type = (
|
||||
"authentication"
|
||||
if "401" in str(e).lower() or "unauthorized" in str(e).lower()
|
||||
else "network"
|
||||
)
|
||||
self._emit_connection_failed(
|
||||
server_name, server_url, transport_type, str(e), error_type, started_at
|
||||
)
|
||||
raise ConnectionError(f"Failed to connect to MCP server: {e}") from e
|
||||
|
||||
def _emit_connection_failed(
|
||||
self,
|
||||
server_name: str,
|
||||
server_url: str | None,
|
||||
transport_type: str | None,
|
||||
error: str,
|
||||
error_type: str,
|
||||
started_at: datetime,
|
||||
) -> None:
|
||||
"""Emit connection failed event."""
|
||||
failed_at = datetime.now()
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
MCPConnectionFailedEvent(
|
||||
server_name=server_name,
|
||||
server_url=server_url,
|
||||
transport_type=transport_type,
|
||||
error=error,
|
||||
error_type=error_type,
|
||||
started_at=started_at,
|
||||
failed_at=failed_at,
|
||||
),
|
||||
)
|
||||
|
||||
async def _cleanup_on_error(self) -> None:
|
||||
"""Cleanup resources when an error occurs during connection."""
|
||||
try:
|
||||
await self._exit_stack.aclose()
|
||||
|
||||
except Exception as e:
|
||||
# Best effort cleanup - ignore all other errors
|
||||
raise RuntimeError(f"Error during MCP client cleanup: {e}") from e
|
||||
finally:
|
||||
self._session = None
|
||||
self._initialized = False
|
||||
self._exit_stack = AsyncExitStack()
|
||||
|
||||
async def disconnect(self) -> None:
|
||||
"""Disconnect from MCP server and cleanup resources."""
|
||||
if not self.connected:
|
||||
return
|
||||
|
||||
try:
|
||||
await self._exit_stack.aclose()
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Error during MCP client disconnect: {e}") from e
|
||||
finally:
|
||||
self._session = None
|
||||
self._initialized = False
|
||||
self._exit_stack = AsyncExitStack()
|
||||
|
||||
async def list_tools(self, use_cache: bool | None = None) -> list[dict[str, Any]]:
|
||||
"""List available tools from MCP server.
|
||||
|
||||
Args:
|
||||
use_cache: Whether to use cached results. If None, uses
|
||||
client's cache_tools_list setting.
|
||||
|
||||
Returns:
|
||||
List of tool definitions with name, description, and inputSchema.
|
||||
"""
|
||||
if not self.connected:
|
||||
await self.connect()
|
||||
|
||||
# Check cache if enabled
|
||||
use_cache = use_cache if use_cache is not None else self.cache_tools_list
|
||||
if use_cache:
|
||||
cache_key = self._get_cache_key("tools")
|
||||
if cache_key in _mcp_schema_cache:
|
||||
cached_data, cache_time = _mcp_schema_cache[cache_key]
|
||||
if time.time() - cache_time < _cache_ttl:
|
||||
# Logger removed - return cached data
|
||||
return cached_data
|
||||
|
||||
# List tools with timeout and retries
|
||||
tools = await self._retry_operation(
|
||||
self._list_tools_impl,
|
||||
timeout=self.discovery_timeout,
|
||||
)
|
||||
|
||||
# Cache results if enabled
|
||||
if use_cache:
|
||||
cache_key = self._get_cache_key("tools")
|
||||
_mcp_schema_cache[cache_key] = (tools, time.time())
|
||||
|
||||
return tools
|
||||
|
||||
async def _list_tools_impl(self) -> list[dict[str, Any]]:
|
||||
"""Internal implementation of list_tools."""
|
||||
tools_result = await asyncio.wait_for(
|
||||
self.session.list_tools(),
|
||||
timeout=self.discovery_timeout,
|
||||
)
|
||||
|
||||
return [
|
||||
{
|
||||
"name": tool.name,
|
||||
"description": getattr(tool, "description", ""),
|
||||
"inputSchema": getattr(tool, "inputSchema", {}),
|
||||
}
|
||||
for tool in tools_result.tools
|
||||
]
|
||||
|
||||
async def call_tool(
|
||||
self, tool_name: str, arguments: dict[str, Any] | None = None
|
||||
) -> Any:
|
||||
"""Call a tool on the MCP server.
|
||||
|
||||
Args:
|
||||
tool_name: Name of the tool to call.
|
||||
arguments: Tool arguments.
|
||||
|
||||
Returns:
|
||||
Tool execution result.
|
||||
"""
|
||||
if not self.connected:
|
||||
await self.connect()
|
||||
|
||||
arguments = arguments or {}
|
||||
cleaned_arguments = self._clean_tool_arguments(arguments)
|
||||
|
||||
# Get server info for events
|
||||
server_name, server_url, transport_type = self._get_server_info()
|
||||
|
||||
# Emit tool execution started event
|
||||
started_at = datetime.now()
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
MCPToolExecutionStartedEvent(
|
||||
server_name=server_name,
|
||||
server_url=server_url,
|
||||
transport_type=transport_type,
|
||||
tool_name=tool_name,
|
||||
tool_args=cleaned_arguments,
|
||||
),
|
||||
)
|
||||
|
||||
try:
|
||||
result = await self._retry_operation(
|
||||
lambda: self._call_tool_impl(tool_name, cleaned_arguments),
|
||||
timeout=self.execution_timeout,
|
||||
)
|
||||
|
||||
completed_at = datetime.now()
|
||||
execution_duration_ms = (completed_at - started_at).total_seconds() * 1000
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
MCPToolExecutionCompletedEvent(
|
||||
server_name=server_name,
|
||||
server_url=server_url,
|
||||
transport_type=transport_type,
|
||||
tool_name=tool_name,
|
||||
tool_args=cleaned_arguments,
|
||||
result=result,
|
||||
started_at=started_at,
|
||||
completed_at=completed_at,
|
||||
execution_duration_ms=execution_duration_ms,
|
||||
),
|
||||
)
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
failed_at = datetime.now()
|
||||
error_type = (
|
||||
"timeout"
|
||||
if isinstance(e, (asyncio.TimeoutError, ConnectionError))
|
||||
and "timeout" in str(e).lower()
|
||||
else "server_error"
|
||||
)
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
MCPToolExecutionFailedEvent(
|
||||
server_name=server_name,
|
||||
server_url=server_url,
|
||||
transport_type=transport_type,
|
||||
tool_name=tool_name,
|
||||
tool_args=cleaned_arguments,
|
||||
error=str(e),
|
||||
error_type=error_type,
|
||||
started_at=started_at,
|
||||
failed_at=failed_at,
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
def _clean_tool_arguments(self, arguments: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Clean tool arguments by removing None values and fixing formats.
|
||||
|
||||
Args:
|
||||
arguments: Raw tool arguments.
|
||||
|
||||
Returns:
|
||||
Cleaned arguments ready for MCP server.
|
||||
"""
|
||||
cleaned = {}
|
||||
|
||||
for key, value in arguments.items():
|
||||
# Skip None values
|
||||
if value is None:
|
||||
continue
|
||||
|
||||
# Fix sources array format: convert ["web"] to [{"type": "web"}]
|
||||
if key == "sources" and isinstance(value, list):
|
||||
fixed_sources = []
|
||||
for item in value:
|
||||
if isinstance(item, str):
|
||||
# Convert string to object format
|
||||
fixed_sources.append({"type": item})
|
||||
elif isinstance(item, dict):
|
||||
# Already in correct format
|
||||
fixed_sources.append(item)
|
||||
else:
|
||||
# Keep as is if unknown format
|
||||
fixed_sources.append(item)
|
||||
if fixed_sources:
|
||||
cleaned[key] = fixed_sources
|
||||
continue
|
||||
|
||||
# Recursively clean nested dictionaries
|
||||
if isinstance(value, dict):
|
||||
nested_cleaned = self._clean_tool_arguments(value)
|
||||
if nested_cleaned: # Only add if not empty
|
||||
cleaned[key] = nested_cleaned
|
||||
elif isinstance(value, list):
|
||||
# Clean list items
|
||||
cleaned_list = []
|
||||
for item in value:
|
||||
if isinstance(item, dict):
|
||||
cleaned_item = self._clean_tool_arguments(item)
|
||||
if cleaned_item:
|
||||
cleaned_list.append(cleaned_item)
|
||||
elif item is not None:
|
||||
cleaned_list.append(item)
|
||||
if cleaned_list:
|
||||
cleaned[key] = cleaned_list
|
||||
else:
|
||||
# Keep primitive values
|
||||
cleaned[key] = value
|
||||
|
||||
return cleaned
|
||||
|
||||
async def _call_tool_impl(self, tool_name: str, arguments: dict[str, Any]) -> Any:
|
||||
"""Internal implementation of call_tool."""
|
||||
result = await asyncio.wait_for(
|
||||
self.session.call_tool(tool_name, arguments),
|
||||
timeout=self.execution_timeout,
|
||||
)
|
||||
|
||||
# Extract result content
|
||||
if hasattr(result, "content") and result.content:
|
||||
if isinstance(result.content, list) and len(result.content) > 0:
|
||||
content_item = result.content[0]
|
||||
if hasattr(content_item, "text"):
|
||||
return str(content_item.text)
|
||||
return str(content_item)
|
||||
return str(result.content)
|
||||
|
||||
return str(result)
|
||||
|
||||
async def list_prompts(self) -> list[dict[str, Any]]:
|
||||
"""List available prompts from MCP server.
|
||||
|
||||
Returns:
|
||||
List of prompt definitions.
|
||||
"""
|
||||
if not self.connected:
|
||||
await self.connect()
|
||||
|
||||
return await self._retry_operation(
|
||||
self._list_prompts_impl,
|
||||
timeout=self.discovery_timeout,
|
||||
)
|
||||
|
||||
async def _list_prompts_impl(self) -> list[dict[str, Any]]:
|
||||
"""Internal implementation of list_prompts."""
|
||||
prompts_result = await asyncio.wait_for(
|
||||
self.session.list_prompts(),
|
||||
timeout=self.discovery_timeout,
|
||||
)
|
||||
|
||||
return [
|
||||
{
|
||||
"name": prompt.name,
|
||||
"description": getattr(prompt, "description", ""),
|
||||
"arguments": getattr(prompt, "arguments", []),
|
||||
}
|
||||
for prompt in prompts_result.prompts
|
||||
]
|
||||
|
||||
async def get_prompt(
|
||||
self, prompt_name: str, arguments: dict[str, Any] | None = None
|
||||
) -> dict[str, Any]:
|
||||
"""Get a prompt from the MCP server.
|
||||
|
||||
Args:
|
||||
prompt_name: Name of the prompt to get.
|
||||
arguments: Optional prompt arguments.
|
||||
|
||||
Returns:
|
||||
Prompt content and metadata.
|
||||
"""
|
||||
if not self.connected:
|
||||
await self.connect()
|
||||
|
||||
arguments = arguments or {}
|
||||
|
||||
return await self._retry_operation(
|
||||
lambda: self._get_prompt_impl(prompt_name, arguments),
|
||||
timeout=self.execution_timeout,
|
||||
)
|
||||
|
||||
async def _get_prompt_impl(
|
||||
self, prompt_name: str, arguments: dict[str, Any]
|
||||
) -> dict[str, Any]:
|
||||
"""Internal implementation of get_prompt."""
|
||||
result = await asyncio.wait_for(
|
||||
self.session.get_prompt(prompt_name, arguments),
|
||||
timeout=self.execution_timeout,
|
||||
)
|
||||
|
||||
return {
|
||||
"name": prompt_name,
|
||||
"messages": [
|
||||
{
|
||||
"role": msg.role,
|
||||
"content": msg.content,
|
||||
}
|
||||
for msg in result.messages
|
||||
],
|
||||
"arguments": arguments,
|
||||
}
|
||||
|
||||
async def _retry_operation(
|
||||
self,
|
||||
operation: Callable[[], Any],
|
||||
timeout: int | None = None,
|
||||
) -> Any:
|
||||
"""Retry an operation with exponential backoff.
|
||||
|
||||
Args:
|
||||
operation: Async operation to retry.
|
||||
timeout: Operation timeout in seconds.
|
||||
|
||||
Returns:
|
||||
Operation result.
|
||||
"""
|
||||
last_error = None
|
||||
timeout = timeout or self.execution_timeout
|
||||
|
||||
for attempt in range(self.max_retries):
|
||||
try:
|
||||
if timeout:
|
||||
return await asyncio.wait_for(operation(), timeout=timeout)
|
||||
return await operation()
|
||||
|
||||
except asyncio.TimeoutError as e: # noqa: PERF203
|
||||
last_error = f"Operation timed out after {timeout} seconds"
|
||||
if attempt < self.max_retries - 1:
|
||||
wait_time = 2**attempt
|
||||
await asyncio.sleep(wait_time)
|
||||
else:
|
||||
raise ConnectionError(last_error) from e
|
||||
|
||||
except Exception as e:
|
||||
error_str = str(e).lower()
|
||||
|
||||
# Classify errors as retryable or non-retryable
|
||||
if "authentication" in error_str or "unauthorized" in error_str:
|
||||
raise ConnectionError(f"Authentication failed: {e}") from e
|
||||
|
||||
if "not found" in error_str:
|
||||
raise ValueError(f"Resource not found: {e}") from e
|
||||
|
||||
# Retryable errors
|
||||
last_error = str(e)
|
||||
if attempt < self.max_retries - 1:
|
||||
wait_time = 2**attempt
|
||||
await asyncio.sleep(wait_time)
|
||||
else:
|
||||
raise ConnectionError(
|
||||
f"Operation failed after {self.max_retries} attempts: {last_error}"
|
||||
) from e
|
||||
|
||||
raise ConnectionError(f"Operation failed: {last_error}")
|
||||
|
||||
def _get_cache_key(self, resource_type: str) -> str:
|
||||
"""Generate cache key for resource.
|
||||
|
||||
Args:
|
||||
resource_type: Type of resource (e.g., "tools", "prompts").
|
||||
|
||||
Returns:
|
||||
Cache key string.
|
||||
"""
|
||||
# Use transport type and URL/command as cache key
|
||||
if isinstance(self.transport, StdioTransport):
|
||||
key = f"stdio:{self.transport.command}:{':'.join(self.transport.args)}"
|
||||
elif isinstance(self.transport, HTTPTransport):
|
||||
key = f"http:{self.transport.url}"
|
||||
elif isinstance(self.transport, SSETransport):
|
||||
key = f"sse:{self.transport.url}"
|
||||
else:
|
||||
key = f"{self.transport.transport_type}:unknown"
|
||||
|
||||
return f"mcp:{key}:{resource_type}"
|
||||
|
||||
async def __aenter__(self) -> Self:
|
||||
"""Async context manager entry."""
|
||||
return await self.connect()
|
||||
|
||||
async def __aexit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_val: BaseException | None,
|
||||
exc_tb: Any,
|
||||
) -> None:
|
||||
"""Async context manager exit."""
|
||||
await self.disconnect()
|
||||
124
lib/crewai/src/crewai/mcp/config.py
Normal file
124
lib/crewai/src/crewai/mcp/config.py
Normal file
@@ -0,0 +1,124 @@
|
||||
"""MCP server configuration models for CrewAI agents.
|
||||
|
||||
This module provides Pydantic models for configuring MCP servers with
|
||||
various transport types, similar to OpenAI's Agents SDK.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.mcp.filters import ToolFilter
|
||||
|
||||
|
||||
class MCPServerStdio(BaseModel):
|
||||
"""Stdio MCP server configuration.
|
||||
|
||||
This configuration is used for connecting to local MCP servers
|
||||
that run as processes and communicate via standard input/output.
|
||||
|
||||
Example:
|
||||
```python
|
||||
mcp_server = MCPServerStdio(
|
||||
command="python",
|
||||
args=["path/to/server.py"],
|
||||
env={"API_KEY": "..."},
|
||||
tool_filter=create_static_tool_filter(
|
||||
allowed_tool_names=["read_file", "write_file"]
|
||||
),
|
||||
)
|
||||
```
|
||||
"""
|
||||
|
||||
command: str = Field(
|
||||
...,
|
||||
description="Command to execute (e.g., 'python', 'node', 'npx', 'uvx').",
|
||||
)
|
||||
args: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Command arguments (e.g., ['server.py'] or ['-y', '@mcp/server']).",
|
||||
)
|
||||
env: dict[str, str] | None = Field(
|
||||
default=None,
|
||||
description="Environment variables to pass to the process.",
|
||||
)
|
||||
tool_filter: ToolFilter | None = Field(
|
||||
default=None,
|
||||
description="Optional tool filter for filtering available tools.",
|
||||
)
|
||||
cache_tools_list: bool = Field(
|
||||
default=False,
|
||||
description="Whether to cache the tool list for faster subsequent access.",
|
||||
)
|
||||
|
||||
|
||||
class MCPServerHTTP(BaseModel):
|
||||
"""HTTP/Streamable HTTP MCP server configuration.
|
||||
|
||||
This configuration is used for connecting to remote MCP servers
|
||||
over HTTP/HTTPS using streamable HTTP transport.
|
||||
|
||||
Example:
|
||||
```python
|
||||
mcp_server = MCPServerHTTP(
|
||||
url="https://api.example.com/mcp",
|
||||
headers={"Authorization": "Bearer ..."},
|
||||
cache_tools_list=True,
|
||||
)
|
||||
```
|
||||
"""
|
||||
|
||||
url: str = Field(
|
||||
..., description="Server URL (e.g., 'https://api.example.com/mcp')."
|
||||
)
|
||||
headers: dict[str, str] | None = Field(
|
||||
default=None,
|
||||
description="Optional HTTP headers for authentication or other purposes.",
|
||||
)
|
||||
streamable: bool = Field(
|
||||
default=True,
|
||||
description="Whether to use streamable HTTP transport (default: True).",
|
||||
)
|
||||
tool_filter: ToolFilter | None = Field(
|
||||
default=None,
|
||||
description="Optional tool filter for filtering available tools.",
|
||||
)
|
||||
cache_tools_list: bool = Field(
|
||||
default=False,
|
||||
description="Whether to cache the tool list for faster subsequent access.",
|
||||
)
|
||||
|
||||
|
||||
class MCPServerSSE(BaseModel):
|
||||
"""Server-Sent Events (SSE) MCP server configuration.
|
||||
|
||||
This configuration is used for connecting to remote MCP servers
|
||||
using Server-Sent Events for real-time streaming communication.
|
||||
|
||||
Example:
|
||||
```python
|
||||
mcp_server = MCPServerSSE(
|
||||
url="https://api.example.com/mcp/sse",
|
||||
headers={"Authorization": "Bearer ..."},
|
||||
)
|
||||
```
|
||||
"""
|
||||
|
||||
url: str = Field(
|
||||
...,
|
||||
description="Server URL (e.g., 'https://api.example.com/mcp/sse').",
|
||||
)
|
||||
headers: dict[str, str] | None = Field(
|
||||
default=None,
|
||||
description="Optional HTTP headers for authentication or other purposes.",
|
||||
)
|
||||
tool_filter: ToolFilter | None = Field(
|
||||
default=None,
|
||||
description="Optional tool filter for filtering available tools.",
|
||||
)
|
||||
cache_tools_list: bool = Field(
|
||||
default=False,
|
||||
description="Whether to cache the tool list for faster subsequent access.",
|
||||
)
|
||||
|
||||
|
||||
# Type alias for all MCP server configurations
|
||||
MCPServerConfig = MCPServerStdio | MCPServerHTTP | MCPServerSSE
|
||||
166
lib/crewai/src/crewai/mcp/filters.py
Normal file
166
lib/crewai/src/crewai/mcp/filters.py
Normal file
@@ -0,0 +1,166 @@
|
||||
"""Tool filtering support for MCP servers.
|
||||
|
||||
This module provides utilities for filtering tools from MCP servers,
|
||||
including static allow/block lists and dynamic context-aware filtering.
|
||||
"""
|
||||
|
||||
from collections.abc import Callable
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
pass
|
||||
|
||||
|
||||
class ToolFilterContext(BaseModel):
|
||||
"""Context for dynamic tool filtering.
|
||||
|
||||
This context is passed to dynamic tool filters to provide
|
||||
information about the agent, run context, and server.
|
||||
"""
|
||||
|
||||
agent: Any = Field(..., description="The agent requesting tools.")
|
||||
server_name: str = Field(..., description="Name of the MCP server.")
|
||||
run_context: dict[str, Any] | None = Field(
|
||||
default=None,
|
||||
description="Optional run context for additional filtering logic.",
|
||||
)
|
||||
|
||||
|
||||
# Type alias for tool filter functions
|
||||
ToolFilter = (
|
||||
Callable[[ToolFilterContext, dict[str, Any]], bool]
|
||||
| Callable[[dict[str, Any]], bool]
|
||||
)
|
||||
|
||||
|
||||
class StaticToolFilter:
|
||||
"""Static tool filter with allow/block lists.
|
||||
|
||||
This filter provides simple allow/block list filtering based on
|
||||
tool names. Useful for restricting which tools are available
|
||||
from an MCP server.
|
||||
|
||||
Example:
|
||||
```python
|
||||
filter = StaticToolFilter(
|
||||
allowed_tool_names=["read_file", "write_file"],
|
||||
blocked_tool_names=["delete_file"],
|
||||
)
|
||||
```
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
allowed_tool_names: list[str] | None = None,
|
||||
blocked_tool_names: list[str] | None = None,
|
||||
) -> None:
|
||||
"""Initialize static tool filter.
|
||||
|
||||
Args:
|
||||
allowed_tool_names: List of tool names to allow. If None,
|
||||
all tools are allowed (unless blocked).
|
||||
blocked_tool_names: List of tool names to block. Blocked tools
|
||||
take precedence over allowed tools.
|
||||
"""
|
||||
self.allowed_tool_names = set(allowed_tool_names or [])
|
||||
self.blocked_tool_names = set(blocked_tool_names or [])
|
||||
|
||||
def __call__(self, tool: dict[str, Any]) -> bool:
|
||||
"""Filter tool based on allow/block lists.
|
||||
|
||||
Args:
|
||||
tool: Tool definition dictionary with at least 'name' key.
|
||||
|
||||
Returns:
|
||||
True if tool should be included, False otherwise.
|
||||
"""
|
||||
tool_name = tool.get("name", "")
|
||||
|
||||
# Blocked tools take precedence
|
||||
if self.blocked_tool_names and tool_name in self.blocked_tool_names:
|
||||
return False
|
||||
|
||||
# If allow list exists, tool must be in it
|
||||
if self.allowed_tool_names:
|
||||
return tool_name in self.allowed_tool_names
|
||||
|
||||
# No restrictions - allow all
|
||||
return True
|
||||
|
||||
|
||||
def create_static_tool_filter(
|
||||
allowed_tool_names: list[str] | None = None,
|
||||
blocked_tool_names: list[str] | None = None,
|
||||
) -> Callable[[dict[str, Any]], bool]:
|
||||
"""Create a static tool filter function.
|
||||
|
||||
This is a convenience function for creating static tool filters
|
||||
with allow/block lists.
|
||||
|
||||
Args:
|
||||
allowed_tool_names: List of tool names to allow. If None,
|
||||
all tools are allowed (unless blocked).
|
||||
blocked_tool_names: List of tool names to block. Blocked tools
|
||||
take precedence over allowed tools.
|
||||
|
||||
Returns:
|
||||
Tool filter function that returns True for allowed tools.
|
||||
|
||||
Example:
|
||||
```python
|
||||
filter_fn = create_static_tool_filter(
|
||||
allowed_tool_names=["read_file", "write_file"],
|
||||
blocked_tool_names=["delete_file"],
|
||||
)
|
||||
|
||||
# Use in MCPServerStdio
|
||||
mcp_server = MCPServerStdio(
|
||||
command="npx",
|
||||
args=["-y", "@modelcontextprotocol/server-filesystem"],
|
||||
tool_filter=filter_fn,
|
||||
)
|
||||
```
|
||||
"""
|
||||
return StaticToolFilter(
|
||||
allowed_tool_names=allowed_tool_names,
|
||||
blocked_tool_names=blocked_tool_names,
|
||||
)
|
||||
|
||||
|
||||
def create_dynamic_tool_filter(
|
||||
filter_func: Callable[[ToolFilterContext, dict[str, Any]], bool],
|
||||
) -> Callable[[ToolFilterContext, dict[str, Any]], bool]:
|
||||
"""Create a dynamic tool filter function.
|
||||
|
||||
This function wraps a dynamic filter function that has access
|
||||
to the tool filter context (agent, server, run context).
|
||||
|
||||
Args:
|
||||
filter_func: Function that takes (context, tool) and returns bool.
|
||||
|
||||
Returns:
|
||||
Tool filter function that can be used with MCP server configs.
|
||||
|
||||
Example:
|
||||
```python
|
||||
async def context_aware_filter(
|
||||
context: ToolFilterContext, tool: dict[str, Any]
|
||||
) -> bool:
|
||||
# Block dangerous tools for code reviewers
|
||||
if context.agent.role == "Code Reviewer":
|
||||
if tool["name"].startswith("danger_"):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
filter_fn = create_dynamic_tool_filter(context_aware_filter)
|
||||
|
||||
mcp_server = MCPServerStdio(
|
||||
command="python", args=["server.py"], tool_filter=filter_fn
|
||||
)
|
||||
```
|
||||
"""
|
||||
return filter_func
|
||||
15
lib/crewai/src/crewai/mcp/transports/__init__.py
Normal file
15
lib/crewai/src/crewai/mcp/transports/__init__.py
Normal file
@@ -0,0 +1,15 @@
|
||||
"""MCP transport implementations for various connection types."""
|
||||
|
||||
from crewai.mcp.transports.base import BaseTransport, TransportType
|
||||
from crewai.mcp.transports.http import HTTPTransport
|
||||
from crewai.mcp.transports.sse import SSETransport
|
||||
from crewai.mcp.transports.stdio import StdioTransport
|
||||
|
||||
|
||||
__all__ = [
|
||||
"BaseTransport",
|
||||
"HTTPTransport",
|
||||
"SSETransport",
|
||||
"StdioTransport",
|
||||
"TransportType",
|
||||
]
|
||||
125
lib/crewai/src/crewai/mcp/transports/base.py
Normal file
125
lib/crewai/src/crewai/mcp/transports/base.py
Normal file
@@ -0,0 +1,125 @@
|
||||
"""Base transport interface for MCP connections."""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
from typing import Any, Protocol
|
||||
|
||||
from typing_extensions import Self
|
||||
|
||||
|
||||
class TransportType(str, Enum):
|
||||
"""MCP transport types."""
|
||||
|
||||
STDIO = "stdio"
|
||||
HTTP = "http"
|
||||
STREAMABLE_HTTP = "streamable-http"
|
||||
SSE = "sse"
|
||||
|
||||
|
||||
class ReadStream(Protocol):
|
||||
"""Protocol for read streams."""
|
||||
|
||||
async def read(self, n: int = -1) -> bytes:
|
||||
"""Read bytes from stream."""
|
||||
...
|
||||
|
||||
|
||||
class WriteStream(Protocol):
|
||||
"""Protocol for write streams."""
|
||||
|
||||
async def write(self, data: bytes) -> None:
|
||||
"""Write bytes to stream."""
|
||||
...
|
||||
|
||||
|
||||
class BaseTransport(ABC):
|
||||
"""Base class for MCP transport implementations.
|
||||
|
||||
This abstract base class defines the interface that all transport
|
||||
implementations must follow. Transports handle the low-level communication
|
||||
with MCP servers.
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs: Any) -> None:
|
||||
"""Initialize the transport.
|
||||
|
||||
Args:
|
||||
**kwargs: Transport-specific configuration options.
|
||||
"""
|
||||
self._read_stream: ReadStream | None = None
|
||||
self._write_stream: WriteStream | None = None
|
||||
self._connected = False
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def transport_type(self) -> TransportType:
|
||||
"""Return the transport type."""
|
||||
...
|
||||
|
||||
@property
|
||||
def connected(self) -> bool:
|
||||
"""Check if transport is connected."""
|
||||
return self._connected
|
||||
|
||||
@property
|
||||
def read_stream(self) -> ReadStream:
|
||||
"""Get the read stream."""
|
||||
if self._read_stream is None:
|
||||
raise RuntimeError("Transport not connected. Call connect() first.")
|
||||
return self._read_stream
|
||||
|
||||
@property
|
||||
def write_stream(self) -> WriteStream:
|
||||
"""Get the write stream."""
|
||||
if self._write_stream is None:
|
||||
raise RuntimeError("Transport not connected. Call connect() first.")
|
||||
return self._write_stream
|
||||
|
||||
@abstractmethod
|
||||
async def connect(self) -> Self:
|
||||
"""Establish connection to MCP server.
|
||||
|
||||
Returns:
|
||||
Self for method chaining.
|
||||
|
||||
Raises:
|
||||
ConnectionError: If connection fails.
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
async def disconnect(self) -> None:
|
||||
"""Close connection to MCP server."""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
async def __aenter__(self) -> Self:
|
||||
"""Async context manager entry."""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
async def __aexit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_val: BaseException | None,
|
||||
exc_tb: Any,
|
||||
) -> None:
|
||||
"""Async context manager exit."""
|
||||
...
|
||||
|
||||
def _set_streams(self, read: ReadStream, write: WriteStream) -> None:
|
||||
"""Set the read and write streams.
|
||||
|
||||
Args:
|
||||
read: Read stream.
|
||||
write: Write stream.
|
||||
"""
|
||||
self._read_stream = read
|
||||
self._write_stream = write
|
||||
self._connected = True
|
||||
|
||||
def _clear_streams(self) -> None:
|
||||
"""Clear the read and write streams."""
|
||||
self._read_stream = None
|
||||
self._write_stream = None
|
||||
self._connected = False
|
||||
174
lib/crewai/src/crewai/mcp/transports/http.py
Normal file
174
lib/crewai/src/crewai/mcp/transports/http.py
Normal file
@@ -0,0 +1,174 @@
|
||||
"""HTTP and Streamable HTTP transport for MCP servers."""
|
||||
|
||||
import asyncio
|
||||
from typing import Any
|
||||
|
||||
from typing_extensions import Self
|
||||
|
||||
|
||||
# BaseExceptionGroup is available in Python 3.11+
|
||||
try:
|
||||
from builtins import BaseExceptionGroup
|
||||
except ImportError:
|
||||
# Fallback for Python < 3.11 (shouldn't happen in practice)
|
||||
BaseExceptionGroup = Exception
|
||||
|
||||
from crewai.mcp.transports.base import BaseTransport, TransportType
|
||||
|
||||
|
||||
class HTTPTransport(BaseTransport):
|
||||
"""HTTP/Streamable HTTP transport for connecting to remote MCP servers.
|
||||
|
||||
This transport connects to MCP servers over HTTP/HTTPS using the
|
||||
streamable HTTP client from the MCP SDK.
|
||||
|
||||
Example:
|
||||
```python
|
||||
transport = HTTPTransport(
|
||||
url="https://api.example.com/mcp",
|
||||
headers={"Authorization": "Bearer ..."}
|
||||
)
|
||||
async with transport:
|
||||
# Use transport...
|
||||
```
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
url: str,
|
||||
headers: dict[str, str] | None = None,
|
||||
streamable: bool = True,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize HTTP transport.
|
||||
|
||||
Args:
|
||||
url: Server URL (e.g., "https://api.example.com/mcp").
|
||||
headers: Optional HTTP headers.
|
||||
streamable: Whether to use streamable HTTP (default: True).
|
||||
**kwargs: Additional transport options.
|
||||
"""
|
||||
super().__init__(**kwargs)
|
||||
self.url = url
|
||||
self.headers = headers or {}
|
||||
self.streamable = streamable
|
||||
self._transport_context: Any = None
|
||||
|
||||
@property
|
||||
def transport_type(self) -> TransportType:
|
||||
"""Return the transport type."""
|
||||
return TransportType.STREAMABLE_HTTP if self.streamable else TransportType.HTTP
|
||||
|
||||
async def connect(self) -> Self:
|
||||
"""Establish HTTP connection to MCP server.
|
||||
|
||||
Returns:
|
||||
Self for method chaining.
|
||||
|
||||
Raises:
|
||||
ConnectionError: If connection fails.
|
||||
ImportError: If MCP SDK not available.
|
||||
"""
|
||||
if self._connected:
|
||||
return self
|
||||
|
||||
try:
|
||||
from mcp.client.streamable_http import streamablehttp_client
|
||||
|
||||
self._transport_context = streamablehttp_client(
|
||||
self.url,
|
||||
headers=self.headers if self.headers else None,
|
||||
terminate_on_close=True,
|
||||
)
|
||||
|
||||
try:
|
||||
read, write, _ = await asyncio.wait_for(
|
||||
self._transport_context.__aenter__(), timeout=30.0
|
||||
)
|
||||
except asyncio.TimeoutError as e:
|
||||
self._transport_context = None
|
||||
raise ConnectionError(
|
||||
"Transport context entry timed out after 30 seconds. "
|
||||
"Server may be slow or unreachable."
|
||||
) from e
|
||||
except Exception as e:
|
||||
self._transport_context = None
|
||||
raise ConnectionError(f"Failed to enter transport context: {e}") from e
|
||||
self._set_streams(read=read, write=write)
|
||||
return self
|
||||
|
||||
except ImportError as e:
|
||||
raise ImportError(
|
||||
"MCP library not available. Please install with: pip install mcp"
|
||||
) from e
|
||||
except Exception as e:
|
||||
self._clear_streams()
|
||||
if self._transport_context is not None:
|
||||
self._transport_context = None
|
||||
raise ConnectionError(f"Failed to connect to MCP server: {e}") from e
|
||||
|
||||
async def disconnect(self) -> None:
|
||||
"""Close HTTP connection."""
|
||||
if not self._connected:
|
||||
return
|
||||
|
||||
try:
|
||||
# Clear streams first
|
||||
self._clear_streams()
|
||||
# await self._exit_stack.aclose()
|
||||
|
||||
# Exit transport context - this will clean up background tasks
|
||||
# Give a small delay to allow background tasks to complete
|
||||
if self._transport_context is not None:
|
||||
try:
|
||||
# Wait a tiny bit for any pending operations
|
||||
await asyncio.sleep(0.1)
|
||||
await self._transport_context.__aexit__(None, None, None)
|
||||
except (RuntimeError, asyncio.CancelledError) as e:
|
||||
# Ignore "exit cancel scope in different task" errors and cancellation
|
||||
# These happen when asyncio.run() closes the event loop
|
||||
# while background tasks are still running
|
||||
error_msg = str(e).lower()
|
||||
if "cancel scope" not in error_msg and "task" not in error_msg:
|
||||
# Only suppress cancel scope/task errors, re-raise others
|
||||
if isinstance(e, RuntimeError):
|
||||
raise
|
||||
# For CancelledError, just suppress it
|
||||
except BaseExceptionGroup as eg:
|
||||
# Handle exception groups from anyio task groups
|
||||
# Suppress if they contain cancel scope errors
|
||||
should_suppress = False
|
||||
for exc in eg.exceptions:
|
||||
error_msg = str(exc).lower()
|
||||
if "cancel scope" in error_msg or "task" in error_msg:
|
||||
should_suppress = True
|
||||
break
|
||||
if not should_suppress:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise RuntimeError(
|
||||
f"Error during HTTP transport disconnect: {e}"
|
||||
) from e
|
||||
|
||||
self._connected = False
|
||||
|
||||
except Exception as e:
|
||||
# Log but don't raise - cleanup should be best effort
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.warning(f"Error during HTTP transport disconnect: {e}")
|
||||
|
||||
async def __aenter__(self) -> Self:
|
||||
"""Async context manager entry."""
|
||||
return await self.connect()
|
||||
|
||||
async def __aexit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_val: BaseException | None,
|
||||
exc_tb: Any,
|
||||
) -> None:
|
||||
"""Async context manager exit."""
|
||||
|
||||
await self.disconnect()
|
||||
113
lib/crewai/src/crewai/mcp/transports/sse.py
Normal file
113
lib/crewai/src/crewai/mcp/transports/sse.py
Normal file
@@ -0,0 +1,113 @@
|
||||
"""Server-Sent Events (SSE) transport for MCP servers."""
|
||||
|
||||
from typing import Any
|
||||
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.mcp.transports.base import BaseTransport, TransportType
|
||||
|
||||
|
||||
class SSETransport(BaseTransport):
|
||||
"""SSE transport for connecting to remote MCP servers.
|
||||
|
||||
This transport connects to MCP servers using Server-Sent Events (SSE)
|
||||
for real-time streaming communication.
|
||||
|
||||
Example:
|
||||
```python
|
||||
transport = SSETransport(
|
||||
url="https://api.example.com/mcp/sse",
|
||||
headers={"Authorization": "Bearer ..."}
|
||||
)
|
||||
async with transport:
|
||||
# Use transport...
|
||||
```
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
url: str,
|
||||
headers: dict[str, str] | None = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize SSE transport.
|
||||
|
||||
Args:
|
||||
url: Server URL (e.g., "https://api.example.com/mcp/sse").
|
||||
headers: Optional HTTP headers.
|
||||
**kwargs: Additional transport options.
|
||||
"""
|
||||
super().__init__(**kwargs)
|
||||
self.url = url
|
||||
self.headers = headers or {}
|
||||
self._transport_context: Any = None
|
||||
|
||||
@property
|
||||
def transport_type(self) -> TransportType:
|
||||
"""Return the transport type."""
|
||||
return TransportType.SSE
|
||||
|
||||
async def connect(self) -> Self:
|
||||
"""Establish SSE connection to MCP server.
|
||||
|
||||
Returns:
|
||||
Self for method chaining.
|
||||
|
||||
Raises:
|
||||
ConnectionError: If connection fails.
|
||||
ImportError: If MCP SDK not available.
|
||||
"""
|
||||
if self._connected:
|
||||
return self
|
||||
|
||||
try:
|
||||
from mcp.client.sse import sse_client
|
||||
|
||||
self._transport_context = sse_client(
|
||||
self.url,
|
||||
headers=self.headers if self.headers else None,
|
||||
terminate_on_close=True,
|
||||
)
|
||||
|
||||
read, write = await self._transport_context.__aenter__()
|
||||
|
||||
self._set_streams(read=read, write=write)
|
||||
|
||||
return self
|
||||
|
||||
except ImportError as e:
|
||||
raise ImportError(
|
||||
"MCP library not available. Please install with: pip install mcp"
|
||||
) from e
|
||||
except Exception as e:
|
||||
self._clear_streams()
|
||||
raise ConnectionError(f"Failed to connect to SSE MCP server: {e}") from e
|
||||
|
||||
async def disconnect(self) -> None:
|
||||
"""Close SSE connection."""
|
||||
if not self._connected:
|
||||
return
|
||||
|
||||
try:
|
||||
self._clear_streams()
|
||||
if self._transport_context is not None:
|
||||
await self._transport_context.__aexit__(None, None, None)
|
||||
|
||||
except Exception as e:
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.warning(f"Error during SSE transport disconnect: {e}")
|
||||
|
||||
async def __aenter__(self) -> Self:
|
||||
"""Async context manager entry."""
|
||||
return await self.connect()
|
||||
|
||||
async def __aexit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_val: BaseException | None,
|
||||
exc_tb: Any,
|
||||
) -> None:
|
||||
"""Async context manager exit."""
|
||||
await self.disconnect()
|
||||
153
lib/crewai/src/crewai/mcp/transports/stdio.py
Normal file
153
lib/crewai/src/crewai/mcp/transports/stdio.py
Normal file
@@ -0,0 +1,153 @@
|
||||
"""Stdio transport for MCP servers running as local processes."""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import subprocess
|
||||
from typing import Any
|
||||
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.mcp.transports.base import BaseTransport, TransportType
|
||||
|
||||
|
||||
class StdioTransport(BaseTransport):
|
||||
"""Stdio transport for connecting to local MCP servers.
|
||||
|
||||
This transport connects to MCP servers running as local processes,
|
||||
communicating via standard input/output streams. Supports Python,
|
||||
Node.js, and other command-line servers.
|
||||
|
||||
Example:
|
||||
```python
|
||||
transport = StdioTransport(
|
||||
command="python",
|
||||
args=["path/to/server.py"],
|
||||
env={"API_KEY": "..."}
|
||||
)
|
||||
async with transport:
|
||||
# Use transport...
|
||||
```
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
command: str,
|
||||
args: list[str] | None = None,
|
||||
env: dict[str, str] | None = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize stdio transport.
|
||||
|
||||
Args:
|
||||
command: Command to execute (e.g., "python", "node", "npx").
|
||||
args: Command arguments (e.g., ["server.py"] or ["-y", "@mcp/server"]).
|
||||
env: Environment variables to pass to the process.
|
||||
**kwargs: Additional transport options.
|
||||
"""
|
||||
super().__init__(**kwargs)
|
||||
self.command = command
|
||||
self.args = args or []
|
||||
self.env = env or {}
|
||||
self._process: subprocess.Popen[bytes] | None = None
|
||||
self._transport_context: Any = None
|
||||
|
||||
@property
|
||||
def transport_type(self) -> TransportType:
|
||||
"""Return the transport type."""
|
||||
return TransportType.STDIO
|
||||
|
||||
async def connect(self) -> Self:
|
||||
"""Start the MCP server process and establish connection.
|
||||
|
||||
Returns:
|
||||
Self for method chaining.
|
||||
|
||||
Raises:
|
||||
ConnectionError: If process fails to start.
|
||||
ImportError: If MCP SDK not available.
|
||||
"""
|
||||
if self._connected:
|
||||
return self
|
||||
|
||||
try:
|
||||
from mcp import StdioServerParameters
|
||||
from mcp.client.stdio import stdio_client
|
||||
|
||||
process_env = os.environ.copy()
|
||||
process_env.update(self.env)
|
||||
|
||||
server_params = StdioServerParameters(
|
||||
command=self.command,
|
||||
args=self.args,
|
||||
env=process_env if process_env else None,
|
||||
)
|
||||
self._transport_context = stdio_client(server_params)
|
||||
|
||||
try:
|
||||
read, write = await self._transport_context.__aenter__()
|
||||
except Exception as e:
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
self._transport_context = None
|
||||
raise ConnectionError(
|
||||
f"Failed to enter stdio transport context: {e}"
|
||||
) from e
|
||||
|
||||
self._set_streams(read=read, write=write)
|
||||
|
||||
return self
|
||||
|
||||
except ImportError as e:
|
||||
raise ImportError(
|
||||
"MCP library not available. Please install with: pip install mcp"
|
||||
) from e
|
||||
except Exception as e:
|
||||
self._clear_streams()
|
||||
if self._transport_context is not None:
|
||||
self._transport_context = None
|
||||
raise ConnectionError(f"Failed to start MCP server process: {e}") from e
|
||||
|
||||
async def disconnect(self) -> None:
|
||||
"""Terminate the MCP server process and close connection."""
|
||||
if not self._connected:
|
||||
return
|
||||
|
||||
try:
|
||||
self._clear_streams()
|
||||
|
||||
if self._transport_context is not None:
|
||||
await self._transport_context.__aexit__(None, None, None)
|
||||
|
||||
if self._process is not None:
|
||||
try:
|
||||
self._process.terminate()
|
||||
try:
|
||||
await asyncio.wait_for(self._process.wait(), timeout=5.0)
|
||||
except asyncio.TimeoutError:
|
||||
self._process.kill()
|
||||
await self._process.wait()
|
||||
# except ProcessLookupError:
|
||||
# pass
|
||||
finally:
|
||||
self._process = None
|
||||
|
||||
except Exception as e:
|
||||
# Log but don't raise - cleanup should be best effort
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.warning(f"Error during stdio transport disconnect: {e}")
|
||||
|
||||
async def __aenter__(self) -> Self:
|
||||
"""Async context manager entry."""
|
||||
return await self.connect()
|
||||
|
||||
async def __aexit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_val: BaseException | None,
|
||||
exc_tb: Any,
|
||||
) -> None:
|
||||
"""Async context manager exit."""
|
||||
await self.disconnect()
|
||||
@@ -1,21 +1,75 @@
|
||||
"""Utility functions for the crewai project module."""
|
||||
|
||||
from collections.abc import Callable
|
||||
from functools import lru_cache
|
||||
from typing import ParamSpec, TypeVar, cast
|
||||
from functools import wraps
|
||||
from typing import Any, ParamSpec, TypeVar, cast
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
|
||||
|
||||
P = ParamSpec("P")
|
||||
R = TypeVar("R")
|
||||
cache = CacheHandler()
|
||||
|
||||
|
||||
def _make_hashable(arg: Any) -> Any:
|
||||
"""Convert argument to hashable form for caching.
|
||||
|
||||
Args:
|
||||
arg: The argument to convert.
|
||||
|
||||
Returns:
|
||||
Hashable representation of the argument.
|
||||
"""
|
||||
if isinstance(arg, BaseModel):
|
||||
return arg.model_dump_json()
|
||||
if isinstance(arg, dict):
|
||||
return tuple(sorted((k, _make_hashable(v)) for k, v in arg.items()))
|
||||
if isinstance(arg, list):
|
||||
return tuple(_make_hashable(item) for item in arg)
|
||||
if hasattr(arg, "__dict__"):
|
||||
return ("__instance__", id(arg))
|
||||
return arg
|
||||
|
||||
|
||||
def memoize(meth: Callable[P, R]) -> Callable[P, R]:
|
||||
"""Memoize a method by caching its results based on arguments.
|
||||
|
||||
Handles Pydantic BaseModel instances by converting them to JSON strings
|
||||
before hashing for cache lookup.
|
||||
|
||||
Args:
|
||||
meth: The method to memoize.
|
||||
|
||||
Returns:
|
||||
A memoized version of the method that caches results.
|
||||
"""
|
||||
return cast(Callable[P, R], lru_cache(typed=True)(meth))
|
||||
|
||||
@wraps(meth)
|
||||
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
|
||||
"""Wrapper that converts arguments to hashable form before caching.
|
||||
|
||||
Args:
|
||||
*args: Positional arguments to the memoized method.
|
||||
**kwargs: Keyword arguments to the memoized method.
|
||||
|
||||
Returns:
|
||||
The result of the memoized method call.
|
||||
"""
|
||||
hashable_args = tuple(_make_hashable(arg) for arg in args)
|
||||
hashable_kwargs = tuple(
|
||||
sorted((k, _make_hashable(v)) for k, v in kwargs.items())
|
||||
)
|
||||
cache_key = str((hashable_args, hashable_kwargs))
|
||||
|
||||
cached_result: R | None = cache.read(tool=meth.__name__, input=cache_key)
|
||||
if cached_result is not None:
|
||||
return cached_result
|
||||
|
||||
result = meth(*args, **kwargs)
|
||||
cache.add(tool=meth.__name__, input=cache_key, output=result)
|
||||
return result
|
||||
|
||||
return cast(Callable[P, R], wrapper)
|
||||
|
||||
@@ -67,31 +67,44 @@ def _prepare_documents_for_chromadb(
|
||||
ids: list[str] = []
|
||||
texts: list[str] = []
|
||||
metadatas: list[Mapping[str, str | int | float | bool]] = []
|
||||
seen_ids: dict[str, int] = {}
|
||||
|
||||
try:
|
||||
for doc in documents:
|
||||
if "doc_id" in doc:
|
||||
doc_id = str(doc["doc_id"])
|
||||
else:
|
||||
metadata = doc.get("metadata")
|
||||
if metadata and isinstance(metadata, dict) and "doc_id" in metadata:
|
||||
doc_id = str(metadata["doc_id"])
|
||||
else:
|
||||
content_for_hash = doc["content"]
|
||||
if metadata:
|
||||
metadata_str = json.dumps(metadata, sort_keys=True)
|
||||
content_for_hash = f"{content_for_hash}|{metadata_str}"
|
||||
doc_id = hashlib.sha256(content_for_hash.encode()).hexdigest()
|
||||
|
||||
for doc in documents:
|
||||
if "doc_id" in doc:
|
||||
ids.append(doc["doc_id"])
|
||||
else:
|
||||
content_for_hash = doc["content"]
|
||||
metadata = doc.get("metadata")
|
||||
if metadata:
|
||||
metadata_str = json.dumps(metadata, sort_keys=True)
|
||||
content_for_hash = f"{content_for_hash}|{metadata_str}"
|
||||
|
||||
content_hash = hashlib.blake2b(
|
||||
content_for_hash.encode(), digest_size=32
|
||||
).hexdigest()
|
||||
ids.append(content_hash)
|
||||
|
||||
texts.append(doc["content"])
|
||||
metadata = doc.get("metadata")
|
||||
if metadata:
|
||||
if isinstance(metadata, list):
|
||||
metadatas.append(metadata[0] if metadata and metadata[0] else {})
|
||||
if isinstance(metadata, list):
|
||||
processed_metadata = metadata[0] if metadata and metadata[0] else {}
|
||||
else:
|
||||
processed_metadata = metadata
|
||||
else:
|
||||
metadatas.append(metadata)
|
||||
else:
|
||||
metadatas.append({})
|
||||
processed_metadata = {}
|
||||
|
||||
if doc_id in seen_ids:
|
||||
idx = seen_ids[doc_id]
|
||||
texts[idx] = doc["content"]
|
||||
metadatas[idx] = processed_metadata
|
||||
else:
|
||||
idx = len(ids)
|
||||
ids.append(doc_id)
|
||||
texts.append(doc["content"])
|
||||
metadatas.append(processed_metadata)
|
||||
seen_ids[doc_id] = idx
|
||||
except Exception as e:
|
||||
raise ValueError(f"Error preparing documents for ChromaDB: {e}") from e
|
||||
|
||||
return PreparedDocuments(ids, texts, metadatas)
|
||||
|
||||
|
||||
@@ -525,7 +525,11 @@ class Task(BaseModel):
|
||||
tools=tools,
|
||||
)
|
||||
|
||||
pydantic_output, json_output = self._export_output(result)
|
||||
if not self._guardrails and not self._guardrail:
|
||||
pydantic_output, json_output = self._export_output(result)
|
||||
else:
|
||||
pydantic_output, json_output = None, None
|
||||
|
||||
task_output = TaskOutput(
|
||||
name=self.name or self.description,
|
||||
description=self.description,
|
||||
|
||||
162
lib/crewai/src/crewai/tools/mcp_native_tool.py
Normal file
162
lib/crewai/src/crewai/tools/mcp_native_tool.py
Normal file
@@ -0,0 +1,162 @@
|
||||
"""Native MCP tool wrapper for CrewAI agents.
|
||||
|
||||
This module provides a tool wrapper that reuses existing MCP client sessions
|
||||
for better performance and connection management.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from typing import Any
|
||||
|
||||
from crewai.tools import BaseTool
|
||||
|
||||
|
||||
class MCPNativeTool(BaseTool):
|
||||
"""Native MCP tool that reuses client sessions.
|
||||
|
||||
This tool wrapper is used when agents connect to MCP servers using
|
||||
structured configurations. It reuses existing client sessions for
|
||||
better performance and proper connection lifecycle management.
|
||||
|
||||
Unlike MCPToolWrapper which connects on-demand, this tool uses
|
||||
a shared MCP client instance that maintains a persistent connection.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
mcp_client: Any,
|
||||
tool_name: str,
|
||||
tool_schema: dict[str, Any],
|
||||
server_name: str,
|
||||
) -> None:
|
||||
"""Initialize native MCP tool.
|
||||
|
||||
Args:
|
||||
mcp_client: MCPClient instance with active session.
|
||||
tool_name: Original name of the tool on the MCP server.
|
||||
tool_schema: Schema information for the tool.
|
||||
server_name: Name of the MCP server for prefixing.
|
||||
"""
|
||||
# Create tool name with server prefix to avoid conflicts
|
||||
prefixed_name = f"{server_name}_{tool_name}"
|
||||
|
||||
# Handle args_schema properly - BaseTool expects a BaseModel subclass
|
||||
args_schema = tool_schema.get("args_schema")
|
||||
|
||||
# Only pass args_schema if it's provided
|
||||
kwargs = {
|
||||
"name": prefixed_name,
|
||||
"description": tool_schema.get(
|
||||
"description", f"Tool {tool_name} from {server_name}"
|
||||
),
|
||||
}
|
||||
|
||||
if args_schema is not None:
|
||||
kwargs["args_schema"] = args_schema
|
||||
|
||||
super().__init__(**kwargs)
|
||||
|
||||
# Set instance attributes after super().__init__
|
||||
self._mcp_client = mcp_client
|
||||
self._original_tool_name = tool_name
|
||||
self._server_name = server_name
|
||||
# self._logger = logging.getLogger(__name__)
|
||||
|
||||
@property
|
||||
def mcp_client(self) -> Any:
|
||||
"""Get the MCP client instance."""
|
||||
return self._mcp_client
|
||||
|
||||
@property
|
||||
def original_tool_name(self) -> str:
|
||||
"""Get the original tool name."""
|
||||
return self._original_tool_name
|
||||
|
||||
@property
|
||||
def server_name(self) -> str:
|
||||
"""Get the server name."""
|
||||
return self._server_name
|
||||
|
||||
def _run(self, **kwargs) -> str:
|
||||
"""Execute tool using the MCP client session.
|
||||
|
||||
Args:
|
||||
**kwargs: Arguments to pass to the MCP tool.
|
||||
|
||||
Returns:
|
||||
Result from the MCP tool execution.
|
||||
"""
|
||||
try:
|
||||
try:
|
||||
asyncio.get_running_loop()
|
||||
|
||||
import concurrent.futures
|
||||
|
||||
with concurrent.futures.ThreadPoolExecutor() as executor:
|
||||
coro = self._run_async(**kwargs)
|
||||
future = executor.submit(asyncio.run, coro)
|
||||
return future.result()
|
||||
except RuntimeError:
|
||||
return asyncio.run(self._run_async(**kwargs))
|
||||
|
||||
except Exception as e:
|
||||
raise RuntimeError(
|
||||
f"Error executing MCP tool {self.original_tool_name}: {e!s}"
|
||||
) from e
|
||||
|
||||
async def _run_async(self, **kwargs) -> str:
|
||||
"""Async implementation of tool execution.
|
||||
|
||||
Args:
|
||||
**kwargs: Arguments to pass to the MCP tool.
|
||||
|
||||
Returns:
|
||||
Result from the MCP tool execution.
|
||||
"""
|
||||
# Note: Since we use asyncio.run() which creates a new event loop each time,
|
||||
# Always reconnect on-demand because asyncio.run() creates new event loops per call
|
||||
# All MCP transport context managers (stdio, streamablehttp_client, sse_client)
|
||||
# use anyio.create_task_group() which can't span different event loops
|
||||
if self._mcp_client.connected:
|
||||
await self._mcp_client.disconnect()
|
||||
|
||||
await self._mcp_client.connect()
|
||||
|
||||
try:
|
||||
result = await self._mcp_client.call_tool(self.original_tool_name, kwargs)
|
||||
|
||||
except Exception as e:
|
||||
error_str = str(e).lower()
|
||||
if (
|
||||
"not connected" in error_str
|
||||
or "connection" in error_str
|
||||
or "send" in error_str
|
||||
):
|
||||
await self._mcp_client.disconnect()
|
||||
await self._mcp_client.connect()
|
||||
# Retry the call
|
||||
result = await self._mcp_client.call_tool(
|
||||
self.original_tool_name, kwargs
|
||||
)
|
||||
else:
|
||||
raise
|
||||
|
||||
finally:
|
||||
# Always disconnect after tool call to ensure clean context manager lifecycle
|
||||
# This prevents "exit cancel scope in different task" errors
|
||||
# All transport context managers must be exited in the same event loop they were entered
|
||||
await self._mcp_client.disconnect()
|
||||
|
||||
# Extract result content
|
||||
if isinstance(result, str):
|
||||
return result
|
||||
|
||||
# Handle various result formats
|
||||
if hasattr(result, "content") and result.content:
|
||||
if isinstance(result.content, list) and len(result.content) > 0:
|
||||
content_item = result.content[0]
|
||||
if hasattr(content_item, "text"):
|
||||
return str(content_item.text)
|
||||
return str(content_item)
|
||||
return str(result.content)
|
||||
|
||||
return str(result)
|
||||
@@ -22,12 +22,12 @@
|
||||
"summarize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
|
||||
"summary": "This is a summary of our conversation so far:\n{merged_summary}",
|
||||
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
|
||||
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
|
||||
"formatted_task_instructions": "Ensure your final answer strictly adheres to the following OpenAPI schema: {output_format}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.",
|
||||
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
|
||||
"feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary.",
|
||||
"lite_agent_system_prompt_with_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
|
||||
"lite_agent_system_prompt_without_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
|
||||
"lite_agent_response_format": "\nIMPORTANT: Your final answer MUST contain all the information requested in the following format: {response_format}\n\nIMPORTANT: Ensure the final output does not include any code block markers like ```json or ```python.",
|
||||
"lite_agent_response_format": "Ensure your final answer strictly adheres to the following OpenAPI schema: {response_format}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.",
|
||||
"knowledge_search_query": "The original query is: {task_prompt}.",
|
||||
"knowledge_search_query_system_prompt": "Your goal is to rewrite the user query so that it is optimized for retrieval from a vector database. Consider how the query will be used to find relevant documents, and aim to make it more specific and context-aware. \n\n Do not include any other text than the rewritten query, especially any preamble or postamble and only add expected output format if its relevant to the rewritten query. \n\n Focus on the key words of the intended task and to retrieve the most relevant information. \n\n There will be some extra context provided that might need to be removed such as expected_output formats structured_outputs and other instructions."
|
||||
},
|
||||
|
||||
@@ -10,9 +10,9 @@ from pydantic import BaseModel, ValidationError
|
||||
from typing_extensions import Unpack
|
||||
|
||||
from crewai.agents.agent_builder.utilities.base_output_converter import OutputConverter
|
||||
from crewai.utilities.i18n import get_i18n
|
||||
from crewai.utilities.internal_instructor import InternalInstructor
|
||||
from crewai.utilities.printer import Printer
|
||||
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -22,6 +22,7 @@ if TYPE_CHECKING:
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
|
||||
_JSON_PATTERN: Final[re.Pattern[str]] = re.compile(r"({.*})", re.DOTALL)
|
||||
_I18N = get_i18n()
|
||||
|
||||
|
||||
class ConverterError(Exception):
|
||||
@@ -87,8 +88,7 @@ class Converter(OutputConverter):
|
||||
result = self.model.model_validate(result)
|
||||
elif isinstance(result, str):
|
||||
try:
|
||||
parsed = json.loads(result)
|
||||
result = self.model.model_validate(parsed)
|
||||
result = self.model.model_validate_json(result)
|
||||
except Exception as parse_err:
|
||||
raise ConverterError(
|
||||
f"Failed to convert partial JSON result into Pydantic: {parse_err}"
|
||||
@@ -172,6 +172,16 @@ def convert_to_model(
|
||||
model = output_pydantic or output_json
|
||||
if model is None:
|
||||
return result
|
||||
|
||||
if converter_cls:
|
||||
return convert_with_instructions(
|
||||
result=result,
|
||||
model=model,
|
||||
is_json_output=bool(output_json),
|
||||
agent=agent,
|
||||
converter_cls=converter_cls,
|
||||
)
|
||||
|
||||
try:
|
||||
escaped_result = json.dumps(json.loads(result, strict=False))
|
||||
return validate_model(
|
||||
@@ -251,7 +261,7 @@ def handle_partial_json(
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
except ValidationError:
|
||||
pass
|
||||
raise
|
||||
except Exception as e:
|
||||
Printer().print(
|
||||
content=f"Unexpected error during partial JSON handling: {type(e).__name__}: {e}. Attempting alternative conversion method.",
|
||||
@@ -335,25 +345,26 @@ def get_conversion_instructions(
|
||||
Returns:
|
||||
|
||||
"""
|
||||
instructions = "Please convert the following text into valid JSON."
|
||||
instructions = ""
|
||||
if (
|
||||
llm
|
||||
and not isinstance(llm, str)
|
||||
and hasattr(llm, "supports_function_calling")
|
||||
and llm.supports_function_calling()
|
||||
):
|
||||
model_schema = PydanticSchemaParser(model=model).get_schema()
|
||||
instructions += (
|
||||
f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
|
||||
f"Use this format exactly:\n```json\n{model_schema}\n```"
|
||||
schema_dict = generate_model_description(model)
|
||||
schema = json.dumps(schema_dict, indent=2)
|
||||
formatted_task_instructions = _I18N.slice("formatted_task_instructions").format(
|
||||
output_format=schema
|
||||
)
|
||||
instructions += formatted_task_instructions
|
||||
else:
|
||||
model_description = generate_model_description(model)
|
||||
schema_json = json.dumps(model_description["json_schema"]["schema"], indent=2)
|
||||
instructions += (
|
||||
f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
|
||||
f"Use this format exactly:\n```json\n{schema_json}\n```"
|
||||
schema_json = json.dumps(model_description, indent=2)
|
||||
formatted_task_instructions = _I18N.slice("formatted_task_instructions").format(
|
||||
output_format=schema_json
|
||||
)
|
||||
instructions += formatted_task_instructions
|
||||
return instructions
|
||||
|
||||
|
||||
|
||||
@@ -2117,15 +2117,14 @@ def test_agent_with_only_crewai_knowledge():
|
||||
goal="Provide information based on knowledge sources",
|
||||
backstory="You have access to specific knowledge sources.",
|
||||
llm=LLM(
|
||||
model="openrouter/openai/gpt-4o-mini",
|
||||
api_key=os.getenv("OPENROUTER_API_KEY"),
|
||||
model="gpt-4o-mini",
|
||||
),
|
||||
)
|
||||
|
||||
# Create a task that requires the agent to use the knowledge
|
||||
task = Task(
|
||||
description="What is Vidit's favorite color?",
|
||||
expected_output="Vidit's favorclearite color.",
|
||||
expected_output="Vidit's favorite color.",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
|
||||
@@ -382,8 +382,8 @@ def test_guardrail_is_called_using_string():
|
||||
assert not guardrail_events["completed"][0].success
|
||||
assert guardrail_events["completed"][1].success
|
||||
assert (
|
||||
"Here are the top 10 best soccer players in the world, focusing exclusively on Brazilian players"
|
||||
in result.raw
|
||||
"top 10 best Brazilian soccer players" in result.raw or
|
||||
"Brazilian players" in result.raw
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,106 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"Reply with just
|
||||
the word: SUCCESS"}],"model":"claude-3-5-haiku-20241022","stream":false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '145'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
user-agent:
|
||||
- Anthropic/Python 0.71.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 0.71.0
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//dJDLasMwEEX/5a5lsF1nUe1KCARCu6gpFEoRgzTEamzJ1qNNCf734tDQ
|
||||
F10N3HNmBu4JgzfcQ0L3lA0XV8Wq6MgeclGXdVOVdQ0BayAxxL0qq9rcXm/vb2g37ehtc+xeHu+m
|
||||
4xYC6X3kxeIYac8QCL5fAorRxkQuQUB7l9glyKfTxU98XMh5SLQP6/WmbTE/C8TkRxWYoneQYGdU
|
||||
ysHhE0SeMjvNkC73vUA+f5UnWDfmpJI/sIuQVSOgSXesdGBK1jv1UygvPDCZ/9hld7nPY8cDB+rV
|
||||
avjrf9Gq+01nAZ/T96gRiBxerWaVLAdILE0ZCgbz/AEAAP//AwA4VVIcmwEAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 9997ac4cbfb443fa-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 04 Nov 2025 22:50:55 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- 87faf353-e074-4658-b885-bfac7aa5a7b5
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- '400000'
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- '400000'
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- '2025-11-04T22:50:55Z'
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- '80000'
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- '80000'
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- '2025-11-04T22:50:55Z'
|
||||
anthropic-ratelimit-requests-limit:
|
||||
- '4000'
|
||||
anthropic-ratelimit-requests-remaining:
|
||||
- '3999'
|
||||
anthropic-ratelimit-requests-reset:
|
||||
- '2025-11-04T22:50:54Z'
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- '480000'
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- '480000'
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- '2025-11-04T22:50:55Z'
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- req_011CUofgQ1jTrjQ2sveXU1cC
|
||||
retry-after:
|
||||
- '5'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '461'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,108 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"Say ''Hello World''
|
||||
and nothing else"}],"model":"claude-3-5-haiku-20241022","stream":false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '146'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
user-agent:
|
||||
- Anthropic/Python 0.71.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 0.71.0
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//dJBNS8QwEIb/irznFNqu3UPOHvYoeGhFJIRk2IZNk5pMRCn979LF4hee
|
||||
Bt7nmRl4F0zRkoeE8bpYqg5VV43aXUrV1u1tU7ctBJyFxJTPqm7u3OP9sTFD7uJwLkN/6seHQwcB
|
||||
fp9psyhnfSYIpOi3QOfsMuvAEDAxMAWGfFp2n+ltI9chcSLv400fk7dYnwUyx1kl0jkGSFCwiksK
|
||||
+ASZXgoFQ5CheC9Qrp/lAhfmworjhUKGbI4CRpuRlEmk2cWgfgr1zhNp+x/bd7f7NI80UdJeddNf
|
||||
/4s242+6CsTC36NOIFN6dYYUO0qQ2NqyOlms6wcAAAD//wMArYPuQZ8BAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 9997ac4268d972a4-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 04 Nov 2025 22:50:53 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Response-Tracked:
|
||||
- 'true'
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- 87faf353-e074-4658-b885-bfac7aa5a7b5
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- '400000'
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- '400000'
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- '2025-11-04T22:50:53Z'
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- '80000'
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- '80000'
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- '2025-11-04T22:50:53Z'
|
||||
anthropic-ratelimit-requests-limit:
|
||||
- '4000'
|
||||
anthropic-ratelimit-requests-remaining:
|
||||
- '3999'
|
||||
anthropic-ratelimit-requests-reset:
|
||||
- '2025-11-04T22:50:53Z'
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- '480000'
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- '480000'
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- '2025-11-04T22:50:53Z'
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- req_011CUofgGvqpCtoqCmKcwXdK
|
||||
retry-after:
|
||||
- '7'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '441'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,107 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"Count from 1 to
|
||||
3"}],"model":"claude-3-5-haiku-20241022","stream":false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '129'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
user-agent:
|
||||
- Anthropic/Python 0.71.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 0.71.0
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA3SQy2rDMBBFf8XcTTcy2E5DQbuuUvrIJt3VRQh7EovYI1calbTB/14cGvqiq4F7
|
||||
zgzDPWLwLfXQaHqbWsoX+TLvrNunvCqqy7KoKii4FhpD3JmiXD2s7++u+HG1kcP77cGuN9tyuIaC
|
||||
vI00WxSj3REUgu/nwMboolgWKDSehVign45nX+gwk9PQuKFAFzFrfGJxvMu2wQ9ZmYnPFrrmmsua
|
||||
q5oXmJ4VovjRBLLRMzSIWyMpMD5BpJdE3BA0p75XSKev9BGOxyRG/J44QpdLhcY2HZkmkBXn2fwU
|
||||
ijMPZNv/2Hl3vk9jRwMF25vl8Nf/omX3m04KPsn3qCoUIoVX15ARRwEac5WtDS2m6QMAAP//AwC8
|
||||
QSj4vAEAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 9997ac45ea33de93-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 04 Nov 2025 22:50:54 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- 87faf353-e074-4658-b885-bfac7aa5a7b5
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- '400000'
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- '400000'
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- '2025-11-04T22:50:54Z'
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- '80000'
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- '80000'
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- '2025-11-04T22:50:54Z'
|
||||
anthropic-ratelimit-requests-limit:
|
||||
- '4000'
|
||||
anthropic-ratelimit-requests-remaining:
|
||||
- '3999'
|
||||
anthropic-ratelimit-requests-reset:
|
||||
- '2025-11-04T22:50:53Z'
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- '480000'
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- '480000'
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- '2025-11-04T22:50:54Z'
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- req_011CUofgKLSz1aDmL9qbduuR
|
||||
retry-after:
|
||||
- '6'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '801'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,116 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"user","content":"Reply with just the word: SUCCESS"}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '98'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jJJNT+MwEIbv+RXWnJtVW/oBvbEVPexyq2AFCEXGnqReHI9lTxAr1P++
|
||||
clKa8CVxyWGeecfvO5mXTAgwGlYC1E6yqr3N1ze/Z8+Xm+X1+cU1P23qXydSnz5ubvXP8o+FUVLQ
|
||||
w19U/Kr6oaj2FtmQ67AKKBnT1MlyMZ2eLU6WkxbUpNEmWeU5n1FeG2fy6Xg6y8fLfHJ6UO/IKIyw
|
||||
EneZEEK8tN/k02l8hpUYj14rNcYoK4TVsUkICGRTBWSMJrJ0DKMeKnKMrrW+vVqvL7bbIQ1YNlEm
|
||||
h66xdgCkc8QyJWx93R/I/ujEUuUDPcR3UiiNM3FXBJSRXHo1Mnlo6T4T4r5N3LwJAT5Q7blgesT2
|
||||
ucmsGwf9ngfwwJhY2kF5PvpkWKGRpbFxsDBQUu1Q98p+u7LRhgYgG0T+6OWz2V1s46rvjO+BUugZ
|
||||
deEDaqPe5u3bAqYj/KrtuOLWMEQMT0ZhwQZD+g0aS9nY7jQg/ouMdVEaV2HwwXT3UfpivhjLcoHz
|
||||
+Rlk++w/AAAA//8DAGpm+y8tAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 9997a55e8e85d954-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 04 Nov 2025 22:46:11 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=Ljd6Yw.qJgdFyASoXMTCHgeOXz.kPJVf9verbOyhWzg-1762296371-1.0.1.1-HutBZMolyfao56ckVJOnqKZgW8SSm0S_xA1DF2HIE4eYlqsLEi3OtkeTKNc536CxqhcmuTINB23o_A6nID5TAGpXCeNYBEgLJKiggQamQ9w;
|
||||
path=/; expires=Tue, 04-Nov-25 23:16:11 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=Tz6VwwwbLcFpqp9Poc_3sUeqc33hmGkTq8YCekrTAns-1762296371669-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '194'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '222'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999987'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999987'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_5f6372db5713441eb3ba1cc481aeb0fe
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,119 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"user","content":"Say ''Hello World'' and nothing
|
||||
else"}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '99'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jJJNT+MwEIbv+RXWnJtVGvoBvXLhS0Jc2EUIRcaepN51PJY9QaxQ/zty
|
||||
UpoUWGkvPviZd/y+43nLhACjYSNAbSWr1tv8/OF68Xr/wtKfXMaLhb6xv+71Dd1dGV3cwiwp6Pk3
|
||||
Kv5Q/VDUeotsyA1YBZSMqet8vSrLs9XJet6DljTaJGs85wvKW+NMXhblIi/W+fx0r96SURhhIx4z
|
||||
IYR468/k02l8hY0oZh83LcYoG4TNoUgICGTTDcgYTWTpGGYjVOQYXW/9Aq0l8ZOC1dOKgHUXZXLp
|
||||
OmsnQDpHLFPK3tvTnuwObiw1PtBz/CSF2jgTt1VAGcmllyOTh57uMiGe+tTdURDwgVrPFdMf7J+b
|
||||
L4d2MM56hOWeMbG0E8169k2zSiNLY+NkaKCk2qIeleOEZacNTUA2ifzVy3e9h9jGNf/TfgRKoWfU
|
||||
lQ+ojTrOO5YFTIv4r7LDiHvDEDG8GIUVGwzpGzTWsrPDekD8GxnbqjauweCDGXak9tVyVch6hcvl
|
||||
GWS77B0AAP//AwC41MWDMQMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 9997a563da0042a3-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 04 Nov 2025 22:46:12 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=71X9mE9Hmg7j990_h7K01BESKxBp2D4QYv9j1PmSm6I-1762296372-1.0.1.1-V7pmEV0YDa.OeJ8Pht15YJt2XRqusPvH52QlHRhBCRAoGIkSmqMCG.rYS44HRNCR3Kf2D4UeRaNaUMgws1tL74cvebKOa_aGVjBw_O2okGc;
|
||||
path=/; expires=Tue, 04-Nov-25 23:16:12 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=.9w.Y6a8QsaD_7IAK4u3JaHCreibv0u6ujLC7HVF2nY-1762296372265-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
X-Response-Tracked:
|
||||
- 'true'
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '332'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '349'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999987'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999990'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_21f5fd685fdf43e7b06e4ccf5f796b96
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,116 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"user","content":"Count from 1 to 3"}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '82'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jJJBb9swDIXv/hUCz3YRu4nT5FpgC9DrLttQGIpM29pkUZDobEGR/z7I
|
||||
TmO364BdfODHR71H8yURAnQNewGqk6x6Z7LHr0/rc85lR8fdIXw7DWd3ePr869Pj6fClgzQq6PgD
|
||||
Fb+q7hT1ziBrshNWHiVjnJpvy6LYlffbYgQ91WiirHWcrSnrtdVZsSrW2Wqb5Q9XdUdaYYC9+J4I
|
||||
IcTL+I0+bY2/YS9W6WulxxBki7C/NQkBnkysgAxBB5aWIZ2hIstoR+t5KopU3N8tscdmCDJatIMx
|
||||
CyCtJZYx4mjs+UouNyuGWufpGN5JodFWh67yKAPZ+GxgcjDSSyLE8xh5eJMCnKfeccX0E8fn8vU0
|
||||
DuZFz/DhyphYmrlcFOkHw6oaWWoTFhsDJVWH9ayc1yuHWtMCJIvIf3v5aPYUW9v2f8bPQCl0jHXl
|
||||
PNZavc07t3mMV/ivttuKR8MQ0J+0woo1+vgbamzkYKbbgHAOjH3VaNuid15PB9K4alOuZFPiZrOD
|
||||
5JL8AQAA//8DAIyvq4AuAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 9997a567bce9c35e-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 04 Nov 2025 22:46:13 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=WbtKMfrbJkHmW8iHwTlAt1O0TT9hmE7i6Jc4CuzPFkk-1762296373-1.0.1.1-H4_jBpfR_9YQFFm2iDhVCcmwtOAfFhVkN6HaUsD3H8frMqxJjj7oiLathDv89L6e412o.pMtaQVL5e5XfVEv0diMAwtUsWsbzbTwF3rgkug;
|
||||
path=/; expires=Tue, 04-Nov-25 23:16:13 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=rxHDwk1CRF6MkO5Jc7ikrkXBxkrhhmf.yJD6Z94mvUI-1762296373153-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '552'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '601'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999992'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999992'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_f67a12044f894859b6b867e583e42e24
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
File diff suppressed because one or more lines are too long
@@ -1,35 +1,128 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
|
||||
body: '{"trace_id": "bf042234-54a3-4fc0-857d-1ae5585a174e", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-06T16:05:14.776800+00:00"}}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '434'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.3.0
|
||||
X-Crewai-Version:
|
||||
- 1.3.0
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"error":"bad_credentials","message":"Bad credentials"}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '55'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Thu, 06 Nov 2025 16:05:15 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 9e528076-59a8-4c21-a999-2367937321ed
|
||||
x-runtime:
|
||||
- '0.070063'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 401
|
||||
message: Unauthorized
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are test role. test backstory\nYour
|
||||
personal goal is: test goal\nTo give my best complete final answer to the task
|
||||
use the exact following format:\n\nThought: I now can give a great answer\nFinal
|
||||
Answer: Your final answer must be the great and the most complete as possible,
|
||||
it must be outcome described.\n\nI MUST use these formats, my job depends on
|
||||
it!"}, {"role": "user", "content": "\nCurrent Task: Summarize the given context
|
||||
in one sentence\n\nThis is the expect criteria for your final answer: A one-sentence
|
||||
summary\nyou MUST return the actual complete content as the final answer, not
|
||||
a summary.\n\nThis is the context you''re working with:\nThe quick brown fox
|
||||
jumps over the lazy dog. This sentence contains every letter of the alphabet.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-3.5-turbo"}'
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"},{"role":"user","content":"\nCurrent Task: Summarize the given
|
||||
context in one sentence\n\nThis is the expected criteria for your final answer:
|
||||
A one-sentence summary\nyou MUST return the actual complete content as the final
|
||||
answer, not a summary.\n\nThis is the context you''re working with:\nThe quick
|
||||
brown fox jumps over the lazy dog. This sentence contains every letter of the
|
||||
alphabet.\n\nBegin! This is VERY important to you, use the tools available and
|
||||
give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-3.5-turbo"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '961'
|
||||
- '963'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -39,30 +132,33 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7WTXzhDaFVbUrrQKXCo78KID8N9\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727213889,\n \"model\": \"gpt-3.5-turbo-0125\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer\\nFinal
|
||||
Answer: The quick brown fox jumps over the lazy dog. This sentence contains
|
||||
every letter of the alphabet.\",\n \"refusal\": null\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
190,\n \"completion_tokens\": 30,\n \"total_tokens\": 220,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": null\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFPBbtswDL37Kwidk6BOmgbLbRgwYLdtCLAVaxHIEm2rkUVVopOmRf59
|
||||
kJLG6dYBuxgwH9/z4yP9UgAIo8UShGolq87b8afbXTfbr74/89erb2o/axY0f7x1RkX+8VOMEoOq
|
||||
B1T8ypoo6rxFNuSOsAooGZNqubiZXl/Py3KegY402kRrPI9nk/mY+1DR+Kqczk/MlozCKJbwqwAA
|
||||
eMnP5NFpfBJLuBq9VjqMUTYolucmABHIpoqQMZrI0rEYDaAix+iy7S/QO40htWjgFoFl3EB62Rlr
|
||||
wQdSiBqYoDFbzB0VRobaOGlBurjDMLlzd+5zLnzMhSWsWoTH3qgNVIF2Dmp6goe+8xFoiyHLWPm8
|
||||
B03NBFatiRAxeVIIyZw0LgJuMezBIjMGoDqTpPWtrJAnl+MErPsoU5yut/YCkM4Ry7SOHOT9CTmc
|
||||
o7PU+EBV/IMqauNMbNcBZSSXYopMXmT0UADc5xX1b1IXPlDnec20wfy58kN51BPDVQzo7OYEMrG0
|
||||
Q306XYze0VtrZGlsvFiyUFK1qAfqcBGy14YugOJi6r/dvKd9nNy45n/kB0Ap9Ix67QNqo95OPLQF
|
||||
TD/Nv9rOKWfDImLYGoVrNhjSJjTWsrfHcxZxHxm7dW1cg8EHk286bbI4FL8BAAD//wMAHFSnRdID
|
||||
AAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85eb7568111cf3-GRU
|
||||
- 99a5d4d0bb8f7327-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -70,37 +166,54 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:38:09 GMT
|
||||
- Thu, 06 Nov 2025 16:05:16 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Thu, 06-Nov-25 16:35:16 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-REDACTED
|
||||
openai-processing-ms:
|
||||
- '662'
|
||||
- '836'
|
||||
openai-project:
|
||||
- proj_REDACTED
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '983'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '50000000'
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '49999772'
|
||||
- '199785'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
- 8.64s
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 64ms
|
||||
x-request-id:
|
||||
- req_833406276d399714b624a32627fc5b4a
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_c302b31f8f804399ae05fc424215303a
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,54 +1,165 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"model": "openai/gpt-4o-mini", "messages": [{"role": "system", "content":
|
||||
"Your goal is to rewrite the user query so that it is optimized for retrieval
|
||||
from a vector database. Consider how the query will be used to find relevant
|
||||
documents, and aim to make it more specific and context-aware. \n\n Do not include
|
||||
any other text than the rewritten query, especially any preamble or postamble
|
||||
and only add expected output format if its relevant to the rewritten query.
|
||||
\n\n Focus on the key words of the intended task and to retrieve the most relevant
|
||||
information. \n\n There will be some extra context provided that might need
|
||||
to be removed such as expected_output formats structured_outputs and other instructions."},
|
||||
{"role": "user", "content": "The original query is: What is Vidit''s favorite
|
||||
color?\n\nThis is the expected criteria for your final answer: Vidit''s favorclearite
|
||||
color.\nyou MUST return the actual complete content as the final answer, not
|
||||
a summary.."}], "stream": false, "stop": ["\nObservation:"]}'
|
||||
body: '{"trace_id": "9d9d9d14-e5bc-44bc-8cfc-3df9ba4e6055", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-06T15:58:15.778396+00:00"},
|
||||
"ephemeral_trace_id": "9d9d9d14-e5bc-44bc-8cfc-3df9ba4e6055"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '488'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.3.0
|
||||
X-Crewai-Version:
|
||||
- 1.3.0
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"id":"f303021e-f1a0-4fd8-9c7d-8ba6779f8ad3","ephemeral_trace_id":"9d9d9d14-e5bc-44bc-8cfc-3df9ba4e6055","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.3.0","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.3.0","privacy_level":"standard"},"created_at":"2025-11-06T15:58:16.189Z","updated_at":"2025-11-06T15:58:16.189Z","access_code":"TRACE-c2990cd4d4","user_identifier":null}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '515'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Thu, 06 Nov 2025 15:58:16 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
etag:
|
||||
- W/"8df0b730688b8bc094b74c66a6293578"
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 38352441-7508-4e1e-9bff-77d1689dffdf
|
||||
x-runtime:
|
||||
- '0.085540'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 201
|
||||
message: Created
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"Your goal is to rewrite the user
|
||||
query so that it is optimized for retrieval from a vector database. Consider
|
||||
how the query will be used to find relevant documents, and aim to make it more
|
||||
specific and context-aware. \n\n Do not include any other text than the rewritten
|
||||
query, especially any preamble or postamble and only add expected output format
|
||||
if its relevant to the rewritten query. \n\n Focus on the key words of the intended
|
||||
task and to retrieve the most relevant information. \n\n There will be some
|
||||
extra context provided that might need to be removed such as expected_output
|
||||
formats structured_outputs and other instructions."},{"role":"user","content":"The
|
||||
original query is: What is Vidit''s favorite color?\n\nThis is the expected
|
||||
criteria for your final answer: Vidit''s favorite color.\nyou MUST return the
|
||||
actual complete content as the final answer, not a summary.."}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- '*/*'
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1017'
|
||||
- '950'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- openrouter.ai
|
||||
http-referer:
|
||||
- https://litellm.ai
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- litellm/1.68.0
|
||||
x-title:
|
||||
- liteLLM
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://openrouter.ai/api/v1/chat/completions
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//4lKAAS4AAAAA//90kE1PIzEMhv8Kei97Sdnplwq5gTgAF8ShcFitRmnG
|
||||
nTFk4ihxq11V899Xs6gFJLja78djH8ANLFqKk+lqsZpP56vpYqJhublfP1eP65v1i79Lt9fdMwxS
|
||||
lj03lGHxkChe3cGgl4YCLCRRdPyzTTpZyKTnyDCQzQt5hYXvnJ576VMgZYkw8JmcUgP7XmvgO2FP
|
||||
BfbXAUHalGVTYOMuBIMtRy5dnckVibAoKgkG0Snvqf5my7GhP7CVQU+luJZgD8gSCBauFC7qoo40
|
||||
EpXiSPrEDeuPcrZ1e8msdOYlSIZBpu2uuHDEeWvi2L4NhuG3QflblPqRpaWcMv8P3Ka6ml/OLmaz
|
||||
6rKCwe6IkbL0SWuVV4pl/MNy5Di+6DRfGqioC+/Ci8p8NtcNqeNQxlTvfEfNSVwNX4R+1J/u+GAZ
|
||||
hn8AAAD//wMAIwJ79CICAAA=
|
||||
H4sIAAAAAAAAAwAAAP//jFJBbtswELzrFQQvvViBLMuy42sObYEWKIoiQFMEAkOu5G0oLkGu0xaB
|
||||
/15QciwlTYFceODsDGeG+5gJIdHInZB6r1j33uZX33+1H78y9tvVt+Lmpv68KrfXX96Xnz7cu61c
|
||||
JAbd/QTNT6wLTb23wEhuhHUAxZBUl5u6rKqqvqwHoCcDNtE6z3lFeY8O87Ioq7zY5MuTuN4Taohy
|
||||
J35kQgjxOJzJpzPwW+5EsXi66SFG1YHcnYeEkIFsupEqRoysHMvFBGpyDG6wfo0G+V0UrXqggAxC
|
||||
k6VwMZ8O0B6iSo7dwdoZoJwjVinx4PP2hBzPzix1PtBdfEGVLTqM+yaAiuSSi8jk5YAeMyFuhwYO
|
||||
z0JJH6j33DDdw/DccrMa9eRU/ISuTxgTKzsnbRevyDUGWKGNswqlVnoPZqJOfauDQZoB2Sz0v2Ze
|
||||
0x6Do+veIj8BWoNnMI0PYFA/DzyNBUhr+b+xc8mDYRkhPKCGhhFC+ggDrTrYcVlk/BMZ+qZF10Hw
|
||||
AceNaX2zrgvV1rBeX8rsmP0FAAD//wMA5SIzeT8DAAA=
|
||||
headers:
|
||||
Access-Control-Allow-Origin:
|
||||
- '*'
|
||||
CF-RAY:
|
||||
- 9402c9db99ec4722-BOM
|
||||
- 99a5ca96bb1443e7-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -56,73 +167,122 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 15 May 2025 12:55:14 GMT
|
||||
- Thu, 06 Nov 2025 15:58:16 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Thu, 06-Nov-25 16:28:16 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Accept-Encoding
|
||||
x-clerk-auth-message:
|
||||
- Invalid JWT form. A JWT consists of three parts separated by dots. (reason=token-invalid,
|
||||
token-carrier=header)
|
||||
x-clerk-auth-reason:
|
||||
- token-invalid
|
||||
x-clerk-auth-status:
|
||||
- signed-out
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- user-REDACTED
|
||||
openai-processing-ms:
|
||||
- '235'
|
||||
openai-project:
|
||||
- proj_REDACTED
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '420'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '199785'
|
||||
x-ratelimit-reset-requests:
|
||||
- 8.64s
|
||||
x-ratelimit-reset-tokens:
|
||||
- 64ms
|
||||
x-request-id:
|
||||
- req_9810e9721aa9463c930414ab5174ab61
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"model": "openai/gpt-4o-mini", "messages": [{"role": "system", "content":
|
||||
"You are Information Agent. You have access to specific knowledge sources.\nYour
|
||||
personal goal is: Provide information based on knowledge sources\nTo give my
|
||||
best complete final answer to the task respond using the exact following format:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: Your final answer must be the great
|
||||
and the most complete as possible, it must be outcome described.\n\nI MUST use
|
||||
these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent
|
||||
Task: What is Vidit''s favorite color?\n\nThis is the expected criteria for
|
||||
your final answer: Vidit''s favorclearite color.\nyou MUST return the actual
|
||||
complete content as the final answer, not a summary.\n\nBegin! This is VERY
|
||||
important to you, use the tools available and give your best Final Answer, your
|
||||
job depends on it!\n\nThought:"}], "stream": false, "stop": ["\nObservation:"]}'
|
||||
body: '{"messages":[{"role":"system","content":"You are Information Agent. You
|
||||
have access to specific knowledge sources.\nYour personal goal is: Provide information
|
||||
based on knowledge sources\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"},{"role":"user","content":"\nCurrent Task: What is Vidit''s
|
||||
favorite color?\n\nThis is the expected criteria for your final answer: Vidit''s
|
||||
favorite color.\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- '*/*'
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '951'
|
||||
- '884'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=REDACTED;
|
||||
_cfuvid=REDACTED
|
||||
host:
|
||||
- openrouter.ai
|
||||
http-referer:
|
||||
- https://litellm.ai
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- litellm/1.68.0
|
||||
x-title:
|
||||
- liteLLM
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://openrouter.ai/api/v1/chat/completions
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//4lKAAS4AAAAA///iQjABAAAA//90kN1qGzEQRl9FfNdyul4nday73ARy
|
||||
VUpLE2jLIu+O15NoZ4QkOy1moa/R1+uTlE1wnEB7qU/zc84cwB0cepLZfHm+XMwXy/nF7II/3d7V
|
||||
H+tOPvsS3le3d+keFjHpnjtKcPgQSa5uYDFoRwEOGkk8v+tjmZ3rbGBhWOj6ntoCh3bry1mrQwxU
|
||||
WAUWbSJfqIM7rbVot8otZbivBwTtY9J1hpNdCBYbFs7bJpHPKnDIRSMsxBfeU/OfX5aOfsBVFgPl
|
||||
7HuCOyBpIDj4nDkXL2WiUSkkE+mNEX00rRfT856MN/0EarzkR0rGfJNrFh/M1dPbmS/ccfnz63c2
|
||||
G7/XxIVMq0GT4WzWYUdnsEi02WUfjiLPjCz9czCO3y3yz1xomCx6SjHxE8omNtViVV/WdbWqYLE7
|
||||
CsSkQyxN0QeSPF2wmgyOxz3lK4uixYdTcrmyb7ubjornkKexrW+31L0UV+M/pr6ufxF51TKOfwEA
|
||||
AP//AwBybekMaAIAAA==
|
||||
H4sIAAAAAAAAAwAAAP//jFPBahsxEL37KwZderGN7dqO41vaUghtT4FCacIiS7PrSbQaVZq1swT/
|
||||
e9HayTptCr0ING/e6M2b0dMAQJFVa1Bmq8XUwY0+/tiXXy4/2BDN/p7izdY4/vrp29Wvmxbv1TAz
|
||||
eHOPRp5ZY8N1cCjE/gibiFowV51eLGfz+Xx5ueqAmi26TKuCjOY8qsnTaDaZzUeTi9F0dWJvmQwm
|
||||
tYafAwCAp+7MOr3FR7WGyfA5UmNKukK1fkkCUJFdjiidEiXRXtSwBw17Qd9JvwbPezDaQ0U7BA1V
|
||||
lg3apz1GgFv/mbx2cNXd1/CdLMm7BKXecSRBMOw4AiXwLBCajSPjWrBsmhq9oAWOsCeLroUHz3s/
|
||||
husSWm5gq3cIKaChkgx0ih4lZ1sUTS6B3nAjxweHcA21bmGDoDcOQRhC5B3ZLLjmiJApHNFCxBTY
|
||||
Jxyf9xuxbJLOnvvGuTNAe8+i88w6p+9OyOHFW8dViLxJf1BVSZ7StoioE/vsYxIOqkMPA4C7bobN
|
||||
q7GoELkOUgg/YPfcdLk61lP96vTofHEChUW7Pj6bvh++Ua842Xa2Bcpos0XbU/uV0Y0lPgMGZ13/
|
||||
reat2sfOyVf/U74HjMEgaIsQ0ZJ53XGfFjH/rH+lvbjcCVYJ444MFkIY8yQslrpxx31XqU2CdVGS
|
||||
rzCGSMelL0OxWE50ucTF4lINDoPfAAAA//8DAPFGfbMCBAAA
|
||||
headers:
|
||||
Access-Control-Allow-Origin:
|
||||
- '*'
|
||||
CF-RAY:
|
||||
- 9402c9e1b94a4722-BOM
|
||||
- 99a5ca9c5ef543e7-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -130,20 +290,47 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 15 May 2025 12:55:15 GMT
|
||||
- Thu, 06 Nov 2025 15:58:19 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Accept-Encoding
|
||||
x-clerk-auth-message:
|
||||
- Invalid JWT form. A JWT consists of three parts separated by dots. (reason=token-invalid,
|
||||
token-carrier=header)
|
||||
x-clerk-auth-reason:
|
||||
- token-invalid
|
||||
x-clerk-auth-status:
|
||||
- signed-out
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- user-REDACTED
|
||||
openai-processing-ms:
|
||||
- '1326'
|
||||
openai-project:
|
||||
- proj_REDACTED
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1754'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9998'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '199803'
|
||||
x-ratelimit-reset-requests:
|
||||
- 15.913s
|
||||
x-ratelimit-reset-tokens:
|
||||
- 59ms
|
||||
x-request-id:
|
||||
- req_f975e16b666e498b8bcfdfab525f71b3
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
|
||||
@@ -0,0 +1,202 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"trace_id": "1703c4e0-d3be-411c-85e7-48018c2df384", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-07T01:58:22.260309+00:00"}}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '434'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.3.0
|
||||
X-Crewai-Version:
|
||||
- 1.3.0
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"error":"bad_credentials","message":"Bad credentials"}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '55'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Fri, 07 Nov 2025 01:58:22 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 4124c4ce-02cf-4d08-9b0b-8983c2e9da6e
|
||||
x-runtime:
|
||||
- '0.073764'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 401
|
||||
message: Unauthorized
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"Say hello in one
|
||||
word"}],"model":"claude-3-5-haiku-20241022","stop_sequences":["\nObservation:","\nThought:"],"stream":false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '182'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
user-agent:
|
||||
- Anthropic/Python 0.71.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 0.71.0
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//dJBdS8QwEEX/y31Ope26u5J3dwWfBH0SCTEZtmHTpCYTXSn979LF4hc+
|
||||
DdxzZgbuiD5a8pAwXhdL1apaV512x1K1dXvZ1G0LAWch0eeDqpvN1f3q7bTPz91tc/ewLcfr/Xaz
|
||||
gwC/DzRblLM+EARS9HOgc3aZdWAImBiYAkM+jovPdJrJeUjckPfxAtOTQOY4qEQ6xwAJClZxSQGf
|
||||
INNLoWAIMhTvBcr5qRzhwlBYcTxSyJBNK2C06UiZRJpdDOqnUC88kbb/sWV3vk9DRz0l7dW6/+t/
|
||||
0ab7TSeBWPh7tBbIlF6dIcWOEiTmoqxOFtP0AQAA//8DAM5WvkqaAQAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99a939a5a931556e-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 07 Nov 2025 01:58:22 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- SCRUBBED-ORG-ID
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- '400000'
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- '400000'
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- '2025-11-07T01:58:22Z'
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- '80000'
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- '80000'
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- '2025-11-07T01:58:22Z'
|
||||
anthropic-ratelimit-requests-limit:
|
||||
- '4000'
|
||||
anthropic-ratelimit-requests-remaining:
|
||||
- '3999'
|
||||
anthropic-ratelimit-requests-reset:
|
||||
- '2025-11-07T01:58:22Z'
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- '480000'
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- '480000'
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- '2025-11-07T01:58:22Z'
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- req_011CUshbL7CEVoner91hUvxL
|
||||
retry-after:
|
||||
- '41'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '390'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,35 +1,37 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Scorer. You''re an
|
||||
expert scorer, specialized in scoring titles.\nYour personal goal is: Score
|
||||
the title\nTo give my best complete final answer to the task use the exact following
|
||||
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
|
||||
scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Give me an integer score between 1-5 for the following
|
||||
title: ''The impact of AI in the future of work''\n\nThis is the expect criteria
|
||||
for your final answer: The score of the title.\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\n\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}], "model": "gpt-4o"}'
|
||||
described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent
|
||||
Task: Give me an integer score between 1-5 for the following title: ''The impact
|
||||
of AI in the future of work''\n\nThis is the expected criteria for your final
|
||||
answer: The score of the title.\nyou MUST return the actual complete content
|
||||
as the final answer, not a summary.\nEnsure your final answer strictly adheres
|
||||
to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\":
|
||||
\"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '915'
|
||||
- '1394'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -39,29 +41,32 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7gN2SDetZsIJf8dMDl2RwE5Qyvp\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214503,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
|
||||
Answer: 4\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
186,\n \"completion_tokens\": 15,\n \"total_tokens\": 201,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_52a7f40b0b\"\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLBbpwwEL3zFaM5LxGw7GbDraoUpZGqtmoOTbMRcswAToxt2SZpu9p/
|
||||
rwybhbSJlAsS8+Y9vzczuwgARYUFIG+Z552R8cfrJvly+e3Hkj4/XC8Ntz//nIrvbXZxcfX1HBeB
|
||||
oe/uiftn1gnXnZHkhVYjzC0xT0E1PV1ny02yXq0HoNMVyUBrjI/zkzTuhBJxlmSrOMnjND/QWy04
|
||||
OSzgJgIA2A3fYFRV9AsLSBbPlY6cYw1hcWwCQKtlqCBzTjjPlMfFBHKtPKnB+1Wr+6b1BXwCpZ+A
|
||||
MwWNeCRg0IQAwJR7IrtV50IxCR+GvwJ2W3RcW9pike/nypbq3rEQT/VSzgCmlPYsjGfIdHtA9scU
|
||||
UjfG6jv3DxVroYRrS0vMaRUcO68NDug+ArgdptW/GAAaqzvjS68faHguO8tHPZy2NKHp5gB67Zmc
|
||||
6ss0W7yiV1bkmZBuNm/kjLdUTdRpOayvhJ4B0Sz1/25e0x6TC9W8R34COCfjqSqNpUrwl4mnNkvh
|
||||
iN9qO055MIyO7KPgVHpBNmyiopr1crwsdL+dp66shWrIGivG86pNmfNss0rrzTrDaB/9BQAA//8D
|
||||
AJ9ashhtAwAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85fa763ef91cf3-GRU
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -69,37 +74,54 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:23 GMT
|
||||
- Wed, 05 Nov 2025 22:10:56 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Wed, 05-Nov-25 22:40:56 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '194'
|
||||
- '770'
|
||||
openai-project:
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '796'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999781'
|
||||
- '199687'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 93ms
|
||||
x-request-id:
|
||||
- req_5345a8fffc6276bb9d0a23edecd063ff
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,24 +1,127 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Info Gatherer. You
|
||||
gather and summarize information quickly.\nYour personal goal is: Provide brief
|
||||
information\n\nYou ONLY have access to the following tools, and should NEVER
|
||||
make up tools that are not listed here:\n\nTool Name: search_web\nTool Arguments:
|
||||
{''query'': {''description'': None, ''type'': ''str''}}\nTool Description: Search
|
||||
the web for information about a topic.\n\nIMPORTANT: Use the following format
|
||||
in your response:\n\n```\nThought: you should always think about what to do\nAction:
|
||||
the action to take, only one name of [search_web], just the name, exactly as
|
||||
it''s written.\nAction Input: the input to the action, just a simple JSON object,
|
||||
enclosed in curly braces, using \" to wrap keys and values.\nObservation: the
|
||||
result of the action\n```\n\nOnce all necessary information is gathered, return
|
||||
the following format:\n\n```\nThought: I now know the final answer\nFinal Answer:
|
||||
the final answer to the original input question\n```\nIMPORTANT: Your final
|
||||
answer MUST contain all the information requested in the following format: {\n \"summary\":
|
||||
str,\n \"confidence\": int\n}\n\nIMPORTANT: Ensure the final output does not
|
||||
include any code block markers like ```json or ```python."}, {"role": "user",
|
||||
"content": "What is the population of Tokyo? Return your structured output in
|
||||
JSON format with the following fields: summary, confidence"}], "model": "gpt-4o-mini",
|
||||
"stop": []}'
|
||||
body: '{"trace_id": "REDACTED", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-05T22:53:58.718883+00:00"}}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '434'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.3.0
|
||||
X-Crewai-Version:
|
||||
- 1.3.0
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"error":"bad_credentials","message":"Bad credentials"}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '55'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 22:53:59 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- REDACTED
|
||||
x-runtime:
|
||||
- '0.077031'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 401
|
||||
message: Unauthorized
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Info Gatherer. You gather
|
||||
and summarize information quickly.\nYour personal goal is: Provide brief information\n\nYou
|
||||
ONLY have access to the following tools, and should NEVER make up tools that
|
||||
are not listed here:\n\nTool Name: search_web\nTool Arguments: {''query'': {''description'':
|
||||
None, ''type'': ''str''}}\nTool Description: Search the web for information
|
||||
about a topic.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
|
||||
you should always think about what to do\nAction: the action to take, only one
|
||||
name of [search_web], just the name, exactly as it''s written.\nAction Input:
|
||||
the input to the action, just a simple JSON object, enclosed in curly braces,
|
||||
using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
|
||||
all necessary information is gathered, return the following format:\n\n```\nThought:
|
||||
I now know the final answer\nFinal Answer: the final answer to the original
|
||||
input question\n```Ensure your final answer strictly adheres to the following
|
||||
OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\":
|
||||
\"SimpleOutput\",\n \"strict\": true,\n \"schema\": {\n \"description\":
|
||||
\"Simple structure for agent outputs.\",\n \"properties\": {\n \"summary\":
|
||||
{\n \"description\": \"A brief summary of findings\",\n \"title\":
|
||||
\"Summary\",\n \"type\": \"string\"\n },\n \"confidence\":
|
||||
{\n \"description\": \"Confidence level from 1-100\",\n \"title\":
|
||||
\"Confidence\",\n \"type\": \"integer\"\n }\n },\n \"required\":
|
||||
[\n \"summary\",\n \"confidence\"\n ],\n \"title\":
|
||||
\"SimpleOutput\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output.
|
||||
Ensure the final output does not include any code block markers like ```json
|
||||
or ```python."},{"role":"user","content":"What is the population of Tokyo? Return
|
||||
your structured output in JSON format with the following fields: summary, confidence"}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -27,13 +130,13 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1447'
|
||||
- '2157'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.68.2
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -43,142 +146,9 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.68.2
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.8
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-BHEkRwFyeEpDZhOMkhHgCJSR2PF2v\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1743447967,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: I need to find the current population
|
||||
of Tokyo.\\nAction: search_web\\nAction Input: {\\\"query\\\":\\\"population
|
||||
of Tokyo 2023\\\"}\\nObservation: The population of Tokyo is approximately 14
|
||||
million in the city proper, while the greater Tokyo area has a population of
|
||||
around 37 million. \\n\\nThought: I now know the final answer\\nFinal Answer:
|
||||
{\\n \\\"summary\\\": \\\"The population of Tokyo is approximately 14 million
|
||||
in the city proper, and around 37 million in the greater Tokyo area.\\\",\\n
|
||||
\ \\\"confidence\\\": 90\\n}\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 286,\n \"completion_tokens\":
|
||||
113,\n \"total_tokens\": 399,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_9654a743ed\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 92921f4648215c1f-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 31 Mar 2025 19:06:09 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=OWYkqAq6NMgagfjt7oqi12iJ5ECBTSDmDicA3PaziDo-1743447969-1.0.1.1-rq5Byse6zYlezkvLZz4NdC5S0JaKB1rLgWEO2WGINaZ0lvlmJTw3uVGk4VUfrnnYaNr8IUcyhSX5vzSrX7HjdmczCcSMJRbDdUtephXrT.A;
|
||||
path=/; expires=Mon, 31-Mar-25 19:36:09 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=u769MG.poap6iEjFpbByMFUC0FygMEqYSurr5DfLbas-1743447969501-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1669'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999672'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_824c5fb422e466b60dacb6e27a0cbbda
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Info Gatherer. You
|
||||
gather and summarize information quickly.\nYour personal goal is: Provide brief
|
||||
information\n\nYou ONLY have access to the following tools, and should NEVER
|
||||
make up tools that are not listed here:\n\nTool Name: search_web\nTool Arguments:
|
||||
{''query'': {''description'': None, ''type'': ''str''}}\nTool Description: Search
|
||||
the web for information about a topic.\n\nIMPORTANT: Use the following format
|
||||
in your response:\n\n```\nThought: you should always think about what to do\nAction:
|
||||
the action to take, only one name of [search_web], just the name, exactly as
|
||||
it''s written.\nAction Input: the input to the action, just a simple JSON object,
|
||||
enclosed in curly braces, using \" to wrap keys and values.\nObservation: the
|
||||
result of the action\n```\n\nOnce all necessary information is gathered, return
|
||||
the following format:\n\n```\nThought: I now know the final answer\nFinal Answer:
|
||||
the final answer to the original input question\n```\nIMPORTANT: Your final
|
||||
answer MUST contain all the information requested in the following format: {\n \"summary\":
|
||||
str,\n \"confidence\": int\n}\n\nIMPORTANT: Ensure the final output does not
|
||||
include any code block markers like ```json or ```python."}, {"role": "user",
|
||||
"content": "What is the population of Tokyo? Return your structured output in
|
||||
JSON format with the following fields: summary, confidence"}, {"role": "assistant",
|
||||
"content": "Thought: I need to find the current population of Tokyo.\nAction:
|
||||
search_web\nAction Input: {\"query\":\"population of Tokyo 2023\"}\nObservation:
|
||||
Tokyo''s population in 2023 was approximately 21 million people in the city
|
||||
proper, and 37 million in the greater metropolitan area."}], "model": "gpt-4o-mini",
|
||||
"stop": ["\nObservation:"], "stream": false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1796'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=u769MG.poap6iEjFpbByMFUC0FygMEqYSurr5DfLbas-1743447969501-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
@@ -190,19 +160,20 @@ interactions:
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFPLbtswELz7Kxa89GIHsuOnbkmBvg7tIbkUVSBsqJXFmuQSJNXECPzv
|
||||
BWk3ctIU6IUAOTvD2eHyaQQgVCNKELLDKI3Tk/df5h9vvq3V96/XvvfTT7e7qb6+MYt2fXWPYpwY
|
||||
fP+TZPzDupBsnKao2B5h6QkjJdXparFeLovlssiA4YZ0om1dnMx5YpRVk1kxm0+K1WS6PrE7VpKC
|
||||
KOHHCADgKa/Jp23oUZSQtfKJoRBwS6J8LgIQnnU6ERiCChFtFOMBlGwj2Wz9tuN+28USPoPlB9il
|
||||
JXYErbKoAW14IF/ZD3l3lXclPFUWoBKhNwb9vhIlVOKWd3t+F8Cx6zWmFEBZmBWzS1AB0DnPj8pg
|
||||
JL2H2RSM0vpUk26TKu7BeXbkAW0D6Lm3DVyuXhcaip4daxXRAnrCi0qMj3Yk21Y1ZCUlR5uisofz
|
||||
nj21fcCUu+21PgPQWo7ZcU777oQcnvPVvHWe78MrqmiVVaGrPWFgm7IMkZ3I6GEEcJffsX/xNMJ5
|
||||
Ni7WkXeUr7ucr456YhifAV3MTmDkiPqMtdmM39CrG4qodDibBCFRdtQM1GFssG8UnwGjs67/dvOW
|
||||
9rFzZbf/Iz8AUpKL1NTOU6Pky46HMk/pd/2r7DnlbFgE8r+UpDoq8uklGmqx18eZF2EfIpm6VXZL
|
||||
3nl1HPzW1Ytlge2SFouNGB1GvwEAAP//AwBMppztBgQAAA==
|
||||
H4sIAAAAAAAAAwAAAP//jFRNb9swDL3nVxA6J0W+2/jWFd3Wyz4LDMNcGIpM21plUpPkdV6R/z7I
|
||||
Tuu0y4pdBIjv8elRInU/AhA6FwkIVcmgamsmF1/LctG+Wl98umw3by8/89r9bov249m7L/hBjGMG
|
||||
b7+jCg9ZJ4prazBoph5WDmXAqDo7Xc8XZ4v5YtMBNedoYlppw2TJk1qTnsyn8+VkejqZne2zK9YK
|
||||
vUjg2wgA4L5bo0/K8ZdIYDp+iNTovSxRJI8kAOHYxIiQ3msfJAUxHkDFFJA669cVN2UVErgCQswh
|
||||
MBSacggVgmqcQwpg2TZGxsqAC7jm25ZPIKVzFUMJeJROVdkdbh9icEW2CQncp+JHg65NRZKKF9RS
|
||||
sUvp/daj+yl7zesKjxFBe5DWOv6laxnQtDBbQq2NiSRNvWsdWrCOLTqQlIPcchNgcfqc96Z7H7cX
|
||||
PncoT1JK6fBC+A5u4xLphSZpQJK/Q5fS62533u1inQSQCt/UteyqhVS8VIHjhvJD6wW7wftR0w+M
|
||||
Y67FuD9fMRU6R1IYLWymKe0OX91h0XgZO48aYw4AScShs9n1280e2T12mOHSOt76Z6mi0KR9lTmU
|
||||
nil2kw9sRYfuRgA3XSc3T5pTWMe1DVngW+yOWy7WvZ4YBmhAZ9PlHg0cpBmA1XI/AE8FsxyD1MYf
|
||||
DINQUlWYD6nD5Mgm13wAjA7K/tvOMe2+dE3l/8gPgFJoA+aZdZhr9bTkgeYwfjD/oj1ec2dYxOHR
|
||||
CrOg0cWnyLGQjenHXvjWB6yzQlOJzjrdz35hs9V6Kos1rlYbMdqN/gAAAP//AwDA54G2CQUAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 983ceae938953023-SJC
|
||||
- 999fee2b3e111b53-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -210,14 +181,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 23 Sep 2025 20:51:02 GMT
|
||||
- Wed, 05 Nov 2025 22:54:00 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=GCRvAgKG_bNwYFqI4.V.ETNDFENlZGsSPgqfmPRweBE-1758660662-1.0.1.1-BbV_KqvF6uEt_DEfefPzisFvVJNAN5NBAn7UyvcCjL4cC0Earh6WKRSQEBgXDhltOn0zo_0LaT1GsrScK1y2R6EE8NtKLTLI0DvmUDiiTdo;
|
||||
path=/; expires=Tue, 23-Sep-25 21:21:02 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Wed, 05-Nov-25 23:24:00 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=satXYLU.6M.wV_6k7mFk5Z6V97uowThF_xldugIJSJQ-1758660662273-0.0.1.1-604800000;
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
@@ -232,110 +203,291 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- REDACTED
|
||||
openai-processing-ms:
|
||||
- '1464'
|
||||
- '1270'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
- REDACTED
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1521'
|
||||
- '1417'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999605'
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999602'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
- '199511'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
- 8.64s
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 146ms
|
||||
x-request-id:
|
||||
- req_b7cf0ed387424a5f913d455e7bcc6949
|
||||
- req_956101550d2e4e35b2818550ccbb94df
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"trace_id": "df56ad93-ab2e-4de8-b57c-e52cd231320c", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "0.193.2",
|
||||
"privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate":
|
||||
300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at":
|
||||
"2025-09-23T21:03:51.621012+00:00"}}'
|
||||
body: '{"messages":[{"role":"system","content":"You are Info Gatherer. You gather
|
||||
and summarize information quickly.\nYour personal goal is: Provide brief information\n\nYou
|
||||
ONLY have access to the following tools, and should NEVER make up tools that
|
||||
are not listed here:\n\nTool Name: search_web\nTool Arguments: {''query'': {''description'':
|
||||
None, ''type'': ''str''}}\nTool Description: Search the web for information
|
||||
about a topic.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
|
||||
you should always think about what to do\nAction: the action to take, only one
|
||||
name of [search_web], just the name, exactly as it''s written.\nAction Input:
|
||||
the input to the action, just a simple JSON object, enclosed in curly braces,
|
||||
using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
|
||||
all necessary information is gathered, return the following format:\n\n```\nThought:
|
||||
I now know the final answer\nFinal Answer: the final answer to the original
|
||||
input question\n```Ensure your final answer strictly adheres to the following
|
||||
OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\":
|
||||
\"SimpleOutput\",\n \"strict\": true,\n \"schema\": {\n \"description\":
|
||||
\"Simple structure for agent outputs.\",\n \"properties\": {\n \"summary\":
|
||||
{\n \"description\": \"A brief summary of findings\",\n \"title\":
|
||||
\"Summary\",\n \"type\": \"string\"\n },\n \"confidence\":
|
||||
{\n \"description\": \"Confidence level from 1-100\",\n \"title\":
|
||||
\"Confidence\",\n \"type\": \"integer\"\n }\n },\n \"required\":
|
||||
[\n \"summary\",\n \"confidence\"\n ],\n \"title\":
|
||||
\"SimpleOutput\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output.
|
||||
Ensure the final output does not include any code block markers like ```json
|
||||
or ```python."},{"role":"user","content":"What is the population of Tokyo? Return
|
||||
your structured output in JSON format with the following fields: summary, confidence"},{"role":"assistant","content":"Thought:
|
||||
I need to find the current population of Tokyo. \nAction: search_web\nAction
|
||||
Input: {\"query\":\"current population of Tokyo\"}\nObservation: Tokyo''s population
|
||||
in 2023 was approximately 21 million people in the city proper, and 37 million
|
||||
in the greater metropolitan area."}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '436'
|
||||
Content-Type:
|
||||
accept:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/0.193.2
|
||||
X-Crewai-Version:
|
||||
- 0.193.2
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2473'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- REDACTED
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: http://localhost:3000/crewai_plus/api/v1/tracing/batches
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: '{"error":"bad_credentials","message":"Bad credentials"}'
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFNNaxsxEL37Vww628Gfcby3UBraQguBXEo3LBNpdleNViMkbRLX+L8X
|
||||
ya7XSVPoRaB580Zv3ox2IwChlShAyBaj7JyZfPjeNMuP9uWhbta3zdOXX/NPN9fqW1Bfb3stxonB
|
||||
Dz9Jxj+sC8mdMxQ12wMsPWGkVHW2vpwvrhbz5TQDHSsyida4OFnypNNWT+bT+XIyXU9mV0d2y1pS
|
||||
EAX8GAEA7PKZdFpFL6KAXCtHOgoBGxLFKQlAeDYpIjAEHSLaKMYDKNlGsln6Xct908YCPoPlZ3hM
|
||||
R2wJam3RANrwTL60N/l2nW8F7EoLUIrQdx36bSkKKMUdP24ZWgyA4Nj1BpMTwDWgc55fdIeRzBbm
|
||||
M+i0MQnTNr8kddyC8+zIA1oFi/XbjCY76aGj6Nmx0REtoCe8KMX4oEWyrbUiKynJ2UxLuz9v2FPd
|
||||
B0ym296YMwCt5ZilZqvvj8j+ZK7hxnl+CG+ootZWh7byhIFtMjJEdiKj+xHAfR5i/2ouwnnuXKwi
|
||||
P1J+brnZHOqJYXfO0SMYOaIZ4qvl1fidepWiiNqEszUQEmVLaqAOO4O90nwGjM66/lvNe7UPnWvb
|
||||
/E/5AZCSXCRVOU9Ky9cdD2me0tf6V9rJ5SxYBPJPWlIVNfk0CUU19uaw8CJsQ6SuqrVtyDuvD1tf
|
||||
u2p1OcX6klarjRjtR78BAAD//wMAzspSwwMEAAA=
|
||||
headers:
|
||||
Content-Length:
|
||||
- '55'
|
||||
cache-control:
|
||||
- no-cache
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.crewai.com crewai.com; script-src ''self'' ''unsafe-inline''
|
||||
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com
|
||||
https://run.pstmn.io https://share.descript.com/; style-src ''self'' ''unsafe-inline''
|
||||
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self''
|
||||
data: *.crewai.com crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net; font-src ''self'' data: *.crewai.com crewai.com;
|
||||
connect-src ''self'' *.crewai.com crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ ws://localhost:3036
|
||||
wss://localhost:3036; frame-src ''self'' *.crewai.com crewai.com https://connect.useparagon.com/
|
||||
https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
content-type:
|
||||
- application/json; charset=utf-8
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
server-timing:
|
||||
- cache_read.active_support;dur=0.05, sql.active_record;dur=1.55, cache_generate.active_support;dur=2.03,
|
||||
cache_write.active_support;dur=0.18, cache_read_multi.active_support;dur=0.11,
|
||||
start_processing.action_controller;dur=0.00, process_action.action_controller;dur=2.68
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
CF-RAY:
|
||||
- 999fee34cbb91b53-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 22:54:01 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- REDACTED
|
||||
openai-processing-ms:
|
||||
- '732'
|
||||
openai-project:
|
||||
- REDACTED
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '765'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9998'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '199441'
|
||||
x-ratelimit-reset-requests:
|
||||
- 15.886s
|
||||
x-ratelimit-reset-tokens:
|
||||
- 167ms
|
||||
x-request-id:
|
||||
- 3fadc173-fe84-48e8-b34f-d6ce5be9b584
|
||||
x-runtime:
|
||||
- '0.046122'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
- req_38b9ec4e10324fb69598cd32ed245de3
|
||||
status:
|
||||
code: 401
|
||||
message: Unauthorized
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly
|
||||
adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\":
|
||||
{\n \"name\": \"SimpleOutput\",\n \"strict\": true,\n \"schema\": {\n \"description\":
|
||||
\"Simple structure for agent outputs.\",\n \"properties\": {\n \"summary\":
|
||||
{\n \"description\": \"A brief summary of findings\",\n \"title\":
|
||||
\"Summary\",\n \"type\": \"string\"\n },\n \"confidence\":
|
||||
{\n \"description\": \"Confidence level from 1-100\",\n \"title\":
|
||||
\"Confidence\",\n \"type\": \"integer\"\n }\n },\n \"required\":
|
||||
[\n \"summary\",\n \"confidence\"\n ],\n \"title\":
|
||||
\"SimpleOutput\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output.
|
||||
Ensure the final output does not include any code block markers like ```json
|
||||
or ```python."},{"role":"user","content":"{\n \"summary\": \"Tokyo has a population
|
||||
of approximately 21 million in the city proper and 37 million in the greater
|
||||
metropolitan area.\",\n \"confidence\": 90\n}"}],"model":"gpt-4o-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"description":"Simple
|
||||
structure for agent outputs.","properties":{"summary":{"description":"A brief
|
||||
summary of findings","title":"Summary","type":"string"},"confidence":{"description":"Confidence
|
||||
level from 1-100","title":"Confidence","type":"integer"}},"required":["summary","confidence"],"title":"SimpleOutput","type":"object","additionalProperties":false},"name":"SimpleOutput","strict":true}},"stream":false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1723'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- REDACTED
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-helper-method:
|
||||
- chat.completions.parse
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFPLbtswELzrKxY8W4EtW3aia5BDgQJ9oAcXVSDQ5EpiTXFZkiosGP73
|
||||
QvJDctICvfCws7OcnSGPEQBTkmXARM2DaKyOn79XVfr8SX7d7j9/WH0R9rDd/vpoX9LqpduxWc+g
|
||||
3U8U4cp6ENRYjUGROcPCIQ/YT11s1snycZmsFgPQkETd0yob4hXFjTIqTubJKp5v4sXjhV2TEuhZ
|
||||
Bj8iAIDjcPY6jcQDy2A+u1Ya9J5XyLJbEwBzpPsK494rH7gJbDaCgkxAM0g/5sy3TcNdl7MsZ99o
|
||||
3xHU3AMHS7bVvF8IqARuraODanhA3UGygEZp3WPKQKgRhAodWEcWHXAjYbl521ENhjhoMDiypFXg
|
||||
BrhD/pCzWd6LKpVEIzBn2dP8NBXssGw9700zrdYTgBtDYdA4WPV6QU43czRV1tHOv6GyUhnl68Ih
|
||||
92R6I3wgywb0FAG8DiG0d74y66ixoQi0x+G6ZbI6z2Nj9hP0khALFLie1NMr625eITFwpf0kRia4
|
||||
qFGO1DFz3kpFEyCabP1ezd9mnzdXpvqf8SMgBNqAsrAOpRL3G49tDvuv8a+2m8uDYObR/VYCi6DQ
|
||||
9UlILHmrzw+W+c4HbIpSmQqdder8aktbpOs5L9eYpk8sOkV/AAAA//8DADo6EVPDAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 999fee3a4a241b53-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 22:54:02 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- REDACTED
|
||||
openai-processing-ms:
|
||||
- '668'
|
||||
openai-project:
|
||||
- REDACTED
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '692'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9998'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '199735'
|
||||
x-ratelimit-reset-requests:
|
||||
- 15.025s
|
||||
x-ratelimit-reset-tokens:
|
||||
- 79ms
|
||||
x-request-id:
|
||||
- req_7e08fbc193574ac6955499d9d41b92dc
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
@@ -186,7 +186,7 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- crewai-REDACTED
|
||||
openai-processing-ms:
|
||||
- '15605'
|
||||
openai-version:
|
||||
@@ -344,7 +344,7 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- crewai-REDACTED
|
||||
openai-processing-ms:
|
||||
- '1350'
|
||||
openai-version:
|
||||
@@ -570,7 +570,7 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- crewai-REDACTED
|
||||
openai-processing-ms:
|
||||
- '16199'
|
||||
openai-version:
|
||||
@@ -732,7 +732,7 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- crewai-REDACTED
|
||||
openai-processing-ms:
|
||||
- '644'
|
||||
openai-version:
|
||||
@@ -984,7 +984,7 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- crewai-REDACTED
|
||||
openai-processing-ms:
|
||||
- '26924'
|
||||
openai-version:
|
||||
@@ -1119,4 +1119,823 @@ interactions:
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"trace_id": "18f7992e-da65-4b69-bbe7-212176a3d836", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-05T23:11:50.094791+00:00"},
|
||||
"ephemeral_trace_id": "18f7992e-da65-4b69-bbe7-212176a3d836"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '488'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.3.0
|
||||
X-Crewai-Version:
|
||||
- 1.3.0
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"id":"b47c99de-55ce-4282-a2b2-1f0a26699e66","ephemeral_trace_id":"18f7992e-da65-4b69-bbe7-212176a3d836","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.3.0","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.3.0","privacy_level":"standard"},"created_at":"2025-11-05T23:11:50.423Z","updated_at":"2025-11-05T23:11:50.423Z","access_code":"TRACE-7fbb21b3f9","user_identifier":null}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '515'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 23:11:50 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
etag:
|
||||
- W/"ad9765408bacdf25c542345ddca78bb2"
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- abeaefb7-af6b-4523-b627-a53fe86ef881
|
||||
x-runtime:
|
||||
- '0.085107'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 201
|
||||
message: Created
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are plants Senior Data Researcher\n.
|
||||
You''re a seasoned researcher with a knack for uncovering the latest developments
|
||||
in plants. Known for your ability to find the most relevant information and
|
||||
present it in a clear and concise manner.\n\nYour personal goal is: Uncover
|
||||
cutting-edge developments in plants\n\nTo give my best complete final answer
|
||||
to the task respond using the exact following format:\n\nThought: I now can
|
||||
give a great answer\nFinal Answer: Your final answer must be the great and the
|
||||
most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Conduct
|
||||
a thorough research about plants Make sure you find any interesting and relevant
|
||||
information given the current year is 2025.\n\n\nThis is the expected criteria
|
||||
for your final answer: A list with 10 bullet points of the most relevant information
|
||||
about plants\n\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nThis is the context you''re working with:\n### Previous attempt
|
||||
failed validation: Error while validating the task output: The guardrail result
|
||||
is not a valid pydantic model\n\n\n### Previous result:\n1. **Plant-Based Biofuels**:
|
||||
Advances in genetic engineering are allowing researchers to develop plants that
|
||||
produce higher yields of biofuels, reducing reliance on fossil fuels. For example,
|
||||
specialized strains of switchgrass and algae are being cultivated for more efficient
|
||||
biofuel production.\n\n2. **CRISPR Technology in Plant Breeding**: The use of
|
||||
CRISPR technology has accelerated the breeding of disease-resistant and drought-tolerant
|
||||
plants. Researchers are now able to edit plant genomes with precision, leading
|
||||
to breakthroughs in staple crops like rice and wheat for better resilience to
|
||||
climate change.\n\n3. **Vertical Farming Innovations**: Vertical farming techniques
|
||||
have evolved to integrate advanced hydroponics and aeroponics, improving space
|
||||
efficiency and speed of plant growth. This method not only conserves water but
|
||||
also minimizes the use of pesticides and herbicides.\n\n4. **Enhancing Plant
|
||||
Photosynthesis**: Researchers are exploring ways to enhance the photosynthesis
|
||||
process in plants, potentially increasing crop yields by up to 50%. New variations
|
||||
in light-harvesting proteins have been identified that could lead to crops that
|
||||
grow quicker and with less resource input.\n\n5. **Plant Communication**: Studies
|
||||
have shown that plants can communicate with each other through root exudates
|
||||
and volatile organic compounds. This research is leading to better understanding
|
||||
of plant ecology and could have implications for agriculture practices and pest
|
||||
control.\n\n6. **Soil Microbiomes and Plant Health**: Recent findings highlight
|
||||
the importance of soil microbiomes in promoting plant health and growth. The
|
||||
use of beneficial microbial inoculants is becoming a popular strategy to enhance
|
||||
nutrient uptake and improve plant resilience.\n\n7. **Sustainable Pest Management**:
|
||||
Innovative pest management strategies utilizing plant-based repellents are on
|
||||
the rise. This involves selecting and cultivating plants that naturally deter
|
||||
pests, minimizing the need for chemical pesticides and enhancing biodiversity.\n\n8.
|
||||
**Urban Forests as Carbon Sinks**: Urban greening initiatives are increasingly
|
||||
focusing on planting trees and shrubs in cities to capture carbon dioxide, improve
|
||||
air quality, and promote biodiversity. These efforts are being recognized for
|
||||
their role in mitigating urban heat and climate change impacts.\n\n9. **Phytoremediation**:
|
||||
Research into using specific plants for soil and water remediation has gained
|
||||
traction. Some plants can absorb heavy metals and toxins, offering a sustainable
|
||||
solution for cleaning contaminated environments.\n\n10. **Biophilic Design in
|
||||
Architecture**: There is a growing trend in incorporating plants into architectural
|
||||
designs, employing biophilic principles to enhance urban ecosystems. This includes
|
||||
the integration of green roofs and living walls, which provide aesthetic benefits
|
||||
while improving air quality and biodiversity.\n\n\nTry again, making sure to
|
||||
address the validation error.\n\nBegin! This is VERY important to you, use the
|
||||
tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '4382'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA2xXUW8buRF+z68YCOhLIBl24iQXvyVucnVbBzk7V7RoDsEsOdqdM3dmj+RK3h7u
|
||||
vxdD7kryIS+GJYrkzDfffPPx92cAK/arK1i5DrPrh7C5/k/7+I+bv1/nD/jw7rF/vP519/Gf79qf
|
||||
Pj1++vTv1dp2aPMrubzsOnPaD4Eyq9RlFwkz2akXb16/ePnD5cuL87LQq6dg29ohby7PLjY9C29e
|
||||
nL94tTm/3Fxczts7ZUdpdQX/fQYA8Hv5a4GKp8fVFZTDyjc9pYQtra4OPwJYRQ32zQpT4pRR8mp9
|
||||
XHQqmaTE/qXTse3yFdyA6B4cCrS8I0BoLQFASXuKX+UjCwZ4Vz5dfZWLM3j+/EcSyuwwhAlu1fOW
|
||||
ycPngJITbDXCdeAeM8EdJQ5M4uj58yu4EbBk15C4Fd6yQ8kwRG0jpQQdJmiIBHr0BCzgaUdBB5YW
|
||||
2pP7SFoWokgeXNQhQe4s2kjQaySI85UZsgI95kg9wZ4wdxTBqXi2SiVIo+sAE/hYYFhDR5jXgOJh
|
||||
G1Q9S3sGPye7Hf0OxZEvYQDZCdJCJtcJ/zZSgsAPBNd3N/ef7zbXmN6uYahYLJf0yP+jcnbSqSEU
|
||||
S3dHNd+MHNTSyQppjLtSBPGGjB8dQcrYBIKJKfgEntLAmcDNEKds6J19la/ywkrzQboa7DXGRgWu
|
||||
cchjJMhdSbRWCT7MILK0Vpk7SoTRdRTnwNiT5FpWC6VhPYWdYkaWmiSkgRxTsuhZjPvJLiOO4GoE
|
||||
iQykHNGAB4cDOs7TGdyI6A5rNVhcGL2V0PN2MnijaoY0pUx9qnB0mjVNkjujAgyYuz1O5V5sksam
|
||||
4pt1YQLmXtPQUWS3hOJZH9nT2oiQIzdjraQe0HQdSkvQc+a2xlsCp5apYvzSMP4XxcJG+IixtxNu
|
||||
JFM7J7jn3MG7mxLNnTaa2SXD+FY9RYHdsneLsU/gtG9YCLrJRx1U2NVskQ4fy4EYrRyOMQBLphC4
|
||||
tbYqv43zLZaIDpl7I1stTht1nztwkwuUziysXoWzxgQkO44qPUnGcNoZdqSMOZYmCtaEVh+IhGGT
|
||||
uac17DsOVK81bgu2832lW8RDh3FHyT4+afYwLRSxH9LWEiJxU82C/Ojs+4CNWqumXCG/NMj/ysnp
|
||||
juIEup05fNP3oxDcF47A55kPBvX1mO3uDfm2CELhdhGYUXbEgTwI7aHXQG4MGI9kWlg9q0prV8pM
|
||||
Z673RUqDSqqMt43akqQz+NJxggfRfSjXsqlZwWMYAtfunhWt6BbsMDJl65xSYFr6VjCPEUMRMtNv
|
||||
R+sjNrkj8DSQ+FJ966eO+sKnwQB37GeivjLUbtlFbVh7Ou34ItH3yqHgXsH8G2HInYH3rqpdwWIU
|
||||
T9GC8MvllVb98dwiF6HmZz8o08/6QLfgxpTV2OiXHYW+aqAfMOZ+iLqjI+XGIeMDraFRTQuDqzDP
|
||||
GM2aB1kDRfvCwKdEkJacinicBJktKGN6KoPC5t2ePYUJxlRDH6L2anI6JtO2orjYRnZjMPkskL42
|
||||
SO97jHkG7Z4kWSsZnnfWHl+4J7itHTZL63vWMik0aFvq1ETCh1mNZ7WNlMaQyRvktcKFJxawwSgo
|
||||
muarCmQ2qxsqQmCqUzZaDjOqJZsdezr2LHjMaHSp9etKtdewx0zRBkwe0/pp43uau9OUr5L7kIcN
|
||||
YUMowRDJcbJyn4AFzQQYgu4L1zD2NlayQo8PBBZNmNYloI2PvCOb9PWQytw3BvPnbjIl78nziaye
|
||||
+o7DoLuvA8jA/kT7J16hX7zJDM1x6M4Qk5/lVQ7DywM2HDhPJ5OlI9xN0FPGMA8jDWHM1e9E7cs8
|
||||
wZ6lFMNoWH9W8W3UzyAaSRdrQNHmfAJyutka5t5gMdnb0HZLLpsPSBrGqsrGsT9JdiCUcSiY/WCY
|
||||
/RwbFPgxEsnS5d83YtdcdIerHystcdTlMAF6GyPSwlhO3Gq0+T0Zrcz5ziwcreMM+wJ1okAuL1Cf
|
||||
ugIbcZgLset55rSAUygtXVJdH1QAOcJvIxr+lZBpHAaNed7asHreUUzFQVRAWTgbSXaUAEMqfTAB
|
||||
wo4NJbPDlmnVDktqhq/2QC0ThVDF2jAz4TX7U9n4trDRctq8L+x4zzoETHmZ1PcninFr9WYMqdrd
|
||||
U38z49KcyMFU+Vh7yVd5LZ5v0U/L1jrcl8Of3KrFz56qVb/cDZ6srXxl5uHaHtN3Glm3W+tODJmi
|
||||
zCCagFA24MZ+05Sk57iS2eRQLHkZSYeg5oYoQiB+sVvUczr29cW5QTmPlyrGLPD5aOsSJ/hwMAUG
|
||||
4b39mzktrZtG5yil7RiKlcjVIVePujgfD983icvwGJ5eyFKfEetZhS03FkB4eb55df6Xo6dlWdyU
|
||||
SW6tQvXkM6qnwg7ovc0oQ7MjaIM2ZvlUPSRyYzR5cR2GQGY3m1lQCzvNui7WKAMJxXYyfSmsVzla
|
||||
lLPTh12k7ZjQXpcyhnCygCKaKwvtSfnLvPLH4REZtB2iNulPW1dbFk7dN0texR6MKeuwKqt/PAP4
|
||||
pTxWxyfvz5VN0SF/y/pA5bo3b9/W81bHR/Jx9fX5q3k1a8ZwXLi4PL9cf+fEb57ssZROHrwrh64j
|
||||
f9x7fB3j6FlPFp6d5P1/AAAA///CdA82syF+z8xLJ8Z4hERycmpBSWpKfEFRakpmMqqfEcqKUrPA
|
||||
BSZ2ZfBwBjtYCVRbZCanxpdkphaB4iIlNS2xNAfStVeC9JLi0zLz0lOLCooyIf37tIJ4k2QjC1PD
|
||||
NAszIyWuWi4AAAAA//8DAFv43+PuEAAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99a008535b928ce2-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 23:11:58 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=gemAT_QB_h_zisIV4_4r2Ltg8oBSvOOHIp9lOpAcJxE-1762384318-1.0.1.1-hbHi8L_6ckzVRc7q12W69jloWLCbjFefoSgd465kdaTlFOdUKu4Ft.90.XtUYVTzXluYj28p0e07ASms9gCIdr8CHLfbhFQKf6nZqk7.KZ4;
|
||||
path=/; expires=Wed, 05-Nov-25 23:41:58 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=FuiwbPturMkq3oPWfHULVuvVE6SZkSH8wf8u2lWOeHo-1762384318759-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- user-REDACTED
|
||||
openai-processing-ms:
|
||||
- '8275'
|
||||
openai-project:
|
||||
- proj_REDACTED
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '8409'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '198937'
|
||||
x-ratelimit-reset-requests:
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 318ms
|
||||
x-request-id:
|
||||
- req_6e2ec55058f6435fa6f190848d95a858
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly
|
||||
adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\":
|
||||
{\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\":
|
||||
{\n \"properties\": {\n \"valid\": {\n \"description\":
|
||||
\"Whether the task output complies with the guardrail\",\n \"title\":
|
||||
\"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\":
|
||||
{\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\":
|
||||
\"null\"\n }\n ],\n \"default\": null,\n \"description\":
|
||||
\"A feedback about the task output if it is not valid\",\n \"title\":
|
||||
\"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\":
|
||||
\"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output.
|
||||
Ensure the final output does not include any code block markers like ```json
|
||||
or ```python."},{"role":"user","content":"1. **Genetically Modified Plants for
|
||||
Climate Resilience**: In 2025, significant progress has been made in developing
|
||||
genetically engineered crops that are more resilient to extreme weather conditions
|
||||
such as drought, heat, and flooding. Using advanced gene editing techniques
|
||||
like CRISPR-Cas9, plants such as maize and soybeans have been tailored to survive
|
||||
and produce stable yields despite climate stress.\n\n2. **Enhanced Carbon Capture
|
||||
through Plant Engineering**: Researchers have identified and bioengineered certain
|
||||
plant species to increase their carbon sequestration capacity. Innovations include
|
||||
modifying root systems and photosynthetic pathways to absorb and store more
|
||||
atmospheric carbon dioxide, contributing to climate change mitigation strategies.\n\n3.
|
||||
**Vertical Farming Integration with AI and Robotics**: Modern vertical farms
|
||||
combine hydroponics and aeroponics with artificial intelligence and robotics
|
||||
to optimize plant growth cycles. AI monitors environmental conditions and nutrient
|
||||
levels in real-time, while robots manage planting and harvesting, significantly
|
||||
increasing efficiency and reducing labor costs.\n\n4. **Discovery of Plant Immune
|
||||
System Pathways**: Cutting-edge research has unveiled new molecular pathways
|
||||
in plants that govern their immune responses to pathogens. This knowledge is
|
||||
being applied to develop crop varieties with enhanced natural resistance, reducing
|
||||
the dependence on chemical pesticides.\n\n5. **Microbiome Engineering for Soil
|
||||
and Plant Health**: Advances in understanding the plant microbiome have led
|
||||
to the creation of customized microbial inoculants that improve nutrient uptake,
|
||||
boost growth, and enhance stress tolerance. These soil and root microbiome treatments
|
||||
are now widely used to promote sustainable agriculture.\n\n6. **Smart Plant
|
||||
Sensors for Real-Time Monitoring**: Biotechnological breakthroughs have resulted
|
||||
in the development of nanosensors that can be integrated into plants to provide
|
||||
real-time data on plant health, water status, and nutrient deficiencies. This
|
||||
technology enables precision agriculture by allowing farmers to make timely,
|
||||
data-driven decisions.\n\n7. **Phytoremediation with Genetically Enhanced Species**:
|
||||
New genetically modified plants have been developed with an increased ability
|
||||
to absorb heavy metals and pollutants from contaminated soils and water bodies.
|
||||
These plants serve as eco-friendly, cost-effective solutions for environmental
|
||||
cleanup.\n\n8. **Urban Greening for Climate Resilience**: Cities in 2025 are
|
||||
increasingly adopting urban forestry projects that use specially selected plant
|
||||
species to combat the urban heat island effect, improve air quality, and support
|
||||
urban biodiversity. These initiatives also play a vital role in enhancing mental
|
||||
health and wellbeing for residents.\n\n9. **Plant-Based Bioplastics and Sustainable
|
||||
Materials**: Innovations in plant biotechnology have enabled the production
|
||||
of biodegradable plastics and other sustainable materials derived from plant
|
||||
biomass. This technology offers alternatives to petroleum-based products, helping
|
||||
reduce plastic pollution and carbon emissions.\n\n10. **Advancements in Photosynthesis
|
||||
Efficiency**: Scientists have successfully introduced and optimized synthetic
|
||||
pathways to enhance photosynthesis in crops, resulting in a 30-50% increase
|
||||
in growth rates and yields. This breakthrough addresses the global food security
|
||||
challenge by enabling more efficient energy conversion in plants."}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether
|
||||
the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A
|
||||
feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5198'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=gemAT_QB_h_zisIV4_4r2Ltg8oBSvOOHIp9lOpAcJxE-1762384318-1.0.1.1-hbHi8L_6ckzVRc7q12W69jloWLCbjFefoSgd465kdaTlFOdUKu4Ft.90.XtUYVTzXluYj28p0e07ASms9gCIdr8CHLfbhFQKf6nZqk7.KZ4;
|
||||
_cfuvid=FuiwbPturMkq3oPWfHULVuvVE6SZkSH8wf8u2lWOeHo-1762384318759-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-helper-method:
|
||||
- chat.completions.parse
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4ySQW/UMBCF7/kV1pyTapNNy25uqAgh4NZyALaKvPYkMXVsy56Uwmr/O3Ky3aS0
|
||||
SFxymG/e5L3xHBLGQEmoGIiOk+idzq6/to+37+jzTXj7wd/Yj/7T9Zf33W3/7Xdnf0IaFXb/AwU9
|
||||
qS6E7Z1GUtZMWHjkhHFq/uaqWG/Kdb4dQW8l6ihrHWXlRZ71yqisWBWX2arM8vIk76wSGKBi3xPG
|
||||
GDuM32jUSHyEiq3Sp0qPIfAWoTo3MQbe6lgBHoIKxA1BOkNhDaEZvR928MC1kjuoyA+Y7qBBlHsu
|
||||
7ndQmUHr41LosRkCj+4jWgBujCUe04+W707keDapbeu83Ye/pNAoo0JXe+TBmmgokHUw0mPC2N24
|
||||
jOFZPnDe9o5qsvc4/m5bbKd5MD/Cgp4YWeJ6UV5v0lfG1RKJKx0W2wTBRYdyls6r54NUdgGSReiX
|
||||
Zl6bPQVXpv2f8TMQAh2hrJ1HqcTzwHObx3ii/2o7L3k0DAH9gxJYk0IfH0Jiwwc93Q2EX4Gwrxtl
|
||||
WvTOq+l4GleXothc5s3mqoDkmPwBAAD//wMAy7pUCUsDAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99a008889ae18ce2-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 23:11:59 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- user-REDACTED
|
||||
openai-processing-ms:
|
||||
- '377'
|
||||
openai-project:
|
||||
- proj_REDACTED
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '643'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '198876'
|
||||
x-ratelimit-reset-requests:
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 336ms
|
||||
x-request-id:
|
||||
- req_50adc1a4d2d2408281d33289f59d147d
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly
|
||||
adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\":
|
||||
{\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\":
|
||||
{\n \"properties\": {\n \"valid\": {\n \"description\":
|
||||
\"Whether the task output complies with the guardrail\",\n \"title\":
|
||||
\"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\":
|
||||
{\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\":
|
||||
\"null\"\n }\n ],\n \"default\": null,\n \"description\":
|
||||
\"A feedback about the task output if it is not valid\",\n \"title\":
|
||||
\"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\":
|
||||
\"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output.
|
||||
Ensure the final output does not include any code block markers like ```json
|
||||
or ```python."},{"role":"user","content":"{\"valid\":true,\"feedback\":null}"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether
|
||||
the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A
|
||||
feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1765'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=gemAT_QB_h_zisIV4_4r2Ltg8oBSvOOHIp9lOpAcJxE-1762384318-1.0.1.1-hbHi8L_6ckzVRc7q12W69jloWLCbjFefoSgd465kdaTlFOdUKu4Ft.90.XtUYVTzXluYj28p0e07ASms9gCIdr8CHLfbhFQKf6nZqk7.KZ4;
|
||||
_cfuvid=FuiwbPturMkq3oPWfHULVuvVE6SZkSH8wf8u2lWOeHo-1762384318759-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-helper-method:
|
||||
- chat.completions.parse
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4ySMW/bMBCFd/0K4mYpkGXJcbQVRZcMXYoORR0INHmS2FAkQZ6SBob/e0HJsZQ0
|
||||
Bbpw4Hfv+N7xTgljoCTUDETPSQxOZ59/dC/5l+fd117mvrv7dtt2n+63/fM9H78HSKPCHn+hoFfV
|
||||
jbCD00jKmhkLj5wwdt3c7ortvtxWxQQGK1FHWecoK2822aCMyoq8qLK8zDblRd5bJTBAzX4mjDF2
|
||||
ms5o1Ej8DTXL09ebAUPgHUJ9LWIMvNXxBngIKhA3BOkChTWEZvJ+OsAT10oeoCY/YnqAFlEeuXg8
|
||||
QG1Grc9rocd2DDy6j2gFuDGWeEw/WX64kPPVpLad8/YY3kmhVUaFvvHIgzXRUCDrYKLnhLGHaRjj
|
||||
m3zgvB0cNWQfcXpuW+7mfrB8wkLvLowscb0SVVX6QbtGInGlw2qaILjoUS7SZfR8lMquQLIK/beZ
|
||||
j3rPwZXp/qf9AoRARygb51Eq8TbwUuYxrui/yq5DngxDQP+kBDak0MePkNjyUc97A+ElEA5Nq0yH
|
||||
3nk1L0/rmlIU+2rT7ncFJOfkDwAAAP//AwBk3ndHSwMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99a0095c2f319a1a-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 23:12:33 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- user-REDACTED
|
||||
openai-processing-ms:
|
||||
- '443'
|
||||
openai-project:
|
||||
- proj_REDACTED
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '625'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '199732'
|
||||
x-ratelimit-reset-requests:
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 80ms
|
||||
x-request-id:
|
||||
- req_7aeda9ef4e374a68a5a0950ed0c8e88b
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are plants Reporting Analyst\n.
|
||||
You''re a meticulous analyst with a keen eye for detail. You''re known for your
|
||||
ability to turn complex data into clear and concise reports, making it easy
|
||||
for others to understand and act on the information you provide.\n\nYour personal
|
||||
goal is: Create detailed reports based on plants data analysis and research
|
||||
findings\n\nTo give my best complete final answer to the task respond using
|
||||
the exact following format:\n\nThought: I now can give a great answer\nFinal
|
||||
Answer: Your final answer must be the great and the most complete as possible,
|
||||
it must be outcome described.\n\nI MUST use these formats, my job depends on
|
||||
it!"},{"role":"user","content":"\nCurrent Task: Review the context you got and
|
||||
expand each topic into a full section for a report. Make sure the report is
|
||||
detailed and contains any and all relevant information.\n\n\nThis is the expected
|
||||
criteria for your final answer: A fully fledge reports with the mains topics,
|
||||
each with a full section of information. Formatted as markdown without ''```''\n\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nThis
|
||||
is the context you''re working with:\n1. **Genetically Modified Plants for Climate
|
||||
Resilience**: In 2025, significant progress has been made in developing genetically
|
||||
engineered crops that are more resilient to extreme weather conditions such
|
||||
as drought, heat, and flooding. Using advanced gene editing techniques like
|
||||
CRISPR-Cas9, plants such as maize and soybeans have been tailored to survive
|
||||
and produce stable yields despite climate stress.\n\n2. **Enhanced Carbon Capture
|
||||
through Plant Engineering**: Researchers have identified and bioengineered certain
|
||||
plant species to increase their carbon sequestration capacity. Innovations include
|
||||
modifying root systems and photosynthetic pathways to absorb and store more
|
||||
atmospheric carbon dioxide, contributing to climate change mitigation strategies.\n\n3.
|
||||
**Vertical Farming Integration with AI and Robotics**: Modern vertical farms
|
||||
combine hydroponics and aeroponics with artificial intelligence and robotics
|
||||
to optimize plant growth cycles. AI monitors environmental conditions and nutrient
|
||||
levels in real-time, while robots manage planting and harvesting, significantly
|
||||
increasing efficiency and reducing labor costs.\n\n4. **Discovery of Plant Immune
|
||||
System Pathways**: Cutting-edge research has unveiled new molecular pathways
|
||||
in plants that govern their immune responses to pathogens. This knowledge is
|
||||
being applied to develop crop varieties with enhanced natural resistance, reducing
|
||||
the dependence on chemical pesticides.\n\n5. **Microbiome Engineering for Soil
|
||||
and Plant Health**: Advances in understanding the plant microbiome have led
|
||||
to the creation of customized microbial inoculants that improve nutrient uptake,
|
||||
boost growth, and enhance stress tolerance. These soil and root microbiome treatments
|
||||
are now widely used to promote sustainable agriculture.\n\n6. **Smart Plant
|
||||
Sensors for Real-Time Monitoring**: Biotechnological breakthroughs have resulted
|
||||
in the development of nanosensors that can be integrated into plants to provide
|
||||
real-time data on plant health, water status, and nutrient deficiencies. This
|
||||
technology enables precision agriculture by allowing farmers to make timely,
|
||||
data-driven decisions.\n\n7. **Phytoremediation with Genetically Enhanced Species**:
|
||||
New genetically modified plants have been developed with an increased ability
|
||||
to absorb heavy metals and pollutants from contaminated soils and water bodies.
|
||||
These plants serve as eco-friendly, cost-effective solutions for environmental
|
||||
cleanup.\n\n8. **Urban Greening for Climate Resilience**: Cities in 2025 are
|
||||
increasingly adopting urban forestry projects that use specially selected plant
|
||||
species to combat the urban heat island effect, improve air quality, and support
|
||||
urban biodiversity. These initiatives also play a vital role in enhancing mental
|
||||
health and wellbeing for residents.\n\n9. **Plant-Based Bioplastics and Sustainable
|
||||
Materials**: Innovations in plant biotechnology have enabled the production
|
||||
of biodegradable plastics and other sustainable materials derived from plant
|
||||
biomass. This technology offers alternatives to petroleum-based products, helping
|
||||
reduce plastic pollution and carbon emissions.\n\n10. **Advancements in Photosynthesis
|
||||
Efficiency**: Scientists have successfully introduced and optimized synthetic
|
||||
pathways to enhance photosynthesis in crops, resulting in a 30-50% increase
|
||||
in growth rates and yields. This breakthrough addresses the global food security
|
||||
challenge by enabling more efficient energy conversion in plants.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '4846'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFrtjt02kv3vpyj0YIEEuH3hz8Tj/eX0ODMGxhuv7R0guxkYFFWSmKZY
|
||||
CkndtjIIMA8xTzhPsqgqUh9t78cfw7clUVRVnVOnqvi3BwBXrr16AVd2MNmOk7+++bFf/u3pzx/+
|
||||
c35695dvXn3Af3/z8NPbZz/8+GN/8+vViZ+g5me0uT51tjROHrOjoJdtRJORV3307TePnzx/+uTb
|
||||
Z3JhpBY9P9ZP+frp+dH16IK7fvzw8bPrh0+vHz0tjw/kLKarF/BfDwAA/ib/8kZDi5+uXsDDU/3L
|
||||
iCmZHq9erDcBXEXy/Jcrk5JL2YR8ddouWgoZg+z9w0BzP+QX8BoC3YE1AXp3QTDQ8weACekO40/h
|
||||
exeMh5fy68VP4afwO7ihcYo4YEj8wDucKGagAC/biwkWW/gDXtDTNGLICVyAt96EDO+tw2ARTGjh
|
||||
O0cZ7RDIU7/AV2yEr3nx6+trecfv4NEZ/ogBs7PG+wXeUOs6h62ulaCjCDfejSbzDpLzsjY/+2FA
|
||||
WNBE4EVhMAlGE2+xhYj8H9N4hClSHzHJ5vKA0OubAEPvAmJ0oQfqwEaaEmTjPEVsIRPcuTywVVt5
|
||||
zAV2duK7E14wurzI111MdKZxnn/zMmWfdjChR3DjZGxOZ/gzP2N6ftxU0/FOAFuX+a+ZyKcTTCZm
|
||||
Z2dvol/g5t3r92/fXd+Y9HvYTHiCxNbNLuUEg7kgpNlaTKmb2XqtOgRbmMQVvEHMDtM///4PTBNa
|
||||
J0ZO2Uwey2d7d4swGvereizR0qAJ/EQeTAb8NLjGZcAw6M4zeYz8X7YTfsoRR4Q7NHnACJYCfxMF
|
||||
Nrn1c8uf10YJwRMMaPJJ3oKfeNNwZzJG6CKN0HkivvusvnVpddYoIWENL8seterQC/kLstdij2JF
|
||||
vj9BxDRRSI7dz8GTsgRA+bN+o7z2ek4I2HVOwnU5AQbTeHHHgAnVgok/cjQuZOMC202ialiSY3dw
|
||||
zEI3B6tfzEsvDn2bAC8YYA4tRsBwcZECg8T4sp0zfM/3QY7O+OLIFkcKKUcmFRDT6z5iifpc/EVz
|
||||
njB2FEe29oWDgRi6q6+hWdYt8+dMkdrZZnfhMG1nCXobnSAO+kh3eYBpMAkT3A0YvuRLExHm0JkL
|
||||
RbbAueJPQ5xjX/fa75A8ViQXQ/bEW8OFqpXEnAKef+XHF35hjq6Zs4RWR9RCQjsL3BpGWKeI7Uwc
|
||||
MSZoccLQiksGEy/ImDCja1MGjNFw7CgkGfFsqHSGNxSRLhhPAuyIYhkKTBBsXuiM83NEiC7dqgOT
|
||||
pchxNqCLwKzL944uu95I2KGlQKOz4CmxDV0A00fGceaFdJPiPhrHOTh20fnIgY/P8KrC68bEhgLc
|
||||
mEkez4OAp1Drq423+NE3ZRcUvkA/9UmrCyb8ZUYJL76d6TIhBvb/HNomornd8ZMyel2BLdU4ukea
|
||||
xa3MlBwHkS7YgjWTseyvTPx/+QRhlUz8vzxSmgaMzsLND4/PzOhooh3YmwKC0QQ3zV4wIHzVOQsm
|
||||
l7BI63s3DDIlznYAkyASZeDlXEa7vnoaKFNaQh6ETSaThzuzpBNvkabsRma+YqR5yuZWw/vNjnUq
|
||||
mWFhwUIS7Ns8yEtGk2RzsoW0pIxjUhBzwi22Z0zpe1zIBImch5Htgl2HDFD0mlf2lr6/feU/FGIq
|
||||
eQk1ltn38vTKabyjmx8eQ+c+yXec4YPAdOfMDZ8MZMPp4CIEUaLGhdvExO0n+escG9YNGAZi+uxN
|
||||
YtRaDCWw1ky7ulqt+Xpk8WBC9supcIVSXc+ExfTSIL/ABUtxIiVBsVKNaqM47aIZ8Y4i72olhGDY
|
||||
29eNSRw25GfditpfZBsHNODoUpIrG+6x6ygyMdzMPrvLCiaJqZXCJBQ5/AIY8JxxIFnDKZRmJrK5
|
||||
EQmm2fVIY72nxvhqz9EE0+tu1NGH3DAn5uxCivc44skZ/oJRSft7E0f+7tchY18QLTh8+VqWfUcN
|
||||
ZWfTStO7+6gDlhkcIsbLFe9dL4Ltq5evv5bnY3lePXCpr+3Ka5k8Il6Knd2v4qqWKO6Jj6m2xRiO
|
||||
jyfAcfK0SPB7Tsx2Z/cR80BtkSTD0kaaKPA+eFMG15975EZMNEeLMCes8b3qpRpcrHzN3LOdsV1N
|
||||
dcDpSMFl/gR+VTB+YVaoWXIyHHaZaaqSTcZxwiiBd4JhHl3r8nIC7/ohi1lDkj/wemHOUTP4HisM
|
||||
lYjGX2c3KkrYfb6n6PIwJjDtzzOnskOMSHCRT9AuwYwl1e7tofRY0rpdrMd0gtF84qvsPE29wlku
|
||||
1D/emZTxDDcU7BwjKkxLFKxmwk9oJahNut3Lu4SVRFzoTyo9MHAa0a8vuVnew5afIlrHONTMMCG2
|
||||
J0iuD8K3/G7FJz8wmjAbD940FCEgthoLwzyawEme4qYX0xIw9gsE4irFL9AQsSSo+kfRXqmxmTMY
|
||||
nwg6YxlvhtPLHBsT1jj/jElCwsiSezKlvKnBl066ZYQcTUjCdfrCSjlqCs6SrkXoIqahbAzBk7hx
|
||||
o1nWnIhZJVCLIz/qArQYEqeIiWqKNEz/G2/4+6Tx9Ax/cMmy4JFkoCriNesQhPfiV3hbEiI/8g45
|
||||
PPmzJC0L0ucgz7OPI14czckvMIfbQHcBRvIoFcuaVz8T4EzcF5VKarKeNy+pTXbjdDdVoCcwvXEh
|
||||
ZVmRegyJYa1KJNE0uMSY5K/fXt6oFFDgO062nxdKjkWY0vktLhJwxmuqS9a0mMr2LE7MA1OkjK76
|
||||
PqKlPgi+yq6cZX2EJutzjoUd56kWO3YTjMg6zKUxbRHaIk4Yi6zk6lL0IztOBWat3yQ/lLp0p+tl
|
||||
J5LrBPWT6M20VWatS6IGuGLg5S2ybEYnan6edpb/zOYUofFkRQPWLwRmKn6NCzlJ1o7Y7LB5mX3A
|
||||
WOtfkSMiYxgwDHSa2W5+kZSB5uL8wgnUDjgqqTIpWNdi2tcTftVc3TGhrBrM0x3G7Wkm/hO0juks
|
||||
DXg/qU7kFcUnaFHlUrunBMscoejcVCzbfUDj83ACzk+sQPaZvYKUxduGviK/LF13TPetXzYqUQ69
|
||||
B89nZ3jjbKTG0Yh7cS/f/p71Ia+oqP2T7KdaSrEzrk9LuY5rjSFol6sUew1DMCmRdWbNgLoEK1YN
|
||||
4TRHKQaU+pz/59//MYi+G1GFQEd25mXAwOQulCWnWwaLC8e8U7ixdGvO8GFNyOz4UmQUXHrtuGj4
|
||||
MyqlnKy9mPKFIlWIwa6FSRHWjexMabToRV5HlDUHpDp0y7/mYpwvEXuClN0oRFq2XVoTCqfaOFjb
|
||||
HTVIE4KdU6ZRhM+2P04PFLMzHF0j5d1rtaw4NBsOvZ7gchTElXolTQPFuUQlJ4Wq0RvMXEVs3am1
|
||||
tZKYy1bBsULY5GzsrTAogplWdG0Sd4u/fdWR5omTWDpG9w6MeyKI6J2YjAuGtVLpWPd59yuLJtnT
|
||||
CvdTcUwNtBVqm/m1R0GTyhUx/Q0z1S9zESe7fWdmYa1YtZCwpJhjC5nIaOccZBHuKPr2ThIwxbWN
|
||||
kBBvlYh3vMHYrSCusvQgs/ZNFe2buf9DwX9zhvejibm2SZHjRWnuHcvAD25EeKMytBT4+/apQMeF
|
||||
QJfCj4KeqQZ/MIG0Jkll4T1KcGywbSVHRLQssQQsCtrsUpq1oqwq+OKEOw+Nrp0IZiJ0YRYtUEV3
|
||||
km+rL2+Ra3CuCzjiaCec9Z2s7jUSPee8dNrQ0mKBicMCARSUFEBq4k5gzcxU3iwSWZLCjszf+dnm
|
||||
WY0lrvhuUXk2uiwM3pps4M5F5EJEZTTHxL5IK7xda1Zp1JaMvdexO2DsQJZO3FCLuEYaO2NkJnCB
|
||||
+3ese1n7S+NWl2L51JsoHOxiLN2d0wqm8rNoyaxdAxjRpDmiwNwl8GjatOanyjp5Vyrt11Lh2moN
|
||||
cKDAQyLUAqVlABT62Pl716EGdywOVySPa2Szz1bVXTqUpnzJzvRrf+C0b8Brg3U1wUhZ+WqjqQr3
|
||||
rYHFjr5uo+OmrAvTnO8n4m/P8HZYuEc1Yut2BfV+NLG26N5rN4CffXWIuP3jpbGgKG0wYOc480qv
|
||||
+39rk9aul1zcdWkygWkSxUa+vMVMn1y3iLAShmJIiE1E7+jshN/FUDWjC5L3dwBJFbers2uTo+rK
|
||||
2l1qa/7i5p4wG9/FtDWYOHazr/0PNnlFOcfgCUaMdo6clGJiyaxusxilna7SxO62XGFaq8nair/v
|
||||
G9PXkRPTbtqa+tBQK0W/dNSUHlxo5ySddo1xOOhKdtscqOtOrNxqrVto03o0QZQFcL25U3YnEY7X
|
||||
a+eOIzeU1KzCV8SA6sRV8lKsdYHVcKGL8Vt+YcLmFEZdZvn9aSqjN/G4S3Ge+FVbPWEmQc6wlbwR
|
||||
B3WRlLNqWHat459ryauQTwXZ0utWh+4l4K5dXfVclk6451+dlkiaA4sGH7WH7DKYkkCYL9l4R2Le
|
||||
QH4Phs/P8B9Sgf+RW4xVB395/Kc3agG85XVBm7EWWbRJhbxjrL6uWoo1x/Mj40a+Le9b+l8e5O1q
|
||||
BL7pl9nUwZ93HXdP1FYu6ETSrX1HaRhea3IuHQY2pBRXkXjInAoll84r6jQvodcg1pRZ8bmGyTYZ
|
||||
ylQW1hxJRXIJYD1K11w6D4fRzxruwG0NJsydxN3vM6kGrR2OoV7lcR645EvTmfN9s+zelAbTlpro
|
||||
YibS1ograd8SeekXVZFuXFxtygnC+Vw64Cs9lDKtdr0bR627YEzlEZnIa4OyYQRoTHP9THOq5tuP
|
||||
gCQgtJ2T9m3bYzOqFBUleFWiL0lMWADEjX9M0mDYt6Uj6o5kPKeRqkAqurkIGhVA5y2otzjlB2r3
|
||||
1ksrm0LtDRz74hwi6QQdpVxfXtFyuidL1YreXWrp7mQMdCf9NszcZWUCCcdscUTq78+qYq+/kwTx
|
||||
naPJm5Rrr/b9Lh2/YV7mMacICKaYXiJyk7Jb+dgcjgtw9alaS8fwu6qdOvE+20UHgIfXk3Q89ppg
|
||||
rJuAFlkH7ISwZMn19TzIOdcTDgJmzq0N+XunBoz3dLeOTrhnvFXX9+ruTLywFkZJagfyi+hB6XPz
|
||||
Ly+KBYx1LXz19s8vv66pfJEu+KfF+FsTSEj9q7d/evk1K0tnB9CGJNtp33UwDJ1QCI7zz35OPLGL
|
||||
Pc5jmZdUy+3A3+zcqTZG1ZLRTK6V4qH2oQ5BsrZYSncv5TW713avNG92A6aOKE/RiRXv9Se498v9
|
||||
BZ1Ylxw4UTKeHaSjae2bCiUliapDYm+W6xIz0rbouH+cyd4KL2wfuQ+sUrsnsC5qb1Hnu8xrLlg3
|
||||
+UKT71noHIuz9bSJMEBahQdKx0T7o5DpjmvSg2A19rYcD5FRoobq1n4+gWlbXvizKpUzlPcYepVt
|
||||
qndyNNU4qxN4OHLv3M3DNcy38zvbrDG5BK/WfoWM8HT2FVo+XVO7qzyeleFxkdqntXe8jnQPB1R2
|
||||
3ZrPR7Jl4KeHcTjJHXm4Nmam4y63rsoZvlu2WRdPqUu13rKzVxEWSUbJpa8rOQ11clAm+IckU+O0
|
||||
jFBlzJpOtdElvzTXrt2K2m9e5XOzwJOH188e/sum3ErS07Gkfpd2gAWqMa0tmrAOlKjS0+6kynZG
|
||||
hUsKmQALTmVwUD5K+E26YmZEMCPNmkB0Yd76zQ+Pt/buLtSm+p/jUQwZYsWQymizDB1qMbGNlatX
|
||||
t+lu5wJe55kVNSOwRR4F6jDURkp8gutnbszw6yQMToJHJ5swWrxz4SzMth4+CUfQ7zszp7L1eZr0
|
||||
2NM+Ax6zYh1cdzzbYSJZc2KZ4JapSyUy0YY7sXiAF7yfx9HEpXZr1as6ZWUh+vkxtNMx953+pyHt
|
||||
va0fyt2qPDlvTpEm9H4Tj+VQngSSgYB3gNEc8/AZvpeKUT/oepOXX6xXC05p1wiQcmc3ez19PuSW
|
||||
GoULCfXhZ5XdqmSrCKqtl7VpbMn79bTEcda+7XjXkZG6rlL0l5TQ2vbXqeIdei/heoZXYxON3erQ
|
||||
dn/ecSDfKmZ5nMRW3ZCzHa8q0/+Npre56bHKOB1R9gXTVbV5/7TINnBbzvtDoFxGJMMnUcPs/e6C
|
||||
CYF0PinHT/9arvy2Hjj11E+RmnTv0atORiwfmdYo8OHSlGm6kqu/PQD4qxxsnQ9nVa8YwFP+mOkW
|
||||
5XXPnz3R9a62A7Xb1UffPH9ULmceMGxXHj978vT0hSU/tsjDgrQ7HXtljR2w3Z7djtKauXW0u/Bg
|
||||
9+Gfb+hLa+vHu9D/f5bfLlgeK2L7cYrYOnv86O22iD9Lxffl21ZDy4avStX2MTuM7IwWOzN7PQd8
|
||||
pZXdx86FHqMILL6lmz4+tY+fP3vUPf/m8dWD3x78NwAAAP//AwAfj+9mGy0AAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99a009e659845642-EWR
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 23:13:18 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=.rEqb26S2HsQ1o0Ja7e_a0FNSP6mXo5NcckqrjzXhfM-1762384398-1.0.1.1-S88CeJ3IiOILT3Z9ld.7miZJEPOqGZkjAcRdt0Eu4iURqGJH8ZVYHAwikAjb8v2MT0drLKNlaneksrq1QCLU4G_CpqKTCkfeU2nh1ozS7uY;
|
||||
path=/; expires=Wed, 05-Nov-25 23:43:18 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=dSZh5_iAI7iopvBuWuh0oj7Zgv954H3Cju_z8Rt.r1k-1762384398265-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- user-REDACTED
|
||||
openai-processing-ms:
|
||||
- '23278'
|
||||
openai-project:
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '23447'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '198820'
|
||||
x-ratelimit-reset-requests:
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 354ms
|
||||
x-request-id:
|
||||
- req_fb0d5f8e98354927a30075e26853a018
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,293 +1,127 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: !!binary |
|
||||
CtUYCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSrBgKEgoQY3Jld2FpLnRl
|
||||
bGVtZXRyeRKbAQoQTGJgn0jZwk8xZOPTRSq/ERII9BoPGRqqFQMqClRvb2wgVXNhZ2UwATmYHiCs
|
||||
a0z4F0F4viKsa0z4F0oaCg5jcmV3YWlfdmVyc2lvbhIICgYwLjYxLjBKJwoJdG9vbF9uYW1lEhoK
|
||||
GEFzayBxdWVzdGlvbiB0byBjb3dvcmtlckoOCghhdHRlbXB0cxICGAF6AhgBhQEAAQAAEpACChDx
|
||||
Om9x4LijPHlQEGGjLUV5EggvAjBGPeqUVCoOVGFzayBFeGVjdXRpb24wATmoxSblakz4F0Goy3Ul
|
||||
bEz4F0ouCghjcmV3X2tleRIiCiA1ZTZlZmZlNjgwYTVkOTdkYzM4NzNiMTQ4MjVjY2ZhM0oxCgdj
|
||||
cmV3X2lkEiYKJDdkOTg1YjEwLWYyZWMtNDUyNC04OGRiLTFiNGM5ODA1YmRmM0ouCgh0YXNrX2tl
|
||||
eRIiCiAyN2VmMzhjYzk5ZGE0YThkZWQ3MGVkNDA2ZTQ0YWI4NkoxCgd0YXNrX2lkEiYKJGI2YTZk
|
||||
OGI1LWIxZGQtNDFhNy05MmU5LWNjMjE3MDA4MmYxN3oCGAGFAQABAAASlgcKEM/q8s55CGLCbZGZ
|
||||
evGMEAgSCEUAwtRck4dQKgxDcmV3IENyZWF0ZWQwATmQ1lMnbEz4F0HIl1UnbEz4F0oaCg5jcmV3
|
||||
YWlfdmVyc2lvbhIICgYwLjYxLjBKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4xMS43Si4KCGNyZXdf
|
||||
a2V5EiIKIDVlNmVmZmU2ODBhNWQ5N2RjMzg3M2IxNDgyNWNjZmEzSjEKB2NyZXdfaWQSJgokNGQx
|
||||
YjU4N2ItMWYyOS00ODQ0LWE0OTUtNDJhN2EyYTU1YmVjShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1
|
||||
ZW50aWFsShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoV
|
||||
Y3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrIAgoLY3Jld19hZ2VudHMSuAIKtQJbeyJrZXkiOiAi
|
||||
OTJlN2ViMTkxNjY0YzkzNTc4NWVkN2Q0MjQwYTI5NGQiLCAiaWQiOiAiYjA1MzkwMzMtMjRkZC00
|
||||
ZDhlLTljYzUtZGVhMmZhOGVkZTY4IiwgInJvbGUiOiAiU2NvcmVyIiwgInZlcmJvc2U/IjogZmFs
|
||||
c2UsICJtYXhfaXRlciI6IDE1LCAibWF4X3JwbSI6IG51bGwsICJmdW5jdGlvbl9jYWxsaW5nX2xs
|
||||
bSI6ICIiLCAibGxtIjogImdwdC00byIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJh
|
||||
bGxvd19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29s
|
||||
c19uYW1lcyI6IFtdfV1K+wEKCmNyZXdfdGFza3MS7AEK6QFbeyJrZXkiOiAiMjdlZjM4Y2M5OWRh
|
||||
NGE4ZGVkNzBlZDQwNmU0NGFiODYiLCAiaWQiOiAiOGEwOThjYmMtNWNlMy00MzFlLThjM2EtNWMy
|
||||
MWIyODFmZjY5IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZh
|
||||
bHNlLCAiYWdlbnRfcm9sZSI6ICJTY29yZXIiLCAiYWdlbnRfa2V5IjogIjkyZTdlYjE5MTY2NGM5
|
||||
MzU3ODVlZDdkNDI0MGEyOTRkIiwgInRvb2xzX25hbWVzIjogW119XXoCGAGFAQABAAASjgIKEMkJ
|
||||
cznGd0/eTsg6XFnIPKASCMFMEHNfIPJUKgxUYXNrIENyZWF0ZWQwATlgimEnbEz4F0GA2GEnbEz4
|
||||
F0ouCghjcmV3X2tleRIiCiA1ZTZlZmZlNjgwYTVkOTdkYzM4NzNiMTQ4MjVjY2ZhM0oxCgdjcmV3
|
||||
X2lkEiYKJDRkMWI1ODdiLTFmMjktNDg0NC1hNDk1LTQyYTdhMmE1NWJlY0ouCgh0YXNrX2tleRIi
|
||||
CiAyN2VmMzhjYzk5ZGE0YThkZWQ3MGVkNDA2ZTQ0YWI4NkoxCgd0YXNrX2lkEiYKJDhhMDk4Y2Jj
|
||||
LTVjZTMtNDMxZS04YzNhLTVjMjFiMjgxZmY2OXoCGAGFAQABAAASkAIKEOIa+bhB8mGS1b74h7MV
|
||||
3tsSCC3cx9TG/vK2Kg5UYXNrIEV4ZWN1dGlvbjABOZD/YSdsTPgXQZgOAXtsTPgXSi4KCGNyZXdf
|
||||
a2V5EiIKIDVlNmVmZmU2ODBhNWQ5N2RjMzg3M2IxNDgyNWNjZmEzSjEKB2NyZXdfaWQSJgokNGQx
|
||||
YjU4N2ItMWYyOS00ODQ0LWE0OTUtNDJhN2EyYTU1YmVjSi4KCHRhc2tfa2V5EiIKIDI3ZWYzOGNj
|
||||
OTlkYTRhOGRlZDcwZWQ0MDZlNDRhYjg2SjEKB3Rhc2tfaWQSJgokOGEwOThjYmMtNWNlMy00MzFl
|
||||
LThjM2EtNWMyMWIyODFmZjY5egIYAYUBAAEAABKWBwoQR7eeuiGe51vFGT6sALyewhIIn/c9+Bos
|
||||
sw4qDENyZXcgQ3JlYXRlZDABORD/pntsTPgXQeDuqntsTPgXShoKDmNyZXdhaV92ZXJzaW9uEggK
|
||||
BjAuNjEuMEoaCg5weXRob25fdmVyc2lvbhIICgYzLjExLjdKLgoIY3Jld19rZXkSIgogNWU2ZWZm
|
||||
ZTY4MGE1ZDk3ZGMzODczYjE0ODI1Y2NmYTNKMQoHY3Jld19pZBImCiQ5MGEwOTY1Ny0xNDY3LTQz
|
||||
MmMtYjQwZS02M2QzYTRhNzNlZmJKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jl
|
||||
d19tZW1vcnkSAhAAShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9v
|
||||
Zl9hZ2VudHMSAhgBSsgCCgtjcmV3X2FnZW50cxK4Agq1Alt7ImtleSI6ICI5MmU3ZWIxOTE2NjRj
|
||||
OTM1Nzg1ZWQ3ZDQyNDBhMjk0ZCIsICJpZCI6ICJmYWFhMjdiZC1hOWMxLTRlMDktODM2Ny1jYjFi
|
||||
MGI5YmFiNTciLCAicm9sZSI6ICJTY29yZXIiLCAidmVyYm9zZT8iOiBmYWxzZSwgIm1heF9pdGVy
|
||||
IjogMTUsICJtYXhfcnBtIjogbnVsbCwgImZ1bmN0aW9uX2NhbGxpbmdfbGxtIjogIiIsICJsbG0i
|
||||
OiAiZ3B0LTRvIiwgImRlbGVnYXRpb25fZW5hYmxlZD8iOiBmYWxzZSwgImFsbG93X2NvZGVfZXhl
|
||||
Y3V0aW9uPyI6IGZhbHNlLCAibWF4X3JldHJ5X2xpbWl0IjogMiwgInRvb2xzX25hbWVzIjogW119
|
||||
XUr7AQoKY3Jld190YXNrcxLsAQrpAVt7ImtleSI6ICIyN2VmMzhjYzk5ZGE0YThkZWQ3MGVkNDA2
|
||||
ZTQ0YWI4NiIsICJpZCI6ICJkOTdlMDUyOS02NTY0LTQ4YmUtYjllZC0xOGJjNjdhMmE2OTIiLCAi
|
||||
YXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiaHVtYW5faW5wdXQ/IjogZmFsc2UsICJhZ2VudF9y
|
||||
b2xlIjogIlNjb3JlciIsICJhZ2VudF9rZXkiOiAiOTJlN2ViMTkxNjY0YzkzNTc4NWVkN2Q0MjQw
|
||||
YTI5NGQiLCAidG9vbHNfbmFtZXMiOiBbXX1degIYAYUBAAEAABKOAgoQ9JDe0CwaHzWJEVKFYjBJ
|
||||
VhIIML5EydDNmjcqDFRhc2sgQ3JlYXRlZDABOTjlxXtsTPgXQXCsxntsTPgXSi4KCGNyZXdfa2V5
|
||||
EiIKIDVlNmVmZmU2ODBhNWQ5N2RjMzg3M2IxNDgyNWNjZmEzSjEKB2NyZXdfaWQSJgokOTBhMDk2
|
||||
NTctMTQ2Ny00MzJjLWI0MGUtNjNkM2E0YTczZWZiSi4KCHRhc2tfa2V5EiIKIDI3ZWYzOGNjOTlk
|
||||
YTRhOGRlZDcwZWQ0MDZlNDRhYjg2SjEKB3Rhc2tfaWQSJgokZDk3ZTA1MjktNjU2NC00OGJlLWI5
|
||||
ZWQtMThiYzY3YTJhNjkyegIYAYUBAAEAAA==
|
||||
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
|
||||
scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent
|
||||
Task: Give me an integer score between 1-5 for the following title: ''The impact
|
||||
of AI in the future of work''\n\nThis is the expected criteria for your final
|
||||
answer: The score of the title.\nyou MUST return the actual complete content
|
||||
as the final answer, not a summary.\nEnsure your final answer strictly adheres
|
||||
to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\":
|
||||
\"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '3160'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
User-Agent:
|
||||
- OTel-OTLP-Exporter-Python/1.27.0
|
||||
content-length:
|
||||
- '1394'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://telemetry.crewai.com:4319/v1/traces
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "\n\0"
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLBbpwwEL3zFSOflwgI2W64VYkq9VBVVVtFUYmQYwZw19iOPWyarvbf
|
||||
K8NmIWkq9YLEvHnj997MPgJgsmYFMNFxEr1V8dVtm3y3n3fXw6cH9XC7pcubm4368nX7xOk3WwWG
|
||||
uf+Jgp5ZZ8L0ViFJoydYOOSEYWr6bp2db5L1OhuB3tSoAq21FOdnadxLLeMsyS7iJI/T/EjvjBTo
|
||||
WQE/IgCA/fgNQnWNv1gByeq50qP3vEVWnJoAmDMqVBj3XnrimthqBoXRhHrU/q0zQ9tRAR9Bm0cQ
|
||||
XEMrdwgc2mAAuPaP6Er9QWqu4P34V8C+1AAl88I4LFkBeakPywccNoPnwaUelFoAXGtDPKQ0Wrs7
|
||||
IoeTGWVa68y9f0VljdTSd5VD7o0Owj0Zy0b0EAHcjaENL3Jg1pneUkVmi+Nz2WU+zWPzshZodgTJ
|
||||
EFdz/Txdr96YV9VIXCq/iJ0JLjqsZ+q8Iz7U0iyAaOH6bzVvzZ6cS93+z/gZEAItYV1Zh7UULx3P
|
||||
bQ7DLf+r7ZTyKJh5dDspsCKJLmyixoYPajow5p88YV81UrforJPTlTW2ykW2uUibzTpj0SH6AwAA
|
||||
//8DAAY2e2R0AwAA
|
||||
headers:
|
||||
Content-Length:
|
||||
- '2'
|
||||
CF-RAY:
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:07 GMT
|
||||
- Wed, 05 Nov 2025 22:11:02 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Wed, 05-Nov-25 22:41:02 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '864'
|
||||
openai-project:
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '3087'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '199687'
|
||||
x-ratelimit-reset-requests:
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 93ms
|
||||
x-request-id:
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Scorer. You''re an
|
||||
expert scorer, specialized in scoring titles.\nYour personal goal is: Score
|
||||
the title\nTo give my best complete final answer to the task use the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Give me an integer score between 1-5 for the following
|
||||
title: ''The impact of AI in the future of work''\n\nThis is the expect criteria
|
||||
for your final answer: The score of the title.\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\n\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}], "model": "gpt-4o"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '915'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7g6ECkdgdJF0ALFHrI5SacpmMHJ\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214486,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer\\nFinal
|
||||
Answer: 4\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
186,\n \"completion_tokens\": 13,\n \"total_tokens\": 199,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_52a7f40b0b\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85fa08e9c01cf3-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:07 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1622'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999781'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_35eb9905a91a608029995346fbf896f5
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "4"}, {"role": "system", "content":
|
||||
"I''m gonna convert this raw text into valid JSON.\n\nThe json should have the
|
||||
following structure, with the following keys:\n{\n score: int\n}"}], "model":
|
||||
"gpt-4o", "tool_choice": {"type": "function", "function": {"name": "ScoreOutput"}},
|
||||
"tools": [{"type": "function", "function": {"name": "ScoreOutput", "description":
|
||||
"Correctly extracted `ScoreOutput` with all the required parameters with correct
|
||||
types", "parameters": {"properties": {"score": {"title": "Score", "type": "integer"}},
|
||||
"required": ["score"], "type": "object"}}}]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '615'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7g8zB4Od4RfK0sv4EeIWbU46WGJ\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214488,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_kt0n3uJwbBJvTbBYypMna9WS\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"ScoreOutput\",\n
|
||||
\ \"arguments\": \"{\\\"score\\\":4}\"\n }\n }\n
|
||||
\ ],\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
100,\n \"completion_tokens\": 5,\n \"total_tokens\": 105,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85fa159d0d1cf3-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:08 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '145'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999947'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_eeca485911339e63d0876ba33e3d0dcc
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because one or more lines are too long
@@ -1,35 +1,37 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Scorer. You''re an
|
||||
expert scorer, specialized in scoring titles.\nYour personal goal is: Score
|
||||
the title\nTo give my best complete final answer to the task use the exact following
|
||||
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
|
||||
scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Give me an integer score between 1-5 for the following
|
||||
title: ''The impact of AI in the future of work''\n\nThis is the expect criteria
|
||||
for your final answer: The score of the title.\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\n\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}], "model": "gpt-4o"}'
|
||||
described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent
|
||||
Task: Give me an integer score between 1-5 for the following title: ''The impact
|
||||
of AI in the future of work''\n\nThis is the expected criteria for your final
|
||||
answer: The score of the title.\nyou MUST return the actual complete content
|
||||
as the final answer, not a summary.\nEnsure your final answer strictly adheres
|
||||
to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\":
|
||||
\"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '915'
|
||||
- '1394'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -39,29 +41,32 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7gHpcYNCeB1VPV4HB3fcxap5Zs3\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214497,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
|
||||
Answer: 5\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
186,\n \"completion_tokens\": 15,\n \"total_tokens\": 201,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_52a7f40b0b\"\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLBbtNAEL37K0Z7jqvEcdLgGyAKHJAAlQOQytqux/aQ9exqd91SRfn3
|
||||
ap00dqFIXCx53rzZ997MPgEQVIkChGplUJ3V6dvvzfzTUn7+im/cN9rt+PILfbh6+PF+8+6axSwy
|
||||
zO0vVOGJdaFMZzUGMidYOZQB49TF5TpbbubrVT4AnalQR1pjQ5pfLNKOmNJsnq3SeZ4u8hO9NaTQ
|
||||
iwJ+JgAA++EbhXKFv0UB89lTpUPvZYOiODcBCGd0rAjpPfkgOYjZCCrDAXnQft2avmlDAR+BzT0o
|
||||
ydDQHYKEJhoAyf4e3ZaviKWG18NfAfstA2yFV8bhVhSQb/kwfcBh3XsZXXKv9QSQzCbImNJg7eaE
|
||||
HM5mtGmsM7f+D6qoicm3pUPpDUfhPhgrBvSQANwMofXPchDWmc6GMpgdDs9lr/LjPDEua4JmJzCY
|
||||
IPVYXy7WsxfmlRUGSdpPYhdKqharkTruSPYVmQmQTFz/real2UfnxM3/jB8BpdAGrErrsCL13PHY
|
||||
5jDe8r/azikPgoVHd0cKy0Do4iYqrGWvjwcm/IMP2JU1cYPOOjpeWW3LXGWb1aLerDORHJJHAAAA
|
||||
//8DAKUvzEN0AwAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85fa50e91d1cf3-GRU
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -69,111 +74,17 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:18 GMT
|
||||
- Wed, 05 Nov 2025 22:10:54 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '208'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
Set-Cookie:
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Wed, 05-Nov-25 22:40:54 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999781'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_cde8ce8b2f9d9fdf61c9fa57b3533b09
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "5"}, {"role": "system", "content":
|
||||
"I''m gonna convert this raw text into valid JSON.\n\nThe json should have the
|
||||
following structure, with the following keys:\n{\n score: int\n}"}], "model":
|
||||
"gpt-4o", "tool_choice": {"type": "function", "function": {"name": "ScoreOutput"}},
|
||||
"tools": [{"type": "function", "function": {"name": "ScoreOutput", "description":
|
||||
"Correctly extracted `ScoreOutput` with all the required parameters with correct
|
||||
types", "parameters": {"properties": {"score": {"title": "Score", "type": "integer"}},
|
||||
"required": ["score"], "type": "object"}}}]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '615'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7gIZve3ZatwmBGEZC5vq0KyNoer\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214498,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_r9KqsHWbX5RJmpAjboufenUD\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"ScoreOutput\",\n
|
||||
\ \"arguments\": \"{\\\"score\\\":5}\"\n }\n }\n
|
||||
\ ],\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
100,\n \"completion_tokens\": 5,\n \"total_tokens\": 105,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85fa54ce921cf3-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:18 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
@@ -182,62 +93,75 @@ interactions:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '201'
|
||||
- '730'
|
||||
openai-project:
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '754'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999947'
|
||||
- '199687'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 93ms
|
||||
x-request-id:
|
||||
- req_26afd78702318c20698fb0f69e884cee
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Scorer. You''re an
|
||||
expert scorer, specialized in scoring titles.\nYour personal goal is: Score
|
||||
the title\nTo give my best complete final answer to the task use the exact following
|
||||
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
|
||||
scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Given the score the title ''The impact of AI in
|
||||
the future of work'' got, give me an integer score between 1-5 for the following
|
||||
title: ''Return of the Jedi''\n\nThis is the expect criteria for your final
|
||||
answer: The score of the title.\nyou MUST return the actual complete content
|
||||
as the final answer, not a summary.\n\nThis is the context you''re working with:\n5\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o"}'
|
||||
described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent
|
||||
Task: Given the score the title ''The impact of AI in the future of work'' got,
|
||||
give me an integer score between 1-5 for the following title: ''Return of the
|
||||
Jedi''\n\nThis is the expected criteria for your final answer: The score of
|
||||
the title.\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI
|
||||
schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\":
|
||||
\"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\n\nThis is
|
||||
the context you''re working with:\n{\n \"score\": 4\n}\n\nBegin! This is VERY
|
||||
important to you, use the tools available and give your best Final Answer, your
|
||||
job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1014'
|
||||
- '1512'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
- __cf_bm=REDACTED;
|
||||
_cfuvid=REDACTED
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -247,29 +171,32 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7gJtNzcSrxFvm0ZW3YWTS29zXY4\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214499,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
|
||||
Answer: 4\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
209,\n \"completion_tokens\": 15,\n \"total_tokens\": 224,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLBjtMwEL3nK0Y+N6skTUuVG1tA4lBOe6Aiq8h1JonBGRvb2YKq/jty
|
||||
2m2ysEh7sWS/eeP33swpAmCyZgUw0XEveqPi7b5Ndh/3Jt3hYTtkuN3d/9wev/ov+4fDB7YIDH34
|
||||
jsI/s+6E7o1CLzVdYGGRewxd03frbLlJ1qt8BHpdowq01vg4v0vjXpKMsyRbxUkep/mV3mkp0LEC
|
||||
vkUAAKfxDEKpxl+sgGTx/NKjc7xFVtyKAJjVKrww7px0npNniwkUmjzSqP2h00Pb+QI+A+kjCE7Q
|
||||
yicEDm0wAJzcEW1JnyRxBe/HWwGnkgBK5oS2WLICliWd5x9YbAbHg0salJoBnEh7HlIarT1ekfPN
|
||||
jNKtsfrg/qKyRpJ0XWWRO01BuPPasBE9RwCPY2jDixyYsbo3vvL6B47fLbP80o9Nw5rQLLuCXnuu
|
||||
Zqx8vXilX1Wj51K5WexMcNFhPVGnGfGhlnoGRDPX/6p5rffFuaT2Le0nQAg0HuvKWKyleOl4KrMY
|
||||
dvl/ZbeUR8HMoX2SAisv0YZJ1NjwQV0WjLnfzmNfNZJatMbKy5Y1pspFtlmlzWadsegc/QEAAP//
|
||||
AwA95GMtdAMAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85fa5a0e381cf3-GRU
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -277,139 +204,48 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:19 GMT
|
||||
- Wed, 05 Nov 2025 22:10:55 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '369'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999758'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_6bf91248e69797b82612f729998244a4
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "4"}, {"role": "system", "content":
|
||||
"I''m gonna convert this raw text into valid JSON.\n\nThe json should have the
|
||||
following structure, with the following keys:\n{\n score: int\n}"}], "model":
|
||||
"gpt-4o", "tool_choice": {"type": "function", "function": {"name": "ScoreOutput"}},
|
||||
"tools": [{"type": "function", "function": {"name": "ScoreOutput", "description":
|
||||
"Correctly extracted `ScoreOutput` with all the required parameters with correct
|
||||
types", "parameters": {"properties": {"score": {"title": "Score", "type": "integer"}},
|
||||
"required": ["score"], "type": "object"}}}]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '615'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7gKvnUU5ovpyWJidIVbzE9iftLT\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214500,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_TPSNuX6inpyw6Mt5l7oKo52Z\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"ScoreOutput\",\n
|
||||
\ \"arguments\": \"{\\\"score\\\":4}\"\n }\n }\n
|
||||
\ ],\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
100,\n \"completion_tokens\": 5,\n \"total_tokens\": 105,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85fa5ebd181cf3-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:20 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '168'
|
||||
- '983'
|
||||
openai-project:
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '1002'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999947'
|
||||
- '199659'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 102ms
|
||||
x-request-id:
|
||||
- req_e569eccb13b64502d7058424df211cf1
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
@@ -1,105 +1,4 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"trace_id": "e97144c4-2bdc-48ac-bbe5-59e4d9814c49", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "crew", "flow_name": null, "crewai_version": "1.2.1", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-10-31T07:44:22.182046+00:00"},
|
||||
"ephemeral_trace_id": "e97144c4-2bdc-48ac-bbe5-59e4d9814c49"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '488'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.2.1
|
||||
X-Crewai-Organization-Id:
|
||||
- 73c2b193-f579-422c-84c7-76a39a1da77f
|
||||
X-Crewai-Version:
|
||||
- 1.2.1
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"id":"dfc603d5-afb3-49bb-808c-dfae122dde9d","ephemeral_trace_id":"e97144c4-2bdc-48ac-bbe5-59e4d9814c49","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.2.1","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.2.1","privacy_level":"standard"},"created_at":"2025-10-31T07:44:22.756Z","updated_at":"2025-10-31T07:44:22.756Z","access_code":"TRACE-0d13ac15e6","user_identifier":null}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '515'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Fri, 31 Oct 2025 07:44:22 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
etag:
|
||||
- W/"c886631dcc4aae274f1bdcfb3b17fb01"
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 9b9d1dac-6f4b-455b-8be9-fa8bb0c85a4f
|
||||
x-runtime:
|
||||
- '0.073038'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 201
|
||||
message: Created
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Crew Manager. You are
|
||||
a seasoned manager with a knack for getting the best out of your team.\nYou
|
||||
@@ -137,11 +36,12 @@ interactions:
|
||||
score between 1-5 for the following title: ''The impact of AI in the future
|
||||
of work''\n\nThis is the expected criteria for your final answer: The score
|
||||
of the title.\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\nEnsure your final answer contains only the content in the following
|
||||
format: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\":
|
||||
not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI
|
||||
schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\":
|
||||
\"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nEnsure
|
||||
the final output does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o"}'
|
||||
headers:
|
||||
@@ -152,7 +52,7 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '3379'
|
||||
- '3433'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
@@ -176,29 +76,27 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//nFRLb9NAEL7nV4z2wsWJGtq6bW6ocIgQD0EQEhhFm/XYHrreMbvjhFL1
|
||||
v6P1uknKQ0JcLGu/eXwz38zcTQAUlWoByjRaTNvZ6fVHzE/f2A8/vuU/rj7Q9dd31asXZy9ff3q7
|
||||
rIzKogdvvqKRB6+Z4bazKMQuwcajFoxR5xf5/PIqz/PTAWi5RBvd6k6mZzx9evL0bHpyOT3JR8eG
|
||||
yWBQC/g8AQC4G76Roivxu1rASfbw0mIIuka12BsBKM82vigdAgXRTlR2AA07QTewXjGMnBGkQRAd
|
||||
bjJYgkMsQRhKtFjrEQyGPbkauEq2JBbhyapBoLbTRiLwbAnkBrjqpfcY33bsb57EaPH5vWGPfgaF
|
||||
K9wzE1u1gOcPWaJlNDQc/9A/mMDSdb0s4K5QkWGhFlCoF1tt++ilXQmd5y2V8R/ICdboB7oIG5Qd
|
||||
ooP59Bwq9v/BfAbX7AKV6KHSRtgHCL1pQAfwaHGrncEMjNWe5DYDdLWusUUnWWLGsdmk7VGuPYlZ
|
||||
oTIokiLfJdW1aiiM5HdkLWwQ+pDk0CFgCIP7t15bktvHYsQC0LchvpIc8RuYpPz7lGOLh5xJlULd
|
||||
F+7NJqDf6qTMapwJaHSATeyjR4O0xXI2YMkv8fwnCTY6lsKp0ZgUjAIbT4KeNNS0RZfGY9VwXzey
|
||||
gGVKsNMkew3HzMLgMXTsStiRNPs5xdnxxHus+qDjwrne2iNAO8cyMBh27cuI3O+3y3Lded6EX1xV
|
||||
RY5Cs/aoA7u4SUG4UwN6PwH4Mmxx/2gxVee57WQtfINDuovTeYqnDnfjgM7z8xEVFm0PwOVVnv0h
|
||||
4LpE0WTD0SFQRpsGy4Pr4WroviQ+AiZHZf9O50+xU+nk6n8JfwCMwU6wXHceSzKPSz6YeYx39W9m
|
||||
+zYPhFWcVzK4FkIfpSix0r1NJ0+F2yDYrityNfrOU7p7Vbc2m2p+cXl+nl+oyf3kJwAAAP//AwDP
|
||||
kOYqAAYAAA==
|
||||
H4sIAAAAAAAAA5RUTW/bMAy951cQuvTiFE6aNplvwQYMwb4OKwYMcxEoEm1rkUVDotN2Rf77IDuJ
|
||||
07UDtoth8JHvPdKkn0YAwmiRgVCVZFU3dvz2e5nOZzgpftWqnPtq86WoJ5/lkr59+PRRJLGCNj9R
|
||||
8bHqUlHdWGRDroeVR8kYWSfzm+nVIr25etMBNWm0saxseDyj8TSdzsbpYpzeHAorMgqDyODHCADg
|
||||
qXtGi07jg8ggTY6RGkOQJYrslAQgPNkYETIEE1g6FskAKnKMrnN9W1FbVpzBChyiBibQaLGUjMAV
|
||||
AsuwBSogKPLGlX3MsEW4uK0QTN1IxTFhuQLjOrhoufUYY/fktxeRMoa/KvLoE7ivCEwAj6EhF8zG
|
||||
IhTkAXfStpKjRmhV1QmHS8hd7pYqDjSDd0djkTfSKopv6I8psHJNyxk85SKW5yLLxXuzQ5AOjGMs
|
||||
0XeNIGyQ7xEdTMbXnfr/t3WZiyTvJ/nAndLtiSNU1FoNG+zVNGxkQA3kQFnpDT8m4NHiTjqFCUin
|
||||
O3ceAx/FDrxR7aV2V7FcHR0cZhAt9DPOxf78a3ss2iDjsrnW2jNAOkcs4+C6Pbs7IPvTZlkqG0+b
|
||||
8EepKIwzoVp7lIFc3KLA1IgO3Y8A7roNbp8tpWg81Q2vmbbYyc1ns55PDDczoJN0cUCZWNoBWFxP
|
||||
k1cI1xpZGhvOjkAoqSrUQ+lwMbLVhs6A0VnbL+28xt23blz5L/QDoBQ2jHrdeNRGPW95SPMY/yl/
|
||||
SzuNuTMsAvqdUbhmgz5+Co2FbG1/7iI8BsZ6XRhXom+86W++aNZyvlioa4lFKkb70W8AAAD//wMA
|
||||
pMkkSfwEAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 997186dc6d3aea38-FCO
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -206,14 +104,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 31 Oct 2025 07:44:26 GMT
|
||||
- Wed, 05 Nov 2025 22:10:42 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=n45oVEbg4Ph05GBqJp2KyKI77cF1e_lNGmWrdQjbV20-1761896666-1.0.1.1-hTLlylCKTisapDYTpS63zm.2k2AGNs0DvyKGQ6MEtJHyYJBoXKqzsHRbsZN_dbtjm4Kj_5RG3J73ysTSs817q_9mvPtjHgZOvOPhDwGxV_M;
|
||||
path=/; expires=Fri, 31-Oct-25 08:14:26 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Wed, 05-Nov-25 22:40:42 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=gOhnFtutoiWlRm84LU88kCfEmlv5P_3_ZJ_wlDnkYy4-1761896666288-0.0.1.1-604800000;
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
@@ -228,37 +126,31 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '3152'
|
||||
- '2837'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '3196'
|
||||
- '2972'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-requests:
|
||||
- '9999'
|
||||
- '30000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999195'
|
||||
x-ratelimit-reset-project-requests:
|
||||
- 6ms
|
||||
- '29181'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 1ms
|
||||
- 1.638s
|
||||
x-request-id:
|
||||
- req_5011a1ac60f3411f9164a03bedad1bc5
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
@@ -269,14 +161,13 @@ interactions:
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent
|
||||
Task: Evaluate and provide an integer score between 1-5 for the title ''The
|
||||
impact of AI in the future of work''. Consider factors such as relevance, clarity,
|
||||
engagement, and potential impact of the title.\n\nThis is the expected criteria
|
||||
for your final answer: Your best answer to your coworker asking you this, accounting
|
||||
for the context shared.\nyou MUST return the actual complete content as the
|
||||
final answer, not a summary.\n\nThis is the context you''re working with:\nThis
|
||||
score will be used to assess the quality of the title in terms of its relevance
|
||||
and impact.\n\nBegin! This is VERY important to you, use the tools available
|
||||
Task: Give an integer score between 1-5 for the title ''The impact of AI in
|
||||
the future of work''.\n\nThis is the expected criteria for your final answer:
|
||||
Your best answer to your coworker asking you this, accounting for the context
|
||||
shared.\nyou MUST return the actual complete content as the final answer, not
|
||||
a summary.\n\nThis is the context you''re working with:\nThe title should be
|
||||
scored based on clarity, relevance, and interest in the context of the future
|
||||
of work and AI.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
@@ -286,7 +177,7 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1222'
|
||||
- '1131'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
@@ -310,30 +201,29 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFRNj9tGDL37VxBzlo2113Y2ewsKBNkCRS6L9tANDHqGkpgdDYUZyo4T
|
||||
7H8vKDlrJ02BXgx5HvnIx69vMwDHwd2D8y2q7/o4/+0v2m7LzR/1l+Vt1z2u059fb99/8L8fTo9f
|
||||
P7rKPGT/mbx+91p46fpIypIm2GdCJWNdvtku795ut9vtCHQSKJpb0+t8vVjOO048X92sNvOb9Xy5
|
||||
Pru3wp6Ku4e/ZwAA38ZfSzQF+uLu4ab6/tJRKdiQu381AnBZor04LIWLYlJXXUAvSSmNuT+2MjSt
|
||||
3sMDJDmCxwQNHwgQGhMAmMqR8lN6zwkjvBv/mfFRhhigeMkE2hIoayR4co8tAXc9egWp4d0DcBrx
|
||||
etAhk70dJT8/OUBYgwyj1WYBj68UXKDlpo0nyBTpgEnHhCYWTlbWwqkBE3gJUVrs7dXI+4ieCmAK
|
||||
8Fn20GF+Ji0LeFAj95Ewj2DRjNy0Wks+Yg5G46XrhsQe1chGXdKzr6DDZ3thBcJygloy4BCYkkVS
|
||||
gSEFylbnMKkVPxSw8kETMXmaFFJqsKGOkkKkA0XLp0jkUAEWU2L+50qZEginhB17o9JsNd1nQctU
|
||||
KVNRQJ+lFOAUhqKZqSzggxzpQLm66oofe7UnKNH0xhN01jabWIrRdO1PgCHYV+nJc20hTSL0WQ5i
|
||||
5TgQUJxSVwFKrakC1gK92CwxxnPfKyiDb02QWbOka1oo5FXyyO5bjJFSY1l/PFDGGCurMBdAa46k
|
||||
pnodgmqszdS8SdWRtb0Kbi05V+ecRmvdGeeMU1HCYOOC0FOuyStsFtcrkakeCtpepiHGKwBTEkXT
|
||||
MS7jpzPy8rp+UZo+y7785OpqTlzanc2rJFu1otK7EX2ZAXwa13z4YXNdn6XrdafyTGO41Xo18bnL
|
||||
ebmgy83mjKooxgtw+/ZN9QvCXSBFjuXqUjiPvqVwcb2cFZtvuQJmV7L/nc6vuCfpnJr/Q38BvKde
|
||||
Kez6TIH9j5IvZpns/P6X2WuZx4RdoXxgTztlytaKQDUOcbqJrpyKUrerOTWU+8zTYaz73dqv7jbL
|
||||
+m67crOX2T8AAAD//wMAu+XbmCcGAAA=
|
||||
H4sIAAAAAAAAAwAAAP//nFRNb+M2EL37Vwx4lg3bcT7gW9APxJcWu1hg0W4WBk2NpGmoGYIc2esu
|
||||
8t8L0nHktClQ9CII8zhPb97o8fsEwFBt1mBcZ9X1wU9/+K2d3z/9+Ht8+HOx+fX2pw+CV/efP26Y
|
||||
fnm4NlXukN0f6PTcNXPSB49KwifYRbSKmXVxe7O8upvfrJYF6KVGn9vaoNPVbDHtiWm6nC+vp/PV
|
||||
dLF6ae+EHCazhi8TAIDv5ZmFco3fzBrm1bnSY0q2RbN+PQRgovhcMTYlSmpZTTWCTliRi/ZPnQxt
|
||||
p2v41CEoqUd4NPmd+mCdgjRwvwFi0A6hGXSImGsHiU+PBiiB82hjBTVFdOqPENHj3rKCSulRCeRe
|
||||
aCzX7/BUpe7pCf0xd+0QiBUjJiVuc8Uy2KEmZIfghB1GxhoOpB10csjMB/IeiBs/lEOZNnjrMM1g
|
||||
o5AG54iLPCe8x2MqMpKTgOXjaSjLhN6qYizMMijYfkftQHqcwYMccI+xAlJwMvg6y0ye2i6z9hIR
|
||||
kFvbZsUSIQV01JDL6pE7m0Wdh4JmiNphnD3yI/9MbD3cczpgXMMGDoU7uUyo/2clFlaQtUsD12X4
|
||||
cUfn1ZwMv/C4AmwadEr7vAJb1xFTKt6/LtAqdLmrmJ402jx5I/FgYw3ecjvYFivYDfrGIepDlP15
|
||||
WfZkVH1k2+ef4sKn0EWbXta9E0l6shN7ZJ1d/rsRmyHZHCAevL8ALLOozQEsqfn6gjy/5sRLG6Ls
|
||||
0t9aTUNMqdtGtEk4ZyKpBFPQ5wnA15LH4U3ETIjSB92qPGH53HJ5e+Iz4z0woourM6qi1o/A1c2q
|
||||
eodwW6Na8uki0sZZ12E9to75z9GQC2ByMfY/5bzHfRqduP0v9CPgHAbFehsi1uTejjwei5ij9W/H
|
||||
Xm0ugk3CuCeHWyWMeRU1Nnbwp8vLpGNS7LcNcYsxRDrdYE3Yrtzy7nrR3N0szeR58hcAAAD//wMA
|
||||
yAb6DNAFAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 997186f4ef56dd1f-FCO
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -341,14 +231,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 31 Oct 2025 07:44:28 GMT
|
||||
- Wed, 05 Nov 2025 22:10:44 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=8qRmqHic3PKOkCdnlc5s2fTHlZ8fBzfDa2aJ.xrQBBg-1761896668-1.0.1.1-JIBosm31AwPEXmz19O636o_doSclt_nENWvAfvp_gbjWPtfO2e99BjAvWJsUWjHVZGRlO6DJILFTRbA7iKdYGQykSCe_mj9a9644nS5E6VA;
|
||||
path=/; expires=Fri, 31-Oct-25 08:14:28 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Wed, 05-Nov-25 22:40:44 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=TlYX1UMlEMLrIXQ.QBUJAS4tT0N5uBkshUKYyJjd9.g-1761896668679-0.0.1.1-604800000;
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
@@ -363,37 +253,31 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '2019'
|
||||
- '2541'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '2126'
|
||||
- '2570'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999720'
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999720'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
- '199743'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 77ms
|
||||
x-request-id:
|
||||
- req_019c6c76f5414041b69f973973952df4
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
@@ -434,29 +318,24 @@ interactions:
|
||||
score between 1-5 for the following title: ''The impact of AI in the future
|
||||
of work''\n\nThis is the expected criteria for your final answer: The score
|
||||
of the title.\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\nEnsure your final answer contains only the content in the following
|
||||
format: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\":
|
||||
not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI
|
||||
schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\":
|
||||
\"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nEnsure
|
||||
the final output does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"},{"role":"assistant","content":"To
|
||||
complete the task, I need to delegate the scoring of the title ''The impact
|
||||
of AI in the future of work'' to the Scorer. \n\nAction: Delegate work to coworker\nAction
|
||||
Input: {\"task\": \"Evaluate and provide an integer score between 1-5 for the
|
||||
title ''The impact of AI in the future of work''. Consider factors such as relevance,
|
||||
clarity, engagement, and potential impact of the title.\", \"context\": \"This
|
||||
score will be used to assess the quality of the title in terms of its relevance
|
||||
and impact.\", \"coworker\": \"Scorer\"}\nObservation: I would score the title
|
||||
\"The impact of AI in the future of work\" a 4 out of 5. The title is highly
|
||||
relevant given the increasing role of AI in shaping workplaces and job markets.
|
||||
It is clear and straightforward in communicating the topic, making it easy for
|
||||
audiences to understand the focus at a glance. The engagement level is solid,
|
||||
as AI and future work dynamics attract broad interest across industries. However,
|
||||
the title could be slightly more compelling by adding specifics or a provocative
|
||||
element to enhance its potential impact, such as mentioning specific sectors
|
||||
or challenges. Overall, it is a strong, relevant, and clear title with potential
|
||||
for broad impact, hence a 4 instead of a perfect 5."}],"model":"gpt-4o"}'
|
||||
Answer, your job depends on it!\n\nThought:"},{"role":"assistant","content":"Thought:
|
||||
I need to delegate the task of scoring the title ''The impact of AI in the future
|
||||
of work'' to the Scorer, who is responsible for evaluating such tasks. \n\nAction:
|
||||
Delegate work to coworker\nAction Input: {\"task\":\"Give an integer score between
|
||||
1-5 for the title ''The impact of AI in the future of work''.\",\"context\":\"The
|
||||
title should be scored based on clarity, relevance, and interest in the context
|
||||
of the future of work and AI.\",\"coworker\":\"Scorer\"}\nObservation: I would
|
||||
score the title \"The impact of AI in the future of work\" a 4 out of 5. It
|
||||
is clear, relevant, and interesting, effectively addressing the topic at hand
|
||||
with straightforward language, but it could be improved with a more dynamic
|
||||
or specific phrasing to boost engagement."}],"model":"gpt-4o"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -465,12 +344,12 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '4667'
|
||||
- '4232'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=n45oVEbg4Ph05GBqJp2KyKI77cF1e_lNGmWrdQjbV20-1761896666-1.0.1.1-hTLlylCKTisapDYTpS63zm.2k2AGNs0DvyKGQ6MEtJHyYJBoXKqzsHRbsZN_dbtjm4Kj_5RG3J73ysTSs817q_9mvPtjHgZOvOPhDwGxV_M;
|
||||
_cfuvid=gOhnFtutoiWlRm84LU88kCfEmlv5P_3_ZJ_wlDnkYy4-1761896666288-0.0.1.1-604800000
|
||||
- __cf_bm=REDACTED;
|
||||
_cfuvid=REDACTED
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
@@ -492,25 +371,23 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jJNdb9MwFIbv8yssX7corda05A6QkLhhwJiQWKbItU8SU8f27GPGqPLf
|
||||
JydZkxaQuHHk85xPvyfHhBAqBc0J5Q1D3lq1fPcNsl34Ut+m2Hj2/bo9fLyxD28/t+H37ZouYoTZ
|
||||
/wCOL1GvuGmtApRGD5g7YAgx62qbrXavsyzb9aA1AlQMqy0ur8xyna6vlulumWZjYGMkB09zcpcQ
|
||||
QsixP2OLWsAvmpN08WJpwXtWA81PToRQZ1S0UOa99Mg00sUEudEIuu/6a2NC3WBOPhBtHskhHtgA
|
||||
qaRmijDtH8EV+n1/e9PfcnIsNCEFtc5YcCjBF3Q0RrPnxsHMEm0oUfW2gt4MeDGDT3ZkUiPU4Ao6
|
||||
wC5+usVQzcFDkA5E9Ly7qBWv96PfZanrgDbgWHBebFDuBJgQMurG1KezuSqmPBS6m7+fgyp4FuXT
|
||||
QakZYFobZDFNr9z9SLqTVsrU1pm9vwilldTSN6UD5o2Oung0lva0S+JwcSfCmczx/VuLJZoD9OVW
|
||||
aTouBZ3WcMLbzQjRIFPzsBM5y1gKQCaVn+0V5Yw3IKbYaQlZENLMQDKb+892/pZ7mF3q+n/ST4Bz
|
||||
sAiitA6E5OcjT24Ootj/cju9c98w9eB+Sg4lSnBRCwEVC2r4g6h/8ghtWUldg7NODr9RZUu+r1bb
|
||||
3WaTbWnSJc8AAAD//wMAQ/5c5U8EAAA=
|
||||
H4sIAAAAAAAAAwAAAP//jFJda9wwEHz3rxD7fFd8l/tw/BYKpYVSSD9SSi8YRV7b6slaVVo3Kcf9
|
||||
9yL7cnbaBPoikGZnNLO7h0QI0CXkAlQjWbXOzF9/q9O36+X1jfNfbj6+f5CVC/bn9tPm6/7DNcwi
|
||||
g+5+oOJH1itFrTPImuwAK4+SMaoutpvlRZZuVpc90FKJJtJqx/MVzZfpcjVPs3m6OREb0goD5OJ7
|
||||
IoQQh/6MFm2JD5CLdPb40mIIskbIz0VCgCcTX0CGoANLyzAbQUWW0fauPzfU1Q3n4p2wdC/28eAG
|
||||
RaWtNELacI9+Z9/0t6v+lovDDoIijzvIV8eprseqCzLGsp0xE0BaSyxjW/pEtyfkeM5gqHae7sJf
|
||||
VKi01aEpPMpANvoNTA569JgIcdv3qnsSH5yn1nHBtMf+u8tFNujBOJ0RXWxPIBNLM2FdrGfP6BUl
|
||||
stQmTLoNSqoGy5E6jkZ2paYJkExS/+vmOe0hubb1/8iPgFLoGMvCeSy1epp4LPMYl/elsnOXe8MQ
|
||||
0P/SCgvW6OMkSqxkZ4a9gvA7MLZFpW2N3nk9LFflCrnNMrWWWKWQHJM/AAAA//8DAJjmLpVlAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 997187038d85ea38-FCO
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -518,7 +395,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 31 Oct 2025 07:44:29 GMT
|
||||
- Wed, 05 Nov 2025 22:10:50 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
@@ -534,155 +411,31 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '740'
|
||||
- '1276'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '772'
|
||||
- '5184'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-requests:
|
||||
- '9999'
|
||||
- '30000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29998885'
|
||||
x-ratelimit-reset-project-requests:
|
||||
- 6ms
|
||||
- '28993'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 2ms
|
||||
- 2.013s
|
||||
x-request-id:
|
||||
- req_5c525e6992a14138826044dd5a2becf9
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"Please convert the following text
|
||||
into valid JSON.\n\nOutput ONLY the valid JSON and nothing else.\n\nThe JSON
|
||||
must follow this schema exactly:\n```json\n{\n score: int\n}\n```"},{"role":"user","content":"{\n \"properties\":
|
||||
{\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\":
|
||||
[\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false\n}"}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '779'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=n45oVEbg4Ph05GBqJp2KyKI77cF1e_lNGmWrdQjbV20-1761896666-1.0.1.1-hTLlylCKTisapDYTpS63zm.2k2AGNs0DvyKGQ6MEtJHyYJBoXKqzsHRbsZN_dbtjm4Kj_5RG3J73ysTSs817q_9mvPtjHgZOvOPhDwGxV_M;
|
||||
_cfuvid=gOhnFtutoiWlRm84LU88kCfEmlv5P_3_ZJ_wlDnkYy4-1761896666288-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-helper-method:
|
||||
- chat.completions.parse
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jJJBj9MwEIXv+RXWnBuUljZNc13BAXFDYgXsKnLtSWpwPJY9QUDV/46c
|
||||
dJssLBIXH/zNG783nnMmBBgNtQB1kqx6b/O7eywP4dMv87548/mePn7A4a6qlHz91r0LsEoKOn5F
|
||||
xU+qV4p6b5ENuQmrgJIxdV3vy3V1KMvyMIKeNNok6zznW8o3xWabF1VelFfhiYzCCLX4kgkhxHk8
|
||||
k0Wn8QfUolg93fQYo+wQ6luREBDIphuQMZrI0jGsZqjIMbrR9fkBoqKAD1AXl2VNwHaIMll0g7UL
|
||||
IJ0jlini6O7xSi43P5Y6H+gY/5BCa5yJpyagjOTS25HJw0gvmRCPY+7hWRTwgXrPDdM3HJ9bb/dT
|
||||
P5gnPdPdlTGxtAvRbrN6oV2jkaWxcTE4UFKdUM/Secpy0IYWIFuE/tvMS72n4MZ1/9N+BkqhZ9SN
|
||||
D6iNeh54LguY9vBfZbchj4YhYvhuFDZsMKSP0NjKwU4rAvFnZOyb1rgOgw9m2pPWN+rYrvfVblfu
|
||||
IbtkvwEAAP//AwCH29h7MAMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 997187099d02ea38-FCO
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 31 Oct 2025 07:44:30 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '409'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '447'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999903'
|
||||
x-ratelimit-reset-project-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_c1d2e65ce5244e49bbc31969732c9b54
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
|
||||
@@ -1,213 +1,22 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Scorer. You''re an
|
||||
expert scorer, specialized in scoring titles.\nYour personal goal is: Score
|
||||
the title\nTo give my best complete final answer to the task use the exact following
|
||||
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
|
||||
scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Give me an integer score between 1-5 for the following
|
||||
title: ''The impact of AI in the future of work''\n\nThis is the expect criteria
|
||||
for your final answer: The score of the title.\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\n\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}], "model": "gpt-4o"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '915'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7frQCjT9BcDGcDj4QyiHzmwbFSt\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214471,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer\\nFinal
|
||||
Answer: 4\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
186,\n \"completion_tokens\": 13,\n \"total_tokens\": 199,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_52a7f40b0b\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85f9af4ef31cf3-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:47:52 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '170'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999781'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_c024216dd5260be75d28056c46183b74
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "4"}, {"role": "system", "content":
|
||||
"I''m gonna convert this raw text into valid JSON.\n\nThe json should have the
|
||||
following structure, with the following keys:\n{\n score: int\n}"}], "model":
|
||||
"gpt-4o", "tool_choice": {"type": "function", "function": {"name": "ScoreOutput"}},
|
||||
"tools": [{"type": "function", "function": {"name": "ScoreOutput", "description":
|
||||
"Correctly extracted `ScoreOutput` with all the required parameters with correct
|
||||
types", "parameters": {"properties": {"score": {"title": "Score", "type": "integer"}},
|
||||
"required": ["score"], "type": "object"}}}]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '615'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7fsjohZBgZL7M0zgaX4R7BxjHuT\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214472,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_MzP98lapLUxbi46aCd9gP0Mf\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"ScoreOutput\",\n
|
||||
\ \"arguments\": \"{\\\"score\\\":4}\"\n }\n }\n
|
||||
\ ],\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
100,\n \"completion_tokens\": 5,\n \"total_tokens\": 105,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85f9b2fc671cf3-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:47:52 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '163'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999947'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_d24b98d762df8198d3d365639be80fe4
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"Please convert the following text
|
||||
into valid JSON.\n\nOutput ONLY the valid JSON and nothing else.\n\nThe JSON
|
||||
must follow this schema exactly:\n```json\n{\n score: int\n}\n```"},{"role":"user","content":"4"}],"model":"gpt-4.1-mini"}'
|
||||
described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent
|
||||
Task: Give me an integer score between 1-5 for the following title: ''The impact
|
||||
of AI in the future of work''\n\nThis is the expected criteria for your final
|
||||
answer: The score of the title.\nyou MUST return the actual complete content
|
||||
as the final answer, not a summary.\nEnsure your final answer strictly adheres
|
||||
to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\":
|
||||
\"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -216,7 +25,7 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '277'
|
||||
- '1394'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
@@ -240,23 +49,24 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4yST2+cMBDF73wKa85LBITsEm5VKvWUQ0/9RwReM7BOzdi1TbXVar97ZdgspE2l
|
||||
XjjMb97w3nhOEWMgWygZiAP3YjAqfvj0+ajl7iM9dI9ff2w5T+Rx3+fvvjx+OL6HTVDo/TMK/6K6
|
||||
EXowCr3UNGNhkXsMU9PdNi12t0lRTGDQLaog642P85s0HiTJOEuyuzjJ4zS/yA9aCnRQsm8RY4yd
|
||||
pm8wSi0eoWTJ5qUyoHO8RyivTYyB1SpUgDsnnefkYbNAockjTd6bpnl2mio6VRRYBU5oixWULK/o
|
||||
XFHTNGupxW50PPinUakV4ETa85B/Mv10IeerTaV7Y/Xe/SGFTpJ0h9oid5qCJee1gYmeI8aepnWM
|
||||
rxKCsXowvvb6O06/y+/ncbC8wgLT2wv02nO11LfZ5o1pdYueS+VW6wTBxQHbRbnsno+t1CsQrTL/
|
||||
beat2XNuSf3/jF+AEGg8trWx2ErxOvDSZjHc6L/arjueDIND+1MKrL1EG96hxY6Paj4ccL+cx6Hu
|
||||
JPVojZXz9XSmzkVW3KVdsc0gOke/AQAA//8DAILgqohMAwAA
|
||||
H4sIAAAAAAAAA4xSwWrcMBC9+ysGndfBdrybrW8lEEgJ5NIGSh2MIo9tpfJISHLSsuy/F8nbtdOm
|
||||
0IvB8+Y9vTczhwSAyZZVwMTAvRiNSq+/9tnttbN7enjW9/rhxtD93ZfhKr/ru09sExj66RmF/826
|
||||
EHo0Cr3UNMPCIvcYVPOrXXG5z3bbLAKjblEFWm98Wl7k6ShJpkVWbNOsTPPyRB+0FOhYBd8SAIBD
|
||||
/Aaj1OIPVkEUi5URneM9surcBMCsVqHCuHPSeU6ebRZQaPJI0fvnQU/94Cu4BdKvIDhBL18QOPQh
|
||||
AHByr2hrupHEFXyMfxUcagKomRPaYs0qKGs6rh+w2E2Oh5Q0KbUCOJH2PEwpRns8IcdzGKV7Y/WT
|
||||
+4PKOknSDY1F7jQF485rwyJ6TAAe49CmN3NgxurR+Mbr7xifKz6Usx5blrVCixPotedqqV/mu807
|
||||
ek2LnkvlVmNngosB24W67IhPrdQrIFml/tvNe9pzckn9/8gvgBBoPLaNsdhK8Tbx0mYx3PK/2s5T
|
||||
joaZQ/siBTZeog2baLHjk5oPjLmfzuPYdJJ6tMbK+co605Si2G/zbr8rWHJMfgEAAP//AwAwqfO7
|
||||
dAMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 996f4750dfd259cb-MXP
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -264,14 +74,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 31 Oct 2025 01:11:28 GMT
|
||||
- Wed, 05 Nov 2025 22:10:50 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=NFLqe8oMW.d350lBeNJ9PQDQM.Rj0B9eCRBNNKM18qg-1761873088-1.0.1.1-Ipgawg95icfLAihgKfper9rYrjt3ZrKVSv_9lKRqJzx.FBfkZrcDqSW3Zt7TiktUIOSgO9JpX3Ia3Fu9g3DMTwWpaGJtoOj3u0I2USV9.qQ;
|
||||
path=/; expires=Fri, 31-Oct-25 01:41:28 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Wed, 05-Nov-25 22:40:50 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=dQQqd3jb3DFD.LOIZmhxylJs2Rzp3rGIU3yFiaKkBls-1761873088861-0.0.1.1-604800000;
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
@@ -286,152 +96,31 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '481'
|
||||
- '482'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '570'
|
||||
- '495'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999952'
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999955'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
- '199687'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 93ms
|
||||
x-request-id:
|
||||
- req_1b331f2fb8d943249e9c336608e2f2cf
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"Please convert the following text
|
||||
into valid JSON.\n\nOutput ONLY the valid JSON and nothing else.\n\nThe JSON
|
||||
must follow this schema exactly:\n```json\n{\n score: int\n}\n```"},{"role":"user","content":"4"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '541'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=NFLqe8oMW.d350lBeNJ9PQDQM.Rj0B9eCRBNNKM18qg-1761873088-1.0.1.1-Ipgawg95icfLAihgKfper9rYrjt3ZrKVSv_9lKRqJzx.FBfkZrcDqSW3Zt7TiktUIOSgO9JpX3Ia3Fu9g3DMTwWpaGJtoOj3u0I2USV9.qQ;
|
||||
_cfuvid=dQQqd3jb3DFD.LOIZmhxylJs2Rzp3rGIU3yFiaKkBls-1761873088861-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-helper-method:
|
||||
- chat.completions.parse
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLLbtswELzrK4g9W4HlyI6sWx8IeuulQIs2gUCTK4kpRRLkKnBg+N8L
|
||||
SrYl5wH0osPOznBmtIeEMVASSgai5SQ6p9MvP3/t3Tf6JFX79ffL04/b5+/Z/X672zT55x4WkWF3
|
||||
TyjozLoRtnMaSVkzwsIjJ4yq2d0mK+5ul8V2ADorUUda4yjNb7K0U0alq+VqnS7zNMtP9NYqgQFK
|
||||
9idhjLHD8I1GjcQ9lGy5OE86DIE3COVliTHwVscJ8BBUIG4IFhMorCE0g/fDAwRhPT5AmR/nOx7r
|
||||
PvBo1PRazwBujCUegw7uHk/I8eJH28Z5uwuvqFAro0JbeeTBmvh2IOtgQI8JY49D7v4qCjhvO0cV
|
||||
2b84PFesRzmY6p7AM0aWuJ7G21NV12KVROJKh1ltILhoUU7MqWPeS2VnQDKL/NbLe9pjbGWa/5Gf
|
||||
ACHQEcrKeZRKXOed1jzGW/xo7VLxYBgC+mclsCKFPv4GiTXv9XggEF4CYVfVyjTonVfjldSuysWq
|
||||
WGd1sVlBckz+AQAA//8DAKv/0dE0AwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 996f4755989559cb-MXP
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 31 Oct 2025 01:11:29 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '400'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '659'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999955'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999955'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_7829900551634a0db8009042f31db7fc
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
|
||||
@@ -1,105 +1,4 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"trace_id": "f4e3d2a7-6f34-4327-afca-c78e71cadd72", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "crew", "flow_name": null, "crewai_version": "1.2.1", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-10-31T21:52:20.918825+00:00"},
|
||||
"ephemeral_trace_id": "f4e3d2a7-6f34-4327-afca-c78e71cadd72"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '488'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.2.1
|
||||
X-Crewai-Organization-Id:
|
||||
- 73c2b193-f579-422c-84c7-76a39a1da77f
|
||||
X-Crewai-Version:
|
||||
- 1.2.1
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"id":"2adb4334-2adb-4585-90b9-03921447ab54","ephemeral_trace_id":"f4e3d2a7-6f34-4327-afca-c78e71cadd72","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.2.1","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.2.1","privacy_level":"standard"},"created_at":"2025-10-31T21:52:21.259Z","updated_at":"2025-10-31T21:52:21.259Z","access_code":"TRACE-c984d48836","user_identifier":null}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '515'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Fri, 31 Oct 2025 21:52:21 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
etag:
|
||||
- W/"de8355cd003b150e7c530e4f15d97140"
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 09d43be3-106a-44dd-a9a2-816d53f91d5d
|
||||
x-runtime:
|
||||
- '0.066900'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 201
|
||||
message: Created
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
|
||||
scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo
|
||||
@@ -110,13 +9,14 @@ interactions:
|
||||
Task: Give me an integer score between 1-5 for the following title: ''The impact
|
||||
of AI in the future of work''\n\nThis is the expected criteria for your final
|
||||
answer: The score of the title.\nyou MUST return the actual complete content
|
||||
as the final answer, not a summary.\nEnsure your final answer contains only
|
||||
the content in the following format: {\n \"properties\": {\n \"score\":
|
||||
{\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\":
|
||||
[\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false\n}\n\nEnsure the final output does not include any code block markers
|
||||
like ```json or ```python.\n\nBegin! This is VERY important to you, use the
|
||||
tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o"}'
|
||||
as the final answer, not a summary.\nEnsure your final answer strictly adheres
|
||||
to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\":
|
||||
\"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -125,7 +25,7 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1334'
|
||||
- '1388'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
@@ -149,26 +49,23 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFPLbtswELz7KxY824WdOPHjFgQokPbQFi2aolEgrMmVzIQiWXLlNAn8
|
||||
7wElxZLTFuhFAmf2Ncvh8whAaCXWIOQWWVbeTC6vw8fi+pF31dWHO39xap++XH3/ef9kLpc/7sQ4
|
||||
ZbjNHUl+zXonXeUNsXa2pWUgZEpVZ4vz2Wq+OJvPGqJyikxKKz1P5m5yMj2ZT6bLyfS8S9w6LSmK
|
||||
NdyMAACem28a0Sr6LdYwHb8iFcWIJYn1IQhABGcSIjBGHRkti3FPSmeZbDP1FVj3ABItlHpHgFCm
|
||||
iQFtfKCQ2ffaooGL5rSG58wCZMIH5ymwppiJDkxwlC7QAEkYazYNlomvLT0ekI++47RlKikcsYqi
|
||||
DNqnXbZB37YESU5pSUHTDAoXgLcETRvYYCQFzoLmCIEM7dBKArQKdOVRciba8vv0249bNYF+1TqQ
|
||||
Sk1u3mhJx9su7q2UTzX7mruRh2JaSxwIVEonEWg+H+2tQBOpizmsbp7Z/fCmAhV1xGQUWxszINBa
|
||||
x5jqNh657Zj9wRXGlT64TXyTKgptddzmgTA6mxwQ2XnRsPtRUpvcVx8ZKl145Tlnd09Nu5PlrK0n
|
||||
er/37GrVkewYTY+fLjvPHtfLFTFqEwf+FRLlllSf2psda6XdgBgNVP85zd9qt8q1Lf+nfE9ISZ5J
|
||||
5T6Q0vJYcR8WKN39v8IOW24GFpHCTkvKWVNIN6GowNq0L1XEx8hU5YW2JQUfdPtcC5/LTTFbLM/O
|
||||
zhditB+9AAAA//8DAB7xWDm3BAAA
|
||||
H4sIAAAAAAAAAwAAAP//jFLBbtswDL37Kwie48FOHCf1bStQoC2G3YYVS2EoEu1okyVBktsOQf59
|
||||
kJ3G7tYBuxgwH9/TeySPCQBKgRUgP7DAO6vS64c2uxMb+nL37fa6fOD5y6fspvxKq/v7/DMuIsPs
|
||||
fxAPr6wP3HRWUZBGjzB3xAJF1XxTLlfbrFznA9AZQSrSWhvSwqTLbFmk2TbNyjPxYCQnjxV8TwAA
|
||||
jsM3WtSCXrCCbPFa6ch71hJWlyYAdEbFCjLvpQ9MB1xMIDc6kB5c34I2z8CZhlY+ETBoo2Ng2j+T
|
||||
2+kbqZmCj8NfBccdem4c7bCC4jRXdNT0nsVAuldqBjCtTWBxIEOWxzNyurhXprXO7P0fVGyklv5Q
|
||||
O2Le6OjUB2NxQE8JwOMwpf5NcLTOdDbUwfyk4bnlVTHq4bSXCc03ZzCYwNRUX+X54h29WlBgUvnZ
|
||||
nJEzfiAxUaelsF5IMwOSWeq/3bynPSaXuv0f+QngnGwgUVtHQvK3iac2R/Fs/9V2mfJgGD25J8mp
|
||||
DpJc3ISghvVqvCj0v3ygrm6kbslZJ8ezamxdrtdlIbZ7tsbklPwGAAD//wMAQREcd18DAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99766103c9f57d16-EWR
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -176,14 +73,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 31 Oct 2025 21:52:23 GMT
|
||||
- Wed, 05 Nov 2025 22:10:52 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=M0OyXPOd4vZCE92p.8e.is2jhrt7g6vYTBI3Y2Pg7PE-1761947543-1.0.1.1-orJHNWV50gzMMUsFex2S_O1ofp7KQ_r.9iAzzWwYGyBW1puzUvacw0OkY2KXSZf2mcUI_Rwg6lzRuwAT6WkysTCS52D.rp3oNdgPcSk3JSk;
|
||||
path=/; expires=Fri, 31-Oct-25 22:22:23 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Wed, 05-Nov-25 22:40:52 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=LmEPJTcrhfn7YibgpOHVOK1U30pNnM9.PFftLZG98qs-1761947543691-0.0.1.1-604800000;
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
@@ -198,37 +95,31 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '1824'
|
||||
- '1337'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1855'
|
||||
- '1487'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-requests:
|
||||
- '9999'
|
||||
- '30000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999700'
|
||||
x-ratelimit-reset-project-requests:
|
||||
- 6ms
|
||||
- '29687'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 626ms
|
||||
x-request-id:
|
||||
- req_ef5bf5e7aa51435489f0c9d725916ff7
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
@@ -244,16 +135,14 @@ interactions:
|
||||
Jedi'', you MUST give it a score, use your best judgment\n\nThis is the expected
|
||||
criteria for your final answer: The score of the title.\nyou MUST return the
|
||||
actual complete content as the final answer, not a summary.\nEnsure your final
|
||||
answer contains only the content in the following format: {\n \"properties\":
|
||||
answer strictly adheres to the following OpenAPI schema: {\n \"properties\":
|
||||
{\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\":
|
||||
[\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false\n}\n\nEnsure the final output does not include any code block markers
|
||||
like ```json or ```python.\n\nThis is the context you''re working with:\n{\n \"properties\":
|
||||
{\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\",\n \"description\":
|
||||
\"The assigned score for the title based on its relevance and impact\"\n }\n },\n \"required\":
|
||||
[\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false,\n \"score\": 4\n}\n\nBegin! This is VERY important to you, use the tools
|
||||
available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o"}'
|
||||
false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the
|
||||
final output does not include any code block markers like ```json or ```python.\n\nThis
|
||||
is the context you''re working with:\n{\"score\": 4}\n\nBegin! This is VERY
|
||||
important to you, use the tools available and give your best Final Answer, your
|
||||
job depends on it!\n\nThought:"}],"model":"gpt-4o"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -262,12 +151,12 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1840'
|
||||
- '1550'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=M0OyXPOd4vZCE92p.8e.is2jhrt7g6vYTBI3Y2Pg7PE-1761947543-1.0.1.1-orJHNWV50gzMMUsFex2S_O1ofp7KQ_r.9iAzzWwYGyBW1puzUvacw0OkY2KXSZf2mcUI_Rwg6lzRuwAT6WkysTCS52D.rp3oNdgPcSk3JSk;
|
||||
_cfuvid=LmEPJTcrhfn7YibgpOHVOK1U30pNnM9.PFftLZG98qs-1761947543691-0.0.1.1-604800000
|
||||
- __cf_bm=REDACTED;
|
||||
_cfuvid=REDACTED
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
@@ -289,25 +178,23 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jJNdb5swFIbv8yssX5OJpCRpuJumrZoqtdO6rRelQo59AG/G9uxDuyji
|
||||
v1cGGkjWSbsB+Tzn0+/xYUYIlYKmhPKKIa+tmn+4d9fyvr76Wl5cXcer2215s5R3H3c/xPf9DY1C
|
||||
hNn9BI6vUe+4qa0ClEb3mDtgCCHrYrNebJPNKkk6UBsBKoSVFueJmS/jZTKPL+fxegisjOTgaUoe
|
||||
ZoQQcui+oUUt4A9NSRy9WmrwnpVA06MTIdQZFSyUeS89Mo00GiE3GkF3XX+rTFNWmJLPRJtnwpkm
|
||||
pXwCwkgZWidM+2dwmf4kNVPkfXdKySHThGTUOmPBoQSf0cEYzJ4bBxNLsKFE1dkyetfjaAL3dmBS
|
||||
I5TgMtrDNvzaqK/m4HcjHYjg+XBWKxwfB7/zUrcN2gaHgtNivXZHwISQQTmmvpzMVTDlYfA5jpZk
|
||||
up1eqYOi8SwoqhulJoBpbZCFvJ2YjwNpj/IpU1pndv4slBZSS1/lDpg3Okjl0Vja0XYWpg1r0pwo
|
||||
HwSpLeZofkFXLomXfT46LuZILy8GiAaZmkRdrqI38uUCkEnlJ4tGOeMViDF03ErWCGkmYDaZ+u9u
|
||||
3srdTy51+T/pR8A5WASRWwdC8tOJRzcHQft/uR1vuWuYenBPkkOOElxQQkDBGtU/Ker3HqHOC6lL
|
||||
cNbJ/l0VNl+stut1wra7DZ21sxcAAAD//wMAwih5UmAEAAA=
|
||||
H4sIAAAAAAAAAwAAAP//jFJNa9wwEL37VwxzXhfHm/2ob00gtPRQSiildIOZlce2WllSJTlpWPa/
|
||||
F9mbtbcf0ItAevOe3puZQwKAssICULQURGdVevulyd7L6sOnPNi2/dF+Xr9+bu5v7vv925uPuIgM
|
||||
s//GIrywXgnTWcVBGj3CwjEFjqpXm3W+3GbrVT4AnalYRVpjQ3pt0jzLr9Nsm2brE7E1UrDHAr4m
|
||||
AACH4YwWdcU/sYBs8fLSsffUMBbnIgB0RsUXJO+lD6QDLiZQGB1YD67fgTZPIEhDIx8ZCJroGEj7
|
||||
J3Y7fSc1KXgz3Ao47NAL43iHBayOc0XHde8pBtK9UjOAtDaBYkOGLA8n5Hh2r0xjndn736hYSy19
|
||||
Wzomb3R06oOxOKDHBOBh6FJ/ERytM50NZTDfefhuuVyOejjNZUKvNicwmEBqxlqdenupV1YcSCo/
|
||||
6zMKEi1XE3UaCvWVNDMgmaX+083ftMfkUjf/Iz8BQrANXJXWcSXFZeKpzHFc23+Vnbs8GEbP7lEK
|
||||
LoNkFydRcU29GjcK/bMP3JW11A076+S4VrUtabPdihVxnWFyTH4BAAD//wMAt5Pw3F8DAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99766114cf7c7d16-EWR
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -315,7 +202,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 31 Oct 2025 21:52:25 GMT
|
||||
- Wed, 05 Nov 2025 22:10:53 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
@@ -331,37 +218,31 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '1188'
|
||||
- '1009'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1206'
|
||||
- '1106'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-requests:
|
||||
- '9999'
|
||||
- '30000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999586'
|
||||
x-ratelimit-reset-project-requests:
|
||||
- 6ms
|
||||
- '29647'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 706ms
|
||||
x-request-id:
|
||||
- req_030ffb3d92bb47589d61d50b48f068d4
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
|
||||
@@ -1,257 +1,136 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Scorer. You''re an
|
||||
expert scorer, specialized in scoring titles.\nYour personal goal is: Score
|
||||
the title\nTo give my best complete final answer to the task use the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Give me an integer score between 1-5 for the following
|
||||
title: ''The impact of AI in the future of work''\n\nThis is the expect criteria
|
||||
for your final answer: The score of the title.\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\n\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}], "model": "gpt-4o"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '915'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7gLWcgGc51SE8JOmR5KTAnU3XHn\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214501,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer\\nFinal
|
||||
Answer: 4\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
186,\n \"completion_tokens\": 13,\n \"total_tokens\": 199,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_52a7f40b0b\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85fa681aae1cf3-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:21 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '165'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999781'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_2b2337796a8c3494046908ea09b8246c
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: !!binary |
|
||||
CoEpCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS2CgKEgoQY3Jld2FpLnRl
|
||||
bGVtZXRyeRKQAgoQhLhVvOnglfVOi+c2xoWiyBII1Y/SAd6OFdMqDlRhc2sgRXhlY3V0aW9uMAE5
|
||||
KB3Nwm5M+BdB4Bg2KG9M+BdKLgoIY3Jld19rZXkSIgogZDQyNjA4MzNhYjBjMjBiYjQ0OTIyYzc5
|
||||
OWFhOTZiNGFKMQoHY3Jld19pZBImCiQyMTZiZGRkNi1jNWE5LTQ0OTYtYWVjNy1iM2UwMGE3NDk0
|
||||
NWNKLgoIdGFza19rZXkSIgogNjA5ZGVlMzkxMDg4Y2QxYzg3YjhmYTY2YWE2N2FkYmVKMQoHdGFz
|
||||
a19pZBImCiRiZGQ4YmVmMS1mYTU2LTRkMGMtYWI0NC03YjIxNGM2Njg4YjV6AhgBhQEAAQAAEv8I
|
||||
ChBPYylfehvX8BbndToiYG8mEgjmS5KdOSYVrioMQ3JldyBDcmVhdGVkMAE5KD/4KW9M+BdBuBX7
|
||||
KW9M+BdKGgoOY3Jld2FpX3ZlcnNpb24SCAoGMC42MS4wShoKDnB5dGhvbl92ZXJzaW9uEggKBjMu
|
||||
MTEuN0ouCghjcmV3X2tleRIiCiBhOTU0MGNkMGVhYTUzZjY3NTQzN2U5YmQ0ZmE1ZTQ0Y0oxCgdj
|
||||
cmV3X2lkEiYKJGIwY2JjYzI3LTFhZjAtNDU4Mi04YzlkLTE1NTQ0ZjU3MGE2Y0ocCgxjcmV3X3By
|
||||
b2Nlc3MSDAoKc2VxdWVudGlhbEoRCgtjcmV3X21lbW9yeRICEABKGgoUY3Jld19udW1iZXJfb2Zf
|
||||
dGFza3MSAhgCShsKFWNyZXdfbnVtYmVyX29mX2FnZW50cxICGAFKyAIKC2NyZXdfYWdlbnRzErgC
|
||||
CrUCW3sia2V5IjogIjkyZTdlYjE5MTY2NGM5MzU3ODVlZDdkNDI0MGEyOTRkIiwgImlkIjogImU5
|
||||
MjVlMDQzLTU3YjgtNDYyNS1iYTQ1LTNhNzQzY2QwOWE4YSIsICJyb2xlIjogIlNjb3JlciIsICJ2
|
||||
ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAxNSwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rp
|
||||
b25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8iLCAiZGVsZWdhdGlvbl9lbmFibGVk
|
||||
PyI6IGZhbHNlLCAiYWxsb3dfY29kZV9leGVjdXRpb24/IjogZmFsc2UsICJtYXhfcmV0cnlfbGlt
|
||||
aXQiOiAyLCAidG9vbHNfbmFtZXMiOiBbXX1dSuQDCgpjcmV3X3Rhc2tzEtUDCtIDW3sia2V5Ijog
|
||||
IjI3ZWYzOGNjOTlkYTRhOGRlZDcwZWQ0MDZlNDRhYjg2IiwgImlkIjogIjQ0YjE0OTMyLWIxMDAt
|
||||
NDFkMC04YzBmLTgwODRlNTU4YmEzZCIsICJhc3luY19leGVjdXRpb24/IjogZmFsc2UsICJodW1h
|
||||
bl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAiU2NvcmVyIiwgImFnZW50X2tleSI6ICI5
|
||||
MmU3ZWIxOTE2NjRjOTM1Nzg1ZWQ3ZDQyNDBhMjk0ZCIsICJ0b29sc19uYW1lcyI6IFtdfSwgeyJr
|
||||
ZXkiOiAiYjBkMzRhNmY2MjFhN2IzNTgwZDVkMWY0ZTI2NjViOTIiLCAiaWQiOiAiYTMwY2MzMTct
|
||||
ZjcwMi00ZDZkLWE3NWItY2MxZDI3OWM3YWZhIiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwg
|
||||
Imh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJTY29yZXIiLCAiYWdlbnRfa2V5
|
||||
IjogIjkyZTdlYjE5MTY2NGM5MzU3ODVlZDdkNDI0MGEyOTRkIiwgInRvb2xzX25hbWVzIjogW119
|
||||
XXoCGAGFAQABAAASjgIKEOwmsObl8Aep6MgWWZBR25ISCMk2VYgEdtVpKgxUYXNrIENyZWF0ZWQw
|
||||
ATnYkggqb0z4F0GY8Agqb0z4F0ouCghjcmV3X2tleRIiCiBhOTU0MGNkMGVhYTUzZjY3NTQzN2U5
|
||||
YmQ0ZmE1ZTQ0Y0oxCgdjcmV3X2lkEiYKJGIwY2JjYzI3LTFhZjAtNDU4Mi04YzlkLTE1NTQ0ZjU3
|
||||
MGE2Y0ouCgh0YXNrX2tleRIiCiAyN2VmMzhjYzk5ZGE0YThkZWQ3MGVkNDA2ZTQ0YWI4NkoxCgd0
|
||||
YXNrX2lkEiYKJDQ0YjE0OTMyLWIxMDAtNDFkMC04YzBmLTgwODRlNTU4YmEzZHoCGAGFAQABAAAS
|
||||
kAIKEM8nkjhkcMAAcDa5TcwWiVMSCEmwL0+H+FIbKg5UYXNrIEV4ZWN1dGlvbjABOagXCSpvTPgX
|
||||
QXB/goBvTPgXSi4KCGNyZXdfa2V5EiIKIGE5NTQwY2QwZWFhNTNmNjc1NDM3ZTliZDRmYTVlNDRj
|
||||
SjEKB2NyZXdfaWQSJgokYjBjYmNjMjctMWFmMC00NTgyLThjOWQtMTU1NDRmNTcwYTZjSi4KCHRh
|
||||
c2tfa2V5EiIKIDI3ZWYzOGNjOTlkYTRhOGRlZDcwZWQ0MDZlNDRhYjg2SjEKB3Rhc2tfaWQSJgok
|
||||
NDRiMTQ5MzItYjEwMC00MWQwLThjMGYtODA4NGU1NThiYTNkegIYAYUBAAEAABKOAgoQIV5wjBGq
|
||||
gGxaW4dd0yo9yhIIySKkZmJIz2AqDFRhc2sgQ3JlYXRlZDABOdgW3YBvTPgXQXh/44BvTPgXSi4K
|
||||
CGNyZXdfa2V5EiIKIGE5NTQwY2QwZWFhNTNmNjc1NDM3ZTliZDRmYTVlNDRjSjEKB2NyZXdfaWQS
|
||||
JgokYjBjYmNjMjctMWFmMC00NTgyLThjOWQtMTU1NDRmNTcwYTZjSi4KCHRhc2tfa2V5EiIKIGIw
|
||||
ZDM0YTZmNjIxYTdiMzU4MGQ1ZDFmNGUyNjY1YjkySjEKB3Rhc2tfaWQSJgokYTMwY2MzMTctZjcw
|
||||
Mi00ZDZkLWE3NWItY2MxZDI3OWM3YWZhegIYAYUBAAEAABKQAgoQDWnyLVhFvJ9HGT8paVXkJxII
|
||||
WoynnEyhCrsqDlRhc2sgRXhlY3V0aW9uMAE5INvkgG9M+BdBELqp3W9M+BdKLgoIY3Jld19rZXkS
|
||||
IgogYTk1NDBjZDBlYWE1M2Y2NzU0MzdlOWJkNGZhNWU0NGNKMQoHY3Jld19pZBImCiRiMGNiY2My
|
||||
Ny0xYWYwLTQ1ODItOGM5ZC0xNTU0NGY1NzBhNmNKLgoIdGFza19rZXkSIgogYjBkMzRhNmY2MjFh
|
||||
N2IzNTgwZDVkMWY0ZTI2NjViOTJKMQoHdGFza19pZBImCiRhMzBjYzMxNy1mNzAyLTRkNmQtYTc1
|
||||
Yi1jYzFkMjc5YzdhZmF6AhgBhQEAAQAAEpYHChAKN7rFU9r/qXd3Pi0qtGuuEghWidwFFzXA+CoM
|
||||
Q3JldyBDcmVhdGVkMAE5gKn93m9M+BdBCAQH329M+BdKGgoOY3Jld2FpX3ZlcnNpb24SCAoGMC42
|
||||
MS4wShoKDnB5dGhvbl92ZXJzaW9uEggKBjMuMTEuN0ouCghjcmV3X2tleRIiCiA1ZTZlZmZlNjgw
|
||||
YTVkOTdkYzM4NzNiMTQ4MjVjY2ZhM0oxCgdjcmV3X2lkEiYKJGZkZGI4MDY3LTUyZDQtNDRmNC1h
|
||||
ZmU1LTU4Y2UwYmJjM2NjNkocCgxjcmV3X3Byb2Nlc3MSDAoKc2VxdWVudGlhbEoRCgtjcmV3X21l
|
||||
bW9yeRICEABKGgoUY3Jld19udW1iZXJfb2ZfdGFza3MSAhgBShsKFWNyZXdfbnVtYmVyX29mX2Fn
|
||||
ZW50cxICGAFKyAIKC2NyZXdfYWdlbnRzErgCCrUCW3sia2V5IjogIjkyZTdlYjE5MTY2NGM5MzU3
|
||||
ODVlZDdkNDI0MGEyOTRkIiwgImlkIjogIjE2ZGQ4NmUzLTk5Y2UtNDVhZi1iYzY5LTk3NDMxOTBl
|
||||
YjUwMiIsICJyb2xlIjogIlNjb3JlciIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAx
|
||||
NSwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJn
|
||||
cHQtNG8iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAiYWxsb3dfY29kZV9leGVjdXRp
|
||||
b24/IjogZmFsc2UsICJtYXhfcmV0cnlfbGltaXQiOiAyLCAidG9vbHNfbmFtZXMiOiBbXX1dSvsB
|
||||
CgpjcmV3X3Rhc2tzEuwBCukBW3sia2V5IjogIjI3ZWYzOGNjOTlkYTRhOGRlZDcwZWQ0MDZlNDRh
|
||||
Yjg2IiwgImlkIjogIjA1OWZjYmM2LWUzOWItNDIyMS1iZGUyLTZiNTBkN2I3MWRlMCIsICJhc3lu
|
||||
Y19leGVjdXRpb24/IjogZmFsc2UsICJodW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3JvbGUi
|
||||
OiAiU2NvcmVyIiwgImFnZW50X2tleSI6ICI5MmU3ZWIxOTE2NjRjOTM1Nzg1ZWQ3ZDQyNDBhMjk0
|
||||
ZCIsICJ0b29sc19uYW1lcyI6IFtdfV16AhgBhQEAAQAAEo4CChAUgMiGvp21ReE/B78im5S7Eghi
|
||||
jovsilVpfyoMVGFzayBDcmVhdGVkMAE5+Jkm329M+BdB+JMn329M+BdKLgoIY3Jld19rZXkSIgog
|
||||
NWU2ZWZmZTY4MGE1ZDk3ZGMzODczYjE0ODI1Y2NmYTNKMQoHY3Jld19pZBImCiRmZGRiODA2Ny01
|
||||
MmQ0LTQ0ZjQtYWZlNS01OGNlMGJiYzNjYzZKLgoIdGFza19rZXkSIgogMjdlZjM4Y2M5OWRhNGE4
|
||||
ZGVkNzBlZDQwNmU0NGFiODZKMQoHdGFza19pZBImCiQwNTlmY2JjNi1lMzliLTQyMjEtYmRlMi02
|
||||
YjUwZDdiNzFkZTB6AhgBhQEAAQAAEpACChBg/D9k2+taXX67WvVBp+VcEghqguJlB/GnECoOVGFz
|
||||
ayBFeGVjdXRpb24wATlw/Sffb0z4F0FwimsEcEz4F0ouCghjcmV3X2tleRIiCiA1ZTZlZmZlNjgw
|
||||
YTVkOTdkYzM4NzNiMTQ4MjVjY2ZhM0oxCgdjcmV3X2lkEiYKJGZkZGI4MDY3LTUyZDQtNDRmNC1h
|
||||
ZmU1LTU4Y2UwYmJjM2NjNkouCgh0YXNrX2tleRIiCiAyN2VmMzhjYzk5ZGE0YThkZWQ3MGVkNDA2
|
||||
ZTQ0YWI4NkoxCgd0YXNrX2lkEiYKJDA1OWZjYmM2LWUzOWItNDIyMS1iZGUyLTZiNTBkN2I3MWRl
|
||||
MHoCGAGFAQABAAASlgcKEDm+8DkvaTH4DOuPopZIICgSCJDjbz82oeHxKgxDcmV3IENyZWF0ZWQw
|
||||
ATlYhLQGcEz4F0HQvbwGcEz4F0oaCg5jcmV3YWlfdmVyc2lvbhIICgYwLjYxLjBKGgoOcHl0aG9u
|
||||
X3ZlcnNpb24SCAoGMy4xMS43Si4KCGNyZXdfa2V5EiIKIDVlNmVmZmU2ODBhNWQ5N2RjMzg3M2Ix
|
||||
NDgyNWNjZmEzSjEKB2NyZXdfaWQSJgokZTA3OGEwOWUtNmY3MC00YjE1LTkwYjMtMGQ2NDdiNDI1
|
||||
ODBiShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEKC2NyZXdfbWVtb3J5EgIQAEoaChRj
|
||||
cmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrIAgoL
|
||||
Y3Jld19hZ2VudHMSuAIKtQJbeyJrZXkiOiAiOTJlN2ViMTkxNjY0YzkzNTc4NWVkN2Q0MjQwYTI5
|
||||
NGQiLCAiaWQiOiAiOTkxZWRlZTYtMGI0Ni00OTExLTg5MjQtZjFjN2NiZTg0NzUxIiwgInJvbGUi
|
||||
OiAiU2NvcmVyIiwgInZlcmJvc2U/IjogZmFsc2UsICJtYXhfaXRlciI6IDE1LCAibWF4X3JwbSI6
|
||||
IG51bGwsICJmdW5jdGlvbl9jYWxsaW5nX2xsbSI6ICIiLCAibGxtIjogImdwdC00byIsICJkZWxl
|
||||
Z2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwg
|
||||
Im1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfV1K+wEKCmNyZXdfdGFza3MS
|
||||
7AEK6QFbeyJrZXkiOiAiMjdlZjM4Y2M5OWRhNGE4ZGVkNzBlZDQwNmU0NGFiODYiLCAiaWQiOiAi
|
||||
YjQ0Y2FlODAtMWM3NC00ZjU3LTg4Y2UtMTVhZmZlNDk1NWM2IiwgImFzeW5jX2V4ZWN1dGlvbj8i
|
||||
OiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJTY29yZXIiLCAi
|
||||
YWdlbnRfa2V5IjogIjkyZTdlYjE5MTY2NGM5MzU3ODVlZDdkNDI0MGEyOTRkIiwgInRvb2xzX25h
|
||||
bWVzIjogW119XXoCGAGFAQABAAASjgIKEJKci6aiZkfH4+PbS6B+iqcSCFcq8/ly2cQTKgxUYXNr
|
||||
IENyZWF0ZWQwATnY6ewGcEz4F0FQTe4GcEz4F0ouCghjcmV3X2tleRIiCiA1ZTZlZmZlNjgwYTVk
|
||||
OTdkYzM4NzNiMTQ4MjVjY2ZhM0oxCgdjcmV3X2lkEiYKJGUwNzhhMDllLTZmNzAtNGIxNS05MGIz
|
||||
LTBkNjQ3YjQyNTgwYkouCgh0YXNrX2tleRIiCiAyN2VmMzhjYzk5ZGE0YThkZWQ3MGVkNDA2ZTQ0
|
||||
YWI4NkoxCgd0YXNrX2lkEiYKJGI0NGNhZTgwLTFjNzQtNGY1Ny04OGNlLTE1YWZmZTQ5NTVjNnoC
|
||||
GAGFAQABAAA=
|
||||
body: '{"trace_id": "00000000-0000-0000-0000-000000000000", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-05T22:10:38.307164+00:00"},
|
||||
"ephemeral_trace_id": "00000000-0000-0000-0000-000000000000"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '5252'
|
||||
- '488'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
- application/json
|
||||
User-Agent:
|
||||
- OTel-OTLP-Exporter-Python/1.27.0
|
||||
- CrewAI-CLI/1.3.0
|
||||
X-Crewai-Version:
|
||||
- 1.3.0
|
||||
method: POST
|
||||
uri: https://telemetry.crewai.com:4319/v1/traces
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
|
||||
response:
|
||||
body:
|
||||
string: "\n\0"
|
||||
string: '{"id": "00000000-0000-0000-0000-000000000000","ephemeral_trace_id": "00000000-0000-0000-0000-000000000000","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.3.0","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.3.0","privacy_level":"standard"},"created_at":"2025-11-05T22:10:38.904Z","updated_at":"2025-11-05T22:10:38.904Z","access_code": "TRACE-0000000000","user_identifier":null}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '2'
|
||||
- '515'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:22 GMT
|
||||
- Wed, 05 Nov 2025 22:10:38 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
etag:
|
||||
- W/"06db9ad73130a1da388846e83fc98135"
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 34f34729-198e-482e-8c87-163a997bc3f4
|
||||
x-runtime:
|
||||
- '0.239932'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
code: 201
|
||||
message: Created
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "4"}, {"role": "system", "content":
|
||||
"I''m gonna convert this raw text into valid JSON.\n\nThe json should have the
|
||||
following structure, with the following keys:\n{\n score: int\n}"}], "model":
|
||||
"gpt-4o", "tool_choice": {"type": "function", "function": {"name": "ScoreOutput"}},
|
||||
"tools": [{"type": "function", "function": {"name": "ScoreOutput", "description":
|
||||
"Correctly extracted `ScoreOutput` with all the required parameters with correct
|
||||
types", "parameters": {"properties": {"score": {"title": "Score", "type": "integer"}},
|
||||
"required": ["score"], "type": "object"}}}]}'
|
||||
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
|
||||
scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent
|
||||
Task: Give me an integer score between 1-5 for the following title: ''The impact
|
||||
of AI in the future of work''\n\nThis is the expected criteria for your final
|
||||
answer: The score of the title.\nyou MUST return the actual complete content
|
||||
as the final answer, not a summary.\nEnsure your final answer strictly adheres
|
||||
to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\":
|
||||
\"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '615'
|
||||
- '1394'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -261,32 +140,32 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7gLFujxt3lCTjCAqcN0vCEyx0ZC\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214501,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_Oztn2jqT5UJAXmx6kuOpM4wH\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"ScoreOutput\",\n
|
||||
\ \"arguments\": \"{\\\"score\\\":4}\"\n }\n }\n
|
||||
\ ],\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
100,\n \"completion_tokens\": 5,\n \"total_tokens\": 105,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJNb5wwEL3zK0Y+LxGwLN1w64eq5tZDVbUqEfKaAdyYsWubpNVq/3tl
|
||||
2CykTaRckJg3b/zemzlGAEw2rAQmeu7FYFT8/nuXFLui0+2Hz4e7T/Tr6zt/yPlOfMtvErYJDH34
|
||||
icI/sq6EHoxCLzXNsLDIPYap6Zsi2+6TYrufgEE3qAKtMz7Or9J4kCTjLMl2cZLHaX6m91oKdKyE
|
||||
HxEAwHH6BqHU4G9WQrJ5rAzoHO+QlZcmAGa1ChXGnZPOc/Jss4BCk0eatH/p9dj1voQbIP0AghN0
|
||||
8h6BQxcMACf3gLaij5K4grfTXwnHigAq5oS2WLES8opO6wcstqPjwSWNSq0ATqQ9DylN1m7PyOli
|
||||
RunOWH1w/1BZK0m6vrbInaYg3Hlt2ISeIoDbKbTxSQ7MWD0YX3t9h9Nz2XU+z2PLslZodga99lwt
|
||||
9W1abJ6ZVzfouVRuFTsTXPTYLNRlR3xspF4B0cr1/2qemz07l9S9ZvwCCIHGY1Mbi40UTx0vbRbD
|
||||
Lb/Udkl5Eswc2nspsPYSbdhEgy0f1XxgzP1xHoe6ldShNVbOV9aaOhfZfpe2+yJj0Sn6CwAA//8D
|
||||
ACQm7KN0AwAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85fa6bafd31cf3-GRU
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -294,37 +173,54 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:22 GMT
|
||||
- Wed, 05 Nov 2025 22:10:39 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Wed, 05-Nov-25 22:40:39 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '203'
|
||||
- '491'
|
||||
openai-project:
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '511'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999947'
|
||||
- '199687'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 93ms
|
||||
x-request-id:
|
||||
- req_d13a07d98d55b75c847778f3bd31ba49
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
@@ -1,539 +1,22 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Scorer. You''re an
|
||||
expert scorer, specialized in scoring titles.\nYour personal goal is: Score
|
||||
the title\nTo give my best complete final answer to the task use the exact following
|
||||
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
|
||||
scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Give me an integer score between 1-5 for the following
|
||||
title: ''The impact of AI in the future of work''\n\nThis is the expect criteria
|
||||
for your final answer: The score of the title.\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\n\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}], "model": "gpt-4o"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '915'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7gMdbh6Ncs7ekM3mpk0rfbH9oHy\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214502,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
|
||||
Answer: 4\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
186,\n \"completion_tokens\": 15,\n \"total_tokens\": 201,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_52a7f40b0b\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85fa6eecc81cf3-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:22 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '231'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999781'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_04be4057cf9dce611e16f95ffa36a88a
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "4"}, {"role": "system", "content":
|
||||
"I''m gonna convert this raw text into valid JSON.\n\nThe json should have the
|
||||
following structure, with the following keys:\n{\n score: int\n}"}], "model":
|
||||
"gpt-4o", "tool_choice": {"type": "function", "function": {"name": "ScoreOutput"}},
|
||||
"tools": [{"type": "function", "function": {"name": "ScoreOutput", "description":
|
||||
"Correctly extracted `ScoreOutput` with all the required parameters with correct
|
||||
types", "parameters": {"properties": {"score": {"title": "Score", "type": "integer"}},
|
||||
"required": ["score"], "type": "object"}}}]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '615'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7gNjFjFYTE9aEM06OLFECfe74NF\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727214503,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_BriklYUCRXEHjLYyyPiFo1w7\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"ScoreOutput\",\n
|
||||
\ \"arguments\": \"{\\\"score\\\":4}\"\n }\n }\n
|
||||
\ ],\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
100,\n \"completion_tokens\": 5,\n \"total_tokens\": 105,\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85fa72fadb1cf3-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:48:23 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '221'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999947'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_d75a37a0ce046c6a74a19fb24a97be79
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"trace_id": "b4e722b9-c407-4653-ba06-1786963c9c4a", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "crew", "flow_name": null, "crewai_version": "0.201.1", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-10-08T18:15:00.412875+00:00"}}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '428'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/0.201.1
|
||||
X-Crewai-Organization-Id:
|
||||
- d3a3d10c-35db-423f-a7a4-c026030ba64d
|
||||
X-Crewai-Version:
|
||||
- 0.201.1
|
||||
method: POST
|
||||
uri: http://localhost:3000/crewai_plus/api/v1/tracing/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"id":"1a36dd2f-483b-4934-a9b6-f7b95cee2824","trace_id":"b4e722b9-c407-4653-ba06-1786963c9c4a","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"0.201.1","privacy_level":"standard","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"0.201.1","privacy_level":"standard"},"created_at":"2025-10-08T18:15:00.934Z","updated_at":"2025-10-08T18:15:00.934Z"}'
|
||||
headers:
|
||||
Content-Length:
|
||||
- '480'
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.crewai.com crewai.com; script-src ''self'' ''unsafe-inline''
|
||||
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com
|
||||
https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://share.descript.com/; style-src ''self''
|
||||
''unsafe-inline'' *.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts;
|
||||
img-src ''self'' data: *.crewai.com crewai.com https://zeus.tools.crewai.com
|
||||
https://dashboard.tools.crewai.com https://cdn.jsdelivr.net; font-src ''self''
|
||||
data: *.crewai.com crewai.com; connect-src ''self'' *.crewai.com crewai.com
|
||||
https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/*
|
||||
https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/
|
||||
https://*.sentry.io https://www.google-analytics.com ws://localhost:3036 wss://localhost:3036;
|
||||
frame-src ''self'' *.crewai.com crewai.com https://connect.useparagon.com/
|
||||
https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/
|
||||
https://docs.google.com https://drive.google.com https://slides.google.com
|
||||
https://accounts.google.com https://*.google.com https://www.youtube.com https://share.descript.com'
|
||||
content-type:
|
||||
- application/json; charset=utf-8
|
||||
etag:
|
||||
- W/"31db72e28a68dfa1c4f3568b388bc2f0"
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
server-timing:
|
||||
- cache_read.active_support;dur=0.18, sql.active_record;dur=73.01, cache_generate.active_support;dur=16.53,
|
||||
cache_write.active_support;dur=0.22, cache_read_multi.active_support;dur=0.33,
|
||||
start_processing.action_controller;dur=0.01, instantiation.active_record;dur=1.29,
|
||||
feature_operation.flipper;dur=0.50, start_transaction.active_record;dur=0.01,
|
||||
transaction.active_record;dur=21.52, process_action.action_controller;dur=459.22
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- ebc8e3ab-5979-48b7-8816-667a1fd98ce2
|
||||
x-runtime:
|
||||
- '0.524429'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 201
|
||||
message: Created
|
||||
- request:
|
||||
body: '{"events": [{"event_id": "4a27c1d9-f908-42e4-b4dc-7091db74915b", "timestamp":
|
||||
"2025-10-08T18:15:00.950855+00:00", "type": "crew_kickoff_started", "event_data":
|
||||
{"timestamp": "2025-10-08T18:15:00.412055+00:00", "type": "crew_kickoff_started",
|
||||
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
|
||||
"crew_name": "crew", "crew": null, "inputs": null}}, {"event_id": "eaa85c90-02d2-444c-bf52-1178d68bae6d",
|
||||
"timestamp": "2025-10-08T18:15:00.952277+00:00", "type": "task_started", "event_data":
|
||||
{"task_description": "Give me an integer score between 1-5 for the following
|
||||
title: ''The impact of AI in the future of work''", "expected_output": "The
|
||||
score of the title.", "task_name": "Give me an integer score between 1-5 for
|
||||
the following title: ''The impact of AI in the future of work''", "context":
|
||||
"", "agent_role": "Scorer", "task_id": "3dca2ae4-e374-42e6-a6de-ecae1e8ac310"}},
|
||||
{"event_id": "8f1cce5b-7a60-4b53-aac1-05a9d7c3335e", "timestamp": "2025-10-08T18:15:00.952865+00:00",
|
||||
"type": "agent_execution_started", "event_data": {"agent_role": "Scorer", "agent_goal":
|
||||
"Score the title", "agent_backstory": "You''re an expert scorer, specialized
|
||||
in scoring titles."}}, {"event_id": "754a8fb5-bb3a-4204-839e-7b622eb3d6dd",
|
||||
"timestamp": "2025-10-08T18:15:00.953005+00:00", "type": "llm_call_started",
|
||||
"event_data": {"timestamp": "2025-10-08T18:15:00.952957+00:00", "type": "llm_call_started",
|
||||
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
|
||||
"task_name": "Give me an integer score between 1-5 for the following title:
|
||||
''The impact of AI in the future of work''", "task_id": "3dca2ae4-e374-42e6-a6de-ecae1e8ac310",
|
||||
"agent_id": "2ba6f80d-a1da-409f-bd89-13a286b7dfb7", "agent_role": "Scorer",
|
||||
"from_task": null, "from_agent": null, "model": "gpt-4o-mini", "messages": [{"role":
|
||||
"system", "content": "You are Scorer. You''re an expert scorer, specialized
|
||||
in scoring titles.\nYour personal goal is: Score the title\nTo give my best
|
||||
complete final answer to the task respond using the exact following format:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: Your final answer must be the great
|
||||
and the most complete as possible, it must be outcome described.\n\nI MUST use
|
||||
these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent
|
||||
described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent
|
||||
Task: Give me an integer score between 1-5 for the following title: ''The impact
|
||||
of AI in the future of work''\n\nThis is the expected criteria for your final
|
||||
answer: The score of the title.\nyou MUST return the actual complete content
|
||||
as the final answer, not a summary.\nEnsure your final answer contains only
|
||||
the content in the following format: {\n \"score\": int\n}\n\nEnsure the final
|
||||
output does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
as the final answer, not a summary.\nEnsure your final answer strictly adheres
|
||||
to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\":
|
||||
\"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}], "tools": null, "callbacks":
|
||||
["<crewai.utilities.token_counter_callback.TokenCalcHandler object at 0x300f52060>"],
|
||||
"available_functions": null}}, {"event_id": "1a15dcc3-8827-4803-ac61-ab70d5be90f3",
|
||||
"timestamp": "2025-10-08T18:15:01.085142+00:00", "type": "llm_call_completed",
|
||||
"event_data": {"timestamp": "2025-10-08T18:15:01.084844+00:00", "type": "llm_call_completed",
|
||||
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
|
||||
"task_name": "Give me an integer score between 1-5 for the following title:
|
||||
''The impact of AI in the future of work''", "task_id": "3dca2ae4-e374-42e6-a6de-ecae1e8ac310",
|
||||
"agent_id": "2ba6f80d-a1da-409f-bd89-13a286b7dfb7", "agent_role": "Scorer",
|
||||
"from_task": null, "from_agent": null, "messages": [{"role": "system", "content":
|
||||
"You are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour
|
||||
personal goal is: Score the title\nTo give my best complete final answer to
|
||||
the task respond using the exact following format:\n\nThought: I now can give
|
||||
a great answer\nFinal Answer: Your final answer must be the great and the most
|
||||
complete as possible, it must be outcome described.\n\nI MUST use these formats,
|
||||
my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Give me
|
||||
an integer score between 1-5 for the following title: ''The impact of AI in
|
||||
the future of work''\n\nThis is the expected criteria for your final answer:
|
||||
The score of the title.\nyou MUST return the actual complete content as the
|
||||
final answer, not a summary.\nEnsure your final answer contains only the content
|
||||
in the following format: {\n \"score\": int\n}\n\nEnsure the final output does
|
||||
not include any code block markers like ```json or ```python.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"}], "response": "Thought: I now can give
|
||||
a great answer\nFinal Answer: 4", "call_type": "<LLMCallType.LLM_CALL: ''llm_call''>",
|
||||
"model": "gpt-4o-mini"}}, {"event_id": "c6742bd6-5a92-41e8-94f6-49ba267d785f",
|
||||
"timestamp": "2025-10-08T18:15:01.085480+00:00", "type": "agent_execution_completed",
|
||||
"event_data": {"agent_role": "Scorer", "agent_goal": "Score the title", "agent_backstory":
|
||||
"You''re an expert scorer, specialized in scoring titles."}}, {"event_id": "486c254c-57b6-477b-aadc-d5be745613fb",
|
||||
"timestamp": "2025-10-08T18:15:01.085639+00:00", "type": "task_failed", "event_data":
|
||||
{"serialization_error": "Circular reference detected (id repeated)", "object_type":
|
||||
"TaskFailedEvent"}}, {"event_id": "b2ce4ceb-74f6-4379-b65e-8d6dc371f956", "timestamp":
|
||||
"2025-10-08T18:15:01.086242+00:00", "type": "crew_kickoff_failed", "event_data":
|
||||
{"timestamp": "2025-10-08T18:15:01.086226+00:00", "type": "crew_kickoff_failed",
|
||||
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
|
||||
"crew_name": "crew", "crew": null, "error": "Failed to convert text into a Pydantic
|
||||
model due to error: ''NoneType'' object has no attribute ''supports_function_calling''"}}],
|
||||
"batch_metadata": {"events_count": 8, "batch_sequence": 1, "is_final_batch":
|
||||
false}}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '5982'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/0.201.1
|
||||
X-Crewai-Organization-Id:
|
||||
- d3a3d10c-35db-423f-a7a4-c026030ba64d
|
||||
X-Crewai-Version:
|
||||
- 0.201.1
|
||||
method: POST
|
||||
uri: http://localhost:3000/crewai_plus/api/v1/tracing/batches/b4e722b9-c407-4653-ba06-1786963c9c4a/events
|
||||
response:
|
||||
body:
|
||||
string: '{"events_created":8,"trace_batch_id":"1a36dd2f-483b-4934-a9b6-f7b95cee2824"}'
|
||||
headers:
|
||||
Content-Length:
|
||||
- '76'
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.crewai.com crewai.com; script-src ''self'' ''unsafe-inline''
|
||||
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com
|
||||
https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://share.descript.com/; style-src ''self''
|
||||
''unsafe-inline'' *.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts;
|
||||
img-src ''self'' data: *.crewai.com crewai.com https://zeus.tools.crewai.com
|
||||
https://dashboard.tools.crewai.com https://cdn.jsdelivr.net; font-src ''self''
|
||||
data: *.crewai.com crewai.com; connect-src ''self'' *.crewai.com crewai.com
|
||||
https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/*
|
||||
https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/
|
||||
https://*.sentry.io https://www.google-analytics.com ws://localhost:3036 wss://localhost:3036;
|
||||
frame-src ''self'' *.crewai.com crewai.com https://connect.useparagon.com/
|
||||
https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/
|
||||
https://docs.google.com https://drive.google.com https://slides.google.com
|
||||
https://accounts.google.com https://*.google.com https://www.youtube.com https://share.descript.com'
|
||||
content-type:
|
||||
- application/json; charset=utf-8
|
||||
etag:
|
||||
- W/"ec084df3e365d72581f5734016786212"
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
server-timing:
|
||||
- cache_read.active_support;dur=0.05, sql.active_record;dur=60.65, cache_generate.active_support;dur=2.12,
|
||||
cache_write.active_support;dur=0.12, cache_read_multi.active_support;dur=0.09,
|
||||
start_processing.action_controller;dur=0.00, instantiation.active_record;dur=0.51,
|
||||
start_transaction.active_record;dur=0.00, transaction.active_record;dur=115.06,
|
||||
process_action.action_controller;dur=475.82
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 96f38d13-8f41-4b6f-b41a-cc526f821efd
|
||||
x-runtime:
|
||||
- '0.520997'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"status": "completed", "duration_ms": 1218, "final_event_count": 8}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '68'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/0.201.1
|
||||
X-Crewai-Organization-Id:
|
||||
- d3a3d10c-35db-423f-a7a4-c026030ba64d
|
||||
X-Crewai-Version:
|
||||
- 0.201.1
|
||||
method: PATCH
|
||||
uri: http://localhost:3000/crewai_plus/api/v1/tracing/batches/b4e722b9-c407-4653-ba06-1786963c9c4a/finalize
|
||||
response:
|
||||
body:
|
||||
string: '{"id":"1a36dd2f-483b-4934-a9b6-f7b95cee2824","trace_id":"b4e722b9-c407-4653-ba06-1786963c9c4a","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"completed","duration_ms":1218,"crewai_version":"0.201.1","privacy_level":"standard","total_events":8,"execution_context":{"crew_name":"crew","flow_name":null,"privacy_level":"standard","crewai_version":"0.201.1","crew_fingerprint":null},"created_at":"2025-10-08T18:15:00.934Z","updated_at":"2025-10-08T18:15:02.539Z"}'
|
||||
headers:
|
||||
Content-Length:
|
||||
- '482'
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.crewai.com crewai.com; script-src ''self'' ''unsafe-inline''
|
||||
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com
|
||||
https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://share.descript.com/; style-src ''self''
|
||||
''unsafe-inline'' *.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts;
|
||||
img-src ''self'' data: *.crewai.com crewai.com https://zeus.tools.crewai.com
|
||||
https://dashboard.tools.crewai.com https://cdn.jsdelivr.net; font-src ''self''
|
||||
data: *.crewai.com crewai.com; connect-src ''self'' *.crewai.com crewai.com
|
||||
https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/*
|
||||
https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/
|
||||
https://*.sentry.io https://www.google-analytics.com ws://localhost:3036 wss://localhost:3036;
|
||||
frame-src ''self'' *.crewai.com crewai.com https://connect.useparagon.com/
|
||||
https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/
|
||||
https://docs.google.com https://drive.google.com https://slides.google.com
|
||||
https://accounts.google.com https://*.google.com https://www.youtube.com https://share.descript.com'
|
||||
content-type:
|
||||
- application/json; charset=utf-8
|
||||
etag:
|
||||
- W/"f69bd753b6206f7d8f00bfae64391d7a"
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
server-timing:
|
||||
- cache_read.active_support;dur=0.13, sql.active_record;dur=20.82, cache_generate.active_support;dur=2.02,
|
||||
cache_write.active_support;dur=0.18, cache_read_multi.active_support;dur=0.08,
|
||||
start_processing.action_controller;dur=0.00, instantiation.active_record;dur=1.08,
|
||||
unpermitted_parameters.action_controller;dur=0.00, start_transaction.active_record;dur=0.00,
|
||||
transaction.active_record;dur=2.90, process_action.action_controller;dur=844.67
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 80da6a7c-c07d-4a00-b5d9-fb85448ef76a
|
||||
x-runtime:
|
||||
- '0.904849'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"Please convert the following text
|
||||
into valid JSON.\n\nOutput ONLY the valid JSON and nothing else.\n\nThe JSON
|
||||
must follow this schema exactly:\n```json\n{\n score: int\n}\n```"},{"role":"user","content":"4"}],"model":"gpt-4.1-mini"}'
|
||||
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -542,7 +25,7 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '277'
|
||||
- '1394'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
@@ -566,23 +49,27 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jJIxb9swEIV3/QriZiuwFNlxtBadig6ZGrQKJJo8SbQpkiWpwoHh/16Q
|
||||
ci0lTYEuGu679/TueOeEEBAcSgKsp54NRqafvj2ffh4PX56e2n37+vW7Gj8/n/qjE/64OcAqKPT+
|
||||
gMz/Ud0xPRiJXmg1YWaRegyu2cM22z3crx/XEQyaowyyzvi0uMvSQSiR5ut8k66LNCuu8l4Lhg5K
|
||||
8iMhhJBz/IagiuMJShLNYmVA52iHUN6aCAGrZagAdU44T5WH1QyZVh5VzN40zcFpValzpQKrwDFt
|
||||
sYKSFJW6VKppmqXUYjs6GvKrUcoFoEppT8P8MfTLlVxuMaXujNV7904KrVDC9bVF6rQKkZzXBiK9
|
||||
JIS8xHWMbyYEY/VgfO31EePvisfJDuZXmGF2f4Veeyrn+jZffeBWc/RUSLdYJzDKeuSzct49HbnQ
|
||||
C5AsZv47zEfe09xCdf9jPwPG0HjktbHIBXs78NxmMdzov9puO46BwaH9JRjWXqAN78CxpaOcDgfc
|
||||
q/M41K1QHVpjxXQ9rakLlu82Wbvb5pBckt8AAAD//wMA5Zmg4EwDAAA=
|
||||
H4sIAAAAAAAAAwAAAP//jFPLbtswELz7KxY824btOImjW5CiqE8tiqBAWwUGTa2krSkuQ67sBoH/
|
||||
vaD8kNMH0Isg7OwMZ7nD1wGAokJloEytxTTejh6+VpNPy9nzfPNZlptnXu4e7N3jl+YbvZuwGiYG
|
||||
r3+gkRNrbLjxFoXYHWATUAsm1entzexqMbm5vu2Ahgu0iVZ5Gc3H01FDjkazyex6NJmPpvMjvWYy
|
||||
GFUG3wcAAK/dNxl1Bf5UGUyGp0qDMeoKVXZuAlCBbaooHSNF0U7UsAcNO0HXeX+sua1qyeCxRhAS
|
||||
i5Cr9E+N10aAS7hfAjmQGqFspQ2YajsOm1wBRTAWdRhCQItb7WQI2hVg2BmKOIalgDamDVrQvkDA
|
||||
0qKRCBoiVY5KMtrJgdGGgE5A2JMBqbUkcUubxBMGLRKSH3KCAaOM4QPvcIthCCRguLUFrBEaDgjR
|
||||
o0naoNfcSudcXnzn+zRVgGjYY1Ju9AaTRkdNW0RryVVj+LjFoK3tDqDOswR2VWcXyxKN0PZ4Z+Pc
|
||||
5e49OW3h3sUdhgxecweQq2g4YK4ymOduf7mDgGUbdQqCa629ALRzLDoFqdv+0xHZn/dtufKB1/E3
|
||||
qirJUaxXAXVkl3Ybhb3q0P0A4KnLVfsmKsoHbryshDfYHTe7mx/0VJ/nHl0cQ6eERdu+fnV7Yr3R
|
||||
WxUommy8SKYy2tRY9NQ+xrotiC+AwcXUf7r5m/ZhcnLV/8j3gDHoBYuVD1iQeTtx3xYwPfd/tZ1v
|
||||
uTOsIoYtGVwJYUibKLDUrT28QRVfomCzKslVGHygw0Ms/WpuZovrabm4manBfvALAAD//wMAJZym
|
||||
nZcEAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 996f475b7e3fedda-MXP
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -590,14 +77,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 31 Oct 2025 01:11:31 GMT
|
||||
- Wed, 05 Nov 2025 22:10:59 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=9OwoBJAn84Nsq0RZdCIu06cNB6RLqor4C1.Q58nU28U-1761873091-1.0.1.1-p82_h8Vnxe0NfH5Iv6MFt.SderZj.v9VnCx_ro6ti2MGhlJOLFsPd6XhBxPsnmuV7Vt_4_uqAbE57E5f1Epl1cmGBT.0844N3CLnTwZFWQI;
|
||||
path=/; expires=Fri, 31-Oct-25 01:41:31 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Wed, 05-Nov-25 22:40:59 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=E4.xW3I8m58fngo4vkTKo8hmBumar1HkV.yU8KKjlZg-1761873091967-0.0.1.1-604800000;
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
@@ -612,152 +99,31 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '1770'
|
||||
- '1476'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1998'
|
||||
- '1508'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999955'
|
||||
- '200000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999952'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
- '199687'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 93ms
|
||||
x-request-id:
|
||||
- req_ba7a12cb40744f648d17844196f9c2c6
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"Please convert the following text
|
||||
into valid JSON.\n\nOutput ONLY the valid JSON and nothing else.\n\nThe JSON
|
||||
must follow this schema exactly:\n```json\n{\n score: int\n}\n```"},{"role":"user","content":"4"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '541'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=9OwoBJAn84Nsq0RZdCIu06cNB6RLqor4C1.Q58nU28U-1761873091-1.0.1.1-p82_h8Vnxe0NfH5Iv6MFt.SderZj.v9VnCx_ro6ti2MGhlJOLFsPd6XhBxPsnmuV7Vt_4_uqAbE57E5f1Epl1cmGBT.0844N3CLnTwZFWQI;
|
||||
_cfuvid=E4.xW3I8m58fngo4vkTKo8hmBumar1HkV.yU8KKjlZg-1761873091967-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-helper-method:
|
||||
- chat.completions.parse
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLBbqMwFLzzFdY7hypQklKuVS899NpK2wo59gPcGtuyH92uovz7ypAE
|
||||
0t1KvXB482Y8M7x9whgoCRUD0XESvdPp3dPzZ/gty77xO7fZPj4MMs/ldYf3Lt/CKjLs7g0FnVhX
|
||||
wvZOIylrJlh45IRRNbvZZuXN9fo2H4HeStSR1jpKi6ss7ZVRab7ON+m6SLPiSO+sEhigYr8Sxhjb
|
||||
j99o1Ej8hIqtV6dJjyHwFqE6LzEG3uo4AR6CCsQNwWoGhTWEZvS+f4EgrMcXqIrDcsdjMwQejZpB
|
||||
6wXAjbHEY9DR3esROZz9aNs6b3fhCxUaZVToao88WBPfDmQdjOghYex1zD1cRAHnbe+oJvuO43Pl
|
||||
ZpKDue4ZPGFkiet5fHus6lKslkhc6bCoDQQXHcqZOXfMB6nsAkgWkf/18j/tKbYy7U/kZ0AIdISy
|
||||
dh6lEpd55zWP8Ra/WztXPBqGgP5DCaxJoY+/QWLDBz0dCIQ/gbCvG2Va9M6r6UoaVxciLzdZU25z
|
||||
SA7JXwAAAP//AwAXjqY4NAMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 996f47692b63edda-MXP
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 31 Oct 2025 01:11:33 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '929'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '991'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999955'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999955'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_892607f68e764ba3846c431954608c36
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
|
||||
@@ -1,41 +1,149 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Guardrail Agent. You
|
||||
are a expert at validating the output of a task. By providing effective feedback
|
||||
if the output is not valid.\nYour personal goal is: Validate the output of the
|
||||
task\n\nTo give my best complete final answer to the task respond using the
|
||||
exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
|
||||
Your final answer must be the great and the most complete as possible, it must
|
||||
be outcome described.\n\nI MUST use these formats, my job depends on it!\nIMPORTANT:
|
||||
Your final answer MUST contain all the information requested in the following
|
||||
format: {\n \"valid\": bool,\n \"feedback\": str | None\n}\n\nIMPORTANT: Ensure
|
||||
the final output does not include any code block markers like ```json or ```python."},
|
||||
{"role": "user", "content": "\n Ensure the following task result complies
|
||||
with the given guardrail.\n\n Task result:\n \n Lorem Ipsum
|
||||
is simply dummy text of the printing and typesetting industry. Lorem Ipsum has
|
||||
been the industry''s standard dummy text ever\n \n\n Guardrail:\n Ensure
|
||||
the result has less than 10 words\n \n Your task:\n - Confirm
|
||||
if the Task result complies with the guardrail.\n - If not, provide clear
|
||||
feedback explaining what is wrong (e.g., by how much it violates the rule, or
|
||||
what specific part fails).\n - Focus only on identifying issues \u2014
|
||||
do not propose corrections.\n - If the Task result complies with the
|
||||
guardrail, saying that is valid\n "}], "model": "gpt-4o-mini", "stop":
|
||||
["\nObservation:"]}'
|
||||
body: '{"trace_id": "00000000-0000-0000-0000-000000000000", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-05T22:19:56.074812+00:00"}}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '434'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.3.0
|
||||
X-Crewai-Version:
|
||||
- 1.3.0
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"error":"bad_credentials","message":"Bad credentials"}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '55'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 22:19:56 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 230c6cb5-92c7-448d-8c94-e5548a9f4259
|
||||
x-runtime:
|
||||
- '0.073220'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 401
|
||||
message: Unauthorized
|
||||
- request:
|
||||
body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Guardrail Agent.
|
||||
You are a expert at validating the output of a task. By providing effective
|
||||
feedback if the output is not valid.\\nYour personal goal is: Validate the output
|
||||
of the task\\n\\nTo give my best complete final answer to the task respond using
|
||||
the exact following format:\\n\\nThought: I now can give a great answer\\nFinal
|
||||
Answer: Your final answer must be the great and the most complete as possible,
|
||||
it must be outcome described.\\n\\nI MUST use these formats, my job depends
|
||||
on it!Ensure your final answer strictly adheres to the following OpenAPI schema:
|
||||
{\\n \\\"type\\\": \\\"json_schema\\\",\\n \\\"json_schema\\\": {\\n \\\"name\\\":
|
||||
\\\"LLMGuardrailResult\\\",\\n \\\"strict\\\": true,\\n \\\"schema\\\":
|
||||
{\\n \\\"properties\\\": {\\n \\\"valid\\\": {\\n \\\"description\\\":
|
||||
\\\"Whether the task output complies with the guardrail\\\",\\n \\\"title\\\":
|
||||
\\\"Valid\\\",\\n \\\"type\\\": \\\"boolean\\\"\\n },\\n \\\"feedback\\\":
|
||||
{\\n \\\"anyOf\\\": [\\n {\\n \\\"type\\\":
|
||||
\\\"string\\\"\\n },\\n {\\n \\\"type\\\":
|
||||
\\\"null\\\"\\n }\\n ],\\n \\\"default\\\": null,\\n
|
||||
\ \\\"description\\\": \\\"A feedback about the task output if it is
|
||||
not valid\\\",\\n \\\"title\\\": \\\"Feedback\\\"\\n }\\n },\\n
|
||||
\ \\\"required\\\": [\\n \\\"valid\\\",\\n \\\"feedback\\\"\\n
|
||||
\ ],\\n \\\"title\\\": \\\"LLMGuardrailResult\\\",\\n \\\"type\\\":
|
||||
\\\"object\\\",\\n \\\"additionalProperties\\\": false\\n }\\n }\\n}\\n\\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\"},{\"role\":\"user\",\"content\":\"\\n
|
||||
\ Ensure the following task result complies with the given guardrail.\\n\\n
|
||||
\ Task result:\\n \\n Lorem Ipsum is simply dummy text of
|
||||
the printing and typesetting industry. Lorem Ipsum has been the industry's standard
|
||||
dummy text ever\\n \\n\\n Guardrail:\\n Ensure the result
|
||||
has less than 10 words\\n\\n Your task:\\n - Confirm if the Task
|
||||
result complies with the guardrail.\\n - If not, provide clear feedback
|
||||
explaining what is wrong (e.g., by how much it violates the rule, or what specific
|
||||
part fails).\\n - Focus only on identifying issues \u2014 do not propose
|
||||
corrections.\\n - If the Task result complies with the guardrail, saying
|
||||
that is valid\\n \"}],\"model\":\"gpt-4o\"}"
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1629'
|
||||
- '2452'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.68.2
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -45,11 +153,9 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.68.2
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
@@ -61,19 +167,19 @@ interactions:
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFPLbtswELz7KxY824GkxLGtW4KiQB+XBmkRtAqENbmSmFAkQVJ2UsP/
|
||||
HlByLKdNgV4IcGdnOPvgbgLApGA5MN5g4K1Vs+ub718rm324+5z+/CLt1dXD5fU3s1jd3Wx//GbT
|
||||
yDDrB+LhlXXGTWsVBWn0AHNHGCiqpouL+XKVZum8B1ojSEVabcPswsxaqeUsS7KLWbKYpcsDuzGS
|
||||
k2c5/JoAAOz6M/rUgp5YDsn0NdKS91gTy49JAMwZFSMMvZc+oA5sOoLc6EC6t37bmK5uQg6fQJst
|
||||
cNRQyw0BQh39A2q/JQdQ6I9So4Kr/p7DrtAABdugkqJgOVSoPE2HYEUk1sgfY7xgtw1BQP8Ijnyn
|
||||
AsTHUWoP6SVsjRN+CvTEiYTUNYSGoO7QCYdSgZKtDGAqqCiaCA1qSJOBBetnOAicFazQ+9MCHVWd
|
||||
x9hk3Sl1AqDWJmAcUt/a+wOyPzZTmdo6s/Z/UFkltfRN6Qi90bFxPhjLenQ/Abjvh9a9mQOzzrQ2
|
||||
lME8Uv/ceTIf9Ni4KyM6Tw9gMAHVCWt+OX1HrxQUUCp/MnbGkTckRuq4I9gJaU6AyUnVf7t5T3uo
|
||||
XOr6f+RHgHOygURpHQnJ31Y8pjmKX+lfaccu94aZJ7eRnMogycVJCKqwU8OCM//sA7VlJXVNzjo5
|
||||
bHlly+R8lS2zLFklbLKfvAAAAP//AwCHe/Jh8wMAAA==
|
||||
H4sIAAAAAAAAAwAAAP//jFPBjtowEL3zFSOfYUXowkJubaWq7aUV2kvVrKLBniQujp3akwBC/Hvl
|
||||
wG5gu5V68WHezPObeTPHEYDQSqQgZIUs68ZMPv4oV4u1PszWX9d7++Fz59bf2u2h/r7adUGMY4Xb
|
||||
/CLJz1V30tWNIdbOnmHpCZkia/KwmL1bJslq0QO1U2RiWdnw5N5NZtPZ/WS6nEwXl8LKaUlBpPBz
|
||||
BABw7N8o0SraixSm4+dITSFgSSJ9SQIQ3pkYERiCDoyWxXgApbNMtlf9WLm2rDiFL2CJFLADWZHc
|
||||
gi6AKwLGsAVPoTUMNRGHPurpd6s91WQZXAEVdtqWYChEGC0kU9g5r8JdZjP7SVs08N6GHfkUjpkF
|
||||
yESHRqtMpFCgCTQ+BwsitUG5jfFMPL76PqpGbQPUztPtP2PotDPIUUXUV7bolUdt7qBnoT1D412n
|
||||
FamBBzeuZZglz1pFZk/XY/JUtAGjS7Y15gpAax1jdLk36OmCnF4sMa5svNuEV6Wi0FaHKveEwdk4
|
||||
/sCuET16GgE89da3N26Kxru64Zzdlvrv7perM58Ylm1AF8kFZMdohvh8flmYW75cEaM24Wp5hERZ
|
||||
kRpKh03DVml3BYyuuv5bzVvc5861Lf+HfgCkpIZJ5Y0npeVtx0Oap3iL/0p7mXIvWATynZaUsyYf
|
||||
nVBUYGvOZyLCITDVeaFtSb7x+nwrRZPLTZE8LOfzxYMYnUZ/AAAA//8DAK3pA/U0BAAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 937b20ddf9607def-GRU
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -81,15 +187,17 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 29 Apr 2025 01:46:56 GMT
|
||||
- Wed, 05 Nov 2025 22:19:58 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=nHa2kVJI_yO1RIsmZcEednJ1e9UVy1liv_sjBNtSj7Q-1745891216-1.0.1.1-jUH9kFawVBjnbq8sIL2.MQx.p7JvBZWUhqlkNKRlStWSgQxT0eZMPcgq9TCQoJAjuyNwhqfpK4HuX6x5n8UbQgAb6JrWJEG823e6GpGROEA;
|
||||
path=/; expires=Tue, 29-Apr-25 02:16:56 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Wed, 05-Nov-25 22:49:58 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=gg2UeahMCOOR8YhitRtzDwENMOnTOuQdyTMVJVHG0Mg-1745891216085-0.0.1.1-604800000;
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
@@ -101,84 +209,85 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '896'
|
||||
- '2201'
|
||||
openai-project:
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '2401'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
- '30000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999631'
|
||||
- '29439'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 1.122s
|
||||
x-request-id:
|
||||
- req_859221ed1aedb26cc9d335004ccf183e
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Guardrail Agent. You
|
||||
are a expert at validating the output of a task. By providing effective feedback
|
||||
if the output is not valid.\nYour personal goal is: Validate the output of the
|
||||
task\n\nTo give my best complete final answer to the task respond using the
|
||||
exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
|
||||
Your final answer must be the great and the most complete as possible, it must
|
||||
be outcome described.\n\nI MUST use these formats, my job depends on it!\nIMPORTANT:
|
||||
Your final answer MUST contain all the information requested in the following
|
||||
format: {\n \"valid\": bool,\n \"feedback\": str | None\n}\n\nIMPORTANT: Ensure
|
||||
the final output does not include any code block markers like ```json or ```python."},
|
||||
{"role": "user", "content": "\n Ensure the following task result complies
|
||||
with the given guardrail.\n\n Task result:\n \n Lorem Ipsum
|
||||
is simply dummy text of the printing and typesetting industry. Lorem Ipsum has
|
||||
been the industry''s standard dummy text ever\n \n\n Guardrail:\n Ensure
|
||||
the result has less than 500 words\n \n Your task:\n -
|
||||
Confirm if the Task result complies with the guardrail.\n - If not, provide
|
||||
clear feedback explaining what is wrong (e.g., by how much it violates the rule,
|
||||
or what specific part fails).\n - Focus only on identifying issues \u2014
|
||||
do not propose corrections.\n - If the Task result complies with the
|
||||
guardrail, saying that is valid\n "}], "model": "gpt-4o-mini", "stop":
|
||||
["\nObservation:"]}'
|
||||
body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly
|
||||
adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\":
|
||||
{\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\":
|
||||
{\n \"properties\": {\n \"valid\": {\n \"description\":
|
||||
\"Whether the task output complies with the guardrail\",\n \"title\":
|
||||
\"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\":
|
||||
{\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\":
|
||||
\"null\"\n }\n ],\n \"default\": null,\n \"description\":
|
||||
\"A feedback about the task output if it is not valid\",\n \"title\":
|
||||
\"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\":
|
||||
\"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output.
|
||||
Ensure the final output does not include any code block markers like ```json
|
||||
or ```python."},{"role":"user","content":"{\n \"valid\": false,\n \"feedback\":
|
||||
\"The task result contains more than 10 words, violating the guardrail. The
|
||||
text provided contains about 21 words.\"\n}"}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether
|
||||
the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A
|
||||
feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1630'
|
||||
- '1884'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=nHa2kVJI_yO1RIsmZcEednJ1e9UVy1liv_sjBNtSj7Q-1745891216-1.0.1.1-jUH9kFawVBjnbq8sIL2.MQx.p7JvBZWUhqlkNKRlStWSgQxT0eZMPcgq9TCQoJAjuyNwhqfpK4HuX6x5n8UbQgAb6JrWJEG823e6GpGROEA;
|
||||
_cfuvid=gg2UeahMCOOR8YhitRtzDwENMOnTOuQdyTMVJVHG0Mg-1745891216085-0.0.1.1-604800000
|
||||
- __cf_bm=REDACTED;
|
||||
_cfuvid=REDACTED
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.68.2
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-helper-method:
|
||||
- chat.completions.parse
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.68.2
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
@@ -190,18 +299,18 @@ interactions:
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jJJNb9swDIbv/hWEzvHgfHRpfesOG3opsGE7LYXBSLStRZY0iU43BPnv
|
||||
g5wPu10H7GLAfPhSfEkeMgChlShByBZZdt7kH758e1wzbnfbO6o/f1osV3T/+BO7UNNDIWZJ4bY/
|
||||
SPJF9U66zhti7ewJy0DIlKrO16ub27v5srgZQOcUmSRrPOcrl3fa6nxRLFZ5sc7nt2d167SkKEr4
|
||||
ngEAHIZv6tMq+iVKKGaXSEcxYkOivCYBiOBMigiMUUdGy2I2Qukskx1a/9q6vmm5hAew7hkkWmj0
|
||||
ngChSf0D2vhMAWBjP2qLBu6H/xIOGwuwEXs0Wm1ECRx6mp1iNZHaotylsO2N2djj9PFAdR/RnOEE
|
||||
oLWOMQ1wsP10JserUeMaH9w2vpKKWlsd2yoQRmeTqcjOi4EeM4CnYaD9ixkJH1znuWK3o+G583KG
|
||||
4Vz2ONLF7RmyYzQT1XI5e6NepYhRmzhZiZAoW1KjdNwf9kq7Ccgmrv/u5q3aJ+faNv9TfgRSkmdS
|
||||
lQ+ktHzpeEwLlM78X2nXKQ8Ni0hhryVVrCmkTSiqsTen4xPxd2TqqlrbhoIP+nSBta/SueD7QtWF
|
||||
yI7ZHwAAAP//AwAiLXhqjwMAAA==
|
||||
H4sIAAAAAAAAAwAAAP//jFNBbtswELzrFQueZcNyHFnWNbcCLRDAhzRVINDkSmJNkQS5chMY/nsh
|
||||
ybGUNgV64WFnZzg7S54jAKYky4GJhpNonV48fK932aN+zr7t/WO6S7fpV/f09PZgn7/sMxb3DHv4
|
||||
iYLeWUthW6eRlDUjLDxywl412abruyxJdtkAtFai7mm1o8XGLtar9Waxyhar9EpsrBIYWA4/IgCA
|
||||
83D2Fo3EV5bDKn6vtBgCr5HltyYA5q3uK4yHoAJxQyyeQGENoRlcnwt24lrJguUV1wHjglWI8sDF
|
||||
sWB5wfYNAvFwBI+h0wQ9lSsToLUegRpuIFnBL+tliOGkrOakTA3UINQd99JzpZcwqOArgfP2pCTK
|
||||
SYcfbEewTkaNZcEuc6ceqy7wPijTaT0DuDGWeB/0kNHLFbncUtG2dt4ewh9UVimjQlN65MGaPoFA
|
||||
1rEBvUQAL0P63YdAmfO2dVSSPeJw3d12M+qxad8zdH0FyRLXU32zSuNP9EqJxJUOs/0xwUWDcqJO
|
||||
y+adVHYGRLOp/3bzmfY4uTL1/8hPgBDoCGXpPEolPk48tXnsv8O/2m4pD4ZZQH9SAktS6PtNSKx4
|
||||
p8eXysJbIGzLSpkavfNqfK6VK8WhSrbZ/X26ZdEl+g0AAP//AwAJs8yXtwMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 937b2311ee091b1b-GRU
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -209,9 +318,11 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 29 Apr 2025 01:48:26 GMT
|
||||
- Wed, 05 Nov 2025 22:19:59 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
@@ -223,27 +334,294 @@ interactions:
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '610'
|
||||
- '419'
|
||||
openai-project:
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '432'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
- '30000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999631'
|
||||
- '29702'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- 596ms
|
||||
x-request-id:
|
||||
- req_c136835c16be6bc1e4d820f239c4b620
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Guardrail Agent.
|
||||
You are a expert at validating the output of a task. By providing effective
|
||||
feedback if the output is not valid.\\nYour personal goal is: Validate the output
|
||||
of the task\\n\\nTo give my best complete final answer to the task respond using
|
||||
the exact following format:\\n\\nThought: I now can give a great answer\\nFinal
|
||||
Answer: Your final answer must be the great and the most complete as possible,
|
||||
it must be outcome described.\\n\\nI MUST use these formats, my job depends
|
||||
on it!Ensure your final answer strictly adheres to the following OpenAPI schema:
|
||||
{\\n \\\"type\\\": \\\"json_schema\\\",\\n \\\"json_schema\\\": {\\n \\\"name\\\":
|
||||
\\\"LLMGuardrailResult\\\",\\n \\\"strict\\\": true,\\n \\\"schema\\\":
|
||||
{\\n \\\"properties\\\": {\\n \\\"valid\\\": {\\n \\\"description\\\":
|
||||
\\\"Whether the task output complies with the guardrail\\\",\\n \\\"title\\\":
|
||||
\\\"Valid\\\",\\n \\\"type\\\": \\\"boolean\\\"\\n },\\n \\\"feedback\\\":
|
||||
{\\n \\\"anyOf\\\": [\\n {\\n \\\"type\\\":
|
||||
\\\"string\\\"\\n },\\n {\\n \\\"type\\\":
|
||||
\\\"null\\\"\\n }\\n ],\\n \\\"default\\\": null,\\n
|
||||
\ \\\"description\\\": \\\"A feedback about the task output if it is
|
||||
not valid\\\",\\n \\\"title\\\": \\\"Feedback\\\"\\n }\\n },\\n
|
||||
\ \\\"required\\\": [\\n \\\"valid\\\",\\n \\\"feedback\\\"\\n
|
||||
\ ],\\n \\\"title\\\": \\\"LLMGuardrailResult\\\",\\n \\\"type\\\":
|
||||
\\\"object\\\",\\n \\\"additionalProperties\\\": false\\n }\\n }\\n}\\n\\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\"},{\"role\":\"user\",\"content\":\"\\n
|
||||
\ Ensure the following task result complies with the given guardrail.\\n\\n
|
||||
\ Task result:\\n \\n Lorem Ipsum is simply dummy text of
|
||||
the printing and typesetting industry. Lorem Ipsum has been the industry's standard
|
||||
dummy text ever\\n \\n\\n Guardrail:\\n Ensure the result
|
||||
has less than 500 words\\n\\n Your task:\\n - Confirm if the Task
|
||||
result complies with the guardrail.\\n - If not, provide clear feedback
|
||||
explaining what is wrong (e.g., by how much it violates the rule, or what specific
|
||||
part fails).\\n - Focus only on identifying issues \u2014 do not propose
|
||||
corrections.\\n - If the Task result complies with the guardrail, saying
|
||||
that is valid\\n \"}],\"model\":\"gpt-4o\"}"
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2453'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4ySTW/bMAyG7/4VBM/JkDif860dNmAfvQ0thqUwGIm2tcqSJsnpuiL/vZCTxunW
|
||||
AbsYMB++FF+SjxkAKokFoGgoitbp8btv9eXV9bLqVu931/nlz99X8hN9fvhyUU9vbnCUFHb7g0V8
|
||||
Vr0RtnWao7LmgIVnipyqTlfLfLaezmbLHrRWsk6y2sXx3I7zST4fT9bjyfIobKwSHLCA7xkAwGP/
|
||||
TS0ayb+wgMnoOdJyCFQzFqckAPRWpwhSCCpEMhFHAxTWRDZ9118b29VNLOAjGHsPggzUasdAUKfW
|
||||
gUy4Z78xH5QhDRf9XwGPG9yRVnKDBUTf8Qg2WDHLLYm7FDOd1vvzFz1XXSB9RGeAjLGR0sB6r7dH
|
||||
sj+507Z23m7DH1KslFGhKT1TsCY5CdE67Ok+A7jtp9i9GAw6b1sXy2jvuH9uvn57qIfD3gaaz44w
|
||||
2kh6iC+m+eiVeqXkSEqHsz2gINGwHKTD0qiTyp6B7Mz13928VvvgXJn6f8oPQAh2kWXpPEslXjoe
|
||||
0jyns/5X2mnKfcMY2O+U4DIq9mkTkivq9OHiMDyEyG1ZKVOzd14dzq5ypdhW09V6sViuMNtnTwAA
|
||||
AP//AwA2fPW9fwMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 22:22:16 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=REDACTED;
|
||||
path=/; expires=Wed, 05-Nov-25 22:52:16 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=REDACTED;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '327'
|
||||
openai-project:
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '372'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29438'
|
||||
x-ratelimit-reset-requests:
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 1.124s
|
||||
x-request-id:
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly
|
||||
adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\":
|
||||
{\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\":
|
||||
{\n \"properties\": {\n \"valid\": {\n \"description\":
|
||||
\"Whether the task output complies with the guardrail\",\n \"title\":
|
||||
\"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\":
|
||||
{\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\":
|
||||
\"null\"\n }\n ],\n \"default\": null,\n \"description\":
|
||||
\"A feedback about the task output if it is not valid\",\n \"title\":
|
||||
\"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\":
|
||||
\"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output.
|
||||
Ensure the final output does not include any code block markers like ```json
|
||||
or ```python."},{"role":"user","content":"{\"valid\": true, \"feedback\": null}"}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether
|
||||
the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A
|
||||
feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1762'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=REDACTED;
|
||||
_cfuvid=REDACTED
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-helper-method:
|
||||
- chat.completions.parse
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jJJBj9MwEIXv+RXWnBOUtmlacgMOe4EeKiGE6Cpy7Ulq1rGNPalAVf87
|
||||
ctJtsrBIXHzwN+/5zXguCWOgJFQMxImT6JzOPnxt33/6vMz3xfrHwwP/uCvPu3fdl8VuX+xLSKPC
|
||||
Hr+joGfVG2E7p5GUNSMWHjlhdF1syuVqu1itygF0VqKOstZRVthsmS+LLN9m+c1XnKwSGKBi3xLG
|
||||
GLsMZ4xoJP6EiuXp802HIfAWoboXMQbe6ngDPAQViBuCdILCGkIzpL4c4My1kgeoyPeYHqBBlEcu
|
||||
ng5QmV7r61zosekDj7kjmgFujCUe+x4iP97I9R5S29Z5ewx/SKFRRoVT7ZEHa2KgQNbBQK8JY4/D
|
||||
MPoX/YHztnNUk33C4blVsRn9YBr/RN/eGFnieiZal+krdrVE4kqH2TRBcHFCOUmn0fNeKjsDyazp
|
||||
v8O85j02rkz7P/YTEAIdoaydR6nEy4anMo9xOf9Vdh/yEBgC+rMSWJNCHz9CYsN7Pe4NhF+BsKsb
|
||||
ZVr0zqtxeRpXi2Oz2GzX63IDyTX5DQAA//8DAMF71y1FAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- REDACTED-RAY
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 22:22:17 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
openai-processing-ms:
|
||||
- '1081'
|
||||
openai-project:
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1241'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '500'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '499'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29478'
|
||||
x-ratelimit-reset-requests:
|
||||
- 120ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 1.042s
|
||||
x-request-id:
|
||||
- req_REDACTED
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
|
||||
@@ -208,4 +208,100 @@ interactions:
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"trace_id": "22b47496-d65c-4781-846f-5493606a51cc", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-05T23:31:51.004551+00:00"}}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '434'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.3.0
|
||||
X-Crewai-Version:
|
||||
- 1.3.0
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"error":"bad_credentials","message":"Bad credentials"}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '55'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 23:31:51 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- ba6636f8-2374-4a67-8176-88341f2999ed
|
||||
x-runtime:
|
||||
- '0.081473'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 401
|
||||
message: Unauthorized
|
||||
version: 1
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -62,18 +62,23 @@ class TestAgentEvaluator:
|
||||
agents=mock_crew.agents, evaluators=[GoalAlignmentEvaluator()]
|
||||
)
|
||||
|
||||
task_completed_event = threading.Event()
|
||||
task_completed_condition = threading.Condition()
|
||||
task_completed = False
|
||||
|
||||
@crewai_event_bus.on(TaskCompletedEvent)
|
||||
async def on_task_completed(source, event):
|
||||
# TaskCompletedEvent fires AFTER evaluation results are stored
|
||||
task_completed_event.set()
|
||||
nonlocal task_completed
|
||||
with task_completed_condition:
|
||||
task_completed = True
|
||||
task_completed_condition.notify()
|
||||
|
||||
mock_crew.kickoff()
|
||||
|
||||
assert task_completed_event.wait(timeout=5), (
|
||||
"Timeout waiting for task completion"
|
||||
)
|
||||
with task_completed_condition:
|
||||
assert task_completed_condition.wait_for(
|
||||
lambda: task_completed, timeout=5
|
||||
), "Timeout waiting for task completion"
|
||||
|
||||
results = agent_evaluator.get_evaluation_results()
|
||||
|
||||
|
||||
@@ -601,3 +601,81 @@ def test_file_path_validation():
|
||||
match="file_path/file_paths must be a Path, str, or a list of these types",
|
||||
):
|
||||
PDFKnowledgeSource()
|
||||
|
||||
|
||||
def test_hash_based_id_generation_without_doc_id(mock_vector_db):
|
||||
"""Test that documents without doc_id generate hash-based IDs. Duplicates are deduplicated before upsert."""
|
||||
import hashlib
|
||||
import json
|
||||
from crewai.rag.chromadb.utils import _prepare_documents_for_chromadb
|
||||
from crewai.rag.types import BaseRecord
|
||||
|
||||
documents: list[BaseRecord] = [
|
||||
{"content": "First document content", "metadata": {"source": "test1", "category": "research"}},
|
||||
{"content": "Second document content", "metadata": {"source": "test2", "category": "research"}},
|
||||
{"content": "Third document content"}, # No metadata
|
||||
]
|
||||
|
||||
result = _prepare_documents_for_chromadb(documents)
|
||||
|
||||
assert len(result.ids) == 3
|
||||
|
||||
# Unique documents should get 64-character hex hashes (no suffix)
|
||||
for doc_id in result.ids:
|
||||
assert len(doc_id) == 64, f"ID should be 64 characters: {doc_id}"
|
||||
assert all(c in "0123456789abcdef" for c in doc_id), f"ID should be hex: {doc_id}"
|
||||
|
||||
# Different documents should have different hashes
|
||||
assert result.ids[0] != result.ids[1] != result.ids[2]
|
||||
|
||||
# Verify hashes match expected values
|
||||
expected_hash_1 = hashlib.sha256(
|
||||
f"First document content|{json.dumps({'category': 'research', 'source': 'test1'}, sort_keys=True)}".encode()
|
||||
).hexdigest()
|
||||
assert result.ids[0] == expected_hash_1, "First document hash should match expected"
|
||||
|
||||
expected_hash_3 = hashlib.sha256("Third document content".encode()).hexdigest()
|
||||
assert result.ids[2] == expected_hash_3, "Third document hash should match expected"
|
||||
|
||||
# Test that duplicate documents are deduplicated (same ID, only one sent)
|
||||
duplicate_documents: list[BaseRecord] = [
|
||||
{"content": "Same content", "metadata": {"source": "test"}},
|
||||
{"content": "Same content", "metadata": {"source": "test"}},
|
||||
{"content": "Same content", "metadata": {"source": "test"}},
|
||||
]
|
||||
duplicate_result = _prepare_documents_for_chromadb(duplicate_documents)
|
||||
# Duplicates should be deduplicated - only one ID should remain
|
||||
assert len(duplicate_result.ids) == 1, "Duplicate documents should be deduplicated"
|
||||
assert len(duplicate_result.ids[0]) == 64, "Deduplicated ID should be clean hash"
|
||||
# Verify it's the expected hash
|
||||
expected_hash = hashlib.sha256(
|
||||
f"Same content|{json.dumps({'source': 'test'}, sort_keys=True)}".encode()
|
||||
).hexdigest()
|
||||
assert duplicate_result.ids[0] == expected_hash, "Deduplicated ID should match expected hash"
|
||||
|
||||
|
||||
def test_hash_based_id_generation_with_doc_id_in_metadata(mock_vector_db):
|
||||
"""Test that documents with doc_id in metadata use the doc_id directly, not hash-based."""
|
||||
from crewai.rag.chromadb.utils import _prepare_documents_for_chromadb
|
||||
from crewai.rag.types import BaseRecord
|
||||
|
||||
documents_with_doc_id: list[BaseRecord] = [
|
||||
{"content": "First document", "metadata": {"doc_id": "custom-id-1", "source": "test1"}},
|
||||
{"content": "Second document", "metadata": {"doc_id": "custom-id-2"}},
|
||||
]
|
||||
|
||||
documents_without_doc_id: list[BaseRecord] = [
|
||||
{"content": "First document", "metadata": {"source": "test1"}},
|
||||
{"content": "Second document"},
|
||||
]
|
||||
|
||||
result_with_doc_id = _prepare_documents_for_chromadb(documents_with_doc_id)
|
||||
result_without_doc_id = _prepare_documents_for_chromadb(documents_without_doc_id)
|
||||
|
||||
assert result_with_doc_id.ids == ["custom-id-1", "custom-id-2"]
|
||||
|
||||
assert len(result_without_doc_id.ids) == 2
|
||||
# Unique documents get 64-character hashes
|
||||
for doc_id in result_without_doc_id.ids:
|
||||
assert len(doc_id) == 64, "ID should be 64 characters"
|
||||
assert all(c in "0123456789abcdef" for c in doc_id), "ID should be hex"
|
||||
|
||||
@@ -664,3 +664,37 @@ def test_anthropic_token_usage_tracking():
|
||||
assert usage["input_tokens"] == 50
|
||||
assert usage["output_tokens"] == 25
|
||||
assert usage["total_tokens"] == 75
|
||||
|
||||
|
||||
def test_anthropic_stop_sequences_sync():
|
||||
"""Test that stop and stop_sequences attributes stay synchronized."""
|
||||
llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")
|
||||
|
||||
# Test setting stop as a list
|
||||
llm.stop = ["\nObservation:", "\nThought:"]
|
||||
assert llm.stop_sequences == ["\nObservation:", "\nThought:"]
|
||||
assert llm.stop == ["\nObservation:", "\nThought:"]
|
||||
|
||||
# Test setting stop as a string
|
||||
llm.stop = "\nFinal Answer:"
|
||||
assert llm.stop_sequences == ["\nFinal Answer:"]
|
||||
assert llm.stop == ["\nFinal Answer:"]
|
||||
|
||||
# Test setting stop as None
|
||||
llm.stop = None
|
||||
assert llm.stop_sequences == []
|
||||
assert llm.stop == []
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
|
||||
def test_anthropic_stop_sequences_sent_to_api():
|
||||
"""Test that stop_sequences are properly sent to the Anthropic API."""
|
||||
llm = LLM(model="anthropic/claude-3-5-haiku-20241022")
|
||||
|
||||
llm.stop = ["\nObservation:", "\nThought:"]
|
||||
|
||||
result = llm.call("Say hello in one word")
|
||||
|
||||
assert result is not None
|
||||
assert isinstance(result, str)
|
||||
assert len(result) > 0
|
||||
|
||||
@@ -736,3 +736,56 @@ def test_bedrock_client_error_handling():
|
||||
with pytest.raises(RuntimeError) as exc_info:
|
||||
llm.call("Hello")
|
||||
assert "throttled" in str(exc_info.value).lower()
|
||||
|
||||
|
||||
def test_bedrock_stop_sequences_sync():
|
||||
"""Test that stop and stop_sequences attributes stay synchronized."""
|
||||
llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
|
||||
|
||||
# Test setting stop as a list
|
||||
llm.stop = ["\nObservation:", "\nThought:"]
|
||||
assert list(llm.stop_sequences) == ["\nObservation:", "\nThought:"]
|
||||
assert llm.stop == ["\nObservation:", "\nThought:"]
|
||||
|
||||
# Test setting stop as a string
|
||||
llm.stop = "\nFinal Answer:"
|
||||
assert list(llm.stop_sequences) == ["\nFinal Answer:"]
|
||||
assert llm.stop == ["\nFinal Answer:"]
|
||||
|
||||
# Test setting stop as None
|
||||
llm.stop = None
|
||||
assert list(llm.stop_sequences) == []
|
||||
assert llm.stop == []
|
||||
|
||||
|
||||
def test_bedrock_stop_sequences_sent_to_api():
|
||||
"""Test that stop_sequences are properly sent to the Bedrock API."""
|
||||
llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
|
||||
|
||||
# Set stop sequences via the stop attribute (simulating CrewAgentExecutor)
|
||||
llm.stop = ["\nObservation:", "\nThought:"]
|
||||
|
||||
# Patch the API call to capture parameters without making real call
|
||||
with patch.object(llm.client, 'converse') as mock_converse:
|
||||
mock_response = {
|
||||
'output': {
|
||||
'message': {
|
||||
'role': 'assistant',
|
||||
'content': [{'text': 'Hello'}]
|
||||
}
|
||||
},
|
||||
'usage': {
|
||||
'inputTokens': 10,
|
||||
'outputTokens': 5,
|
||||
'totalTokens': 15
|
||||
}
|
||||
}
|
||||
mock_converse.return_value = mock_response
|
||||
|
||||
llm.call("Say hello in one word")
|
||||
|
||||
# Verify stop_sequences were passed to the API in the inference config
|
||||
call_kwargs = mock_converse.call_args[1]
|
||||
assert "inferenceConfig" in call_kwargs
|
||||
assert "stopSequences" in call_kwargs["inferenceConfig"]
|
||||
assert call_kwargs["inferenceConfig"]["stopSequences"] == ["\nObservation:", "\nThought:"]
|
||||
|
||||
@@ -648,3 +648,55 @@ def test_gemini_token_usage_tracking():
|
||||
assert usage["candidates_token_count"] == 25
|
||||
assert usage["total_token_count"] == 75
|
||||
assert usage["total_tokens"] == 75
|
||||
|
||||
|
||||
def test_gemini_stop_sequences_sync():
|
||||
"""Test that stop and stop_sequences attributes stay synchronized."""
|
||||
llm = LLM(model="google/gemini-2.0-flash-001")
|
||||
|
||||
# Test setting stop as a list
|
||||
llm.stop = ["\nObservation:", "\nThought:"]
|
||||
assert llm.stop_sequences == ["\nObservation:", "\nThought:"]
|
||||
assert llm.stop == ["\nObservation:", "\nThought:"]
|
||||
|
||||
# Test setting stop as a string
|
||||
llm.stop = "\nFinal Answer:"
|
||||
assert llm.stop_sequences == ["\nFinal Answer:"]
|
||||
assert llm.stop == ["\nFinal Answer:"]
|
||||
|
||||
# Test setting stop as None
|
||||
llm.stop = None
|
||||
assert llm.stop_sequences == []
|
||||
assert llm.stop == []
|
||||
|
||||
|
||||
def test_gemini_stop_sequences_sent_to_api():
|
||||
"""Test that stop_sequences are properly sent to the Gemini API."""
|
||||
llm = LLM(model="google/gemini-2.0-flash-001")
|
||||
|
||||
# Set stop sequences via the stop attribute (simulating CrewAgentExecutor)
|
||||
llm.stop = ["\nObservation:", "\nThought:"]
|
||||
|
||||
# Patch the API call to capture parameters without making real call
|
||||
with patch.object(llm.client.models, 'generate_content') as mock_generate:
|
||||
mock_response = MagicMock()
|
||||
mock_response.text = "Hello"
|
||||
mock_response.candidates = []
|
||||
mock_response.usage_metadata = MagicMock(
|
||||
prompt_token_count=10,
|
||||
candidates_token_count=5,
|
||||
total_token_count=15
|
||||
)
|
||||
mock_generate.return_value = mock_response
|
||||
|
||||
llm.call("Say hello in one word")
|
||||
|
||||
# Verify stop_sequences were passed to the API in the config
|
||||
call_kwargs = mock_generate.call_args[1]
|
||||
assert "config" in call_kwargs
|
||||
# The config object should have stop_sequences set
|
||||
config = call_kwargs["config"]
|
||||
# Check if the config has stop_sequences attribute
|
||||
assert hasattr(config, 'stop_sequences') or 'stop_sequences' in config.__dict__
|
||||
if hasattr(config, 'stop_sequences'):
|
||||
assert config.stop_sequences == ["\nObservation:", "\nThought:"]
|
||||
|
||||
1
lib/crewai/tests/llms/hooks/__init__.py
Normal file
1
lib/crewai/tests/llms/hooks/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Tests for LLM interceptor hooks functionality."""
|
||||
311
lib/crewai/tests/llms/hooks/test_anthropic_interceptor.py
Normal file
311
lib/crewai/tests/llms/hooks/test_anthropic_interceptor.py
Normal file
@@ -0,0 +1,311 @@
|
||||
"""Tests for Anthropic provider with interceptor integration."""
|
||||
|
||||
import os
|
||||
|
||||
import httpx
|
||||
import pytest
|
||||
|
||||
from crewai.llm import LLM
|
||||
from crewai.llms.hooks.base import BaseInterceptor
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def setup_anthropic_api_key(monkeypatch):
|
||||
"""Set dummy Anthropic API key for tests that don't make real API calls."""
|
||||
if "ANTHROPIC_API_KEY" not in os.environ:
|
||||
monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-test-key-dummy")
|
||||
|
||||
|
||||
class AnthropicTestInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
"""Test interceptor for Anthropic provider."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize tracking and modification state."""
|
||||
self.outbound_calls: list[httpx.Request] = []
|
||||
self.inbound_calls: list[httpx.Response] = []
|
||||
self.custom_header_value = "anthropic-test-value"
|
||||
|
||||
def on_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Track and modify outbound Anthropic requests.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
Modified request with custom headers.
|
||||
"""
|
||||
self.outbound_calls.append(message)
|
||||
message.headers["X-Anthropic-Interceptor"] = self.custom_header_value
|
||||
message.headers["X-Request-ID"] = "test-request-456"
|
||||
return message
|
||||
|
||||
def on_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Track inbound Anthropic responses.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
The response with tracking header.
|
||||
"""
|
||||
self.inbound_calls.append(message)
|
||||
message.headers["X-Response-Tracked"] = "true"
|
||||
return message
|
||||
|
||||
|
||||
class TestAnthropicInterceptorIntegration:
|
||||
"""Test suite for Anthropic provider with interceptor."""
|
||||
|
||||
def test_anthropic_llm_accepts_interceptor(self) -> None:
|
||||
"""Test that Anthropic LLM accepts interceptor parameter."""
|
||||
interceptor = AnthropicTestInterceptor()
|
||||
|
||||
llm = LLM(model="anthropic/claude-3-5-sonnet-20241022", interceptor=interceptor)
|
||||
|
||||
assert llm.interceptor is interceptor
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
|
||||
def test_anthropic_call_with_interceptor_tracks_requests(self) -> None:
|
||||
"""Test that interceptor tracks Anthropic API requests."""
|
||||
interceptor = AnthropicTestInterceptor()
|
||||
llm = LLM(model="anthropic/claude-3-5-haiku-20241022", interceptor=interceptor)
|
||||
|
||||
# Make a simple completion call
|
||||
result = llm.call(
|
||||
messages=[{"role": "user", "content": "Say 'Hello World' and nothing else"}]
|
||||
)
|
||||
|
||||
# Verify custom headers were added
|
||||
for request in interceptor.outbound_calls:
|
||||
assert "X-Anthropic-Interceptor" in request.headers
|
||||
assert request.headers["X-Anthropic-Interceptor"] == "anthropic-test-value"
|
||||
assert "X-Request-ID" in request.headers
|
||||
assert request.headers["X-Request-ID"] == "test-request-456"
|
||||
|
||||
# Verify response was tracked
|
||||
for response in interceptor.inbound_calls:
|
||||
assert "X-Response-Tracked" in response.headers
|
||||
assert response.headers["X-Response-Tracked"] == "true"
|
||||
|
||||
# Verify result is valid
|
||||
assert result is not None
|
||||
assert isinstance(result, str)
|
||||
assert len(result) > 0
|
||||
|
||||
def test_anthropic_without_interceptor_works(self) -> None:
|
||||
"""Test that Anthropic LLM works without interceptor."""
|
||||
llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")
|
||||
|
||||
assert llm.interceptor is None
|
||||
|
||||
def test_multiple_anthropic_llms_different_interceptors(self) -> None:
|
||||
"""Test that multiple Anthropic LLMs can have different interceptors."""
|
||||
interceptor1 = AnthropicTestInterceptor()
|
||||
interceptor1.custom_header_value = "claude-opus-value"
|
||||
|
||||
interceptor2 = AnthropicTestInterceptor()
|
||||
interceptor2.custom_header_value = "claude-sonnet-value"
|
||||
|
||||
llm1 = LLM(model="anthropic/claude-3-opus-20240229", interceptor=interceptor1)
|
||||
llm2 = LLM(model="anthropic/claude-3-5-sonnet-20241022", interceptor=interceptor2)
|
||||
|
||||
assert llm1.interceptor is interceptor1
|
||||
assert llm2.interceptor is interceptor2
|
||||
assert llm1.interceptor.custom_header_value == "claude-opus-value"
|
||||
assert llm2.interceptor.custom_header_value == "claude-sonnet-value"
|
||||
|
||||
|
||||
class AnthropicLoggingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
"""Interceptor that logs Anthropic request/response details."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize logging lists."""
|
||||
self.request_urls: list[str] = []
|
||||
self.request_methods: list[str] = []
|
||||
self.response_status_codes: list[int] = []
|
||||
self.anthropic_version_headers: list[str] = []
|
||||
|
||||
def on_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Log outbound request details.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
The request unchanged.
|
||||
"""
|
||||
self.request_urls.append(str(message.url))
|
||||
self.request_methods.append(message.method)
|
||||
if "anthropic-version" in message.headers:
|
||||
self.anthropic_version_headers.append(message.headers["anthropic-version"])
|
||||
return message
|
||||
|
||||
def on_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Log inbound response details.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
The response unchanged.
|
||||
"""
|
||||
self.response_status_codes.append(message.status_code)
|
||||
return message
|
||||
|
||||
|
||||
class TestAnthropicLoggingInterceptor:
|
||||
"""Test suite for logging interceptor with Anthropic."""
|
||||
|
||||
def test_logging_interceptor_instantiation(self) -> None:
|
||||
"""Test that logging interceptor can be created with Anthropic LLM."""
|
||||
interceptor = AnthropicLoggingInterceptor()
|
||||
llm = LLM(model="anthropic/claude-3-5-sonnet-20241022", interceptor=interceptor)
|
||||
|
||||
assert llm.interceptor is interceptor
|
||||
assert isinstance(llm.interceptor, AnthropicLoggingInterceptor)
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
|
||||
def test_logging_interceptor_tracks_details(self) -> None:
|
||||
"""Test that logging interceptor tracks request/response details."""
|
||||
interceptor = AnthropicLoggingInterceptor()
|
||||
llm = LLM(model="anthropic/claude-3-5-haiku-20241022", interceptor=interceptor)
|
||||
|
||||
# Make a completion call
|
||||
result = llm.call(messages=[{"role": "user", "content": "Count from 1 to 3"}])
|
||||
|
||||
# Verify URL points to Anthropic API
|
||||
for url in interceptor.request_urls:
|
||||
assert "anthropic" in url.lower() or "api" in url.lower()
|
||||
|
||||
# Verify methods are POST (messages endpoint uses POST)
|
||||
for method in interceptor.request_methods:
|
||||
assert method == "POST"
|
||||
|
||||
# Verify successful status codes
|
||||
for status_code in interceptor.response_status_codes:
|
||||
assert 200 <= status_code < 300
|
||||
|
||||
|
||||
# Verify result is valid
|
||||
assert result is not None
|
||||
|
||||
|
||||
class AnthropicHeaderInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
"""Interceptor that adds Anthropic-specific headers."""
|
||||
|
||||
def __init__(self, workspace_id: str, user_id: str) -> None:
|
||||
"""Initialize with Anthropic-specific metadata.
|
||||
|
||||
Args:
|
||||
workspace_id: The workspace ID to inject.
|
||||
user_id: The user ID to inject.
|
||||
"""
|
||||
self.workspace_id = workspace_id
|
||||
self.user_id = user_id
|
||||
|
||||
def on_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Add custom metadata headers to request.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
Request with metadata headers.
|
||||
"""
|
||||
message.headers["X-Workspace-ID"] = self.workspace_id
|
||||
message.headers["X-User-ID"] = self.user_id
|
||||
message.headers["X-Custom-Client"] = "crewai-interceptor"
|
||||
return message
|
||||
|
||||
def on_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Pass through inbound response.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
The response unchanged.
|
||||
"""
|
||||
return message
|
||||
|
||||
|
||||
class TestAnthropicHeaderInterceptor:
|
||||
"""Test suite for header interceptor with Anthropic."""
|
||||
|
||||
def test_header_interceptor_with_anthropic(self) -> None:
|
||||
"""Test that header interceptor can be used with Anthropic LLM."""
|
||||
interceptor = AnthropicHeaderInterceptor(
|
||||
workspace_id="ws-789", user_id="user-012"
|
||||
)
|
||||
llm = LLM(model="anthropic/claude-3-5-sonnet-20241022", interceptor=interceptor)
|
||||
|
||||
assert llm.interceptor is interceptor
|
||||
assert llm.interceptor.workspace_id == "ws-789"
|
||||
assert llm.interceptor.user_id == "user-012"
|
||||
|
||||
def test_header_interceptor_adds_headers(self) -> None:
|
||||
"""Test that header interceptor adds custom headers to requests."""
|
||||
interceptor = AnthropicHeaderInterceptor(workspace_id="ws-123", user_id="u-456")
|
||||
request = httpx.Request("POST", "https://api.anthropic.com/v1/messages")
|
||||
|
||||
modified_request = interceptor.on_outbound(request)
|
||||
|
||||
assert "X-Workspace-ID" in modified_request.headers
|
||||
assert modified_request.headers["X-Workspace-ID"] == "ws-123"
|
||||
assert "X-User-ID" in modified_request.headers
|
||||
assert modified_request.headers["X-User-ID"] == "u-456"
|
||||
assert "X-Custom-Client" in modified_request.headers
|
||||
assert modified_request.headers["X-Custom-Client"] == "crewai-interceptor"
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
|
||||
def test_header_interceptor_with_real_call(self) -> None:
|
||||
"""Test that header interceptor works with real Anthropic API call."""
|
||||
interceptor = AnthropicHeaderInterceptor(workspace_id="ws-999", user_id="u-888")
|
||||
llm = LLM(model="anthropic/claude-3-5-haiku-20241022", interceptor=interceptor)
|
||||
|
||||
# Make a simple call
|
||||
result = llm.call(
|
||||
messages=[{"role": "user", "content": "Reply with just the word: SUCCESS"}]
|
||||
)
|
||||
|
||||
# Verify the call succeeded
|
||||
assert result is not None
|
||||
assert len(result) > 0
|
||||
|
||||
# Verify the interceptor was configured
|
||||
assert llm.interceptor is interceptor
|
||||
|
||||
|
||||
class TestMixedProviderInterceptors:
|
||||
"""Test suite for using interceptors with different providers."""
|
||||
|
||||
def test_openai_and_anthropic_different_interceptors(self) -> None:
|
||||
"""Test that OpenAI and Anthropic LLMs can have different interceptors."""
|
||||
openai_interceptor = AnthropicTestInterceptor()
|
||||
openai_interceptor.custom_header_value = "openai-specific"
|
||||
|
||||
anthropic_interceptor = AnthropicTestInterceptor()
|
||||
anthropic_interceptor.custom_header_value = "anthropic-specific"
|
||||
|
||||
openai_llm = LLM(model="gpt-4", interceptor=openai_interceptor)
|
||||
anthropic_llm = LLM(
|
||||
model="anthropic/claude-3-5-sonnet-20241022", interceptor=anthropic_interceptor
|
||||
)
|
||||
|
||||
assert openai_llm.interceptor is openai_interceptor
|
||||
assert anthropic_llm.interceptor is anthropic_interceptor
|
||||
assert openai_llm.interceptor.custom_header_value == "openai-specific"
|
||||
assert anthropic_llm.interceptor.custom_header_value == "anthropic-specific"
|
||||
|
||||
def test_same_interceptor_different_providers(self) -> None:
|
||||
"""Test that same interceptor instance can be used with multiple providers."""
|
||||
shared_interceptor = AnthropicTestInterceptor()
|
||||
|
||||
openai_llm = LLM(model="gpt-4", interceptor=shared_interceptor)
|
||||
anthropic_llm = LLM(
|
||||
model="anthropic/claude-3-5-sonnet-20241022", interceptor=shared_interceptor
|
||||
)
|
||||
|
||||
assert openai_llm.interceptor is shared_interceptor
|
||||
assert anthropic_llm.interceptor is shared_interceptor
|
||||
assert openai_llm.interceptor is anthropic_llm.interceptor
|
||||
287
lib/crewai/tests/llms/hooks/test_base_interceptor.py
Normal file
287
lib/crewai/tests/llms/hooks/test_base_interceptor.py
Normal file
@@ -0,0 +1,287 @@
|
||||
"""Tests for base interceptor functionality."""
|
||||
|
||||
import httpx
|
||||
import pytest
|
||||
|
||||
from crewai.llms.hooks.base import BaseInterceptor
|
||||
|
||||
|
||||
class SimpleInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
"""Simple test interceptor implementation."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize tracking lists."""
|
||||
self.outbound_calls: list[httpx.Request] = []
|
||||
self.inbound_calls: list[httpx.Response] = []
|
||||
|
||||
def on_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Track outbound calls.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
The request unchanged.
|
||||
"""
|
||||
self.outbound_calls.append(message)
|
||||
return message
|
||||
|
||||
def on_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Track inbound calls.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
The response unchanged.
|
||||
"""
|
||||
self.inbound_calls.append(message)
|
||||
return message
|
||||
|
||||
|
||||
class ModifyingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
"""Interceptor that modifies requests and responses."""
|
||||
|
||||
def on_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Add custom header to outbound request.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
Modified request with custom header.
|
||||
"""
|
||||
message.headers["X-Custom-Header"] = "test-value"
|
||||
message.headers["X-Intercepted"] = "true"
|
||||
return message
|
||||
|
||||
def on_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Add custom header to inbound response.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
Modified response with custom header.
|
||||
"""
|
||||
message.headers["X-Response-Intercepted"] = "true"
|
||||
return message
|
||||
|
||||
|
||||
class AsyncInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
"""Interceptor with async support."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize tracking lists."""
|
||||
self.async_outbound_calls: list[httpx.Request] = []
|
||||
self.async_inbound_calls: list[httpx.Response] = []
|
||||
|
||||
def on_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Handle sync outbound.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
The request unchanged.
|
||||
"""
|
||||
return message
|
||||
|
||||
def on_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Handle sync inbound.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
The response unchanged.
|
||||
"""
|
||||
return message
|
||||
|
||||
async def aon_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Handle async outbound.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
Modified request with async header.
|
||||
"""
|
||||
self.async_outbound_calls.append(message)
|
||||
message.headers["X-Async-Outbound"] = "true"
|
||||
return message
|
||||
|
||||
async def aon_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Handle async inbound.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
Modified response with async header.
|
||||
"""
|
||||
self.async_inbound_calls.append(message)
|
||||
message.headers["X-Async-Inbound"] = "true"
|
||||
return message
|
||||
|
||||
|
||||
class TestBaseInterceptor:
|
||||
"""Test suite for BaseInterceptor class."""
|
||||
|
||||
def test_interceptor_instantiation(self) -> None:
|
||||
"""Test that interceptor can be instantiated."""
|
||||
interceptor = SimpleInterceptor()
|
||||
assert interceptor is not None
|
||||
assert isinstance(interceptor, BaseInterceptor)
|
||||
|
||||
def test_on_outbound_called(self) -> None:
|
||||
"""Test that on_outbound is called and tracks requests."""
|
||||
interceptor = SimpleInterceptor()
|
||||
request = httpx.Request("GET", "https://api.example.com/test")
|
||||
|
||||
result = interceptor.on_outbound(request)
|
||||
|
||||
assert len(interceptor.outbound_calls) == 1
|
||||
assert interceptor.outbound_calls[0] is request
|
||||
assert result is request
|
||||
|
||||
def test_on_inbound_called(self) -> None:
|
||||
"""Test that on_inbound is called and tracks responses."""
|
||||
interceptor = SimpleInterceptor()
|
||||
response = httpx.Response(200, json={"status": "ok"})
|
||||
|
||||
result = interceptor.on_inbound(response)
|
||||
|
||||
assert len(interceptor.inbound_calls) == 1
|
||||
assert interceptor.inbound_calls[0] is response
|
||||
assert result is response
|
||||
|
||||
def test_multiple_outbound_calls(self) -> None:
|
||||
"""Test that interceptor tracks multiple outbound calls."""
|
||||
interceptor = SimpleInterceptor()
|
||||
requests = [
|
||||
httpx.Request("GET", "https://api.example.com/1"),
|
||||
httpx.Request("POST", "https://api.example.com/2"),
|
||||
httpx.Request("PUT", "https://api.example.com/3"),
|
||||
]
|
||||
|
||||
for req in requests:
|
||||
interceptor.on_outbound(req)
|
||||
|
||||
assert len(interceptor.outbound_calls) == 3
|
||||
assert interceptor.outbound_calls == requests
|
||||
|
||||
def test_multiple_inbound_calls(self) -> None:
|
||||
"""Test that interceptor tracks multiple inbound calls."""
|
||||
interceptor = SimpleInterceptor()
|
||||
responses = [
|
||||
httpx.Response(200, json={"id": 1}),
|
||||
httpx.Response(201, json={"id": 2}),
|
||||
httpx.Response(404, json={"error": "not found"}),
|
||||
]
|
||||
|
||||
for resp in responses:
|
||||
interceptor.on_inbound(resp)
|
||||
|
||||
assert len(interceptor.inbound_calls) == 3
|
||||
assert interceptor.inbound_calls == responses
|
||||
|
||||
|
||||
class TestModifyingInterceptor:
|
||||
"""Test suite for interceptor that modifies messages."""
|
||||
|
||||
def test_outbound_header_modification(self) -> None:
|
||||
"""Test that interceptor can add headers to outbound requests."""
|
||||
interceptor = ModifyingInterceptor()
|
||||
request = httpx.Request("GET", "https://api.example.com/test")
|
||||
|
||||
result = interceptor.on_outbound(request)
|
||||
|
||||
assert result is request
|
||||
assert "X-Custom-Header" in result.headers
|
||||
assert result.headers["X-Custom-Header"] == "test-value"
|
||||
assert "X-Intercepted" in result.headers
|
||||
assert result.headers["X-Intercepted"] == "true"
|
||||
|
||||
def test_inbound_header_modification(self) -> None:
|
||||
"""Test that interceptor can add headers to inbound responses."""
|
||||
interceptor = ModifyingInterceptor()
|
||||
response = httpx.Response(200, json={"status": "ok"})
|
||||
|
||||
result = interceptor.on_inbound(response)
|
||||
|
||||
assert result is response
|
||||
assert "X-Response-Intercepted" in result.headers
|
||||
assert result.headers["X-Response-Intercepted"] == "true"
|
||||
|
||||
def test_preserves_existing_headers(self) -> None:
|
||||
"""Test that interceptor preserves existing headers."""
|
||||
interceptor = ModifyingInterceptor()
|
||||
request = httpx.Request(
|
||||
"GET",
|
||||
"https://api.example.com/test",
|
||||
headers={"Authorization": "Bearer token123", "Content-Type": "application/json"},
|
||||
)
|
||||
|
||||
result = interceptor.on_outbound(request)
|
||||
|
||||
assert result.headers["Authorization"] == "Bearer token123"
|
||||
assert result.headers["Content-Type"] == "application/json"
|
||||
assert result.headers["X-Custom-Header"] == "test-value"
|
||||
|
||||
|
||||
class TestAsyncInterceptor:
|
||||
"""Test suite for async interceptor functionality."""
|
||||
|
||||
def test_sync_methods_work(self) -> None:
|
||||
"""Test that sync methods still work on async interceptor."""
|
||||
interceptor = AsyncInterceptor()
|
||||
request = httpx.Request("GET", "https://api.example.com/test")
|
||||
response = httpx.Response(200)
|
||||
|
||||
req_result = interceptor.on_outbound(request)
|
||||
resp_result = interceptor.on_inbound(response)
|
||||
|
||||
assert req_result is request
|
||||
assert resp_result is response
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_async_outbound(self) -> None:
|
||||
"""Test async outbound hook."""
|
||||
interceptor = AsyncInterceptor()
|
||||
request = httpx.Request("GET", "https://api.example.com/test")
|
||||
|
||||
result = await interceptor.aon_outbound(request)
|
||||
|
||||
assert result is request
|
||||
assert len(interceptor.async_outbound_calls) == 1
|
||||
assert interceptor.async_outbound_calls[0] is request
|
||||
assert "X-Async-Outbound" in result.headers
|
||||
assert result.headers["X-Async-Outbound"] == "true"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_async_inbound(self) -> None:
|
||||
"""Test async inbound hook."""
|
||||
interceptor = AsyncInterceptor()
|
||||
response = httpx.Response(200, json={"status": "ok"})
|
||||
|
||||
result = await interceptor.aon_inbound(response)
|
||||
|
||||
assert result is response
|
||||
assert len(interceptor.async_inbound_calls) == 1
|
||||
assert interceptor.async_inbound_calls[0] is response
|
||||
assert "X-Async-Inbound" in result.headers
|
||||
assert result.headers["X-Async-Inbound"] == "true"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_default_async_not_implemented(self) -> None:
|
||||
"""Test that default async methods raise NotImplementedError."""
|
||||
interceptor = SimpleInterceptor()
|
||||
request = httpx.Request("GET", "https://api.example.com/test")
|
||||
response = httpx.Response(200)
|
||||
|
||||
with pytest.raises(NotImplementedError):
|
||||
await interceptor.aon_outbound(request)
|
||||
|
||||
with pytest.raises(NotImplementedError):
|
||||
await interceptor.aon_inbound(response)
|
||||
262
lib/crewai/tests/llms/hooks/test_openai_interceptor.py
Normal file
262
lib/crewai/tests/llms/hooks/test_openai_interceptor.py
Normal file
@@ -0,0 +1,262 @@
|
||||
"""Tests for OpenAI provider with interceptor integration."""
|
||||
|
||||
import httpx
|
||||
import pytest
|
||||
|
||||
from crewai.llm import LLM
|
||||
from crewai.llms.hooks.base import BaseInterceptor
|
||||
|
||||
|
||||
class OpenAITestInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
"""Test interceptor for OpenAI provider."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize tracking and modification state."""
|
||||
self.outbound_calls: list[httpx.Request] = []
|
||||
self.inbound_calls: list[httpx.Response] = []
|
||||
self.custom_header_value = "openai-test-value"
|
||||
|
||||
def on_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Track and modify outbound OpenAI requests.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
Modified request with custom headers.
|
||||
"""
|
||||
self.outbound_calls.append(message)
|
||||
message.headers["X-OpenAI-Interceptor"] = self.custom_header_value
|
||||
message.headers["X-Request-ID"] = "test-request-123"
|
||||
return message
|
||||
|
||||
def on_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Track inbound OpenAI responses.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
The response with tracking header.
|
||||
"""
|
||||
self.inbound_calls.append(message)
|
||||
message.headers["X-Response-Tracked"] = "true"
|
||||
return message
|
||||
|
||||
|
||||
class TestOpenAIInterceptorIntegration:
|
||||
"""Test suite for OpenAI provider with interceptor."""
|
||||
|
||||
def test_openai_llm_accepts_interceptor(self) -> None:
|
||||
"""Test that OpenAI LLM accepts interceptor parameter."""
|
||||
interceptor = OpenAITestInterceptor()
|
||||
|
||||
llm = LLM(model="gpt-4", interceptor=interceptor)
|
||||
|
||||
assert llm.interceptor is interceptor
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_openai_call_with_interceptor_tracks_requests(self) -> None:
|
||||
"""Test that interceptor tracks OpenAI API requests."""
|
||||
interceptor = OpenAITestInterceptor()
|
||||
llm = LLM(model="gpt-4o-mini", interceptor=interceptor)
|
||||
|
||||
# Make a simple completion call
|
||||
result = llm.call(
|
||||
messages=[{"role": "user", "content": "Say 'Hello World' and nothing else"}]
|
||||
)
|
||||
|
||||
# Verify custom headers were added
|
||||
for request in interceptor.outbound_calls:
|
||||
assert "X-OpenAI-Interceptor" in request.headers
|
||||
assert request.headers["X-OpenAI-Interceptor"] == "openai-test-value"
|
||||
assert "X-Request-ID" in request.headers
|
||||
assert request.headers["X-Request-ID"] == "test-request-123"
|
||||
|
||||
# Verify response was tracked
|
||||
for response in interceptor.inbound_calls:
|
||||
assert "X-Response-Tracked" in response.headers
|
||||
assert response.headers["X-Response-Tracked"] == "true"
|
||||
|
||||
# Verify result is valid
|
||||
assert result is not None
|
||||
assert isinstance(result, str)
|
||||
assert len(result) > 0
|
||||
|
||||
def test_openai_without_interceptor_works(self) -> None:
|
||||
"""Test that OpenAI LLM works without interceptor."""
|
||||
llm = LLM(model="gpt-4")
|
||||
|
||||
assert llm.interceptor is None
|
||||
|
||||
def test_multiple_openai_llms_different_interceptors(self) -> None:
|
||||
"""Test that multiple OpenAI LLMs can have different interceptors."""
|
||||
interceptor1 = OpenAITestInterceptor()
|
||||
interceptor1.custom_header_value = "llm1-value"
|
||||
|
||||
interceptor2 = OpenAITestInterceptor()
|
||||
interceptor2.custom_header_value = "llm2-value"
|
||||
|
||||
llm1 = LLM(model="gpt-4", interceptor=interceptor1)
|
||||
llm2 = LLM(model="gpt-3.5-turbo", interceptor=interceptor2)
|
||||
|
||||
assert llm1.interceptor is interceptor1
|
||||
assert llm2.interceptor is interceptor2
|
||||
assert llm1.interceptor.custom_header_value == "llm1-value"
|
||||
assert llm2.interceptor.custom_header_value == "llm2-value"
|
||||
|
||||
|
||||
class LoggingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
"""Interceptor that logs request/response details for testing."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize logging lists."""
|
||||
self.request_urls: list[str] = []
|
||||
self.request_methods: list[str] = []
|
||||
self.response_status_codes: list[int] = []
|
||||
|
||||
def on_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Log outbound request details.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
The request unchanged.
|
||||
"""
|
||||
self.request_urls.append(str(message.url))
|
||||
self.request_methods.append(message.method)
|
||||
return message
|
||||
|
||||
def on_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Log inbound response details.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
The response unchanged.
|
||||
"""
|
||||
self.response_status_codes.append(message.status_code)
|
||||
return message
|
||||
|
||||
|
||||
class TestOpenAILoggingInterceptor:
|
||||
"""Test suite for logging interceptor with OpenAI."""
|
||||
|
||||
def test_logging_interceptor_instantiation(self) -> None:
|
||||
"""Test that logging interceptor can be created with OpenAI LLM."""
|
||||
interceptor = LoggingInterceptor()
|
||||
llm = LLM(model="gpt-4", interceptor=interceptor)
|
||||
|
||||
assert llm.interceptor is interceptor
|
||||
assert isinstance(llm.interceptor, LoggingInterceptor)
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_logging_interceptor_tracks_details(self) -> None:
|
||||
"""Test that logging interceptor tracks request/response details."""
|
||||
interceptor = LoggingInterceptor()
|
||||
llm = LLM(model="gpt-4o-mini", interceptor=interceptor)
|
||||
|
||||
# Make a completion call
|
||||
result = llm.call(
|
||||
messages=[{"role": "user", "content": "Count from 1 to 3"}]
|
||||
)
|
||||
|
||||
# Verify URL points to OpenAI API
|
||||
for url in interceptor.request_urls:
|
||||
assert "openai" in url.lower() or "api" in url.lower()
|
||||
|
||||
# Verify methods are POST (chat completions use POST)
|
||||
for method in interceptor.request_methods:
|
||||
assert method == "POST"
|
||||
|
||||
# Verify successful status codes
|
||||
for status_code in interceptor.response_status_codes:
|
||||
assert 200 <= status_code < 300
|
||||
|
||||
# Verify result is valid
|
||||
assert result is not None
|
||||
|
||||
|
||||
class AuthInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
"""Interceptor that adds authentication headers."""
|
||||
|
||||
def __init__(self, api_key: str, org_id: str) -> None:
|
||||
"""Initialize with auth credentials.
|
||||
|
||||
Args:
|
||||
api_key: The API key to inject.
|
||||
org_id: The organization ID to inject.
|
||||
"""
|
||||
self.api_key = api_key
|
||||
self.org_id = org_id
|
||||
|
||||
def on_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Add authentication headers to request.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
Request with auth headers.
|
||||
"""
|
||||
message.headers["X-Custom-API-Key"] = self.api_key
|
||||
message.headers["X-Organization-ID"] = self.org_id
|
||||
return message
|
||||
|
||||
def on_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Pass through inbound response.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
The response unchanged.
|
||||
"""
|
||||
return message
|
||||
|
||||
|
||||
class TestOpenAIAuthInterceptor:
|
||||
"""Test suite for authentication interceptor with OpenAI."""
|
||||
|
||||
def test_auth_interceptor_with_openai(self) -> None:
|
||||
"""Test that auth interceptor can be used with OpenAI LLM."""
|
||||
interceptor = AuthInterceptor(api_key="custom-key-123", org_id="org-456")
|
||||
llm = LLM(model="gpt-4", interceptor=interceptor)
|
||||
|
||||
assert llm.interceptor is interceptor
|
||||
assert llm.interceptor.api_key == "custom-key-123"
|
||||
assert llm.interceptor.org_id == "org-456"
|
||||
|
||||
def test_auth_interceptor_adds_headers(self) -> None:
|
||||
"""Test that auth interceptor adds custom headers to requests."""
|
||||
interceptor = AuthInterceptor(api_key="test-key", org_id="test-org")
|
||||
request = httpx.Request("POST", "https://api.openai.com/v1/chat/completions")
|
||||
|
||||
modified_request = interceptor.on_outbound(request)
|
||||
|
||||
assert "X-Custom-API-Key" in modified_request.headers
|
||||
assert modified_request.headers["X-Custom-API-Key"] == "test-key"
|
||||
assert "X-Organization-ID" in modified_request.headers
|
||||
assert modified_request.headers["X-Organization-ID"] == "test-org"
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_auth_interceptor_with_real_call(self) -> None:
|
||||
"""Test that auth interceptor works with real OpenAI API call."""
|
||||
interceptor = AuthInterceptor(api_key="custom-123", org_id="org-789")
|
||||
llm = LLM(model="gpt-4o-mini", interceptor=interceptor)
|
||||
|
||||
# Make a simple call
|
||||
result = llm.call(
|
||||
messages=[{"role": "user", "content": "Reply with just the word: SUCCESS"}]
|
||||
)
|
||||
|
||||
# Verify the call succeeded
|
||||
assert result is not None
|
||||
assert len(result) > 0
|
||||
|
||||
# Verify headers were added to outbound requests
|
||||
# (We can't directly inspect the request sent to OpenAI in this test,
|
||||
# but we verify the interceptor was configured and the call succeeded)
|
||||
assert llm.interceptor is interceptor
|
||||
248
lib/crewai/tests/llms/hooks/test_transport.py
Normal file
248
lib/crewai/tests/llms/hooks/test_transport.py
Normal file
@@ -0,0 +1,248 @@
|
||||
"""Tests for transport layer with interceptor integration."""
|
||||
|
||||
from unittest.mock import Mock
|
||||
|
||||
import httpx
|
||||
import pytest
|
||||
|
||||
from crewai.llms.hooks.base import BaseInterceptor
|
||||
from crewai.llms.hooks.transport import AsyncHTTPTransport, HTTPTransport
|
||||
|
||||
|
||||
class TrackingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
"""Test interceptor that tracks all calls."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize tracking lists."""
|
||||
self.outbound_calls: list[httpx.Request] = []
|
||||
self.inbound_calls: list[httpx.Response] = []
|
||||
self.async_outbound_calls: list[httpx.Request] = []
|
||||
self.async_inbound_calls: list[httpx.Response] = []
|
||||
|
||||
def on_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Track outbound calls and add header.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
Modified request with tracking header.
|
||||
"""
|
||||
self.outbound_calls.append(message)
|
||||
message.headers["X-Intercepted-Sync"] = "true"
|
||||
return message
|
||||
|
||||
def on_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Track inbound calls.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
The response with tracking header.
|
||||
"""
|
||||
self.inbound_calls.append(message)
|
||||
message.headers["X-Response-Intercepted-Sync"] = "true"
|
||||
return message
|
||||
|
||||
async def aon_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Track async outbound calls and add header.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
Modified request with tracking header.
|
||||
"""
|
||||
self.async_outbound_calls.append(message)
|
||||
message.headers["X-Intercepted-Async"] = "true"
|
||||
return message
|
||||
|
||||
async def aon_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Track async inbound calls.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
The response with tracking header.
|
||||
"""
|
||||
self.async_inbound_calls.append(message)
|
||||
message.headers["X-Response-Intercepted-Async"] = "true"
|
||||
return message
|
||||
|
||||
|
||||
class TestHTTPTransport:
|
||||
"""Test suite for sync HTTPTransport with interceptor."""
|
||||
|
||||
def test_transport_instantiation(self) -> None:
|
||||
"""Test that transport can be instantiated with interceptor."""
|
||||
interceptor = TrackingInterceptor()
|
||||
transport = HTTPTransport(interceptor=interceptor)
|
||||
|
||||
assert transport.interceptor is interceptor
|
||||
|
||||
def test_transport_requires_interceptor(self) -> None:
|
||||
"""Test that transport requires interceptor parameter."""
|
||||
# HTTPTransport requires an interceptor parameter
|
||||
with pytest.raises(TypeError):
|
||||
HTTPTransport()
|
||||
|
||||
def test_interceptor_called_on_request(self) -> None:
|
||||
"""Test that interceptor hooks are called during request handling."""
|
||||
interceptor = TrackingInterceptor()
|
||||
transport = HTTPTransport(interceptor=interceptor)
|
||||
|
||||
# Create a mock parent transport that returns a response
|
||||
mock_response = httpx.Response(200, json={"success": True})
|
||||
mock_parent_handle = Mock(return_value=mock_response)
|
||||
|
||||
# Monkey-patch the parent's handle_request
|
||||
original_handle = httpx.HTTPTransport.handle_request
|
||||
httpx.HTTPTransport.handle_request = mock_parent_handle
|
||||
|
||||
try:
|
||||
request = httpx.Request("GET", "https://api.example.com/test")
|
||||
response = transport.handle_request(request)
|
||||
|
||||
# Verify interceptor was called
|
||||
assert len(interceptor.outbound_calls) == 1
|
||||
assert len(interceptor.inbound_calls) == 1
|
||||
assert interceptor.outbound_calls[0] is request
|
||||
assert interceptor.inbound_calls[0] is response
|
||||
|
||||
# Verify headers were added
|
||||
assert "X-Intercepted-Sync" in request.headers
|
||||
assert request.headers["X-Intercepted-Sync"] == "true"
|
||||
assert "X-Response-Intercepted-Sync" in response.headers
|
||||
assert response.headers["X-Response-Intercepted-Sync"] == "true"
|
||||
finally:
|
||||
# Restore original method
|
||||
httpx.HTTPTransport.handle_request = original_handle
|
||||
|
||||
|
||||
|
||||
class TestAsyncHTTPTransport:
|
||||
"""Test suite for async AsyncHTTPransport with interceptor."""
|
||||
|
||||
def test_async_transport_instantiation(self) -> None:
|
||||
"""Test that async transport can be instantiated with interceptor."""
|
||||
interceptor = TrackingInterceptor()
|
||||
transport = AsyncHTTPTransport(interceptor=interceptor)
|
||||
|
||||
assert transport.interceptor is interceptor
|
||||
|
||||
def test_async_transport_requires_interceptor(self) -> None:
|
||||
"""Test that async transport requires interceptor parameter."""
|
||||
# AsyncHTTPransport requires an interceptor parameter
|
||||
with pytest.raises(TypeError):
|
||||
AsyncHTTPTransport()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_async_interceptor_called_on_request(self) -> None:
|
||||
"""Test that async interceptor hooks are called during request handling."""
|
||||
interceptor = TrackingInterceptor()
|
||||
transport = AsyncHTTPTransport(interceptor=interceptor)
|
||||
|
||||
# Create a mock parent transport that returns a response
|
||||
mock_response = httpx.Response(200, json={"success": True})
|
||||
|
||||
async def mock_handle(*args, **kwargs):
|
||||
return mock_response
|
||||
|
||||
mock_parent_handle = Mock(side_effect=mock_handle)
|
||||
|
||||
# Monkey-patch the parent's handle_async_request
|
||||
original_handle = httpx.AsyncHTTPTransport.handle_async_request
|
||||
httpx.AsyncHTTPTransport.handle_async_request = mock_parent_handle
|
||||
|
||||
try:
|
||||
request = httpx.Request("GET", "https://api.example.com/test")
|
||||
response = await transport.handle_async_request(request)
|
||||
|
||||
# Verify async interceptor was called
|
||||
assert len(interceptor.async_outbound_calls) == 1
|
||||
assert len(interceptor.async_inbound_calls) == 1
|
||||
assert interceptor.async_outbound_calls[0] is request
|
||||
assert interceptor.async_inbound_calls[0] is response
|
||||
|
||||
# Verify sync interceptor was NOT called
|
||||
assert len(interceptor.outbound_calls) == 0
|
||||
assert len(interceptor.inbound_calls) == 0
|
||||
|
||||
# Verify async headers were added
|
||||
assert "X-Intercepted-Async" in request.headers
|
||||
assert request.headers["X-Intercepted-Async"] == "true"
|
||||
assert "X-Response-Intercepted-Async" in response.headers
|
||||
assert response.headers["X-Response-Intercepted-Async"] == "true"
|
||||
finally:
|
||||
# Restore original method
|
||||
httpx.AsyncHTTPTransport.handle_async_request = original_handle
|
||||
|
||||
|
||||
|
||||
class TestTransportIntegration:
|
||||
"""Test suite for transport integration scenarios."""
|
||||
|
||||
def test_multiple_requests_same_interceptor(self) -> None:
|
||||
"""Test that multiple requests through same interceptor are tracked."""
|
||||
interceptor = TrackingInterceptor()
|
||||
transport = HTTPTransport(interceptor=interceptor)
|
||||
|
||||
mock_response = httpx.Response(200)
|
||||
mock_parent_handle = Mock(return_value=mock_response)
|
||||
|
||||
original_handle = httpx.HTTPTransport.handle_request
|
||||
httpx.HTTPTransport.handle_request = mock_parent_handle
|
||||
|
||||
try:
|
||||
# Make multiple requests
|
||||
requests = [
|
||||
httpx.Request("GET", "https://api.example.com/1"),
|
||||
httpx.Request("POST", "https://api.example.com/2"),
|
||||
httpx.Request("PUT", "https://api.example.com/3"),
|
||||
]
|
||||
|
||||
for req in requests:
|
||||
transport.handle_request(req)
|
||||
|
||||
# Verify all requests were intercepted
|
||||
assert len(interceptor.outbound_calls) == 3
|
||||
assert len(interceptor.inbound_calls) == 3
|
||||
assert interceptor.outbound_calls == requests
|
||||
finally:
|
||||
httpx.HTTPTransport.handle_request = original_handle
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_multiple_async_requests_same_interceptor(self) -> None:
|
||||
"""Test that multiple async requests through same interceptor are tracked."""
|
||||
interceptor = TrackingInterceptor()
|
||||
transport = AsyncHTTPTransport(interceptor=interceptor)
|
||||
|
||||
mock_response = httpx.Response(200)
|
||||
|
||||
async def mock_handle(*args, **kwargs):
|
||||
return mock_response
|
||||
|
||||
mock_parent_handle = Mock(side_effect=mock_handle)
|
||||
|
||||
original_handle = httpx.AsyncHTTPTransport.handle_async_request
|
||||
httpx.AsyncHTTPTransport.handle_async_request = mock_parent_handle
|
||||
|
||||
try:
|
||||
# Make multiple async requests
|
||||
requests = [
|
||||
httpx.Request("GET", "https://api.example.com/1"),
|
||||
httpx.Request("POST", "https://api.example.com/2"),
|
||||
httpx.Request("DELETE", "https://api.example.com/3"),
|
||||
]
|
||||
|
||||
for req in requests:
|
||||
await transport.handle_async_request(req)
|
||||
|
||||
# Verify all requests were intercepted
|
||||
assert len(interceptor.async_outbound_calls) == 3
|
||||
assert len(interceptor.async_inbound_calls) == 3
|
||||
assert interceptor.async_outbound_calls == requests
|
||||
finally:
|
||||
httpx.AsyncHTTPTransport.handle_async_request = original_handle
|
||||
319
lib/crewai/tests/llms/hooks/test_unsupported_providers.py
Normal file
319
lib/crewai/tests/llms/hooks/test_unsupported_providers.py
Normal file
@@ -0,0 +1,319 @@
|
||||
"""Tests for interceptor behavior with unsupported providers."""
|
||||
|
||||
import os
|
||||
|
||||
import httpx
|
||||
import pytest
|
||||
|
||||
from crewai.llm import LLM
|
||||
from crewai.llms.hooks.base import BaseInterceptor
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def setup_provider_api_keys(monkeypatch):
|
||||
"""Set dummy API keys for providers that require them."""
|
||||
if "OPENAI_API_KEY" not in os.environ:
|
||||
monkeypatch.setenv("OPENAI_API_KEY", "sk-test-key-dummy")
|
||||
if "ANTHROPIC_API_KEY" not in os.environ:
|
||||
monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-test-key-dummy")
|
||||
if "GOOGLE_API_KEY" not in os.environ:
|
||||
monkeypatch.setenv("GOOGLE_API_KEY", "test-google-key-dummy")
|
||||
|
||||
|
||||
class DummyInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
|
||||
"""Simple dummy interceptor for testing."""
|
||||
|
||||
def on_outbound(self, message: httpx.Request) -> httpx.Request:
|
||||
"""Pass through outbound request.
|
||||
|
||||
Args:
|
||||
message: The outbound request.
|
||||
|
||||
Returns:
|
||||
The request unchanged.
|
||||
"""
|
||||
message.headers["X-Dummy"] = "true"
|
||||
return message
|
||||
|
||||
def on_inbound(self, message: httpx.Response) -> httpx.Response:
|
||||
"""Pass through inbound response.
|
||||
|
||||
Args:
|
||||
message: The inbound response.
|
||||
|
||||
Returns:
|
||||
The response unchanged.
|
||||
"""
|
||||
return message
|
||||
|
||||
|
||||
class TestAzureProviderInterceptor:
|
||||
"""Test suite for Azure provider with interceptor (unsupported)."""
|
||||
|
||||
def test_azure_llm_accepts_interceptor_parameter(self) -> None:
|
||||
"""Test that Azure LLM raises NotImplementedError with interceptor."""
|
||||
interceptor = DummyInterceptor()
|
||||
|
||||
# Azure provider should raise NotImplementedError
|
||||
with pytest.raises(NotImplementedError) as exc_info:
|
||||
LLM(
|
||||
model="azure/gpt-4",
|
||||
interceptor=interceptor,
|
||||
api_key="test-key",
|
||||
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
|
||||
)
|
||||
|
||||
assert "interceptor" in str(exc_info.value).lower()
|
||||
|
||||
def test_azure_raises_not_implemented_on_initialization(self) -> None:
|
||||
"""Test that Azure raises NotImplementedError when interceptor is used."""
|
||||
interceptor = DummyInterceptor()
|
||||
|
||||
with pytest.raises(NotImplementedError) as exc_info:
|
||||
LLM(
|
||||
model="azure/gpt-4",
|
||||
interceptor=interceptor,
|
||||
api_key="test-key",
|
||||
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
|
||||
)
|
||||
|
||||
error_msg = str(exc_info.value).lower()
|
||||
assert "interceptor" in error_msg
|
||||
assert "azure" in error_msg
|
||||
|
||||
def test_azure_without_interceptor_works(self) -> None:
|
||||
"""Test that Azure LLM works without interceptor."""
|
||||
llm = LLM(
|
||||
model="azure/gpt-4",
|
||||
api_key="test-key",
|
||||
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
|
||||
)
|
||||
|
||||
# Azure provider doesn't have interceptor attribute
|
||||
assert not hasattr(llm, 'interceptor') or llm.interceptor is None
|
||||
|
||||
|
||||
class TestBedrockProviderInterceptor:
|
||||
"""Test suite for Bedrock provider with interceptor (unsupported)."""
|
||||
|
||||
def test_bedrock_llm_accepts_interceptor_parameter(self) -> None:
|
||||
"""Test that Bedrock LLM raises NotImplementedError with interceptor."""
|
||||
interceptor = DummyInterceptor()
|
||||
|
||||
# Bedrock provider should raise NotImplementedError
|
||||
with pytest.raises(NotImplementedError) as exc_info:
|
||||
LLM(
|
||||
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
||||
interceptor=interceptor,
|
||||
aws_access_key_id="test-access-key",
|
||||
aws_secret_access_key="test-secret-key",
|
||||
aws_region_name="us-east-1",
|
||||
)
|
||||
|
||||
error_msg = str(exc_info.value).lower()
|
||||
assert "interceptor" in error_msg
|
||||
assert "bedrock" in error_msg
|
||||
|
||||
def test_bedrock_raises_not_implemented_on_initialization(self) -> None:
|
||||
"""Test that Bedrock raises NotImplementedError when interceptor is used."""
|
||||
interceptor = DummyInterceptor()
|
||||
|
||||
with pytest.raises(NotImplementedError) as exc_info:
|
||||
LLM(
|
||||
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
||||
interceptor=interceptor,
|
||||
aws_access_key_id="test-access-key",
|
||||
aws_secret_access_key="test-secret-key",
|
||||
aws_region_name="us-east-1",
|
||||
)
|
||||
|
||||
error_msg = str(exc_info.value).lower()
|
||||
assert "interceptor" in error_msg
|
||||
assert "bedrock" in error_msg
|
||||
|
||||
def test_bedrock_without_interceptor_works(self) -> None:
|
||||
"""Test that Bedrock LLM works without interceptor."""
|
||||
llm = LLM(
|
||||
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
||||
aws_access_key_id="test-access-key",
|
||||
aws_secret_access_key="test-secret-key",
|
||||
aws_region_name="us-east-1",
|
||||
)
|
||||
|
||||
# Bedrock provider doesn't have interceptor attribute
|
||||
assert not hasattr(llm, 'interceptor') or llm.interceptor is None
|
||||
|
||||
|
||||
class TestGeminiProviderInterceptor:
|
||||
"""Test suite for Gemini provider with interceptor (unsupported)."""
|
||||
|
||||
def test_gemini_llm_accepts_interceptor_parameter(self) -> None:
|
||||
"""Test that Gemini LLM raises NotImplementedError with interceptor."""
|
||||
interceptor = DummyInterceptor()
|
||||
|
||||
# Gemini provider should raise NotImplementedError
|
||||
with pytest.raises(NotImplementedError) as exc_info:
|
||||
LLM(
|
||||
model="gemini/gemini-pro",
|
||||
interceptor=interceptor,
|
||||
api_key="test-gemini-key",
|
||||
)
|
||||
|
||||
error_msg = str(exc_info.value).lower()
|
||||
assert "interceptor" in error_msg
|
||||
assert "gemini" in error_msg
|
||||
|
||||
def test_gemini_raises_not_implemented_on_initialization(self) -> None:
|
||||
"""Test that Gemini raises NotImplementedError when interceptor is used."""
|
||||
interceptor = DummyInterceptor()
|
||||
|
||||
with pytest.raises(NotImplementedError) as exc_info:
|
||||
LLM(
|
||||
model="gemini/gemini-pro",
|
||||
interceptor=interceptor,
|
||||
api_key="test-gemini-key",
|
||||
)
|
||||
|
||||
error_msg = str(exc_info.value).lower()
|
||||
assert "interceptor" in error_msg
|
||||
assert "gemini" in error_msg
|
||||
|
||||
def test_gemini_without_interceptor_works(self) -> None:
|
||||
"""Test that Gemini LLM works without interceptor."""
|
||||
llm = LLM(
|
||||
model="gemini/gemini-pro",
|
||||
api_key="test-gemini-key",
|
||||
)
|
||||
|
||||
# Gemini provider doesn't have interceptor attribute
|
||||
assert not hasattr(llm, 'interceptor') or llm.interceptor is None
|
||||
|
||||
|
||||
class TestUnsupportedProviderMessages:
|
||||
"""Test suite for error messages from unsupported providers."""
|
||||
|
||||
def test_azure_error_message_is_clear(self) -> None:
|
||||
"""Test that Azure error message clearly states lack of support."""
|
||||
interceptor = DummyInterceptor()
|
||||
|
||||
with pytest.raises(NotImplementedError) as exc_info:
|
||||
LLM(
|
||||
model="azure/gpt-4",
|
||||
interceptor=interceptor,
|
||||
api_key="test-key",
|
||||
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
|
||||
)
|
||||
|
||||
error_message = str(exc_info.value).lower()
|
||||
assert "azure" in error_message
|
||||
assert "interceptor" in error_message
|
||||
|
||||
def test_bedrock_error_message_is_clear(self) -> None:
|
||||
"""Test that Bedrock error message clearly states lack of support."""
|
||||
interceptor = DummyInterceptor()
|
||||
|
||||
with pytest.raises(NotImplementedError) as exc_info:
|
||||
LLM(
|
||||
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
||||
interceptor=interceptor,
|
||||
aws_access_key_id="test-access-key",
|
||||
aws_secret_access_key="test-secret-key",
|
||||
aws_region_name="us-east-1",
|
||||
)
|
||||
|
||||
error_message = str(exc_info.value).lower()
|
||||
assert "bedrock" in error_message
|
||||
assert "interceptor" in error_message
|
||||
|
||||
def test_gemini_error_message_is_clear(self) -> None:
|
||||
"""Test that Gemini error message clearly states lack of support."""
|
||||
interceptor = DummyInterceptor()
|
||||
|
||||
with pytest.raises(NotImplementedError) as exc_info:
|
||||
LLM(
|
||||
model="gemini/gemini-pro",
|
||||
interceptor=interceptor,
|
||||
api_key="test-gemini-key",
|
||||
)
|
||||
|
||||
error_message = str(exc_info.value).lower()
|
||||
assert "gemini" in error_message
|
||||
assert "interceptor" in error_message
|
||||
|
||||
|
||||
class TestProviderSupportMatrix:
|
||||
"""Test suite to document which providers support interceptors."""
|
||||
|
||||
def test_supported_providers_accept_interceptor(self) -> None:
|
||||
"""Test that supported providers accept and use interceptors."""
|
||||
interceptor = DummyInterceptor()
|
||||
|
||||
# OpenAI - SUPPORTED
|
||||
openai_llm = LLM(model="gpt-4", interceptor=interceptor)
|
||||
assert openai_llm.interceptor is interceptor
|
||||
|
||||
# Anthropic - SUPPORTED
|
||||
anthropic_llm = LLM(model="anthropic/claude-3-opus-20240229", interceptor=interceptor)
|
||||
assert anthropic_llm.interceptor is interceptor
|
||||
|
||||
def test_unsupported_providers_raise_error(self) -> None:
|
||||
"""Test that unsupported providers raise NotImplementedError."""
|
||||
interceptor = DummyInterceptor()
|
||||
|
||||
# Azure - NOT SUPPORTED
|
||||
with pytest.raises(NotImplementedError):
|
||||
LLM(
|
||||
model="azure/gpt-4",
|
||||
interceptor=interceptor,
|
||||
api_key="test",
|
||||
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
|
||||
)
|
||||
|
||||
# Bedrock - NOT SUPPORTED
|
||||
with pytest.raises(NotImplementedError):
|
||||
LLM(
|
||||
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
||||
interceptor=interceptor,
|
||||
aws_access_key_id="test",
|
||||
aws_secret_access_key="test",
|
||||
aws_region_name="us-east-1",
|
||||
)
|
||||
|
||||
# Gemini - NOT SUPPORTED
|
||||
with pytest.raises(NotImplementedError):
|
||||
LLM(
|
||||
model="gemini/gemini-pro",
|
||||
interceptor=interceptor,
|
||||
api_key="test",
|
||||
)
|
||||
|
||||
def test_all_providers_work_without_interceptor(self) -> None:
|
||||
"""Test that all providers work normally without interceptor."""
|
||||
# OpenAI
|
||||
openai_llm = LLM(model="gpt-4")
|
||||
assert openai_llm.interceptor is None
|
||||
|
||||
# Anthropic
|
||||
anthropic_llm = LLM(model="anthropic/claude-3-opus-20240229")
|
||||
assert anthropic_llm.interceptor is None
|
||||
|
||||
# Azure - doesn't have interceptor attribute
|
||||
azure_llm = LLM(
|
||||
model="azure/gpt-4",
|
||||
api_key="test",
|
||||
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
|
||||
)
|
||||
assert not hasattr(azure_llm, 'interceptor') or azure_llm.interceptor is None
|
||||
|
||||
# Bedrock - doesn't have interceptor attribute
|
||||
bedrock_llm = LLM(
|
||||
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
||||
aws_access_key_id="test",
|
||||
aws_secret_access_key="test",
|
||||
aws_region_name="us-east-1",
|
||||
)
|
||||
assert not hasattr(bedrock_llm, 'interceptor') or bedrock_llm.interceptor is None
|
||||
|
||||
# Gemini - doesn't have interceptor attribute
|
||||
gemini_llm = LLM(model="gemini/gemini-pro", api_key="test")
|
||||
assert not hasattr(gemini_llm, 'interceptor') or gemini_llm.interceptor is None
|
||||
4
lib/crewai/tests/mcp/__init__.py
Normal file
4
lib/crewai/tests/mcp/__init__.py
Normal file
@@ -0,0 +1,4 @@
|
||||
"""Tests for MCP (Model Context Protocol) integration."""
|
||||
|
||||
|
||||
|
||||
200
lib/crewai/tests/mcp/test_mcp_config.py
Normal file
200
lib/crewai/tests/mcp/test_mcp_config.py
Normal file
@@ -0,0 +1,200 @@
|
||||
import asyncio
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from crewai.agent.core import Agent
|
||||
from crewai.mcp.config import MCPServerHTTP, MCPServerSSE, MCPServerStdio
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_tool_definitions():
|
||||
"""Create mock MCP tool definitions (as returned by list_tools)."""
|
||||
return [
|
||||
{
|
||||
"name": "test_tool_1",
|
||||
"description": "Test tool 1 description",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {"type": "string", "description": "Search query"}
|
||||
},
|
||||
"required": ["query"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "test_tool_2",
|
||||
"description": "Test tool 2 description",
|
||||
"inputSchema": {}
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_agent_with_stdio_mcp_config(mock_tool_definitions):
|
||||
"""Test agent setup with MCPServerStdio configuration."""
|
||||
stdio_config = MCPServerStdio(
|
||||
command="python",
|
||||
args=["server.py"],
|
||||
env={"API_KEY": "test_key"},
|
||||
)
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
mcps=[stdio_config],
|
||||
)
|
||||
|
||||
|
||||
with patch("crewai.agent.core.MCPClient") as mock_client_class:
|
||||
mock_client = AsyncMock()
|
||||
mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
|
||||
mock_client.connected = False # Will trigger connect
|
||||
mock_client.connect = AsyncMock()
|
||||
mock_client.disconnect = AsyncMock()
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
tools = agent.get_mcp_tools([stdio_config])
|
||||
|
||||
assert len(tools) == 2
|
||||
assert all(isinstance(tool, BaseTool) for tool in tools)
|
||||
|
||||
mock_client_class.assert_called_once()
|
||||
call_args = mock_client_class.call_args
|
||||
transport = call_args.kwargs["transport"]
|
||||
assert transport.command == "python"
|
||||
assert transport.args == ["server.py"]
|
||||
assert transport.env == {"API_KEY": "test_key"}
|
||||
|
||||
|
||||
def test_agent_with_http_mcp_config(mock_tool_definitions):
|
||||
"""Test agent setup with MCPServerHTTP configuration."""
|
||||
http_config = MCPServerHTTP(
|
||||
url="https://api.example.com/mcp",
|
||||
headers={"Authorization": "Bearer test_token"},
|
||||
streamable=True,
|
||||
)
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
mcps=[http_config],
|
||||
)
|
||||
|
||||
with patch("crewai.agent.core.MCPClient") as mock_client_class:
|
||||
mock_client = AsyncMock()
|
||||
mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
|
||||
mock_client.connected = False # Will trigger connect
|
||||
mock_client.connect = AsyncMock()
|
||||
mock_client.disconnect = AsyncMock()
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
tools = agent.get_mcp_tools([http_config])
|
||||
|
||||
assert len(tools) == 2
|
||||
assert all(isinstance(tool, BaseTool) for tool in tools)
|
||||
|
||||
mock_client_class.assert_called_once()
|
||||
call_args = mock_client_class.call_args
|
||||
transport = call_args.kwargs["transport"]
|
||||
assert transport.url == "https://api.example.com/mcp"
|
||||
assert transport.headers == {"Authorization": "Bearer test_token"}
|
||||
assert transport.streamable is True
|
||||
|
||||
|
||||
def test_agent_with_sse_mcp_config(mock_tool_definitions):
|
||||
"""Test agent setup with MCPServerSSE configuration."""
|
||||
sse_config = MCPServerSSE(
|
||||
url="https://api.example.com/mcp/sse",
|
||||
headers={"Authorization": "Bearer test_token"},
|
||||
)
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
mcps=[sse_config],
|
||||
)
|
||||
|
||||
with patch("crewai.agent.core.MCPClient") as mock_client_class:
|
||||
mock_client = AsyncMock()
|
||||
mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
|
||||
mock_client.connected = False
|
||||
mock_client.connect = AsyncMock()
|
||||
mock_client.disconnect = AsyncMock()
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
tools = agent.get_mcp_tools([sse_config])
|
||||
|
||||
assert len(tools) == 2
|
||||
assert all(isinstance(tool, BaseTool) for tool in tools)
|
||||
|
||||
mock_client_class.assert_called_once()
|
||||
call_args = mock_client_class.call_args
|
||||
transport = call_args.kwargs["transport"]
|
||||
assert transport.url == "https://api.example.com/mcp/sse"
|
||||
assert transport.headers == {"Authorization": "Bearer test_token"}
|
||||
|
||||
|
||||
def test_mcp_tool_execution_in_sync_context(mock_tool_definitions):
|
||||
"""Test MCPNativeTool execution in synchronous context (normal crew execution)."""
|
||||
http_config = MCPServerHTTP(url="https://api.example.com/mcp")
|
||||
|
||||
with patch("crewai.agent.core.MCPClient") as mock_client_class:
|
||||
mock_client = AsyncMock()
|
||||
mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
|
||||
mock_client.connected = False
|
||||
mock_client.connect = AsyncMock()
|
||||
mock_client.disconnect = AsyncMock()
|
||||
mock_client.call_tool = AsyncMock(return_value="test result")
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
mcps=[http_config],
|
||||
)
|
||||
|
||||
tools = agent.get_mcp_tools([http_config])
|
||||
assert len(tools) == 2
|
||||
|
||||
|
||||
tool = tools[0]
|
||||
result = tool.run(query="test query")
|
||||
|
||||
assert result == "test result"
|
||||
mock_client.call_tool.assert_called()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_mcp_tool_execution_in_async_context(mock_tool_definitions):
|
||||
"""Test MCPNativeTool execution in async context (e.g., from a Flow)."""
|
||||
http_config = MCPServerHTTP(url="https://api.example.com/mcp")
|
||||
|
||||
with patch("crewai.agent.core.MCPClient") as mock_client_class:
|
||||
mock_client = AsyncMock()
|
||||
mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
|
||||
mock_client.connected = False
|
||||
mock_client.connect = AsyncMock()
|
||||
mock_client.disconnect = AsyncMock()
|
||||
mock_client.call_tool = AsyncMock(return_value="test result")
|
||||
mock_client_class.return_value = mock_client
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
mcps=[http_config],
|
||||
)
|
||||
|
||||
tools = agent.get_mcp_tools([http_config])
|
||||
assert len(tools) == 2
|
||||
|
||||
|
||||
tool = tools[0]
|
||||
result = tool.run(query="test query")
|
||||
|
||||
assert result == "test result"
|
||||
mock_client.call_tool.assert_called()
|
||||
94
lib/crewai/tests/project/test_callback_with_taskoutput.py
Normal file
94
lib/crewai/tests/project/test_callback_with_taskoutput.py
Normal file
@@ -0,0 +1,94 @@
|
||||
"""Test callback decorator with TaskOutput arguments."""
|
||||
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from crewai import Agent, Crew, Task
|
||||
from crewai.project import CrewBase, callback, task
|
||||
from crewai.tasks.output_format import OutputFormat
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
|
||||
|
||||
def test_callback_decorator_with_taskoutput() -> None:
|
||||
"""Test that @callback decorator works with TaskOutput arguments."""
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
"""Test crew with callback."""
|
||||
|
||||
callback_called = False
|
||||
callback_output = None
|
||||
|
||||
@callback
|
||||
def task_callback(self, output: TaskOutput) -> None:
|
||||
"""Test callback that receives TaskOutput."""
|
||||
self.callback_called = True
|
||||
self.callback_output = output
|
||||
|
||||
@task
|
||||
def test_task(self) -> Task:
|
||||
"""Test task with callback."""
|
||||
return Task(
|
||||
description="Test task",
|
||||
expected_output="Test output",
|
||||
callback=self.task_callback,
|
||||
)
|
||||
|
||||
test_crew = TestCrew()
|
||||
task_instance = test_crew.test_task()
|
||||
|
||||
test_output = TaskOutput(
|
||||
description="Test task",
|
||||
agent="Test Agent",
|
||||
raw="test result",
|
||||
output_format=OutputFormat.RAW,
|
||||
)
|
||||
|
||||
task_instance.callback(test_output)
|
||||
|
||||
assert test_crew.callback_called
|
||||
assert test_crew.callback_output == test_output
|
||||
|
||||
|
||||
def test_callback_decorator_with_taskoutput_integration() -> None:
|
||||
"""Integration test for callback with actual task execution."""
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
"""Test crew with callback integration."""
|
||||
|
||||
callback_called = False
|
||||
received_output: TaskOutput | None = None
|
||||
|
||||
@callback
|
||||
def task_callback(self, output: TaskOutput) -> None:
|
||||
"""Callback executed after task completion."""
|
||||
self.callback_called = True
|
||||
self.received_output = output
|
||||
|
||||
@task
|
||||
def test_task(self) -> Task:
|
||||
"""Test task."""
|
||||
return Task(
|
||||
description="Test task",
|
||||
expected_output="Test output",
|
||||
callback=self.task_callback,
|
||||
)
|
||||
|
||||
test_crew = TestCrew()
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
)
|
||||
|
||||
task_instance = test_crew.test_task()
|
||||
task_instance.agent = agent
|
||||
|
||||
with patch.object(Agent, "execute_task") as mock_execute:
|
||||
mock_execute.return_value = "test result"
|
||||
task_instance.execute_sync()
|
||||
|
||||
assert test_crew.callback_called
|
||||
assert test_crew.received_output is not None
|
||||
assert test_crew.received_output.raw == "test result"
|
||||
@@ -3,6 +3,7 @@
|
||||
import asyncio
|
||||
import threading
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
|
||||
import pytest
|
||||
from pydantic import BaseModel
|
||||
@@ -1384,3 +1385,110 @@ def test_mixed_sync_async_execution_order():
|
||||
]
|
||||
|
||||
assert execution_order == expected_order
|
||||
|
||||
|
||||
def test_flow_copy_state_with_unpickleable_objects():
|
||||
"""Test that _copy_state handles unpickleable objects like RLock.
|
||||
|
||||
Regression test for issue #3828: Flow should not crash when state contains
|
||||
objects that cannot be deep copied (like threading.RLock).
|
||||
"""
|
||||
|
||||
class StateWithRLock(BaseModel):
|
||||
counter: int = 0
|
||||
lock: Optional[threading.RLock] = None
|
||||
|
||||
class FlowWithRLock(Flow[StateWithRLock]):
|
||||
@start()
|
||||
def step_1(self):
|
||||
self.state.counter += 1
|
||||
|
||||
@listen(step_1)
|
||||
def step_2(self):
|
||||
self.state.counter += 1
|
||||
|
||||
flow = FlowWithRLock(initial_state=StateWithRLock())
|
||||
flow._state.lock = threading.RLock()
|
||||
|
||||
copied_state = flow._copy_state()
|
||||
assert copied_state.counter == 0
|
||||
assert copied_state.lock is not None
|
||||
|
||||
|
||||
def test_flow_copy_state_with_nested_unpickleable_objects():
|
||||
"""Test that _copy_state handles unpickleable objects nested in containers.
|
||||
|
||||
Regression test for issue #3828: Verifies that unpickleable objects
|
||||
nested inside dicts/lists in state don't cause crashes.
|
||||
"""
|
||||
|
||||
class NestedState(BaseModel):
|
||||
data: dict = {}
|
||||
items: list = []
|
||||
|
||||
class FlowWithNestedUnpickleable(Flow[NestedState]):
|
||||
@start()
|
||||
def step_1(self):
|
||||
self.state.data["lock"] = threading.RLock()
|
||||
self.state.data["value"] = 42
|
||||
|
||||
@listen(step_1)
|
||||
def step_2(self):
|
||||
self.state.items.append(threading.Lock())
|
||||
self.state.items.append("normal_value")
|
||||
|
||||
flow = FlowWithNestedUnpickleable(initial_state=NestedState())
|
||||
flow.kickoff()
|
||||
|
||||
assert flow.state.data["value"] == 42
|
||||
assert len(flow.state.items) == 2
|
||||
|
||||
|
||||
def test_flow_copy_state_without_unpickleable_objects():
|
||||
"""Test that _copy_state still works normally with pickleable objects.
|
||||
|
||||
Ensures that the fallback logic doesn't break normal deep copy behavior.
|
||||
"""
|
||||
|
||||
class NormalState(BaseModel):
|
||||
counter: int = 0
|
||||
data: str = ""
|
||||
nested: dict = {}
|
||||
|
||||
class NormalFlow(Flow[NormalState]):
|
||||
@start()
|
||||
def step_1(self):
|
||||
self.state.counter = 5
|
||||
self.state.data = "test"
|
||||
self.state.nested = {"key": "value"}
|
||||
|
||||
flow = NormalFlow(initial_state=NormalState())
|
||||
flow.state.counter = 10
|
||||
flow.state.data = "modified"
|
||||
flow.state.nested["key"] = "modified"
|
||||
|
||||
copied_state = flow._copy_state()
|
||||
assert copied_state.counter == 10
|
||||
assert copied_state.data == "modified"
|
||||
assert copied_state.nested["key"] == "modified"
|
||||
|
||||
flow.state.nested["key"] = "changed_after_copy"
|
||||
assert copied_state.nested["key"] == "modified"
|
||||
|
||||
|
||||
def test_flow_copy_state_with_dict_state():
|
||||
"""Test that _copy_state works with dict-based states."""
|
||||
|
||||
class DictFlow(Flow[dict]):
|
||||
@start()
|
||||
def step_1(self):
|
||||
self.state["counter"] = 1
|
||||
|
||||
flow = DictFlow()
|
||||
flow.state["test"] = "value"
|
||||
|
||||
copied_state = flow._copy_state()
|
||||
assert copied_state["test"] == "value"
|
||||
|
||||
flow.state["test"] = "modified"
|
||||
assert copied_state["test"] == "value"
|
||||
|
||||
@@ -340,7 +340,7 @@ def test_output_pydantic_hierarchical():
|
||||
)
|
||||
result = crew.kickoff()
|
||||
assert isinstance(result.pydantic, ScoreOutput)
|
||||
assert result.to_dict() == {"score": 0}
|
||||
assert result.to_dict() == {"score": 4}
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -599,7 +599,7 @@ def test_output_pydantic_to_another_task():
|
||||
assert isinstance(pydantic_result, ScoreOutput), (
|
||||
"Expected pydantic result to be of type ScoreOutput"
|
||||
)
|
||||
assert pydantic_result.score == 4
|
||||
assert pydantic_result.score == 5
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -630,7 +630,7 @@ def test_output_json_to_another_task():
|
||||
|
||||
crew = Crew(agents=[scorer], tasks=[task1, task2])
|
||||
result = crew.kickoff()
|
||||
assert '{"score": 4}' == result.json
|
||||
assert '{"score": 3}' == result.json
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
|
||||
@@ -181,7 +181,7 @@ def test_task_guardrail_process_output(task_output):
|
||||
result = guardrail(task_output)
|
||||
assert result[0] is False
|
||||
|
||||
assert "exceeding the guardrail limit of fewer than" in result[1].lower()
|
||||
assert result[1] == "The task result contains more than 10 words, violating the guardrail. The text provided contains about 21 words."
|
||||
|
||||
guardrail = LLMGuardrail(
|
||||
description="Ensure the result has less than 500 words", llm=LLM(model="gpt-4o")
|
||||
@@ -252,10 +252,7 @@ def test_guardrail_emits_events(sample_agent):
|
||||
{
|
||||
"success": False,
|
||||
"result": None,
|
||||
"error": "The task result does not comply with the guardrail because none of "
|
||||
"the listed authors are from Italy. All authors mentioned are from "
|
||||
"different countries, including Germany, the UK, the USA, and others, "
|
||||
"which violates the requirement that authors must be Italian.",
|
||||
"error": "The output indicates that none of the authors mentioned are from Italy, while the guardrail requires authors to be from Italy. Therefore, the output does not comply with the guardrail.",
|
||||
"retry_count": 0,
|
||||
},
|
||||
{"success": True, "result": result.raw, "error": None, "retry_count": 1},
|
||||
|
||||
@@ -227,28 +227,22 @@ def test_get_conversion_instructions_gpt() -> None:
|
||||
with patch.object(LLM, "supports_function_calling") as supports_function_calling:
|
||||
supports_function_calling.return_value = True
|
||||
instructions = get_conversion_instructions(SimpleModel, llm)
|
||||
model_schema = PydanticSchemaParser(model=SimpleModel).get_schema()
|
||||
expected_instructions = (
|
||||
"Please convert the following text into valid JSON.\n\n"
|
||||
"Output ONLY the valid JSON and nothing else.\n\n"
|
||||
"Use this format exactly:\n```json\n"
|
||||
f"{model_schema}\n```"
|
||||
)
|
||||
assert instructions == expected_instructions
|
||||
# Now using OpenAPI schema format for all models
|
||||
assert "Ensure your final answer strictly adheres to the following OpenAPI schema:" in instructions
|
||||
assert '"type": "json_schema"' in instructions
|
||||
assert '"name": "SimpleModel"' in instructions
|
||||
assert "Do not include the OpenAPI schema in the final output" in instructions
|
||||
|
||||
|
||||
def test_get_conversion_instructions_non_gpt() -> None:
|
||||
llm = LLM(model="ollama/llama3.1", base_url="http://localhost:11434")
|
||||
with patch.object(LLM, "supports_function_calling", return_value=False):
|
||||
instructions = get_conversion_instructions(SimpleModel, llm)
|
||||
# Check that the JSON schema is properly formatted
|
||||
assert "Please convert the following text into valid JSON" in instructions
|
||||
assert "Output ONLY the valid JSON and nothing else" in instructions
|
||||
assert "Use this format exactly" in instructions
|
||||
assert "```json" in instructions
|
||||
assert '"type": "object"' in instructions
|
||||
assert '"properties"' in instructions
|
||||
assert "'type': 'json_schema'" not in instructions
|
||||
# Now using OpenAPI schema format for all models
|
||||
assert "Ensure your final answer strictly adheres to the following OpenAPI schema:" in instructions
|
||||
assert '"type": "json_schema"' in instructions
|
||||
assert '"name": "SimpleModel"' in instructions
|
||||
assert "Do not include the OpenAPI schema in the final output" in instructions
|
||||
|
||||
|
||||
# Tests for is_gpt
|
||||
|
||||
Reference in New Issue
Block a user