mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-08 07:38:29 +00:00
Compare commits: 51 commits (devin/1750 ... joaomdmour)

Commit SHAs:
1305de3a0c, 5537f3d0da, b9d7d3ce7a, 30413d19be, f0f0e3d7f8, 4c21711dbf, d3bffbfd0c,
e242a69ea9, 21046f29b6, 86f9226be7, 6d42f67dfc, a81bd90e7c, df701d4ab3, d79b2dc68a,
026b337574, 2bc9a54136, 9267302ce6, 613b196ffb, b78671ed40, 4a13f2f7b1, 5f1be9e1db,
12fe6a9a44, c90272d601, e4e9bf343a, efebcd9734, 6ecb30ee87, 7c12aeaa0c, 4fcabd391f,
7009a6b7a0, e3cd7209ad, 635e5a21f3, e0cd41e9f9, 224ba1fb69, 286958be4f, 36fc2365d3,
138ac95b09, 572d8043eb, 3d5668e988, cbc81ecc06, 38ed69577f, 203faa6a77, 88721788e9,
e636f1dc17, 5868ac71dd, cb5116a21d, b74ee4e98b, 0383aa1f27, db6b831c66, acb1eac2ac,
acabaee480, 9a2ddb39ce
@@ -1,39 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
print("Checking optional dependencies availability:")
|
||||
|
||||
try:
|
||||
import chromadb # noqa: F401
|
||||
print('chromadb: AVAILABLE')
|
||||
except ImportError:
|
||||
print('chromadb: NOT AVAILABLE')
|
||||
|
||||
try:
|
||||
import pdfplumber # noqa: F401
|
||||
print('pdfplumber: AVAILABLE')
|
||||
except ImportError:
|
||||
print('pdfplumber: NOT AVAILABLE')
|
||||
|
||||
try:
|
||||
import pyvis # noqa: F401
|
||||
print('pyvis: AVAILABLE')
|
||||
except ImportError:
|
||||
print('pyvis: NOT AVAILABLE')
|
||||
|
||||
try:
|
||||
import opentelemetry # noqa: F401
|
||||
print('opentelemetry: AVAILABLE')
|
||||
except ImportError:
|
||||
print('opentelemetry: NOT AVAILABLE')
|
||||
|
||||
try:
|
||||
import auth0 # noqa: F401
|
||||
print('auth0: AVAILABLE')
|
||||
except ImportError:
|
||||
print('auth0: NOT AVAILABLE')
|
||||
|
||||
try:
|
||||
import aisuite # noqa: F401
|
||||
print('aisuite: AVAILABLE')
|
||||
except ImportError:
|
||||
print('aisuite: NOT AVAILABLE')
|
||||
@@ -1,119 +0,0 @@
|
||||
# CrewAI Lite Version
|
||||
|
||||
CrewAI now supports a "lite" installation with minimal dependencies, allowing you to use the core functionality without installing heavy optional dependencies.
|
||||
|
||||
## Installation
|
||||
|
||||
### Lite Installation (Minimal Dependencies)
|
||||
```bash
|
||||
pip install crewai
|
||||
```
|
||||
|
||||
This installs only the core dependencies needed for basic Agent, Crew, and Task functionality.
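For example, a minimal crew like the following runs on the core install alone (a brief sketch; it assumes an LLM such as OpenAI is configured through environment variables like `OPENAI_API_KEY`):

```python
from crewai import Agent, Crew, Task

writer = Agent(
    role="Writer",
    goal="Draft a short paragraph on a given topic",
    backstory="A concise technical writer.",
)

task = Task(
    description="Write one paragraph about what CrewAI does.",
    expected_output="A single paragraph.",
    agent=writer,
)

crew = Crew(agents=[writer], tasks=[task])
print(crew.kickoff())
```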
|
||||
|
||||
### Full Installation (All Dependencies)
|
||||
```bash
|
||||
pip install crewai[all]
|
||||
```
|
||||
|
||||
### Selective Installation (Optional Extras)
|
||||
|
||||
Install only the features you need:
|
||||
|
||||
```bash
|
||||
# Memory and knowledge storage
|
||||
pip install crewai[memory]
|
||||
|
||||
# Knowledge sources (PDF, Excel, etc.)
|
||||
pip install crewai[knowledge]
|
||||
|
||||
# Telemetry and monitoring
|
||||
pip install crewai[telemetry]
|
||||
|
||||
# Flow visualization
|
||||
pip install crewai[visualization]
|
||||
|
||||
# Authentication features
|
||||
pip install crewai[auth]
|
||||
|
||||
# Additional LLM integrations
|
||||
pip install crewai[llm-integrations]
|
||||
|
||||
# AgentOps integration
|
||||
pip install crewai[agentops]
|
||||
|
||||
# FastEmbed embeddings
|
||||
pip install crewai[embeddings]
|
||||
```
|
||||
|
||||
You can also combine multiple extras:
|
||||
```bash
|
||||
pip install crewai[memory,knowledge,telemetry]
|
||||
```
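To confirm which optional dependencies actually ended up in your environment, you can run a quick availability check similar to the helper script removed in this changeset (illustrative sketch):

```python
import importlib.util

# Top-level modules provided by the optional extras
for package in ("chromadb", "pdfplumber", "pyvis", "opentelemetry", "auth0", "aisuite"):
    status = "AVAILABLE" if importlib.util.find_spec(package) else "NOT AVAILABLE"
    print(f"{package}: {status}")
```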
|
||||
|
||||
## Core vs Optional Features
|
||||
|
||||
### Core Features (Always Available)
|
||||
- Basic Agent, Crew, and Task functionality
|
||||
- LiteAgent for simple interactions
|
||||
- Core LLM integrations (OpenAI, etc.)
|
||||
- Basic tools and utilities
|
||||
- Process management
|
||||
|
||||
### Optional Features (Require Extras)
|
||||
|
||||
#### Memory (`crewai[memory]`)
|
||||
- RAG storage with ChromaDB
|
||||
- Memory management
|
||||
- Embeddings configuration
|
||||
|
||||
#### Knowledge (`crewai[knowledge]`)
|
||||
- PDF knowledge sources
|
||||
- Excel/spreadsheet processing
|
||||
- Document processing with Docling
|
||||
- Knowledge storage and retrieval
|
||||
|
||||
#### Telemetry (`crewai[telemetry]`)
|
||||
- OpenTelemetry integration
|
||||
- Performance monitoring
|
||||
- Usage analytics
|
||||
|
||||
#### Visualization (`crewai[visualization]`)
|
||||
- Flow visualization with Pyvis
|
||||
- Network diagrams
|
||||
|
||||
#### Authentication (`crewai[auth]`)
|
||||
- Auth0 integration
|
||||
- Secure token management
|
||||
|
||||
#### LLM Integrations (`crewai[llm-integrations]`)
|
||||
- AISuite integration
|
||||
- Additional model providers
|
||||
|
||||
## Error Handling
|
||||
|
||||
When you try to use a feature that requires optional dependencies, you'll get a helpful error message:
|
||||
|
||||
```python
|
||||
from crewai.memory.storage.rag_storage import RAGStorage
|
||||
|
||||
# Without crewai[memory] installed:
|
||||
# ImportError: ChromaDB is required for RAG storage functionality.
|
||||
# Please install it with: pip install 'crewai[memory]'
|
||||
```
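Internally, this kind of message comes from a guard around the optional import. A minimal sketch of such a guard looks like this (illustrative only; the helper name is hypothetical, not CrewAI's actual internal function):

```python
def _require_chromadb() -> None:
    """Raise a helpful error if the optional ChromaDB dependency is missing."""
    try:
        import chromadb  # noqa: F401
    except ImportError as exc:
        raise ImportError(
            "ChromaDB is required for RAG storage functionality. "
            "Please install it with: pip install 'crewai[memory]'"
        ) from exc
```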
|
||||
|
||||
## Migration Guide
|
||||
|
||||
Existing installations will continue to work as before. If you want to switch to the lite version:
|
||||
|
||||
1. Uninstall current crewai: `pip uninstall crewai`
|
||||
2. Install lite version: `pip install crewai`
|
||||
3. Add extras as needed: `pip install crewai[memory,knowledge]`
|
||||
|
||||
## Benefits
|
||||
|
||||
- **Reduced installation size**: Core installation is much smaller
|
||||
- **Faster installation**: Fewer dependencies to download and compile
|
||||
- **Reduced security surface**: Fewer dependencies means fewer potential vulnerabilities
|
||||
- **Flexible**: Install only what you need
|
||||
- **Backward compatible**: Existing code continues to work with full installation
|
||||
@@ -295,11 +295,6 @@ multimodal_agent = Agent(
|
||||
- `"safe"`: Uses Docker (recommended for production)
|
||||
- `"unsafe"`: Direct execution (use only in trusted environments)
|
||||
|
||||
<Note>
|
||||
This runs a default Docker image. If you want to configure the Docker image, check out the Code Interpreter Tool in the tools section.
|
||||
To customize it, add the Code Interpreter Tool to the agent via its `tools` parameter.
|
||||
</Note>
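For example, enabling safe (Docker-based) code execution on an agent looks like this (a brief sketch; role, goal, and backstory are placeholders):

```python
from crewai import Agent

coder = Agent(
    role="Python Developer",
    goal="Write and run small analysis scripts",
    backstory="A developer who validates ideas by executing code.",
    allow_code_execution=True,
    code_execution_mode="safe",  # generated code runs inside the default Docker image
)
```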
|
||||
|
||||
#### Advanced Features
|
||||
- `multimodal`: Enable multimodal capabilities for processing text and visual content
|
||||
- `reasoning`: Enable agent to reflect and create plans before executing tasks
|
||||
|
||||
@@ -200,37 +200,6 @@ Deploy the crew or flow to [CrewAI Enterprise](https://app.crewai.com).
|
||||
```
|
||||
- Reads your local project configuration.
|
||||
- Prompts you to confirm the environment variables (like `OPENAI_API_KEY`, `SERPER_API_KEY`) found locally. These will be securely stored with the deployment on the Enterprise platform. Ensure your sensitive keys are correctly configured locally (e.g., in a `.env` file) before running this.
|
||||
|
||||
### 11. Organization Management
|
||||
|
||||
Manage your CrewAI Enterprise organizations.
|
||||
|
||||
```shell Terminal
|
||||
crewai org [COMMAND] [OPTIONS]
|
||||
```
|
||||
|
||||
#### Commands:
|
||||
|
||||
- `list`: List all organizations you belong to
|
||||
```shell Terminal
|
||||
crewai org list
|
||||
```
|
||||
|
||||
- `current`: Display your currently active organization
|
||||
```shell Terminal
|
||||
crewai org current
|
||||
```
|
||||
|
||||
- `switch`: Switch to a specific organization
|
||||
```shell Terminal
|
||||
crewai org switch <organization_id>
|
||||
```
|
||||
|
||||
<Note>
|
||||
You must be authenticated to CrewAI Enterprise to use these organization management commands.
|
||||
</Note>
|
||||
|
||||
- **Create a deployment** (continued):
|
||||
- Links the deployment to the corresponding remote GitHub repository (it usually detects this automatically).
|
||||
|
||||
- **Deploy the Crew**: Once you are authenticated, you can deploy your crew or flow to CrewAI Enterprise.
|
||||
|
||||
@@ -233,11 +233,6 @@ CrewAI provides a wide range of events that you can listen for:
|
||||
- **KnowledgeQueryFailedEvent**: Emitted when a knowledge query fails
|
||||
- **KnowledgeSearchQueryFailedEvent**: Emitted when a knowledge search query fails
|
||||
|
||||
### LLM Guardrail Events
|
||||
|
||||
- **LLMGuardrailStartedEvent**: Emitted when a guardrail validation starts. Contains details about the guardrail being applied and retry count.
|
||||
- **LLMGuardrailCompletedEvent**: Emitted when a guardrail validation completes. Contains details about validation success/failure, results, and error messages if any.
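For example, you can subscribe to these guardrail events with a small listener (a minimal sketch; it assumes `crewai_event_bus` and the event classes are importable from `crewai.utilities.events`, which may vary by version):

```python
from crewai.utilities.events import (
    LLMGuardrailCompletedEvent,
    LLMGuardrailStartedEvent,
    crewai_event_bus,
)


@crewai_event_bus.on(LLMGuardrailStartedEvent)
def on_guardrail_started(source, event):
    # The event carries details about the guardrail being applied and the retry count.
    print(f"Guardrail validation started: {event}")


@crewai_event_bus.on(LLMGuardrailCompletedEvent)
def on_guardrail_completed(source, event):
    # The event carries validation success/failure, results, and any error messages.
    print(f"Guardrail validation completed: {event}")
```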
|
||||
|
||||
### Flow Events
|
||||
|
||||
- **FlowCreatedEvent**: Emitted when a Flow is created
|
||||
|
||||
@@ -29,10 +29,6 @@ my_crew = Crew(
|
||||
|
||||
From this point on, your crew will have planning enabled, and the tasks will be planned before each iteration.
|
||||
|
||||
<Warning>
|
||||
When planning is enabled, crewAI will use `gpt-4o-mini` as the default LLM for planning, which requires a valid OpenAI API key. Since your agents might be using different LLMs, this could cause confusion if you don't have an OpenAI API key configured or if you're experiencing unexpected behavior related to LLM API calls.
|
||||
</Warning>
|
||||
|
||||
#### Planning LLM
|
||||
|
||||
Now you can define the LLM that will be used to plan the tasks.
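For instance (a brief sketch; the agent, task, and model name are placeholders):

```python
from crewai import Agent, Crew, Task

researcher = Agent(
    role="Researcher",
    goal="Summarize a topic",
    backstory="An analyst who writes concise summaries.",
)

task = Task(
    description="Summarize the latest trends in AI agents.",
    expected_output="A short bullet-point summary.",
    agent=researcher,
)

my_crew = Crew(
    agents=[researcher],
    tasks=[task],
    planning=True,
    planning_llm="gpt-4o",  # LLM used only for the planning step
)
```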
|
||||
|
||||
@@ -6,11 +6,11 @@ icon: brain
|
||||
|
||||
## Overview
|
||||
|
||||
Agent reasoning is a feature that allows agents to reflect on a task and create a plan before execution. This helps agents approach tasks more methodically and ensures they're ready to perform the assigned work.
|
||||
Agent reasoning is a feature that allows agents to reflect on a task and create a plan before and during execution. This helps agents approach tasks more methodically and adapt their strategy as they progress through complex tasks.
|
||||
|
||||
## Usage
|
||||
|
||||
To enable reasoning for an agent, simply set `reasoning=True` when creating the agent:
|
||||
To enable reasoning for an agent, set `reasoning=True` when creating the agent:
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
@@ -19,13 +19,43 @@ agent = Agent(
|
||||
role="Data Analyst",
|
||||
goal="Analyze complex datasets and provide insights",
|
||||
backstory="You are an experienced data analyst with expertise in finding patterns in complex data.",
|
||||
reasoning=True, # Enable reasoning
|
||||
reasoning=True, # Enable basic reasoning
|
||||
max_reasoning_attempts=3 # Optional: Set a maximum number of reasoning attempts
|
||||
)
|
||||
```
|
||||
|
||||
### Interval-based Reasoning
|
||||
|
||||
To enable periodic reasoning during task execution, set `reasoning_interval` to specify how often the agent should re-evaluate its plan:
|
||||
|
||||
```python
|
||||
agent = Agent(
|
||||
role="Research Analyst",
|
||||
goal="Find comprehensive information about a topic",
|
||||
backstory="You are a skilled research analyst who methodically approaches information gathering.",
|
||||
reasoning=True,
|
||||
reasoning_interval=3, # Re-evaluate plan every 3 steps
|
||||
)
|
||||
```
|
||||
|
||||
### Adaptive Reasoning
|
||||
|
||||
For more dynamic reasoning that adapts to the execution context, enable `adaptive_reasoning`:
|
||||
|
||||
```python
|
||||
agent = Agent(
|
||||
role="Strategic Advisor",
|
||||
goal="Provide strategic advice based on market research",
|
||||
backstory="You are an experienced strategic advisor who adapts your approach based on the information you discover.",
|
||||
reasoning=True,
|
||||
adaptive_reasoning=True, # Agent decides when to reason based on context
|
||||
)
|
||||
```
|
||||
|
||||
## How It Works
|
||||
|
||||
### Initial Reasoning
|
||||
|
||||
When reasoning is enabled, before executing a task, the agent will:
|
||||
|
||||
1. Reflect on the task and create a detailed plan
|
||||
@@ -33,7 +63,17 @@ When reasoning is enabled, before executing a task, the agent will:
|
||||
3. Refine the plan as necessary until it's ready or max_reasoning_attempts is reached
|
||||
4. Inject the reasoning plan into the task description before execution
|
||||
|
||||
This process helps the agent break down complex tasks into manageable steps and identify potential challenges before starting.
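Under the hood, the generated plan is appended to the task prompt as an internal note, as the `agent.py` change later in this diff shows. Conceptually (a runnable sketch with placeholder values):

```python
plan_text = "1. Load the sales data\n2. Identify trends\n3. Summarize the top 3 findings"
task_prompt = "Analyze the provided sales data and identify key trends."

# The plan is injected as an internal section the agent should follow but not reveal.
internal_plan_msg = "### INTERNAL PLAN (do NOT reveal or repeat)\n" + plan_text
task_prompt = task_prompt + "\n\n" + internal_plan_msg
print(task_prompt)
```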
|
||||
### Mid-execution Reasoning
|
||||
|
||||
During task execution, the agent can re-evaluate and adjust its plan based on:
|
||||
|
||||
1. **Interval-based reasoning**: The agent reasons after a fixed number of steps (specified by `reasoning_interval`)
|
||||
2. **Adaptive reasoning**: The agent uses its LLM to intelligently decide when reasoning is needed based on:
|
||||
- Current execution context (task description, expected output, steps taken)
|
||||
- The agent's own judgment about whether strategic reassessment would be beneficial
|
||||
- Automatic fallback when recent errors or failures are detected in the execution
|
||||
|
||||
This mid-execution reasoning helps agents adapt to new information, overcome obstacles, and optimize their approach as they work through complex tasks.
|
||||
|
||||
## Configuration Options
|
||||
|
||||
@@ -45,35 +85,44 @@ This process helps the agent break down complex tasks into manageable steps and
|
||||
Maximum number of attempts to refine the plan before proceeding with execution. If None (default), the agent will continue refining until it's ready.
|
||||
</ParamField>
|
||||
|
||||
## Example
|
||||
<ParamField body="reasoning_interval" type="int" default="None">
|
||||
Interval of steps after which the agent should reason again during execution. If None, reasoning only happens before execution.
|
||||
</ParamField>
|
||||
|
||||
Here's a complete example:
|
||||
<ParamField body="adaptive_reasoning" type="bool" default="False">
|
||||
Whether the agent should adaptively decide when to reason during execution based on context.
|
||||
</ParamField>
|
||||
|
||||
```python
|
||||
from crewai import Agent, Task, Crew
|
||||
## Technical Implementation
|
||||
|
||||
# Create an agent with reasoning enabled
|
||||
analyst = Agent(
|
||||
role="Data Analyst",
|
||||
goal="Analyze data and provide insights",
|
||||
backstory="You are an expert data analyst.",
|
||||
reasoning=True,
|
||||
max_reasoning_attempts=3 # Optional: Set a limit on reasoning attempts
|
||||
)
|
||||
### Interval-based Reasoning
|
||||
|
||||
# Create a task
|
||||
analysis_task = Task(
|
||||
description="Analyze the provided sales data and identify key trends.",
|
||||
expected_output="A report highlighting the top 3 sales trends.",
|
||||
agent=analyst
|
||||
)
|
||||
The interval-based reasoning feature works by:
|
||||
|
||||
# Create a crew and run the task
|
||||
crew = Crew(agents=[analyst], tasks=[analysis_task])
|
||||
result = crew.kickoff()
|
||||
1. Tracking the number of steps since the last reasoning event
|
||||
2. Triggering reasoning when `steps_since_reasoning >= reasoning_interval`
|
||||
3. Resetting the counter after each reasoning event
|
||||
4. Generating an updated plan based on current progress
|
||||
|
||||
print(result)
|
||||
```
|
||||
This creates a predictable pattern of reflection during task execution, which is useful for complex tasks where periodic reassessment is beneficial.
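Conceptually, the trigger condition can be sketched as follows (illustrative only; the names mirror the description above rather than the exact internals):

```python
def should_reason(steps_since_reasoning: int, reasoning_interval: int | None) -> bool:
    """Return True when interval-based reasoning should fire."""
    if reasoning_interval is None:
        return False  # reasoning only happens before execution
    return steps_since_reasoning >= reasoning_interval


# Pseudo-usage inside the execution loop:
#   if should_reason(steps_since_reasoning, agent.reasoning_interval):
#       regenerate_plan()              # hypothetical helper
#       steps_since_reasoning = 0      # reset the counter
#   else:
#       steps_since_reasoning += 1
```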
|
||||
|
||||
### Adaptive Reasoning
|
||||
|
||||
The adaptive reasoning feature uses LLM function calling to determine when reasoning should occur:
|
||||
|
||||
1. **LLM-based decision**: The agent's LLM evaluates the current execution context (task description, expected output, steps taken so far) to decide if reasoning is needed
|
||||
2. **Error detection fallback**: When recent messages contain error indicators like "error", "exception", "failed", etc., reasoning is automatically triggered
|
||||
|
||||
This creates an intelligent reasoning pattern where the agent uses its own judgment to determine when strategic reassessment would be most beneficial, while maintaining automatic error recovery.
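The error-detection fallback can be pictured with a sketch like this (the marker list is an assumption based on the description above, not the exact implementation):

```python
ERROR_MARKERS = ("error", "exception", "failed", "traceback")


def detect_recent_errors(recent_messages: list[str]) -> bool:
    """Fallback trigger: reason again if recent output looks like a failure."""
    return any(
        marker in message.lower()
        for message in recent_messages
        for marker in ERROR_MARKERS
    )
```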
|
||||
|
||||
### Mid-execution Reasoning Process
|
||||
|
||||
When mid-execution reasoning is triggered, the agent:
|
||||
|
||||
1. Summarizes current progress (steps taken, tools used, recent actions)
|
||||
2. Evaluates the effectiveness of the current approach
|
||||
3. Adjusts the plan based on new information and challenges encountered
|
||||
4. Continues execution with the updated plan
|
||||
|
||||
## Error Handling
|
||||
|
||||
@@ -93,7 +142,7 @@ agent = Agent(
|
||||
role="Data Analyst",
|
||||
goal="Analyze data and provide insights",
|
||||
reasoning=True,
|
||||
max_reasoning_attempts=3
|
||||
reasoning_interval=5 # Re-evaluate plan every 5 steps
|
||||
)
|
||||
|
||||
# Create a task
|
||||
@@ -144,4 +193,33 @@ I'll analyze the sales data to identify the top 3 trends.
|
||||
READY: I am ready to execute the task.
|
||||
```
|
||||
|
||||
This reasoning plan helps the agent organize its approach to the task, consider potential challenges, and ensure it delivers the expected output.
|
||||
During execution, the agent might generate an updated plan:
|
||||
|
||||
```
|
||||
Based on progress so far (3 steps completed):
|
||||
|
||||
Updated Reasoning Plan:
|
||||
After examining the data structure and initial exploratory analysis, I need to adjust my approach:
|
||||
|
||||
1. Current findings:
|
||||
- The data shows seasonal patterns that need deeper investigation
|
||||
- Customer segments show varying purchasing behaviors
|
||||
- There are outliers in the luxury product category
|
||||
|
||||
2. Adjusted approach:
|
||||
- Focus more on seasonal analysis with year-over-year comparisons
|
||||
- Segment analysis by both demographics and purchasing frequency
|
||||
- Investigate the luxury product category anomalies
|
||||
|
||||
3. Next steps:
|
||||
- Apply time series analysis to better quantify seasonal patterns
|
||||
- Create customer cohorts for more precise segmentation
|
||||
- Perform statistical tests on the luxury category data
|
||||
|
||||
4. Expected outcome:
|
||||
Still on track to deliver the top 3 sales trends, but with more precise quantification and actionable insights.
|
||||
|
||||
READY: I am ready to continue executing the task.
|
||||
```
|
||||
|
||||
This mid-execution reasoning helps the agent adapt its approach based on what it has learned during the initial steps of the task.
|
||||
|
||||
@@ -32,7 +32,6 @@ The Enterprise Tools Repository includes:
|
||||
- **Customizability**: Provides the flexibility to develop custom tools or utilize existing ones, catering to the specific needs of agents.
|
||||
- **Error Handling**: Incorporates robust error handling mechanisms to ensure smooth operation.
|
||||
- **Caching Mechanism**: Features intelligent caching to optimize performance and reduce redundant operations.
|
||||
- **Asynchronous Support**: Handles both synchronous and asynchronous tools, enabling non-blocking operations.
|
||||
|
||||
## Using CrewAI Tools
|
||||
|
||||
@@ -178,62 +177,6 @@ class MyCustomTool(BaseTool):
|
||||
return "Tool's result"
|
||||
```
|
||||
|
||||
## Asynchronous Tool Support
|
||||
|
||||
CrewAI supports asynchronous tools, allowing you to implement tools that perform non-blocking operations like network requests, file I/O, or other async operations without blocking the main execution thread.
|
||||
|
||||
### Creating Async Tools
|
||||
|
||||
You can create async tools in two ways:
|
||||
|
||||
#### 1. Using the `tool` Decorator with Async Functions
|
||||
|
||||
```python Code
|
||||
import asyncio

from crewai.tools import tool
|
||||
|
||||
@tool("fetch_data_async")
|
||||
async def fetch_data_async(query: str) -> str:
|
||||
"""Asynchronously fetch data based on the query."""
|
||||
# Simulate async operation
|
||||
await asyncio.sleep(1)
|
||||
return f"Data retrieved for {query}"
|
||||
```
|
||||
|
||||
#### 2. Implementing Async Methods in Custom Tool Classes
|
||||
|
||||
```python Code
|
||||
import asyncio

from crewai.tools import BaseTool
|
||||
|
||||
class AsyncCustomTool(BaseTool):
|
||||
name: str = "async_custom_tool"
|
||||
description: str = "An asynchronous custom tool"
|
||||
|
||||
async def _run(self, query: str = "") -> str:
|
||||
"""Asynchronously run the tool"""
|
||||
# Your async implementation here
|
||||
await asyncio.sleep(1)
|
||||
return f"Processed {query} asynchronously"
|
||||
```
|
||||
|
||||
### Using Async Tools
|
||||
|
||||
Async tools work seamlessly in both standard Crew workflows and Flow-based workflows:
|
||||
|
||||
```python Code
|
||||
# In standard Crew
|
||||
agent = Agent(role="researcher", tools=[async_custom_tool])
|
||||
|
||||
# In Flow
|
||||
class MyFlow(Flow):
|
||||
@start()
|
||||
async def begin(self):
|
||||
crew = Crew(agents=[agent])
|
||||
result = await crew.kickoff_async()
|
||||
return result
|
||||
```
|
||||
|
||||
The CrewAI framework automatically handles the execution of both synchronous and asynchronous tools, so you don't need to worry about how to call them differently.
|
||||
|
||||
### Utilizing the `tool` Decorator
|
||||
|
||||
```python Code
|
||||
|
||||
@@ -9,12 +9,7 @@
|
||||
},
|
||||
"favicon": "images/favicon.svg",
|
||||
"contextual": {
|
||||
"options": [
|
||||
"copy",
|
||||
"view",
|
||||
"chatgpt",
|
||||
"claude"
|
||||
]
|
||||
"options": ["copy", "view", "chatgpt", "claude"]
|
||||
},
|
||||
"navigation": {
|
||||
"tabs": [
|
||||
@@ -206,7 +201,6 @@
|
||||
"observability/arize-phoenix",
|
||||
"observability/langfuse",
|
||||
"observability/langtrace",
|
||||
"observability/maxim",
|
||||
"observability/mlflow",
|
||||
"observability/openlit",
|
||||
"observability/opik",
|
||||
@@ -262,8 +256,7 @@
|
||||
"enterprise/features/tool-repository",
|
||||
"enterprise/features/webhook-streaming",
|
||||
"enterprise/features/traces",
|
||||
"enterprise/features/hallucination-guardrail",
|
||||
"enterprise/features/integrations"
|
||||
"enterprise/features/hallucination-guardrail"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,185 +0,0 @@
|
||||
---
|
||||
title: Integrations
|
||||
description: "Connected applications for your agents to take actions."
|
||||
icon: "plug"
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
Enable your agents to authenticate with any OAuth-enabled provider and take actions. From Salesforce and HubSpot to Google and GitHub, we've got you covered with 16+ integrated services.
|
||||
|
||||
<Frame>
|
||||

|
||||
</Frame>
|
||||
|
||||
## Supported Integrations
|
||||
|
||||
### **Communication & Collaboration**
|
||||
- **Gmail** - Manage emails and drafts
|
||||
- **Slack** - Workspace notifications and alerts
|
||||
- **Microsoft** - Office 365 and Teams integration
|
||||
|
||||
### **Project Management**
|
||||
- **Jira** - Issue tracking and project management
|
||||
- **ClickUp** - Task and productivity management
|
||||
- **Asana** - Team task and project coordination
|
||||
- **Notion** - Page and database management
|
||||
- **Linear** - Software project and bug tracking
|
||||
- **GitHub** - Repository and issue management
|
||||
|
||||
### **Customer Relationship Management**
|
||||
- **Salesforce** - CRM account and opportunity management
|
||||
- **HubSpot** - Sales pipeline and contact management
|
||||
- **Zendesk** - Customer support ticket management
|
||||
|
||||
### **Business & Finance**
|
||||
- **Stripe** - Payment processing and customer management
|
||||
- **Shopify** - E-commerce store and product management
|
||||
|
||||
### **Productivity & Storage**
|
||||
- **Google Sheets** - Spreadsheet data synchronization
|
||||
- **Google Calendar** - Event and schedule management
|
||||
- **Box** - File storage and document management
|
||||
|
||||
and more to come!
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before using Authentication Integrations, ensure you have:
|
||||
|
||||
- A [CrewAI Enterprise](https://app.crewai.com) account. You can get started with a free trial.
|
||||
|
||||
|
||||
## Setting Up Integrations
|
||||
|
||||
### 1. Connect Your Account
|
||||
|
||||
1. Navigate to [CrewAI Enterprise](https://app.crewai.com)
|
||||
2. Go to the **Integrations** tab - https://app.crewai.com/crewai_plus/connectors
|
||||
3. Click **Connect** on your desired service from the Authentication Integrations section
|
||||
4. Complete the OAuth authentication flow
|
||||
5. Grant necessary permissions for your use case
|
||||
6. Get your Enterprise Token from your [CrewAI Enterprise](https://app.crewai.com) account page - https://app.crewai.com/crewai_plus/settings/account
|
||||
|
||||
<Frame>
|
||||

|
||||
</Frame>
|
||||
|
||||
### 2. Install Integration Tools
|
||||
|
||||
All you need is the latest version of the `crewai-tools` package.
|
||||
|
||||
```bash
|
||||
uv add crewai-tools
|
||||
```
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Basic Usage
|
||||
<Tip>
|
||||
All the services you are authenticated into will be available as tools. So all you need to do is add the `CrewaiEnterpriseTools` to your agent and you are good to go.
|
||||
</Tip>
|
||||
|
||||
```python
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import CrewaiEnterpriseTools
|
||||
|
||||
# Get enterprise tools (Gmail tool will be included)
|
||||
enterprise_tools = CrewaiEnterpriseTools(
|
||||
enterprise_token="your_enterprise_token"
|
||||
)
|
||||
# print the tools
|
||||
print(enterprise_tools)
|
||||
|
||||
# Create an agent with Gmail capabilities
|
||||
email_agent = Agent(
|
||||
role="Email Manager",
|
||||
goal="Manage and organize email communications",
|
||||
backstory="An AI assistant specialized in email management and communication.",
|
||||
tools=enterprise_tools
|
||||
)
|
||||
|
||||
# Task to send an email
|
||||
email_task = Task(
|
||||
description="Draft and send a follow-up email to john@example.com about the project update",
|
||||
agent=email_agent,
|
||||
expected_output="Confirmation that email was sent successfully"
|
||||
)
|
||||
|
||||
# Run the task
|
||||
crew = Crew(
|
||||
agents=[email_agent],
|
||||
tasks=[email_task]
|
||||
)
|
||||
|
||||
# Run the crew
|
||||
crew.kickoff()
|
||||
```
|
||||
|
||||
### Filtering Tools
|
||||
|
||||
```python
|
||||
from crewai_tools import CrewaiEnterpriseTools
|
||||
|
||||
enterprise_tools = CrewaiEnterpriseTools(
|
||||
actions_list=["gmail_find_email"] # only gmail_find_email tool will be available
|
||||
)
|
||||
gmail_tool = enterprise_tools[0]
|
||||
|
||||
gmail_agent = Agent(
|
||||
role="Gmail Manager",
|
||||
goal="Manage gmail communications and notifications",
|
||||
backstory="An AI assistant that helps coordinate gmail communications.",
|
||||
tools=[gmail_tool]
|
||||
)
|
||||
|
||||
notification_task = Task(
|
||||
description="Find the email from john@example.com",
|
||||
agent=gmail_agent,
|
||||
expected_output="Email found from john@example.com"
|
||||
)
|
||||
|
||||
# Run the task
|
||||
crew = Crew(
|
||||
agents=[gmail_agent],
|
||||
tasks=[notification_task]
|
||||
)
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Security
|
||||
- **Principle of Least Privilege**: Only grant the minimum permissions required for your agents' tasks
|
||||
- **Regular Audits**: Periodically review connected integrations and their permissions
|
||||
- **Secure Credentials**: Never hardcode credentials; use CrewAI's secure authentication flow
|
||||
|
||||
|
||||
### Filtering Tools
|
||||
On a deployed crew, you can specify which actions are available for each integration from the settings page of the service you connected to.
|
||||
|
||||
<Frame>
|
||||

|
||||
</Frame>
|
||||
|
||||
|
||||
### Scoped Deployments for Multi-User Organizations
|
||||
You can deploy your crew and scope each integration to a specific user. For example, a crew that connects to Google can use a specific user's Gmail account.
|
||||
|
||||
<Tip>
|
||||
This is useful for multi-user organizations where you want to scope an integration to a specific user.
|
||||
</Tip>
|
||||
|
||||
|
||||
Use the `user_bearer_token` to scope the integration to a specific user: when the crew is kicked off, it will use that user's bearer token to authenticate with the integration. If the user is not logged in, the crew will not use any connected integrations. Use the default bearer token to authenticate with the integrations that are deployed with the crew.
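As a rough sketch, the token can be supplied alongside the default enterprise token (the exact parameter placement is an assumption; the docs only name `user_bearer_token`):

```python
from crewai_tools import CrewaiEnterpriseTools

enterprise_tools = CrewaiEnterpriseTools(
    enterprise_token="default_enterprise_token",      # used when no user is logged in
    user_bearer_token="logged_in_user_bearer_token",  # scopes integrations to this user
)
```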
|
||||
|
||||
<Frame>
|
||||

|
||||
</Frame>
|
||||
|
||||
|
||||
|
||||
### Getting Help
|
||||
|
||||
<Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
|
||||
Contact our support team for assistance with integration setup or troubleshooting.
|
||||
</Card>
|
||||
@@ -277,23 +277,22 @@ This pattern allows you to combine direct data passing with state updates for ma
|
||||
|
||||
One of CrewAI's most powerful features is the ability to persist flow state across executions. This enables workflows that can be paused, resumed, and even recovered after failures.
|
||||
|
||||
### The @persist() Decorator
|
||||
### The @persist Decorator
|
||||
|
||||
The `@persist()` decorator automates state persistence, saving your flow's state at key points in execution.
|
||||
The `@persist` decorator automates state persistence, saving your flow's state at key points in execution.
|
||||
|
||||
#### Class-Level Persistence
|
||||
|
||||
When applied at the class level, `@persist()` saves state after every method execution:
|
||||
When applied at the class level, `@persist` saves state after every method execution:
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, listen, start
|
||||
from crewai.flow.persistence import persist
|
||||
from crewai.flow.flow import Flow, listen, persist, start
|
||||
from pydantic import BaseModel
|
||||
|
||||
class CounterState(BaseModel):
|
||||
value: int = 0
|
||||
|
||||
@persist() # Apply to the entire flow class
|
||||
@persist # Apply to the entire flow class
|
||||
class PersistentCounterFlow(Flow[CounterState]):
|
||||
@start()
|
||||
def increment(self):
|
||||
@@ -320,11 +319,10 @@ print(f"Second run result: {result2}") # Will be higher due to persisted state
|
||||
|
||||
#### Method-Level Persistence
|
||||
|
||||
For more granular control, you can apply `@persist()` to specific methods:
|
||||
For more granular control, you can apply `@persist` to specific methods:
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, listen, start
|
||||
from crewai.flow.persistence import persist
|
||||
from crewai.flow.flow import Flow, listen, persist, start
|
||||
|
||||
class SelectivePersistFlow(Flow):
|
||||
@start()
|
||||
@@ -332,7 +330,7 @@ class SelectivePersistFlow(Flow):
|
||||
self.state["count"] = 1
|
||||
return "First step"
|
||||
|
||||
@persist() # Only persist after this method
|
||||
@persist # Only persist after this method
|
||||
@listen(first_step)
|
||||
def important_step(self, prev_result):
|
||||
self.state["count"] += 1
|
||||
|
||||
Binary file not shown (before: 1.2 MiB).
Binary file not shown (before: 54 KiB).
Binary file not shown (before: 362 KiB).
Binary file not shown (before: 49 KiB).
@@ -22,7 +22,7 @@ Watch this video tutorial for a step-by-step demonstration of the installation p
|
||||
<Note>
|
||||
**Python Version Requirements**
|
||||
|
||||
CrewAI requires `Python >=3.10 and <3.14`. Here's how to check your version:
|
||||
CrewAI requires `Python >=3.10 and <=3.13`. Here's how to check your version:
|
||||
```bash
|
||||
python3 --version
|
||||
```
|
||||
|
||||
@@ -1,152 +0,0 @@
|
||||
---
|
||||
title: Maxim Integration
|
||||
description: Start Agent monitoring, evaluation, and observability
|
||||
icon: bars-staggered
|
||||
---
|
||||
|
||||
# Maxim Integration
|
||||
|
||||
Maxim AI provides comprehensive agent monitoring, evaluation, and observability for your CrewAI applications. With Maxim's one-line integration, you can easily trace and analyse agent interactions, performance metrics, and more.
|
||||
|
||||
|
||||
## Features: One Line Integration
|
||||
|
||||
- **End-to-End Agent Tracing**: Monitor the complete lifecycle of your agents
|
||||
- **Performance Analytics**: Track latency, tokens consumed, and costs
|
||||
- **Hyperparameter Monitoring**: View the configuration details of your agent runs
|
||||
- **Tool Call Tracking**: Observe when and how agents use their tools
|
||||
- **Advanced Visualisation**: Understand agent trajectories through intuitive dashboards
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Python version >=3.10
|
||||
- A Maxim account ([sign up here](https://getmaxim.ai/))
|
||||
- A CrewAI project
|
||||
|
||||
### Installation
|
||||
|
||||
Install the Maxim SDK via pip:
|
||||
|
||||
```bash
|
||||
pip install maxim-py>=3.6.2
|
||||
```
|
||||
|
||||
Or add it to your `requirements.txt`:
|
||||
|
||||
```
|
||||
maxim-py>=3.6.2
|
||||
```
|
||||
|
||||
|
||||
### Basic Setup
|
||||
|
||||
### 1. Set up environment variables
|
||||
|
||||
```bash
|
||||
### Environment Variables Setup
|
||||
|
||||
# Create a `.env` file in your project root:
|
||||
|
||||
# Maxim API Configuration
|
||||
MAXIM_API_KEY=your_api_key_here
|
||||
MAXIM_LOG_REPO_ID=your_repo_id_here
|
||||
```
|
||||
|
||||
### 2. Import the required packages
|
||||
|
||||
```python
|
||||
from crewai import Agent, Task, Crew, Process
|
||||
from maxim import Maxim
|
||||
from maxim.logger.crewai import instrument_crewai
|
||||
```
|
||||
|
||||
### 3. Initialise Maxim with your API key
|
||||
|
||||
```python
|
||||
# Initialize Maxim logger
|
||||
maxim = Maxim()
logger = maxim.logger()
|
||||
|
||||
# Instrument CrewAI with just one line
|
||||
instrument_crewai(logger)
|
||||
```
|
||||
|
||||
### 4. Create and run your CrewAI application as usual
|
||||
|
||||
```python
|
||||
|
||||
# Create your agent
|
||||
researcher = Agent(
|
||||
role='Senior Research Analyst',
|
||||
goal='Uncover cutting-edge developments in AI',
|
||||
backstory="You are an expert researcher at a tech think tank...",
|
||||
verbose=True,
|
||||
llm=llm  # assumes an LLM instance defined earlier
|
||||
)
|
||||
|
||||
# Define the task
|
||||
research_task = Task(
|
||||
description="Research the latest AI advancements...",
|
||||
expected_output="",
|
||||
agent=researcher
|
||||
)
|
||||
|
||||
# Configure and run the crew
|
||||
crew = Crew(
|
||||
agents=[researcher],
|
||||
tasks=[research_task],
|
||||
verbose=True
|
||||
)
|
||||
|
||||
try:
|
||||
result = crew.kickoff()
|
||||
finally:
|
||||
maxim.cleanup() # Ensure cleanup happens even if errors occur
|
||||
```
|
||||
|
||||
That's it! All your CrewAI agent interactions will now be logged and available in your Maxim dashboard.
|
||||
|
||||
Check this Google Colab Notebook for a quick reference - [Notebook](https://colab.research.google.com/drive/1ZKIZWsmgQQ46n8TH9zLsT1negKkJA6K8?usp=sharing)
|
||||
|
||||
## Viewing Your Traces
|
||||
|
||||
After running your CrewAI application:
|
||||
|
||||

|
||||
|
||||
1. Log in to your [Maxim Dashboard](https://getmaxim.ai/dashboard)
|
||||
2. Navigate to your repository
|
||||
3. View detailed agent traces, including:
|
||||
- Agent conversations
|
||||
- Tool usage patterns
|
||||
- Performance metrics
|
||||
- Cost analytics
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
- **No traces appearing**: Ensure your API key and repository ID are correct
|
||||
- Ensure you've **called `instrument_crewai()`** ***before*** running your crew. This initializes logging hooks correctly.
|
||||
- Set `debug=True` in your `instrument_crewai()` call to surface any internal errors:
|
||||
|
||||
```python
|
||||
instrument_crewai(logger, debug=True)
|
||||
```
|
||||
|
||||
- Configure your agents with `verbose=True` to capture detailed logs:
|
||||
|
||||
```python
|
||||
|
||||
agent = CrewAgent(..., verbose=True)
|
||||
```
|
||||
|
||||
- Double-check that `instrument_crewai()` is called **before** creating or executing agents. This might be obvious, but it's a common oversight.
|
||||
|
||||
### Support
|
||||
|
||||
If you encounter any issues:
|
||||
|
||||
- Check the [Maxim Documentation](https://getmaxim.ai/docs)
|
||||
- Maxim Github [Link](https://github.com/maximhq)
|
||||
@@ -212,7 +212,7 @@ Follow the steps below to get Crewing! 🚣♂️
|
||||
|
||||
1. Log in to your CrewAI Enterprise account (create a free account at [app.crewai.com](https://app.crewai.com))
|
||||
2. Open Crew Studio
|
||||
3. Type what is the automation you're trying to build
|
||||
3. Type what is the automation you're tryign to build
|
||||
4. Create your tasks visually and connect them in sequence
|
||||
5. Configure your inputs and click "Download Code" or "Deploy"
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[project]
|
||||
name = "crewai"
|
||||
version = "0.130.0"
|
||||
version = "0.126.0"
|
||||
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10,<3.14"
|
||||
@@ -11,11 +11,23 @@ dependencies = [
|
||||
# Core Dependencies
|
||||
"pydantic>=2.4.2",
|
||||
"openai>=1.13.3",
|
||||
"litellm==1.72.0",
|
||||
"litellm==1.68.0",
|
||||
"instructor>=1.3.3",
|
||||
# Text Processing
|
||||
"pdfplumber>=0.11.4",
|
||||
"regex>=2024.9.11",
|
||||
# Security
|
||||
# Telemetry and Monitoring
|
||||
"opentelemetry-api>=1.30.0",
|
||||
"opentelemetry-sdk>=1.30.0",
|
||||
"opentelemetry-exporter-otlp-proto-http>=1.30.0",
|
||||
# Data Handling
|
||||
"chromadb>=0.5.23",
|
||||
"tokenizers>=0.20.3",
|
||||
"onnxruntime==1.22.0",
|
||||
"openpyxl>=3.1.5",
|
||||
"pyvis>=0.3.2",
|
||||
# Authentication and Security
|
||||
"auth0-python>=4.7.1",
|
||||
"python-dotenv>=1.0.0",
|
||||
# Configuration and Utils
|
||||
"click>=8.1.7",
|
||||
@@ -35,7 +47,7 @@ Documentation = "https://docs.crewai.com"
|
||||
Repository = "https://github.com/crewAIInc/crewAI"
|
||||
|
||||
[project.optional-dependencies]
|
||||
tools = ["crewai-tools~=0.47.1"]
|
||||
tools = ["crewai-tools~=0.46.0"]
|
||||
embeddings = [
|
||||
"tiktoken~=0.8.0"
|
||||
]
|
||||
@@ -56,53 +68,6 @@ docling = [
|
||||
aisuite = [
|
||||
"aisuite>=0.1.10",
|
||||
]
|
||||
memory = [
|
||||
"chromadb>=0.5.23",
|
||||
"tokenizers>=0.20.3",
|
||||
"tiktoken~=0.8.0",
|
||||
"mem0ai>=0.1.94",
|
||||
]
|
||||
knowledge = [
|
||||
"chromadb>=0.5.23",
|
||||
"pdfplumber>=0.11.4",
|
||||
"openpyxl>=3.1.5",
|
||||
"docling>=2.12.0",
|
||||
"tiktoken~=0.8.0",
|
||||
]
|
||||
telemetry = [
|
||||
"opentelemetry-api>=1.30.0",
|
||||
"opentelemetry-sdk>=1.30.0",
|
||||
"opentelemetry-exporter-otlp-proto-http>=1.30.0",
|
||||
]
|
||||
visualization = [
|
||||
"pyvis>=0.3.2",
|
||||
]
|
||||
auth = [
|
||||
"auth0-python>=4.7.1",
|
||||
]
|
||||
llm-integrations = [
|
||||
"aisuite>=0.1.10",
|
||||
"onnxruntime==1.22.0",
|
||||
]
|
||||
all = [
|
||||
"chromadb>=0.5.23",
|
||||
"tokenizers>=0.20.3",
|
||||
"tiktoken~=0.8.0",
|
||||
"mem0ai>=0.1.94",
|
||||
"pdfplumber>=0.11.4",
|
||||
"openpyxl>=3.1.5",
|
||||
"docling>=2.12.0",
|
||||
"opentelemetry-api>=1.30.0",
|
||||
"opentelemetry-sdk>=1.30.0",
|
||||
"opentelemetry-exporter-otlp-proto-http>=1.30.0",
|
||||
"pyvis>=0.3.2",
|
||||
"auth0-python>=4.7.1",
|
||||
"aisuite>=0.1.10",
|
||||
"onnxruntime==1.22.0",
|
||||
"agentops>=0.3.0",
|
||||
"crewai-tools~=0.47.1",
|
||||
"pandas>=2.2.3",
|
||||
]
|
||||
|
||||
[tool.uv]
|
||||
dev-dependencies = [
|
||||
|
||||
score.json (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"score": 4
|
||||
}
|
||||
@@ -18,7 +18,7 @@ warnings.filterwarnings(
|
||||
category=UserWarning,
|
||||
module="pydantic.main",
|
||||
)
|
||||
__version__ = "0.130.0"
|
||||
__version__ = "0.126.0"
|
||||
__all__ = [
|
||||
"Agent",
|
||||
"Crew",
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import shutil
|
||||
import subprocess
|
||||
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Type, Union
|
||||
from typing import Any, Dict, List, Literal, Optional, Sequence, Type, Union
|
||||
|
||||
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
|
||||
from pydantic import Field, InstanceOf, PrivateAttr, field_validator, model_validator
|
||||
|
||||
from crewai.agents import CacheHandler
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
@@ -71,6 +71,7 @@ class Agent(BaseAgent):
|
||||
"""
|
||||
|
||||
_times_executed: int = PrivateAttr(default=0)
|
||||
_last_reasoning_output: Optional[Any] = PrivateAttr(default=None)
|
||||
max_execution_time: Optional[int] = Field(
|
||||
default=None,
|
||||
description="Maximum execution time for an agent to execute a task",
|
||||
@@ -135,6 +136,21 @@ class Agent(BaseAgent):
|
||||
default=None,
|
||||
description="Maximum number of reasoning attempts before executing the task. If None, will try until ready.",
|
||||
)
|
||||
reasoning_interval: Optional[int] = Field(
|
||||
default=None,
|
||||
description="Interval of steps after which the agent should reason again during execution. If None, reasoning only happens before execution.",
|
||||
)
|
||||
|
||||
@field_validator('reasoning_interval')
|
||||
@classmethod
|
||||
def validate_reasoning_interval(cls, v):
|
||||
if v is not None and v < 1:
|
||||
raise ValueError("reasoning_interval must be >= 1")
|
||||
return v
|
||||
adaptive_reasoning: bool = Field(
|
||||
default=False,
|
||||
description="Whether the agent should adaptively decide when to reason during execution based on context.",
|
||||
)
|
||||
embedder: Optional[Dict[str, Any]] = Field(
|
||||
default=None,
|
||||
description="Embedder configuration for the agent.",
|
||||
@@ -155,13 +171,6 @@ class Agent(BaseAgent):
|
||||
default=None,
|
||||
description="The Agent's role to be used from your repository.",
|
||||
)
|
||||
guardrail: Optional[Union[Callable[[Any], Tuple[bool, Any]], str]] = Field(
|
||||
default=None,
|
||||
description="Function or string description of a guardrail to validate agent output"
|
||||
)
|
||||
guardrail_max_retries: int = Field(
|
||||
default=3, description="Maximum number of retries when guardrail fails"
|
||||
)
|
||||
|
||||
@model_validator(mode="before")
|
||||
def validate_from_repository(cls, v):
|
||||
@@ -173,6 +182,9 @@ class Agent(BaseAgent):
|
||||
def post_init_setup(self):
|
||||
self.agent_ops_agent_name = self.role
|
||||
|
||||
if getattr(self, "adaptive_reasoning", False) and not getattr(self, "reasoning", False):
|
||||
self.reasoning = True
|
||||
|
||||
self.llm = create_llm(self.llm)
|
||||
if self.function_calling_llm and not isinstance(
|
||||
self.function_calling_llm, BaseLLM
|
||||
@@ -384,6 +396,44 @@ class Agent(BaseAgent):
|
||||
else:
|
||||
task_prompt = self._use_trained_data(task_prompt=task_prompt)
|
||||
|
||||
if self.reasoning:
|
||||
try:
|
||||
from crewai.utilities.reasoning_handler import (
|
||||
AgentReasoning,
|
||||
AgentReasoningOutput,
|
||||
)
|
||||
|
||||
reasoning_handler = AgentReasoning(
|
||||
task=task,
|
||||
agent=self,
|
||||
extra_context=context or "",
|
||||
)
|
||||
|
||||
reasoning_output: AgentReasoningOutput = reasoning_handler.handle_agent_reasoning()
|
||||
|
||||
# Store the reasoning output for the executor to use
|
||||
self._last_reasoning_output = reasoning_output
|
||||
|
||||
plan_text = reasoning_output.plan.plan
|
||||
|
||||
internal_plan_msg = (
|
||||
"### INTERNAL PLAN (do NOT reveal or repeat)\n" + plan_text
|
||||
)
|
||||
|
||||
task_prompt = (
|
||||
task_prompt
|
||||
+ "\n\n"
|
||||
+ internal_plan_msg
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
if hasattr(self, "_logger"):
|
||||
self._logger.log(
|
||||
"error", f"Error during reasoning process: {str(e)}"
|
||||
)
|
||||
else:
|
||||
print(f"Error during reasoning process: {str(e)}")
|
||||
|
||||
try:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
@@ -459,6 +509,10 @@ class Agent(BaseAgent):
|
||||
self,
|
||||
event=AgentExecutionCompletedEvent(agent=self, task=task, output=result),
|
||||
)
|
||||
|
||||
# Clean up reasoning output after task completion
|
||||
self._last_reasoning_output = None
|
||||
|
||||
return result
|
||||
|
||||
def _execute_with_timeout(self, task_prompt: str, task: Task, timeout: int) -> str:
|
||||
@@ -787,8 +841,6 @@ class Agent(BaseAgent):
|
||||
response_format=response_format,
|
||||
i18n=self.i18n,
|
||||
original_agent=self,
|
||||
guardrail=self.guardrail,
|
||||
guardrail_max_retries=self.guardrail_max_retries,
|
||||
)
|
||||
|
||||
return lite_agent.kickoff(messages)
|
||||
|
||||
@@ -7,7 +7,6 @@ from crewai.utilities import I18N
|
||||
from crewai.utilities.converter import ConverterError
|
||||
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
|
||||
from crewai.utilities.printer import Printer
|
||||
from crewai.utilities.events.event_listener import event_listener
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
@@ -126,38 +125,33 @@ class CrewAgentExecutorMixin:
|
||||
|
||||
def _ask_human_input(self, final_answer: str) -> str:
|
||||
"""Prompt human input with mode-appropriate messaging."""
|
||||
event_listener.formatter.pause_live_updates()
|
||||
|
||||
try:
|
||||
self._printer.print(
|
||||
content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"
|
||||
self._printer.print(
|
||||
content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"
|
||||
)
|
||||
|
||||
# Training mode prompt (single iteration)
|
||||
if self.crew and getattr(self.crew, "_train", False):
|
||||
prompt = (
|
||||
"\n\n=====\n"
|
||||
"## TRAINING MODE: Provide feedback to improve the agent's performance.\n"
|
||||
"This will be used to train better versions of the agent.\n"
|
||||
"Please provide detailed feedback about the result quality and reasoning process.\n"
|
||||
"=====\n"
|
||||
)
|
||||
# Regular human-in-the-loop prompt (multiple iterations)
|
||||
else:
|
||||
prompt = (
|
||||
"\n\n=====\n"
|
||||
"## HUMAN FEEDBACK: Provide feedback on the Final Result and Agent's actions.\n"
|
||||
"Please follow these guidelines:\n"
|
||||
" - If you are happy with the result, simply hit Enter without typing anything.\n"
|
||||
" - Otherwise, provide specific improvement requests.\n"
|
||||
" - You can provide multiple rounds of feedback until satisfied.\n"
|
||||
"=====\n"
|
||||
)
|
||||
|
||||
# Training mode prompt (single iteration)
|
||||
if self.crew and getattr(self.crew, "_train", False):
|
||||
prompt = (
|
||||
"\n\n=====\n"
|
||||
"## TRAINING MODE: Provide feedback to improve the agent's performance.\n"
|
||||
"This will be used to train better versions of the agent.\n"
|
||||
"Please provide detailed feedback about the result quality and reasoning process.\n"
|
||||
"=====\n"
|
||||
)
|
||||
# Regular human-in-the-loop prompt (multiple iterations)
|
||||
else:
|
||||
prompt = (
|
||||
"\n\n=====\n"
|
||||
"## HUMAN FEEDBACK: Provide feedback on the Final Result and Agent's actions.\n"
|
||||
"Please follow these guidelines:\n"
|
||||
" - If you are happy with the result, simply hit Enter without typing anything.\n"
|
||||
" - Otherwise, provide specific improvement requests.\n"
|
||||
" - You can provide multiple rounds of feedback until satisfied.\n"
|
||||
"=====\n"
|
||||
)
|
||||
|
||||
self._printer.print(content=prompt, color="bold_yellow")
|
||||
response = input()
|
||||
if response.strip() != "":
|
||||
self._printer.print(content="\nProcessing your feedback...", color="cyan")
|
||||
return response
|
||||
finally:
|
||||
event_listener.formatter.resume_live_updates()
|
||||
self._printer.print(content=prompt, color="bold_yellow")
|
||||
response = input()
|
||||
if response.strip() != "":
|
||||
self._printer.print(content="\nProcessing your feedback...", color="cyan")
|
||||
return response
|
||||
|
||||
src/crewai/agents/agent_state.py (new file, 386 lines)
@@ -0,0 +1,386 @@
|
||||
"""Agent state management for long-running tasks with focus on progress tracking."""
|
||||
|
||||
from typing import Any, Dict, List, Optional, Union, Set
|
||||
from pydantic import BaseModel, Field
|
||||
from datetime import datetime
|
||||
import json
|
||||
|
||||
|
||||
class CriterionProgress(BaseModel):
|
||||
"""Progress tracking for a single acceptance criterion."""
|
||||
criterion: str = Field(description="The acceptance criterion")
|
||||
status: str = Field(default="not_started", description="Status: not_started, in_progress, completed")
|
||||
progress_notes: str = Field(default="", description="Specific progress made towards this criterion")
|
||||
completion_percentage: int = Field(default=0, description="Estimated completion percentage (0-100)")
|
||||
remaining_work: str = Field(default="", description="What still needs to be done for this criterion")
|
||||
|
||||
# Enhanced tracking
|
||||
processed_items: Set[str] = Field(default_factory=set, description="IDs or identifiers of processed items")
|
||||
total_items_expected: Optional[int] = Field(default=None, description="Total number of items expected (if known)")
|
||||
items_to_process: List[str] = Field(default_factory=list, description="Queue of specific items to process next")
|
||||
last_updated: datetime = Field(default_factory=datetime.now)
|
||||
|
||||
|
||||
class ProgressLog(BaseModel):
|
||||
"""Single log entry for progress tracking."""
|
||||
timestamp: datetime = Field(default_factory=datetime.now)
|
||||
action: str = Field(description="What action was taken")
|
||||
result: str = Field(description="Result or outcome of the action")
|
||||
items_processed: List[str] = Field(default_factory=list, description="Items processed in this action")
|
||||
criterion: Optional[str] = Field(default=None, description="Related acceptance criterion")
|
||||
|
||||
|
||||
class AgentState(BaseModel):
|
||||
"""Enhanced state management with deterministic progress tracking.
|
||||
|
||||
This state helps agents maintain focus during long executions by tracking
|
||||
specific progress against each acceptance criterion with detailed logging.
|
||||
"""
|
||||
|
||||
# Core planning elements
|
||||
plan: List[str] = Field(
|
||||
default_factory=list,
|
||||
description="The current plan steps"
|
||||
)
|
||||
|
||||
acceptance_criteria: List[str] = Field(
|
||||
default_factory=list,
|
||||
description="Concrete criteria that must be met for task completion"
|
||||
)
|
||||
|
||||
# Progress tracking
|
||||
criteria_progress: Dict[str, CriterionProgress] = Field(
|
||||
default_factory=dict,
|
||||
description="Detailed progress for each acceptance criterion"
|
||||
)
|
||||
|
||||
# Data storage
|
||||
scratchpad: Dict[str, Any] = Field(
|
||||
default_factory=dict,
|
||||
description="Storage for intermediate results and data"
|
||||
)
|
||||
|
||||
# Simple tracking
|
||||
current_focus: str = Field(
|
||||
default="",
|
||||
description="What the agent should be focusing on right now"
|
||||
)
|
||||
|
||||
next_steps: List[str] = Field(
|
||||
default_factory=list,
|
||||
description="Immediate next steps to take"
|
||||
)
|
||||
|
||||
overall_progress: int = Field(
|
||||
default=0,
|
||||
description="Overall task completion percentage (0-100)"
|
||||
)
|
||||
|
||||
# Enhanced tracking
|
||||
progress_logs: List[ProgressLog] = Field(
|
||||
default_factory=list,
|
||||
description="Detailed log of all progress made"
|
||||
)
|
||||
|
||||
work_queue: List[Dict[str, Any]] = Field(
|
||||
default_factory=list,
|
||||
description="Queue of specific work items to process"
|
||||
)
|
||||
|
||||
# Metadata tracking
|
||||
metadata: Dict[str, Any] = Field(
|
||||
default_factory=dict,
|
||||
description="Additional metadata for tracking (e.g., total count expectations)"
|
||||
)
|
||||
|
||||
def initialize_criteria_progress(self) -> None:
|
||||
"""Initialize progress tracking for all acceptance criteria."""
|
||||
for criterion in self.acceptance_criteria:
|
||||
if criterion not in self.criteria_progress:
|
||||
self.criteria_progress[criterion] = CriterionProgress(criterion=criterion)
|
||||
|
||||
def update_criterion_progress(
|
||||
self,
|
||||
criterion: str,
|
||||
status: str,
|
||||
progress_notes: str,
|
||||
completion_percentage: int,
|
||||
remaining_work: str,
|
||||
processed_items: Optional[List[str]] = None,
|
||||
items_to_process: Optional[List[str]] = None,
|
||||
total_items_expected: Optional[int] = None
|
||||
) -> None:
|
||||
"""Update progress for a specific criterion with enhanced tracking."""
|
||||
if criterion in self.criteria_progress:
|
||||
progress = self.criteria_progress[criterion]
|
||||
progress.status = status
|
||||
progress.progress_notes = progress_notes
|
||||
progress.completion_percentage = max(0, min(100, completion_percentage))
|
||||
progress.remaining_work = remaining_work
|
||||
progress.last_updated = datetime.now()
|
||||
|
||||
# Update processed items
|
||||
if processed_items:
|
||||
progress.processed_items.update(processed_items)
|
||||
|
||||
# Update items to process queue
|
||||
if items_to_process is not None:
|
||||
progress.items_to_process = items_to_process
|
||||
|
||||
# Update total expected if provided
|
||||
if total_items_expected is not None:
|
||||
progress.total_items_expected = total_items_expected
|
||||
|
||||
# Recalculate completion percentage based on actual items if possible
|
||||
if progress.total_items_expected and progress.total_items_expected > 0:
|
||||
actual_percentage = int((len(progress.processed_items) / progress.total_items_expected) * 100)
|
||||
progress.completion_percentage = actual_percentage
|
||||
|
||||
# Update overall progress
|
||||
self._recalculate_overall_progress()
|
||||
|
||||
def _recalculate_overall_progress(self) -> None:
|
||||
"""Recalculate overall progress based on all criteria."""
|
||||
if not self.criteria_progress:
|
||||
self.overall_progress = 0
|
||||
return
|
||||
|
||||
total_progress = sum(p.completion_percentage for p in self.criteria_progress.values())
|
||||
self.overall_progress = int(total_progress / len(self.criteria_progress))
|
||||
|
||||
def add_to_scratchpad(self, key: str, value: Any) -> None:
|
||||
"""Add or update a value in the scratchpad."""
|
||||
self.scratchpad[key] = value
|
||||
|
||||
# Analyze the data for item tracking
|
||||
self._analyze_scratchpad_for_items(key, value)
|
||||
|
||||
def _analyze_scratchpad_for_items(self, key: str, value: Any) -> None:
|
||||
"""Analyze scratchpad data to extract trackable items."""
|
||||
# If it's a list, try to extract IDs
|
||||
if isinstance(value, list) and value:
|
||||
item_ids = []
|
||||
for item in value:
|
||||
if isinstance(item, dict):
|
||||
# Look for common ID fields
|
||||
for id_field in ['id', 'ID', 'uid', 'uuid', 'message_id', 'email_id']:
|
||||
if id_field in item:
|
||||
item_ids.append(str(item[id_field]))
|
||||
break
|
||||
|
||||
if item_ids:
|
||||
# Store metadata about this list
|
||||
self.metadata[f"{key}_ids"] = item_ids
|
||||
self.metadata[f"{key}_count"] = len(value)
|
||||
|
||||
def log_progress(self, action: str, result: str, items_processed: Optional[List[str]] = None, criterion: Optional[str] = None) -> None:
|
||||
"""Add a progress log entry."""
|
||||
log_entry = ProgressLog(
|
||||
action=action,
|
||||
result=result,
|
||||
items_processed=items_processed or [],
|
||||
criterion=criterion
|
||||
)
|
||||
self.progress_logs.append(log_entry)
|
||||
|
||||
def add_to_work_queue(self, work_item: Dict[str, Any]) -> None:
|
||||
"""Add an item to the work queue."""
|
||||
self.work_queue.append(work_item)
|
||||
|
||||
def get_next_work_item(self) -> Optional[Dict[str, Any]]:
|
||||
"""Get and remove the next item from the work queue."""
|
||||
if self.work_queue:
|
||||
return self.work_queue.pop(0)
|
||||
return None
|
||||
|
||||
def set_focus_and_next_steps(self, focus: str, next_steps: List[str]) -> None:
|
||||
"""Update current focus and next steps."""
|
||||
self.current_focus = focus
|
||||
self.next_steps = next_steps
|
||||
|
||||
def get_progress_context(self) -> str:
|
||||
"""Generate a focused progress update for the agent."""
|
||||
context = f"📊 PROGRESS UPDATE (Overall: {self.overall_progress}%)\n"
|
||||
context += "="*50 + "\n\n"
|
||||
|
||||
# Current focus
|
||||
if self.current_focus:
|
||||
context += f"🎯 CURRENT FOCUS: {self.current_focus}\n\n"
|
||||
|
||||
# Progress on each criterion with detailed tracking
|
||||
if self.criteria_progress:
|
||||
context += "📋 ACCEPTANCE CRITERIA PROGRESS:\n"
|
||||
for criterion, progress in self.criteria_progress.items():
|
||||
status_emoji = "✅" if progress.status == "completed" else "🔄" if progress.status == "in_progress" else "⏸️"
|
||||
context += f"\n{status_emoji} {criterion}\n"
|
||||
|
||||
# Show detailed progress
|
||||
if progress.total_items_expected:
|
||||
context += f" Progress: {len(progress.processed_items)}/{progress.total_items_expected} items ({progress.completion_percentage}%)\n"
|
||||
else:
|
||||
context += f" Progress: {progress.completion_percentage}%"
|
||||
if progress.processed_items:
|
||||
context += f" - {len(progress.processed_items)} items processed"
|
||||
context += "\n"
|
||||
|
||||
if progress.progress_notes:
|
||||
context += f" Notes: {progress.progress_notes}\n"
|
||||
|
||||
# Show next items to process
|
||||
if progress.items_to_process and progress.status != "completed":
|
||||
next_items = progress.items_to_process[:3] # Show next 3
|
||||
context += f" Next items: {', '.join(next_items)}"
|
||||
if len(progress.items_to_process) > 3:
|
||||
context += f" (and {len(progress.items_to_process) - 3} more)"
|
||||
context += "\n"
|
||||
|
||||
if progress.remaining_work and progress.status != "completed":
|
||||
context += f" Still needed: {progress.remaining_work}\n"
|
||||
|
||||
# Work queue status
|
||||
if self.work_queue:
|
||||
context += f"\n📝 WORK QUEUE: {len(self.work_queue)} items pending\n"
|
||||
next_work = self.work_queue[0]
|
||||
context += f" Next: {next_work.get('description', 'Process next item')}\n"
|
||||
|
||||
# Next steps
|
||||
if self.next_steps:
|
||||
context += f"\n📍 IMMEDIATE NEXT STEPS:\n"
|
||||
for i, step in enumerate(self.next_steps, 1):
|
||||
context += f"{i}. {step}\n"
|
||||
|
||||
# Available data
|
||||
if self.scratchpad:
|
||||
context += f"\n💾 AVAILABLE DATA IN SCRATCHPAD:\n"
|
||||
for key, value in self.scratchpad.items():
|
||||
if isinstance(value, list):
|
||||
context += f" • '{key}' - {len(value)} items"
|
||||
if f"{key}_ids" in self.metadata:
|
||||
context += f" (IDs tracked)"
|
||||
context += "\n"
|
||||
elif isinstance(value, dict):
|
||||
context += f" • '{key}' - dictionary data\n"
|
||||
else:
|
||||
context += f" • '{key}'\n"
|
||||
|
||||
# Recent progress logs
|
||||
if self.progress_logs:
|
||||
context += f"\n📜 RECENT ACTIVITY:\n"
|
||||
for log in self.progress_logs[-3:]: # Show last 3 logs
|
||||
context += f" • {log.timestamp.strftime('%H:%M:%S')} - {log.action}"
|
||||
if log.items_processed:
|
||||
context += f" ({len(log.items_processed)} items)"
|
||||
context += "\n"
|
||||
|
||||
context += "\n" + "="*50
|
||||
return context
|
||||
|
||||
def analyze_scratchpad_for_criterion_progress(self, criterion: str) -> Dict[str, Any]:
|
||||
"""Analyze scratchpad data to determine specific progress on a criterion."""
|
||||
analysis = {
|
||||
"relevant_data": [],
|
||||
"item_count": 0,
|
||||
"processed_ids": set(),
|
||||
"data_completeness": 0,
|
||||
"specific_gaps": []
|
||||
}
|
||||
|
||||
criterion_lower = criterion.lower()
|
||||
|
||||
# Look for data that relates to this criterion
|
||||
for key, value in self.scratchpad.items():
|
||||
key_lower = key.lower()
|
||||
|
||||
# Check if this data is relevant to the criterion
|
||||
is_relevant = False
|
||||
for keyword in criterion_lower.split():
|
||||
if len(keyword) > 3 and keyword in key_lower: # Skip short words
|
||||
is_relevant = True
|
||||
break
|
||||
|
||||
if is_relevant:
|
||||
analysis["relevant_data"].append(key)
|
||||
|
||||
# Count items and extract IDs
|
||||
if isinstance(value, list):
|
||||
analysis["item_count"] += len(value)
|
||||
|
||||
# Try to extract IDs from metadata
|
||||
if f"{key}_ids" in self.metadata:
|
||||
analysis["processed_ids"].update(self.metadata[f"{key}_ids"])
|
||||
|
||||
elif isinstance(value, dict):
|
||||
analysis["item_count"] += 1
|
||||
|
||||
# Calculate completeness based on what we know
|
||||
if analysis["item_count"] > 0:
|
||||
# Check if criterion mentions specific numbers
|
||||
import re
|
||||
number_match = re.search(r'\b(\d+)\b', criterion)
|
||||
if number_match:
|
||||
expected_count = int(number_match.group(1))
|
||||
analysis["data_completeness"] = min(100, int((analysis["item_count"] / expected_count) * 100))
|
||||
if analysis["item_count"] < expected_count:
|
||||
analysis["specific_gaps"].append(f"Need {expected_count - analysis['item_count']} more items")
|
||||
else:
|
||||
# For criteria without specific numbers, use heuristics
|
||||
if "all" in criterion_lower or "every" in criterion_lower:
|
||||
# For "all" criteria, we need to be more careful
|
||||
analysis["data_completeness"] = 50 if analysis["item_count"] > 0 else 0
|
||||
analysis["specific_gaps"].append("Verify all items are included")
|
||||
else:
|
||||
analysis["data_completeness"] = min(100, analysis["item_count"] * 20) # Rough estimate
|
||||
|
||||
return analysis
|
||||
|
||||
def generate_specific_next_steps(self, criterion: str) -> List[str]:
|
||||
"""Generate specific, actionable next steps for a criterion."""
|
||||
analysis = self.analyze_scratchpad_for_criterion_progress(criterion)
|
||||
progress = self.criteria_progress.get(criterion)
|
||||
next_steps = []
|
||||
|
||||
if not progress:
|
||||
return ["Initialize progress tracking for this criterion"]
|
||||
|
||||
# If we have a queue of items to process
|
||||
if progress.items_to_process:
|
||||
next_item = progress.items_to_process[0]
|
||||
next_steps.append(f"Query/process item: {next_item}")
|
||||
if len(progress.items_to_process) > 1:
|
||||
next_steps.append(f"Then process {len(progress.items_to_process) - 1} remaining items")
|
||||
|
||||
# If we have processed some items but not all
|
||||
elif progress.processed_items and progress.total_items_expected:
|
||||
remaining = progress.total_items_expected - len(progress.processed_items)
|
||||
if remaining > 0:
|
||||
next_steps.append(f"Process {remaining} more items to reach target of {progress.total_items_expected}")
|
||||
|
||||
# If we have data but haven't accessed it
|
||||
elif analysis["relevant_data"] and not progress.processed_items:
|
||||
for data_key in analysis["relevant_data"][:2]: # First 2 relevant keys
|
||||
next_steps.append(f"Access and process data from '{data_key}'")
|
||||
|
||||
# Generic steps based on criterion keywords
|
||||
else:
|
||||
criterion_lower = criterion.lower()
|
||||
if "email" in criterion_lower:
|
||||
next_steps.append("Use email search/fetch tool to gather emails")
|
||||
elif "analyze" in criterion_lower or "summary" in criterion_lower:
|
||||
next_steps.append("Access stored data and create analysis/summary")
|
||||
else:
|
||||
next_steps.append(f"Use appropriate tools to gather data for: {criterion}")
|
||||
|
||||
return next_steps
|
||||
|
||||
def reset(self) -> None:
|
||||
"""Reset state for a new task."""
|
||||
self.plan = []
|
||||
self.acceptance_criteria = []
|
||||
self.criteria_progress = {}
|
||||
self.scratchpad = {}
|
||||
self.current_focus = ""
|
||||
self.next_steps = []
|
||||
self.overall_progress = 0
|
||||
self.progress_logs = []
|
||||
self.work_queue = []
|
||||
self.metadata = {}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -5,27 +5,16 @@ from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
try:
|
||||
from auth0.authentication.token_verifier import (
|
||||
AsymmetricSignatureVerifier,
|
||||
TokenVerifier,
|
||||
)
|
||||
AUTH0_AVAILABLE = True
|
||||
except ImportError:
|
||||
AUTH0_AVAILABLE = False
|
||||
AsymmetricSignatureVerifier = None
|
||||
TokenVerifier = None
|
||||
from auth0.authentication.token_verifier import (
|
||||
AsymmetricSignatureVerifier,
|
||||
TokenVerifier,
|
||||
)
|
||||
from cryptography.fernet import Fernet
|
||||
|
||||
from .constants import AUTH0_CLIENT_ID, AUTH0_DOMAIN
|
||||
|
||||
|
||||
def validate_token(id_token: str) -> None:
|
||||
if not AUTH0_AVAILABLE:
|
||||
raise ImportError(
|
||||
"Auth0 is required for authentication functionality. "
|
||||
"Please install it with: pip install 'crewai[auth]'"
|
||||
)
|
||||
"""
|
||||
Verify the token and its precedence
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
|
||||
from requests import HTTPError
|
||||
from crewai.cli.command import BaseCommand, PlusAPIMixin
|
||||
from crewai.cli.config import Settings
|
||||
|
||||
@@ -17,7 +16,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
response = self.plus_api_client.get_organizations()
|
||||
response.raise_for_status()
|
||||
orgs = response.json()
|
||||
|
||||
|
||||
if not orgs:
|
||||
console.print("You don't belong to any organizations yet.", style="yellow")
|
||||
return
|
||||
@@ -27,14 +26,8 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
table.add_column("ID", style="green")
|
||||
for org in orgs:
|
||||
table.add_row(org["name"], org["uuid"])
|
||||
|
||||
|
||||
console.print(table)
|
||||
except HTTPError as e:
|
||||
if e.response.status_code == 401:
|
||||
console.print("You are not logged in to any organization. Use 'crewai login' to login.", style="bold red")
|
||||
return
|
||||
console.print(f"Failed to retrieve organization list: {str(e)}", style="bold red")
|
||||
raise SystemExit(1)
|
||||
except Exception as e:
|
||||
console.print(f"Failed to retrieve organization list: {str(e)}", style="bold red")
|
||||
raise SystemExit(1)
|
||||
@@ -44,24 +37,18 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
response = self.plus_api_client.get_organizations()
|
||||
response.raise_for_status()
|
||||
orgs = response.json()
|
||||
|
||||
|
||||
org = next((o for o in orgs if o["uuid"] == org_id), None)
|
||||
if not org:
|
||||
console.print(f"Organization with id '{org_id}' not found.", style="bold red")
|
||||
return
|
||||
|
||||
|
||||
settings = Settings()
|
||||
settings.org_name = org["name"]
|
||||
settings.org_uuid = org["uuid"]
|
||||
settings.dump()
|
||||
|
||||
|
||||
console.print(f"Successfully switched to {org['name']} ({org['uuid']})", style="bold green")
|
||||
except HTTPError as e:
|
||||
if e.response.status_code == 401:
|
||||
console.print("You are not logged in to any organization. Use 'crewai login' to login.", style="bold red")
|
||||
return
|
||||
console.print(f"Failed to retrieve organization list: {str(e)}", style="bold red")
|
||||
raise SystemExit(1)
|
||||
except Exception as e:
|
||||
console.print(f"Failed to switch organization: {str(e)}", style="bold red")
|
||||
raise SystemExit(1)
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import os
|
||||
import certifi
|
||||
import json
|
||||
import time
|
||||
from collections import defaultdict
|
||||
@@ -165,10 +163,8 @@ def fetch_provider_data(cache_file):
|
||||
Returns:
|
||||
- dict or None: The fetched provider data or None if the operation fails.
|
||||
"""
|
||||
ssl_config = os.environ['SSL_CERT_FILE'] = certifi.where()
|
||||
|
||||
try:
|
||||
response = requests.get(JSON_URL, stream=True, timeout=60, verify=ssl_config)
|
||||
response = requests.get(JSON_URL, stream=True, timeout=60)
|
||||
response.raise_for_status()
|
||||
data = download_data(response)
|
||||
with open(cache_file, "w") as f:
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.130.0,<1.0.0"
|
||||
"crewai[tools]>=0.126.0,<1.0.0"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.130.0,<1.0.0",
|
||||
"crewai[tools]>=0.126.0,<1.0.0",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.130.0"
|
||||
"crewai[tools]>=0.126.0"
|
||||
]
|
||||
|
||||
[tool.crewai]
|
||||
|
||||
@@ -91,7 +91,6 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
console.print(
|
||||
f"[green]Found these tools to publish: {', '.join([e['name'] for e in available_exports])}[/green]"
|
||||
)
|
||||
self._print_current_organization()
|
||||
|
||||
with tempfile.TemporaryDirectory() as temp_build_dir:
|
||||
subprocess.run(
|
||||
@@ -137,7 +136,6 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
)
|
||||
|
||||
def install(self, handle: str):
|
||||
self._print_current_organization()
|
||||
get_response = self.plus_api_client.get_tool(handle)
|
||||
|
||||
if get_response.status_code == 404:
|
||||
@@ -184,7 +182,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
settings.dump()
|
||||
|
||||
console.print(
|
||||
f"Successfully authenticated to the tool repository as {settings.org_name} ({settings.org_uuid}).", style="bold green"
|
||||
"Successfully authenticated to the tool repository.", style="bold green"
|
||||
)
|
||||
|
||||
def _add_package(self, tool_details: dict[str, Any]):
|
||||
@@ -242,10 +240,3 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
)
|
||||
|
||||
return env
|
||||
|
||||
def _print_current_organization(self):
|
||||
settings = Settings()
|
||||
if settings.org_uuid:
|
||||
console.print(f"Current organization: {settings.org_name} ({settings.org_uuid})", style="bold blue")
|
||||
else:
|
||||
console.print("No organization currently set. We recommend setting one before using: `crewai org switch <org_id>` command.", style="yellow")
|
||||
|
||||
@@ -314,7 +314,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
def create_crew_memory(self) -> "Crew":
|
||||
"""Initialize private memory attributes."""
|
||||
self._external_memory = (
|
||||
# External memory doesn’t support a default value since it was designed to be managed entirely externally
|
||||
# External memory doesn't support a default value since it was designed to be managed entirely externally
|
||||
self.external_memory.set_crew(self) if self.external_memory else None
|
||||
)
|
||||
|
||||
@@ -655,6 +655,8 @@ class Crew(FlowTrackable, BaseModel):
|
||||
if self.planning:
|
||||
self._handle_crew_planning()
|
||||
|
||||
metrics: List[UsageMetrics] = []
|
||||
|
||||
if self.process == Process.sequential:
|
||||
result = self._run_sequential_process()
|
||||
elif self.process == Process.hierarchical:
|
||||
@@ -667,8 +669,11 @@ class Crew(FlowTrackable, BaseModel):
|
||||
for after_callback in self.after_kickoff_callbacks:
|
||||
result = after_callback(result)
|
||||
|
||||
self.usage_metrics = self.calculate_usage_metrics()
|
||||
metrics += [agent._token_process.get_summary() for agent in self.agents]
|
||||
|
||||
self.usage_metrics = UsageMetrics()
|
||||
for metric in metrics:
|
||||
self.usage_metrics.add_usage_metrics(metric)
|
||||
return result
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
@@ -1076,6 +1081,23 @@ class Crew(FlowTrackable, BaseModel):
|
||||
token_usage=token_usage,
|
||||
)
|
||||
|
||||
def _finish_execution(self, final_string_output: str) -> None:
|
||||
if self.max_rpm:
|
||||
self._rpm_controller.stop_rpm_counter()
|
||||
|
||||
def calculate_usage_metrics(self) -> UsageMetrics:
|
||||
"""Calculates and returns the usage metrics."""
|
||||
total_usage_metrics = UsageMetrics()
|
||||
for agent in self.agents:
|
||||
if hasattr(agent, "_token_process"):
|
||||
token_sum = agent._token_process.get_summary()
|
||||
total_usage_metrics.add_usage_metrics(token_sum)
|
||||
if self.manager_agent and hasattr(self.manager_agent, "_token_process"):
|
||||
token_sum = self.manager_agent._token_process.get_summary()
|
||||
total_usage_metrics.add_usage_metrics(token_sum)
|
||||
self.usage_metrics = total_usage_metrics
|
||||
return total_usage_metrics
|
||||
|
||||
def _process_async_tasks(
|
||||
self,
|
||||
futures: List[Tuple[Task, Future[TaskOutput], int]],
|
||||
@@ -1279,23 +1301,6 @@ class Crew(FlowTrackable, BaseModel):
|
||||
for agent in self.agents:
|
||||
agent.interpolate_inputs(inputs)
|
||||
|
||||
def _finish_execution(self, final_string_output: str) -> None:
|
||||
if self.max_rpm:
|
||||
self._rpm_controller.stop_rpm_counter()
|
||||
|
||||
def calculate_usage_metrics(self) -> UsageMetrics:
|
||||
"""Calculates and returns the usage metrics."""
|
||||
total_usage_metrics = UsageMetrics()
|
||||
for agent in self.agents:
|
||||
if hasattr(agent, "_token_process"):
|
||||
token_sum = agent._token_process.get_summary()
|
||||
total_usage_metrics.add_usage_metrics(token_sum)
|
||||
if self.manager_agent and hasattr(self.manager_agent, "_token_process"):
|
||||
token_sum = self.manager_agent._token_process.get_summary()
|
||||
total_usage_metrics.add_usage_metrics(token_sum)
|
||||
self.usage_metrics = total_usage_metrics
|
||||
return total_usage_metrics
|
||||
|
||||
def test(
|
||||
self,
|
||||
n_iterations: int,
|
||||
|
||||
@@ -20,8 +20,7 @@ class FlowTrackable(BaseModel):
|
||||
)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def _set_parent_flow(self) -> "FlowTrackable":
|
||||
max_depth = 5
|
||||
def _set_parent_flow(self, max_depth: int = 5) -> "FlowTrackable":
|
||||
frame = inspect.currentframe()
|
||||
|
||||
try:
|
||||
|
||||
@@ -1,18 +1,14 @@
|
||||
# flow_visualizer.py
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
from pyvis.network import Network
|
||||
PYVIS_AVAILABLE = True
|
||||
except ImportError:
|
||||
PYVIS_AVAILABLE = False
|
||||
Network = None
|
||||
from pyvis.network import Network
|
||||
|
||||
from crewai.flow.config import COLORS, NODE_STYLES
|
||||
from crewai.flow.html_template_handler import HTMLTemplateHandler
|
||||
from crewai.flow.legend_generator import generate_legend_items_html, get_legend_items
|
||||
from crewai.flow.path_utils import safe_path_join
|
||||
from crewai.flow.path_utils import safe_path_join, validate_path_exists
|
||||
from crewai.flow.utils import calculate_node_levels
|
||||
from crewai.flow.visualization_utils import (
|
||||
add_edges,
|
||||
@@ -67,12 +63,6 @@ class FlowPlot:
|
||||
RuntimeError
|
||||
If network visualization generation fails.
|
||||
"""
|
||||
if not PYVIS_AVAILABLE:
|
||||
raise ImportError(
|
||||
"Pyvis is required for flow visualization. "
|
||||
"Please install it with: pip install 'crewai[visualization]'"
|
||||
)
|
||||
|
||||
if not filename or not isinstance(filename, str):
|
||||
raise ValueError("Filename must be a non-empty string")
|
||||
|
||||
@@ -232,11 +222,5 @@ def plot_flow(flow, filename="flow_plot"):
|
||||
IOError
|
||||
If file operations fail.
|
||||
"""
|
||||
if not PYVIS_AVAILABLE:
|
||||
raise ImportError(
|
||||
"Pyvis is required for flow visualization. "
|
||||
"Please install it with: pip install 'crewai[visualization]'"
|
||||
)
|
||||
|
||||
visualizer = FlowPlot(flow)
|
||||
visualizer.plot(filename)
|
||||
|
||||
@@ -1,33 +1,16 @@
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
|
||||
try:
|
||||
import pdfplumber
|
||||
PDFPLUMBER_AVAILABLE = True
|
||||
except ImportError:
|
||||
PDFPLUMBER_AVAILABLE = False
|
||||
pdfplumber = None
|
||||
|
||||
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource
|
||||
|
||||
|
||||
class PDFKnowledgeSource(BaseFileKnowledgeSource):
|
||||
"""A knowledge source that stores and queries PDF file content using embeddings."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""Initialize PDFKnowledgeSource and check for pdfplumber availability."""
|
||||
if not PDFPLUMBER_AVAILABLE:
|
||||
raise ImportError(
|
||||
"pdfplumber is required for PDF knowledge sources. "
|
||||
"Please install it with: pip install 'crewai[knowledge]'"
|
||||
)
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def load_content(self) -> Dict[Path, str]:
|
||||
"""Load and preprocess PDF file content."""
|
||||
if not PDFPLUMBER_AVAILABLE:
|
||||
return {}
|
||||
|
||||
pdfplumber = self._import_pdfplumber()
|
||||
|
||||
content = {}
|
||||
|
||||
for path in self.safe_file_paths:
|
||||
@@ -43,20 +26,20 @@ class PDFKnowledgeSource(BaseFileKnowledgeSource):
|
||||
|
||||
def _import_pdfplumber(self):
|
||||
"""Dynamically import pdfplumber."""
|
||||
if not PDFPLUMBER_AVAILABLE:
|
||||
try:
|
||||
import pdfplumber
|
||||
|
||||
return pdfplumber
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"pdfplumber is required for PDF knowledge sources. "
|
||||
"Please install it with: pip install 'crewai[knowledge]'"
|
||||
"pdfplumber is not installed. Please install it with: pip install pdfplumber"
|
||||
)
|
||||
return pdfplumber
|
||||
|
||||
def add(self) -> None:
|
||||
"""
|
||||
Add PDF file content to the knowledge source, chunk it, compute embeddings,
|
||||
and save the embeddings.
|
||||
"""
|
||||
self._import_pdfplumber()
|
||||
|
||||
for _, text in self.content.items():
|
||||
new_chunks = self._chunk_text(text)
|
||||
self.chunks.extend(new_chunks)
|
||||
|
||||
@@ -6,19 +6,11 @@ import os
|
||||
import shutil
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
try:
|
||||
import chromadb
|
||||
import chromadb.errors
|
||||
from chromadb.api import ClientAPI
|
||||
from chromadb.api.types import OneOrMany
|
||||
from chromadb.config import Settings
|
||||
CHROMADB_AVAILABLE = True
|
||||
except ImportError:
|
||||
CHROMADB_AVAILABLE = False
|
||||
chromadb = None
|
||||
ClientAPI = None
|
||||
OneOrMany = None
|
||||
Settings = None
|
||||
import chromadb
|
||||
import chromadb.errors
|
||||
from chromadb.api import ClientAPI
|
||||
from chromadb.api.types import OneOrMany
|
||||
from chromadb.config import Settings
|
||||
|
||||
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
|
||||
from crewai.utilities import EmbeddingConfigurator
|
||||
@@ -51,7 +43,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
||||
search efficiency.
|
||||
"""
|
||||
|
||||
collection: Optional[Any] = None
|
||||
collection: Optional[chromadb.Collection] = None
|
||||
collection_name: Optional[str] = "knowledge"
|
||||
app: Optional[ClientAPI] = None
|
||||
|
||||
@@ -60,12 +52,6 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
||||
embedder: Optional[Dict[str, Any]] = None,
|
||||
collection_name: Optional[str] = None,
|
||||
):
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for knowledge storage functionality. "
|
||||
"Please install it with: pip install 'crewai[knowledge]'"
|
||||
)
|
||||
|
||||
self.collection_name = collection_name
|
||||
self._set_embedder_config(embedder)
|
||||
|
||||
@@ -195,12 +181,6 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
||||
raise
|
||||
|
||||
def _create_default_embedding_function(self):
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for embedding functionality. "
|
||||
"Please install it with: pip install 'crewai[knowledge]'"
|
||||
)
|
||||
|
||||
from chromadb.utils.embedding_functions.openai_embedding_function import (
|
||||
OpenAIEmbeddingFunction,
|
||||
)
|
||||
|
||||
@@ -1,33 +1,9 @@
|
||||
import asyncio
|
||||
import inspect
|
||||
import uuid
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
List,
|
||||
Optional,
|
||||
Tuple,
|
||||
Type,
|
||||
Union,
|
||||
cast,
|
||||
get_args,
|
||||
get_origin,
|
||||
)
|
||||
from datetime import datetime
|
||||
from typing import Any, Callable, Dict, List, Optional, Type, Union, cast
|
||||
|
||||
try:
|
||||
from typing import Self
|
||||
except ImportError:
|
||||
from typing_extensions import Self
|
||||
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
Field,
|
||||
InstanceOf,
|
||||
PrivateAttr,
|
||||
model_validator,
|
||||
field_validator,
|
||||
)
|
||||
from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
|
||||
@@ -42,7 +18,6 @@ from crewai.llm import LLM
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
from crewai.tools.structured_tool import CrewStructuredTool
|
||||
from crewai.utilities import I18N
|
||||
from crewai.utilities.guardrail import process_guardrail
|
||||
from crewai.utilities.agent_utils import (
|
||||
enforce_rpm_limit,
|
||||
format_message_for_llm,
|
||||
@@ -58,10 +33,10 @@ from crewai.utilities.agent_utils import (
|
||||
parse_tools,
|
||||
process_llm_response,
|
||||
render_text_description_and_args,
|
||||
show_agent_logs,
|
||||
)
|
||||
from crewai.utilities.converter import generate_model_description
|
||||
from crewai.utilities.converter import convert_to_model, generate_model_description
|
||||
from crewai.utilities.events.agent_events import (
|
||||
AgentLogsExecutionEvent,
|
||||
LiteAgentExecutionCompletedEvent,
|
||||
LiteAgentExecutionErrorEvent,
|
||||
LiteAgentExecutionStartedEvent,
|
||||
@@ -171,17 +146,6 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
default=[], description="Callbacks to be used for the agent"
|
||||
)
|
||||
|
||||
# Guardrail Properties
|
||||
guardrail: Optional[Union[Callable[[LiteAgentOutput], Tuple[bool, Any]], str]] = (
|
||||
Field(
|
||||
default=None,
|
||||
description="Function or string description of a guardrail to validate agent output",
|
||||
)
|
||||
)
|
||||
guardrail_max_retries: int = Field(
|
||||
default=3, description="Maximum number of retries when guardrail fails"
|
||||
)
|
||||
|
||||
# State and Results
|
||||
tools_results: List[Dict[str, Any]] = Field(
|
||||
default=[], description="Results of the tools used by the agent."
|
||||
@@ -199,8 +163,6 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
_messages: List[Dict[str, str]] = PrivateAttr(default_factory=list)
|
||||
_iterations: int = PrivateAttr(default=0)
|
||||
_printer: Printer = PrivateAttr(default_factory=Printer)
|
||||
_guardrail: Optional[Callable] = PrivateAttr(default=None)
|
||||
_guardrail_retry_count: int = PrivateAttr(default=0)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def setup_llm(self):
|
||||
@@ -222,61 +184,6 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def ensure_guardrail_is_callable(self) -> Self:
|
||||
if callable(self.guardrail):
|
||||
self._guardrail = self.guardrail
|
||||
elif isinstance(self.guardrail, str):
|
||||
from crewai.tasks.llm_guardrail import LLMGuardrail
|
||||
|
||||
assert isinstance(self.llm, LLM)
|
||||
|
||||
self._guardrail = LLMGuardrail(description=self.guardrail, llm=self.llm)
|
||||
|
||||
return self
|
||||
|
||||
@field_validator("guardrail", mode="before")
|
||||
@classmethod
|
||||
def validate_guardrail_function(
|
||||
cls, v: Optional[Union[Callable, str]]
|
||||
) -> Optional[Union[Callable, str]]:
|
||||
"""Validate that the guardrail function has the correct signature.
|
||||
|
||||
If v is a callable, validate that it has the correct signature.
|
||||
If v is a string, return it as is.
|
||||
|
||||
Args:
|
||||
v: The guardrail function to validate or a string describing the guardrail task
|
||||
|
||||
Returns:
|
||||
The validated guardrail function or a string describing the guardrail task
|
||||
"""
|
||||
if v is None or isinstance(v, str):
|
||||
return v
|
||||
|
||||
# Check function signature
|
||||
sig = inspect.signature(v)
|
||||
if len(sig.parameters) != 1:
|
||||
raise ValueError(
|
||||
f"Guardrail function must accept exactly 1 parameter (LiteAgentOutput), "
|
||||
f"but it accepts {len(sig.parameters)}"
|
||||
)
|
||||
|
||||
# Check return annotation if present
|
||||
if sig.return_annotation is not sig.empty:
|
||||
if sig.return_annotation == Tuple[bool, Any]:
|
||||
return v
|
||||
|
||||
origin = get_origin(sig.return_annotation)
|
||||
args = get_args(sig.return_annotation)
|
||||
|
||||
if origin is not tuple or len(args) != 2 or args[0] is not bool:
|
||||
raise ValueError(
|
||||
"If return type is annotated, it must be Tuple[bool, Any]"
|
||||
)
|
||||
|
||||
return v
|
||||
|
||||
@property
|
||||
def key(self) -> str:
|
||||
"""Get the unique key for this agent instance."""
|
||||
@@ -316,7 +223,54 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
# Format messages for the LLM
|
||||
self._messages = self._format_messages(messages)
|
||||
|
||||
return self._execute_core(agent_info=agent_info)
|
||||
# Emit event for agent execution start
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LiteAgentExecutionStartedEvent(
|
||||
agent_info=agent_info,
|
||||
tools=self._parsed_tools,
|
||||
messages=messages,
|
||||
),
|
||||
)
|
||||
|
||||
# Execute the agent using invoke loop
|
||||
agent_finish = self._invoke_loop()
|
||||
formatted_result: Optional[BaseModel] = None
|
||||
if self.response_format:
|
||||
try:
|
||||
# Cast to BaseModel to ensure type safety
|
||||
result = self.response_format.model_validate_json(
|
||||
agent_finish.output
|
||||
)
|
||||
if isinstance(result, BaseModel):
|
||||
formatted_result = result
|
||||
except Exception as e:
|
||||
self._printer.print(
|
||||
content=f"Failed to parse output into response format: {str(e)}",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
# Calculate token usage metrics
|
||||
usage_metrics = self._token_process.get_summary()
|
||||
|
||||
# Create output
|
||||
output = LiteAgentOutput(
|
||||
raw=agent_finish.output,
|
||||
pydantic=formatted_result,
|
||||
agent_role=self.role,
|
||||
usage_metrics=usage_metrics.model_dump() if usage_metrics else None,
|
||||
)
|
||||
|
||||
# Emit completion event
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LiteAgentExecutionCompletedEvent(
|
||||
agent_info=agent_info,
|
||||
output=agent_finish.output,
|
||||
),
|
||||
)
|
||||
|
||||
return output
|
||||
|
||||
except Exception as e:
|
||||
self._printer.print(
|
||||
@@ -334,95 +288,6 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
)
|
||||
raise e
|
||||
|
||||
def _execute_core(self, agent_info: Dict[str, Any]) -> LiteAgentOutput:
|
||||
# Emit event for agent execution start
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LiteAgentExecutionStartedEvent(
|
||||
agent_info=agent_info,
|
||||
tools=self._parsed_tools,
|
||||
messages=self._messages,
|
||||
),
|
||||
)
|
||||
|
||||
# Execute the agent using invoke loop
|
||||
agent_finish = self._invoke_loop()
|
||||
formatted_result: Optional[BaseModel] = None
|
||||
if self.response_format:
|
||||
try:
|
||||
# Cast to BaseModel to ensure type safety
|
||||
result = self.response_format.model_validate_json(agent_finish.output)
|
||||
if isinstance(result, BaseModel):
|
||||
formatted_result = result
|
||||
except Exception as e:
|
||||
self._printer.print(
|
||||
content=f"Failed to parse output into response format: {str(e)}",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
# Calculate token usage metrics
|
||||
usage_metrics = self._token_process.get_summary()
|
||||
|
||||
# Create output
|
||||
output = LiteAgentOutput(
|
||||
raw=agent_finish.output,
|
||||
pydantic=formatted_result,
|
||||
agent_role=self.role,
|
||||
usage_metrics=usage_metrics.model_dump() if usage_metrics else None,
|
||||
)
|
||||
|
||||
# Process guardrail if set
|
||||
if self._guardrail is not None:
|
||||
guardrail_result = process_guardrail(
|
||||
output=output,
|
||||
guardrail=self._guardrail,
|
||||
retry_count=self._guardrail_retry_count,
|
||||
)
|
||||
|
||||
if not guardrail_result.success:
|
||||
if self._guardrail_retry_count >= self.guardrail_max_retries:
|
||||
raise Exception(
|
||||
f"Agent's guardrail failed validation after {self.guardrail_max_retries} retries. "
|
||||
f"Last error: {guardrail_result.error}"
|
||||
)
|
||||
self._guardrail_retry_count += 1
|
||||
if self.verbose:
|
||||
self._printer.print(
|
||||
f"Guardrail failed. Retrying ({self._guardrail_retry_count}/{self.guardrail_max_retries})..."
|
||||
f"\n{guardrail_result.error}"
|
||||
)
|
||||
|
||||
self._messages.append(
|
||||
{
|
||||
"role": "user",
|
||||
"content": guardrail_result.error
|
||||
or "Guardrail validation failed",
|
||||
}
|
||||
)
|
||||
|
||||
return self._execute_core(agent_info=agent_info)
|
||||
|
||||
# Apply guardrail result if available
|
||||
if guardrail_result.result is not None:
|
||||
if isinstance(guardrail_result.result, str):
|
||||
output.raw = guardrail_result.result
|
||||
elif isinstance(guardrail_result.result, BaseModel):
|
||||
output.pydantic = guardrail_result.result
|
||||
|
||||
usage_metrics = self._token_process.get_summary()
|
||||
output.usage_metrics = usage_metrics.model_dump() if usage_metrics else None
|
||||
|
||||
# Emit completion event
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LiteAgentExecutionCompletedEvent(
|
||||
agent_info=agent_info,
|
||||
output=agent_finish.output,
|
||||
),
|
||||
)
|
||||
|
||||
return output
|
||||
|
||||
async def kickoff_async(
|
||||
self, messages: Union[str, List[Dict[str, str]]]
|
||||
) -> LiteAgentOutput:
|
||||
@@ -602,13 +467,11 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
|
||||
def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
|
||||
"""Show logs for the agent's execution."""
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
AgentLogsExecutionEvent(
|
||||
agent_role=self.role,
|
||||
formatted_answer=formatted_answer,
|
||||
verbose=self.verbose,
|
||||
),
|
||||
show_agent_logs(
|
||||
printer=self._printer,
|
||||
agent_role=self.role,
|
||||
formatted_answer=formatted_answer,
|
||||
verbose=self.verbose,
|
||||
)
|
||||
|
||||
def _append_message(self, text: str, role: str = "assistant") -> None:
|
||||
|
||||
@@ -79,7 +79,7 @@ class FilteredStream(io.TextIOBase):
|
||||
"give feedback / get help" in lower_s
|
||||
or "litellm.info:" in lower_s
|
||||
or "litellm" in lower_s
|
||||
or "Consider using a smaller input or implementing a text splitting strategy" in lower_s
|
||||
or "consider using a smaller input or implementing a text splitting strategy" in lower_s
|
||||
):
|
||||
return 0
|
||||
|
||||
|
||||
12
src/crewai/llms/third_party/ai_suite.py
vendored
12
src/crewai/llms/third_party/ai_suite.py
vendored
@@ -1,22 +1,12 @@
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
try:
|
||||
import aisuite as ai
|
||||
AISUITE_AVAILABLE = True
|
||||
except ImportError:
|
||||
AISUITE_AVAILABLE = False
|
||||
ai = None
|
||||
import aisuite as ai
|
||||
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
|
||||
|
||||
class AISuiteLLM(BaseLLM):
|
||||
def __init__(self, model: str, temperature: Optional[float] = None, **kwargs):
|
||||
if not AISUITE_AVAILABLE:
|
||||
raise ImportError(
|
||||
"AISuite is required for AISuiteLLM. "
|
||||
"Please install it with: pip install 'crewai[llm-integrations]'"
|
||||
)
|
||||
super().__init__(model, temperature, **kwargs)
|
||||
self.client = ai.Client()
|
||||
|
||||
|
||||
@@ -4,9 +4,6 @@ from typing import Any, Dict, List
|
||||
from mem0 import Memory, MemoryClient
|
||||
|
||||
from crewai.memory.storage.interface import Storage
|
||||
from crewai.utilities.chromadb import sanitize_collection_name
|
||||
|
||||
MAX_AGENT_ID_LENGTH_MEM0 = 255
|
||||
|
||||
|
||||
class Mem0Storage(Storage):
|
||||
@@ -137,7 +134,7 @@ class Mem0Storage(Storage):
|
||||
agents = self.crew.agents
|
||||
agents = [self._sanitize_role(agent.role) for agent in agents]
|
||||
agents = "_".join(agents)
|
||||
return sanitize_collection_name(name=agents,max_collection_length=MAX_AGENT_ID_LENGTH_MEM0)
|
||||
return agents
|
||||
|
||||
def _get_config(self) -> Dict[str, Any]:
|
||||
return self.config or getattr(self, "memory_config", {}).get("config", {}) or {}
|
||||
|
||||
@@ -6,12 +6,7 @@ import shutil
|
||||
import uuid
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
try:
|
||||
from chromadb.api import ClientAPI
|
||||
CHROMADB_AVAILABLE = True
|
||||
except ImportError:
|
||||
CHROMADB_AVAILABLE = False
|
||||
ClientAPI = None
|
||||
from chromadb.api import ClientAPI
|
||||
|
||||
from crewai.memory.storage.base_rag_storage import BaseRAGStorage
|
||||
from crewai.utilities import EmbeddingConfigurator
|
||||
@@ -42,17 +37,11 @@ class RAGStorage(BaseRAGStorage):
|
||||
search efficiency.
|
||||
"""
|
||||
|
||||
app: Optional[Any] = None
|
||||
app: ClientAPI | None = None
|
||||
|
||||
def __init__(
|
||||
self, type, allow_reset=True, embedder_config=None, crew=None, path=None
|
||||
):
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for RAG storage functionality. "
|
||||
"Please install it with: pip install 'crewai[memory]'"
|
||||
)
|
||||
|
||||
super().__init__(type, allow_reset, embedder_config, crew)
|
||||
agents = crew.agents if crew else []
|
||||
agents = [self._sanitize_role(agent.role) for agent in agents]
|
||||
@@ -71,12 +60,6 @@ class RAGStorage(BaseRAGStorage):
|
||||
self.embedder_config = configurator.configure_embedder(self.embedder_config)
|
||||
|
||||
def _initialize_app(self):
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for RAG storage functionality. "
|
||||
"Please install it with: pip install 'crewai[memory]'"
|
||||
)
|
||||
|
||||
import chromadb
|
||||
from chromadb.config import Settings
|
||||
|
||||
@@ -182,12 +165,6 @@ class RAGStorage(BaseRAGStorage):
|
||||
)
|
||||
|
||||
def _create_default_embedding_function(self):
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for embedding functionality. "
|
||||
"Please install it with: pip install 'crewai[memory]'"
|
||||
)
|
||||
|
||||
from chromadb.utils.embedding_functions.openai_embedding_function import (
|
||||
OpenAIEmbeddingFunction,
|
||||
)
|
||||
|
||||
@@ -35,12 +35,12 @@ from pydantic_core import PydanticCustomError
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.security import Fingerprint, SecurityConfig
|
||||
from crewai.tasks.guardrail_result import GuardrailResult
|
||||
from crewai.tasks.output_format import OutputFormat
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
from crewai.utilities.config import process_config
|
||||
from crewai.utilities.constants import NOT_SPECIFIED
|
||||
from crewai.utilities.guardrail import process_guardrail, GuardrailResult
|
||||
from crewai.utilities.converter import Converter, convert_to_model
|
||||
from crewai.utilities.events import (
|
||||
TaskCompletedEvent,
|
||||
@@ -431,11 +431,7 @@ class Task(BaseModel):
|
||||
)
|
||||
|
||||
if self._guardrail:
|
||||
guardrail_result = process_guardrail(
|
||||
output=task_output,
|
||||
guardrail=self._guardrail,
|
||||
retry_count=self.retry_count
|
||||
)
|
||||
guardrail_result = self._process_guardrail(task_output)
|
||||
if not guardrail_result.success:
|
||||
if self.retry_count >= self.max_retries:
|
||||
raise Exception(
|
||||
@@ -554,7 +550,8 @@ Follow these guidelines:
|
||||
- Use * for italic text
|
||||
- Use - or * for bullet points
|
||||
- Use `code` for inline code
|
||||
- Use ```language for code blocks"""
|
||||
- Use ```language for code blocks
|
||||
- Don't start your answer with a code block"""
|
||||
tasks_slices.append(markdown_instruction)
|
||||
return "\n".join(tasks_slices)
|
||||
|
||||
|
||||
@@ -1,7 +1,15 @@
|
||||
from typing import Any, Callable, Optional, Tuple, Union
|
||||
"""
|
||||
Module for handling task guardrail validation results.
|
||||
|
||||
This module provides the GuardrailResult class which standardizes
|
||||
the way task guardrails return their validation results.
|
||||
"""
|
||||
|
||||
from typing import Any, Optional, Tuple, Union
|
||||
|
||||
from pydantic import BaseModel, field_validator
|
||||
|
||||
|
||||
class GuardrailResult(BaseModel):
|
||||
"""Result from a task guardrail execution.
|
||||
|
||||
@@ -46,48 +54,3 @@ class GuardrailResult(BaseModel):
|
||||
result=data if success else None,
|
||||
error=data if not success else None
|
||||
)
|
||||
|
||||
|
||||
def process_guardrail(output: Any, guardrail: Callable, retry_count: int) -> GuardrailResult:
|
||||
"""Process the guardrail for the agent output.
|
||||
|
||||
Args:
|
||||
output: The output to validate with the guardrail
|
||||
|
||||
Returns:
|
||||
GuardrailResult: The result of the guardrail validation
|
||||
"""
|
||||
from crewai.task import TaskOutput
|
||||
from crewai.lite_agent import LiteAgentOutput
|
||||
|
||||
assert isinstance(output, TaskOutput) or isinstance(output, LiteAgentOutput), "Output must be a TaskOutput or LiteAgentOutput"
|
||||
|
||||
assert guardrail is not None
|
||||
|
||||
from crewai.utilities.events import (
|
||||
LLMGuardrailCompletedEvent,
|
||||
LLMGuardrailStartedEvent,
|
||||
)
|
||||
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
|
||||
|
||||
crewai_event_bus.emit(
|
||||
None,
|
||||
LLMGuardrailStartedEvent(
|
||||
guardrail=guardrail, retry_count=retry_count
|
||||
),
|
||||
)
|
||||
|
||||
result = guardrail(output)
|
||||
guardrail_result = GuardrailResult.from_tuple(result)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
None,
|
||||
LLMGuardrailCompletedEvent(
|
||||
success=guardrail_result.success,
|
||||
result=guardrail_result.result,
|
||||
error=guardrail_result.error,
|
||||
retry_count=retry_count,
|
||||
),
|
||||
)
|
||||
|
||||
return guardrail_result
|
||||
@@ -8,34 +8,20 @@ import platform
|
||||
import warnings
|
||||
from contextlib import contextmanager
|
||||
from importlib.metadata import version
|
||||
from typing import TYPE_CHECKING, Any, Callable, Optional
|
||||
from typing import TYPE_CHECKING, Any, Optional
|
||||
import threading
|
||||
|
||||
try:
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
|
||||
OTLPSpanExporter,
|
||||
)
|
||||
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
|
||||
from opentelemetry.sdk.trace import TracerProvider
|
||||
from opentelemetry.sdk.trace.export import (
|
||||
BatchSpanProcessor,
|
||||
SpanExportResult,
|
||||
)
|
||||
from opentelemetry.trace import Span, Status, StatusCode
|
||||
OPENTELEMETRY_AVAILABLE = True
|
||||
except ImportError:
|
||||
OPENTELEMETRY_AVAILABLE = False
|
||||
trace = None
|
||||
OTLPSpanExporter = None
|
||||
SERVICE_NAME = None
|
||||
Resource = None
|
||||
TracerProvider = None
|
||||
BatchSpanProcessor = None
|
||||
SpanExportResult = None
|
||||
Span = None
|
||||
Status = None
|
||||
StatusCode = None
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
|
||||
OTLPSpanExporter,
|
||||
)
|
||||
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
|
||||
from opentelemetry.sdk.trace import TracerProvider
|
||||
from opentelemetry.sdk.trace.export import (
|
||||
BatchSpanProcessor,
|
||||
SpanExportResult,
|
||||
)
|
||||
from opentelemetry.trace import Span, Status, StatusCode
|
||||
|
||||
from crewai.telemetry.constants import (
|
||||
CREWAI_TELEMETRY_BASE_URL,
|
||||
@@ -57,30 +43,13 @@ if TYPE_CHECKING:
|
||||
from crewai.task import Task
|
||||
|
||||
|
||||
class SafeOTLPSpanExporter:
|
||||
def __init__(self, *args, **kwargs):
|
||||
if OPENTELEMETRY_AVAILABLE:
|
||||
self._exporter = OTLPSpanExporter(*args, **kwargs)
|
||||
else:
|
||||
self._exporter = None
|
||||
|
||||
def export(self, spans):
|
||||
if not OPENTELEMETRY_AVAILABLE or not self._exporter:
|
||||
return None
|
||||
class SafeOTLPSpanExporter(OTLPSpanExporter):
|
||||
def export(self, spans) -> SpanExportResult:
|
||||
try:
|
||||
return self._exporter.export(spans)
|
||||
return super().export(spans)
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
return SpanExportResult.FAILURE if SpanExportResult else None
|
||||
|
||||
def shutdown(self):
|
||||
"""Shutdown the exporter."""
|
||||
if OPENTELEMETRY_AVAILABLE and self._exporter and hasattr(self._exporter, 'shutdown'):
|
||||
try:
|
||||
return self._exporter.shutdown()
|
||||
except Exception as e:
|
||||
logger.error(f"Error during exporter shutdown: {e}")
|
||||
return None
|
||||
return SpanExportResult.FAILURE
|
||||
|
||||
|
||||
class Telemetry:
|
||||
@@ -104,25 +73,13 @@ class Telemetry:
|
||||
with cls._lock:
|
||||
if cls._instance is None:
|
||||
cls._instance = super(Telemetry, cls).__new__(cls)
|
||||
cls._instance._initialized = False
|
||||
return cls._instance
|
||||
|
||||
@classmethod
|
||||
def _reset_instance(cls):
|
||||
"""Reset the singleton instance for testing purposes."""
|
||||
with cls._lock:
|
||||
cls._instance = None
|
||||
|
||||
def __init__(self) -> None:
|
||||
if hasattr(self, '_initialized') and self._initialized:
|
||||
return
|
||||
|
||||
self.ready: bool = False
|
||||
self.trace_set: bool = False
|
||||
self._initialized: bool = True
|
||||
|
||||
if self._is_telemetry_disabled() or not OPENTELEMETRY_AVAILABLE:
|
||||
self.ready = False
|
||||
if self._is_telemetry_disabled():
|
||||
return
|
||||
|
||||
try:
|
||||
@@ -154,13 +111,8 @@ class Telemetry:
|
||||
return (
|
||||
os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true"
|
||||
or os.getenv("CREWAI_DISABLE_TELEMETRY", "false").lower() == "true"
|
||||
or not OPENTELEMETRY_AVAILABLE
|
||||
)
|
||||
|
||||
def _should_execute_telemetry(self) -> bool:
|
||||
"""Check if telemetry operations should be executed."""
|
||||
return self.ready and not self._is_telemetry_disabled() and OPENTELEMETRY_AVAILABLE
|
||||
|
||||
def set_tracer(self):
|
||||
if self.ready and not self.trace_set:
|
||||
try:
|
||||
@@ -171,9 +123,8 @@ class Telemetry:
|
||||
self.ready = False
|
||||
self.trace_set = False
|
||||
|
||||
def _safe_telemetry_operation(self, operation: Callable[[], None]) -> None:
|
||||
"""Execute telemetry operation safely, checking both readiness and environment variables."""
|
||||
if not self._should_execute_telemetry():
|
||||
def _safe_telemetry_operation(self, operation):
|
||||
if not self.ready:
|
||||
return
|
||||
try:
|
||||
operation()
|
||||
@@ -472,8 +423,7 @@ class Telemetry:
|
||||
|
||||
return span
|
||||
|
||||
self._safe_telemetry_operation(operation)
|
||||
return None
|
||||
return self._safe_telemetry_operation(operation)
|
||||
|
||||
def task_ended(self, span: Span, task: Task, crew: Crew):
|
||||
"""Records the completion of a task execution in a crew.
|
||||
@@ -823,8 +773,7 @@ class Telemetry:
|
||||
return span
|
||||
|
||||
if crew.share_crew:
|
||||
self._safe_telemetry_operation(operation)
|
||||
return operation()
|
||||
return self._safe_telemetry_operation(operation)
|
||||
return None
|
||||
|
||||
def end_crew(self, crew, final_string_output):
|
||||
|
||||
@@ -1 +1,6 @@
|
||||
"""Agent tools for crewAI."""
|
||||
|
||||
from .agent_tools import AgentTools
|
||||
from .scratchpad_tool import ScratchpadTool
|
||||
|
||||
__all__ = ["AgentTools", "ScratchpadTool"]
|
||||
|
||||
174
src/crewai/tools/agent_tools/scratchpad_tool.py
Normal file
174
src/crewai/tools/agent_tools/scratchpad_tool.py
Normal file
@@ -0,0 +1,174 @@
|
||||
"""Tool for accessing data stored in the agent's scratchpad during reasoning."""
|
||||
|
||||
from typing import Any, Dict, Optional, Type, Union, Callable
|
||||
from pydantic import BaseModel, Field
|
||||
from crewai.tools import BaseTool
|
||||
|
||||
|
||||
class ScratchpadToolSchema(BaseModel):
|
||||
"""Input schema for ScratchpadTool."""
|
||||
key: str = Field(
|
||||
...,
|
||||
description=(
|
||||
"The key name to retrieve data from the scratchpad. "
|
||||
"Must be one of the available keys shown in the tool description. "
|
||||
"Example: if 'email_data' is listed as available, use {\"key\": \"email_data\"}"
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class ScratchpadTool(BaseTool):
|
||||
"""Tool that allows agents to access data stored in their scratchpad during task execution.
|
||||
|
||||
This tool's description is dynamically updated to show all available keys,
|
||||
making it easy for agents to know what data they can retrieve.
|
||||
"""
|
||||
|
||||
name: str = "Access Scratchpad Memory"
|
||||
description: str = "Access data stored in your scratchpad memory during task execution."
|
||||
args_schema: Type[BaseModel] = ScratchpadToolSchema
|
||||
scratchpad_data: Dict[str, Any] = Field(default_factory=dict)
|
||||
|
||||
# Allow repeated usage of this tool - scratchpad access should not be limited
|
||||
cache_function: Callable = lambda _args, _result: False # Don't cache scratchpad access
|
||||
allow_repeated_usage: bool = True # Allow accessing the same key multiple times
|
||||
|
||||
def __init__(self, scratchpad_data: Optional[Dict[str, Any]] = None, **kwargs):
|
||||
"""Initialize the scratchpad tool with optional initial data.
|
||||
|
||||
Args:
|
||||
scratchpad_data: Initial scratchpad data (usually from agent state)
|
||||
"""
|
||||
super().__init__(**kwargs)
|
||||
if scratchpad_data:
|
||||
self.scratchpad_data = scratchpad_data
|
||||
self._update_description()
|
||||
|
||||
def _run(
|
||||
self,
|
||||
key: str,
|
||||
**kwargs: Any,
|
||||
) -> Union[str, Dict[str, Any], Any]:
|
||||
"""Retrieve data from the scratchpad using the specified key.
|
||||
|
||||
Args:
|
||||
key: The key to look up in the scratchpad
|
||||
|
||||
Returns:
|
||||
The value associated with the key, or an error message if not found
|
||||
"""
|
||||
print(f"[DEBUG] ScratchpadTool._run called with key: '{key}'")
|
||||
print(f"[DEBUG] Current scratchpad keys: {list(self.scratchpad_data.keys())}")
|
||||
print(f"[DEBUG] Scratchpad data size: {len(self.scratchpad_data)}")
|
||||
|
||||
if not self.scratchpad_data:
|
||||
return (
|
||||
"❌ SCRATCHPAD IS EMPTY\n\n"
|
||||
"The scratchpad does not contain any data yet.\n"
|
||||
"Data will be automatically stored here as you use other tools.\n"
|
||||
"Try executing other tools first to gather information.\n\n"
|
||||
"💡 TIP: Tools like search, read, or fetch operations will automatically store their results in the scratchpad."
|
||||
)
|
||||
|
||||
if key not in self.scratchpad_data:
|
||||
available_keys = list(self.scratchpad_data.keys())
|
||||
keys_formatted = "\n".join(f" - '{k}'" for k in available_keys)
|
||||
|
||||
# Create more helpful examples based on actual keys
|
||||
example_key = available_keys[0] if available_keys else 'example_key'
|
||||
|
||||
# Check if the user tried a similar key (case-insensitive or partial match)
|
||||
similar_keys = [k for k in available_keys if key.lower() in k.lower() or k.lower() in key.lower()]
|
||||
similarity_hint = ""
|
||||
if similar_keys:
|
||||
similarity_hint = f"\n\n🔍 Did you mean one of these?\n" + "\n".join(f" - '{k}'" for k in similar_keys)
|
||||
|
||||
return (
|
||||
f"❌ KEY NOT FOUND: '{key}'\n"
|
||||
f"{'='*50}\n\n"
|
||||
f"The key '{key}' does not exist in the scratchpad.\n\n"
|
||||
f"📦 AVAILABLE KEYS IN SCRATCHPAD:\n{keys_formatted}\n"
|
||||
f"{similarity_hint}\n\n"
|
||||
f"✅ CORRECT USAGE EXAMPLE:\n"
|
||||
f"Action: Access Scratchpad Memory\n"
|
||||
f"Action Input: {{\"key\": \"{example_key}\"}}\n\n"
|
||||
f"⚠️ IMPORTANT:\n"
|
||||
f"- Keys are case-sensitive and must match EXACTLY\n"
|
||||
f"- Use the exact key name from the list above\n"
|
||||
f"- Do NOT modify or guess key names\n\n"
|
||||
f"{'='*50}"
|
||||
)
|
||||
|
||||
value = self.scratchpad_data[key]
|
||||
|
||||
# Format the output nicely based on the type
|
||||
if isinstance(value, dict):
|
||||
import json
|
||||
formatted_output = f"✅ Successfully retrieved data for key '{key}':\n\n"
|
||||
formatted_output += json.dumps(value, indent=2)
|
||||
return formatted_output
|
||||
elif isinstance(value, list):
|
||||
import json
|
||||
formatted_output = f"✅ Successfully retrieved data for key '{key}':\n\n"
|
||||
formatted_output += json.dumps(value, indent=2)
|
||||
return formatted_output
|
||||
else:
|
||||
return f"✅ Successfully retrieved data for key '{key}':\n\n{str(value)}"
|
||||
|
||||
def update_scratchpad(self, new_data: Dict[str, Any]) -> None:
|
||||
"""Update the scratchpad data and refresh the tool description.
|
||||
|
||||
Args:
|
||||
new_data: The new complete scratchpad data
|
||||
"""
|
||||
self.scratchpad_data = new_data
|
||||
self._update_description()
|
||||
|
||||
def _update_description(self) -> None:
|
||||
"""Update the tool description to include all available keys."""
|
||||
base_description = (
|
||||
"Access data stored in your scratchpad memory during task execution.\n\n"
|
||||
"HOW TO USE THIS TOOL:\n"
|
||||
"Provide a JSON object with a 'key' field containing the exact name of the data you want to retrieve.\n"
|
||||
"Example: {\"key\": \"email_data\"}"
|
||||
)
|
||||
|
||||
if not self.scratchpad_data:
|
||||
self.description = (
|
||||
f"{base_description}\n\n"
|
||||
"📝 STATUS: Scratchpad is currently empty.\n"
|
||||
"Data will be automatically stored here as you use other tools."
|
||||
)
|
||||
return
|
||||
|
||||
# Build a description of available keys with a preview of their contents
|
||||
key_descriptions = []
|
||||
example_key = None
|
||||
|
||||
for key, value in self.scratchpad_data.items():
|
||||
if not example_key:
|
||||
example_key = key
|
||||
|
||||
# Create a brief description of what's stored
|
||||
if isinstance(value, dict):
|
||||
preview = f"dict with {len(value)} items"
|
||||
if 'data' in value and isinstance(value['data'], list):
|
||||
preview = f"list of {len(value['data'])} items"
|
||||
elif isinstance(value, list):
|
||||
preview = f"list of {len(value)} items"
|
||||
elif isinstance(value, str):
|
||||
preview = f"string ({len(value)} chars)"
|
||||
else:
|
||||
preview = type(value).__name__
|
||||
|
||||
key_descriptions.append(f" 📌 '{key}': {preview}")
|
||||
|
||||
available_keys_text = "\n".join(key_descriptions)
|
||||
|
||||
self.description = (
|
||||
f"{base_description}\n\n"
|
||||
f"📦 AVAILABLE DATA IN SCRATCHPAD:\n{available_keys_text}\n\n"
|
||||
f"💡 EXAMPLE USAGE:\n"
|
||||
f"To retrieve the '{example_key}' data, use:\n"
|
||||
f"Action Input: {{\"key\": \"{example_key}\"}}"
|
||||
)
|
||||
@@ -46,6 +46,8 @@ class BaseTool(BaseModel, ABC):
|
||||
"""Maximum number of times this tool can be used. None means unlimited usage."""
|
||||
current_usage_count: int = 0
|
||||
"""Current number of times this tool has been used."""
|
||||
allow_repeated_usage: bool = False
|
||||
"""Flag to allow this tool to be used repeatedly with the same arguments."""
|
||||
|
||||
@field_validator("args_schema", mode="before")
|
||||
@classmethod
|
||||
@@ -116,6 +118,8 @@ class BaseTool(BaseModel, ABC):
|
||||
result_as_answer=self.result_as_answer,
|
||||
max_usage_count=self.max_usage_count,
|
||||
current_usage_count=self.current_usage_count,
|
||||
allow_repeated_usage=self.allow_repeated_usage,
|
||||
cache_function=self.cache_function,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
|
||||
import inspect
|
||||
import textwrap
|
||||
from typing import Any, Callable, Optional, Union, get_type_hints
|
||||
@@ -27,6 +25,8 @@ class CrewStructuredTool:
|
||||
result_as_answer: bool = False,
|
||||
max_usage_count: int | None = None,
|
||||
current_usage_count: int = 0,
|
||||
allow_repeated_usage: bool = False,
|
||||
cache_function: Optional[Callable] = None,
|
||||
) -> None:
|
||||
"""Initialize the structured tool.
|
||||
|
||||
@@ -38,6 +38,8 @@ class CrewStructuredTool:
|
||||
result_as_answer: Whether to return the output directly
|
||||
max_usage_count: Maximum number of times this tool can be used. None means unlimited usage.
|
||||
current_usage_count: Current number of times this tool has been used.
|
||||
allow_repeated_usage: Whether to allow this tool to be used repeatedly with the same arguments.
|
||||
cache_function: Function that will be used to determine if the tool should be cached.
|
||||
"""
|
||||
self.name = name
|
||||
self.description = description
|
||||
@@ -47,6 +49,8 @@ class CrewStructuredTool:
|
||||
self.result_as_answer = result_as_answer
|
||||
self.max_usage_count = max_usage_count
|
||||
self.current_usage_count = current_usage_count
|
||||
self.allow_repeated_usage = allow_repeated_usage
|
||||
self.cache_function = cache_function if cache_function is not None else lambda _args=None, _result=None: True
|
||||
|
||||
# Validate the function signature matches the schema
|
||||
self._validate_function_signature()
|
||||
@@ -199,6 +203,42 @@ class CrewStructuredTool:
|
||||
validated_args = self.args_schema.model_validate(raw_args)
|
||||
return validated_args.model_dump()
|
||||
except Exception as e:
|
||||
# Check if this is a "Field required" error and try to fix it
|
||||
error_str = str(e)
|
||||
if "Field required" in error_str:
|
||||
# Try to parse missing fields from the error
|
||||
import re
|
||||
from pydantic import ValidationError
|
||||
|
||||
if isinstance(e, ValidationError):
|
||||
# Extract missing fields from validation error
|
||||
missing_fields = []
|
||||
for error_detail in e.errors():
|
||||
if error_detail.get('type') == 'missing':
|
||||
field_path = error_detail.get('loc', ())
|
||||
if field_path:
|
||||
missing_fields.append(field_path[0])
|
||||
|
||||
if missing_fields:
|
||||
# Create a copy of raw_args and add missing fields with None
|
||||
fixed_args = dict(raw_args) if isinstance(raw_args, dict) else {}
|
||||
for field in missing_fields:
|
||||
if field not in fixed_args:
|
||||
fixed_args[field] = None
|
||||
|
||||
# Try validation again with fixed args
|
||||
try:
|
||||
self._logger.log("debug", f"Auto-fixing missing fields: {missing_fields}")
|
||||
validated_args = self.args_schema.model_validate(fixed_args)
|
||||
return validated_args.model_dump()
|
||||
except Exception as retry_e:
|
||||
# If it still fails, raise the original error with additional context
|
||||
raise ValueError(
|
||||
f"Arguments validation failed: {e}\n"
|
||||
f"Attempted to auto-fix missing fields {missing_fields} but still failed: {retry_e}"
|
||||
)
|
||||
|
||||
# For other validation errors, raise as before
|
||||
raise ValueError(f"Arguments validation failed: {e}")
|
||||
|
||||
async def ainvoke(
|
||||
@@ -241,17 +281,7 @@ class CrewStructuredTool:
|
||||
) -> Any:
|
||||
"""Main method for tool execution."""
|
||||
parsed_args = self._parse_args(input)
|
||||
|
||||
if inspect.iscoroutinefunction(self.func):
|
||||
result = asyncio.run(self.func(**parsed_args, **kwargs))
|
||||
return result
|
||||
|
||||
result = self.func(**parsed_args, **kwargs)
|
||||
|
||||
if asyncio.iscoroutine(result):
|
||||
return asyncio.run(result)
|
||||
|
||||
return result
|
||||
return self.func(**parsed_args, **kwargs)
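Note: an illustrative, self-contained sketch, not from the diff, of the coroutine-handling dispatch that appears in this hunk.

```python
import asyncio
import inspect

def invoke(func, **kwargs):
    # async def functions are executed on a fresh event loop
    if inspect.iscoroutinefunction(func):
        return asyncio.run(func(**kwargs))
    result = func(**kwargs)
    # a sync wrapper may still hand back a coroutine object
    if asyncio.iscoroutine(result):
        return asyncio.run(result)
    return result

async def greet(name: str) -> str:
    return f"hello {name}"

print(invoke(greet, name="crew"))  # -> hello crew
```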
|
||||
|
||||
@property
|
||||
def args(self) -> dict:
|
||||
|
||||
@@ -149,7 +149,13 @@ class ToolUsage:
|
||||
tool: CrewStructuredTool,
|
||||
calling: Union[ToolCalling, InstructorToolCalling],
|
||||
) -> str:
|
||||
if self._check_tool_repeated_usage(calling=calling): # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
|
||||
# Check if tool allows repeated usage before blocking
|
||||
allows_repeated = False
|
||||
if hasattr(tool, 'allow_repeated_usage'):
|
||||
allows_repeated = tool.allow_repeated_usage
|
||||
elif hasattr(tool, '_tool') and hasattr(tool._tool, 'allow_repeated_usage'):
|
||||
allows_repeated = tool._tool.allow_repeated_usage
|
||||
if not allows_repeated and self._check_tool_repeated_usage(calling=calling): # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
|
||||
try:
|
||||
result = self._i18n.errors("task_repeated_usage").format(
|
||||
tool_names=self.tools_names
|
||||
@@ -180,7 +186,7 @@ class ToolUsage:
|
||||
event_data.update(self.agent.fingerprint)
|
||||
|
||||
crewai_event_bus.emit(self,ToolUsageStartedEvent(**event_data))
|
||||
|
||||
|
||||
started_at = time.time()
|
||||
from_cache = False
|
||||
result = None # type: ignore
|
||||
@@ -250,9 +256,54 @@ class ToolUsage:
|
||||
self._run_attempts += 1
|
||||
if self._run_attempts > self._max_parsing_attempts:
|
||||
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
|
||||
error_message = self._i18n.errors("tool_usage_exception").format(
|
||||
error=e, tool=tool.name, tool_inputs=tool.description
|
||||
)
|
||||
|
||||
# Check if this is a validation error with missing fields
|
||||
error_str = str(e)
|
||||
if "Arguments validation failed" in error_str and "Field required" in error_str:
|
||||
# Extract the field name that's missing
|
||||
import re
|
||||
field_match = re.search(r'(\w+)\s*Field required', error_str)
|
||||
if field_match:
|
||||
missing_field = field_match.group(1)
|
||||
|
||||
# Create a more helpful error message
|
||||
error_message = (
|
||||
f"Tool validation error: The '{missing_field}' parameter is required but was not provided.\n\n"
|
||||
f"SOLUTION: Include ALL parameters in your tool call, even optional ones (use null for optional parameters):\n"
|
||||
f'{{"tool_name": "{tool.name}", "arguments": {{'
|
||||
)
|
||||
|
||||
# Get all expected parameters from the tool schema
|
||||
if hasattr(tool, 'args_schema'):
|
||||
schema_props = tool.args_schema.model_json_schema().get('properties', {})
|
||||
param_examples = []
|
||||
for param_name, param_info in schema_props.items():
|
||||
if param_name == missing_field:
|
||||
param_examples.append(f'"{param_name}": "YOUR_VALUE_HERE"')
|
||||
else:
|
||||
# Check if it's optional by looking at required fields
|
||||
is_required = param_name in tool.args_schema.model_json_schema().get('required', [])
|
||||
if not is_required:
|
||||
param_examples.append(f'"{param_name}": null')
|
||||
else:
|
||||
param_examples.append(f'"{param_name}": "value"')
|
||||
|
||||
error_message += ', '.join(param_examples)
|
||||
error_message += '}}\n\n'
|
||||
|
||||
error_message += f"Original error: {e}\n"
|
||||
error_message += f"Tool description: {tool.description}"
|
||||
else:
|
||||
# Use the original error message
|
||||
error_message = self._i18n.errors("tool_usage_exception").format(
|
||||
error=e, tool=tool.name, tool_inputs=tool.description
|
||||
)
|
||||
else:
|
||||
# Use the original error message for non-validation errors
|
||||
error_message = self._i18n.errors("tool_usage_exception").format(
|
||||
error=e, tool=tool.name, tool_inputs=tool.description
|
||||
)
|
||||
|
||||
error = ToolUsageErrorException(
|
||||
f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
|
||||
).message
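Note: a hedged sketch, not from the diff, of how the corrective message above can be assembled from a tool's JSON schema; the `SearchArgs` model and the chosen missing field are hypothetical.

```python
from pydantic import BaseModel

class SearchArgs(BaseModel):
    query: str
    limit: int | None = None

missing_field = "query"                      # field reported by the validation error
schema = SearchArgs.model_json_schema()
required = set(schema.get("required", []))

param_examples = []
for name in schema.get("properties", {}):
    if name == missing_field:
        param_examples.append(f'"{name}": "YOUR_VALUE_HERE"')
    elif name not in required:
        param_examples.append(f'"{name}": null')
    else:
        param_examples.append(f'"{name}": "value"')

print("{" + ", ".join(param_examples) + "}")
# -> {"query": "YOUR_VALUE_HERE", "limit": null}
```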
|
||||
@@ -281,49 +332,54 @@ class ToolUsage:
|
||||
self.tools_handler.on_tool_use(
|
||||
calling=calling, output=result, should_cache=should_cache
|
||||
)
|
||||
self._telemetry.tool_usage(
|
||||
llm=self.function_calling_llm,
|
||||
tool_name=tool.name,
|
||||
attempts=self._run_attempts,
|
||||
)
|
||||
result = self._format_result(result=result) # type: ignore # "_format_result" of "ToolUsage" does not return a value (it only ever returns None)
|
||||
data = {
|
||||
"result": result,
|
||||
"tool_name": tool.name,
|
||||
"tool_args": calling.arguments,
|
||||
}
|
||||
self._telemetry.tool_usage(
|
||||
llm=self.function_calling_llm,
|
||||
tool_name=tool.name,
|
||||
attempts=self._run_attempts,
|
||||
)
|
||||
result = self._format_result(result=result) # type: ignore # "_format_result" of "ToolUsage" does not return a value (it only ever returns None)
|
||||
data = {
|
||||
"result": result,
|
||||
"tool_name": tool.name,
|
||||
"tool_args": calling.arguments,
|
||||
}
|
||||
|
||||
self.on_tool_use_finished(
|
||||
tool=tool,
|
||||
tool_calling=calling,
|
||||
from_cache=from_cache,
|
||||
started_at=started_at,
|
||||
result=result,
|
||||
)
|
||||
self.on_tool_use_finished(
|
||||
tool=tool,
|
||||
tool_calling=calling,
|
||||
from_cache=from_cache,
|
||||
started_at=started_at,
|
||||
result=result,
|
||||
)
|
||||
|
||||
if (
|
||||
hasattr(available_tool, "result_as_answer")
|
||||
and available_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
|
||||
):
|
||||
result_as_answer = available_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer"
|
||||
data["result_as_answer"] = result_as_answer # type: ignore
|
||||
if (
|
||||
hasattr(available_tool, "result_as_answer")
|
||||
and available_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
|
||||
):
|
||||
result_as_answer = available_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer"
|
||||
data["result_as_answer"] = result_as_answer # type: ignore
|
||||
|
||||
if self.agent and hasattr(self.agent, "tools_results"):
|
||||
self.agent.tools_results.append(data)
|
||||
if self.agent and hasattr(self.agent, "tools_results"):
|
||||
self.agent.tools_results.append(data)
|
||||
|
||||
if available_tool and hasattr(available_tool, 'current_usage_count'):
|
||||
available_tool.current_usage_count += 1
|
||||
if hasattr(available_tool, 'max_usage_count') and available_tool.max_usage_count is not None:
|
||||
self._printer.print(
|
||||
content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
|
||||
color="blue"
|
||||
)
|
||||
if available_tool and hasattr(available_tool, 'current_usage_count'):
|
||||
available_tool.current_usage_count += 1
|
||||
if hasattr(available_tool, 'max_usage_count') and available_tool.max_usage_count is not None:
|
||||
self._printer.print(
|
||||
content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
|
||||
color="blue"
|
||||
)
|
||||
|
||||
return result
|
||||
return result
|
||||
|
||||
def _format_result(self, result: Any) -> str:
|
||||
if self.task:
|
||||
self.task.used_tools += 1
|
||||
|
||||
# Handle None results explicitly
|
||||
if result is None:
|
||||
result = "No result returned from tool"
|
||||
|
||||
if self._should_remember_format():
|
||||
result = self._remember_format(result=result)
|
||||
return str(result)
|
||||
@@ -346,24 +402,34 @@ class ToolUsage:
|
||||
if not self.tools_handler:
|
||||
return False
|
||||
if last_tool_usage := self.tools_handler.last_used_tool:
|
||||
return (calling.tool_name == last_tool_usage.tool_name) and (
|
||||
# Add debug logging
|
||||
print(f"[DEBUG] _check_tool_repeated_usage:")
|
||||
print(f" Current tool: {calling.tool_name}")
|
||||
print(f" Current args: {calling.arguments}")
|
||||
print(f" Last tool: {last_tool_usage.tool_name}")
|
||||
print(f" Last args: {last_tool_usage.arguments}")
|
||||
|
||||
is_repeated = (calling.tool_name == last_tool_usage.tool_name) and (
|
||||
calling.arguments == last_tool_usage.arguments
|
||||
)
|
||||
print(f" Is repeated: {is_repeated}")
|
||||
|
||||
return is_repeated
|
||||
return False
|
||||
|
||||
|
||||
def _check_usage_limit(self, tool: Any, tool_name: str) -> str | None:
|
||||
"""Check if tool has reached its usage limit.
|
||||
|
||||
|
||||
Args:
|
||||
tool: The tool to check
|
||||
tool_name: The name of the tool (used for error message)
|
||||
|
||||
|
||||
Returns:
|
||||
Error message if limit reached, None otherwise
|
||||
"""
|
||||
if (
|
||||
hasattr(tool, 'max_usage_count')
|
||||
and tool.max_usage_count is not None
|
||||
hasattr(tool, 'max_usage_count')
|
||||
and tool.max_usage_count is not None
|
||||
and tool.current_usage_count >= tool.max_usage_count
|
||||
):
|
||||
return f"Tool '{tool_name}' has reached its usage limit of {tool.max_usage_count} times and cannot be used anymore."
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
|
||||
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
|
||||
"feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary.",
|
||||
"lite_agent_system_prompt_with_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
|
||||
"lite_agent_system_prompt_with_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the complete final answer to the original input question\n```",
|
||||
"lite_agent_system_prompt_without_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
|
||||
"lite_agent_response_format": "\nIMPORTANT: Your final answer MUST contain all the information requested in the following format: {response_format}\n\nIMPORTANT: Ensure the final output does not include any code block markers like ```json or ```python.",
|
||||
"knowledge_search_query": "The original query is: {task_prompt}.",
|
||||
@@ -41,7 +41,8 @@
|
||||
"wrong_tool_name": "You tried to use the tool {tool}, but it doesn't exist. You must use one of the following tools, use one at time: {tools}.",
|
||||
"tool_usage_exception": "I encountered an error while trying to use the tool. This was the error: {error}.\n Tool {tool} accepts these inputs: {tool_inputs}",
|
||||
"agent_tool_execution_error": "Error executing task with agent '{agent_role}'. Error: {error}",
|
||||
"validation_error": "### Previous attempt failed validation: {guardrail_result_error}\n\n\n### Previous result:\n{task_output}\n\n\nTry again, making sure to address the validation error."
|
||||
"validation_error": "### Previous attempt failed validation: {guardrail_result_error}\n\n\n### Previous result:\n{task_output}\n\n\nTry again, making sure to address the validation error.",
|
||||
"criteria_validation_error": "### Your answer did not meet all acceptance criteria\n\n### Unmet criteria:\n{unmet_criteria}\n\n### Previous result:\n{task_output}\n\n\nPlease revise your answer to ensure ALL acceptance criteria are met. Use the 'Access Scratchpad Memory' tool if you need to retrieve any previously collected information."
|
||||
},
|
||||
"tools": {
|
||||
"delegate_work": "Delegate a specific task to one of the following coworkers: {coworkers}\nThe input to this tool should be the coworker, the task you want them to do, and ALL necessary context to execute the task, they know nothing about the task, so share absolutely everything you know, don't reference things but instead explain them.",
|
||||
@@ -55,7 +56,12 @@
|
||||
"reasoning": {
|
||||
"initial_plan": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are creating a strategic plan for a task that requires your expertise and unique perspective.",
|
||||
"refine_plan": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are refining a strategic plan for a task that requires your expertise and unique perspective.",
|
||||
"create_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou have been assigned the following task:\n{description}\n\nExpected output:\n{expected_output}\n\nAvailable tools: {tools}\n\nBefore executing this task, create a detailed plan that leverages your expertise as {role} and outlines:\n1. Your understanding of the task from your professional perspective\n2. The key steps you'll take to complete it, drawing on your background and skills\n3. How you'll approach any challenges that might arise, considering your expertise\n4. How you'll strategically use the available tools based on your experience, exactly what tools to use and how to use them\n5. The expected outcome and how it aligns with your goal\n\nAfter creating your plan, assess whether you feel ready to execute the task or if you could do better.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan because [specific reason].\"",
|
||||
"refine_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou created the following plan for this task:\n{current_plan}\n\nHowever, you indicated that you're not ready to execute the task yet.\n\nPlease refine your plan further, drawing on your expertise as {role} to address any gaps or uncertainties. As you refine your plan, be specific about which available tools you will use, how you will use them, and why they are the best choices for each step. Clearly outline your tool usage strategy as part of your improved plan.\n\nAfter refining your plan, assess whether you feel ready to execute the task.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan further because [specific reason].\""
|
||||
"create_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou have been assigned the following task:\n{description}\n\nExpected output:\n{expected_output}\n\nAvailable tools: {tools}\n\nBefore executing this task, create a detailed plan that leverages your expertise as {role} and outlines:\n1. Your understanding of the task from your professional perspective\n2. The key steps you'll take to complete it, drawing on your background and skills\n3. How you'll approach any challenges that might arise, considering your expertise\n4. How you'll strategically use the available tools based on your experience, exactly what tools to use and how to use them\n5. The expected outcome and how it aligns with your goal\n\nIMPORTANT: Structure your plan as follows:\n\nSTEPS:\n1. [First concrete action step]\n2. [Second concrete action step]\n3. [Continue with numbered steps...]\n\nACCEPTANCE CRITERIA:\n- [First criterion that must be met]\n- [Second criterion that must be met]\n- [Continue with criteria...]\n\nRemember: your ultimate objective is to produce the most COMPLETE Final Answer that fully meets the **Expected output** criteria.\n\nAfter creating your plan, assess whether you feel ready to execute the task or if you could do better.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan because [specific reason].\"",
|
||||
"refine_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou created the following plan for this task:\n{current_plan}\n\nHowever, you indicated that you're not ready to execute the task yet.\n\nPlease refine your plan further, drawing on your expertise as {role} to address any gaps or uncertainties. As you refine your plan, be specific about which available tools you will use, how you will use them, and why they are the best choices for each step. Clearly outline your tool usage strategy as part of your improved plan.\n\nIMPORTANT: Structure your refined plan as follows:\n\nSTEPS:\n1. [First concrete action step]\n2. [Second concrete action step]\n3. [Continue with numbered steps...]\n\nACCEPTANCE CRITERIA:\n- [First criterion that must be met]\n- [Second criterion that must be met]\n- [Continue with criteria...]\n\nMake sure your refined strategy directly guides you toward producing the most COMPLETE Final Answer that fully satisfies the **Expected output**.\n\nAfter refining your plan, assess whether you feel ready to execute the task.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan further because [specific reason].\"",
|
||||
"adaptive_reasoning_decision": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are currently executing a task and need to decide whether to pause and reassess your plan based on the current context.",
|
||||
"mid_execution_reasoning": "You are currently executing a task and need to reassess your plan based on progress so far.\n\nTASK DESCRIPTION:\n{description}\n\nEXPECTED OUTPUT:\n{expected_output}\n\nCURRENT PROGRESS:\nSteps completed: {current_steps}\nTools used: {tools_used}\nProgress summary: {current_progress}\n\nRECENT CONVERSATION:\n{recent_messages}\n\nYour reassessment MUST focus on steering the remaining work toward a FINAL ANSWER that is as complete as possible and perfectly matches the **Expected output**.\n\nBased on the current progress and context, please reassess your plan for completing this task.\nConsider what has been accomplished, what challenges you've encountered, and what steps remain.\nAdjust your strategy if needed or confirm your current approach is still optimal.\n\nIMPORTANT: Structure your updated plan as follows:\n\nREMAINING STEPS:\n1. [First remaining action step]\n2. [Second remaining action step]\n3. [Continue with numbered steps...]\n\nUPDATED ACCEPTANCE CRITERIA (if changed):\n- [First criterion that must be met]\n- [Second criterion that must be met]\n- [Continue with criteria...]\n\nProvide a detailed updated plan for completing the task.\nEnd with \"READY: I am ready to continue executing the task.\" if you're confident in your plan.",
|
||||
"mid_execution_plan": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are reassessing your plan during task execution based on the progress made so far.",
|
||||
"mid_execution_reasoning_update": "I've reassessed my approach based on progress so far. Updated plan:\n\n{plan}",
|
||||
"adaptive_reasoning_context": "\n\nTASK DESCRIPTION:\n{description}\n\nEXPECTED OUTPUT:\n{expected_output}\n\nCURRENT EXECUTION CONTEXT:\n- Steps completed: {current_steps}\n- Tools used: {tools_used}\n- Progress summary: {current_progress}\n\nConsider whether the current approach is optimal or if a strategic pause to reassess would be beneficial. You should reason when:\n- You might be approaching the task inefficiently\n- The context suggests a different strategy might be better\n- You're uncertain about the next steps\n- The progress suggests you need to reconsider your approach\n- Tool usage patterns indicate issues (e.g., repeated failures, same tool used many times, rapid switching)\n- Multiple tools have returned errors or empty results\n- You're using the same tool repeatedly without making progress\n\nPay special attention to the TOOL USAGE STATISTICS section if present, as it reveals patterns that might not be obvious from the tool list alone.\n\nDecide whether reasoning/re-planning is needed at this point."
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,10 +20,7 @@ from crewai.utilities.errors import AgentRepositoryError
|
||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
LLMContextLengthExceededException,
|
||||
)
|
||||
from rich.console import Console
|
||||
from crewai.cli.config import Settings
|
||||
|
||||
console = Console()
|
||||
|
||||
def parse_tools(tools: List[BaseTool]) -> List[CrewStructuredTool]:
|
||||
"""Parse tools to be used for the task."""
|
||||
@@ -296,6 +293,8 @@ def handle_context_length(
|
||||
llm: Any,
|
||||
callbacks: List[Any],
|
||||
i18n: Any,
|
||||
task_description: Optional[str] = None,
|
||||
expected_output: Optional[str] = None,
|
||||
) -> None:
|
||||
"""Handle context length exceeded by either summarizing or raising an error.
|
||||
|
||||
@@ -306,13 +305,22 @@ def handle_context_length(
|
||||
llm: LLM instance for summarization
|
||||
callbacks: List of callbacks for LLM
|
||||
i18n: I18N instance for messages
|
||||
task_description: Optional original task description
|
||||
expected_output: Optional expected output
|
||||
"""
|
||||
if respect_context_window:
|
||||
printer.print(
|
||||
content="Context length exceeded. Summarizing content to fit the model context window. Might take a while...",
|
||||
color="yellow",
|
||||
)
|
||||
summarize_messages(messages, llm, callbacks, i18n)
|
||||
summarize_messages(
|
||||
messages,
|
||||
llm,
|
||||
callbacks,
|
||||
i18n,
|
||||
task_description=task_description,
|
||||
expected_output=expected_output,
|
||||
)
|
||||
else:
|
||||
printer.print(
|
||||
content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
|
||||
@@ -328,6 +336,8 @@ def summarize_messages(
|
||||
llm: Any,
|
||||
callbacks: List[Any],
|
||||
i18n: Any,
|
||||
task_description: Optional[str] = None,
|
||||
expected_output: Optional[str] = None,
|
||||
) -> None:
|
||||
"""Summarize messages to fit within context window.
|
||||
|
||||
@@ -336,6 +346,8 @@ def summarize_messages(
|
||||
llm: LLM instance for summarization
|
||||
callbacks: List of callbacks for LLM
|
||||
i18n: I18N instance for messages
|
||||
task_description: Optional original task description
|
||||
expected_output: Optional expected output
|
||||
"""
|
||||
messages_string = " ".join([message["content"] for message in messages])
|
||||
messages_groups = []
|
||||
@@ -368,12 +380,19 @@ def summarize_messages(
|
||||
|
||||
merged_summary = " ".join(content["content"] for content in summarized_contents)
|
||||
|
||||
# Build the summary message and optionally inject the task reminder.
|
||||
summary_message = i18n.slice("summary").format(merged_summary=merged_summary)
|
||||
|
||||
if task_description or expected_output:
|
||||
summary_message += "\n\n" # blank line before the reminder
|
||||
if task_description:
|
||||
summary_message += f"Original task: {task_description}\n"
|
||||
if expected_output:
|
||||
summary_message += f"Expected output: {expected_output}"
|
||||
|
||||
# Replace the conversation with the new summary message.
|
||||
messages.clear()
|
||||
messages.append(
|
||||
format_message_for_llm(
|
||||
i18n.slice("summary").format(merged_summary=merged_summary)
|
||||
)
|
||||
)
|
||||
messages.append(format_message_for_llm(summary_message))
|
||||
|
||||
|
||||
def show_agent_logs(
|
||||
@@ -438,13 +457,6 @@ def show_agent_logs(
|
||||
)
|
||||
|
||||
|
||||
def _print_current_organization():
|
||||
settings = Settings()
|
||||
if settings.org_uuid:
|
||||
console.print(f"Fetching agent from organization: {settings.org_name} ({settings.org_uuid})", style="bold blue")
|
||||
else:
|
||||
console.print("No organization currently set. We recommend setting one before using: `crewai org switch <org_id>` command.", style="yellow")
|
||||
|
||||
def load_agent_from_repository(from_repository: str) -> Dict[str, Any]:
|
||||
attributes: Dict[str, Any] = {}
|
||||
if from_repository:
|
||||
@@ -454,18 +466,15 @@ def load_agent_from_repository(from_repository: str) -> Dict[str, Any]:
|
||||
from crewai.cli.plus_api import PlusAPI
|
||||
|
||||
client = PlusAPI(api_key=get_auth_token())
|
||||
_print_current_organization()
|
||||
response = client.get_agent(from_repository)
|
||||
if response.status_code == 404:
|
||||
raise AgentRepositoryError(
|
||||
f"Agent {from_repository} does not exist, make sure the name is correct or the agent is available on your organization."
|
||||
f"\nIf you are using the wrong organization, switch to the correct one using `crewai org switch <org_id>` command.",
|
||||
f"Agent {from_repository} does not exist, make sure the name is correct or the agent is available on your organization"
|
||||
)
|
||||
|
||||
if response.status_code != 200:
|
||||
raise AgentRepositoryError(
|
||||
f"Agent {from_repository} could not be loaded: {response.text}"
|
||||
f"\nIf you are using the wrong organization, switch to the correct one using `crewai org switch <org_id>` command.",
|
||||
)
|
||||
|
||||
agent = response.json()
|
||||
|
||||
@@ -23,7 +23,7 @@ def is_ipv4_pattern(name: str) -> bool:
    return bool(IPV4_PATTERN.match(name))


def sanitize_collection_name(name: Optional[str], max_collection_length: int = MAX_COLLECTION_LENGTH) -> str:
def sanitize_collection_name(name: Optional[str]) -> str:
    """
    Sanitize a collection name to meet ChromaDB requirements:
    1. 3-63 characters long
@@ -54,8 +54,8 @@ def sanitize_collection_name(name: Optional[str], max_collection_length: int = M

    if len(sanitized) < MIN_COLLECTION_LENGTH:
        sanitized = sanitized + "x" * (MIN_COLLECTION_LENGTH - len(sanitized))
    if len(sanitized) > max_collection_length:
        sanitized = sanitized[:max_collection_length]
    if len(sanitized) > MAX_COLLECTION_LENGTH:
        sanitized = sanitized[:MAX_COLLECTION_LENGTH]
    if not sanitized[-1].isalnum():
        sanitized = sanitized[:-1] + "z"
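Note: a behavior sketch, not from the diff, of the length clamping above; the 3/63 bounds come from the ChromaDB requirements quoted in the docstring.

```python
MIN_COLLECTION_LENGTH, MAX_COLLECTION_LENGTH = 3, 63

def clamp(sanitized: str) -> str:
    if len(sanitized) < MIN_COLLECTION_LENGTH:
        sanitized = sanitized + "x" * (MIN_COLLECTION_LENGTH - len(sanitized))
    if len(sanitized) > MAX_COLLECTION_LENGTH:
        sanitized = sanitized[:MAX_COLLECTION_LENGTH]
    if not sanitized[-1].isalnum():
        sanitized = sanitized[:-1] + "z"
    return sanitized

print(clamp("ab"))            # 'abx'  (padded to the minimum length)
print(len(clamp("a" * 70)))   # 63     (truncated to the maximum length)
print(clamp("abc-"))          # 'abcz' (trailing non-alphanumeric replaced)
```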
|
||||
|
||||
|
||||
@@ -1,15 +1,8 @@
import os
from typing import Any, Dict, Optional, cast

try:
    from chromadb import Documents, EmbeddingFunction, Embeddings
    from chromadb.api.types import validate_embedding_function
    CHROMADB_AVAILABLE = True
except ImportError:
    CHROMADB_AVAILABLE = False
    Documents = None
    EmbeddingFunction = None
    Embeddings = None
from chromadb import Documents, EmbeddingFunction, Embeddings
from chromadb.api.types import validate_embedding_function
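Note: a standalone restatement, not part of the diff, of the optional-import guard shown in this hunk; this is the pattern the lite install relies on for heavy dependencies.

```python
try:
    from chromadb import Documents, EmbeddingFunction, Embeddings  # noqa: F401
    from chromadb.api.types import validate_embedding_function     # noqa: F401
    CHROMADB_AVAILABLE = True
except ImportError:
    CHROMADB_AVAILABLE = False
    Documents = EmbeddingFunction = Embeddings = None

def require_chromadb() -> None:
    # Fail fast with an actionable message when the optional extra is missing
    if not CHROMADB_AVAILABLE:
        raise ImportError(
            "ChromaDB is required for embedding configuration. "
            "Please install it with: pip install 'crewai[memory]' or 'crewai[knowledge]'"
        )
```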
|
||||
|
||||
|
||||
class EmbeddingConfigurator:
|
||||
@@ -32,11 +25,6 @@ class EmbeddingConfigurator:
|
||||
self,
|
||||
embedder_config: Optional[Dict[str, Any]] = None,
|
||||
) -> EmbeddingFunction:
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for embedding configuration. "
|
||||
"Please install it with: pip install 'crewai[memory]' or 'crewai[knowledge]'"
|
||||
)
|
||||
"""Configures and returns an embedding function based on the provided config."""
|
||||
if embedder_config is None:
|
||||
return self._create_default_embedding_function()
|
||||
@@ -59,12 +47,6 @@ class EmbeddingConfigurator:
|
||||
|
||||
@staticmethod
|
||||
def _create_default_embedding_function():
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for embedding functionality. "
|
||||
"Please install it with: pip install 'crewai[memory]' or 'crewai[knowledge]'"
|
||||
)
|
||||
|
||||
from chromadb.utils.embedding_functions.openai_embedding_function import (
|
||||
OpenAIEmbeddingFunction,
|
||||
)
|
||||
@@ -75,12 +57,6 @@ class EmbeddingConfigurator:
|
||||
|
||||
@staticmethod
|
||||
def _configure_openai(config, model_name):
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for OpenAI embedding configuration. "
|
||||
"Please install it with: pip install 'crewai[memory]' or 'crewai[knowledge]'"
|
||||
)
|
||||
|
||||
from chromadb.utils.embedding_functions.openai_embedding_function import (
|
||||
OpenAIEmbeddingFunction,
|
||||
)
|
||||
@@ -99,12 +75,6 @@ class EmbeddingConfigurator:
|
||||
|
||||
@staticmethod
|
||||
def _configure_azure(config, model_name):
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for Azure embedding configuration. "
|
||||
"Please install it with: pip install 'crewai[memory]' or 'crewai[knowledge]'"
|
||||
)
|
||||
|
||||
from chromadb.utils.embedding_functions.openai_embedding_function import (
|
||||
OpenAIEmbeddingFunction,
|
||||
)
|
||||
@@ -123,12 +93,6 @@ class EmbeddingConfigurator:
|
||||
|
||||
@staticmethod
|
||||
def _configure_ollama(config, model_name):
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for Ollama embedding configuration. "
|
||||
"Please install it with: pip install 'crewai[memory]' or 'crewai[knowledge]'"
|
||||
)
|
||||
|
||||
from chromadb.utils.embedding_functions.ollama_embedding_function import (
|
||||
OllamaEmbeddingFunction,
|
||||
)
|
||||
@@ -140,12 +104,6 @@ class EmbeddingConfigurator:
|
||||
|
||||
@staticmethod
|
||||
def _configure_vertexai(config, model_name):
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for VertexAI embedding configuration. "
|
||||
"Please install it with: pip install 'crewai[memory]' or 'crewai[knowledge]'"
|
||||
)
|
||||
|
||||
from chromadb.utils.embedding_functions.google_embedding_function import (
|
||||
GoogleVertexEmbeddingFunction,
|
||||
)
|
||||
@@ -159,12 +117,6 @@ class EmbeddingConfigurator:
|
||||
|
||||
@staticmethod
|
||||
def _configure_google(config, model_name):
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for Google embedding configuration. "
|
||||
"Please install it with: pip install 'crewai[memory]' or 'crewai[knowledge]'"
|
||||
)
|
||||
|
||||
from chromadb.utils.embedding_functions.google_embedding_function import (
|
||||
GoogleGenerativeAiEmbeddingFunction,
|
||||
)
|
||||
@@ -177,12 +129,6 @@ class EmbeddingConfigurator:
|
||||
|
||||
@staticmethod
|
||||
def _configure_cohere(config, model_name):
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for Cohere embedding configuration. "
|
||||
"Please install it with: pip install 'crewai[memory]' or 'crewai[knowledge]'"
|
||||
)
|
||||
|
||||
from chromadb.utils.embedding_functions.cohere_embedding_function import (
|
||||
CohereEmbeddingFunction,
|
||||
)
|
||||
@@ -288,11 +234,3 @@ class EmbeddingConfigurator:
|
||||
raise ValueError(
|
||||
"Custom embedder must be an instance of `EmbeddingFunction` or a callable that creates one"
|
||||
)
|
||||
|
||||
def validate_embedder_config(self, config: EmbeddingFunction) -> EmbeddingFunction:
|
||||
if not CHROMADB_AVAILABLE:
|
||||
raise ImportError(
|
||||
"ChromaDB is required for embedding validation. "
|
||||
"Please install it with: pip install 'crewai[memory]' or 'crewai[knowledge]'"
|
||||
)
|
||||
return cast(EmbeddingFunction, validate_embedding_function(config))
|
||||
|
||||
@@ -102,24 +102,3 @@ class LiteAgentExecutionErrorEvent(BaseEvent):
|
||||
agent_info: Dict[str, Any]
|
||||
error: str
|
||||
type: str = "lite_agent_execution_error"
|
||||
|
||||
|
||||
# New logging events
|
||||
class AgentLogsStartedEvent(BaseEvent):
|
||||
"""Event emitted when agent logs should be shown at start"""
|
||||
|
||||
agent_role: str
|
||||
task_description: Optional[str] = None
|
||||
verbose: bool = False
|
||||
type: str = "agent_logs_started"
|
||||
|
||||
|
||||
class AgentLogsExecutionEvent(BaseEvent):
|
||||
"""Event emitted when agent logs should be shown during execution"""
|
||||
|
||||
agent_role: str
|
||||
formatted_answer: Any
|
||||
verbose: bool = False
|
||||
type: str = "agent_logs_execution"
|
||||
|
||||
model_config = {"arbitrary_types_allowed": True}
|
||||
|
||||
@@ -27,8 +27,6 @@ from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
|
||||
from .agent_events import (
|
||||
AgentExecutionCompletedEvent,
|
||||
AgentExecutionStartedEvent,
|
||||
AgentLogsStartedEvent,
|
||||
AgentLogsExecutionEvent,
|
||||
LiteAgentExecutionCompletedEvent,
|
||||
LiteAgentExecutionErrorEvent,
|
||||
LiteAgentExecutionStartedEvent,
|
||||
@@ -63,6 +61,8 @@ from .reasoning_events import (
|
||||
AgentReasoningStartedEvent,
|
||||
AgentReasoningCompletedEvent,
|
||||
AgentReasoningFailedEvent,
|
||||
AgentMidExecutionReasoningStartedEvent,
|
||||
AgentMidExecutionReasoningCompletedEvent,
|
||||
)
|
||||
|
||||
|
||||
@@ -110,7 +110,7 @@ class EventListener(BaseEventListener):
|
||||
event.crew_name or "Crew",
|
||||
source.id,
|
||||
"completed",
|
||||
final_string_output,
|
||||
final_result=final_string_output,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffFailedEvent)
|
||||
@@ -289,14 +289,13 @@ class EventListener(BaseEventListener):
|
||||
if isinstance(source, LLM):
|
||||
self.formatter.handle_llm_tool_usage_started(
|
||||
event.tool_name,
|
||||
event.tool_args,
|
||||
)
|
||||
else:
|
||||
self.formatter.handle_tool_usage_started(
|
||||
self.formatter.current_agent_branch,
|
||||
event.tool_name,
|
||||
self.formatter.current_crew_tree,
|
||||
)
|
||||
self.formatter.current_crew_tree,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(ToolUsageFinishedEvent)
|
||||
def on_tool_usage_finished(source, event: ToolUsageFinishedEvent):
|
||||
@@ -324,20 +323,16 @@ class EventListener(BaseEventListener):
|
||||
event.tool_name,
|
||||
event.error,
|
||||
self.formatter.current_crew_tree,
|
||||
)
|
||||
)
|
||||
|
||||
# ----------- LLM EVENTS -----------
|
||||
|
||||
@crewai_event_bus.on(LLMCallStartedEvent)
|
||||
def on_llm_call_started(source, event: LLMCallStartedEvent):
|
||||
# Capture the returned tool branch and update the current_tool_branch reference
|
||||
thinking_branch = self.formatter.handle_llm_call_started(
|
||||
self.formatter.handle_llm_call_started(
|
||||
self.formatter.current_agent_branch,
|
||||
self.formatter.current_crew_tree,
|
||||
)
|
||||
# Update the formatter's current_tool_branch to ensure proper cleanup
|
||||
if thinking_branch is not None:
|
||||
self.formatter.current_tool_branch = thinking_branch
|
||||
|
||||
@crewai_event_bus.on(LLMCallCompletedEvent)
|
||||
def on_llm_call_completed(source, event: LLMCallCompletedEvent):
|
||||
@@ -445,8 +440,6 @@ class EventListener(BaseEventListener):
|
||||
self.formatter.current_crew_tree,
|
||||
)
|
||||
|
||||
# ----------- REASONING EVENTS -----------
|
||||
|
||||
@crewai_event_bus.on(AgentReasoningStartedEvent)
|
||||
def on_agent_reasoning_started(source, event: AgentReasoningStartedEvent):
|
||||
self.formatter.handle_reasoning_started(
|
||||
@@ -470,22 +463,36 @@ class EventListener(BaseEventListener):
|
||||
self.formatter.current_crew_tree,
|
||||
)
|
||||
|
||||
# ----------- AGENT LOGGING EVENTS -----------
|
||||
|
||||
@crewai_event_bus.on(AgentLogsStartedEvent)
|
||||
def on_agent_logs_started(source, event: AgentLogsStartedEvent):
|
||||
self.formatter.handle_agent_logs_started(
|
||||
event.agent_role,
|
||||
event.task_description,
|
||||
event.verbose,
|
||||
@crewai_event_bus.on(AgentMidExecutionReasoningStartedEvent)
|
||||
def on_mid_execution_reasoning_started(source, event: AgentMidExecutionReasoningStartedEvent):
|
||||
self.formatter.handle_reasoning_started(
|
||||
self.formatter.current_agent_branch,
|
||||
event.attempt if hasattr(event, "attempt") else 1,
|
||||
self.formatter.current_crew_tree,
|
||||
current_step=event.current_step,
|
||||
reasoning_trigger=event.reasoning_trigger,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(AgentLogsExecutionEvent)
|
||||
def on_agent_logs_execution(source, event: AgentLogsExecutionEvent):
|
||||
self.formatter.handle_agent_logs_execution(
|
||||
event.agent_role,
|
||||
event.formatted_answer,
|
||||
event.verbose,
|
||||
@crewai_event_bus.on(AgentMidExecutionReasoningCompletedEvent)
|
||||
def on_mid_execution_reasoning_completed(source, event: AgentMidExecutionReasoningCompletedEvent):
|
||||
self.formatter.handle_reasoning_completed(
|
||||
event.updated_plan,
|
||||
True,
|
||||
self.formatter.current_crew_tree,
|
||||
duration_seconds=event.duration_seconds,
|
||||
current_step=event.current_step,
|
||||
reasoning_trigger=event.reasoning_trigger,
|
||||
)
|
||||
|
||||
from crewai.utilities.events.reasoning_events import AgentAdaptiveReasoningDecisionEvent
|
||||
|
||||
@crewai_event_bus.on(AgentAdaptiveReasoningDecisionEvent)
|
||||
def on_adaptive_reasoning_decision(source, event: AgentAdaptiveReasoningDecisionEvent):
|
||||
self.formatter.handle_adaptive_reasoning_decision(
|
||||
self.formatter.current_agent_branch,
|
||||
event.should_reason,
|
||||
event.reasoning,
|
||||
self.formatter.current_crew_tree,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ class AgentReasoningCompletedEvent(BaseEvent):
|
||||
plan: str
|
||||
ready: bool
|
||||
attempt: int = 1
|
||||
duration_seconds: float = 0.0 # Time taken for reasoning in seconds
|
||||
|
||||
|
||||
class AgentReasoningFailedEvent(BaseEvent):
|
||||
@@ -28,4 +29,37 @@ class AgentReasoningFailedEvent(BaseEvent):
|
||||
agent_role: str
|
||||
task_id: str
|
||||
error: str
|
||||
attempt: int = 1
|
||||
attempt: int = 1
|
||||
|
||||
|
||||
class AgentMidExecutionReasoningStartedEvent(BaseEvent):
|
||||
"""Event emitted when an agent starts mid-execution reasoning."""
|
||||
|
||||
type: str = "agent_mid_execution_reasoning_started"
|
||||
agent_role: str
|
||||
task_id: str
|
||||
current_step: int
|
||||
reasoning_trigger: str # "interval" or "adaptive"
|
||||
|
||||
|
||||
class AgentMidExecutionReasoningCompletedEvent(BaseEvent):
|
||||
"""Event emitted when an agent completes mid-execution reasoning."""
|
||||
|
||||
type: str = "agent_mid_execution_reasoning_completed"
|
||||
agent_role: str
|
||||
task_id: str
|
||||
current_step: int
|
||||
updated_plan: str
|
||||
reasoning_trigger: str
|
||||
duration_seconds: float = 0.0 # Time taken for reasoning in seconds
|
||||
|
||||
|
||||
class AgentAdaptiveReasoningDecisionEvent(BaseEvent):
|
||||
"""Event emitted after the agent decides whether to trigger adaptive reasoning."""
|
||||
|
||||
type: str = "agent_adaptive_reasoning_decision"
|
||||
agent_role: str
|
||||
task_id: str
|
||||
should_reason: bool # Whether the agent decided to reason
|
||||
reasoning: str # Brief explanation / rationale from the LLM
|
||||
reasoning_trigger: str = "adaptive" # Always adaptive for this event
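Note: an illustrative listener, not from the diff, for the new reasoning events; the handler bodies are hypothetical, and the import path for the event bus is an assumption, though the decorator usage and the reasoning_events module path appear elsewhere in this changeset.

```python
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.reasoning_events import (
    AgentAdaptiveReasoningDecisionEvent,
    AgentMidExecutionReasoningCompletedEvent,
)

@crewai_event_bus.on(AgentAdaptiveReasoningDecisionEvent)
def on_decision(source, event: AgentAdaptiveReasoningDecisionEvent):
    print(f"{event.agent_role} reason? {event.should_reason} -> {event.reasoning}")

@crewai_event_bus.on(AgentMidExecutionReasoningCompletedEvent)
def on_replanned(source, event: AgentMidExecutionReasoningCompletedEvent):
    print(f"Step {event.current_step}: plan updated in {event.duration_seconds:.1f}s")
```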
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
from typing import Any, Dict, Optional
|
||||
import threading
|
||||
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.text import Text
|
||||
from rich.tree import Tree
|
||||
from rich.live import Live
|
||||
from rich.syntax import Syntax
|
||||
|
||||
|
||||
class ConsoleFormatter:
|
||||
@@ -18,7 +18,14 @@ class ConsoleFormatter:
    current_lite_agent_branch: Optional[Tree] = None
    tool_usage_counts: Dict[str, int] = {}
    current_reasoning_branch: Optional[Tree] = None  # Track reasoning status
    _live_paused: bool = False
    current_adaptive_decision_branch: Optional[Tree] = None  # Track last adaptive decision branch
    # Spinner support ---------------------------------------------------
    _spinner_frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
    _spinner_index: int = 0
    _spinner_branches: Dict[Tree, tuple[str, str, str]] = {}  # branch -> (icon, name, style)
    _spinner_thread: Optional[threading.Thread] = None
    _stop_spinner_event: Optional[threading.Event] = None
    _spinner_running: bool = False
    current_llm_tool_tree: Optional[Tree] = None
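Note: a minimal sketch, not from the diff, of the frame cycling that the spinner attributes above support; the console-printing loop is only illustrative.

```python
import itertools
import threading
import time

_spinner_frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

def spin(stop: threading.Event) -> None:
    # Cycle the braille frames until asked to stop
    for frame in itertools.cycle(_spinner_frames):
        if stop.is_set():
            break
        print(f"\r🧠 {frame} Thinking...", end="", flush=True)
        time.sleep(0.1)

stop = threading.Event()
thread = threading.Thread(target=spin, args=(stop,), daemon=True)
thread.start()
time.sleep(1)
stop.set()
thread.join()
```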
|
||||
|
||||
def __init__(self, verbose: bool = False):
|
||||
@@ -41,12 +48,7 @@ class ConsoleFormatter:
|
||||
)
|
||||
|
||||
def create_status_content(
|
||||
self,
|
||||
title: str,
|
||||
name: str,
|
||||
status_style: str = "blue",
|
||||
tool_args: Dict[str, Any] | str = "",
|
||||
**fields,
|
||||
self, title: str, name: str, status_style: str = "blue", **fields
|
||||
) -> Text:
|
||||
"""Create standardized status content with consistent formatting."""
|
||||
content = Text()
|
||||
@@ -56,11 +58,11 @@ class ConsoleFormatter:
|
||||
|
||||
for label, value in fields.items():
|
||||
content.append(f"{label}: ", style="white")
|
||||
if label == "Result":
|
||||
content.append("\n")
|
||||
content.append(
|
||||
f"{value}\n", style=fields.get(f"{label}_style", status_style)
|
||||
)
|
||||
content.append("Tool Args: ", style="white")
|
||||
content.append(f"{tool_args}\n", style=status_style)
|
||||
|
||||
return content
|
||||
|
||||
@@ -128,19 +130,6 @@ class ConsoleFormatter:
|
||||
# Finally, pass through to the regular Console.print implementation
|
||||
self.console.print(*args, **kwargs)
|
||||
|
||||
def pause_live_updates(self) -> None:
|
||||
"""Pause Live session updates to allow for human input without interference."""
|
||||
if not self._live_paused:
|
||||
if self._live:
|
||||
self._live.stop()
|
||||
self._live = None
|
||||
self._live_paused = True
|
||||
|
||||
def resume_live_updates(self) -> None:
|
||||
"""Resume Live session updates after human input is complete."""
|
||||
if self._live_paused:
|
||||
self._live_paused = False
|
||||
|
||||
def print_panel(
|
||||
self, content: Text, title: str, style: str = "blue", is_flow: bool = False
|
||||
) -> None:
|
||||
@@ -160,7 +149,7 @@ class ConsoleFormatter:
|
||||
crew_name: str,
|
||||
source_id: str,
|
||||
status: str = "completed",
|
||||
final_string_output: str = "",
|
||||
final_result: Optional[str] = None,
|
||||
) -> None:
|
||||
"""Handle crew tree updates with consistent formatting."""
|
||||
if not self.verbose or tree is None:
|
||||
@@ -186,16 +175,26 @@ class ConsoleFormatter:
|
||||
style,
|
||||
)
|
||||
|
||||
# Prepare additional fields for the completion panel
|
||||
additional_fields: Dict[str, Any] = {"ID": source_id}
|
||||
|
||||
# Include the final result if provided and the status is completed
|
||||
if status == "completed" and final_result is not None:
|
||||
additional_fields["Result"] = final_result
|
||||
|
||||
content = self.create_status_content(
|
||||
content_title,
|
||||
crew_name or "Crew",
|
||||
style,
|
||||
ID=source_id,
|
||||
**additional_fields,
|
||||
)
|
||||
content.append(f"Final Output: {final_string_output}\n", style="white")
|
||||
|
||||
self.print_panel(content, title, style)
|
||||
|
||||
# Clear all spinners when crew completes or fails
|
||||
if status in {"completed", "failed"}:
|
||||
self._clear_all_spinners()
|
||||
|
||||
def create_crew_tree(self, crew_name: str, source_id: str) -> Optional[Tree]:
|
||||
"""Create and initialize a new crew tree with initial status."""
|
||||
if not self.verbose:
|
||||
@@ -243,6 +242,15 @@ class ConsoleFormatter:
|
||||
# Set the current_task_branch attribute directly
|
||||
self.current_task_branch = task_branch
|
||||
|
||||
# When a new task starts, clear pointers to previous agent, reasoning,
|
||||
# and tool branches so that any upcoming Reasoning / Tool logs attach
|
||||
# to the correct task.
|
||||
if self.current_tool_branch:
|
||||
self._unregister_spinner_branch(self.current_tool_branch)
|
||||
self.current_agent_branch = None
|
||||
# Keep current_reasoning_branch; reasoning may still be in progress
|
||||
self.current_tool_branch = None
|
||||
|
||||
return task_branch
|
||||
|
||||
def update_task_status(
|
||||
@@ -290,6 +298,17 @@ class ConsoleFormatter:
|
||||
)
|
||||
self.print_panel(content, panel_title, style)
|
||||
|
||||
# Clear task-scoped pointers after the task is finished so subsequent
|
||||
# events don't mistakenly attach to the old task branch.
|
||||
if status in {"completed", "failed"}:
|
||||
self.current_task_branch = None
|
||||
self.current_agent_branch = None
|
||||
self.current_tool_branch = None
|
||||
# Ensure spinner is stopped if reasoning branch exists
|
||||
if self.current_reasoning_branch is not None:
|
||||
self._unregister_spinner_branch(self.current_reasoning_branch)
|
||||
self.current_reasoning_branch = None
|
||||
|
||||
def create_agent_branch(
|
||||
self, task_branch: Optional[Tree], agent_role: str, crew_tree: Optional[Tree]
|
||||
) -> Optional[Tree]:
|
||||
@@ -465,19 +484,12 @@ class ConsoleFormatter:
|
||||
def handle_llm_tool_usage_started(
|
||||
self,
|
||||
tool_name: str,
|
||||
tool_args: Dict[str, Any] | str,
|
||||
):
|
||||
# Create status content for the tool usage
|
||||
content = self.create_status_content(
|
||||
"Tool Usage Started", tool_name, Status="In Progress", tool_args=tool_args
|
||||
)
|
||||
|
||||
# Create and print the panel
|
||||
self.print_panel(content, "Tool Usage", "green")
|
||||
tree = self.get_llm_tree(tool_name)
|
||||
self.add_tree_node(tree, "🔄 Tool Usage Started", "green")
|
||||
self.print(tree)
|
||||
self.print()
|
||||
|
||||
# Still return the tree for compatibility with existing code
|
||||
return self.get_llm_tree(tool_name)
|
||||
return tree
|
||||
|
||||
def handle_llm_tool_usage_finished(
|
||||
self,
|
||||
@@ -508,7 +520,6 @@ class ConsoleFormatter:
|
||||
agent_branch: Optional[Tree],
|
||||
tool_name: str,
|
||||
crew_tree: Optional[Tree],
|
||||
tool_args: Dict[str, Any] | str = "",
|
||||
) -> Optional[Tree]:
|
||||
"""Handle tool usage started event."""
|
||||
if not self.verbose:
|
||||
@@ -516,7 +527,9 @@ class ConsoleFormatter:
|
||||
|
||||
# Parent for tool usage: LiteAgent > Agent > Task
|
||||
branch_to_use = (
|
||||
self.current_lite_agent_branch or agent_branch or self.current_task_branch
|
||||
self.current_lite_agent_branch
|
||||
or agent_branch
|
||||
or self.current_task_branch
|
||||
)
|
||||
|
||||
# Render full crew tree when available for consistent live updates
|
||||
@@ -532,19 +545,20 @@ class ConsoleFormatter:
|
||||
# Update tool usage count
|
||||
self.tool_usage_counts[tool_name] = self.tool_usage_counts.get(tool_name, 0) + 1
|
||||
|
||||
# Find or create tool node
|
||||
tool_branch = self.current_tool_branch
|
||||
if tool_branch is None:
|
||||
tool_branch = branch_to_use.add("")
|
||||
self.current_tool_branch = tool_branch
|
||||
# Always create a new branch for each tool invocation so that previous
|
||||
# tool usages remain visible in the tree.
|
||||
tool_branch = branch_to_use.add("")
|
||||
self.current_tool_branch = tool_branch
|
||||
|
||||
# Update label with current count
|
||||
spinner_char = self._next_spinner()
|
||||
self.update_tree_label(
|
||||
tool_branch,
|
||||
"🔧",
|
||||
f"🔧 {spinner_char}",
|
||||
f"Using {tool_name} ({self.tool_usage_counts[tool_name]})",
|
||||
"yellow",
|
||||
)
|
||||
self._register_spinner_branch(tool_branch, "🔧", f"Using {tool_name} ({self.tool_usage_counts[tool_name]})", "yellow")
|
||||
|
||||
# Print updated tree immediately
|
||||
self.print(tree_to_use)
|
||||
@@ -574,9 +588,7 @@ class ConsoleFormatter:
|
||||
f"Used {tool_name} ({self.tool_usage_counts[tool_name]})",
|
||||
"green",
|
||||
)
|
||||
|
||||
# Clear the current tool branch as we're done with it
|
||||
self.current_tool_branch = None
|
||||
self._unregister_spinner_branch(tool_branch)
|
||||
|
||||
# Only print if we have a valid tree and the tool node is still in it
|
||||
if isinstance(tree_to_use, Tree) and tool_branch in tree_to_use.children:
|
||||
@@ -604,6 +616,7 @@ class ConsoleFormatter:
|
||||
f"{tool_name} ({self.tool_usage_counts[tool_name]})",
|
||||
"red",
|
||||
)
|
||||
self._unregister_spinner_branch(tool_branch)
|
||||
if tree_to_use:
|
||||
self.print(tree_to_use)
|
||||
self.print()
|
||||
@@ -625,7 +638,9 @@ class ConsoleFormatter:
|
||||
|
||||
# Parent for tool usage: LiteAgent > Agent > Task
|
||||
branch_to_use = (
|
||||
self.current_lite_agent_branch or agent_branch or self.current_task_branch
|
||||
self.current_lite_agent_branch
|
||||
or agent_branch
|
||||
or self.current_task_branch
|
||||
)
|
||||
|
||||
# Render full crew tree when available for consistent live updates
|
||||
@@ -639,21 +654,16 @@ class ConsoleFormatter:
|
||||
return None
|
||||
|
||||
# Only add thinking status if we don't have a current tool branch
|
||||
# or if the current tool branch is not a thinking node
|
||||
should_add_thinking = self.current_tool_branch is None or "Thinking" not in str(
|
||||
self.current_tool_branch.label
|
||||
)
|
||||
|
||||
if should_add_thinking:
|
||||
if self.current_tool_branch is None:
|
||||
tool_branch = branch_to_use.add("")
|
||||
self.update_tree_label(tool_branch, "🧠", "Thinking...", "blue")
|
||||
spinner_char = self._next_spinner()
|
||||
self.update_tree_label(tool_branch, f"🧠 {spinner_char}", "Thinking...", "blue")
|
||||
self._register_spinner_branch(tool_branch, "🧠", "Thinking...", "blue")
|
||||
self.current_tool_branch = tool_branch
|
||||
self.print(tree_to_use)
|
||||
self.print()
|
||||
return tool_branch
|
||||
|
||||
# Return the existing tool branch if it's already a thinking node
|
||||
return self.current_tool_branch
|
||||
return None
|
||||
|
||||
def handle_llm_call_completed(
|
||||
self,
|
||||
@@ -662,7 +672,7 @@ class ConsoleFormatter:
|
||||
crew_tree: Optional[Tree],
|
||||
) -> None:
|
||||
"""Handle LLM call completed event."""
|
||||
if not self.verbose:
|
||||
if not self.verbose or tool_branch is None:
|
||||
return
|
||||
|
||||
# Decide which tree to render: prefer full crew tree, else parent branch
|
||||
@@ -670,50 +680,25 @@ class ConsoleFormatter:
|
||||
if tree_to_use is None:
|
||||
return
|
||||
|
||||
# Try to remove the thinking status node - first try the provided tool_branch
|
||||
thinking_branch_to_remove = None
|
||||
removed = False
|
||||
|
||||
# Method 1: Use the provided tool_branch if it's a thinking node
|
||||
if tool_branch is not None and "Thinking" in str(tool_branch.label):
|
||||
thinking_branch_to_remove = tool_branch
|
||||
|
||||
# Method 2: Fallback - search for any thinking node if tool_branch is None or not thinking
|
||||
if thinking_branch_to_remove is None:
|
||||
# Remove the thinking status node when complete
|
||||
if "Thinking" in str(tool_branch.label):
|
||||
parents = [
|
||||
self.current_lite_agent_branch,
|
||||
self.current_agent_branch,
|
||||
self.current_task_branch,
|
||||
tree_to_use,
|
||||
]
|
||||
removed = False
|
||||
for parent in parents:
|
||||
if isinstance(parent, Tree):
|
||||
for child in parent.children:
|
||||
if "Thinking" in str(child.label):
|
||||
thinking_branch_to_remove = child
|
||||
break
|
||||
if thinking_branch_to_remove:
|
||||
break
|
||||
|
||||
# Remove the thinking node if found
|
||||
if thinking_branch_to_remove:
|
||||
parents = [
|
||||
self.current_lite_agent_branch,
|
||||
self.current_agent_branch,
|
||||
self.current_task_branch,
|
||||
tree_to_use,
|
||||
]
|
||||
for parent in parents:
|
||||
if (
|
||||
isinstance(parent, Tree)
|
||||
and thinking_branch_to_remove in parent.children
|
||||
):
|
||||
parent.children.remove(thinking_branch_to_remove)
|
||||
if isinstance(parent, Tree) and tool_branch in parent.children:
|
||||
parent.children.remove(tool_branch)
|
||||
# Stop spinner for the thinking branch before removing
|
||||
self._unregister_spinner_branch(tool_branch)
|
||||
removed = True
|
||||
break
|
||||
|
||||
# Clear pointer if we just removed the current_tool_branch
|
||||
if self.current_tool_branch is thinking_branch_to_remove:
|
||||
if self.current_tool_branch is tool_branch:
|
||||
self.current_tool_branch = None
|
||||
|
||||
if removed:
|
||||
@@ -730,36 +715,10 @@ class ConsoleFormatter:
|
||||
# Decide which tree to render: prefer full crew tree, else parent branch
|
||||
tree_to_use = self.current_crew_tree or crew_tree or self.current_task_branch
|
||||
|
||||
# Find the thinking branch to update (similar to completion logic)
|
||||
thinking_branch_to_update = None
|
||||
|
||||
# Method 1: Use the provided tool_branch if it's a thinking node
|
||||
if tool_branch is not None and "Thinking" in str(tool_branch.label):
|
||||
thinking_branch_to_update = tool_branch
|
||||
|
||||
# Method 2: Fallback - search for any thinking node if tool_branch is None or not thinking
|
||||
if thinking_branch_to_update is None:
|
||||
parents = [
|
||||
self.current_lite_agent_branch,
|
||||
self.current_agent_branch,
|
||||
self.current_task_branch,
|
||||
tree_to_use,
|
||||
]
|
||||
for parent in parents:
|
||||
if isinstance(parent, Tree):
|
||||
for child in parent.children:
|
||||
if "Thinking" in str(child.label):
|
||||
thinking_branch_to_update = child
|
||||
break
|
||||
if thinking_branch_to_update:
|
||||
break
|
||||
|
||||
# Update the thinking branch to show failure
|
||||
if thinking_branch_to_update:
|
||||
thinking_branch_to_update.label = Text("❌ LLM Failed", style="red bold")
|
||||
# Clear the current_tool_branch reference
|
||||
if self.current_tool_branch is thinking_branch_to_update:
|
||||
self.current_tool_branch = None
|
||||
# Update tool branch if it exists
|
||||
if tool_branch:
|
||||
tool_branch.label = Text("❌ LLM Failed", style="red bold")
|
||||
self._unregister_spinner_branch(tool_branch)
|
||||
if tree_to_use:
|
||||
self.print(tree_to_use)
|
||||
self.print()
|
||||
@@ -1195,15 +1154,23 @@ class ConsoleFormatter:
|
||||
agent_branch: Optional[Tree],
|
||||
attempt: int,
|
||||
crew_tree: Optional[Tree],
|
||||
current_step: Optional[int] = None,
|
||||
reasoning_trigger: Optional[str] = None,
|
||||
) -> Optional[Tree]:
|
||||
"""Handle agent reasoning started (or refinement) event."""
|
||||
if not self.verbose:
|
||||
return None
|
||||
|
||||
# Prefer LiteAgent > Agent > Task branch as the parent for reasoning
|
||||
branch_to_use = (
|
||||
self.current_lite_agent_branch or agent_branch or self.current_task_branch
|
||||
)
|
||||
# Prefer to nest under the latest adaptive decision branch when this is a
|
||||
# mid-execution reasoning cycle so the tree indents nicely.
|
||||
if current_step is not None and self.current_adaptive_decision_branch is not None:
|
||||
branch_to_use = self.current_adaptive_decision_branch
|
||||
else:
|
||||
branch_to_use = (
|
||||
self.current_lite_agent_branch
|
||||
or agent_branch
|
||||
or self.current_task_branch
|
||||
)
|
||||
|
||||
# We always want to render the full crew tree when possible so the
|
||||
# Live view updates coherently. Fallbacks: crew tree → branch itself.
|
||||
@@ -1219,11 +1186,21 @@ class ConsoleFormatter:
|
||||
reasoning_branch = branch_to_use.add("")
|
||||
self.current_reasoning_branch = reasoning_branch
|
||||
|
||||
# Build label text depending on attempt
|
||||
status_text = (
|
||||
f"Reasoning (Attempt {attempt})" if attempt > 1 else "Reasoning..."
|
||||
)
|
||||
self.update_tree_label(reasoning_branch, "🧠", status_text, "blue")
|
||||
# Build label text depending on attempt and whether it's mid-execution
|
||||
if current_step is not None:
|
||||
status_text = "Mid-Execution Reasoning"
|
||||
else:
|
||||
status_text = (
|
||||
f"Reasoning (Attempt {attempt})" if attempt > 1 else "Reasoning..."
|
||||
)
|
||||
|
||||
# ⠋ is the first frame of a braille spinner – visually hints progress even
|
||||
# without true animation.
|
||||
spinner_char = self._next_spinner()
|
||||
self.update_tree_label(reasoning_branch, f"🧠 {spinner_char}", status_text, "yellow")
|
||||
|
||||
# Register branch for continuous spinner
|
||||
self._register_spinner_branch(reasoning_branch, "🧠", status_text, "yellow")
|
||||
|
||||
self.print(tree_to_use)
|
||||
self.print()
|
||||
@@ -1235,6 +1212,9 @@ class ConsoleFormatter:
|
||||
plan: str,
|
||||
ready: bool,
|
||||
crew_tree: Optional[Tree],
|
||||
duration_seconds: float = 0.0,
|
||||
current_step: Optional[int] = None,
|
||||
reasoning_trigger: Optional[str] = None,
|
||||
) -> None:
|
||||
"""Handle agent reasoning completed event."""
|
||||
if not self.verbose:
|
||||
@@ -1248,12 +1228,31 @@ class ConsoleFormatter:
|
||||
or crew_tree
|
||||
)
|
||||
|
||||
style = "green" if ready else "yellow"
|
||||
status_text = (
|
||||
"Reasoning Completed" if ready else "Reasoning Completed (Not Ready)"
|
||||
)
|
||||
# Completed reasoning should always display in green.
|
||||
style = "green"
|
||||
# Build duration part separately for cleaner formatting
|
||||
duration_part = f"{duration_seconds:.2f}s" if duration_seconds > 0 else ""
|
||||
|
||||
if reasoning_branch is not None:
|
||||
if current_step is not None:
|
||||
# Build label manually to style duration differently and omit trigger info.
|
||||
if reasoning_branch is not None:
|
||||
label = Text()
|
||||
label.append("✅ ", style=f"{style} bold")
|
||||
label.append("Mid-Execution Reasoning Completed", style=style)
|
||||
if duration_part:
|
||||
label.append(f" ({duration_part})", style="cyan")
|
||||
reasoning_branch.label = label
|
||||
|
||||
status_text = None # Already set label manually
|
||||
else:
|
||||
status_text = (
|
||||
f"Reasoning Completed ({duration_part})" if duration_part else "Reasoning Completed"
|
||||
) if ready else (
|
||||
f"Reasoning Completed (Not Ready • {duration_part})" if duration_part else "Reasoning Completed (Not Ready)"
|
||||
)
|
||||
|
||||
# If we didn't build a custom label (non-mid-execution case), use helper
|
||||
if status_text and reasoning_branch is not None:
|
||||
self.update_tree_label(reasoning_branch, "✅", status_text, style)
|
||||
|
||||
if tree_to_use is not None:
|
||||
@@ -1261,9 +1260,17 @@ class ConsoleFormatter:
|
||||
|
||||
# Show plan in a panel (trim very long plans)
|
||||
if plan:
|
||||
# Derive duration text for panel title
|
||||
duration_text = f" ({duration_part})" if duration_part else ""
|
||||
|
||||
if current_step is not None:
|
||||
title = f"🧠 Mid-Execution Reasoning Plan{duration_text}"
|
||||
else:
|
||||
title = f"🧠 Reasoning Plan{duration_text}"
|
||||
|
||||
plan_panel = Panel(
|
||||
Text(plan, style="white"),
|
||||
title="🧠 Reasoning Plan",
|
||||
title=title,
|
||||
border_style=style,
|
||||
padding=(1, 2),
|
||||
)
|
||||
@@ -1271,9 +1278,17 @@ class ConsoleFormatter:
|
||||
|
||||
self.print()
|
||||
|
||||
# Unregister spinner before clearing
|
||||
if reasoning_branch is not None:
|
||||
self._unregister_spinner_branch(reasoning_branch)
|
||||
|
||||
# Clear stored branch after completion
|
||||
self.current_reasoning_branch = None
|
||||
|
||||
# After reasoning finished, we also clear the adaptive decision branch to
|
||||
# avoid nesting unrelated future nodes.
|
||||
self.current_adaptive_decision_branch = None
|
||||
|
||||
def handle_reasoning_failed(
|
||||
self,
|
||||
error: str,
|
||||
@@ -1293,6 +1308,7 @@ class ConsoleFormatter:
|
||||
|
||||
if reasoning_branch is not None:
|
||||
self.update_tree_label(reasoning_branch, "❌", "Reasoning Failed", "red")
|
||||
self._unregister_spinner_branch(reasoning_branch)
|
||||
|
||||
if tree_to_use is not None:
|
||||
self.print(tree_to_use)
|
||||
@@ -1309,148 +1325,114 @@ class ConsoleFormatter:
|
||||
# Clear stored branch after failure
|
||||
self.current_reasoning_branch = None
|
||||
|
||||
# ----------- AGENT LOGGING EVENTS -----------
|
||||
# ----------- ADAPTIVE REASONING DECISION EVENTS -----------
|
||||
|
||||
def handle_agent_logs_started(
|
||||
def handle_adaptive_reasoning_decision(
|
||||
self,
|
||||
agent_role: str,
|
||||
task_description: Optional[str] = None,
|
||||
verbose: bool = False,
|
||||
agent_branch: Optional[Tree],
|
||||
should_reason: bool,
|
||||
reasoning: str,
|
||||
crew_tree: Optional[Tree],
|
||||
) -> None:
|
||||
"""Handle agent logs started event."""
|
||||
if not verbose:
|
||||
"""Render the decision on whether to trigger adaptive reasoning."""
|
||||
if not self.verbose:
|
||||
return
|
||||
|
||||
agent_role = agent_role.split("\n")[0]
|
||||
|
||||
# Create panel content
|
||||
content = Text()
|
||||
content.append("Agent: ", style="white")
|
||||
content.append(f"{agent_role}", style="bright_green bold")
|
||||
|
||||
if task_description:
|
||||
content.append("\n\nTask: ", style="white")
|
||||
content.append(f"{task_description}", style="bright_green")
|
||||
|
||||
# Create and display the panel
|
||||
agent_panel = Panel(
|
||||
content,
|
||||
title="🤖 Agent Started",
|
||||
border_style="magenta",
|
||||
padding=(1, 2),
|
||||
# Prefer LiteAgent > Agent > Task as parent
|
||||
branch_to_use = (
|
||||
self.current_lite_agent_branch
|
||||
or agent_branch
|
||||
or self.current_task_branch
|
||||
)
|
||||
self.print(agent_panel)
|
||||
|
||||
tree_to_use = self.current_crew_tree or crew_tree or branch_to_use
|
||||
|
||||
if branch_to_use is None or tree_to_use is None:
|
||||
return
|
||||
|
||||
decision_branch = branch_to_use.add("")
|
||||
|
||||
decision_text = "YES" if should_reason else "NO"
|
||||
style = "green" if should_reason else "yellow"
|
||||
|
||||
self.update_tree_label(
|
||||
decision_branch,
|
||||
"🤔",
|
||||
f"Adaptive Reasoning Decision: {decision_text}",
|
||||
style,
|
||||
)
|
||||
|
||||
# Print tree first (live update)
|
||||
self.print(tree_to_use)
|
||||
|
||||
# Also show explanation if available
|
||||
if reasoning:
|
||||
truncated_reasoning = reasoning[:500] + "..." if len(reasoning) > 500 else reasoning
|
||||
panel = Panel(
|
||||
Text(truncated_reasoning, style="white"),
|
||||
title="🤔 Adaptive Reasoning Rationale",
|
||||
border_style=style,
|
||||
padding=(1, 2),
|
||||
)
|
||||
self.print(panel)
|
||||
|
||||
self.print()
|
||||
|
||||
def handle_agent_logs_execution(
|
||||
self,
|
||||
agent_role: str,
|
||||
formatted_answer: Any,
|
||||
verbose: bool = False,
|
||||
) -> None:
|
||||
"""Handle agent logs execution event."""
|
||||
if not verbose:
|
||||
# Store the decision branch so that subsequent mid-execution reasoning nodes
|
||||
# can be rendered as children of this decision (for better indentation).
|
||||
self.current_adaptive_decision_branch = decision_branch
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Spinner helpers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _next_spinner(self) -> str:
|
||||
"""Return next spinner frame."""
|
||||
frame = self._spinner_frames[self._spinner_index]
|
||||
self._spinner_index = (self._spinner_index + 1) % len(self._spinner_frames)
|
||||
return frame
|
||||
|
||||
def _register_spinner_branch(self, branch: Tree, icon: str, name: str, style: str):
|
||||
"""Start animating spinner for given branch."""
|
||||
self._spinner_branches[branch] = (icon, name, style)
|
||||
if not self._spinner_running:
|
||||
self._start_spinner_thread()
|
||||
|
||||
def _unregister_spinner_branch(self, branch: Optional[Tree]):
|
||||
if branch is None:
|
||||
return
|
||||
self._spinner_branches.pop(branch, None)
|
||||
if not self._spinner_branches:
|
||||
self._stop_spinner_thread()
|
||||
|
||||
from crewai.agents.parser import AgentAction, AgentFinish
|
||||
import json
|
||||
import re
|
||||
def _start_spinner_thread(self):
|
||||
if self._spinner_running:
|
||||
return
|
||||
self._stop_spinner_event = threading.Event()
|
||||
self._spinner_thread = threading.Thread(target=self._spinner_loop, daemon=True)
|
||||
self._spinner_thread.start()
|
||||
self._spinner_running = True
|
||||
|
||||
agent_role = agent_role.split("\n")[0]
|
||||
def _stop_spinner_thread(self):
|
||||
if self._stop_spinner_event:
|
||||
self._stop_spinner_event.set()
|
||||
self._spinner_running = False
|
||||
|
||||
if isinstance(formatted_answer, AgentAction):
|
||||
thought = re.sub(r"\n+", "\n", formatted_answer.thought)
|
||||
formatted_json = json.dumps(
|
||||
formatted_answer.tool_input,
|
||||
indent=2,
|
||||
ensure_ascii=False,
|
||||
)
|
||||
def _clear_all_spinners(self):
|
||||
"""Clear all active spinners. Used as a safety mechanism."""
|
||||
self._spinner_branches.clear()
|
||||
self._stop_spinner_thread()
|
||||
|
||||
# Create content for the action panel
|
||||
content = Text()
|
||||
content.append("Agent: ", style="white")
|
||||
content.append(f"{agent_role}\n\n", style="bright_green bold")
|
||||
|
||||
if thought and thought != "":
|
||||
content.append("Thought: ", style="white")
|
||||
content.append(f"{thought}\n\n", style="bright_green")
|
||||
|
||||
content.append("Using Tool: ", style="white")
|
||||
content.append(f"{formatted_answer.tool}\n\n", style="bright_green bold")
|
||||
|
||||
content.append("Tool Input:\n", style="white")
|
||||
|
||||
# Create a syntax-highlighted JSON code block
|
||||
json_syntax = Syntax(
|
||||
formatted_json,
|
||||
"json",
|
||||
theme="monokai",
|
||||
line_numbers=False,
|
||||
background_color="default",
|
||||
)
|
||||
|
||||
content.append("\n")
|
||||
|
||||
# Create separate panels for better organization
|
||||
main_content = Text()
|
||||
main_content.append("Agent: ", style="white")
|
||||
main_content.append(f"{agent_role}\n\n", style="bright_green bold")
|
||||
|
||||
if thought and thought != "":
|
||||
main_content.append("Thought: ", style="white")
|
||||
main_content.append(f"{thought}\n\n", style="bright_green")
|
||||
|
||||
main_content.append("Using Tool: ", style="white")
|
||||
main_content.append(f"{formatted_answer.tool}", style="bright_green bold")
|
||||
|
||||
# Create the main action panel
|
||||
action_panel = Panel(
|
||||
main_content,
|
||||
title="🔧 Agent Tool Execution",
|
||||
border_style="magenta",
|
||||
padding=(1, 2),
|
||||
)
|
||||
|
||||
# Create the JSON input panel
|
||||
input_panel = Panel(
|
||||
json_syntax,
|
||||
title="Tool Input",
|
||||
border_style="blue",
|
||||
padding=(1, 2),
|
||||
)
|
||||
|
||||
# Create tool output content with better formatting
|
||||
output_text = str(formatted_answer.result)
|
||||
if len(output_text) > 2000:
|
||||
output_text = output_text[:1997] + "..."
|
||||
|
||||
output_panel = Panel(
|
||||
Text(output_text, style="bright_green"),
|
||||
title="Tool Output",
|
||||
border_style="green",
|
||||
padding=(1, 2),
|
||||
)
|
||||
|
||||
# Print all panels
|
||||
self.print(action_panel)
|
||||
self.print(input_panel)
|
||||
self.print(output_panel)
|
||||
self.print()
|
||||
|
||||
elif isinstance(formatted_answer, AgentFinish):
|
||||
# Create content for the finish panel
|
||||
content = Text()
|
||||
content.append("Agent: ", style="white")
|
||||
content.append(f"{agent_role}\n\n", style="bright_green bold")
|
||||
content.append("Final Answer:\n", style="white")
|
||||
content.append(f"{formatted_answer.output}", style="bright_green")
|
||||
|
||||
# Create and display the finish panel
|
||||
finish_panel = Panel(
|
||||
content,
|
||||
title="✅ Agent Final Answer",
|
||||
border_style="green",
|
||||
padding=(1, 2),
|
||||
)
|
||||
self.print(finish_panel)
|
||||
self.print()
|
||||
def _spinner_loop(self):
|
||||
import time
|
||||
while self._stop_spinner_event and not self._stop_spinner_event.is_set():
|
||||
if self._live and self._spinner_branches:
|
||||
for branch, (icon, name, style) in list(self._spinner_branches.items()):
|
||||
spinner_char = self._next_spinner()
|
||||
self.update_tree_label(branch, f"{icon} {spinner_char}", name, style)
|
||||
# Refresh live view
|
||||
try:
|
||||
self._live.update(self._live.renderable, refresh=True)
|
||||
except Exception:
|
||||
pass
|
||||
time.sleep(0.15)
|
||||
|
||||
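A minimal sketch of how the spinner helpers above fit together, assuming Rich's `Tree` and the `ConsoleFormatter` helpers introduced in this diff (`_register_spinner_branch`, `_unregister_spinner_branch`); this is an illustration of the intended usage, not code from the diff:

```python
from rich.tree import Tree

# Hypothetical usage inside a ConsoleFormatter event handler (illustrative only).
def start_thinking(formatter, parent: Tree) -> Tree:
    branch = parent.add("")
    # Registering starts the background _spinner_loop thread, which rewrites the
    # branch label with the next braille frame roughly every 0.15s until unregistered.
    formatter._register_spinner_branch(branch, "🧠", "Thinking...", "blue")
    return branch

def finish_thinking(formatter, branch: Tree) -> None:
    # Unregistering stops the animation; once no branches remain, the thread exits.
    formatter._unregister_spinner_branch(branch)
```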
@@ -1,6 +1,6 @@
|
||||
import logging
|
||||
import json
|
||||
from typing import Tuple, cast
|
||||
from typing import Tuple, cast, List, Optional, Dict, Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
@@ -16,10 +16,17 @@ from crewai.utilities.events.reasoning_events import (
|
||||
)
|
||||
|
||||
|
||||
class StructuredPlan(BaseModel):
|
||||
"""Structured representation of a task plan."""
|
||||
steps: List[str] = Field(description="List of steps to complete the task")
|
||||
acceptance_criteria: List[str] = Field(description="Criteria that must be met before task is considered complete")
|
||||
|
||||
|
||||
class ReasoningPlan(BaseModel):
|
||||
"""Model representing a reasoning plan for a task."""
|
||||
plan: str = Field(description="The detailed reasoning plan for the task.")
|
||||
ready: bool = Field(description="Whether the agent is ready to execute the task.")
|
||||
structured_plan: Optional[StructuredPlan] = Field(default=None, description="Structured version of the plan")
|
||||
|
||||
|
||||
class AgentReasoningOutput(BaseModel):
|
||||
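For reference, a small sketch of the nesting these new models allow, using only the field names defined above (the import path for the classes is the module touched by this diff and is not reproduced here):

```python
# StructuredPlan and ReasoningPlan are the Pydantic models defined in this diff;
# assume they are importable from the reasoning module being modified.
plan = ReasoningPlan(
    plan="1. Fetch the data\n2. Summarize it\n\nAcceptance criteria:\n- Summary under 200 words",
    ready=True,
    structured_plan=StructuredPlan(
        steps=["Fetch the data", "Summarize it"],
        acceptance_criteria=["Summary under 200 words"],
    ),
)
assert plan.structured_plan.steps[0] == "Fetch the data"
```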
@@ -31,6 +38,8 @@ class ReasoningFunction(BaseModel):
|
||||
"""Model for function calling with reasoning."""
|
||||
plan: str = Field(description="The detailed reasoning plan for the task.")
|
||||
ready: bool = Field(description="Whether the agent is ready to execute the task.")
|
||||
steps: Optional[List[str]] = Field(default=None, description="List of steps to complete the task")
|
||||
acceptance_criteria: Optional[List[str]] = Field(default=None, description="Criteria that must be met before task is complete")
|
||||
|
||||
|
||||
class AgentReasoning:
|
||||
@@ -38,7 +47,7 @@ class AgentReasoning:
|
||||
Handles the agent reasoning process, enabling an agent to reflect and create a plan
|
||||
before executing a task.
|
||||
"""
|
||||
def __init__(self, task: Task, agent: Agent):
|
||||
def __init__(self, task: Task, agent: Agent, extra_context: str | None = None):
|
||||
if not task or not agent:
|
||||
raise ValueError("Both task and agent must be provided.")
|
||||
self.task = task
|
||||
@@ -46,6 +55,7 @@ class AgentReasoning:
|
||||
self.llm = cast(LLM, agent.llm)
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.i18n = I18N()
|
||||
self.extra_context = extra_context or ""
|
||||
|
||||
def handle_agent_reasoning(self) -> AgentReasoningOutput:
|
||||
"""
|
||||
@@ -55,6 +65,9 @@ class AgentReasoning:
|
||||
Returns:
|
||||
AgentReasoningOutput: The output of the agent reasoning process.
|
||||
"""
|
||||
import time
|
||||
start_time = time.time()
|
||||
|
||||
# Emit a reasoning started event (attempt 1)
|
||||
try:
|
||||
crewai_event_bus.emit(
|
||||
@@ -72,6 +85,8 @@ class AgentReasoning:
|
||||
try:
|
||||
output = self.__handle_agent_reasoning()
|
||||
|
||||
duration_seconds = time.time() - start_time
|
||||
|
||||
# Emit reasoning completed event
|
||||
try:
|
||||
crewai_event_bus.emit(
|
||||
@@ -82,6 +97,7 @@ class AgentReasoning:
|
||||
plan=output.plan.plan,
|
||||
ready=output.plan.ready,
|
||||
attempt=1,
|
||||
duration_seconds=duration_seconds,
|
||||
),
|
||||
)
|
||||
except Exception:
|
||||
@@ -112,25 +128,25 @@ class AgentReasoning:
|
||||
Returns:
|
||||
AgentReasoningOutput: The output of the agent reasoning process.
|
||||
"""
|
||||
plan, ready = self.__create_initial_plan()
|
||||
plan, ready, structured_plan = self.__create_initial_plan()
|
||||
|
||||
plan, ready = self.__refine_plan_if_needed(plan, ready)
|
||||
plan, ready, structured_plan = self.__refine_plan_if_needed(plan, ready, structured_plan)
|
||||
|
||||
reasoning_plan = ReasoningPlan(plan=plan, ready=ready)
|
||||
reasoning_plan = ReasoningPlan(plan=plan, ready=ready, structured_plan=structured_plan)
|
||||
return AgentReasoningOutput(plan=reasoning_plan)
|
||||
|
||||
def __create_initial_plan(self) -> Tuple[str, bool]:
|
||||
def __create_initial_plan(self) -> Tuple[str, bool, Optional[StructuredPlan]]:
|
||||
"""
|
||||
Creates the initial reasoning plan for the task.
|
||||
|
||||
Returns:
|
||||
Tuple[str, bool]: The initial plan and whether the agent is ready to execute the task.
|
||||
Tuple[str, bool, Optional[StructuredPlan]]: The initial plan, whether the agent is ready, and structured plan.
|
||||
"""
|
||||
reasoning_prompt = self.__create_reasoning_prompt()
|
||||
|
||||
if self.llm.supports_function_calling():
|
||||
plan, ready = self.__call_with_function(reasoning_prompt, "initial_plan")
|
||||
return plan, ready
|
||||
plan, ready, structured_plan = self.__call_with_function(reasoning_prompt, "initial_plan")
|
||||
return plan, ready, structured_plan
|
||||
else:
|
||||
system_prompt = self.i18n.retrieve("reasoning", "initial_plan").format(
|
||||
role=self.agent.role,
|
||||
@@ -145,18 +161,21 @@ class AgentReasoning:
|
||||
]
|
||||
)
|
||||
|
||||
return self.__parse_reasoning_response(str(response))
|
||||
plan, ready = self.__parse_reasoning_response(str(response))
|
||||
structured_plan = self.__extract_structured_plan(plan)
|
||||
return plan, ready, structured_plan
|
||||
|
||||
def __refine_plan_if_needed(self, plan: str, ready: bool) -> Tuple[str, bool]:
|
||||
def __refine_plan_if_needed(self, plan: str, ready: bool, structured_plan: Optional[StructuredPlan]) -> Tuple[str, bool, Optional[StructuredPlan]]:
|
||||
"""
|
||||
Refines the reasoning plan if the agent is not ready to execute the task.
|
||||
|
||||
Args:
|
||||
plan: The current reasoning plan.
|
||||
ready: Whether the agent is ready to execute the task.
|
||||
structured_plan: The current structured plan.
|
||||
|
||||
Returns:
|
||||
Tuple[str, bool]: The refined plan and whether the agent is ready to execute the task.
|
||||
Tuple[str, bool, Optional[StructuredPlan]]: The refined plan, ready status, and structured plan.
|
||||
"""
|
||||
attempt = 1
|
||||
max_attempts = self.agent.max_reasoning_attempts
|
||||
@@ -178,7 +197,7 @@ class AgentReasoning:
|
||||
refine_prompt = self.__create_refine_prompt(plan)
|
||||
|
||||
if self.llm.supports_function_calling():
|
||||
plan, ready = self.__call_with_function(refine_prompt, "refine_plan")
|
||||
plan, ready, structured_plan = self.__call_with_function(refine_prompt, "refine_plan")
|
||||
else:
|
||||
system_prompt = self.i18n.retrieve("reasoning", "refine_plan").format(
|
||||
role=self.agent.role,
|
||||
@@ -193,6 +212,7 @@ class AgentReasoning:
|
||||
]
|
||||
)
|
||||
plan, ready = self.__parse_reasoning_response(str(response))
|
||||
structured_plan = self.__extract_structured_plan(plan)
|
||||
|
||||
attempt += 1
|
||||
|
||||
@@ -202,9 +222,9 @@ class AgentReasoning:
|
||||
)
|
||||
break
|
||||
|
||||
return plan, ready
|
||||
return plan, ready, structured_plan
|
||||
|
||||
def __call_with_function(self, prompt: str, prompt_type: str) -> Tuple[str, bool]:
|
||||
def __call_with_function(self, prompt: str, prompt_type: str) -> Tuple[str, bool, Optional[StructuredPlan]]:
|
||||
"""
|
||||
Calls the LLM with function calling to get a reasoning plan.
|
||||
|
||||
@@ -213,7 +233,7 @@ class AgentReasoning:
|
||||
prompt_type: The type of prompt (initial_plan or refine_plan).
|
||||
|
||||
Returns:
|
||||
Tuple[str, bool]: A tuple containing the plan and whether the agent is ready.
|
||||
Tuple[str, bool, Optional[StructuredPlan]]: A tuple containing the plan, ready status, and structured plan.
|
||||
"""
|
||||
self.logger.debug(f"Using function calling for {prompt_type} reasoning")
|
||||
|
||||
@@ -232,6 +252,16 @@ class AgentReasoning:
|
||||
"ready": {
|
||||
"type": "boolean",
|
||||
"description": "Whether the agent is ready to execute the task."
|
||||
},
|
||||
"steps": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "List of steps to complete the task"
|
||||
},
|
||||
"acceptance_criteria": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "Criteria that must be met before task is considered complete"
|
||||
}
|
||||
},
|
||||
"required": ["plan", "ready"]
|
||||
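Given the extended schema above, a tool call from the LLM would carry arguments shaped roughly like this (the values are made up for illustration):

```python
# Example arguments matching the extended create_reasoning_plan schema.
example_arguments = {
    "plan": "First gather the inputs, then draft and verify the answer.",
    "ready": True,
    "steps": ["Gather inputs", "Draft answer", "Verify against expected output"],
    "acceptance_criteria": ["Answer addresses every part of the task description"],
}
# Only "plan" and "ready" are required; "steps" and "acceptance_criteria" are optional
# and, when present, are folded into a StructuredPlan by the calling code.
```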
@@ -247,9 +277,14 @@ class AgentReasoning:
|
||||
)
|
||||
|
||||
# Prepare a simple callable that just returns the tool arguments as JSON
|
||||
def _create_reasoning_plan(plan: str, ready: bool): # noqa: N802
|
||||
def _create_reasoning_plan(plan: str, ready: bool, steps: Optional[List[str]] = None, acceptance_criteria: Optional[List[str]] = None): # noqa: N802
|
||||
"""Return the reasoning plan result in JSON string form."""
|
||||
return json.dumps({"plan": plan, "ready": ready})
|
||||
return json.dumps({
|
||||
"plan": plan,
|
||||
"ready": ready,
|
||||
"steps": steps,
|
||||
"acceptance_criteria": acceptance_criteria
|
||||
})
|
||||
|
||||
response = self.llm.call(
|
||||
[
|
||||
@@ -265,12 +300,19 @@ class AgentReasoning:
|
||||
try:
|
||||
result = json.loads(response)
|
||||
if "plan" in result and "ready" in result:
|
||||
return result["plan"], result["ready"]
|
||||
structured_plan = None
|
||||
if result.get("steps") or result.get("acceptance_criteria"):
|
||||
structured_plan = StructuredPlan(
|
||||
steps=result.get("steps", []),
|
||||
acceptance_criteria=result.get("acceptance_criteria", [])
|
||||
)
|
||||
return result["plan"], result["ready"], structured_plan
|
||||
except (json.JSONDecodeError, KeyError):
|
||||
pass
|
||||
|
||||
response_str = str(response)
|
||||
return response_str, "READY: I am ready to execute the task." in response_str
|
||||
structured_plan = self.__extract_structured_plan(response_str)
|
||||
return response_str, "READY: I am ready to execute the task." in response_str, structured_plan
|
||||
|
||||
except Exception as e:
|
||||
self.logger.warning(f"Error during function calling: {str(e)}. Falling back to text parsing.")
|
||||
@@ -290,10 +332,11 @@ class AgentReasoning:
|
||||
)
|
||||
|
||||
fallback_str = str(fallback_response)
|
||||
return fallback_str, "READY: I am ready to execute the task." in fallback_str
|
||||
structured_plan = self.__extract_structured_plan(fallback_str)
|
||||
return fallback_str, "READY: I am ready to execute the task." in fallback_str, structured_plan
|
||||
except Exception as inner_e:
|
||||
self.logger.error(f"Error during fallback text parsing: {str(inner_e)}")
|
||||
return "Failed to generate a plan due to an error.", True # Default to ready to avoid getting stuck
|
||||
return "Failed to generate a plan due to an error.", True, None # Default to ready to avoid getting stuck
|
||||
|
||||
def __get_agent_backstory(self) -> str:
|
||||
"""
|
||||
@@ -317,7 +360,7 @@ class AgentReasoning:
|
||||
role=self.agent.role,
|
||||
goal=self.agent.goal,
|
||||
backstory=self.__get_agent_backstory(),
|
||||
description=self.task.description,
|
||||
description=self.task.description + (f"\n\nContext:\n{self.extra_context}" if self.extra_context else ""),
|
||||
expected_output=self.task.expected_output,
|
||||
tools=available_tools
|
||||
)
|
||||
@@ -368,7 +411,7 @@ class AgentReasoning:
|
||||
plan = response
|
||||
ready = False
|
||||
|
||||
if "READY: I am ready to execute the task." in response:
|
||||
if "READY: I am ready to execute the task." in response or "READY: I am ready to continue executing the task." in response:
|
||||
ready = True
|
||||
|
||||
return plan, ready
|
||||
@@ -385,3 +428,403 @@ class AgentReasoning:
|
||||
"The _handle_agent_reasoning method is deprecated. Use handle_agent_reasoning instead."
|
||||
)
|
||||
return self.handle_agent_reasoning()
|
||||
|
||||
def _emit_reasoning_event(self, event_class, **kwargs):
|
||||
"""Centralized method for emitting reasoning events."""
|
||||
try:
|
||||
reasoning_trigger = "interval"
|
||||
if hasattr(self.agent, 'adaptive_reasoning') and self.agent.adaptive_reasoning:
|
||||
reasoning_trigger = "adaptive"
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self.agent,
|
||||
event_class(
|
||||
agent_role=self.agent.role,
|
||||
task_id=str(self.task.id),
|
||||
reasoning_trigger=reasoning_trigger,
|
||||
**kwargs
|
||||
),
|
||||
)
|
||||
except Exception:
|
||||
# Ignore event bus errors to avoid breaking execution
|
||||
pass
|
||||
|
||||
def handle_mid_execution_reasoning(
|
||||
self,
|
||||
current_steps: int,
|
||||
tools_used: list,
|
||||
current_progress: str,
|
||||
iteration_messages: list
|
||||
) -> AgentReasoningOutput:
|
||||
"""
|
||||
Handle reasoning during task execution with context about current progress.
|
||||
|
||||
Args:
|
||||
current_steps: Number of steps executed so far
|
||||
tools_used: List of tools that have been used
|
||||
current_progress: Summary of progress made so far
|
||||
iteration_messages: Recent conversation messages
|
||||
|
||||
Returns:
|
||||
AgentReasoningOutput: Updated reasoning plan based on current context
|
||||
"""
|
||||
import time
|
||||
start_time = time.time()
|
||||
|
||||
from crewai.utilities.events.reasoning_events import AgentMidExecutionReasoningStartedEvent
|
||||
|
||||
self._emit_reasoning_event(
|
||||
AgentMidExecutionReasoningStartedEvent,
|
||||
current_step=current_steps
|
||||
)
|
||||
|
||||
try:
|
||||
output = self.__handle_mid_execution_reasoning(
|
||||
current_steps, tools_used, current_progress, iteration_messages
|
||||
)
|
||||
|
||||
duration_seconds = time.time() - start_time
|
||||
|
||||
# Emit completed event
|
||||
from crewai.utilities.events.reasoning_events import AgentMidExecutionReasoningCompletedEvent
|
||||
|
||||
self._emit_reasoning_event(
|
||||
AgentMidExecutionReasoningCompletedEvent,
|
||||
current_step=current_steps,
|
||||
updated_plan=output.plan.plan,
|
||||
duration_seconds=duration_seconds
|
||||
)
|
||||
|
||||
return output
|
||||
except Exception as e:
|
||||
# Emit failed event
|
||||
from crewai.utilities.events.reasoning_events import AgentReasoningFailedEvent
|
||||
|
||||
self._emit_reasoning_event(
|
||||
AgentReasoningFailedEvent,
|
||||
error=str(e),
|
||||
attempt=1
|
||||
)
|
||||
|
||||
raise
|
||||
|
||||
def __handle_mid_execution_reasoning(
|
||||
self,
|
||||
current_steps: int,
|
||||
tools_used: list,
|
||||
current_progress: str,
|
||||
iteration_messages: list
|
||||
) -> AgentReasoningOutput:
|
||||
"""
|
||||
Private method that handles the mid-execution reasoning process.
|
||||
|
||||
Args:
|
||||
current_steps: Number of steps executed so far
|
||||
tools_used: List of tools that have been used
|
||||
current_progress: Summary of progress made so far
|
||||
iteration_messages: Recent conversation messages
|
||||
|
||||
Returns:
|
||||
AgentReasoningOutput: The output of the mid-execution reasoning process.
|
||||
"""
|
||||
mid_execution_prompt = self.__create_mid_execution_prompt(
|
||||
current_steps, tools_used, current_progress, iteration_messages
|
||||
)
|
||||
|
||||
if self.llm.supports_function_calling():
|
||||
plan, ready, structured_plan = self.__call_with_function(mid_execution_prompt, "mid_execution_plan")
|
||||
else:
|
||||
# Use the same prompt for system context
|
||||
system_prompt = self.i18n.retrieve("reasoning", "mid_execution_plan").format(
|
||||
role=self.agent.role,
|
||||
goal=self.agent.goal,
|
||||
backstory=self.__get_agent_backstory()
|
||||
)
|
||||
|
||||
response = self.llm.call(
|
||||
[
|
||||
{"role": "system", "content": system_prompt},
|
||||
{"role": "user", "content": mid_execution_prompt}
|
||||
]
|
||||
)
|
||||
|
||||
plan, ready = self.__parse_reasoning_response(str(response))
|
||||
structured_plan = self.__extract_structured_plan(plan)
|
||||
|
||||
reasoning_plan = ReasoningPlan(plan=plan, ready=ready, structured_plan=structured_plan)
|
||||
return AgentReasoningOutput(plan=reasoning_plan)
|
||||
|
||||
def __create_mid_execution_prompt(
|
||||
self,
|
||||
current_steps: int,
|
||||
tools_used: list,
|
||||
current_progress: str,
|
||||
iteration_messages: list
|
||||
) -> str:
|
||||
"""
|
||||
Creates a prompt for the agent to reason during task execution.
|
||||
|
||||
Args:
|
||||
current_steps: Number of steps executed so far
|
||||
tools_used: List of tools that have been used
|
||||
current_progress: Summary of progress made so far
|
||||
iteration_messages: Recent conversation messages
|
||||
|
||||
Returns:
|
||||
str: The mid-execution reasoning prompt.
|
||||
"""
|
||||
tools_used_str = ", ".join(tools_used) if tools_used else "No tools used yet"
|
||||
|
||||
recent_messages = ""
|
||||
if iteration_messages:
|
||||
recent_msgs = iteration_messages[-6:] if len(iteration_messages) > 6 else iteration_messages
|
||||
for msg in recent_msgs:
|
||||
role = msg.get("role", "unknown")
|
||||
content = msg.get("content", "")
|
||||
if content:
|
||||
recent_messages += f"{role.upper()}: {content[:200]}...\n\n"
|
||||
|
||||
return self.i18n.retrieve("reasoning", "mid_execution_reasoning").format(
|
||||
description=self.task.description + (f"\n\nContext:\n{self.extra_context}" if self.extra_context else ""),
|
||||
expected_output=self.task.expected_output,
|
||||
current_steps=current_steps,
|
||||
tools_used=tools_used_str,
|
||||
current_progress=current_progress,
|
||||
recent_messages=recent_messages
|
||||
)
|
||||
|
||||
def should_adaptive_reason_llm(
|
||||
self,
|
||||
current_steps: int,
|
||||
tools_used: list,
|
||||
current_progress: str,
|
||||
tool_usage_stats: Optional[Dict[str, Any]] = None
|
||||
) -> bool:
|
||||
"""
|
||||
Use LLM function calling to determine if adaptive reasoning should be triggered.
|
||||
|
||||
Args:
|
||||
current_steps: Number of steps executed so far
|
||||
tools_used: List of tools that have been used
|
||||
current_progress: Summary of progress made so far
|
||||
tool_usage_stats: Optional statistics about tool usage patterns
|
||||
|
||||
Returns:
|
||||
bool: True if reasoning should be triggered, False otherwise.
|
||||
"""
|
||||
try:
|
||||
decision_prompt = self.__create_adaptive_reasoning_decision_prompt(
|
||||
current_steps, tools_used, current_progress, tool_usage_stats
|
||||
)
|
||||
|
||||
if self.llm.supports_function_calling():
|
||||
should_reason, reasoning_expl = self.__call_adaptive_reasoning_function(decision_prompt)
|
||||
else:
|
||||
should_reason, reasoning_expl = self.__call_adaptive_reasoning_text(decision_prompt)
|
||||
|
||||
# Emit an event so the UI/console can display the decision
|
||||
try:
|
||||
from crewai.utilities.events.reasoning_events import AgentAdaptiveReasoningDecisionEvent
|
||||
|
||||
self._emit_reasoning_event(
|
||||
AgentAdaptiveReasoningDecisionEvent,
|
||||
should_reason=should_reason,
|
||||
reasoning=reasoning_expl,
|
||||
)
|
||||
except Exception:
|
||||
# Ignore event bus errors to avoid breaking execution
|
||||
pass
|
||||
|
||||
return should_reason
|
||||
except Exception as e:
|
||||
self.logger.warning(f"Error during adaptive reasoning decision: {str(e)}. Defaulting to no reasoning.")
|
||||
return False
|
||||
|
||||
def __call_adaptive_reasoning_function(self, prompt: str) -> tuple[bool, str]:
|
||||
"""Call LLM with function calling for adaptive reasoning decision."""
|
||||
function_schema = {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "decide_reasoning_need",
|
||||
"description": "Decide whether reasoning is needed based on current task execution context",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"should_reason": {
|
||||
"type": "boolean",
|
||||
"description": "Whether reasoning/re-planning is needed at this point in task execution."
|
||||
},
|
||||
"reasoning": {
|
||||
"type": "string",
|
||||
"description": "Brief explanation of why reasoning is or isn't needed."
|
||||
},
|
||||
"detected_issues": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "List of specific issues detected (e.g., 'repeated tool failures', 'no progress', 'inefficient approach')"
|
||||
}
|
||||
},
|
||||
"required": ["should_reason", "reasoning"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def _decide_reasoning_need(should_reason: bool, reasoning: str, detected_issues: Optional[List[str]] = None):
|
||||
"""Return the reasoning decision result in JSON string form."""
|
||||
result = {
|
||||
"should_reason": should_reason,
|
||||
"reasoning": reasoning
|
||||
}
|
||||
if detected_issues:
|
||||
result["detected_issues"] = detected_issues
|
||||
# Append detected issues to reasoning explanation
|
||||
issues_str = ", ".join(detected_issues)
|
||||
result["reasoning"] = f"{reasoning} Detected issues: {issues_str}"
|
||||
return json.dumps(result)
|
||||
|
||||
system_prompt = self.i18n.retrieve("reasoning", "adaptive_reasoning_decision").format(
|
||||
role=self.agent.role,
|
||||
goal=self.agent.goal,
|
||||
backstory=self.__get_agent_backstory()
|
||||
)
|
||||
|
||||
response = self.llm.call(
|
||||
[
|
||||
{"role": "system", "content": system_prompt},
|
||||
{"role": "user", "content": prompt}
|
||||
],
|
||||
tools=[function_schema],
|
||||
available_functions={"decide_reasoning_need": _decide_reasoning_need},
|
||||
)
|
||||
|
||||
try:
|
||||
result = json.loads(response)
|
||||
reasoning_text = result.get("reasoning", "No explanation provided")
|
||||
if result.get("detected_issues"):
|
||||
# Include detected issues in the reasoning text for logging
|
||||
self.logger.debug(f"Adaptive reasoning detected issues: {result['detected_issues']}")
|
||||
return result.get("should_reason", False), reasoning_text
|
||||
except (json.JSONDecodeError, KeyError):
|
||||
return False, "No explanation provided"
|
||||
|
||||
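For clarity, the `decide_reasoning_need` tool above would receive arguments along these lines (illustrative values only):

```python
# Example arguments for the decide_reasoning_need tool call.
decision_arguments = {
    "should_reason": True,
    "reasoning": "The same tool has failed three times with no new information.",
    "detected_issues": ["repeated tool failures", "no progress"],
}
# _decide_reasoning_need appends detected_issues to the explanation before the
# decision is parsed back out of the JSON response.
```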
def __call_adaptive_reasoning_text(self, prompt: str) -> tuple[bool, str]:
|
||||
"""Fallback text-based adaptive reasoning decision."""
|
||||
system_prompt = self.i18n.retrieve("reasoning", "adaptive_reasoning_decision").format(
|
||||
role=self.agent.role,
|
||||
goal=self.agent.goal,
|
||||
backstory=self.__get_agent_backstory()
|
||||
)
|
||||
|
||||
response = self.llm.call([
|
||||
{"role": "system", "content": system_prompt},
|
||||
{"role": "user", "content": prompt + "\n\nRespond with 'YES' if reasoning is needed, 'NO' if not."}
|
||||
])
|
||||
|
||||
return "YES" in str(response).upper(), "No explanation provided"
|
||||
|
||||
def __create_adaptive_reasoning_decision_prompt(
|
||||
self,
|
||||
current_steps: int,
|
||||
tools_used: list,
|
||||
current_progress: str,
|
||||
tool_usage_stats: Optional[Dict[str, Any]] = None
|
||||
) -> str:
|
||||
"""Create prompt for adaptive reasoning decision."""
|
||||
tools_used_str = ", ".join(tools_used) if tools_used else "No tools used yet"
|
||||
|
||||
# Add tool usage statistics to the prompt
|
||||
tool_stats_str = ""
|
||||
if tool_usage_stats:
|
||||
tool_stats_str = f"\n\nTOOL USAGE STATISTICS:\n"
|
||||
tool_stats_str += f"- Total tool invocations: {tool_usage_stats.get('total_tool_uses', 0)}\n"
|
||||
tool_stats_str += f"- Unique tools used: {tool_usage_stats.get('unique_tools', 0)}\n"
|
||||
|
||||
if tool_usage_stats.get('tools_by_frequency'):
|
||||
tool_stats_str += "- Tool frequency:\n"
|
||||
for tool, count in tool_usage_stats['tools_by_frequency'].items():
|
||||
tool_stats_str += f" • {tool}: {count} times\n"
|
||||
|
||||
if tool_usage_stats.get('recent_patterns'):
|
||||
tool_stats_str += f"- Recent patterns: {tool_usage_stats['recent_patterns']}\n"
|
||||
|
||||
# Use the prompt from i18n and format it with the current context
|
||||
base_prompt = self.i18n.retrieve("reasoning", "adaptive_reasoning_decision").format(
|
||||
role=self.agent.role,
|
||||
goal=self.agent.goal,
|
||||
backstory=self.__get_agent_backstory()
|
||||
)
|
||||
|
||||
context_prompt = self.i18n.retrieve("reasoning", "adaptive_reasoning_context").format(
|
||||
description=self.task.description + (f"\n\nContext:\n{self.extra_context}" if self.extra_context else ""),
|
||||
expected_output=self.task.expected_output,
|
||||
current_steps=current_steps,
|
||||
tools_used=tools_used_str,
|
||||
current_progress=current_progress + tool_stats_str
|
||||
)
|
||||
|
||||
prompt = base_prompt + context_prompt
|
||||
|
||||
return prompt
|
||||
|
||||
def __extract_structured_plan(self, plan: str) -> Optional[StructuredPlan]:
|
||||
"""
|
||||
Extracts a structured plan from the given plan text.
|
||||
|
||||
Args:
|
||||
plan: The plan text.
|
||||
|
||||
Returns:
|
||||
Optional[StructuredPlan]: The extracted structured plan or None if no plan was found.
|
||||
"""
|
||||
if not plan:
|
||||
return None
|
||||
|
||||
import re
|
||||
|
||||
steps = []
|
||||
acceptance_criteria = []
|
||||
|
||||
# Look for numbered steps (1., 2., etc.)
|
||||
step_pattern = r'^\s*(?:\d+\.|\-|\*)\s*(.+)$'
|
||||
|
||||
# Look for acceptance criteria section
|
||||
in_acceptance_section = False
|
||||
|
||||
lines = plan.split('\n')
|
||||
for line in lines:
|
||||
line = line.strip()
|
||||
|
||||
# Check if we're entering acceptance criteria section
|
||||
if any(marker in line.lower() for marker in ['acceptance criteria', 'success criteria', 'completion criteria']):
|
||||
in_acceptance_section = True
|
||||
continue
|
||||
|
||||
# Skip empty lines
|
||||
if not line:
|
||||
continue
|
||||
|
||||
# Extract steps or criteria
|
||||
match = re.match(step_pattern, line, re.MULTILINE)
|
||||
if match:
|
||||
content = match.group(1).strip()
|
||||
if in_acceptance_section:
|
||||
acceptance_criteria.append(content)
|
||||
else:
|
||||
steps.append(content)
|
||||
elif line and not line.endswith(':'): # Non-empty line that's not a header
|
||||
if in_acceptance_section:
|
||||
acceptance_criteria.append(line)
|
||||
else:
|
||||
# Check if it looks like a step (starts with action verb)
|
||||
action_verbs = ['create', 'implement', 'design', 'build', 'test', 'verify', 'check', 'ensure', 'analyze', 'review']
|
||||
if any(line.lower().startswith(verb) for verb in action_verbs):
|
||||
steps.append(line)
|
||||
|
||||
# If we found steps or criteria, return structured plan
|
||||
if steps or acceptance_criteria:
|
||||
return StructuredPlan(
|
||||
steps=steps,
|
||||
acceptance_criteria=acceptance_criteria
|
||||
)
|
||||
|
||||
return None
|
||||
|
||||
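A short sketch of the kind of free-form plan text the extractor above is written to handle, assuming the markers it looks for (numbered or bulleted steps, an "acceptance criteria" heading); the expected outcome is noted in the comment:

```python
plan_text = """
1. Review the task description and identify the inputs.
2. Create a draft answer.
3. Verify the draft against the expected output.

Acceptance criteria:
- The answer covers every requirement.
- The answer is returned in the requested format.
"""
# Text like this yields a StructuredPlan with three steps and two acceptance
# criteria; text containing neither steps nor criteria yields None.
```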
tests/adaptive_reasoning_test.py (new file, 89 lines)
@@ -0,0 +1,89 @@
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai import Agent, Crew, Task
|
||||
from crewai.agents.crew_agent_executor import CrewAgentExecutor
|
||||
|
||||
|
||||
def _create_executor(agent): # noqa: D401,E501
|
||||
"""Utility to build a minimal CrewAgentExecutor with the given agent.
|
||||
|
||||
A real LLM call is not required for these unit-tests, so we stub it with
|
||||
MagicMock to avoid any network interaction.
|
||||
"""
|
||||
return CrewAgentExecutor(
|
||||
llm=MagicMock(),
|
||||
task=MagicMock(),
|
||||
crew=MagicMock(),
|
||||
agent=agent,
|
||||
prompt={},
|
||||
max_iter=5,
|
||||
tools=[],
|
||||
tools_names="",
|
||||
stop_words=[],
|
||||
tools_description="",
|
||||
tools_handler=MagicMock(),
|
||||
)
|
||||
|
||||
|
||||
def test_agent_adaptive_reasoning_default():
|
||||
"""Agent.adaptive_reasoning should be False by default."""
|
||||
agent = Agent(role="Test", goal="Goal", backstory="Backstory")
|
||||
assert agent.adaptive_reasoning is False
|
||||
|
||||
|
||||
@pytest.mark.parametrize("adaptive_decision,expected", [(True, True), (False, False)])
|
||||
def test_should_trigger_reasoning_with_adaptive_reasoning(adaptive_decision, expected):
|
||||
"""Verify _should_trigger_reasoning defers to _should_adaptive_reason when
|
||||
adaptive_reasoning is enabled and reasoning_interval is None."""
|
||||
# Use a lightweight mock instead of a full Agent instance to isolate the logic
|
||||
agent = MagicMock()
|
||||
agent.reasoning = True
|
||||
agent.reasoning_interval = None
|
||||
agent.adaptive_reasoning = True
|
||||
|
||||
executor = _create_executor(agent)
|
||||
|
||||
# Ensure the helper returns the desired decision
|
||||
with patch.object(executor, "_should_adaptive_reason", return_value=adaptive_decision) as mock_adaptive:
|
||||
assert executor._should_trigger_reasoning() is expected
|
||||
mock_adaptive.assert_called_once()
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_adaptive_reasoning_full_execution():
|
||||
"""End-to-end test that triggers adaptive reasoning in a real execution flow.
|
||||
|
||||
The task description intentionally contains the word "error" to activate the
|
||||
simple error-based heuristic inside `_should_adaptive_reason`, guaranteeing
|
||||
that the agent reasons mid-execution without relying on patched internals.
|
||||
"""
|
||||
agent = Agent(
|
||||
role="Math Analyst",
|
||||
goal="Solve arithmetic problems flawlessly",
|
||||
backstory="You excel at basic calculations and always double-check your steps.",
|
||||
llm="gpt-4o-mini",
|
||||
reasoning=True,
|
||||
adaptive_reasoning=True,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="There was an unexpected error earlier. Now, please calculate 3 + 5 and return only the number.",
|
||||
expected_output="The result of the calculation (a single number).",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
|
||||
result = crew.kickoff()
|
||||
|
||||
# Validate the answer is correct and numeric
|
||||
assert result.raw.strip() == "8"
|
||||
|
||||
# Confirm that an adaptive reasoning message (Updated plan) was injected
|
||||
assert any(
|
||||
"updated plan" in msg.get("content", "").lower()
|
||||
for msg in agent.agent_executor.messages
|
||||
)
|
||||
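The end-to-end test above boils down to the following configuration; this is a minimal sketch of opting an agent into the new behaviour (the model name is just an example):

```python
from crewai import Agent, Crew, Task

agent = Agent(
    role="Math Analyst",
    goal="Solve arithmetic problems flawlessly",
    backstory="You double-check every step.",
    reasoning=True,           # enable reasoning at all
    adaptive_reasoning=True,  # let the agent decide mid-execution when to re-plan
    llm="gpt-4o-mini",
)
task = Task(
    description="Calculate 3 + 5 and return only the number.",
    expected_output="A single number.",
    agent=agent,
)
Crew(agents=[agent], tasks=[task]).kickoff()
```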
@@ -501,7 +501,8 @@ def test_agent_custom_max_iterations():
|
||||
def test_agent_repeated_tool_usage(capsys):
|
||||
@tool
|
||||
def get_final_answer() -> float:
|
||||
"""Get the final answer but don't give it yet, just re-use this tool non-stop."""
|
||||
"""Get the final answer but don't give it yet, just re-use this
|
||||
tool non-stop."""
|
||||
return 42
|
||||
|
||||
agent = Agent(
|
||||
@@ -526,41 +527,11 @@ def test_agent_repeated_tool_usage(capsys):
|
||||
)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
output = (
|
||||
captured.out.replace("\n", " ")
|
||||
.replace(" ", " ")
|
||||
.strip()
|
||||
.replace("╭", "")
|
||||
.replace("╮", "")
|
||||
.replace("╯", "")
|
||||
.replace("╰", "")
|
||||
.replace("│", "")
|
||||
.replace("─", "")
|
||||
.replace("[", "")
|
||||
.replace("]", "")
|
||||
.replace("bold", "")
|
||||
.replace("blue", "")
|
||||
.replace("yellow", "")
|
||||
.replace("green", "")
|
||||
.replace("red", "")
|
||||
.replace("dim", "")
|
||||
.replace("🤖", "")
|
||||
.replace("🔧", "")
|
||||
.replace("✅", "")
|
||||
.replace("\x1b[93m", "")
|
||||
.replace("\x1b[00m", "")
|
||||
.replace("\\", "")
|
||||
.replace('"', "")
|
||||
.replace("'", "")
|
||||
)
|
||||
|
||||
# Look for the message in the normalized output, handling the apostrophe difference
|
||||
expected_message = (
|
||||
"I tried reusing the same input, I must stop using this action input."
|
||||
)
|
||||
assert (
|
||||
expected_message in output
|
||||
), f"Expected message not found in output. Output was: {output}"
|
||||
"I tried reusing the same input, I must stop using this action input. I'll try something else instead."
|
||||
in captured.out
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -593,42 +564,10 @@ def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys):
|
||||
)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
output = (
|
||||
captured.out.replace("\n", " ")
|
||||
.replace(" ", " ")
|
||||
.strip()
|
||||
.replace("╭", "")
|
||||
.replace("╮", "")
|
||||
.replace("╯", "")
|
||||
.replace("╰", "")
|
||||
.replace("│", "")
|
||||
.replace("─", "")
|
||||
.replace("[", "")
|
||||
.replace("]", "")
|
||||
.replace("bold", "")
|
||||
.replace("blue", "")
|
||||
.replace("yellow", "")
|
||||
.replace("green", "")
|
||||
.replace("red", "")
|
||||
.replace("dim", "")
|
||||
.replace("🤖", "")
|
||||
.replace("🔧", "")
|
||||
.replace("✅", "")
|
||||
.replace("\x1b[93m", "")
|
||||
.replace("\x1b[00m", "")
|
||||
.replace("\\", "")
|
||||
.replace('"', "")
|
||||
.replace("'", "")
|
||||
)
|
||||
|
||||
# Look for the message in the normalized output, handling the apostrophe difference
|
||||
expected_message = (
|
||||
"I tried reusing the same input, I must stop using this action input"
|
||||
)
|
||||
|
||||
assert (
|
||||
expected_message in output
|
||||
), f"Expected message not found in output. Output was: {output}"
|
||||
"I tried reusing the same input, I must stop using this action input. I'll try something else instead."
|
||||
in captured.out
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -2153,12 +2092,7 @@ def test_agent_from_repository_with_invalid_tools(mock_get_agent, mock_get_auth_
|
||||
"role": "test role",
|
||||
"goal": "test goal",
|
||||
"backstory": "test backstory",
|
||||
"tools": [
|
||||
{
|
||||
"name": "DoesNotExist",
|
||||
"module": "crewai_tools",
|
||||
}
|
||||
],
|
||||
"tools": [{"name": "DoesNotExist", "module": "crewai_tools",}],
|
||||
}
|
||||
mock_get_agent.return_value = mock_get_response
|
||||
with pytest.raises(
|
||||
@@ -2192,64 +2126,3 @@ def test_agent_from_repository_agent_not_found(mock_get_agent, mock_get_auth_tok
|
||||
match="Agent test_agent does not exist, make sure the name is correct or the agent is available on your organization",
|
||||
):
|
||||
Agent(from_repository="test_agent")
|
||||
|
||||
|
||||
@patch("crewai.cli.plus_api.PlusAPI.get_agent")
|
||||
@patch("crewai.utilities.agent_utils.Settings")
|
||||
@patch("crewai.utilities.agent_utils.console")
|
||||
def test_agent_from_repository_displays_org_info(
|
||||
mock_console, mock_settings, mock_get_agent, mock_get_auth_token
|
||||
):
|
||||
mock_settings_instance = MagicMock()
|
||||
mock_settings_instance.org_uuid = "test-org-uuid"
|
||||
mock_settings_instance.org_name = "Test Organization"
|
||||
mock_settings.return_value = mock_settings_instance
|
||||
|
||||
mock_get_response = MagicMock()
|
||||
mock_get_response.status_code = 200
|
||||
mock_get_response.json.return_value = {
|
||||
"role": "test role",
|
||||
"goal": "test goal",
|
||||
"backstory": "test backstory",
|
||||
"tools": [],
|
||||
}
|
||||
mock_get_agent.return_value = mock_get_response
|
||||
|
||||
agent = Agent(from_repository="test_agent")
|
||||
|
||||
mock_console.print.assert_any_call(
|
||||
"Fetching agent from organization: Test Organization (test-org-uuid)",
|
||||
style="bold blue",
|
||||
)
|
||||
|
||||
assert agent.role == "test role"
|
||||
assert agent.goal == "test goal"
|
||||
assert agent.backstory == "test backstory"
|
||||
|
||||
|
||||
@patch("crewai.cli.plus_api.PlusAPI.get_agent")
|
||||
@patch("crewai.utilities.agent_utils.Settings")
|
||||
@patch("crewai.utilities.agent_utils.console")
|
||||
def test_agent_from_repository_without_org_set(
|
||||
mock_console, mock_settings, mock_get_agent, mock_get_auth_token
|
||||
):
|
||||
mock_settings_instance = MagicMock()
|
||||
mock_settings_instance.org_uuid = None
|
||||
mock_settings_instance.org_name = None
|
||||
mock_settings.return_value = mock_settings_instance
|
||||
|
||||
mock_get_response = MagicMock()
|
||||
mock_get_response.status_code = 401
|
||||
mock_get_response.text = "Unauthorized access"
|
||||
mock_get_agent.return_value = mock_get_response
|
||||
|
||||
with pytest.raises(
|
||||
AgentRepositoryError,
|
||||
match="Agent test_agent could not be loaded: Unauthorized access",
|
||||
):
|
||||
Agent(from_repository="test_agent")
|
||||
|
||||
mock_console.print.assert_any_call(
|
||||
"No organization currently set. We recommend setting one before using: `crewai org switch <org_id>` command.",
|
||||
style="yellow",
|
||||
)
|
||||
|
||||
(2 file diffs suppressed because one or more lines are too long)
@@ -1,137 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Sports Analyst. You
|
||||
are an expert at gathering and organizing information. You carefully collect
|
||||
details and present them in a structured way.\nYour personal goal is: Gather
|
||||
information about the best soccer players\n\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}, {"role": "user", "content": "Top 10 best players
|
||||
in the world?"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '694'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.78.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.78.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
(gzip/base64 response payload omitted; not human-readable)
headers:
|
||||
CF-RAY:
|
||||
- 94d9be627c40f260-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Jun 2025 15:02:05 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=qYkxv9nLxeWAtPBvECxNw8fLnoBHLorJdRI8.xVEVEA-1749567725-1.0.1.1-75sp4gwHGJocK1MFkSgRcB4xJUiCwz31VRD4LAmQGEmfYB0BMQZ5sgWS8e_UMbjCaEhaPNO88q5XdbLOCWA85_rO0vYTb4hp6tmIiaerhsM;
|
||||
path=/; expires=Tue, 10-Jun-25 15:32:05 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=HRKCwkyTqSXpCj9_i_T5lDtlr_INA290o0b3k.26oi8-1749567725794-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '42674'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '42684'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999859'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_d92e6f33fa5e0fbe43349afee8f55921
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
(9 file diffs suppressed because one or more lines are too long)
@@ -1020,4 +1020,63 @@ interactions:
- req_83a900d075a98ab391c27c5d1cd4fbcb
http_version: HTTP/1.1
status_code: 200
- request:
body: !!binary |
CtcMCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSrgwKEgoQY3Jld2FpLnRl
bGVtZXRyeRKUCAoQot+4QIkYBzRK/SkLTbr2XBIIKFjBQjUmSxQqDENyZXcgQ3JlYXRlZDABOdDB
ddshVEMYQbBDfdshVEMYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTIxLjBKGgoOcHl0aG9uX3Zl
cnNpb24SCAoGMy4xMS43Si4KCGNyZXdfa2V5EiIKIDY5NDY1NGEzMThmNzE5ODgzYzA2ZjhlNmQ5
YTc1NDlmSjEKB2NyZXdfaWQSJgokYTgxMTFiOTktZWJkMy00ZWYzLWFmNmQtMTk1ZDhiYjNhN2Jl
ShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3
X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUo6ChBjcmV3
X2ZpbmdlcnByaW50EiYKJGQ2YTE3OTk4LTQ0ODgtNDQ0Mi1iY2I3LWZiYzdlMDU1NjE4MUo7Chtj
cmV3X2ZpbmdlcnByaW50X2NyZWF0ZWRfYXQSHAoaMjAyNS0wNS0yN1QwMToxMzowNC43NDEyNzRK
zAIKC2NyZXdfYWdlbnRzErwCCrkCW3sia2V5IjogIjU1ODY5YmNiMTYzMjNlNzEyOWQyNTIzNjJj
ODU1ZGE2IiwgImlkIjogIjdiOTMxZWIzLTRiM2YtNGI3OC1hOWEzLTY4ODZiNTE1M2QxZiIsICJy
b2xlIjogIlNheSBIaSIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyNSwgIm1heF9y
cG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJ0ZXN0LW1vZGVs
IiwgImRlbGVnYXRpb25fZW5hYmxlZD8iOiBmYWxzZSwgImFsbG93X2NvZGVfZXhlY3V0aW9uPyI6
IGZhbHNlLCAibWF4X3JldHJ5X2xpbWl0IjogMiwgInRvb2xzX25hbWVzIjogW119XUr7AQoKY3Jl
d190YXNrcxLsAQrpAVt7ImtleSI6ICJkZTI5NDBmMDZhZDhhNDE2YzI4Y2MwZjI2MTBmMTgwYiIs
ICJpZCI6ICI5MWU0NjUyYy1kMzk0LTQyMGQtYTIwOS0wNzlhYThkN2E5MDAiLCAiYXN5bmNfZXhl
Y3V0aW9uPyI6IGZhbHNlLCAiaHVtYW5faW5wdXQ/IjogZmFsc2UsICJhZ2VudF9yb2xlIjogIlNh
eSBIaSIsICJhZ2VudF9rZXkiOiAiNTU4NjliY2IxNjMyM2U3MTI5ZDI1MjM2MmM4NTVkYTYiLCAi
dG9vbHNfbmFtZXMiOiBbXX1degIYAYUBAAEAABKABAoQG7YeGAh0HoexHn1XjCcuCRIIHGHDZGTQ
CskqDFRhc2sgQ3JlYXRlZDABObDFkdshVEMYQThWktshVEMYSi4KCGNyZXdfa2V5EiIKIDY5NDY1
NGEzMThmNzE5ODgzYzA2ZjhlNmQ5YTc1NDlmSjEKB2NyZXdfaWQSJgokYTgxMTFiOTktZWJkMy00
ZWYzLWFmNmQtMTk1ZDhiYjNhN2JlSi4KCHRhc2tfa2V5EiIKIGRlMjk0MGYwNmFkOGE0MTZjMjhj
YzBmMjYxMGYxODBiSjEKB3Rhc2tfaWQSJgokOTFlNDY1MmMtZDM5NC00MjBkLWEyMDktMDc5YWE4
ZDdhOTAwSjoKEGNyZXdfZmluZ2VycHJpbnQSJgokZDZhMTc5OTgtNDQ4OC00NDQyLWJjYjctZmJj
N2UwNTU2MTgxSjoKEHRhc2tfZmluZ2VycHJpbnQSJgokZDk1ZDk1ZmYtYTBhNC00NmFjLWI2NWUt
MWE4Njg5ODYwOWQySjsKG3Rhc2tfZmluZ2VycHJpbnRfY3JlYXRlZF9hdBIcChoyMDI1LTA1LTI3
VDAxOjEzOjA0Ljc0MTIyNko7ChFhZ2VudF9maW5nZXJwcmludBImCiRkNTc5ZDA2Ny03YzU1LTQw
NmQtYTg5Zi1mZTM1MzU0ZGFlYTJ6AhgBhQEAAQAA
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '1626'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.31.1
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Tue, 27 May 2025 08:13:05 GMT
status:
code: 200
message: OK
version: 1

File diff suppressed because one or more lines are too long
@@ -1,130 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Sports Analyst. You
are an expert at gathering and organizing information. You carefully collect
details and present them in a structured way.\nYour personal goal is: Gather
information about the best soccer players\n\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\nFinal Answer: Your final answer must be the great and
the most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"}, {"role": "user", "content": "Top 1 best players
in the world?"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '693'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.78.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.78.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//TFPRbttGEHzPVwz04taQBFt1nFZvsgsnRlvYqI0aafOyPK7IjY97xO1R
CpMv6nf0x4qlZLcvBHh3Mzs7s/vtDTCTerbGLLRUQtfHxVUzPF6udsaXHx/Cn7//8fG8e9rf/XxF
X+vqaTZ3RKo+cygvqGVIXR+5SNLDdchMhZ31/N3FT28vL99enE0XXao5Oqzpy+IiLTpRWazOVheL
s3eL8x+P6DZJYJut8dcbAPg2fV2n1vxltsbENZ10bEYNz9avj4BZTtFPZmQmVkjLbP7fZUhaWCfp
j20amrascQtNewRSNLJjEBrXD1LbcwY+6Y0oRWym/zUeW0ZJPfpII2eIorSMfcqxnoMMaYu7UFLF
Gauz1Q9ziOFXScoRv7GZLPEkNccRmRvKNdcTSNmBzjRVZyuwFALnY52Jl2JEkY7nBya0ZGipBikk
xsFKljQYAmXmjNBSplA4y1euUY3gLyVTyrUo5RH2LDHaHDsxSToHaY2Q1E1jDePSO/+kv/CITWiF
d9yxFlv78QKnp9dxqOz0dH2UYj1rmfR39DllKaMLbuVVDRXcXOOKcuCYlObYt5wZLaPiQB1P2BCH
6sS8z4X3ichUizawkDLnJT5MzxQlk9qWc+YaJeGeshgeSLQs3nPuSBTf3T+8/97TWZ2tzpcHzbda
OCv5pFJ07R+8RI1NbliLKDnZTkJJeXwJ1uG4Tj1h0/3zd5ZAk1PHqxVubm82ePL0cT30cxiHIbtm
7z1yQ2H0gAkvddFyTsuji5s95fp/Nnqi+6Tohlikj4wrijEp6pO7DJoez9FK00Zp2vJSxgqVwbxM
mfy08jKdTUwVxTgiKYx3nCkihUAeuS094PtIo/M8lDHylO5BiRieNe0V25SnIqIhcy1VZNRZqio6
iiqJUsY5+sxBjNH72mlzGKc+pyhbCWgSxYWHKNos8cGd8ZVjz8PnpMm085HxZveGVjpPoiPlYccZ
pc2+qyjeNGreshobjKmLbBbHOTp6PrjRgaYx9s13oK9yOkS5FY71chrrW93GgTUcOr7iMWk92ShW
JNhxwU4M0vUUXhka8uU7uHEk8CuyXqbMj7t66H5Kpk+5WEdqrfRo6V8AAAD//4xVy24CIRTdz1cQ
1m1Tx+nCr+gXGHKFO85NGSDAaF347w2ggtUmXR84931O0hcdLJP5mlC14yNzPfmJQlrB0qkkWWQW
VMyhH62fIUVNApUXgWk8oGZH1JqRiTYzzqRe1+8hDRFYEhOY83kWVKEimbcx55mFoLTlM2+IvuoL
fuPs0gQxsOMEkVFkM4IJiWmHD9vWaiGLVsHprRVfj+MSIBmAWbRuADDGxpxQlv3tBTnfhF7bvfN2
F3595SMZCpPwCMGaJOohWsczeu4Y22ZDWe48gjtvZxdFtF+Yw636vvDx6mMV7TebCxptBF2BoV+9
PCEUCiOQDo0ncQlyQlW/VgODRZFtgK4p+zGdZ9yldDL7/9BXQEp0EZVw6aTlfcn1mcfk8389u7U5
J8wD+gNJFJHQp1EoHGHRxX15OIWIsxjJ7NE7T8WCRyfWA3wMgJu15N25+wEAAP//AwDdzCHTkAgA
AA==
headers:
CF-RAY:
- 94d9a27f5dc000f9-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 10 Jun 2025 14:42:51 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=7hq1JYlSmmLvjUR7npK1vcLJYOvCPn947S.EYBtvTcQ-1749566571-1.0.1.1-11XCSwdUqYCYC3zE9DZk20c_BHXTPqEi6YMhVtX9dekgrj0J3a4EHGdHvcnhBNkIxYzhM4zzQsetx2sxisMk62ywkO8Tzo3rlYdo__Kov7w;
path=/; expires=Tue, 10-Jun-25 15:12:51 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=bhxj6kzt6diFCyNbiiw60v4lKiUKaoHjQ3Yc4KWW4OI-1749566571331-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '30419'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '30424'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999859'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_b5983a9572e28ded39da7b12e678e2b7
status:
code: 200
message: OK
version: 1
@@ -1,643 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Sports Analyst. You
|
||||
are an expert at gathering and organizing information. You carefully collect
|
||||
details and present them in a structured way.\nYour personal goal is: Gather
|
||||
information about the best soccer players\n\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}, {"role": "user", "content": "Top 10 best players
|
||||
in the world?"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '694'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.78.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.78.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA6RXXW4cNxJ+1ykK8+JdoyVII/lH8yZprUi25BVsB0GwDoQadk13ZdgkU2TPaBIY
|
||||
2Lc9wx5hz7F7kj1JUOye38iBgrwImiZZrPr41VdVv+wBDLgcjGBgakymCXb/vGrfyvX4+zCZ/fTd
|
||||
3dWPD69v+N3V7bfjDw/N20GhJ/z4RzJpeerA+CZYSuxdt2yEMJFaPXp1cvri5YvTl6/yQuNLsnqs
|
||||
Cmn/xO837Hh/eDg82T98tX/0uj9dezYUByP4xx4AwC/5r/rpSnoYjOCwWH5pKEasaDBabQIYiLf6
|
||||
ZYAxckzo0qBYLxrvErns+qfat1WdRnANzs/BoIOKZwQIlfoP6OKcBOCzu2SHFs7y7xFckRCgEKSa
|
||||
IPkAR4cwppggemNIIFhckERgl3fMvdgSMIKfwN9N8mMSGB4OjwsYY6QSfN7GAoFk4qVBZ6gAbgKa
|
||||
VAC6EvyMBK0F9V143CrQEZLP5itsaPTZfXZHB/D8+Q17RxZuKUZ+/hz+cu0SCdwyNvzXzw4A9uHO
|
||||
R1YLI7j0Mkcp++8XvnVJFiM4k4pcYof9whVXteWqTnHUGc6OsGspOxFrPzcYCWqOQA+GglpHC6Xw
|
||||
eGzZVQXMOLJ3XTQKT4NTdhXgmC2nRQGWsNQPq6vV8IxN8rJY4jg8HA7h8vryDL7LiF60IduLXDme
|
||||
sEGX7GIDI1epEXXK2Hb8LEJsjaEYKR4oXEOF693CMjq4HWMI//2PAnaHwhE+Iru0/w1Jg+yeDNyl
|
||||
6Ns9gto75+cOJl6yO2PLMZGogzEQlTkKY9mxQQsTdhzrjFrvFnAEhCktemZlQ2Ofarj7+E0+rPBc
|
||||
CjlTg8Me/UTYFDDdunmDYZmeN1y1BEfZBitT1qd9Kw4bcqlD61jReiP6mnCFaNGVitYtOlOTRgMX
|
||||
nBZPRuq9fl48glRv+1mEyqPdj8ZnnIL4OcXYvXcSclWqocYZQYOlMq8B70gzTKFofEwwIRTKu3na
|
||||
Z+ObVnygAmqyQa3ueA+RTCsEkXK+QeJkM07GtpmdavvbN5dncFFjE3IS3hBWLWWITjKhaMYO/kZw
|
||||
Lu3C0ZMwuuVywmRLkl2YzslW3DaP4LS64f///Pd21gWVPWVPzgRvu6TrEhAanP4GrSxdfVLuylYB
|
||||
7GKSVomAVpd2osnXl75hp6TSDaVvKCY2+doOcXSgFYJSDrgj1AtF60I4Jkbn4YP6XnrF68zCe4xR
|
||||
nkymOy+prdA+ClMMnJQRs14PNLSP2JYMd+L75yuW1z9TKJOgl5IdygISWnIph7LFx164YEoUMp49
|
||||
aMa7GUnMWQTY+J40CnGhfNWzNZaAYKkiV+odBoVIAI34GKFpbeJgSSWxanuteqlgvadFgwJv5aBH
|
||||
6Yot2iejdC74Mz+GUWf3WQSsejXO5Ztn+f+ccVO2tku3KYWUA0bVJC1+vaaQKAZdBSxyPe3rQg6Z
|
||||
Yw/lil67bwDzmq2mM7uE7DLImrwK4ZamQRCK5EyXdK8UmQ9aUxPc0Bxd6edxmkvfOYoh6x3+ASKp
|
||||
9jwC0aeaVIKslpmlouwUQeMlP7+2HJQrkeorqsTirgxvSHzofco/FGskYbTryrhbz1ZR5czbLmmv
|
||||
swKhcAPn5H6mBnumXKfENZZ/vpApEP1rfyC0cIulsIosCgjp4y1p0T13ASVH1Rb1Xos8Say5q9uJ
|
||||
TN2VvMyv2LGmR3XJmj45vkKaHPVp7nvaKcKtL4X/9y8NesO7PyK4F+Ix8WONzxk0mFXPT6Dpzy8l
|
||||
tmtrlrrbe7HGwwG7iW21scn1vdIig2kTwWI3W+ghkLDSPBufeJ/G2gJqlbaWq1UCHB1q+Le+xoZK
|
||||
+IgWa43/hmckwfteH7766JvBv6kWIS2/bsaerWZtDNZHbZED9o79vipmkTDkkqwD1yRYebd82Duh
|
||||
hkmWWvDV0lFsCYQCFROmNub2el3SyKroT7o4u9z4VFOkVWuetaykxmt101Flq4RmPhbrbDa9DnY9
|
||||
ee8zK+NjIJMUEFWmrj3pdbuA6C2XPFn0nYM2+P0TxJ3SsFVyDzanFaFJG1EnJtdau7GAzvmUZTHP
|
||||
ST/0K19Wk5H1VRA/jjtHB5323Ath9E6noJh8GOTVL3sAP+QJrN0aqgZBfBPSffJTytcdDYedvcF6
|
||||
8Fuvvjw96leTT2jXC6+PjotHDN6XlJBt3BjiBgZNTeX66HriUwXwGwt7G2H/1p3HbHehs6ueYn69
|
||||
YJQfVN4HoZLNdsjrbUI6GH9t2wrm7PAgkszY0H1iEn2KkibY2m5cHcRFTNTcT9hVJEG4m1kn4f74
|
||||
BF+cIJ0em8Hel71fAQAA//8DAICIe4nBDwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 94d9947e9abcf1fe-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Jun 2025 14:33:27 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=UscrLNUFPQp17ivpT2nYqbIb8GsK9e0GpWC7sIKWwnU-1749566007-1.0.1.1-fCm5jdN02Agxc9.Ep4aAnKukyBp9S3iOLK9NY51NLG1zib3MnfjyCm5HhsWtUvr2lIjQpD_EWosVk4JuLbGxrKYZBa4WTendGsY9lCU9naU;
|
||||
path=/; expires=Tue, 10-Jun-25 15:03:27 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=CGbpwQsAGlm5OcWs4NP0JuyqJh.mIqHyUZjIdKm8_.I-1749566007688-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '40370'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '40375'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999859'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_94fb13dc93d3bc9714811ff4ede4c08f
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Guardrail Agent. You
|
||||
are a expert at validating the output of a task. By providing effective feedback
|
||||
if the output is not valid.\nYour personal goal is: Validate the output of the
|
||||
task\n\nTo give my best complete final answer to the task respond using the
|
||||
exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
|
||||
Your final answer must be the great and the most complete as possible, it must
|
||||
be outcome described.\n\nI MUST use these formats, my job depends on it!\nIMPORTANT:
|
||||
Your final answer MUST contain all the information requested in the following
|
||||
format: {\n \"valid\": bool,\n \"feedback\": str | None\n}\n\nIMPORTANT: Ensure
|
||||
the final output does not include any code block markers like ```json or ```python."},
|
||||
{"role": "user", "content": "\n Ensure the following task result complies
|
||||
with the given guardrail.\n\n Task result:\n Here are the top
|
||||
10 best soccer players in the world as of October 2023, based on their performance,
|
||||
impact, and overall contributions to the game:\n\n1. **Lionel Messi** (Inter
|
||||
Miami)\n - Position: Forward\n - Country: Argentina\n - Highlights: Messi
|
||||
continues to showcase his exceptional dribbling, vision, and playmaking ability,
|
||||
leading Argentina to victory in the 2022 FIFA World Cup and significantly contributing
|
||||
to his club''s successes.\n\n2. **Kylian Mbapp\u00e9** (Paris Saint-Germain)\n -
|
||||
Position: Forward\n - Country: France\n - Highlights: Known for his blistering
|
||||
speed and clinical finishing, Mbapp\u00e9 is a key player for both PSG and the
|
||||
French national team, known for his performances in Ligue 1 and international
|
||||
tournaments.\n\n3. **Erling Haaland** (Manchester City)\n - Position: Forward\n -
|
||||
Country: Norway\n - Highlights: Haaland''s goal-scoring prowess and strength
|
||||
have made him one of the most feared strikers in Europe, helping Manchester
|
||||
City secure several titles including the UEFA Champions League.\n\n4. **Kevin
|
||||
De Bruyne** (Manchester City)\n - Position: Midfielder\n - Country: Belgium\n -
|
||||
Highlights: De Bruyne\u2019s exceptional passing, control, and vision make him
|
||||
one of the best playmakers in the world, instrumental in Manchester City\u2019s
|
||||
dominance in domestic and European competitions.\n\n5. **Cristiano Ronaldo**
|
||||
(Al Nassr)\n - Position: Forward\n - Country: Portugal\n - Highlights:
|
||||
Despite moving to the Saudi Pro League, Ronaldo''s extraordinary talent and
|
||||
goal-scoring ability keep him in the conversation among the best, having had
|
||||
a legendary career across multiple leagues.\n\n6. **Neymar Jr.** (Al Hilal)\n -
|
||||
Position: Forward\n - Country: Brazil\n - Highlights: Neymar''s agility,
|
||||
creativity, and skill have kept him as a top performer in soccer, now showcasing
|
||||
his talents in the Saudi Pro League while maintaining a strong national team
|
||||
presence.\n\n7. **Robert Lewandowski** (Barcelona)\n - Position: Forward\n -
|
||||
Country: Poland\n - Highlights: The prolific striker continues to score consistently
|
||||
in La Liga, known for his finishing, positioning, and aerial ability, contributing
|
||||
to Barcelona\u2019s successes.\n\n8. **Karim Benzema** (Al Ittihad)\n - Position:
|
||||
Forward\n - Country: France\n - Highlights: The former Real Madrid star
|
||||
remains a top talent, displaying leadership and technical skills, now continuing
|
||||
his career in the Saudi Pro League.\n\n9. **Luka Modri\u0107** (Real Madrid)\n -
|
||||
Position: Midfielder\n - Country: Croatia\n - Highlights: A master of midfield
|
||||
control and passing, Modri\u0107 remains an influential figure at Real Madrid,
|
||||
showcasing his experience and football intelligence.\n\n10. **Mohamed Salah**
|
||||
(Liverpool)\n - Position: Forward\n - Country: Egypt\n - Highlights:
|
||||
Salah''s explosive pace and goal-scoring ability keep him as a central figure
|
||||
for Liverpool in the Premier League and European competitions, maintaining his
|
||||
status as one of the elite forwards.\n\nThese players have demonstrated exceptional
|
||||
skill, consistency, and impact in their respective teams and leagues, solidifying
|
||||
their positions among the best in the world.\n\n Guardrail:\n Only
|
||||
include Brazilian players, both women and men\n \n Your task:\n -
|
||||
Confirm if the Task result complies with the guardrail.\n - If not, provide
|
||||
clear feedback explaining what is wrong (e.g., by how much it violates the rule,
|
||||
or what specific part fails).\n - Focus only on identifying issues \u2014
|
||||
do not propose corrections.\n - If the Task result complies with the
|
||||
guardrail, saying that is valid\n "}], "model": "gpt-4o-mini", "stop":
|
||||
["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '4676'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=UscrLNUFPQp17ivpT2nYqbIb8GsK9e0GpWC7sIKWwnU-1749566007-1.0.1.1-fCm5jdN02Agxc9.Ep4aAnKukyBp9S3iOLK9NY51NLG1zib3MnfjyCm5HhsWtUvr2lIjQpD_EWosVk4JuLbGxrKYZBa4WTendGsY9lCU9naU;
|
||||
_cfuvid=CGbpwQsAGlm5OcWs4NP0JuyqJh.mIqHyUZjIdKm8_.I-1749566007688-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.78.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.78.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4xUwWobMRC9+ysGnVqwjZ3YjuNbQim0hULBtIc6mLE0uzuxVtpqtHbckH8v2o29
|
||||
TpNCLwuapzfzZvRmH3sAio1agNIFRl1WdnCb119+LIvZh+/+h9zM55Op3Wzv5dt4rKei+onhN/ek
|
||||
45E11L6sLEX2roV1IIyUso6vJtfT2Ww0mjdA6Q3ZRMurOJj4QcmOBxeji8lgdDUYz5/ZhWdNohbw
|
||||
swcA8Nh8k05n6EEtYNQ/RkoSwZzU4nQJQAVvU0ShCEtEF1W/A7V3kVwjfVn4Oi/iAj6B83vQ6CDn
|
||||
HQFCnvQDOtlTAFi5j+zQwk1zXsDjygGs1A4tm5VaQIZWqN8GMyKzQb1N8ZVaFgQRZQuBpLYRjCcB
|
||||
5yM0AzvAnmMBsSDIawwmIFtAAY7ATtvakEBl8UBBIAu+hB0G9rWA9rWLgUkAnQHv7AFKcmn8AnHv
|
||||
4Tbgb7aM7kR/95UOJQb4HIYNpz2+h33BloAeUjV2+Rlz70vq+Cl7IHsYwvKFWqlIc8YarT1AoF81
|
||||
SRSI/thAK+6Vnj5sfCyeayQ9Jbl+EqML4HZCWW0ztpbMcKVW7un8CQNltWCykautPQPQOR+xmUMy
|
||||
z90z8nSyi/V5FfxG/qKqjB1LsQ6E4l2yhkRfqQZ96gHcNbasXzhNVcGXVVxHv6Wm3PV01uZT3TZ0
|
||||
6PwIRh/RdvHxaHLRfyPh2lBEtnLmbKVRF2Q6brcGWBv2Z0DvrO3Xct7K3bbOLv+f9B2gNVWRzLoK
|
||||
ZFi/bLm7Fij9Lf517TTmRrASCjvWtI5MIT2FoQxr2+6wkoNEKtcZu5xCFbhd5KxaX05wOkG6vtSq
|
||||
99T7AwAA//8DABZK9w/WBAAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 94d9957c4b46f1fe-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Jun 2025 14:33:46 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '18280'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '18287'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149998889'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_853da6b9ca8f386ff1427c837346290d
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Sports Analyst. You
|
||||
are an expert at gathering and organizing information. You carefully collect
|
||||
details and present them in a structured way.\nYour personal goal is: Gather
|
||||
information about the best soccer players\n\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}, {"role": "user", "content": "Top 10 best players
|
||||
in the world?"}, {"role": "assistant", "content": "Thought: I now can give a
|
||||
great answer \nFinal Answer: Here are the top 10 best soccer players in the
|
||||
world as of October 2023, based on their performance, impact, and overall contributions
|
||||
to the game:\n\n1. **Lionel Messi** (Inter Miami)\n - Position: Forward\n -
|
||||
Country: Argentina\n - Highlights: Messi continues to showcase his exceptional
|
||||
dribbling, vision, and playmaking ability, leading Argentina to victory in the
|
||||
2022 FIFA World Cup and significantly contributing to his club''s successes.\n\n2.
|
||||
**Kylian Mbapp\u00e9** (Paris Saint-Germain)\n - Position: Forward\n - Country:
|
||||
France\n - Highlights: Known for his blistering speed and clinical finishing,
|
||||
Mbapp\u00e9 is a key player for both PSG and the French national team, known
|
||||
for his performances in Ligue 1 and international tournaments.\n\n3. **Erling
|
||||
Haaland** (Manchester City)\n - Position: Forward\n - Country: Norway\n -
|
||||
Highlights: Haaland''s goal-scoring prowess and strength have made him one of
|
||||
the most feared strikers in Europe, helping Manchester City secure several titles
|
||||
including the UEFA Champions League.\n\n4. **Kevin De Bruyne** (Manchester City)\n -
|
||||
Position: Midfielder\n - Country: Belgium\n - Highlights: De Bruyne\u2019s
|
||||
exceptional passing, control, and vision make him one of the best playmakers
|
||||
in the world, instrumental in Manchester City\u2019s dominance in domestic and
|
||||
European competitions.\n\n5. **Cristiano Ronaldo** (Al Nassr)\n - Position:
|
||||
Forward\n - Country: Portugal\n - Highlights: Despite moving to the Saudi
|
||||
Pro League, Ronaldo''s extraordinary talent and goal-scoring ability keep him
|
||||
in the conversation among the best, having had a legendary career across multiple
|
||||
leagues.\n\n6. **Neymar Jr.** (Al Hilal)\n - Position: Forward\n - Country:
|
||||
Brazil\n - Highlights: Neymar''s agility, creativity, and skill have kept
|
||||
him as a top performer in soccer, now showcasing his talents in the Saudi Pro
|
||||
League while maintaining a strong national team presence.\n\n7. **Robert Lewandowski**
|
||||
(Barcelona)\n - Position: Forward\n - Country: Poland\n - Highlights:
|
||||
The prolific striker continues to score consistently in La Liga, known for his
|
||||
finishing, positioning, and aerial ability, contributing to Barcelona\u2019s
|
||||
successes.\n\n8. **Karim Benzema** (Al Ittihad)\n - Position: Forward\n -
|
||||
Country: France\n - Highlights: The former Real Madrid star remains a top
|
||||
talent, displaying leadership and technical skills, now continuing his career
|
||||
in the Saudi Pro League.\n\n9. **Luka Modri\u0107** (Real Madrid)\n - Position:
|
||||
Midfielder\n - Country: Croatia\n - Highlights: A master of midfield control
|
||||
and passing, Modri\u0107 remains an influential figure at Real Madrid, showcasing
|
||||
his experience and football intelligence.\n\n10. **Mohamed Salah** (Liverpool)\n -
|
||||
Position: Forward\n - Country: Egypt\n - Highlights: Salah''s explosive
|
||||
pace and goal-scoring ability keep him as a central figure for Liverpool in
|
||||
the Premier League and European competitions, maintaining his status as one
|
||||
of the elite forwards.\n\nThese players have demonstrated exceptional skill,
|
||||
consistency, and impact in their respective teams and leagues, solidifying their
|
||||
positions among the best in the world."}, {"role": "user", "content": "The task
|
||||
result does not comply with the guardrail as it includes players from various
|
||||
countries and only mentions two Brazilian players (Neymar Jr. and Neymar) while
|
||||
excluding Brazilian women players entirely. The guardrail specifically requests
|
||||
to include only Brazilian players, both women and men, which is not fulfilled."}],
|
||||
"model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '4338'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=UscrLNUFPQp17ivpT2nYqbIb8GsK9e0GpWC7sIKWwnU-1749566007-1.0.1.1-fCm5jdN02Agxc9.Ep4aAnKukyBp9S3iOLK9NY51NLG1zib3MnfjyCm5HhsWtUvr2lIjQpD_EWosVk4JuLbGxrKYZBa4WTendGsY9lCU9naU;
|
||||
_cfuvid=CGbpwQsAGlm5OcWs4NP0JuyqJh.mIqHyUZjIdKm8_.I-1749566007688-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.78.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.78.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//lFfNbhxHDr7rKYi5BDBagiTLsj032QvHceC1ExubIOtAYFdzuhlVs3pZ
|
||||
1TMeBwb2Nfa857xArnqTfZIFq2e6R9J417oImmYVfz5+JIu/HwDMuJrNYeYaTK7t/OGzuv8+vPuu
|
||||
O/2H+9k//vnF8flP4Zcf6uclPj35dlbYjVD+Ri5tbx250HaeEgcZxE4JE5nWk8dnTx+dnx+fnmdB
|
||||
Gyrydq3u0uFZOGxZ+PD0+PTs8Pjx4cmTze0msKM4m8PfDwAAfs9/zU+p6ONsDsfF9ktLMWJNs/l4
|
||||
CGCmwduXGcbIMaGkWTEJXZBEkl1/34S+btIcvgMJK3AoUPOSAKE2/wElrkgBPsgLFvRwkX/P4SUp
|
||||
ASpBaghS6ODkGEqKCWJwjhQ6j2vSCCz5xCqorwpYBNdHlhroo/N95CX5NQSBZ4qf2DPK9l4BZUgN
|
||||
rEJLAigVtCQFlBipsvOpIVboSBdBWxRH+Qy3Hbq0NVljS4ARwgLeuBRKUjg9Pn04/yAf5OQIHjz4
|
||||
K61bVHilRw8eWIAAcAhvQ2TL4RxeBF2hVqPkue/LOVx4eMke/fj5JdeN57pJcQ5vhMycWW+DYXHF
|
||||
3lMFi0FVhNqHEr1fF7Axbplg6SlCClDhp0+eYMWpgYYjVMpl6VnqIsPS4lX+30KtA/rD6IIamFiy
|
||||
57QuoMFl/g2Ra+EFO5S0RSXIAKkpdr4vsxrzdMJe0CJHD4mwPTKcTg2nv7Fc/+G4j/Dq+k/hoPeA
|
||||
60dCD6+xUq72IjbpbjACtaQ1VZY1hCtab9hg+O1qKkBCGmDN4cSOqCogkWuEHfoB9zgg5bR3jD4j
|
||||
lunIbRfUKgJaTK6hWEBswsphJqbpS+hJJsRGtFgS6QiSpyX5mGF6aDD9yK5B9RyD3AOg9yElkgZb
|
||||
eBlS7HrdC9P3ElYyxrskjZhyzrNfSGoRjjTY8STDWpILVguw5IQeWmqtGDZEvZH0rM6umMRht1Fp
|
||||
5HQNSk1TZY0s7XBTfrfomJE5M2Reoya8ByZv1KNUAd4qV7QXjwtI2hN4qkkqiyQ3im/ipvsUkE3m
|
||||
SFwQ64Akya+3iaYKGtKBJgV4woo0Ntztqa1Ow4piLIBQxT5I35KGPgIOJZ0T4FzwWFkVN2r9NPQp
|
||||
WxgcRF2DQyXSjMkjw+RCKrW+DRd+SXEvOK+5WjD5ivR2UYUWvwBLx8tgOd4UDssdZEbDBk7FMZ8c
|
||||
8KCPjroNG4YSGpoEupTLyiImoRiHumAZSsM05IPGz//XUc5z8B4F4R1KCvtD/wstSO4G/ozEmtr+
|
||||
2LNOjqDkQi38adsgSKEydTZttukcsrZD7iBJueyTzbIcA6aE7qoAiglLz7HJzYE0kl/s7U9DcyW9
|
||||
2VzvAvDYAPgWS2Xy8IpiH+8zfTSSfGH4/GQFibDwyINHYz1arPkZkMfHDdsGGApQjCTJmkiHmva2
|
||||
hgKubjShVdArUEw0tkbvuSabxcOAhkzeHPMTi/k9Xv8R4ULx+s/fwv0I/+763wHeYu/DF2ivnJt3
|
||||
TJg5PxHwNvsHJ4z6NbJQtWWLOTDSJSYNVvrT8yIPjnbj3o15kdOei2UzbQgTL7ft72kOvGGsA7xj
|
||||
v8T7sP15Qz7SF9guQB87UjbAq2xayTOWnsCRzanDMvM3W/3PP/8Vd9pcPj7VxFAHTIbLkqDFiqDh
|
||||
FhBcUCGNKQhlcLYefc3T4eTYYn/GCL+gkGJKU9L/B9PH6N+ib4kV4yS4NQHylJ5eVwWYsZ15t8NX
|
||||
0ptdfRxtFrVFM72vxsfD2BPyx+lB5XPH+Npyf2+14jkmaEbv81EWp1SxJWzz3sj8GY0GyU/XOy9j
|
||||
Y+KWzTs8HF7E267tNGy79MB/072tBRvfsYDU9NYsO6Vc+1JvbH0TQdk124e8pxrd+mh3fVBa9BFt
|
||||
hZHe+x0BioSUIciLy68byedxVfGh7jSU8dbV2YKFY3OphDGIrSUxhW6WpZ8PAH7NK1F/Y8uZdRra
|
||||
Ll2mcEXZ3JMnJ4O+2bSJTdLz883CNEs2ICfByaOz7b0bGi8rSsg+7qxVM4euoWq6O+1g2FccdgQH
|
||||
O3Hf9Wef7iF2lvpr1E8CZyObqsvOyORuxjwdU7JV9UvHRpyzw7NIumRHl4lJLRcVLbD3wwI5i+uY
|
||||
qL1csNSknfKwRS66y4dn+OgM6elDNzv4fPBfAAAA//8DAGsE37hTDwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 94d995f0ca0cf1fe-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Jun 2025 14:34:22 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '35941'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '35946'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149998983'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_c874fed664f962f5ee90decb8ebad875
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Guardrail Agent. You
|
||||
are a expert at validating the output of a task. By providing effective feedback
|
||||
if the output is not valid.\nYour personal goal is: Validate the output of the
|
||||
task\n\nTo give my best complete final answer to the task respond using the
|
||||
exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
|
||||
Your final answer must be the great and the most complete as possible, it must
|
||||
be outcome described.\n\nI MUST use these formats, my job depends on it!\nIMPORTANT:
|
||||
Your final answer MUST contain all the information requested in the following
|
||||
format: {\n \"valid\": bool,\n \"feedback\": str | None\n}\n\nIMPORTANT: Ensure
|
||||
the final output does not include any code block markers like ```json or ```python."},
|
||||
{"role": "user", "content": "\n Ensure the following task result complies
|
||||
with the given guardrail.\n\n Task result:\n Here are the top
|
||||
10 best soccer players in the world, focusing exclusively on Brazilian players,
|
||||
both women and men, based on their performance and impact in the game as of
|
||||
October 2023:\n\n1. **Neymar Jr.** \n - Position: Forward \n - Club: Al
|
||||
Hilal \n - Highlights: One of the most skilled forwards globally, Neymar
|
||||
continues to dazzle with his dribbling, playmaking, and goal-scoring ability,
|
||||
having a significant impact on both his club and the Brazilian national team.\n\n2.
|
||||
**Vin\u00edcius J\u00fanior** \n - Position: Forward \n - Club: Real Madrid \n -
|
||||
Highlights: Vin\u00edcius has emerged as a key player for Real Madrid, noted
|
||||
for his speed, technical skills, and crucial goals in important matches, showcasing
|
||||
his talent on both club and international levels.\n\n3. **Richarlison** \n -
|
||||
Position: Forward \n - Club: Tottenham Hotspur \n - Highlights: Known
|
||||
for his versatility and aerial ability, Richarlison has become a vital member
|
||||
of the national team and has the capability to change the game with his pace
|
||||
and scoring ability.\n\n4. **Marta** \n - Position: Forward \n - Club:
|
||||
Orlando Pride \n - Highlights: A true legend of women''s soccer, Marta has
|
||||
consistently showcased her skill, leadership, and goal-scoring prowess, earning
|
||||
numerous awards and accolades throughout her legendary career.\n\n5. **Andressa
|
||||
Alves** \n - Position: Midfielder \n - Club: Roma \n - Highlights:
|
||||
A pivotal player in women''s soccer, Andressa has displayed her exceptional
|
||||
skills and tactical awareness both in club play and for the Brazilian national
|
||||
team.\n\n6. **Alana Santos** \n - Position: Defender \n - Club: Benfica \n -
|
||||
Highlights: Alana is recognized for her defensive prowess and ability to contribute
|
||||
to the attack, establishing herself as a key player for both her club and the
|
||||
national team.\n\n7. **Gabriel Jesus** \n - Position: Forward \n - Club:
|
||||
Arsenal \n - Highlights: With a flair for scoring and assisting, Gabriel
|
||||
Jesus is an essential part of the national team, known for his work rate and
|
||||
intelligence on the field.\n\n8. **Ta\u00eds Ara\u00fajo** \n - Position:
|
||||
Midfielder \n - Club: S\u00e3o Paulo \n - Highlights: A rising star in
|
||||
Brazilian women''s soccer, Ta\u00eds has gained recognition for her strong performances
|
||||
in midfield, showcasing both skill and creativity.\n\n9. **Thiago Silva** \n -
|
||||
Position: Defender \n - Club: Chelsea \n - Highlights: An experienced
|
||||
and reliable center-back, Silva\u2019s leadership and defensive abilities have
|
||||
made him a cornerstone for Chelsea and the Brazilian national team.\n\n10. **Bia
|
||||
Zaneratto** \n - Position: Forward \n - Club: Palmeiras \n - Highlights:
|
||||
A talented forward, Bia has become known for her goal-scoring capabilities and
|
||||
playmaking skills, contributing significantly to both her club and the national
|
||||
team.\n\nThis list highlights the incredible talent and contributions of Brazilian
|
||||
players in soccer, showcasing their skills across both men''s and women''s games,
|
||||
thus representing Brazil''s rich soccer legacy.\n\n Guardrail:\n Only
|
||||
include Brazilian players, both women and men\n \n Your task:\n -
|
||||
Confirm if the Task result complies with the guardrail.\n - If not, provide
|
||||
clear feedback explaining what is wrong (e.g., by how much it violates the rule,
|
||||
or what specific part fails).\n - Focus only on identifying issues \u2014
|
||||
do not propose corrections.\n - If the Task result complies with the
|
||||
guardrail, saying that is valid\n "}], "model": "gpt-4o-mini", "stop":
|
||||
["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '4571'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=UscrLNUFPQp17ivpT2nYqbIb8GsK9e0GpWC7sIKWwnU-1749566007-1.0.1.1-fCm5jdN02Agxc9.Ep4aAnKukyBp9S3iOLK9NY51NLG1zib3MnfjyCm5HhsWtUvr2lIjQpD_EWosVk4JuLbGxrKYZBa4WTendGsY9lCU9naU;
|
||||
_cfuvid=CGbpwQsAGlm5OcWs4NP0JuyqJh.mIqHyUZjIdKm8_.I-1749566007688-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.78.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.78.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4ySTW/bMAyG7/4VhM72kLj5WHxrDwEKtNilGDYshcFItK1WlgRJTjYE+e+D7DR2
|
||||
tw7YxYD58KX4kjwlAEwKVgDjDQbeWpXd1d3Dl+5Yfb3dPD6K7bri26cw899f5vruG0ujwuxfiIc3
|
||||
1SduWqsoSKMHzB1hoFh1vl5slqvVbJX3oDWCVJTVNmQLk7VSyyyf5Ytsts7mny/qxkhOnhXwIwEA
|
||||
OPXf2KcW9JMVMEvfIi15jzWx4poEwJxRMcLQe+kD6sDSEXKjA+m+9afGdHUTCrgHbY7AUUMtDwQI
|
||||
dewfUPsjOYCd3kqNCm77/wJOOw2wYwdUUuxYAcF1lA6xikjskb/GsO6U2unz9HFHVedRXeAEoNYm
|
||||
YBxgb/v5Qs5Xo8rU1pm9/0PKKqmlb0pH6I2OpnwwlvX0nAA89wPt3s2IWWdaG8pgXql/bpMvh3ps
|
||||
3ONI8/UFBhNQTVTLPP2gXikooFR+shLGkTckRum4P+yENBOQTFz/3c1HtQfnUtf/U34EnJMNJErr
|
||||
SEj+3vGY5iie+b/SrlPuG2ae3EFyKoMkFzchqMJODcfH/C8fqC0rqWty1snhAitb3ixwuUDa3HCW
|
||||
nJPfAAAA//8DAOHOvQuPAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 94d996d2b877f1fe-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Jun 2025 14:34:38 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '15341'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '15343'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149998917'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_22419b91d42cf1e03278a29f3093ed3d
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,726 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Sports Analyst. You
|
||||
are an expert at gathering and organizing information. You carefully collect
|
||||
details and present them in a structured way.\nYour personal goal is: Gather
|
||||
information about the best soccer players\n\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}, {"role": "user", "content": "Top 10 best players
|
||||
in the world?"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '694'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.78.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.78.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA5yXzVIbORDH7zxF11xiqLHLNp/xDUhMssEJm49N1S4pqq1pz/SikYaWxo6Tynmf
|
||||
ZQ/7AnvNPtiWZIMNGAK5UHhaLfVPrW799XUNIOEs6UGiCvSqrHTzIK9HH/yzi6Mv44uT3z93X5fn
|
||||
Fxefs49bR3vya5IGDzv8k5S/9GopW1aaPFszMysh9BRm7exuPd3e2d1s70VDaTPSwS2vfHPLNks2
|
||||
3Oy2u1vN9m6zszf3LiwrckkP/lgDAPga/4Y4TUafkx6008svJTmHOSW9q0EAiVgdviToHDuPxifp
|
||||
wqis8WRi6O8LW+eF78FLMHYCCg3kPCZAyEP8gMZNSABOTZ8NatiPv3vwviDwtoJOG4bkPDirFAlU
|
||||
GqckDtiALwgmVnSWAjqwI3ijvB2SQLfd3UzjSkMCzsh4HjFlMERHGdjoyQJCioyHimRkpUSjyKXg
|
||||
zllrlwKXFSofBudYBgOaDOyYBLWGgCc8rEMuHHg7n9ATlq4FL0gI2MX4nJda+VooA83O907Nqem0
|
||||
YGPjmK0hDQNyjqHx0ngSGDCWDIf99Y2NUwMATTixjsMiPehbmaBk8++vaAr7fhYDuR48Ex4ONZs8
|
||||
hTE7tiaF3KJuOmWFTQ44ZM1+2pq776uCaUwlGe96MKi150oTHKDW1kD25I3AhI0hSeHQVgj7JQkr
|
||||
BFVgWcXZP4Z9h8O6uvoGjW67211vBcJuIHw11YwGBkOsqu9/Q+MEhR28Qza+eURSIpvHg76riLIU
|
||||
PKnC8EVNKYzYsCvY5Kvh+i/7+3dE29lbT+GY85qgA569DmkuLzcjsyU5zwpUXblItRmonkvYZniB
|
||||
qMOJaAzQqIJcSN8h++njiU7sJOyzm4Fdyxob59kov5rsoDYZOc05xjoJTmGiD8/7+3A4x3RwTBgA
|
||||
Z+mMOdpcT+FEqGSSS+sMPkJuxdTRmA08IziQemroQZQDzkZMOiO5CzR0iuUTGvsXj398LldGm0Lg
|
||||
rCv3IOTIth3ZULiEAzJfqERo7OvmS++5wOzxqTtaSlYaO1OJ5/F/j8qzQg1sPGnNORlFqylD1ays
|
||||
vKuTeJPMwYR9AW8JNQwwE84i3U6ge03TEgV+kVZAgxesUT8erK+RJYVs0VUenKurstHXk3UrPVIH
|
||||
zGZdxeB3Q/BvQ/f2cEwTNJmduHOGRv8QDlAUaWvwJ0Aum0MK1dwl/kASRn1/W4yd4yBcPQMyTxyc
|
||||
xJtnKS/LBTinPMbQUPB6U5yfvr2AOLAFlpTBO9RYQOOYxySVtfqnO+FoQXiVrtU8N6po0ctndXR/
|
||||
GUWCp4HgNzbf/1FcO/jl+7+GrUBj6ST+NMcjztoPy727fn8mOu14A9fnCAObCf/310qG+9rbQ/rb
|
||||
PU3g7vq5xXV5tG63iPkdFpHeF+yiwgjCIwhF1rcFj6pFguIJcieFCt1N8RNutDu0jZCrSPmg3KLM
|
||||
CQJsaH2xKPjgHEjFYPBFHeMgHzfQteb1A4Im9EgX9dkYZbqIsiJx0dFWbELOwpRhWR6Fe1jYh7KF
|
||||
OgwfWQEao67jYikMax8idXSlEAscU6AJ8pSM11MYEhkQylEyyoJmdLakIByDUosic1lVrhCVrWWN
|
||||
KzSqHQadbWqtlwxojPUxrKiuP80t3670tLZ5JXbobrgms1o+E0JnTdDOztsqidZvawCfom6vr0nx
|
||||
pBJbVv7M23OKy3W63dl8yeK5sLDubG3Ord561AvD7s52umLCs4w8snZL0j9RqArKFq6LdwLWGdsl
|
||||
w9oS9u1wVs09Q2eTP2T6hUEpqjxlZ5VQxuo68mKYUHhO3TXsaptjwIkjGbOiM88kIRUZjbDWs0dO
|
||||
4qbOU3k2YpOTVMKzl86oOtvcwu0tpKebKln7tvY/AAAA//8DAJ67Vp33DQAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 94d9b5400dcd624b-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Jun 2025 14:55:42 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=8Yv8F0ZCFAo2lf.qoqxao70yxyjVvIV90zQqVF6bVzQ-1749567342-1.0.1.1-fZgnv3RDfunvCO1koxwwFJrHnxSx_rwS_FHvQ6xxDPpKHwYr7dTqIQLZrNgSX5twGyK4F22rUmkuiS6KMVogcinChk8lmHtJBTUVTFjr2KU;
|
||||
path=/; expires=Tue, 10-Jun-25 15:25:42 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=wzh8YnmXvLq1G0RcIVijtzboQtCZyIe2uZiochkBLqE-1749567342267-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '33288'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '33292'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999859'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_6a587ea22edef774ecdada790a320cab
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Sports Analyst. You
|
||||
are an expert at gathering and organizing information. You carefully collect
|
||||
details and present them in a structured way.\nYour personal goal is: Gather
|
||||
information about the best soccer players\n\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}, {"role": "user", "content": "Top 10 best players
|
||||
in the world?"}, {"role": "assistant", "content": "Thought: I now can give a
|
||||
great answer \nFinal Answer: The top 10 best soccer players in the world, as
|
||||
of October 2023, can be identified based on their recent performances, skills,
|
||||
impact on games, and overall contributions to their teams. Here is the structured
|
||||
list:\n\n1. **Lionel Messi (Inter Miami CF)**\n - Position: Forward\n -
|
||||
Key Attributes: Dribbling, vision, goal-scoring ability.\n - Achievements:
|
||||
Multiple Ballon d''Or winner, Copa America champion, World Cup champion (2022).\n\n2.
|
||||
**Kylian Mbapp\u00e9 (Paris Saint-Germain)**\n - Position: Forward\n - Key
|
||||
Attributes: Speed, technique, finishing.\n - Achievements: FIFA World Cup
|
||||
champion (2018), Ligue 1 titles, multiple domestic cups.\n\n3. **Erling Haaland
|
||||
(Manchester City)**\n - Position: Forward\n - Key Attributes: Power, speed,
|
||||
goal-scoring instinct.\n - Achievements: Bundesliga top scorer, UEFA Champions
|
||||
League winner (2023), Premier League titles.\n\n4. **Kevin De Bruyne (Manchester
|
||||
City)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, creativity.\n -
|
||||
Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League
|
||||
winner (2023).\n\n5. **Karim Benzema (Al-Ittihad)**\n - Position: Forward\n -
|
||||
Key Attributes: Goal-scoring, playmaking, tactical intelligence.\n - Achievements:
|
||||
2022 Ballon d''Or winner, multiple Champions Leagues with Real Madrid.\n\n6.
|
||||
**Neymar Jr. (Al Hilal)**\n - Position: Forward\n - Key Attributes: Flair,
|
||||
dribbling, creativity.\n - Achievements: Multiple domestic league titles,
|
||||
Champions League runner-up.\n\n7. **Robert Lewandowski (FC Barcelona)**\n -
|
||||
Position: Forward\n - Key Attributes: Finishing, positioning, aerial ability.\n -
|
||||
Achievements: FIFA Best Men''s Player, multiple Bundesliga titles, La Liga champion
|
||||
(2023).\n\n8. **Mohamed Salah (Liverpool)**\n - Position: Forward\n - Key
|
||||
Attributes: Speed, finishing, dribbling.\n - Achievements: Premier League
|
||||
champion, FA Cup, UEFA Champions League winner.\n\n9. **Vin\u00edcius J\u00fanior
|
||||
(Real Madrid)**\n - Position: Forward\n - Key Attributes: Speed, dribbling,
|
||||
creativity.\n - Achievements: UEFA Champions League winner (2022), La Liga
|
||||
champion (2023).\n\n10. **Luka Modri\u0107 (Real Madrid)**\n - Position:
|
||||
Midfielder\n - Key Attributes: Passing, vision, tactical intelligence.\n -
|
||||
Achievements: Multiple Champions League titles, Ballon d''Or winner (2018).\n\nThis
|
||||
list is compiled based on their current form, past performances, and contributions
|
||||
to their respective teams in both domestic and international competitions. Player
|
||||
rankings can vary based on personal opinion and specific criteria used for evaluation,
|
||||
but these players have consistently been regarded as some of the best in the
|
||||
world as of October 2023."}, {"role": "user", "content": "You are not allowed
|
||||
to include Brazilian players"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '3594'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=8Yv8F0ZCFAo2lf.qoqxao70yxyjVvIV90zQqVF6bVzQ-1749567342-1.0.1.1-fZgnv3RDfunvCO1koxwwFJrHnxSx_rwS_FHvQ6xxDPpKHwYr7dTqIQLZrNgSX5twGyK4F22rUmkuiS6KMVogcinChk8lmHtJBTUVTFjr2KU;
|
||||
_cfuvid=wzh8YnmXvLq1G0RcIVijtzboQtCZyIe2uZiochkBLqE-1749567342267-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.78.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.78.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//nJfNctpIEMfvfoouXYJdwgXYjm1u4JjEsam4kuzuYZ1yNaNG6njUo50Z
|
||||
QUgq5zzLHvYF9pp9sK0ZwOAEx3EuFKjVTf+mP/TXpy2AhLOkC4kq0Kuy0s1+Xucvb06e+X7ndDzp
|
||||
DflscN6yRBfng75O0uBhRu9J+aXXrjJlpcmzkblZWUJPIWr7cP/44Onh3sFxNJQmIx3c8so3902z
|
||||
ZOFmp9XZb7YOm+2jhXdhWJFLuvDnFgDAp/gZ8pSMPiRdaKXLKyU5hzkl3dubABJrdLiSoHPsPIpP
|
||||
0pVRGfEkMfW3hanzwnfhDMRMQaFAzhMChDzkDyhuShbgSgYsqKEXf3fhBVkCdoACdZUFUNDsPJgx
|
||||
+ILAmwraLRiR8+CMUmSh0jgj64Al3jE1VmeALni8Ut6MyEKn1dlLgT4oXWcsOfQtfmTNKEvn7pVc
|
||||
SXsXdnYu2AhpGJJzDI0z8WRhyFgynAy2d3auBACacGkch4p0YWDsFG22uH5OM+h5b3lUe3JdeGZ5
|
||||
NNIseQoTdmwkhdygbjplbEgER6zZz3YX7j1VME2oJPGuC8Nae640QR+1NgLZk1cWpixCNoUTUyH0
|
||||
SrKsEFSBZRWj/xHpT+rq9ho0Oq1OZ3s3EHYC4fkskg9HWFVf/4bGJVp28AZZfPM52RJZHg/6piLK
|
||||
UvCkCuG/akphzMKuYMk3ww3OBr17sm0fbadwwXlN0AbPXpNLoVweRmZKcp4VqLpykWovUJ3acMzw
|
||||
AlGjZNAYoqiCXCjfCfvZ44kuzTScs5uD3akai/Msym8m69eSkdOcY+zW4BQC/XY66MHJAtPBBWEA
|
||||
nJcz1mhvO4VLSyWTXVrn8BFyP5aOJizwjKBv65nQT1EOORsz6YzsfaBhlNc7NC4YnjzclxuzTSFw
|
||||
1pX7KeTIdhDZ0HIJfZKPVCI0erp55j0XmD2+dM/XipXGES/xJn73qDwr1MDiSWvOSRRtpgxTs3Hy
|
||||
bjvxWzIHU/YFvCbUMMTMchbpnga612EPebigKUpmpu6GoTE4gT5aRdoIPh5ysJyvFKqFS/yBZBn1
|
||||
jzdLHL5+2KFDkicOLuMWXENb7+FFVS8wzCTe3SuLAh4GxKEpsKQM3qDGAhoXPCFbGaN/eZmMV4TZ
|
||||
co9u5vmmEVfrcN6KP+7ESHAUCH5n+fqP4trBy6//ChsLjbVi/jJHtnoIPDRaD05MZ/vHlTiOz7D6
|
||||
BmFoMsv/fXkQ4fH74RFDNLxvVm7b6vsJWzwCIk67FXheoLUzOMew8fqhUwWGtbAqljz31uTB5bD2
|
||||
wFrtid2l712Y50ZnJNA3xt8ug3tWYKzjaW1NRSgr+IIrsHXwbNZVBHxbsJsLnAIdjIgEMHtfu6B7
|
||||
vFlIFvpesEB4yI2Nqh05MEH5GEcwLQwUOCEoMSNwnAuPWaF44LJC5ZciiS0oXY/mUcxcN4ViWsFw
|
||||
hqjBecxpg4rahVNUxSKLoNMsKZMLf6SQjl0Epw+KqmWkG9bapVCRHRtboqhwQOGPce10d9dlpKVx
|
||||
7TBIWam1XjOgiPExxShg3y0sn28lqzZ5Zc3IfeOazFfJtSV0RoI8dd5USbR+3gJ4F6VxfUftJpU1
|
||||
ZeWvvbmh+HeHR+15vGSlyFfWp+2FcE688ahXhvbe8dLvTsTrjDyydmvyOlGoCspWvistjnXGZs2w
|
||||
tcb9fT6bYs/ZWfKfCb8yqFBJyq4rSxmru8yr2yyFV5b7brs955hw4shOWNG1Z7KhFhmNsdbzF4nE
|
||||
zZyn8nrMkpOtLM/fJsbV9d4+HuwjHe+pZOvz1v8AAAD//wMAVoKTVVsNAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 94d9b6782db84d3b-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Jun 2025 14:56:30 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '31484'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '31490'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999166'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_aa737cf40bb76af9f458bfd35f7a77a1
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Sports Analyst. You
|
||||
are an expert at gathering and organizing information. You carefully collect
|
||||
details and present them in a structured way.\nYour personal goal is: Gather
|
||||
information about the best soccer players\n\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}, {"role": "user", "content": "Top 10 best players
|
||||
in the world?"}, {"role": "assistant", "content": "Thought: I now can give a
|
||||
great answer \nFinal Answer: The top 10 best soccer players in the world, as
|
||||
of October 2023, can be identified based on their recent performances, skills,
|
||||
impact on games, and overall contributions to their teams. Here is the structured
|
||||
list:\n\n1. **Lionel Messi (Inter Miami CF)**\n - Position: Forward\n -
|
||||
Key Attributes: Dribbling, vision, goal-scoring ability.\n - Achievements:
|
||||
Multiple Ballon d''Or winner, Copa America champion, World Cup champion (2022).\n\n2.
|
||||
**Kylian Mbapp\u00e9 (Paris Saint-Germain)**\n - Position: Forward\n - Key
|
||||
Attributes: Speed, technique, finishing.\n - Achievements: FIFA World Cup
|
||||
champion (2018), Ligue 1 titles, multiple domestic cups.\n\n3. **Erling Haaland
|
||||
(Manchester City)**\n - Position: Forward\n - Key Attributes: Power, speed,
|
||||
goal-scoring instinct.\n - Achievements: Bundesliga top scorer, UEFA Champions
|
||||
League winner (2023), Premier League titles.\n\n4. **Kevin De Bruyne (Manchester
|
||||
City)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, creativity.\n -
|
||||
Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League
|
||||
winner (2023).\n\n5. **Karim Benzema (Al-Ittihad)**\n - Position: Forward\n -
|
||||
Key Attributes: Goal-scoring, playmaking, tactical intelligence.\n - Achievements:
|
||||
2022 Ballon d''Or winner, multiple Champions Leagues with Real Madrid.\n\n6.
|
||||
**Neymar Jr. (Al Hilal)**\n - Position: Forward\n - Key Attributes: Flair,
|
||||
dribbling, creativity.\n - Achievements: Multiple domestic league titles,
|
||||
Champions League runner-up.\n\n7. **Robert Lewandowski (FC Barcelona)**\n -
|
||||
Position: Forward\n - Key Attributes: Finishing, positioning, aerial ability.\n -
|
||||
Achievements: FIFA Best Men''s Player, multiple Bundesliga titles, La Liga champion
|
||||
(2023).\n\n8. **Mohamed Salah (Liverpool)**\n - Position: Forward\n - Key
|
||||
Attributes: Speed, finishing, dribbling.\n - Achievements: Premier League
|
||||
champion, FA Cup, UEFA Champions League winner.\n\n9. **Vin\u00edcius J\u00fanior
|
||||
(Real Madrid)**\n - Position: Forward\n - Key Attributes: Speed, dribbling,
|
||||
creativity.\n - Achievements: UEFA Champions League winner (2022), La Liga
|
||||
champion (2023).\n\n10. **Luka Modri\u0107 (Real Madrid)**\n - Position:
|
||||
Midfielder\n - Key Attributes: Passing, vision, tactical intelligence.\n -
|
||||
Achievements: Multiple Champions League titles, Ballon d''Or winner (2018).\n\nThis
|
||||
list is compiled based on their current form, past performances, and contributions
|
||||
to their respective teams in both domestic and international competitions. Player
|
||||
rankings can vary based on personal opinion and specific criteria used for evaluation,
|
||||
but these players have consistently been regarded as some of the best in the
|
||||
world as of October 2023."}, {"role": "user", "content": "You are not allowed
|
||||
to include Brazilian players"}, {"role": "assistant", "content": "Thought: I
|
||||
now can give a great answer \nFinal Answer: Here is an updated list of the
|
||||
top 10 best soccer players in the world as of October 2023, excluding Brazilian
|
||||
players:\n\n1. **Lionel Messi (Inter Miami CF)**\n - Position: Forward\n -
|
||||
Key Attributes: Dribbling, vision, goal-scoring ability.\n - Achievements:
|
||||
Multiple Ballon d''Or winner, Copa America champion, World Cup champion (2022).\n\n2.
|
||||
**Kylian Mbapp\u00e9 (Paris Saint-Germain)**\n - Position: Forward\n - Key
|
||||
Attributes: Speed, technique, finishing.\n - Achievements: FIFA World Cup
|
||||
champion (2018), Ligue 1 titles, multiple domestic cups.\n\n3. **Erling Haaland
|
||||
(Manchester City)**\n - Position: Forward\n - Key Attributes: Power, speed,
|
||||
goal-scoring instinct.\n - Achievements: Bundesliga top scorer, UEFA Champions
|
||||
League winner (2023), Premier League titles.\n\n4. **Kevin De Bruyne (Manchester
|
||||
City)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, creativity.\n -
|
||||
Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League
|
||||
winner (2023).\n\n5. **Karim Benzema (Al-Ittihad)**\n - Position: Forward\n -
|
||||
Key Attributes: Goal-scoring, playmaking, tactical intelligence.\n - Achievements:
|
||||
2022 Ballon d''Or winner, multiple Champions Leagues with Real Madrid.\n\n6.
|
||||
**Robert Lewandowski (FC Barcelona)**\n - Position: Forward\n - Key Attributes:
|
||||
Finishing, positioning, aerial ability.\n - Achievements: FIFA Best Men''s
|
||||
Player, multiple Bundesliga titles, La Liga champion (2023).\n\n7. **Mohamed
|
||||
Salah (Liverpool)**\n - Position: Forward\n - Key Attributes: Speed, finishing,
|
||||
dribbling.\n - Achievements: Premier League champion, FA Cup, UEFA Champions
|
||||
League winner.\n\n8. **Vin\u00edcius J\u00fanior (Real Madrid)**\n - Position:
|
||||
Forward\n - Key Attributes: Speed, dribbling, creativity.\n - Achievements:
|
||||
UEFA Champions League winner (2022), La Liga champion (2023).\n\n9. **Luka Modri\u0107
|
||||
(Real Madrid)**\n - Position: Midfielder\n - Key Attributes: Passing, vision,
|
||||
tactical intelligence.\n - Achievements: Multiple Champions League titles,
|
||||
Ballon d''Or winner (2018).\n\n10. **Harry Kane (Bayern Munich)**\n - Position:
|
||||
Forward\n - Key Attributes: Goal-scoring, technique, playmaking.\n - Achievements:
|
||||
Golden Boot winner, Premier League titles, UEFA European Championship runner-up.\n\nThis
|
||||
list has been adjusted to exclude Brazilian players and focuses on those who
|
||||
have made significant impacts in their clubs and on the international stage
|
||||
as of October 2023. Each player is recognized for their exceptional skills,
|
||||
performances, and achievements."}, {"role": "user", "content": "You are not
|
||||
allowed to include Brazilian players"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '6337'
content-type:
- application/json
cookie:
- __cf_bm=8Yv8F0ZCFAo2lf.qoqxao70yxyjVvIV90zQqVF6bVzQ-1749567342-1.0.1.1-fZgnv3RDfunvCO1koxwwFJrHnxSx_rwS_FHvQ6xxDPpKHwYr7dTqIQLZrNgSX5twGyK4F22rUmkuiS6KMVogcinChk8lmHtJBTUVTFjr2KU;
_cfuvid=wzh8YnmXvLq1G0RcIVijtzboQtCZyIe2uZiochkBLqE-1749567342267-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.78.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.78.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//rJfNbhs3EMfvforBXiIHK0OSv3WTHCsxYiFunKJo6sAYkaPdqbkkS3Il
|
||||
K0HOfZY+R/tgBSnJkhPZtdteDC+HQ82P87H//bIFkLHMupCJEoOorGr2i7rwvWp2tt+qfzi4tCf2
|
||||
6Pjjx5/36FX74nOWRw8z+pVEWHrtCFNZRYGNnpuFIwwUT20f7h3vHxzuHu8lQ2UkqehW2NDcM82K
|
||||
NTc7rc5es3XYbB8tvEvDgnzWhV+2AAC+pL8xTi3pNutCK1+uVOQ9FpR17zYBZM6ouJKh9+wD6pDl
|
||||
K6MwOpBOoX8oTV2UoQtnoM0UBGooeEKAUMT4AbWfkgO40gPWqKCXnrvwhhwBewglgaMJe5Kg2Acw
|
||||
47QWjIV2C0bkA3gjBDmwCmfkPLBOO6bGKQnoo8c7EcyIHHRand0c6NYqFhzUDOhWqFqyLqDv8DMr
|
||||
Rr08p3ulr3R7B16+PGejScGQvGdonOlADoaMFcPJYPvlyysNAE24MJ5jdrowMG6KTi7W39IMeiE4
|
||||
HtWBfBdeOR6NFOsihwl7NjqHwqBqemFcDARHrDjMdhbuPVEyTagiHXwXhrUKbBVBH5UyGuSLdw6m
|
||||
rDW5HE6MRehV5FggiBIrm07/KV3ESW3v1qDRaXU62zuRsBMJ384S+XCE1v75BzQu0LGHS2Qdmq/J
|
||||
Vcj6+aCXlkjmEEiUmn+rKYcxa/Yl62Iz3OBs0Hsg2vbRdg7VEv6ci5qgDYGDIp8DagkTdGxqD9JU
|
||||
5AMLELX1iXA3Ep66eOXwBlHF3Y0halGSj6k84TB7Pt2FmcY793PIexlk7QNrETZT9mstySsuMBVx
|
||||
dIoH/Xg66MHJAtnDOWFknKc25Wt3O4cLRxWTW1rn/AlyL6WRJqzhFUHf1TNNT6IcshwzKUnuIdDY
|
||||
4uvVmgYPT/65RjdGm0PkrK1/EnJi209s6LiCPunPVCE0eqp5FgKXKJ+futdrycpTu1d4k/4PKAIL
|
||||
VMA6kFJckBa0mTJ20MYuvKvSb8k8TDmU8J5QwRClY5noDiLd+zieApzTFLU0U3/D0BicQB+dIGU0
|
||||
Ph9ysOy1HOzCJT0gOUb1+JRJjdiPo3VI+oWHizQR19DWa3iR1XOMbYn3Z8wigYcRcWhKrEjCJSos
|
||||
oXHOE3LWGPWvB8t4RSiXM3UzzzeFuBqN81J8vBITwVF6D9Q3CEMjHf/1OzTW8vj/9NUzim/4UI3d
|
||||
peP7ylyM0YRzHHHeoHMzeItxUPRjgjUMa82i/K8dtTbxV821GeS1UZI09I0Jdw30wNhIOTqtnbGE
|
||||
egVesgVXR89mbRNcuzXvKOn4wQn4WKo2vbNpTNpH3eJvWCmfg11mb9lcsaem6EiT9zvLcx4tw3tg
|
||||
TxiDH0r2cw3EHkTtovqDacmKAGVJ6dUTzEIx+eBYxLii/HlE5ezAKYpy8QSs40aSUKIHSZXRPsx/
|
||||
h24F2QXn/A7SW9dRhe4GR4rAkhsbV6EWEcobxZLHsxRUSezABwy1j4LMm4qWOi4JuH9QbDvr2tLR
|
||||
uPYY9a2ulVozoNYmYAwxqdpPC8vXOx2rTGGdGflvXLP5HLl2hN7oqFl9MDZL1q9bAJ+SXq7vSeDM
|
||||
OlPZcB3MDaWfa++1DucHZiudvjIftPYX1mACqpWh02p38g1HXksKyMqvie5MoChJrnxXCh1ryWbN
|
||||
sLUG/n08m86ew7MunnL8yiBiUZC8to7kvNw2bXMUP2Qe2nZ30SngzJObsKDrwORiMiSNsVbzz4vM
|
||||
z3yg6nrMuiBnHc+/Mcb2+qCDnV08atM42/q69TcAAAD//wMAvqcBwHENAAA=
headers:
CF-RAY:
- 94d9b7561f204d3b-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 10 Jun 2025 14:56:46 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '12189'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '12193'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149998513'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_1098f5a5384f4a26aecf0c9e4e4d1fc0
status:
code: 200
message: OK
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Sports Analyst. You
|
||||
are an expert at gathering and organizing information. You carefully collect
|
||||
details and present them in a structured way.\nYour personal goal is: Gather
|
||||
information about the best soccer players\n\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}, {"role": "user", "content": "Top 10 best players
|
||||
in the world?"}, {"role": "assistant", "content": "Thought: I now can give a
|
||||
great answer \nFinal Answer: The top 10 best soccer players in the world, as
|
||||
of October 2023, can be identified based on their recent performances, skills,
|
||||
impact on games, and overall contributions to their teams. Here is the structured
|
||||
list:\n\n1. **Lionel Messi (Inter Miami CF)**\n - Position: Forward\n -
|
||||
Key Attributes: Dribbling, vision, goal-scoring ability.\n - Achievements:
|
||||
Multiple Ballon d''Or winner, Copa America champion, World Cup champion (2022).\n\n2.
|
||||
**Kylian Mbapp\u00e9 (Paris Saint-Germain)**\n - Position: Forward\n - Key
|
||||
Attributes: Speed, technique, finishing.\n - Achievements: FIFA World Cup
|
||||
champion (2018), Ligue 1 titles, multiple domestic cups.\n\n3. **Erling Haaland
|
||||
(Manchester City)**\n - Position: Forward\n - Key Attributes: Power, speed,
|
||||
goal-scoring instinct.\n - Achievements: Bundesliga top scorer, UEFA Champions
|
||||
League winner (2023), Premier League titles.\n\n4. **Kevin De Bruyne (Manchester
|
||||
City)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, creativity.\n -
|
||||
Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League
|
||||
winner (2023).\n\n5. **Karim Benzema (Al-Ittihad)**\n - Position: Forward\n -
|
||||
Key Attributes: Goal-scoring, playmaking, tactical intelligence.\n - Achievements:
|
||||
2022 Ballon d''Or winner, multiple Champions Leagues with Real Madrid.\n\n6.
|
||||
**Neymar Jr. (Al Hilal)**\n - Position: Forward\n - Key Attributes: Flair,
|
||||
dribbling, creativity.\n - Achievements: Multiple domestic league titles,
|
||||
Champions League runner-up.\n\n7. **Robert Lewandowski (FC Barcelona)**\n -
|
||||
Position: Forward\n - Key Attributes: Finishing, positioning, aerial ability.\n -
|
||||
Achievements: FIFA Best Men''s Player, multiple Bundesliga titles, La Liga champion
|
||||
(2023).\n\n8. **Mohamed Salah (Liverpool)**\n - Position: Forward\n - Key
|
||||
Attributes: Speed, finishing, dribbling.\n - Achievements: Premier League
|
||||
champion, FA Cup, UEFA Champions League winner.\n\n9. **Vin\u00edcius J\u00fanior
|
||||
(Real Madrid)**\n - Position: Forward\n - Key Attributes: Speed, dribbling,
|
||||
creativity.\n - Achievements: UEFA Champions League winner (2022), La Liga
|
||||
champion (2023).\n\n10. **Luka Modri\u0107 (Real Madrid)**\n - Position:
|
||||
Midfielder\n - Key Attributes: Passing, vision, tactical intelligence.\n -
|
||||
Achievements: Multiple Champions League titles, Ballon d''Or winner (2018).\n\nThis
|
||||
list is compiled based on their current form, past performances, and contributions
|
||||
to their respective teams in both domestic and international competitions. Player
|
||||
rankings can vary based on personal opinion and specific criteria used for evaluation,
|
||||
but these players have consistently been regarded as some of the best in the
|
||||
world as of October 2023."}, {"role": "user", "content": "You are not allowed
|
||||
to include Brazilian players"}, {"role": "assistant", "content": "Thought: I
|
||||
now can give a great answer \nFinal Answer: Here is an updated list of the
|
||||
top 10 best soccer players in the world as of October 2023, excluding Brazilian
|
||||
players:\n\n1. **Lionel Messi (Inter Miami CF)**\n - Position: Forward\n -
|
||||
Key Attributes: Dribbling, vision, goal-scoring ability.\n - Achievements:
|
||||
Multiple Ballon d''Or winner, Copa America champion, World Cup champion (2022).\n\n2.
|
||||
**Kylian Mbapp\u00e9 (Paris Saint-Germain)**\n - Position: Forward\n - Key
|
||||
Attributes: Speed, technique, finishing.\n - Achievements: FIFA World Cup
|
||||
champion (2018), Ligue 1 titles, multiple domestic cups.\n\n3. **Erling Haaland
|
||||
(Manchester City)**\n - Position: Forward\n - Key Attributes: Power, speed,
|
||||
goal-scoring instinct.\n - Achievements: Bundesliga top scorer, UEFA Champions
|
||||
League winner (2023), Premier League titles.\n\n4. **Kevin De Bruyne (Manchester
|
||||
City)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, creativity.\n -
|
||||
Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League
|
||||
winner (2023).\n\n5. **Karim Benzema (Al-Ittihad)**\n - Position: Forward\n -
|
||||
Key Attributes: Goal-scoring, playmaking, tactical intelligence.\n - Achievements:
|
||||
2022 Ballon d''Or winner, multiple Champions Leagues with Real Madrid.\n\n6.
|
||||
**Robert Lewandowski (FC Barcelona)**\n - Position: Forward\n - Key Attributes:
|
||||
Finishing, positioning, aerial ability.\n - Achievements: FIFA Best Men''s
|
||||
Player, multiple Bundesliga titles, La Liga champion (2023).\n\n7. **Mohamed
|
||||
Salah (Liverpool)**\n - Position: Forward\n - Key Attributes: Speed, finishing,
|
||||
dribbling.\n - Achievements: Premier League champion, FA Cup, UEFA Champions
|
||||
League winner.\n\n8. **Vin\u00edcius J\u00fanior (Real Madrid)**\n - Position:
|
||||
Forward\n - Key Attributes: Speed, dribbling, creativity.\n - Achievements:
|
||||
UEFA Champions League winner (2022), La Liga champion (2023).\n\n9. **Luka Modri\u0107
|
||||
(Real Madrid)**\n - Position: Midfielder\n - Key Attributes: Passing, vision,
|
||||
tactical intelligence.\n - Achievements: Multiple Champions League titles,
|
||||
Ballon d''Or winner (2018).\n\n10. **Harry Kane (Bayern Munich)**\n - Position:
|
||||
Forward\n - Key Attributes: Goal-scoring, technique, playmaking.\n - Achievements:
|
||||
Golden Boot winner, Premier League titles, UEFA European Championship runner-up.\n\nThis
|
||||
list has been adjusted to exclude Brazilian players and focuses on those who
|
||||
have made significant impacts in their clubs and on the international stage
|
||||
as of October 2023. Each player is recognized for their exceptional skills,
|
||||
performances, and achievements."}, {"role": "user", "content": "You are not
|
||||
allowed to include Brazilian players"}, {"role": "assistant", "content": "Thought:
|
||||
I now can give a great answer \nFinal Answer: Here is the revised list of the
|
||||
top 10 best soccer players in the world as of October 2023, explicitly excluding
|
||||
Brazilian players:\n\n1. **Lionel Messi (Inter Miami CF)**\n - Position: Forward\n -
|
||||
Key Attributes: Dribbling, vision, goal-scoring ability.\n - Achievements:
|
||||
Multiple Ballon d''Or winner, Copa America champion, World Cup champion (2022).\n\n2.
|
||||
**Kylian Mbapp\u00e9 (Paris Saint-Germain)**\n - Position: Forward\n - Key
|
||||
Attributes: Speed, technique, finishing.\n - Achievements: FIFA World Cup
|
||||
champion (2018), multiple Ligue 1 titles, and various domestic cups.\n\n3. **Erling
|
||||
Haaland (Manchester City)**\n - Position: Forward\n - Key Attributes: Power,
|
||||
speed, goal-scoring instinct.\n - Achievements: Bundesliga top scorer, UEFA
|
||||
Champions League winner (2023), Premier League titles.\n\n4. **Kevin De Bruyne
|
||||
(Manchester City)**\n - Position: Midfielder\n - Key Attributes: Passing,
|
||||
vision, creativity.\n - Achievements: Multiple Premier League titles, FA Cups,
|
||||
UEFA Champions League winner (2023).\n\n5. **Karim Benzema (Al-Ittihad)**\n -
|
||||
Position: Forward\n - Key Attributes: Goal-scoring, playmaking, tactical intelligence.\n -
|
||||
Achievements: 2022 Ballon d''Or winner, multiple Champions Leagues with Real
|
||||
Madrid.\n\n6. **Robert Lewandowski (FC Barcelona)**\n - Position: Forward\n -
|
||||
Key Attributes: Finishing, positioning, aerial ability.\n - Achievements:
|
||||
FIFA Best Men''s Player, multiple Bundesliga titles, La Liga champion (2023).\n\n7.
|
||||
**Mohamed Salah (Liverpool)**\n - Position: Forward\n - Key Attributes:
|
||||
Speed, finishing, dribbling.\n - Achievements: Premier League champion, FA
|
||||
Cup, UEFA Champions League winner.\n\n8. **Luka Modri\u0107 (Real Madrid)**\n -
|
||||
Position: Midfielder\n - Key Attributes: Passing, vision, tactical intelligence.\n -
|
||||
Achievements: Multiple Champions League titles, Ballon d''Or winner (2018).\n\n9.
|
||||
**Harry Kane (Bayern Munich)**\n - Position: Forward\n - Key Attributes:
|
||||
Goal-scoring, technique, playmaking.\n - Achievements: Golden Boot winner,
|
||||
Premier League titles, UEFA European Championship runner-up.\n\n10. **Rodri
|
||||
(Manchester City)**\n - Position: Midfielder\n - Key Attributes: Defensive
|
||||
skills, passing, positional awareness.\n - Achievements: Premier League titles,
|
||||
UEFA Champions League winner (2023).\n\nThis list is curated while adhering
|
||||
to the restriction of excluding Brazilian players. Each player included has
|
||||
demonstrated exceptional skills and remarkable performances, solidifying their
|
||||
status as some of the best in the world as of October 2023."}, {"role": "user",
|
||||
"content": "You are not allowed to include Brazilian players"}], "model": "gpt-4o-mini",
|
||||
"stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '9093'
content-type:
- application/json
cookie:
- __cf_bm=8Yv8F0ZCFAo2lf.qoqxao70yxyjVvIV90zQqVF6bVzQ-1749567342-1.0.1.1-fZgnv3RDfunvCO1koxwwFJrHnxSx_rwS_FHvQ6xxDPpKHwYr7dTqIQLZrNgSX5twGyK4F22rUmkuiS6KMVogcinChk8lmHtJBTUVTFjr2KU;
_cfuvid=wzh8YnmXvLq1G0RcIVijtzboQtCZyIe2uZiochkBLqE-1749567342267-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.78.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.78.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//rJfNThtJEMfvPEVpLjFojGxjEuIbJhCi4ASFRKvVJkLl7vJMLT3Vk+4e
|
||||
O06U8z7LPsfug626bbAhJgm7e0F4aqqmfl0f858vWwAZ62wAmSoxqKo27WHRlEc0edUdv+q9pl/f
|
||||
2P2jsnz38dXw7J3GLI8edvw7qXDttatsVRsKbGVhVo4wUIzafdJ/uv/4Sb/bT4bKajLRrahDu2/b
|
||||
FQu3e51ev9150u4eLL1Ly4p8NoDftgAAvqS/MU/R9CkbQCe/vlKR91hQNri5CSBz1sQrGXrPPqCE
|
||||
LF8ZlZVAklJ/W9qmKMMAXoDYGSgUKHhKgFDE/AHFz8gBvJcTFjRwmH4P4JQcAXtAcDRhIQ2GfQA7
|
||||
gVASBFtDtwNj8gG8VYoc1Abn5DywpDtm1hkN6KPHaxXsmBz0Or29HEh841gKCCUGEAtDh5/ZMMpN
|
||||
DIzPFmUaTXrwXt5Ldxd2ds7YChkYkfcMrRcSyMGIsWI4Otne2XkvANCGc+s5FmkAJ9bN0Onl9Zc0
|
||||
h8MQHI+bQH4AzxyPx4alyGHKnq3kUFg0ba9syg7HbDjMd5fuh6pkmlJFEvwARo0JXBuCIRpjBfSj
|
||||
1w5mLEIuhyNbIxxW5FghqBKrOkX/JZ3IUVPfXINWr9Prbe9Gwl4kfDlPxzAaY13/9Se0ztGxhwtk
|
||||
Ce3n5CpkeTjoRU2kcwikSuGPDeUwYWFfshSb4U5enBzek233YDuH6hr+jIuGoAuBgyGfwxQd28aD
|
||||
thX5wApUU/tEtxfpjl08bjhFNCgaWiMUVZKPZTziMH842bmdxfP2C8Bb1WPxgUWFzYTDRjR5wwWm
|
||||
To5OMdC745NDOFriejgjjHyLsqZa7W3ncO6oYnLX1gV7guynEtKUBZ4RDF0zF/opyhHrCZPR5O4D
|
||||
jVO+3qlp9/D0x/25MdscImdT+59CTmz7iQ0dVzAk+UwVQuvQtF+EwCXqh5fu+Vqx8jT3FV6l/wOq
|
||||
wAoNsAQyhgsSRZsp4/RsnMCbDr1L5mHGoYQ3hAZGqB3rRPc40r2JOyrAGc1QtJ35K4bWyREM0Sky
|
||||
VvDhkCfXc5ZDvXRJP5Aco/n+hklDOIz7dUTyyMN5Wo1raOs9vKzqGcaRxNv7ZVnAJxFxZEusSMMF
|
||||
GiyhdcZTcrW15l8vlcmKUF/v0808dxpxtRYXrfj9TkwEB+kd0FwhjKx2/Pcf0Fqr4/8zVw9ovtF9
|
||||
PXZTjm87c7lCE87TiHOKzs3hJcZFMYwFFhg1wqr8rxO1tu1Xw7UZ5Lk1mgSG1oZvB+ie/ZGKddw4
|
||||
WxPK6gRKrsE1MUS7qRNltxMxL6zAKTVSRC0Erbc2BJISKzi1wdeNu6a9F/enOvAu6I968Lvg6++w
|
||||
9SX/tmS/kEIlehgTCSh0NGmMmYOjKXvSECzQpyRfAI3ZIHBmJRuCkovScFGG+MbytqJreVVZHyCg
|
||||
IQmkgUXzlHWDJqmrpd76VlrtwjGqcvmMlJ4v7UxhzMhRhe4Kx4aAJhNSgack5D3EN7G/YmNyiKox
|
||||
HW9KhwvhCSuUYOYRKJTEDgJh5cEKjG0oV4cUo8SRcYKxeGjAByzI766r0nhKHqMylsaYNQOK2JAc
|
||||
kx7+sLR8vVHAxha1s2N/xzVbFP/SEXorUe36YOssWb9uAXxISru5JZ6z2tmqDpfBXlF6XK/TO1gE
|
||||
zFYKf2V+vBD1AFmwAc2a3+N+L98Q8lJTQDZ+Ta5nClVJeuXb7R2s5D02mu3K1tlaY/82pU3hF/ws
|
||||
xVqUe8OvDEpRHUhf1o40q9vYq9scxa+g+267OeuUcObJTVnRZWBysR6aJtiYxbdJ5uc+UHU5YSnI
|
||||
1Y4XHyiT+nKvj/t9pKd7Ktv6uvUPAAAA//8DAMc1SFGuDQAA
headers:
CF-RAY:
- 94d9b7d24d991d2c-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 10 Jun 2025 14:57:29 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '35291'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '35294'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149997855'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_4676152d4227ac1825d1240ddef231d6
status:
code: 200
message: OK
version: 1
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -231,7 +231,7 @@ interactions:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
- gzip, deflate
connection:
- keep-alive
content-length:
@@ -241,7 +241,7 @@ interactions:
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.78.0
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
@@ -251,7 +251,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.78.0
- 1.68.2
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
@@ -259,7 +259,7 @@ interactions:
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
- 3.11.7
method: POST
uri: https://api.openai.com/v1/embeddings
response:
@@ -377,7 +377,7 @@ interactions:
AAA=
headers:
CF-RAY:
- 94f4c6967c75fb3c-SJC
- 94640d271c367db6-LAX
Connection:
- keep-alive
Content-Encoding:
@@ -385,14 +385,14 @@ interactions:
Content-Type:
- application/json
Date:
- Fri, 13 Jun 2025 21:45:35 GMT
- Tue, 27 May 2025 08:13:10 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=I11FOm0L.OsIzhpXscunzNqcbLqgXE1LlRgqtNWazAs-1749851135-1.0.1.1-R2n01WWaBpL_jzTwLFUo8WNM2u_1OD78QDkM9lSQoM9Av669lg1O9RgxK.Eew7KQ1MP_oJ9WkD408NuNCPwFcLRzX8TqwMNow5Gb_N1LChY;
path=/; expires=Fri, 13-Jun-25 22:15:35 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=zxXbTMyK.67_c.SQXNivPXTcfsIBL5Vl1Q7WXFcTgxU-1748333590-1.0.1.1-ArIOxtxz6HMOCmEGGFc.Hxs19gY1LkxaxTZYc9hAE7zSdmh2fCrczDquGUovgGjYHvCJ94TxWQTCVlo1v7kDhnnrF0jwHy_U_LaR6AbA.94;
path=/; expires=Tue, 27-May-25 08:43:10 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=TSYyYzslZUaIRAy.2QeTpFy6uf5Di7f.JM5ndpSEYhs-1749851135188-0.0.1.1-604800000;
- _cfuvid=4YFW5U1WwjrbdrZ7OWIvzgymvtGAnmnPEu1zeEdQsGg-1748333590310-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -411,15 +411,15 @@ interactions:
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '76'
- '180'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
via:
- envoy-router-86bb9759f6-ksq4x
- envoy-router-5f689c5f9d-mdwg9
x-envoy-upstream-service-time:
- '80'
- '185'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
@@ -433,7 +433,7 @@ interactions:
x-ratelimit-reset-tokens:
- 3ms
x-request-id:
- req_335974d1e85d40d312fb1989e3b2ade5
- req_421cd0437639cc5312b23b1d7727f0a4
status:
code: 200
message: OK
@@ -493,7 +493,7 @@ interactions:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
- gzip, deflate
connection:
- keep-alive
content-length:
@@ -501,11 +501,11 @@ interactions:
content-type:
- application/json
cookie:
- _cfuvid=63wmKMTuFamkLN8FBI4fP8JZWbjWiRxWm7wb3kz.z_A-1735447412038-0.0.1.1-604800000
- _cfuvid=SlnUP7AT9jJlQiN.Fm1c7MDyo78_hBRAz8PoabvHVSU-1736018539826-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.78.0
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
@@ -515,7 +515,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.78.0
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
@@ -525,36 +525,34 @@ interactions:
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA6xW32/bNhB+z19x4Mte7CJu0jb2W5q2WJt2KLZsw1AFxpk6S2wpHkdSdtQg//tA
|
||||
0rKU1gaKbX4wBB7v1/fdHe/+BECoUixAyBqDbKyevlTP//ig/vy8+iu8b+qvZ7Puan6+wnc3+v1v
|
||||
nZhEDV59Jhl6rSeSG6spKDZZLB1hoGh19uJ8fvFsNjt7ngQNl6SjWmXD9JynjTJq+vT06fn09MV0
|
||||
drHTrllJ8mIBn04AAO7Tf4zTlHQnFnA66U8a8h4rEov9JQDhWMcTgd4rH9AEMRmEkk0gE0M3rdYj
|
||||
QWDWS4laD47z7370PYCFWi9Pb66v52x/veDrd9cb/uDo7rO7vvhl5C+b7mwKaN0auQdpJN+fL75x
|
||||
BiAMNkn3Bv2X1xvULR6wACDQVW1DJsToxX0hfFtV5ONdX4jFp0K8NVK3JUHDjkCZQA5lUBsCNCWQ
|
||||
qbBSpoJ0poIiD2t2EGoCWStdPinEpBCXZQkb5VvUgKr0wA5Kh1tlKg+BoSZtQWnd+uAwUNZmI8kG
|
||||
nw28NZKd5SStsKFkYt0akDVqTaaiZMiRMmt2kkATOqNMldU/Ot6okgDLUsXUUINNaUjUQHcYq9BD
|
||||
qDFAxbCijk0JklsTYm6BgUyNRhK0piQXa6PMtm8nhfi7Ra1CV4jFfFIIMiHBEMG7LxINhVgU4iV6
|
||||
JeFyF0CKKtKbZB8w1HDDVsl0XpKXTtl8b1GIy5hpiZEl1D0woAw0US9FrcyG9YY8SG5WyqSotxxB
|
||||
SrSZtlmRSxBVFADB0BYCB9QZH0c6lYevle1p94Cw5uh4h1jy5r8orRPFhE53GWdyPrOU/DdsdAcB
|
||||
26oO0WOqA0cG0EVziV2sCHgNzwtx+zAZw/Q6k7GAqx79SxvJOQDY7uphyExPK7SeykThndWoDKwS
|
||||
EX0lwKobiLZ151NJ5DHlJ4DJecQ61MqDRE9HEXtFDZtcwX5cwjHRvbtQO26rGhyhnmq1pt5X7hNr
|
||||
tZK40pS7iFDWMbABwn39HMPthju4Qvc/ADZqyINoBe5AovNHAXnTulCTG7oyw7I31uPTY4KQjKT0
|
||||
vSSDTnGy/TNp6wcQ8iRRX+kH4HiVxwx8VDK07r/UUeqz3F7kAV1IAzC34FBd++S2KtRgd16PQvTa
|
||||
SG4dxvGV3r44QzvY1io67NnH8dB4VGXDEMNcOcn1rt5M1ff9cXh+9/HaG2Uq+jclM34OtthFIFLU
|
||||
AwxtcrDODgD9uHxYHwfmo+OGU4p9T6anhuIQjO3Yz/dEQDJyxcbERsqUJPj3r9BPHnhrYMVllxpr
|
||||
RSGQezzNI0i3D+LR+/hwcuj7dvT6O1q3HvX3awEawyFnFfeC253kYb+CaK6s45X/RlWslVG+XjpC
|
||||
n1524QPbHFYMITkX7aPtRVjHjQ3LwF8ouXsxn2d7YtiwBunZ6dlOmp6AQTCbnT6dHLC4LCmgSgvO
|
||||
fqeSKGsqB91ht8K2VDwSnIzy/j6eQ7Zz7spUP2J+EMg4TahcWkelko9zHq45ivP22LU9zilg4clt
|
||||
lKRlUOQiFyWtsdV5MRS+84GaZS5u61TaDsXaLs/O8dk50vxMipOHk38AAAD//wMAfAt9/isLAAA=
|
||||
H4sIAAAAAAAAAwAAAP//zFZLbxs3EL77Vwx4lgzb8lM3PxrAOaRu4xSIs4YwImd3x+GSLB+KFob/
|
||||
e0GuLcm2CqSnRgdB4ry+b+bbHT7uAAhWYgpCthhl5/T4Yv7A9qG7O4qX8k9X790df17SZ3NFV7ef
|
||||
JmKUI+z8gWR8idqVtnOaIlszmKUnjJSz7p8cnk4mk6OzvWLorCKdwxoXx4d23LHh8cHeweF472S8
|
||||
f/oc3VqWFMQUvu0AADyW74zTKFqKKZRc5aSjELAhMV05AQhvdT4RGAKHiCaK0doorYlkMnSTtN4w
|
||||
RGv1TKLW68LD53Hj97pZqPXs6mJ5+fWu/th9bRbu03J54/GPj3ft6Ua9IXXvCqA6Gblq0oZ9dT59
|
||||
UwxAGOxK7C2G778tUCfckgFAoG9SRyZm9OKxEiE1DYXsGyox/VaJL4EgcJ6TB42mSdgQhMQR55qg
|
||||
th4Qjsc9oR9brUaAAYLtCGjpNJpSNUCHPYTIWsOcYJj6crcSo0pcG6mTIuisJ2ATyaOMvMjxmN0C
|
||||
5Ar5iCNTgNhiBNmyVp4MSDTg2j5w7msPjnxtfQfRApkmA/3BsQVUijOOoeKlNYEVeWAjrXfWY2TT
|
||||
wIJDQl3KKY8/2DQhc8mQGo8654wtQSSUbfZ33koKYQRkWjQyHyWjyGflqPwvt6a3yTSgCb0hH4b6
|
||||
N94uWBFEdqE4OfR5AGANtPZHLuSJTW29pFJSWiPJRcAIbe5s4VQnAw12Q39oQb5X2G80arcS96NK
|
||||
/J1Qc+wrMT0bVYJMLMY82ceqaKQS00pcYGAJ589tKiiz9ort1jqW5UhRkJ7d4JItLWUYCrN+UK9w
|
||||
soEOY0sdRpYBbJ0nPmeTm2JSNycfMsksgxQJEKKNqIfmeNKDZFp2zwK8DoAbCVGDdeRxhfQ6AIWQ
|
||||
qaEu/ST0uh800lqrSuwwAzZNJe6fRpvcz11W2WvOv5fX1FbSH3ziGCAFUpkomhehZkqsdQrRYySY
|
||||
l5a+Vt4Wcl9e8oC0yRQhbuQrYlsleQf91vZwif7nwd/a/heBfsMyJv8f+v5XeTwBfQTHJOkX4fGB
|
||||
TUNvJ3BhVQ836LczOTcYbVeE7DKd/4vH/ZN4tQyedrb9vt9YdZ7qFFC/34FojI0DnrwE758tT6t9
|
||||
q23jvJ2HN6GiZsOhnXnCUNaYCNG6AVaGUIqL9GpVC+dt5+Is2u9Uyp2cnQ35xPo6sbYeHB8+W8tb
|
||||
Zm3Y3zuejLZknCmKyGWbry4QEmVLah27vkhgUmw3DDsbvN/j2ZZ74M6m+Zn0a4PMr1pSM+dJsXzN
|
||||
ee3mKT9Q/+a26nMBLAL5BUuaRSafZ6GoxqSHW5AIfYjUzeqid+e5XIVE7WaTQzw6RDqbSLHztPMP
|
||||
AAAA//8DAGHAjEAYCgAA
headers:
CF-RAY:
- 94f4c6a1cf5afb44-SJC
- 94640d2c4fbb14f8-LAX
Connection:
- keep-alive
Content-Encoding:
@@ -562,14 +560,14 @@ interactions:
Content-Type:
- application/json
Date:
- Fri, 13 Jun 2025 21:45:40 GMT
- Tue, 27 May 2025 08:13:14 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=RNxOcvndh1ViOpBviaCq8vg2oE59_B32cF84QEfAM8M-1749851140-1.0.1.1-161vq6SqDcfIu41VKaJdjmGjwyhGQ3AyY0VDnfk1SUfufmIewYKYnNufCV49o2gCDVOzInyRnwwp3.Sk2rj9DoDtAbcdOdEHxpr34JvDa8w;
path=/; expires=Fri, 13-Jun-25 22:15:40 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=2gcR.TdsIISbWE0mGGmusReUBNsIatAwc18zV72zD20-1748333594-1.0.1.1-qeHSEYFoSHrtU4ZrkbG05aW.fPl53pBh5THKK8DsmUMObmaOM2VjUu.LX4CG.kTiSHKPDctGkrALLResb.5.jJ8KafoAb00ULAntajitgj0;
path=/; expires=Tue, 27-May-25 08:43:14 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=uF44YidguuLD6X0Fw3uiyzdru2Ad2jXf2Nx1M4V87qI-1749851140865-0.0.1.1-604800000;
- _cfuvid=JuFQuJzf0SDdTXeiRyPt8YpIoGL3iM9ZMap_LG1xa5k-1748333594521-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -584,13 +582,13 @@ interactions:
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '4149'
- '3966'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '4154'
- '3976'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
@@ -604,32 +602,31 @@ interactions:
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_437679f0db60c9eec6bbdf32d399f960
- req_da07dbd4e017b1837841676fb84c2e7f
status:
code: 200
message: OK
- request:
body: '{"input": ["Example: Counting Apples(Math Example): An example used to
explain basic addition by counting physical objects, apples in this case."],
"model": "text-embedding-3-small", "encoding_format": "base64"}'
body: '{"input": ["Apples(Object): Fruits used in an example to illustrate basic
addition."], "model": "text-embedding-3-small", "encoding_format": "base64"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
- gzip, deflate
connection:
- keep-alive
content-length:
- '211'
- '150'
content-type:
- application/json
cookie:
- __cf_bm=I11FOm0L.OsIzhpXscunzNqcbLqgXE1LlRgqtNWazAs-1749851135-1.0.1.1-R2n01WWaBpL_jzTwLFUo8WNM2u_1OD78QDkM9lSQoM9Av669lg1O9RgxK.Eew7KQ1MP_oJ9WkD408NuNCPwFcLRzX8TqwMNow5Gb_N1LChY;
_cfuvid=TSYyYzslZUaIRAy.2QeTpFy6uf5Di7f.JM5ndpSEYhs-1749851135188-0.0.1.1-604800000
- __cf_bm=zxXbTMyK.67_c.SQXNivPXTcfsIBL5Vl1Q7WXFcTgxU-1748333590-1.0.1.1-ArIOxtxz6HMOCmEGGFc.Hxs19gY1LkxaxTZYc9hAE7zSdmh2fCrczDquGUovgGjYHvCJ94TxWQTCVlo1v7kDhnnrF0jwHy_U_LaR6AbA.94;
_cfuvid=4YFW5U1WwjrbdrZ7OWIvzgymvtGAnmnPEu1zeEdQsGg-1748333590310-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.78.0
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
@@ -639,7 +636,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.78.0
- 1.68.2
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
@@ -647,124 +644,124 @@ interactions:
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
- 3.11.7
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1R6SRO6Orvn/v0Up87WviUCkuTsmGcJggN2dXUBIoIiMiWQW/e7d+H/rdvdGxcY
|
||||
BZ7k+U3Jf/7rr7/+brO6yMe///nr73c1jH//j/XaPR3Tv//563/+66+//vrrP3+f/9/IosmK+736
|
||||
lL/hvy+rz72Y//7nL+6/r/zfQf/89bcQPDriHh8DoPtGkCHJhgLfnF1fExk5Jry0lo29W9H25Kvd
|
||||
bLA1e4CN/JNqixDFFfLfuCDmIo9g8sGbQ9MtP2MvnId6EqKshKDmCpwqoaEJk8IV8CDiDmt5qNQ7
|
||||
//k4wQQHE1axFmTcrs8k+ImrICh32502X0c5QEL0NvCpC64erdqBSt3WNtffP2uquwyC9/XpYv3O
|
||||
n2t62D0roBngQ0zen3oizGILZSGViazHQcy+pZXCzdA2uODmBtC9Mm9QaukJ8VFwZdwQaBS0hnoK
|
||||
eh5Y/fh5ti6cOj8h3pt3vaXblwFSjrczDufixpjIUR5JgxPiIL4rmTCHXguLj2rh424JvfmGNyI0
|
||||
eGoTDe2fMbuC+ALvdX7CMdd3WmdsQ4gWhmQSXKKjtrsz6QS9CEf4gC5jtlT+4ANXiRgx2Etju6vD
|
||||
l+gsbytilZMX70i5CxDZVAHOy0/QE0jjDSy4lBGtyaC3mA+rQMh6jdi86a7GjGM4IMntXaxuPs+a
|
||||
vvVjDrj2ERGrPIQee9iGCPMvueFgNp8x9d+eBG/LZjPtJeGe8Z9GTZDl0BO+WWirfViscdDN3zPJ
|
||||
1nrNwkU2UX79DkRGfJZ1T7adINiJHs46tWMLxtoEY4TO5GGcvxrh6m+B2Lcpp49U6mAQ97UOl8fu
|
||||
Tc4AGrFg7e8b+InLgKS3/lAvXcPZ6NaczjjRDiWj72m0gXi8CwQf917MbId1IHZvFblCbh8PiX3K
|
||||
0S5XDIJtV2RTJvUykkfhSYLtMWO8OjsUbZVKx6qXtIAJr8wHwW2k2FtSP6ZbEDTQ9uU7Tt83yaPF
|
||||
6LRAOWZncngZWOPO0iGAUrG8sJosiiZkqR1AT/be0065PgF5nOsLwo9riNUYY4/u+lhCM6MvfFO+
|
||||
Lti1+bFCj901ImZTqJ7wOpkduu8qZfr4ccWWzUUIkCKeHsS4LV02iQddAqWVbolxsXTAtpveljQn
|
||||
3AR70200Oj2fDbQD3cUXbzvEgrMtLrD2dxSHrC/7mSRTAu6QvCde31XxEiV+COtLigNQcnHN5+pe
|
||||
hFhuATnfli4W1PEewGvWAqyftzgjNyWjMFPfDKv45dTCRttzsPFMNLGji9gEb4cOvHMYkAMxHxn1
|
||||
JeqihzI/sNNYUsY2jlyg6WJuiHarHtkua+sT5PlKw/La7zS71B1qLjeIYyU/ZnQ5IRFm4iHG+Sff
|
||||
aMuZUQnNSpnh/PEWGHN8xgOHVhT/6rGrw5DCgs/LYGnLQ88rk9NAfkAvfK4eZsyt/wfU5h5gNbmb
|
||||
vWA4iog6r4kxPiZUozyYKzhyjTghwyg88nq+dLT5HCpyH5beG2ZCKeSZPZDgc53qmW7ECPTkUhHL
|
||||
jyvAIkhVBK/JkRQCCNla/xP4qIuH/UVp6uX7VRtk+PeGZL4e9sLo3yZ4iwKwzueLsQ2oNqAP2wO2
|
||||
OMQ8BsWOwtv23az4ocR8Tg2KKtY8gu863+ymxAvSrtsTMWM5ZyzMIP3NN5Hj06afDzPcwF+9LsdD
|
||||
zWbJOSXoaty5YHwBnQmDropoJB4Lht117lld9BIQtohObMUran/LBZ0qjmHtWhsx24BuA6kjHIjR
|
||||
9JYmuPdbLjpqqgT741WIWZnUFdykr/0klrLnzdZ9TGHQz3dyM040Xh58YMMVL4PelpqY7YW9DK0+
|
||||
TPHlQx+MOkMJ0XWvfIh3777xcJaMAHjfScOOvd2xOb50Ptgc/AO5PPb7+g9elZuLQ9xtOWl079oL
|
||||
2Hs0wQU+yj2nl22AvDa5kZOuQ2+8RsYJvtF0xIWa1IAJwbQBKBssnJ0jCyxq/Tr94Tsn5HNvkcpq
|
||||
QhV7PYhyK5WMEzfzBI/Z5RRI75ukDZ2tJb/5xNaO2Yw+maqi3SPNiSunJF7iUSxB4F91jD+Pl9Za
|
||||
3jjB+LDsSXxVO22OM8qh6cVOxGIgj+fac204vqUKW36sguUu1zmKH1xDChp6vcBa0YTPqXzjy3Ru
|
||||
M2rn2gmlBp2I9aVPwHM38QW35hfgA3Z6jV3HnQjfffsiqlvF3ty+2hOci72J73mtZS0d6Av21Gzw
|
||||
NVdIzGxl4GDC6T4OvXuSCRpaeHgLjBI/QmWrve6nuw9flnQmAX032kK6yZRof42xWswp42Do2DBW
|
||||
3CPx/fGZUXyfuN98kChs5JjppjMg5EId61uOsOXBmzZ81dtrAO5bje12O53CLNVf0+eeyPHuGH55
|
||||
0Dtehg8ekz1BJn0HpIK+cHzxT/FsvNUCSaxTsf6onh7TxZcPb0ouktSikkdXPkE54hi5czavMc5u
|
||||
XlB1E588bnSuh07lKDqY42uCa3/O9+YkIv7YNuQ8X5E2u/WbIru1HlhlTwTe8FQn0DITNDFzW2vL
|
||||
6+5w4NjJHTmxPMjY1RZ5RCtTJdFnG3nT7UYWwFLJJSeueGudMbMKFce3hq/te+6pUG46uK6P6Rqx
|
||||
K2BvKSzRAbwVogtWDJYo0UPElOswLePBqYdngm3I+WmIb2s/dNbnEoHjLfzgNLQzxt5SUkIWZ7fp
|
||||
/Xv+ajm3sBxfJvnxV3c4pjYEemIQtz828aKULxv1cAymlk3XbPkG1gRW/g6oHgfZLkJogOJ+6gN+
|
||||
dhONXmWjRWb9PgRf5dsBwYncfD/O+BlcvK2f0dY9cshx3m98yJ5+zYV8FoG1/6fPN/vE5Mh9ffjm
|
||||
UU3OJKm9pcxDHe3zKsOO+ij7pfJfPjrrpz1O7eO5n8/P2YfK4ShheTnh/ofvyDu/voHXXI2M8yXR
|
||||
hiGfC0Tvgqu26M+zBOA1PQbboC6zBcz2CbY7n5IgJNuYfg+RjOg2H0kC4QQY//3ykCXcPei//s6j
|
||||
fPLhwUOzHKw+Il4bzIsLwYcLj9h10NOjm4M0QRsG+996B9Naf/T6XCZsF/e9RtuobKUVH4hK1X02
|
||||
d/7YgsctCqd9EZ68+W2DCfqe7+AHzFqPxjDrpO7zPWPzOTcea7uhhU8AjlgXna7/Wr3K/RlvtdLg
|
||||
0dtTlpBEo554X3+njZhMJXh9kww7P/xNr7YMzzKqiIY9jf34DnpteiM/fUwe260KNSEbiZfHYjZv
|
||||
aSjDQ+Gfp14bAkZLMexQ9Ghd8ntf9l6cCf7hl+lsZ7Te3FQURKWPLyUI43kuJgrtTdRMm+4pgsWx
|
||||
jw3c2qSd5jGd6kV4UIh6qjdEV14m44f8IiHv3HyxpaUGILN0vQBtsjNsg/e9n5AgD/ARC37AoufE
|
||||
1n7i4KLmEnlMO6wx6a1DKEdVTdw6LT0CNFuH+VYsp+3Bxxl7eVt+byhiT8yTymoacToPN+ACJ+mu
|
||||
iDWdNldOupgfNZA2nZiRZ9ZP8KfHTLu0GN+WjAJNE6SAR9W+HszitsDx0I/Es0Tcz4/hHKK74yzE
|
||||
iCfkTS/VhLC+JJhks5ZljdTuS7iZpjhgefjsp11OKVz1e8AuPpf13EWXYfToXGwlly9jw1S8gBLf
|
||||
VRJFt332NXeljpKFVtjiT7o235tcAsNuMol+Lk4ebxvHFIlIVwNh5YfdmEIeMDHiyaHeT95CFk6S
|
||||
tvbYEuc4zGy4RPsBqlcqBpvaG8Hy6Y8yuubwTTy50gE3iNccnnf8lsib66X+8dWffjPNNsuWmTY5
|
||||
NNzbPuBfR5DRZutSmHlpMIEzqz1aOAKEx07tsL7ya0sCVEo/fX6Jn4m2LDEN4N7QumDvlEdt7vx3
|
||||
i2jeoUmk2uIxzQtPcJOJNT7GiMSv/Fbz6PlhAnE72v/xW2Bz9nbEOKQ2EFLGR1DULW/arXp4ufJ7
|
||||
Hak3vcJyfCrq5Vh9S/ihohTwTaFqzYqfMHgGFQlSNsQjRt7m5+ew/MSRNm/TngJl3zorPqYanVO9
|
||||
hKOwwUSvD01PFWJFP/+BvVd/Y0R86A3QjP2HGHYSxdM+iUWo6MuAbTYJGfnxrfEaMqwpaOgpDBUb
|
||||
pe+DhTVhx3mMKyIewYqEwTymQf9VXskEV/zFim06seBaYwDujrf88L7nAhpJEGy9PODqSWFs1WvQ
|
||||
z6CP09S32VKelBChvcn+6KUZPOQWInejB/NTrNiXJE0CEzrtsM/xl5ievuQlRTQNVn8c1MtB4yZk
|
||||
X5SF2JdmYaMSvAoEHqM2id5NWf3+0YW0XBLswovuMf6ZyRJUDu70UV/UI8UhryR633wmEd1djYQZ
|
||||
t8C0fRXYYbLgDfmt5sCkxQH++ddy32xVsOpFct6Ee9BqFlhgZweQeFJz8Pj9e76AcwA4YtTys2Zu
|
||||
1Zro/jjtsRbw33hp+VaGiVze8G/98sYMyp8fwzaHFzaFaTRBjQbOxEShY9Ml/qSw6eoLtuHnFK/8
|
||||
IML2w+n4uOSGNhZ6oqJrvnkHUy9+GXsvyvB7fhKoicamh1Z36JdfxMd+71HHuYbAbOAL48VLtblU
|
||||
nRdsdwElh0S1td3bBgM8Sss4cQoestk6X15wvz152NoqpUZbQwtgieTHxNvlB6z43/4b374sqemL
|
||||
MQr1QCxIUtxv2p9+m6OowJ6DSzbsasNE3vztcQgqk41Ls2tgbF93+KCGU9zDii8h8D7fCQ7bUFse
|
||||
Wt9ChywDCWxxr013uc+BIC5+sOT24NEh+/qwrR0N+/6oxMLiIhl28RNgjbvgfuAVOwDFe7jj5PVs
|
||||
4oWHxAfhVS2Ik0qOxu+TTIJ2wR3IRd+p8cKcaIN++Y7v7jg2X51NBTXqO8Qu7jev/xAWwZWvgu1W
|
||||
GvpJnRWKErm6kYNXGP3wPaQyVOhuwrj5TvGy+fYRtO2vGbB90mh0R6sSHcpQ+4NXOwKoCQWR+kQb
|
||||
OA3sTuATwbM1hH/8+vDr54x3GqK25ViP3IA2kLwJI+r7UserH05hEOYPHPf3qKdnyfDhOVYwMUsQ
|
||||
ZjO0DPjz0wRfgAIEPJW8VIdbb2JX1fV2q16ECj5/SHAMpb6f8E2Cj06Oce71XT2LXsXDVX/iG+l8
|
||||
bffVC+6Xz2DzjRr201s/vCVupRGtMR+4gOXYmNjrRLnfoVwK4FG4XbBG3YAx/I189Ih3/gT5wydb
|
||||
LOSWYJvOZxwX+zDeRabfwNv43E/v9+4Yz3GQJeBQBGdiaweZcfQR27B95SM5b01cj4/hHsKwOt+J
|
||||
y7PIYzLpW8BAdyCyt2H1/L4cXbTFxWV6G6oVc/dOyeEh5TbkCBxFW+b20UIhUZ2VD+8af5frAsT5
|
||||
AxPf691+0M7HAH18LZ9258apOX/5nkC01WvsXmTb4/ZcnsMwPFKy+hXvS/lvDtd8iihh4QL2zs85
|
||||
Wv0k8Ve/Px1uLYQncs9IdivsXljXO/jV78/6f2wFFbr4YGDZu4vZfGNyg3a3bYK1/VjWs7tPZfi2
|
||||
OY7guSvYdDp8VMjHU4wP+/7D6JA9/R8+E0X8PABt/DZCVaTfsLqTjJom9qkAa95BvIWYgAezfUHS
|
||||
83UmZwF/45E0Ggef9rXA6tOt+pk3ugKWLTCCre0mbLrdPgvYu/lEDt9c1ub0Xduwetkmvu/7D6BY
|
||||
pg3iwxPEZ5C5Hucvz8svDyJ6Jfoe/fnPDLw6bJBE84SlfIZIFUuA84S2MSVmMwDRqTDR+mX0WH63
|
||||
E/Rbz0WQuYx19C3DD04YSS9yq1H3xkn70cwd7If7SqNJIQ8orK53rC/GPZ7qKixQJuJ4AvP+6I0f
|
||||
8x1BIszltP08Xt6MkbZBx935G4h5J2tcKYYtPCozj4PXQa/ZZGk6nE8XD7ur3lo282LC9sPrxOmL
|
||||
PmPu/ZjDdX2u/FCD+WGFPpoi8gm41f8IKdtEMO+Zie1LE7EhOe8loAm3cSq7+zeeMqmW4SFhR+I4
|
||||
5dGjL6c14asbDPz4PHRttkGbA4urhwBuJC1rV/0OJNaq+GH3J20Rq70rWfz7NoFOdQFb1CkA6IQv
|
||||
E4mI4gmLOvmQmQvF5vGggZ0ADBsOj3JDMLm42S5NDhPAG/YNPmveyDaOXUAj3iRYD+yvRkPwlBA0
|
||||
u4D4n7zQJku9BfDLGRUJstOnns+3LoXj2X1MrL8v/TJV1wiC2MymrcG/4hld6wb9+F6Rca1NRPQq
|
||||
YKqWRdzoXPWLKbkneI+GI1bB/pGNT0Oh0HzWZSDi0AL88Bon6MMB46JXTxqXhH0Erlb9xHbWOvFO
|
||||
yuUcrflb4F6eWsyOQ5rC6P66r3kEzJZM6lVY5N4mgGs+vkwotQE42zYJ492NCco7SIG131Di59nI
|
||||
Ru0GcsjDDAXiV33287x/tr/+wGFhfBhduspFyGpGrDxFlfFYmUJJSzVl2lb3Q80MMCT7TxW8/p3v
|
||||
n+ZNKn246IjNNR8aYr2r/uhPFvDfbNEncAGz1thYbhaN8cO2fYHjdYOI0QkWo9tQEuG9Lk7E7bKw
|
||||
XuR7DYF6MytiH18wZu/XEMGV/4mzTZKeCEMlI7F+vogubsuY545PCofUkIivy0bGVDu00aQdg4AL
|
||||
7lpNud3eBfyh9ohivUdAo55X0aoPsdkul35EOF8kVUd2ACwR19PdkHQYOXeNBNGnr4n4yRI4n04e
|
||||
uZs51nbYiLg/ejCSTDNbHt6Sg4PeCxPw6rNHP6qngrxI06lsjm0912G4oJWfieNeTLCTFKORTkZh
|
||||
YTU03h57GtscrvkMkY9+741W73I/PTTtjuDl0YjzOXE36QtWQmbETJjFTsJ10a/1dWKm5EIDl0tz
|
||||
wupnG2mMcBcIm+55wabnXOpVf7yky/VqEOcytTHdLAIFBym+Eqf+vLWpGLcXsOZJ5PoYTvUffbru
|
||||
N017ZfPO5o0ulOB7+jq4KLajtlyzTQB2+9uMj6p4ZBO3Qe0f/rxoEY0nfWIX+LXCO8aDX8ZfD0YB
|
||||
etXoSvxMh/Xwyw9n2Z+xEx1Gjb2egw4Ew7+ReJZf/YyF3QlcvsmCHUi+YNFhuKDDoYgwTptPNlzl
|
||||
Q7fXy+Q9NYs8Mpaltg/Tpppx4Ny4bHRamMDerxbsfJgBlrQzEogPx3oS1BfVSDYTDlqfvTZt0uLI
|
||||
OlsAkuQwySKaKLiMdHRUf3gVbMMqzV6pGaiQOrsDCVChArrVzgU6L/cWh18ziamlXST0y7sOw+J5
|
||||
3HEfF2CRs0PAPSpFo2e8r2AdgPuU7I/fbM2PKvgCuU3MLNnEy3F7paioruY0uhZX08MDRfBTGA6x
|
||||
dEvVCHG1EKIuuZNc4/SYOzxQCNuncZnEIRnq12coJggUeSLHKXzF86ovpBMPD3jl35oF3sOVVj6f
|
||||
5ptnrvU4pODEbw7EBIGhcffOyX/Ph5138/DGjLcXyBPluPqnJRt+eeh3XzZ//A8POblCa56DjfmK
|
||||
vPmXfzjO5028BI3Z9AkvNjSt0iPqMATZ0tRq8Me/Z/N+1mZn03ZgaTuF6I9K8f7orTxTNGIbfsro
|
||||
qj8k62zEk7j6NbrmlcBCJ4uc4b1k7KTSAu2sfU/8seyz5ekMMrCSvJuk6Kz2K54OkhDWya+/a2Yc
|
||||
kwGeLoEbtPUDr/ogLwCsxnDa94UX88H+kQKjeMbEDb+CRu7XPYVb39wSfKNzT6xTKMINpQrOP+iW
|
||||
8S/8SmCbI0DkIq2yr9tzL+DGl5JYnzplS+7ZA8hsWSDG9tOBxRaYiMqsvJHfftIfvt7mgvrH33Ka
|
||||
xRZ4FEeZ+NEANBrQVALuQvc4QE4b0w83lnB/4Ayino5vMD0DqQU/fex07blnVb2UqHTpZd2v6gAb
|
||||
+TGBH3koiG5Qq6bOQysRUr0vtq95Ww8XQiBc8yyiULzJ1v1AHTHXtsjhdFV7zj1OOlj9MTlSXGSz
|
||||
lNsF7Hp5IPgJo4wVh1MlrfgYsO0gZZN+s0z44/tz/pE8CndBAJODoxA1Dbf90k7HAK3++E/ePA+l
|
||||
lsM23wK88q23PK9ODuVjsA8Wy2+9uTvBAn4W+MHGIW0B3VycCvDMHQIKFK/ffcwxgi7HN1i1/Fab
|
||||
7Te36pHxg+W1H5fVr8Be6p/E9L0xXkbpuEHs+yqxKX+VWth3WQMeLH7jIMhcMNMNDVFC4EwiruZ6
|
||||
yieEk4xGNrD+HjM2B5/rANb8mhjwXoJZvYAIrnkvVqJmrd/Cn+C6v45dvaHaPPVRDtZ8bR0vg5GT
|
||||
svTnn/+9X/jbjwdPOq96K8mE11cSYY4Hfb3/qZ7kew8hQcGLGPP1rs0LF8vo79+pgP/6119//a/f
|
||||
CYOmvRfv9WDAWMzjf/z3UYH/EP5jaNL3+88xhGlIy+Lvf/59AuHvb9823/F/j+2r+Ax///MXL/05
|
||||
a/D32I7p+/+9/q/1Vv/1r/8DAAD//wMAbF1vq+AgAAA=
|
||||
H4sIAAAAAAAAA1SaSxO6Orfm5++n2LWn9im5SZI9Q0BEbkFAxa6uLkBEUOSaADl1vnsX/t863T1x
|
||||
AEEIZK31PL+V//zXX3/93aRVno1///PX359yGP/+H+uxRzImf//z1//8119//fXXf/5+/7+ReZ3m
|
||||
j0f5LX7DfyfL7yOf//7nL+6/j/zfQf/89Xckbi/UPZZxOtWfDQHNrcjxw2hOjJN9zoAvyTuRGUlV
|
||||
T9NahrB6Dxgn0OyqvsmqDrm5AKlaYacfKd86cCu5jreZq6Ia4JiZ8P7EFdkJkcE4P0kncCxiF1vv
|
||||
61TNsaboCLKxwApwr/Z0eu1r5LYfCe+b7WkdHy5ovrgC1rbFbC/qronk21avyEabHbYMe6mBTVwJ
|
||||
2LmMijZ71sWAb5vzqaWWhU1sx4jkmxO/vK2xe/UTSt8ejCXpi0P/iwAzToWBJqUR6dWTylD4it8O
|
||||
hnJxw17/9tjEudoCu3OqUs23hGo+aEaOAk854zg6l9qUyhsTPJetRIjs7W3B0WEBD+0lx/eLy8C0
|
||||
bZsGbp/+SB1yNyuhLx1P3r1zCasWS+x5R90Y4OtxQ5W7MGpz2KUZHBy9w4H0cdnQgyuEu2SMqDYD
|
||||
1abK9qajvIU7bK3vf0b9XUaSsjvj25an6fKdHgL0vjzwIGnznmvUq4CGyhmw+3xpvdA6CMpu6qn4
|
||||
4G75dGyU7A2E6G7TaLAP9jLJ04KyqLxg3ftaFTflL4K8j2LQm7n/9DOvVDFKJkiIPL1bNnDqp4O3
|
||||
FzFoSOnTpkKQXYFxfZ9waH2+4WLqXQn7TLHwtXv0GvOqpwCdg7Kj1qvbV4KqiwQdVPwgY7qxAPO5
|
||||
ooaAfzyovpEAm9bnh/EObAkUN2JFuHKvoNvVOlCNQj/k6IcVsLgFuid5z1c1bwLfRBcBx/T0kLiw
|
||||
bEUlQ4tVJfR06YBNMrNtEJpsg2oH7aHxHphj9LGMEJtFYNlifQYFGJ2ThQ+3+dLPrH8YcnbaDh48
|
||||
BXzPfCAncPpsUmoZF48tzwO9Qj0vVfLVAafNT5h4MLi/A2zYy8SoNU0B0i705rEW5WB2pNSAF3/H
|
||||
YafXXjZ/r+cNPEb7I32U3R0wWJQLGi/HliqbyWPsUqkd0vXpSf0mJPa8t4wANpal0GczBjav+5ME
|
||||
mRYZ+OBlWcVXRHnD19CY2JDGhAlJw3tofZ/4MUeOzfGDZMEM5cybFMTC5SvSBrw3RYCvh+oUCiZW
|
||||
GqTSOKH5qXJDfic8PWhtOw7vrZfCxHzudcjVeoMfZVIxsazuGYyL7YsqiqYD8jx8r1D3owKflRKH
|
||||
yxI5NVzOfYb17Kun4kz8BD3nTeEtwy3quezFxWBrWgMB0x2w6bs5B8gq/RtOsFv0U8BoAYOPFeI7
|
||||
bvRKqPZMQJbyHbH9/hxsZteJBbkvOGB9Z6S9YJsmB7NrtHi83mUpJz+fBH5ki1Gj2AchcyxHh2Ve
|
||||
Hch8l/xUtOvARDGFJsa0XuysHIQCfjV78fhHddWmLCnf6JpFe/o8Mt+ea36IAW3GCSsG1JmoS08f
|
||||
xqZwpMeBCPb0MJcOTUmcUN/XHSa8X4UHF3E6UMVtGBt4EC5oY+oONZnQ9azJ+g7WOqDUzUVbW/hb
|
||||
KUM5GADh3e0lnVEBJtjC44NILyeuOADECT3EQKTGMCyMJfPdQEYFvzT0b0cmvBtZgWja2FQF+QgY
|
||||
6WQD3u7LFmt6zQNqbVMJWp/dmTrhC2kTz502ckg2V+xdZqOazxY1oH6YRW/hnzCcr+Nu2T2Ozg2H
|
||||
n7Fhi/4IArgISKFKHWUhr/uSBPo2bnAw3KKKr0lSQ5S1G2wR0wdtLLUyigv0oml5GrS5qPwc3IXL
|
||||
h6r+R64Y3191eBfNDvt8yoPp9ME63O6UMz3cNjWbk4ulgjSlLbXy+RVyCs44eN2qGtUqYIacuzdV
|
||||
8OiJQPfHlKZdKJ9M8K27HfbARQH8+7yUCJz2IvUdckj5DbYF8PAtATuBdUz55NlYkNu/T16RlZm9
|
||||
DH4UgKYynvhoc5E2ATXOUUHiA1Zoy6ds+94bkJ0p9ODb2mtiWtUNuueOTUOOU1MqES5B2UtmWN8e
|
||||
XK27NZWDvlP8wPbof3qypNcE0i+JaXxuRNBcc11HVWLXVKlsQ5sk8JlgWH7P+Piy+HQedtGAnndl
|
||||
T9d6ACipGh/8vsel3Sg9dxnoAKvFGEkvwijlzW0iwa2EHXwUims6Crt7Ae1dJtETrEi/nJNoAVxu
|
||||
v/6cZ/SCOICJcMJx95r7iU7nTo7lUceBm71TsnVPNbQcuODrgO1qPtV3E8L6plIz18qQLr2ugwZ/
|
||||
bvj2+GaVYJuKALfk0VDv2l+0obuNBiS9v9Cn8jAZL9lhgUgfLF7Xq9twts6RhJptKxLUl3Y/yfFB
|
||||
ArwdO9S9lIEmXtKJg+m89/C9M9xeUE1dBev8CJrENxj0ve3BELwo1uv7u5+en4CAVU/Q583a9my4
|
||||
VBwaMH1TdWFjOKEoSWSt5xSaXvbvkJ7OmoS42mioDuih4uuvMiGM3C29n5OPxmRFiNETlQVWk+Mb
|
||||
LNgMOFTVxUAN/v2uxLhuNrvP2bvQi9SL1Z94yxcQk00sQ0BT4iyA3xgeve1OSsU9btcAeQqVsSlQ
|
||||
CywPYdeBl/F6YvX+GMKhP/AmaC5kT0/T5RJO+l5z0HFcOuqgKbaXbGNnwA+jFl+9/MkWg0YOfN/i
|
||||
M74fnJ51Dio74B+XBuub5gAW+z5NYHczErJttF3PLMnS4RovnnA+WtqSDKcCzldBoUfBV8IREw/C
|
||||
zzGfqULbS8on7uQg2dze8K/ezZgYEAn+dKY3ZnlAGE5WDpspQ1TjiQ9Y8HxHKGm5AT+cpAunHuQb
|
||||
eHkernj/FF9g1U8ZusHHnvoyKrSh51wTntRbh7Uspow8hLlBYCg3WPcsLSVSmSqwQXyCXXF6g8kP
|
||||
9xDlenvxYF+YqcjxnAMPqQ0IvY5xOJWDUMJg3zvYHkcHsDU/g8McfIj0beZ0CK+Fj0gQRfR+cGw2
|
||||
vFVHgPnjy6jeHli/zm8DhvvTxIfxKIDh9EABjKQlwUdhPPaDu1dUeAy8Dus3WFVT4cSbf3/Piz1V
|
||||
i/56LjDJco/iRe3tOaz2DVyEreJJAu3AZDz6AML6ouLsLW5SVsYggshuanzKD59+Ibk/oH254bGl
|
||||
7XjWOiOEoHxeLfrLH0s3UQGa+uHrbWI5Y4vIWRv4GZYLAeJz1BbwmSN04T3FE6ezx5Y+bN5Qu4w3
|
||||
aurjPmTwcc2Bcfu8vLm+PvvpdXI6cFjOCFuvxxRSM/I9xLnWCe+pN6fTMzdlZOYNptllVOy5JkGN
|
||||
XNzsqaMhGC7nF3lDKQ8JmXrB6MXn5aHAfv8svXo8ydpCnWJCbyBN9HhmH8YKGHpILg53fJA8P6WZ
|
||||
u3OgaukCtvv8YC/BPlXl0NNnrN1Gaq/5Mt4dvLqkh0dY2FOkS1c4R07ugZfqpD3OPAWy5y73WiO0
|
||||
e+I2JIHh+06ow6VPbVmvh5GILtSIFCulrxkZwPuKwGM8mcB8AFwNgOGoP/8TipdU4mB0lHh8V5Gu
|
||||
cbC2rkB5vKdf/PaC1UQxmq+cQpPn/pYuezu+gtL4bKlmfb7pJN6Y/ItnbxvIbT+cZdP86SdvtuYr
|
||||
YH1X6DBnwwMfo2fEWCKVEhxvo00vbWTa8y8/9s23wqeuigDRX7cFKhxZPPg8PACbjokHN5vUJDJ2
|
||||
lV7oPnG5kws5pzbGGuCCJnZgHMUmzZ6HB2NgCQZ4Ui8dtad61JaLo5no6WbKT89Xc/X9QiCcOoy1
|
||||
wt70w+pn4Gdkoycws++n07XP4OYktR7juDLtPunxDVd9Si6Rh+w/ekrPCxUnE7dl/cnd1jv5BQTs
|
||||
rHp9aXzLh/7pcyDzSTRDDtZqhPxz6RA4wn3Yd1orAb0/pPjpmm06trMkI+My6zSwqrf9uz+4Pm53
|
||||
6syBCQRjKUrYGY1BXbSk6eTfjhmqcNxRx4mP9kS5m/nHb1gvpbWpzp0n5LmSTh3i3NLp/upisOze
|
||||
pSflRds/MvJYwO0+bal+J7q2pBjkYHu8fckuzX7/j3MZdmzvgR2JGJO+ZQ3PXEDIu5rkdLi84hgq
|
||||
Gu9iTE8dW66iIYDzc/GwZvEoZZwSxOgT8Zi6aOv0yzlUN0gU7ZEwn6NaczsdObnUFsubFapU/Jp/
|
||||
4GlbR/Qgpa9e6Npcgf1Udli1FTtk2wIuADvmEQfN+8NGLH10cLvBkjoTv68YWWwZXB7BDh8FDdsL
|
||||
3u8VeLPeaz3f2f3EarcA23AYsf0FTjgnF1VFGKQlGdZ4Zq+uCpAivDARD+YnHN5Ot5EPiVVhe7qn
|
||||
YL7zjxoGWGvp4TF9U7KuP9hsmie1pqPaLyoaBdhcxzu2mHy224z/1uDbPK+esGWOvZyTbIHJ41HT
|
||||
y9DRqjOQKMAL7yj0vOaj4ck20Z/5XKT0Vc2Pp2yhOu8DAjCuGPNaKQNldzCIfME3Wzg/Xjq8ed7G
|
||||
mzdLoE0WejXwMVYh3u9bbC+jmOTwvK8HT2CLbi82Thp4Fg42Nr92p03d7WOg8hlZ9LHGPzk2VgLZ
|
||||
eYTeHG9ejBHe92C5KC7OZHebrryhg8Ot64mgui/Gfjzkl58Pg2uGXCQHJko+8Ug2HKb9xHIwwBTF
|
||||
IVZkpGgCVa0Otsd688cviT+/LOwGSh9aUIY//Qdur8GgycQ9Qac0tzck8nMkGydzK/5zjBLkvd09
|
||||
jiPS/jtfy2Gh4OTkUW1Jk/MbWnHJqAc+V/bTKz9/TkB869mwxou06hGMe6aloguaBAhHZcTHMzsw
|
||||
PiOXCZz9+YsP27lhw+yLPlyC/LX6g7pirioHUhT0M3XX+j67masCV+kTuv+0ZTj5GHWw1486xuFb
|
||||
1RblnRgQnDTRgyZPUmampQpUOEn4+OBGwKK59lFWGFeq758lGz7yAoH4VMNV/xugfVsFQfBrHn9+
|
||||
T2N2ChW45geyLc93jduliSeLWh5iQ9jbgN8b5gSu2Baw1+SPalIfZx+CMdKwJbA5ZcHF8yBcBpNq
|
||||
HFeGLVwqgrAcWzg1Wa+Rb+so0MmXjBrnY2dPbXtRISqnhqCr0/ULVO85WNc/kdRzk87r+9oVNH1Q
|
||||
S91+0y5q7g4sdVZitc2jcBHbt4Bw0XzxxQ6NkOMOOwifiZB7vD251RxWpw62g1RiN91YbH6f5RKW
|
||||
nWzj/T5DYPnF1xMVBfacXQRmcCwUxKPzibCmbm22b9sG7mcrxunTt5lYntvy56epaeiiRrqhu/7i
|
||||
/6dvKoaOrQJXf02Tl9JqP32IHHEA9CYg1E/Xq+zLZeO31Fn1SZMOSQl3TjV7Wy5/a5MntBxc6yc9
|
||||
bnka/u4PfterdzmsxKZCORzgBlNXo23F4rrYoPOSn6neBjqb8k2UQTBeNezJvZlydHQjeK4lnwYZ
|
||||
Y4A+41eEBm9w6D11Dunc7tQBxSXbENZqU7gwNsE/PO7wMAhgOnefdu5y1UcAOSPkPqy1YFryPXbi
|
||||
Wkm5ZbxFsGsjE2uUPrWvV90EGaZvY62XFSP0FGXQiVyOAOMW2KT7+AX8XPuBsG1xtr8ntQlQF+In
|
||||
/sM3DwDWu10si9SN0yX9o8fod4hpmNQ07Ky+GOBpjhcc6Ec7nWJ5sMB2aG18aESFEXHYWXAjLx51
|
||||
Y+leTXKkxEj3rwV2LgUBc7uzBhC8Lx6+fzuuX3budUGnb+95UoImQM7vRUZHcDKx8zorQCDl1YF9
|
||||
ploY+/tTRWe+82RpXgTqvXsCmJqKDjpzPqFPcNM1evWMGHpxq9CsbIRqppAp8L2tOqxswm/Fhvcx
|
||||
Q280uvjKPgug1VFbeQ0syFS+3H6+E3sB3WEPvWluvlq7KOcE7VONYG/NT1xOJQ4a1eaLD1HqpayD
|
||||
voJWfY2tg3Ts5zzABFY46Va9JaWEMQnC5PGs8W+9Tas/Ar6g9nhve7nNZxKQ4XJuM6xln5c25haS
|
||||
5VVfEREGVzZT3ZHgs06e1BFffjj33l0FkCs++Hmsc8aeWc1BG4QXrBBqgPHHW0FHXtgjD6yt/KUB
|
||||
fFl6ZIPeTcVWvvqrnx6fiF97OguMQ8oiXbAbhl26fIc4Au2V7eghVx6p6BvxG9i7XKLuu7UrPoB2
|
||||
DFZ98oeXUNArMvpEIqbqbR+GrDraiWzYikmVT+KyZa0HYIY3kR4H8E1ZsoUEclvOJiORmDY9c0VG
|
||||
byBPWGuLW89OrlhDJmrFn/HTUX+VcL0eWwhbYSNx4QYWcZhhfPpMPVuqJYB7L79gw96WmvBK+BqK
|
||||
gnXBP73Az8E+Qsb9vvFYUuOUFtqog5VneRt0E9Nl2AwBlPbkiv/wb61sBPmguo9VjysaN4P9Ihcn
|
||||
/fXHD9Cfn1n9o8cv7d5mojLE8OWfNZwGpq5Nn1ZTUXjjLtjfJ3I1GNd7Biq3dukJS0k1dH4Y/PQu
|
||||
trqLqk39AVk7neO29OeHRludA4QS/oU1LxbsqS1tD+7Vo+9Nz2b88cxud1WmPTXP8YtNaz1BLz/U
|
||||
fvmCjcTrVSiYvEX1fTBXi6mXJdIPTCSC36lgVmcn++kVbL+eYSWSqvChUZoW1W9Q61vjSQq4+l9s
|
||||
wUWzR64QNnBdP1SnG9Yvx2Kq0UKFzW89h1QzTgtc+ba3W/0dgeo5h9g9PVf/Bv/oc1QL7/uv3jP+
|
||||
Ai8xOn93DQ6oPNrsLNUG/HZnRpXGyMCiJ+4GbjuBULN7nasJ1uoVXb86WudzrujKE8HKt7GFH9eq
|
||||
y18ohvhUfqi+h53WrOMhaGSdmtIm6KfIRNxPf3sd/JzByN5aAIn5ngg8PQ2wlAfTgtUcNh54DW82
|
||||
csUGwogqgkcbvwOj8H29oWVbF7rGdzWlGzWDTLsadD93Vrjk7dGDP78U3vwjYPWZFXA2u4cnd5fS
|
||||
niSx8SF9sjPWNrXajysfhP6oO/jsKpt+zMhjgqW6PWBV+oxsFgTXg70WbgmaI2f1M7YF1/kTSBwx
|
||||
pCtfBvFJfHqCnp2qRel7CUhJw1Hd1s9sfsTNVfZcWfeKLDBsQRMiFR0q6YizU8BXbW87BJyfk0dd
|
||||
R5z74Z7nhtzcyhwf2o1SiZNixpAYS+imGtjZLH22phybexVrTX2ylyXS39BC3A1bh14IhxmcJujV
|
||||
Wk7NL69WLOKWGu2qnnmiOjjhopDJQ/e+bgm/+uH5t77DYuJ+er0Sk6NRI/SJLepxGPfCztA34H39
|
||||
OPi4jRuNFHQnQF/OV3/wWPpxb5gLPD+sgSqcrmhzHhwHtPI/rEfOjk1Hk6rgI0VXbxdErGJkdxl+
|
||||
/T3qGHVdsaWSfbj6CxrVgg2mH+/45Sf3E761WWvlDhYvDLyN3DfhsvJWuOpP+uOjw+GNjN98vZXn
|
||||
VcvjcxrA9XG54/vYGiFT1D6Xv3PKkyHUebby/RJ+cSF6xdFJGCfUzxKu/SciTzepZ64UGX/8gj40
|
||||
Q0h+vKMNrS0+wSsKx+XYe/DHe12uC6v5c4zin7+mhp6dej6GNAZttM28r4AePUFTm0ED6A221FKx
|
||||
hW4or0AT4z0Nsxgz9nvfaaxTet5cLDAwPzbgeKO2J7+rPZg6Pw3AK5A++HIuPXvKPuYAY2pr5C5E
|
||||
NaBldc4RlhPLA6u/4rdK4chKa86kuwybcHEbEkPeSd90n0WnlJv9rQ/2XnbB4eOJe8FBXQP3iz3R
|
||||
Y/ck4avkkxgsJkH0CGmuzVIwRmDN/1653RvVMrgYQrdeNOpx6ialpFsMOF4OLfap42kCA9YCFQZG
|
||||
b9mes6qX7aiE2vHF4dM7GsOJjocI8u9rT9XkJjOy8kV4BLbpiQkY+zGFivPr32Jz7a+ROe8GKIqn
|
||||
EXvodgv/+Juo4CN8qcpjz9J6gSh0w4Eaqry12cGfJbTySqpYwZDOxL4SSM4vTF3iyRW5ve4xvCHf
|
||||
IIut2ClrPVMBv/5yuMSncIzivoRlv6vor3/Ef+DGg+rmc6G2c1p6Um+UAP301Ek+Xnr24vcx8A/Q
|
||||
xqb62oSEm4YGVs3tRGQzUnr+51+l/EyorvNdOgvopshXz8qpAjqUjlVV5oittdA673DFzx5SYL/c
|
||||
jmSWt1W6/J539U/klUVtuPYbLTglSUIxGbRKMIRa2QH++cBrfu6XK9tHqAucI31o22c/XYbvAPv9
|
||||
o8TuM1rC2YTeG2xMw/HEJDdWXn8uf/kWa8fh+G9/a2o6xlp924fTMIArrIf9tPJ7ISStKeXQ0Zzs
|
||||
3/z8Iy8bdNbup5VX64A7mlQBn1BZaFAun7RVQflGa/+F8O1Mw9mUTjr8+7cr4L/+9ddf/+u3w6Bu
|
||||
Hvln3Rgw5vP4H/+9VeA/xP8Y6uTz+bMNgQxJkf/9z793IPzd9k3djv97bN75d/j7n7/43Z+9Bn+P
|
||||
zZh8/t/j/1pv9V//+j8AAAD//wMAYp9Xq+AgAAA=
headers:
CF-RAY:
- 94f4c6bed82cfac6-SJC
- 94640d46896fb6c9-LAX
Connection:
- keep-alive
Content-Encoding:
@@ -772,7 +769,7 @@ interactions:
Content-Type:
- application/json
Date:
- Fri, 13 Jun 2025 21:45:41 GMT
- Tue, 27 May 2025 08:13:14 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -792,15 +789,15 @@ interactions:
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '457'
- '196'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
via:
- envoy-router-6c6c4dc575-7ps9v
- envoy-router-5f689c5f9d-svjx7
x-envoy-upstream-service-time:
- '459'
- '200'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
@@ -808,38 +805,37 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '9999967'
- '9999982'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_eba1229193b2c4c623e7661cac3bec77
- req_51978202e4a91acdb0b2cf314d536032
status:
code: 200
message: OK
- request:
body: '{"input": ["Example: Drawing Pictures(Math Example): An example that combines
art and math to explain addition with pictures."], "model": "text-embedding-3-small",
"encoding_format": "base64"}'
body: '{"input": ["Pictures(Object): Visual art pieces used in an example to illustrate
basic addition."], "model": "text-embedding-3-small", "encoding_format": "base64"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
- gzip, deflate
connection:
- keep-alive
content-length:
- '192'
- '163'
content-type:
- application/json
cookie:
- __cf_bm=I11FOm0L.OsIzhpXscunzNqcbLqgXE1LlRgqtNWazAs-1749851135-1.0.1.1-R2n01WWaBpL_jzTwLFUo8WNM2u_1OD78QDkM9lSQoM9Av669lg1O9RgxK.Eew7KQ1MP_oJ9WkD408NuNCPwFcLRzX8TqwMNow5Gb_N1LChY;
_cfuvid=TSYyYzslZUaIRAy.2QeTpFy6uf5Di7f.JM5ndpSEYhs-1749851135188-0.0.1.1-604800000
- __cf_bm=zxXbTMyK.67_c.SQXNivPXTcfsIBL5Vl1Q7WXFcTgxU-1748333590-1.0.1.1-ArIOxtxz6HMOCmEGGFc.Hxs19gY1LkxaxTZYc9hAE7zSdmh2fCrczDquGUovgGjYHvCJ94TxWQTCVlo1v7kDhnnrF0jwHy_U_LaR6AbA.94;
_cfuvid=4YFW5U1WwjrbdrZ7OWIvzgymvtGAnmnPEu1zeEdQsGg-1748333590310-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.78.0
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
@@ -849,7 +845,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.78.0
- 1.68.2
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
@@ -857,124 +853,124 @@ interactions:
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
- 3.11.7
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1Say86rOpeu++sqpmY3+9fHIWCzegkGkgCxORmSUmkrQE5AToBt4Ffd+xaZS7Wr
|
||||
OmkAItgeHuN9n+F///Xr1+9XXp2L/vffv343967//X/ma+WpP/3++9d//PXr169f//7+/q8nz4/8
|
||||
XJb35/X7+Pfm/Vmeh99//5L++8r/f+jvX79TObuzMZvWnrS6PSX4DEJESNScJ3GWlj6cOnZlHai2
|
||||
SLo1rg5NN0uIpecmGOyh1KHsZArHBXrn/O2PieHeAkGOF88Gw5MtF7A+dSkJmnyFqh7qKTA3wQfL
|
||||
Hj0g6fBjCoOhriQh89u8H+30AH+azMACKD/TVGx1DKc1lbnpJG07jeN4gmXtR9ztrTAX2BGWAUhM
|
||||
2EtIARqk0BuBDKnC/Wsdta9UKw+wwozihW3FVaO+PQHgsXuQ0yANuTDKUoHXoEvwpFn+pILhMsLL
|
||||
G8d8E3QciSVBrqFEmcrRqV0Dnqz9EWrXmJDt3efTdNr5ClzZNCahsFNPbgoBjejaeSQjEAP1IekB
|
||||
fGA/495dLvLx2VsBeB8oI94HPWORB8I3BoQvxLmiGkklcCVY+/GK+A7YtOoU7i1gNVmKBxqvqpc1
|
||||
mbUBrt2DRyu0ntfD1OEizSB+TrYGxkK2XHgeWMzdQ+W06tsfU8NcshX319OzHdFVC6BnBIJYDVWn
|
||||
6WNcargD9Mj3u2mbD1GFDnAbY4t9TnJeTSXWfeA9fYeEn4YCiSbLrVE59EN2o+egoT1rK3DQacUv
|
||||
Vytpp0a2dMNqaMqJ3n6816FbbsFuYjZ+XCXcSsZSXHWr89f8+GodpE7L8WRcX8GCewbatJJRlhI8
|
||||
u37ETyHNK9UaMgzfBB/5ZZJ/vMtwHQ+G98QOKYX3AoK/xAHsfGyxAE6rXC2oL8E5vjhdIFRJSV8e
|
||||
YC9YyX5Qs2hfNXKhcbQw+c5PJZ2spW/4b5qTrZ4EngCP5VVfSN2LxLekq0Rg0NTYucziXiYN7VgZ
|
||||
vg+PNiPc204bNDTFcgFJHU1YJRBPUkNcqB/ZqBFnKnAl63tPgbQeVWJp9gGxzzi8wHOV9WQzeBZQ
|
||||
X0d/ARdaHGHwbGAuvD1NjaELd2xRSBPgR8MaoR13JglTq/VEA0VkHIoO84vnvavBWi1f0I6ZyffC
|
||||
207S2y4/0Lkwk5xba5rEkiIXbq4BIyWkZ0/EUFhGw8I12afUqF6hYXbwqrAEL6W8qV6rnVkbshb6
|
||||
nN67flJ31PpAU/dXZLetRf7ZO9oBZqsOk8StzFjeKcsXRMnY8u2nsuMRN9YKXnhg8DzPn4gbg+0a
|
||||
9OrvudNXtTcMV82B1jR2fMUTt1UP59HVW8ByJnXAnNTOsBYwCOiZlI/41UrxrTzAovVDkmwSH4jQ
|
||||
MJlhmmxFNnFlxWqvjCmU49Anqzm/DJ8GvUBX4YKpK2Dlimz4B73fdAWJVc/0lBpqD/jTUIO4IQXt
|
||||
5F/HlbE6ZDEx13VbTbfycgVjSyXiqeCZD/YFBca8X/mhaO14LJl2BdHUeTzC8RrJ9/LswwvCMc8X
|
||||
deR94x90AS74rrQB6tGx1LUn6w5811hBO7J1ZsL0iPc8chKvVT3tEsF9lBXMeEoxkuzyuQTrc/Am
|
||||
2ZZq7Zi//S18JLFJzFW+RjKxvQPkPiu5Rdp73GUP4Rr7MRh5AJTLpK73Fx98duxENo7EwPiTajpM
|
||||
eaQSswC3eBBYcwDx8BZPSaN6/UZFAcBxt+XHBQX50B4GE/4Y3YvvE/RquXPWFvCa0xcb1K6NJ+Dr
|
||||
d1gMAeS20xyQ2sMxMY75qHHrlPTe4BKUGoSMI8Fv8I5F0tMDvIXBD8/uVd2y1E4FLFgA2LD1PaRs
|
||||
MgThzqZHctoBJ57wOK4Ms6QJA3drmKZmf3nox0NH+P5Wvdq+R14H3SOzuU2bQyyBh7gb14AlTGss
|
||||
0fb38ozhCTNC6GgfWv5qsofRnkKbBxb85EOPUAfNiVISeI+f+GM9hsDYl2Lg/o2WaBgm1OnAxw1W
|
||||
jA4D5ScddGOubxj80KIVuKEpPNxHjbvQCtvukZvQyKsRYG0piWm6aL4Cn3l34GtJTtHYWkNn+HE0
|
||||
kNUIVkg2DgM29g7ecHrzUKyi5uLDy9uPebKVs5Zdjj6G24dvkSOrRTssdunVkBL/xskLud6ItPQK
|
||||
U5nev/WglY5lWRiwYw9Ouunjdd/6bEU0ZT8D+KBRoRcHTrfY46ZntW0XKUvnO15SpMkYj7JhneAJ
|
||||
BTrJC++ZsyXWJAhaqhOcJVM7qreLpftXf0PSM6iA6F1xgOtz9CZemQzToMjnE5jHT+b6EUtp747A
|
||||
W/sO2e0TkXP7oEFwz8cf7o9oi8SrN6FhH8eO4IGeK0Wl/hnkwQiwmsR1NVTvFMOPFNtMP4BH+yr3
|
||||
5R2iAK/Zw28rMEbKcAK1HK5JMOcPkcv0rN830Q9fnR6XaszGoTD0XabxVSR9cvbuhpdxVbqEKalX
|
||||
TUJPxBXaHk25tW/USpwyUzJ+Nt2LoLbJUP8AXg3HRSbxTWEfJ/Ud6QlI1r5PnKjGccfengmMBD8x
|
||||
mMcvxJ52sOuo4LEh+YBdVsPDOFZZzR3RaPkAh3MB4C0O2fLcnOJxUfp36LbYJtm5Jki6OiIApxUO
|
||||
iLVvsnaS3lkH0qzb81VCZa93qZeCcxUZ/I9eWN7KEaxv0Yfg1Ht74+Z9foCjQmuyZcCOeXK8HIB8
|
||||
pAqxjt69ksQaWYanY4cTinZgupXZFWZDVnGLWftYkYUmjE5iBXFBMuSTfty7ALOsYLIWV62spJoL
|
||||
TSP6EPQp/FY22bI28KrbfuPbU8VJP8HEHhWmPmOrnWT50oFDHWnEb5KomuhK3wLn0pkk0Gp3GrIh
|
||||
tSBNsxs35/w5Wnb6gPN8kb0EtlNX7WhtwAN+kH0EtvmUDdnBmPcHmb83n4aHfoDIyShxCou10+6W
|
||||
BaB8CcgPzdTETKGZo78DduI4quN2mvUCvEA/5t4VbGLlHabQuKR+jJ8PUFfv60MsgV+PA1+vq2s+
|
||||
13MIfoo4ItubRKaxoVZglDWOuBnSNJfVQnMh32QjQQa65+Pqdr7D4Rjv2HVML5PYeOYL2KvOJFlh
|
||||
H8HYUD+A8mOuTzn85CLclac/evF0pWAarpp3hznIGo7TaucxHA0neJ+yD1mPdZt3/C4ORi1Yxm5L
|
||||
y2vVg3E5gGYITb61E9722tpTQLwbZbbY528ka8dzAfQuJNxuAIpldCyXMCP+HksRzdoplC8JhDjT
|
||||
+aZpjoDd66GG8pndyMpPf9p3zNHJyAZa8bVSXcFQZaiGoGQNXx1t6g3f/KU7ISHxqb1Nr/PaXYJb
|
||||
wRLipB76R88ofuzztZevcqkvvRUcjuGOkFMtxRPaWw9jZQZvvm/Rqx2txxAZqzw3sPTj3cEw11Ng
|
||||
rvEKs0Q+x9Pqri1hkI/TN39Vk/rSC1iwCHA7qiqvD0vvpKVjsCTOJLH2pdByhPnODzhm8S6fBjtj
|
||||
8LrECf451Uku7gcR6FpMtTl/heCpynsHtFcqiP8CWyRJjefADIolsdtiP01249dLkmUldyQfT+xl
|
||||
p9Awsjhk5ZzvpjLRMcQ5Lbj3orAdQTdIsDrSD3fKRkO9YaQFfC8o49uPZ/8zHngOA2JHoEKDeHod
|
||||
RB5b8VizfDBtPsMCugvfJvRWoZw9+rP+jR8entCqlZf79Ay3Iz1i6dqoYODUY7DZhmt+0Lym7Xap
|
||||
MI0fQiF3aTIgXhd68L2PoSVDMGzq5QvMeorvR7kE0urGJbhvsoINm8KdlKRPGbSWWUo2BtW9wdyX
|
||||
SxCRQCKI110rnPdTh+PFv5B93Rhg+upt6xK0pDwnsSd2aw6B/ehMErtUAR8n9Bbwumcxg0H+nIZ5
|
||||
PozpyK58v6FlPF1H3Yda79ckWtdt22k35BpoFbX8bMsLjx1DfwuXN7/mluOZuTrYFwboO7vxb30Z
|
||||
fX1IjO0xYsRcVOtqaj96sgRRGDCFy0ePd4YFobfGDh4f+S1/7WQawU6LHayHNG/bV3Op4c+ne/ET
|
||||
skEuMpuejSQLFF708daTLkd6h9Oxu/JwR+X2A23/Cn1VDAzs6qHlspx1+kllAdlX1uRN0vvSAdcL
|
||||
OFlDqa3E7eIu4aaMerJxUJ2Px9B3IbajkeOnfY6HKvMef/QPuVpJpZ6E7sKN3SFOk6IHYth7FrQI
|
||||
pbg9Uhj3hSmWxuccW5w4+a6Vd5RCiIr8h/ugeqHHzThLwDDCkB9nvc70qybBeAgUNmrTuhqL+3CF
|
||||
SpOpDMz6XEy+OMMCdSFrhFe1E20uJ/jY0I6vb7Y8DdM4WLDdhDbxz/FmUiw7reEjxAdOAPp4Ezzo
|
||||
d2gYccja2Bqq9kfR78A5+yY52B2pxE+wfADDZE886U2WC+ILC9ovbPLYbhQwLAEaDdWO92TrFqRV
|
||||
O6GnMKqEzAsfbaZ+c15+DCR1axKbYF3JaI8iEC98j7g0Cb2JlFkKTnUXkFSmx1hIBmXGc9sdeAgL
|
||||
rxUUCxdKRuwRfGl3iD222gfckuyN2eyPepMtH4Z+jAlZH9tV+yplV9G/epwGVg/EwTYVo3OowOJT
|
||||
f9pXWC8t+EhCkzi4WU6c0f0KipydmYasACnz/OvKid2Z4J3bSpJRdvBc4ojYwquqP+sJawrI3qkj
|
||||
MCVrX3z1Hj9BGSIOIv0KbTvoeCqaY6yO9t4CXtQ5fFvbp1aY2lMx7lvxQyKVKq34KRGGzTZec6R6
|
||||
d28MBt8Ek5rJHG3z29QXUCjwcco67hIpmNTZn2jSRBXilq3TqsfdpQBP4R/xm9VBxfE4mnDmC4TA
|
||||
WprEun8q0H9nOVkn4FoN8X05wjLEEdkr1hRPp52lGMqSqhj66AmYE6LFl59gOEpRpQhJ6/SZDzBj
|
||||
1W5nfWFvAZrwmq/U9orGLtA6uNoeDb43mzIev/5sQP6FnDxLTM/A0h3YVX7BT2XrVCptLgfwVEJE
|
||||
bJLsK1nlngDBnpb8NCAHyMv9+az3l9ghq13i5qKqlxHMWHbn8/y3/d1DDwNrbMvXkSy3vL9lH7iU
|
||||
WU32T2nyhGIgC8x+FP9YdAFGvvYPsGHxmhPVklrB78sDdG4Y8VkP5tOO+h+YKQwTM5a8SmxdYcL4
|
||||
4Hv8mx/5vfR1EIf0StJZf76ptBTgZI06HtTOixXroUVgT2jOV6HzU3G/9z9wcx57grjvtwox0tf3
|
||||
eziyCj+W4hs9QHiOA9xdpDGeortu6eaVUpLCyoqlT0g/UDLZjViq38cz74lA+mF7vul9ks/+2/rq
|
||||
F75yZWmansdLDd3dyDnSig6IJfVczfayFH95yfuspa6xYkeDKTefVeKiL33Yvrqch++ijefxv3Tt
|
||||
hGv+9ePysRaKIaZwx50QVdPQfQbTOFlC566TBNNQB2IBP0/KOdnUiTfH4xLazswDRnBFwzVMfYhg
|
||||
RjnJ6AIM4oY6Qzuwmtsl7KexK/0EGrr/5Nms34ZVsazB7NeY3sUOUJ6J9oHFTgBmbG2jHcejf4I8
|
||||
CbcEb9udxx0jw9AMjj98n0gT6h8yekA6dHv2/EH19PU3BuHdloc0vlbdJjQ/xqzXsfGmBui2mfmB
|
||||
Xz2+y5oTUNy7lhrKJfa/ftj7iL1/giONXYJqcPeG2f9DIwtDLF62HHeHkBawv8cbvomBFfMvX3PP
|
||||
EScWk1UwTDYajY3NEAmHaQUkM3cfMDh0O35YQdLKBVwq8GWxI1vM45EPLSqMXdlZJDp46+/7z4C1
|
||||
4YZj1u5idfMZoGHSbsUMrd1O4woOGF5OozHzr0c1/ND0/ief2Z3PkOje9KN/edPpMTnteI2Gxx/e
|
||||
OPPU6ss/wSn2A+74cQX4/jGaxuGZVdi22gqJRYZ8qLbhHn/eMpg6hdJRF1o2kRVpkkn4xzIFn9vs
|
||||
x2vnAobQXQpgXceObFeQt/3043VQ6fCdgZmXfv0VADbVyX4tjaAXycwvim7LzZ+k8zrwEFf42fon
|
||||
nn+kECi7YkiNmQ8wsJLClt1rrYZhkZ2//CPneDmaMIoCmWRmwabxOqQKDKrsjPVnPaD+GqYYln32
|
||||
wEC2Rm/28xJsS8rJ5oVqMJ6l4WpYG7bm9j3ZA1X0tgnm+GVgqp7xcNqlLnC7ziZHm+rT+NasyLii
|
||||
cTHzZOKNtTLUALfRyPoEbSslfWhn4LnZic2AH4xGaN0NTeCG725yjsb+baXGeMEXBlDrALE0l1d9
|
||||
9res+wEvJMq7mDNvlpNdS3NPsFSc4GZFDxzFsgJGa0I6dPzZj+ztY6WMR/8AtrvsQPZb26iG4TRY
|
||||
hpPRjLscPNqpXl8eOl+HW251wARqvM8OwB2FmPXgCY1XX6u//orp9/zRdmOMHOPU0ZrJcddXog+W
|
||||
B7g5YcSGk+R5ynlv+XDWewSpnomU5GZZ3/1N1mtwjcWzL2tAq0hlUl3v4/4zpAE0l92KZOVkVaop
|
||||
DTqkb3ojWwM07QBipBh9MfO9UD62g3NYfv7sJ6zL52rodmkCzVZ8CHKpMk3xa7SgssxUbhcTiqVa
|
||||
LAOYj37IN5OtTcOrSSNIRrbjDoLYG3fr8wsuUAY5SpoMqRvDf4BZj5JdaeceR6sBGpXKUrJt8iae
|
||||
pjLrQN9kA3eDevBmvxgZkdu5hFBJyrtdZX7Al2cMRqN43XEpXDivJ7cM+YAYOQ8Hbda/3C7yKmcJ
|
||||
1s5wtQze5IBR4437o2WC2U8xlVENCGw8F/BzDi0+54tJfLplCud+BscLS/KE1CIH5mbWMGDlG28q
|
||||
qKWAdZYvmAHpArGTfB6BdcRrJnLo5uKbXwobh/g+60M28xNYy/GabJeT3XaZ5o4wsSIFd4KW+bx/
|
||||
E7hrIk78mR/9+d7IpBe+d71XPiVrS0B8iEa+aylAfffRTIjWouWXvXyehoZ7NdgbbIOTsdjnfUHR
|
||||
FfLQP5NAp3LeWzGKINvGG+LO+YdzrB9g0tPrzIO31bCWPR+0ExV4lKUOcbbVTzDIxUSsrbVHQ9mn
|
||||
W2PmQXiJUYN4ebMk+NEzzr/rN+x7lMC3TRnBZbOIe10fXANH/pasXDmZ+1lUh7N+4Nsc2N6EbYvp
|
||||
Mx/CL9M+ea0z+DU8pZGOQYY23miH5xr6q2D4U0+6iNITfEH/SNYzr+fM1E9fXs59w4rA7A8VeL76
|
||||
MUcPq2vFbaBXMPBs+vIxb+ZNKbyDjpJjKom8m+u5cQpZQKx2MluxsMTD+HHwC6tz/uovk3eHkxe6
|
||||
HDVd1/bujS5gkQeAn3pryAd1fS5gJ3UFFjMfm/TmMoKZr+P6xzMn9TP4ENKU3rhZU6XqT6pXwNnf
|
||||
40Urn/NxUVp3+PVb3sxj30gSNXxZ3ZF4vB6mrz8FeOlvSdb7PO482f1AGWYKnnLJz6f06I9wcOId
|
||||
sU7J3vuuF5j1NTaYXQJRFAJDR4161rh+Xz2IkX5guO5cXmbtKx/01eAbvjoOfHOVNfDNV3Dquisv
|
||||
YhsCsb5R/8t/yOkshUgw7pnQckXL5/wOhN88IRyXscumh6widrS0K0STv+YbP+GIffXnHD/f/t4k
|
||||
W454AXMxfnAfNmXbK3J6gFyPt8QO5EMu3SIRQeiwB9lXcllN+8e4Mub+InfUvPb67Wf5guc0exLk
|
||||
T7dpzk8v2KQs45Zf9KjTT+IKlka25OYx8YB4UBd+9QpZ7yhtpeWtFHDuH5CNXNVgkF1tC2mJfY5k
|
||||
OYuF2Zc+mP059/be05t50gF++1nbQ0cmttwOijHzJLLyHpd4mPnId3/iH4p209jL5wVcf/IFWYN4
|
||||
Namfw5gY/JONTFXlI5j1/woyKRs5dpuFN94HyzeY6ZekPNaT917Ug2/c6uCHu13sTHJOz1tYoXBF
|
||||
MtNi03BOZn/adSesftfLZKLW9xnbYH0BHPCGQ1p8+zk8uDeyJyrVXeq21pn8+GgAYmdJu0L/EQxk
|
||||
jW25FYKJE6zScMXJNpHil4KQa1zz7MWtmZf/6UfP/hffqybNu9lfAenFboRckiSXs4tXfP0UcTyA
|
||||
WgnZzz96jNuruPLGe609YJhGEpYuRe/x1fqC9W8/Ot7VXTz0irYy8kAAtjjM/bRjSQv40EOT1TM/
|
||||
Hi4TusPbXfxw+9OiWN6oKDKyyMfkWCSiZXkxXA1TxyuynvWcbL6WNZzqTMay0aJKyjRXwPn9ZPXw
|
||||
P+1oj0NkOEnUE6cocKtcfe0B4AI/yNbyGjR6kpbonRY6//CgdLVU4PaBLT7zB/TZH30TZum4ZNpb
|
||||
ClrpsneXwBqjjqlzvLcLMV6hcc0g/tZPvrtdAvD7eyrgv/769es/vycMHq/y3MwHA/rz0P/rv48K
|
||||
/Ev9V/c4Nc2fYwisO13Pv//+5wTC73f7erz7/9u/6vOz+/33L+Wfswa/+1d/av7n9b/mv/qvv/4f
|
||||
AAAA//8DAIV5SL/gIAAA
|
||||
H4sIAAAAAAAAA1R6RxOyTLvm/vsVT71b5hRJ6O5vRxJJ0khSp6amQBFJooQG+tT571P6TE3YuEAK
|
||||
6HDfV+r//NefP//0eV3cpn/+/eefthqnf/7b99o9m7J//v3nv//rz58/f/7z9/v/3Vl0eXG/V6/y
|
||||
d/vvz+p1L9Z//v2H+z9X/u9N//7zT8ilLlHq2s55S89TOKWb6C88dWvqEyrDt+NzfnEL13o7XJAA
|
||||
I/3oYMWmTDS7HfWRwR74+XZYjkCo27JElbiaOPPTyt3G6d3AQORG7KTLTl9fDcsBpGgywYXe1mtk
|
||||
vi8Qvy4GPj/aiztA5JpwNGOMD+8eR2LPmzOc3+yOeHvJdEWK3yE8nLTLzC10G1bdAA1Mr/Xdf6qq
|
||||
kU9908uQDPWTaEcC61k6cRlUH3nid6uq6vRzKH0AfDHGeqbV7pqtOIMn6ZKQ61O81qLSWanMVryI
|
||||
D2fRigQhJRkcuitD9AJwEXkZTYaexsBh7z6zdGvNKQC/8T7GrXaX+3oykOBTgDVZHnK610IDpYF7
|
||||
IkcJ7CPOEzNDfl5Dl1hmplNhH15iGN3alLiHd1pz1zof4a6cC3xzzNVdT28/htfu3vgUNQtYPZUa
|
||||
yChONjH4x46u9DjPkI+Ws/958Zdh3bTWQZdbT+Ydcj75snFrhvgn9yHHrlQHjt4bCCPKHLCxhhPd
|
||||
GKuNYfm5ARIngww2x1YDKYDlhh8e0+rLUW44WOH27aN6ZMAC76wH3YxT8eXk3N3puN0z4NemRu4H
|
||||
ce9Si198dBtY1+fNRoqmJxpLiUcsxvrqpgM9ISmA+ettzxKZ9vW6+DsNydtIiEMSdRBvHWtAbmhk
|
||||
UkzOQeczd/7AIXq3JCay6gp0b8Uojt4bNvcqpcs0XAxotNN7Xm5PxaVkBjcIRG0lOD5K9bbTSwNR
|
||||
uH7I5ZqO+TtoQYOAY/vELxmRru2qjSjAb+xD+5XQ8Y5dEzpV3+KgwZhuhC4OssZN+rufhuz49JCn
|
||||
A4n47YdxqZpWMyL2zvZLCvV6Uce7D5M1DLGSDpRuAUhNZFoKxpbz8sEmrH4jueIaYdP0klx0n0IF
|
||||
Gi5oScw4ek7nyy6F94aNfeb9rvJReeYfGD0S4jOusgccS4cM7rFikuvSie5W7y47ENjqjXjaRY14
|
||||
9xYGckwbHaf5iafbw3t2iDwygPfTTsm5eyrJQD0lur8GQI+E1OhLwFo94zNbFdWc7fMlanz+SQrL
|
||||
fdWrFl89aDbsEx9xy9EpLxQDgc/OxRdrOehiF2sKcCZYELWvN3d5HeYKpDhtiLtwbb3ph2mGB6hC
|
||||
v44mMKwyO3bweQ1ckje5m3Mw5D8wBNsHO0Wg6t/6MOHb38k4KkCc8+8X7RF3yjRcZHJDP3n/1NBZ
|
||||
kX3su4+nTpGSyFC9rNWcC8MxFx6PKwOdiSmIgzUrFwtPgagcL73/xtJh4GeX88Di+y45NIEGBP0w
|
||||
jbBrbw7+zWfHd0cFElu28UFgFVdoR1NBEpONxOSqlk5Npzq/fkf20W2m2/vMX+T7updJnhzDYaIU
|
||||
F9K3XmZ6Ou/qCVelAJqR25M8FUk+no+vEvaM4ZOI9adhxXgpUCgaDbnq0yXiTKuD8Fa+Q5w0TzIs
|
||||
/r67wXUkJraX7qxvPrdr0HnfxCTsP+98ZYaIQeK110i2H841X9yYBdaMwmMtNhp3kT+O+ZtvfDyv
|
||||
hs5fJ+4DeOcu+cp6aKNPvVQGlKrgQPJrhoahy3ccoA/uQgo6JfpysC6K9GwvKzGTbNZHeV1ktHSP
|
||||
88wb8q0Wbna7wMdHYLETZ7y76OSqQa27r9g8HWt9/RxvBfzWBwmGk+TS7Xwxocg6HPGDFtWbo4YF
|
||||
CsrL0S/PAp8vzL0M0N1SNHLo7xNdM7fr4a7qXZJ2jE0XutxTuQ8OA9mbzTXi5HUnQ/gpD9/+LdUk
|
||||
fosdbFeGwzqje+6KkouARldJSOioI1ij62kBaOelOHsyAdji174AXOXPxBrXQKfzZUnRKSUDtmxg
|
||||
DeKYcBXipciemWvQu9uzZQupZG2VHDTzWdMkXQL0wxN7ao7DItaggsPa29gTUadvFpM2iFyohbUu
|
||||
7vRx46QMZsVbIQ8k1kMP76KHdsIxIEWIs5ze1CIFNYEVVvPilPOtz40Iu+fjr56GxbmKBvDgIfHp
|
||||
+Ahy8ZNdGmSPF56cWu2Yz6bzrODVELyZ/87PwpXiCEIudkkuOtbAdcXGoPI1VvhUf8qaDDunhC9p
|
||||
8/A1Kdh8CVDZo6HLGew8Q2dYqjLyYW4tDi62ig7kgfMZmEMbf/t3W//GB7RbsMOh8i4j7pRnHyDr
|
||||
0gO7wz0e1vWg+jB0jiq57aVOn2YXesCIZTQPkp/XvNVzBowhg7EbpZ/hYya1CV16OJLjM81/9ZVB
|
||||
TXEif2kwBkvbPTd4OcA3NvDrABaHfXCw2T+Sma/TiVKlU2IorbZJslvX5BR4/YICanUkGhm3Xo+V
|
||||
uMDmWXokf0FCqZ2fFPTtp/iLj7XwIiqHxhd6kTxKP/UaKCGHvI3YPtCWfb49Ni2FmFafuT2/r5Qz
|
||||
HjSD92zLyXn8XMAUM1sFTzobEvt0inPiJ7cZpseLSDJv3EfCbaAbOlpHgJOdzURf/DLh/ZRhnO5V
|
||||
Cii+vG7wVnXsjGLY0i1Ldj165FAhCdGfVDCTwYCP541iz2y9nNuy0IdxAK7YrUkD/u6/S6pB7GWj
|
||||
EFGls2Io2tPqS8xNdzlFf44wzR/OvANsXy/WTo7h9/n4OjXHemL21IPTGL3J3jeqnPs4VYzOev/C
|
||||
JrmwYHTYM4f2H30gxyR/upzDFSHs/MPVf/Z16FLRVxX093n1M4226X2sYAXvni9rb1+ny2Z6KHvm
|
||||
GVE5OOZbxFx3QGqVEmePXBvETj19oAPFOzaL6BGt9xPy4GEbFqzfjbGeiVk2yLyNyfxp4thdYn8n
|
||||
iD++59y6Jlq/fAHcHlyLVcAk9VRo5waAUztjU7G0YRmZ8wUd94+KHNloBWvkX3wUbpeCJOe3BNZY
|
||||
1Gf45UvYaWJOfwe7AYIMPVniDndu2FKjr6DDUAer6TboTS2kKXpLioFzXg3qTWJd5sePfa5bQE7Z
|
||||
5R3AqpcPPsc/dr/+syHldZTwxTUdna+s2YGhLx0IrsgA1iNKISxuqYEtwygp9ZN4hBV8eLP8ssOc
|
||||
U9LQgPfbaGE7KR7RJp1gBsVaw/jXv4Teoxwa13BPTNtnKD0vpgW//YKYzg7U26p5Jchy+YUd7T27
|
||||
W8ScZHgXyYQPDrsBUpvGBymAC3xweAvDkjSUgWqq2sTScnYYn3ergNCra6yUmgG6fWuNsNfRHmsJ
|
||||
bocxTIUGurHnY73cYL4KdbaD4pjl2OgWEG3ceZnBSUehz/SFAoRdGtxAaU+Mv+59nG965ckwSY45
|
||||
3h/bg07TTNfgiTFz4l3SKBqPN1mG3/3jo8vdqdepyy9wcvoTvjI3Xd+ap1Eg3TWtWXqfq5o4682C
|
||||
P/xWucB0V+2maMjMth4rx02rqQrdAnz5LPE/XE15OXM5cF8PMva+/erLx2K4g7xOcpu9glVmmwal
|
||||
x0wknmOu+uIPBx8azmzMgFeXYcnmewAJE8lzVXBIn/h9CZHUwaNPv/yHUl5TIDZIhn20eNGmFbIF
|
||||
hWL2SGF9NMDF3LUBR0BOWBnQIxr3aEph0VWIaAneDyLvPxj45ddYN1A18N/6ABJzGckpOpnRp48/
|
||||
H/BA7YXEIg/oYk27BbLbZhHnWy/bpDcbkKW0IiY9Ny6lB/kGW++5+EIwvyhFGW8AjYgIe4dlAtuO
|
||||
czIQvBQFG5B9Unra9xZcz16MtR//0DfdQnrQAGwB1hom7zEp4ld/4ILBr3olMK2gYqUy+eJZtHRv
|
||||
rkTRAQTzuHPWnFTJJ5Q/p1OAo9me3G0Z1RSJrMWRE2nznBbSpYTE77C/ozHj0krchbCl8tN/fiTe
|
||||
pYPuWZCkCiEqCHH+l39XHcywS986EPFx2v3wjajmNaDUViQBRAmq8b67nPO16WwLWufwQKytPwN6
|
||||
8x4ylPDrRnSsJjml1fUD17Mfkz24n8Ema8Um33Ln4bP2vYwIuekmzFty8yGq7mAZgPeRt2OhY0yT
|
||||
k7s8WtNDi13t/cWmTL49pNVB5m1OCO6gQLcx0rK/63kQBzPi2yJM0cevBKJ2o+OKSxwH0BHwx8f3
|
||||
xokWr3mOcLmFxQy1i5pz4s0YYZSwNdHh7jPQlzFe5L11iYj22KJhRb3qoZ4bNWKzr2bYnkoxwp9+
|
||||
dHVggjHtzRle9KAm9s4s9VWx5w6Syk389dBs0ao0+wVmtuNjLdizdKlz1gLh6f38y182836V4avM
|
||||
FGJURqKv2s3SQJ+SAqtMvKN0OwcmfFung/8RGsFduJIdwa/fhbk55aQwQgfxfXH370KmDIK7cstP
|
||||
/3/5rZ0vij+EaE3CF1bhB+p/9fbHFpZZsH0GrBLWArhe4eQvXRtSWt2vJox9hyVKUob5+tlPH6j3
|
||||
ajbD+lyCGT7WDaWhyfgySZ715giBgJLjBvz2FjfDmi+7Ator6OfGPhmAujCZkfq4Jthi1c1d0vvR
|
||||
kkvWVWf0eERgTeTiBr/93Aenh0b5eNQsqD30k//c3Vd3lZ9yD5YAtt9+zUZ0n2gFnMbTm7j7iA4L
|
||||
OEADVnSvkLC1jVqsTa8Hv3qxqRzU/Ea4EerFcMR7/sDRjZlN88cXccrpn4HmAWV+fggxiPzU1+XZ
|
||||
Z7BqtYno9Hmtmzrgux+f8OvZPuqcfD7NcP8QtpmxU9vl11BbfuP1pZ2p6FzC5pYMdkyB9T7cR2Ld
|
||||
h1885u9fP4ZEa7YeMsgUTw7/9N3MSocP/PJbsvekEmz9K9J+9Yi1NHvpi3odBai8sDTvcuXtLsLw
|
||||
ScEbzQ52Vs8eWowUBe3aS0OKkjnTPrluGRSSWCF7Snp3SXt/hmXM2MS/NCrlLTDe4Pmo0fmcSSul
|
||||
Ly/35EU6LsTa4yqnL6nagGZXD2wonZwvMXftYHZlZGKsDOcupzz8ID05qVjVhqe7fPo4RCn0Drio
|
||||
44wuBytQwEWpnPmHR1wEH6nsPsoD9uZD5m58t9dgSUWLHK7zki+fezjDVuhl7BkA19MYBAF8luOA
|
||||
v/pm2N5nlMEEjz2+6XVFV8XuOugxqoado1vWW5CgFPqf8jlLs27R8RoMqby1BGPr4jqA57ddBdfP
|
||||
eiVuKuJoaygUwHc+ZvC8Pun2iIMLlLeZ4OTrtyzLx9ygU847f6MSqse8YD9QPZ114pE1paOlPD3U
|
||||
toJPfAYcKfdMrj0cbsc9SbqLmH/58QbnfXcgztdP2uYrkSEnvY/YcIBab9MCU4BkSydnpFO9t31U
|
||||
yq1uNzN4U5Gu19wIERVcceaTbdPJIiENfvf/LDpsCFbreQwAy5iqL2+CmfNCxAdwq6BA8rbz6DoE
|
||||
L09esnglSpudcuHH9758mRy9E4iW0gkMRJiTPLMdTMEWe10AldN0wPuHXdHxI0oy/KDUJC6vBoOw
|
||||
Sy8FPO6YHXbM8x1sX/8AnqAzk798BcCtgIV0SokvzxX97p8FXaf9ed6ZWU2n7iqE6Lf+VwHK7pQ+
|
||||
2FCuVjvCh+zoDmI3v31kcfj69YMKd/t+L5wVWcUPHFvu9sXjH1/0F3mEdDq4jiXPTngkzqL2Ovn6
|
||||
g1CoL5EvslfBXTzW2oHGOs44OjrH4YcHkL0+PsSYVtvl34kewtu02/kgdG1X+PI5+F0PX2JfRk3T
|
||||
zFVg7Twf2H1BTLf3dmvgmgQvkgmNoK/Pep9Co4jsmVU2ns5+ZflwP4E9/uHRegObgn7+S14Hdr08
|
||||
tWeILNS25CBVfkSX+BZCI68Wn/vuh0mYbwX0CCTYuLs6FYKET8FapzJWZcrp22V378D+aFyxfb3X
|
||||
Na3EJfyL79HdGAequG8LRmyoYGULTSA2TLJAO+9sf9XPr3zj0LWA9zLysRGrjNtqai/AmHY6sSPr
|
||||
pnetiBt4udCEaBJ5g00yZQHeKx9gm7Xs/ItnzU8f4kMTVHRs1DMDS+W84eNEUp0WRmjBe2z15HBA
|
||||
dvTXj+mh85w3BDnw8Sd1+eE3xkWlAJF1lRI59H4idyMtB75dnREoPjMRraNqtI1KpkBkTAq25nyk
|
||||
24uoArrYckocUV8jgpRkB90y5fzlcM916ju5A5Pi9SKHixHVqzLADkLvWc/Mo3N1EWx2hV6i8MDf
|
||||
eohG77TGkF69duaFda0HsVwzSAVbxEaRtnnnrtwGv37+X/7YxM+n9+sfWO/XsuaOt02WoL06RFPe
|
||||
SsTJD6VE4PSase4NYUR2w8aA5vQCvkDPhiuqupxCn/VexGiySzSeZl0B/q7jfFnq2Ho6uJqD5ONp
|
||||
mCV0etZrNZvyb/1n2E6Nu4gzb8DKQoJPqiCo/+oJG0QcthL7mU+i8eh//jUxM5OL3v1pqeBkaAV2
|
||||
Xvyu3sZCZ2B29p5Ey81jLhRqqSHNLbxZiLE3rOftMkNcbidybBNFF8W+9/++T/j6n18+6MGv/0k8
|
||||
7qbWwu4ZapBNpxNRqsuBcsZR15CldSpWs1IG09dvAdxUvWZuOXiRsMrJR37Kpoitey3mK1cqFir6
|
||||
4oOVwNrTTX6JJRCfZkX0nWfkfOM+GaQRHvnCR6ER1djUk7nKm7HKyxqg1aIWsMguZ59juVIn1f1q
|
||||
wB0U9bkf10XfUqMsIR5NB2f3dQ+EeneR5bprUuxs58FdzkdSQVm88NgVhmO0SazOQEFAl6nfFY27
|
||||
MIlUwFOp2cTcvKoeT1YVwv1pKYlyig75IlxWBvJducyfl73lE5szKfjxt68eGsghf0HwKi8Kvjiv
|
||||
mZKUrTe5Pp522JRyUZ++fhr4fY9xKjClRfHdj23i+rzqjWCV0tWAbzQ6836R33RNH2IA9k6skKg0
|
||||
BX29s8YGv/rb38ncwd0eL2X54T+O1/BIJ92rMvTFI7wX1nUgB+FgwXuD4h+e1mPB1QJUbx/oj/aL
|
||||
B30tFCnMjMLy2XvziebDEQZwfeZHf33PtJ4uKAugchdHst/2bkQOAnbAnBvST9+7f/OHr7828w+7
|
||||
Akv3hhWstxGS6L6EYJ0U+QPe8XUiZiK9o02Nlgaxwb7wly3sAK37rP+bh6hSvw3k1okG1LrHivUC
|
||||
zzr96jUQWlFMXHCmw5e/hvDOnEufZvM7+upT55eHEFuAmU5PgyXDRxIf8bnrWroJbiugrtwMoqVS
|
||||
lW9rzMmwnOMEK8Ppqm8//tWIGuNvMLzodMcMKbxN8u4vv/zxY5TnZf3XH1msaVnA1x/Ahn1qKL/1
|
||||
xx18UetKUrHB+toWWQzZvdwSS+Ze7pYlSw855hwR9T3TYVCLa/n7Pl9Ol4u+cefdCPdOqmC1ffYD
|
||||
yY5vH0pTo+AjME1Ad/bIgMG+jfgcKmW97pVzBddczjDeTYhuatcz8NFZ0szXvP3Ne7oPrJ36QX7+
|
||||
lsDxnx7gtUHkm/eA33z88hGi//IE75ULsqLI+Tc/eLo9B9IdGNzsOoP4Oear8YljmAXdPLPf943d
|
||||
WqfgqjjmzNi8ka+H3WmEIogyojF5oG8P791B+/q5EWXpHZ2G+RBC7O87fOqOM6Bm6WnwxxevfHjP
|
||||
iS1pEOXtdCN7N2CH583/FFDRe25+G1eGLtUeKn+fr8Mr/vWTHlZVEJMom+2cj/yLB8/654UPt1Mx
|
||||
iGPf7ODPr01Nj4/GV7WfwTffnCX3+IzoVy+DhYg1Oe6fmruZ99MO1VJKiNfoh3x1T8GMUNSy+OtX
|
||||
UP5b31AQ2AvWXXag6yFcGPjrR3pppvo3HzDgvnw/iJEMGV2DyPWB22oycU0U5OskuRukH/30zUt1
|
||||
/ZOcCQe//rDPH3Su3h6S9MXvQMF5rK0uOeQEwi+ekK9eBIvXvEeIFEXGipGv4Kefdm3L+SR9HiqX
|
||||
fusFFATYM6vdtoFKZD+DcMsK7HviEfz8U/CCZYuPsKDD9ticFFro1RKFQn0QGfdRgJ2vUp+5kjoi
|
||||
CVYLOFXgjhWtsQHHmhGHDJw55Jt/uHzHPjT446uLucbg5y/L89X2iFs1dbTot7KH0e2VYpsP75GY
|
||||
7bcL2thDPkP+VbvjL5+IJ52ZX9i7Uxq/2QZ88QmrHynRxZQdNjhPfIjP2LuDbx5Wwnqh2B9+ei+b
|
||||
kxCc40dElLDoAUnSXQCXhyZi9+svU+3ZhGhbed5HYxMP4i8P/PldjP/81PTDSin0vSf65gH7YWSW
|
||||
ywf+8qDzaz3k27UXGWgOrxg7wfHjbubiBCBJcD5T+21ROg1sDxLb4IluIK0WBHcSwD+/UwH/9a8/
|
||||
f/7H74RB19+L9nswYCrW6T/+z1GB/xD/Y+yytv17DGEes7L459//+wTCP++h797T/5z6pniN//z7
|
||||
Dy/9PWvwz9RPWfv/Xv/X91X/9a//BQAA//8DAK+bEofgIAAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 94f4c6c3be7efac6-SJC
|
||||
- 94640d492fdbb6c9-LAX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -982,7 +978,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 13 Jun 2025 21:45:42 GMT
|
||||
- Tue, 27 May 2025 08:13:15 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -1002,15 +998,15 @@ interactions:
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '128'
|
||||
- '393'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
via:
|
||||
- envoy-router-675c5bcfc8-v6fpw
|
||||
- envoy-router-78456c78d9-689qp
|
||||
x-envoy-upstream-service-time:
|
||||
- '131'
|
||||
- '395'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
@@ -1018,13 +1014,13 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '9999972'
|
||||
- '9999979'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_7f9985e9b536960af27b96d0fd747fbb
|
||||
- req_c9c729639c1a9714296bd221d8edd696
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
@@ -33,9 +33,9 @@ def mock_settings():
|
||||
def test_org_list_command(mock_org_command_class, runner):
|
||||
mock_org_instance = MagicMock()
|
||||
mock_org_command_class.return_value = mock_org_instance
|
||||
|
||||
|
||||
result = runner.invoke(list)
|
||||
|
||||
|
||||
assert result.exit_code == 0
|
||||
mock_org_command_class.assert_called_once()
|
||||
mock_org_instance.list.assert_called_once()
|
||||
@@ -45,9 +45,9 @@ def test_org_list_command(mock_org_command_class, runner):
|
||||
def test_org_switch_command(mock_org_command_class, runner):
|
||||
mock_org_instance = MagicMock()
|
||||
mock_org_command_class.return_value = mock_org_instance
|
||||
|
||||
|
||||
result = runner.invoke(switch, ['test-id'])
|
||||
|
||||
|
||||
assert result.exit_code == 0
|
||||
mock_org_command_class.assert_called_once()
|
||||
mock_org_instance.switch.assert_called_once_with('test-id')
|
||||
@@ -57,9 +57,9 @@ def test_org_switch_command(mock_org_command_class, runner):
|
||||
def test_org_current_command(mock_org_command_class, runner):
|
||||
mock_org_instance = MagicMock()
|
||||
mock_org_command_class.return_value = mock_org_instance
|
||||
|
||||
|
||||
result = runner.invoke(current)
|
||||
|
||||
|
||||
assert result.exit_code == 0
|
||||
mock_org_command_class.assert_called_once()
|
||||
mock_org_instance.current.assert_called_once()
|
||||
@@ -70,7 +70,7 @@ class TestOrganizationCommand(unittest.TestCase):
|
||||
with patch.object(OrganizationCommand, '__init__', return_value=None):
|
||||
self.org_command = OrganizationCommand()
|
||||
self.org_command.plus_api_client = MagicMock()
|
||||
|
||||
|
||||
@patch('crewai.cli.organization.main.console')
|
||||
@patch('crewai.cli.organization.main.Table')
|
||||
def test_list_organizations_success(self, mock_table, mock_console):
|
||||
@@ -82,11 +82,11 @@ class TestOrganizationCommand(unittest.TestCase):
|
||||
]
|
||||
self.org_command.plus_api_client = MagicMock()
|
||||
self.org_command.plus_api_client.get_organizations.return_value = mock_response
|
||||
|
||||
|
||||
mock_console.print = MagicMock()
|
||||
|
||||
|
||||
self.org_command.list()
|
||||
|
||||
|
||||
self.org_command.plus_api_client.get_organizations.assert_called_once()
|
||||
mock_table.assert_called_once_with(title="Your Organizations")
|
||||
mock_table.return_value.add_column.assert_has_calls([
|
||||
@@ -105,12 +105,12 @@ class TestOrganizationCommand(unittest.TestCase):
|
||||
mock_response.json.return_value = []
|
||||
self.org_command.plus_api_client = MagicMock()
|
||||
self.org_command.plus_api_client.get_organizations.return_value = mock_response
|
||||
|
||||
|
||||
self.org_command.list()
|
||||
|
||||
|
||||
self.org_command.plus_api_client.get_organizations.assert_called_once()
|
||||
mock_console.print.assert_called_once_with(
|
||||
"You don't belong to any organizations yet.",
|
||||
"You don't belong to any organizations yet.",
|
||||
style="yellow"
|
||||
)
|
||||
|
||||
@@ -118,14 +118,14 @@ class TestOrganizationCommand(unittest.TestCase):
|
||||
def test_list_organizations_api_error(self, mock_console):
|
||||
self.org_command.plus_api_client = MagicMock()
|
||||
self.org_command.plus_api_client.get_organizations.side_effect = requests.exceptions.RequestException("API Error")
|
||||
|
||||
|
||||
with pytest.raises(SystemExit):
|
||||
self.org_command.list()
|
||||
|
||||
|
||||
|
||||
self.org_command.plus_api_client.get_organizations.assert_called_once()
|
||||
mock_console.print.assert_called_once_with(
|
||||
"Failed to retrieve organization list: API Error",
|
||||
"Failed to retrieve organization list: API Error",
|
||||
style="bold red"
|
||||
)
|
||||
|
||||
@@ -140,12 +140,12 @@ class TestOrganizationCommand(unittest.TestCase):
|
||||
]
|
||||
self.org_command.plus_api_client = MagicMock()
|
||||
self.org_command.plus_api_client.get_organizations.return_value = mock_response
|
||||
|
||||
|
||||
mock_settings_instance = MagicMock()
|
||||
mock_settings_class.return_value = mock_settings_instance
|
||||
|
||||
|
||||
self.org_command.switch("test-id")
|
||||
|
||||
|
||||
self.org_command.plus_api_client.get_organizations.assert_called_once()
|
||||
mock_settings_instance.dump.assert_called_once()
|
||||
assert mock_settings_instance.org_name == "Test Org"
|
||||
@@ -165,9 +165,9 @@ class TestOrganizationCommand(unittest.TestCase):
|
||||
]
|
||||
self.org_command.plus_api_client = MagicMock()
|
||||
self.org_command.plus_api_client.get_organizations.return_value = mock_response
|
||||
|
||||
|
||||
self.org_command.switch("non-existent-id")
|
||||
|
||||
|
||||
self.org_command.plus_api_client.get_organizations.assert_called_once()
|
||||
mock_console.print.assert_called_once_with(
|
||||
"Organization with id 'non-existent-id' not found.",
|
||||
@@ -181,9 +181,9 @@ class TestOrganizationCommand(unittest.TestCase):
|
||||
mock_settings_instance.org_name = "Test Org"
|
||||
mock_settings_instance.org_uuid = "test-id"
|
||||
mock_settings_class.return_value = mock_settings_instance
|
||||
|
||||
|
||||
self.org_command.current()
|
||||
|
||||
|
||||
self.org_command.plus_api_client.get_organizations.assert_not_called()
|
||||
mock_console.print.assert_called_once_with(
|
||||
"Currently logged in to organization Test Org (test-id)",
|
||||
@@ -196,49 +196,11 @@ class TestOrganizationCommand(unittest.TestCase):
|
||||
mock_settings_instance = MagicMock()
|
||||
mock_settings_instance.org_uuid = None
|
||||
mock_settings_class.return_value = mock_settings_instance
|
||||
|
||||
|
||||
self.org_command.current()
|
||||
|
||||
|
||||
assert mock_console.print.call_count == 3
|
||||
mock_console.print.assert_any_call(
|
||||
"You're not currently logged in to any organization.",
|
||||
"You're not currently logged in to any organization.",
|
||||
style="yellow"
|
||||
)
|
||||
|
||||
@patch('crewai.cli.organization.main.console')
|
||||
def test_list_organizations_unauthorized(self, mock_console):
|
||||
mock_response = MagicMock()
|
||||
mock_http_error = requests.exceptions.HTTPError(
|
||||
"401 Client Error: Unauthorized",
|
||||
response=MagicMock(status_code=401)
|
||||
)
|
||||
|
||||
mock_response.raise_for_status.side_effect = mock_http_error
|
||||
self.org_command.plus_api_client.get_organizations.return_value = mock_response
|
||||
|
||||
self.org_command.list()
|
||||
|
||||
self.org_command.plus_api_client.get_organizations.assert_called_once()
|
||||
mock_console.print.assert_called_once_with(
|
||||
"You are not logged in to any organization. Use 'crewai login' to login.",
|
||||
style="bold red"
|
||||
)
|
||||
|
||||
@patch('crewai.cli.organization.main.console')
|
||||
def test_switch_organization_unauthorized(self, mock_console):
|
||||
mock_response = MagicMock()
|
||||
mock_http_error = requests.exceptions.HTTPError(
|
||||
"401 Client Error: Unauthorized",
|
||||
response=MagicMock(status_code=401)
|
||||
)
|
||||
|
||||
mock_response.raise_for_status.side_effect = mock_http_error
|
||||
self.org_command.plus_api_client.get_organizations.return_value = mock_response
|
||||
|
||||
self.org_command.switch("test-id")
|
||||
|
||||
self.org_command.plus_api_client.get_organizations.assert_called_once()
|
||||
mock_console.print.assert_called_once_with(
|
||||
"You are not logged in to any organization. Use 'crewai login' to login.",
|
||||
style="bold red"
|
||||
)
|
||||
|
||||
@@ -56,8 +56,7 @@ def test_create_success(mock_subprocess, capsys, tool_command):
|
||||
|
||||
@patch("crewai.cli.tools.main.subprocess.run")
|
||||
@patch("crewai.cli.plus_api.PlusAPI.get_tool")
|
||||
@patch("crewai.cli.tools.main.ToolCommand._print_current_organization")
|
||||
def test_install_success(mock_print_org, mock_get, mock_subprocess_run, capsys, tool_command):
|
||||
def test_install_success(mock_get, mock_subprocess_run, capsys, tool_command):
|
||||
mock_get_response = MagicMock()
|
||||
mock_get_response.status_code = 200
|
||||
mock_get_response.json.return_value = {
|
||||
@@ -86,9 +85,6 @@ def test_install_success(mock_print_org, mock_get, mock_subprocess_run, capsys,
|
||||
env=unittest.mock.ANY,
|
||||
)
|
||||
|
||||
# Verify _print_current_organization was called
|
||||
mock_print_org.assert_called_once()
|
||||
|
||||
@patch("crewai.cli.tools.main.subprocess.run")
|
||||
@patch("crewai.cli.plus_api.PlusAPI.get_tool")
|
||||
def test_install_success_from_pypi(mock_get, mock_subprocess_run, capsys, tool_command):
|
||||
@@ -170,9 +166,7 @@ def test_publish_when_not_in_sync(mock_is_synced, capsys, tool_command):
|
||||
@patch("crewai.cli.plus_api.PlusAPI.publish_tool")
|
||||
@patch("crewai.cli.tools.main.git.Repository.is_synced", return_value=False)
|
||||
@patch("crewai.cli.tools.main.extract_available_exports", return_value=[{"name": "SampleTool"}])
|
||||
@patch("crewai.cli.tools.main.ToolCommand._print_current_organization")
|
||||
def test_publish_when_not_in_sync_and_force(
|
||||
mock_print_org,
|
||||
mock_available_exports,
|
||||
mock_is_synced,
|
||||
mock_publish,
|
||||
@@ -208,7 +202,6 @@ def test_publish_when_not_in_sync_and_force(
|
||||
encoded_file=unittest.mock.ANY,
|
||||
available_exports=[{"name": "SampleTool"}],
|
||||
)
|
||||
mock_print_org.assert_called_once()
|
||||
|
||||
|
||||
@patch("crewai.cli.tools.main.get_project_name", return_value="sample-tool")
|
||||
@@ -336,27 +329,3 @@ def test_publish_api_error(
|
||||
assert "Request to Enterprise API failed" in output
|
||||
|
||||
mock_publish.assert_called_once()
|
||||
|
||||
|
||||
|
||||
@patch("crewai.cli.tools.main.Settings")
|
||||
def test_print_current_organization_with_org(mock_settings, capsys, tool_command):
|
||||
mock_settings_instance = MagicMock()
|
||||
mock_settings_instance.org_uuid = "test-org-uuid"
|
||||
mock_settings_instance.org_name = "Test Organization"
|
||||
mock_settings.return_value = mock_settings_instance
|
||||
tool_command._print_current_organization()
|
||||
output = capsys.readouterr().out
|
||||
assert "Current organization: Test Organization (test-org-uuid)" in output
|
||||
|
||||
|
||||
@patch("crewai.cli.tools.main.Settings")
|
||||
def test_print_current_organization_without_org(mock_settings, capsys, tool_command):
|
||||
mock_settings_instance = MagicMock()
|
||||
mock_settings_instance.org_uuid = None
|
||||
mock_settings_instance.org_name = None
|
||||
mock_settings.return_value = mock_settings_instance
|
||||
tool_command._print_current_organization()
|
||||
output = capsys.readouterr().out
|
||||
assert "No organization currently set" in output
|
||||
assert "org switch <org_id>" in output
|
||||
|
||||
@@ -856,22 +856,18 @@ def test_crew_verbose_output(researcher, writer, capsys):
|
||||
)
|
||||
|
||||
expected_strings = [
|
||||
"🤖 Agent Started",
|
||||
"Agent: Researcher",
|
||||
"Task: Research AI advancements.",
|
||||
"✅ Agent Final Answer",
|
||||
"Agent: Researcher",
|
||||
"🤖 Agent Started",
|
||||
"Agent: Senior Writer",
|
||||
"Task: Write about AI in healthcare.",
|
||||
"✅ Agent Final Answer",
|
||||
"Agent: Senior Writer",
|
||||
"\x1b[1m\x1b[95m# Agent:\x1b[00m \x1b[1m\x1b[92mResearcher",
|
||||
"\x1b[00m\n\x1b[95m## Task:\x1b[00m \x1b[92mResearch AI advancements.",
|
||||
"\x1b[1m\x1b[95m# Agent:\x1b[00m \x1b[1m\x1b[92mSenior Writer",
|
||||
"\x1b[95m## Task:\x1b[00m \x1b[92mWrite about AI in healthcare.",
|
||||
"\n\n\x1b[1m\x1b[95m# Agent:\x1b[00m \x1b[1m\x1b[92mResearcher",
|
||||
"\x1b[00m\n\x1b[95m## Final Answer:",
|
||||
"\n\n\x1b[1m\x1b[95m# Agent:\x1b[00m \x1b[1m\x1b[92mSenior Writer",
|
||||
"\x1b[00m\n\x1b[95m## Final Answer:",
|
||||
]
|
||||
|
||||
for expected_string in expected_strings:
|
||||
assert (
|
||||
expected_string in filtered_output
|
||||
), f"Expected '{expected_string}' in output, but it was not found."
|
||||
assert expected_string in filtered_output
|
||||
|
||||
# Now test with verbose set to False
|
||||
crew.verbose = False
|
||||
@@ -1769,76 +1765,6 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():
|
||||
)
|
||||
|
||||
|
||||
def test_hierarchical_kickoff_usage_metrics_include_manager(researcher):
|
||||
"""Ensure Crew.kickoff() sums UsageMetrics from both regular and manager agents."""
|
||||
|
||||
# ── 1. Build the manager and a simple task ──────────────────────────────────
|
||||
manager = Agent(
|
||||
role="Manager",
|
||||
goal="Coordinate everything.",
|
||||
backstory="Keeps the project on track.",
|
||||
allow_delegation=False,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Say hello",
|
||||
expected_output="Hello",
|
||||
agent=researcher, # *regular* agent
|
||||
)
|
||||
|
||||
# ── 2. Stub out each agent’s _token_process.get_summary() ───────────────────
|
||||
researcher_metrics = UsageMetrics(
|
||||
total_tokens=120, prompt_tokens=80, completion_tokens=40, successful_requests=2
|
||||
)
|
||||
manager_metrics = UsageMetrics(
|
||||
total_tokens=30, prompt_tokens=20, completion_tokens=10, successful_requests=1
|
||||
)
|
||||
|
||||
# Replace the internal _token_process objects with simple mocks
|
||||
researcher._token_process = MagicMock(
|
||||
get_summary=MagicMock(return_value=researcher_metrics)
|
||||
)
|
||||
manager._token_process = MagicMock(
|
||||
get_summary=MagicMock(return_value=manager_metrics)
|
||||
)
|
||||
|
||||
# ── 3. Create the crew (hierarchical!) and kick it off ──────────────────────
|
||||
crew = Crew(
|
||||
agents=[researcher], # regular agents
|
||||
manager_agent=manager, # manager to be included
|
||||
tasks=[task],
|
||||
process=Process.hierarchical,
|
||||
)
|
||||
|
||||
# We don’t care about LLM output here; patch execute_sync to avoid network
|
||||
with patch.object(
|
||||
Task,
|
||||
"execute_sync",
|
||||
return_value=TaskOutput(
|
||||
description="dummy", raw="Hello", agent=researcher.role
|
||||
),
|
||||
):
|
||||
crew.kickoff()
|
||||
|
||||
# ── 4. Assert the aggregated numbers are the *sum* of both agents ───────────
|
||||
assert (
|
||||
crew.usage_metrics.total_tokens
|
||||
== researcher_metrics.total_tokens + manager_metrics.total_tokens
|
||||
)
|
||||
assert (
|
||||
crew.usage_metrics.prompt_tokens
|
||||
== researcher_metrics.prompt_tokens + manager_metrics.prompt_tokens
|
||||
)
|
||||
assert (
|
||||
crew.usage_metrics.completion_tokens
|
||||
== researcher_metrics.completion_tokens + manager_metrics.completion_tokens
|
||||
)
|
||||
assert (
|
||||
crew.usage_metrics.successful_requests
|
||||
== researcher_metrics.successful_requests + manager_metrics.successful_requests
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_hierarchical_crew_creation_tasks_with_agents(researcher, writer):
|
||||
"""
|
||||
@@ -4480,29 +4406,27 @@ def test_sets_parent_flow_when_inside_flow(researcher, writer):
|
||||
assert result.parent_flow is flow
|
||||
|
||||
|
||||
def test_reset_knowledge_with_no_crew_knowledge(researcher, writer):
|
||||
def test_reset_knowledge_with_no_crew_knowledge(researcher,writer):
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
process=Process.sequential,
|
||||
tasks=[
|
||||
Task(description="Task 1", expected_output="output", agent=researcher),
|
||||
Task(description="Task 2", expected_output="output", agent=writer),
|
||||
],
|
||||
]
|
||||
)
|
||||
|
||||
with pytest.raises(RuntimeError) as excinfo:
|
||||
crew.reset_memories(command_type="knowledge")
|
||||
crew.reset_memories(command_type='knowledge')
|
||||
|
||||
# Optionally, you can also check the error message
|
||||
assert "Crew Knowledge and Agent Knowledge memory system is not initialized" in str(
|
||||
excinfo.value
|
||||
) # Replace with the expected message
|
||||
assert "Crew Knowledge and Agent Knowledge memory system is not initialized" in str(excinfo.value) # Replace with the expected message
|
||||
|
||||
|
||||
def test_reset_knowledge_with_only_crew_knowledge(researcher, writer):
|
||||
def test_reset_knowledge_with_only_crew_knowledge(researcher,writer):
|
||||
mock_ks = MagicMock(spec=Knowledge)
|
||||
|
||||
with patch.object(Crew, "reset_knowledge") as mock_reset_agent_knowledge:
|
||||
with patch.object(Crew,'reset_knowledge') as mock_reset_agent_knowledge:
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
process=Process.sequential,
|
||||
@@ -4510,14 +4434,14 @@ def test_reset_knowledge_with_only_crew_knowledge(researcher, writer):
|
||||
Task(description="Task 1", expected_output="output", agent=researcher),
|
||||
Task(description="Task 2", expected_output="output", agent=writer),
|
||||
],
|
||||
knowledge=mock_ks,
|
||||
knowledge=mock_ks
|
||||
)
|
||||
|
||||
crew.reset_memories(command_type="knowledge")
|
||||
crew.reset_memories(command_type='knowledge')
|
||||
mock_reset_agent_knowledge.assert_called_once_with([mock_ks])
|
||||
|
||||
|
||||
def test_reset_knowledge_with_crew_and_agent_knowledge(researcher, writer):
|
||||
def test_reset_knowledge_with_crew_and_agent_knowledge(researcher,writer):
|
||||
mock_ks_crew = MagicMock(spec=Knowledge)
|
||||
mock_ks_research = MagicMock(spec=Knowledge)
|
||||
mock_ks_writer = MagicMock(spec=Knowledge)
|
||||
@@ -4525,7 +4449,7 @@ def test_reset_knowledge_with_crew_and_agent_knowledge(researcher, writer):
|
||||
researcher.knowledge = mock_ks_research
|
||||
writer.knowledge = mock_ks_writer
|
||||
|
||||
with patch.object(Crew, "reset_knowledge") as mock_reset_agent_knowledge:
|
||||
with patch.object(Crew,'reset_knowledge') as mock_reset_agent_knowledge:
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
process=Process.sequential,
|
||||
@@ -4533,23 +4457,21 @@ def test_reset_knowledge_with_crew_and_agent_knowledge(researcher, writer):
|
||||
Task(description="Task 1", expected_output="output", agent=researcher),
|
||||
Task(description="Task 2", expected_output="output", agent=writer),
|
||||
],
|
||||
knowledge=mock_ks_crew,
|
||||
knowledge=mock_ks_crew
|
||||
)
|
||||
|
||||
crew.reset_memories(command_type="knowledge")
|
||||
mock_reset_agent_knowledge.assert_called_once_with(
|
||||
[mock_ks_crew, mock_ks_research, mock_ks_writer]
|
||||
)
|
||||
crew.reset_memories(command_type='knowledge')
|
||||
mock_reset_agent_knowledge.assert_called_once_with([mock_ks_crew,mock_ks_research,mock_ks_writer])
|
||||
|
||||
|
||||
def test_reset_knowledge_with_only_agent_knowledge(researcher, writer):
|
||||
def test_reset_knowledge_with_only_agent_knowledge(researcher,writer):
|
||||
mock_ks_research = MagicMock(spec=Knowledge)
|
||||
mock_ks_writer = MagicMock(spec=Knowledge)
|
||||
|
||||
researcher.knowledge = mock_ks_research
|
||||
writer.knowledge = mock_ks_writer
|
||||
|
||||
with patch.object(Crew, "reset_knowledge") as mock_reset_agent_knowledge:
|
||||
with patch.object(Crew,'reset_knowledge') as mock_reset_agent_knowledge:
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
process=Process.sequential,
|
||||
@@ -4559,13 +4481,11 @@ def test_reset_knowledge_with_only_agent_knowledge(researcher, writer):
|
||||
],
|
||||
)
|
||||
|
||||
crew.reset_memories(command_type="knowledge")
|
||||
mock_reset_agent_knowledge.assert_called_once_with(
|
||||
[mock_ks_research, mock_ks_writer]
|
||||
)
|
||||
crew.reset_memories(command_type='knowledge')
|
||||
mock_reset_agent_knowledge.assert_called_once_with([mock_ks_research,mock_ks_writer])
|
||||
|
||||
|
||||
def test_reset_agent_knowledge_with_no_agent_knowledge(researcher, writer):
|
||||
def test_reset_agent_knowledge_with_no_agent_knowledge(researcher,writer):
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
process=Process.sequential,
|
||||
@@ -4576,15 +4496,13 @@ def test_reset_agent_knowledge_with_no_agent_knowledge(researcher, writer):
|
||||
)
|
||||
|
||||
with pytest.raises(RuntimeError) as excinfo:
|
||||
crew.reset_memories(command_type="agent_knowledge")
|
||||
crew.reset_memories(command_type='agent_knowledge')
|
||||
|
||||
# Optionally, you can also check the error message
|
||||
assert "Agent Knowledge memory system is not initialized" in str(
|
||||
excinfo.value
|
||||
) # Replace with the expected message
|
||||
assert "Agent Knowledge memory system is not initialized" in str(excinfo.value) # Replace with the expected message
|
||||
|
||||
|
||||
def test_reset_agent_knowledge_with_only_crew_knowledge(researcher, writer):
|
||||
def test_reset_agent_knowledge_with_only_crew_knowledge(researcher,writer):
|
||||
mock_ks = MagicMock(spec=Knowledge)
|
||||
|
||||
crew = Crew(
|
||||
@@ -4594,19 +4512,17 @@ def test_reset_agent_knowledge_with_only_crew_knowledge(researcher, writer):
|
||||
Task(description="Task 1", expected_output="output", agent=researcher),
|
||||
Task(description="Task 2", expected_output="output", agent=writer),
|
||||
],
|
||||
knowledge=mock_ks,
|
||||
knowledge=mock_ks
|
||||
)
|
||||
|
||||
with pytest.raises(RuntimeError) as excinfo:
|
||||
crew.reset_memories(command_type="agent_knowledge")
|
||||
crew.reset_memories(command_type='agent_knowledge')
|
||||
|
||||
# Optionally, you can also check the error message
|
||||
assert "Agent Knowledge memory system is not initialized" in str(
|
||||
excinfo.value
|
||||
) # Replace with the expected message
|
||||
assert "Agent Knowledge memory system is not initialized" in str(excinfo.value) # Replace with the expected message
|
||||
|
||||
|
||||
def test_reset_agent_knowledge_with_crew_and_agent_knowledge(researcher, writer):
|
||||
def test_reset_agent_knowledge_with_crew_and_agent_knowledge(researcher,writer):
|
||||
mock_ks_crew = MagicMock(spec=Knowledge)
|
||||
mock_ks_research = MagicMock(spec=Knowledge)
|
||||
mock_ks_writer = MagicMock(spec=Knowledge)
|
||||
@@ -4614,7 +4530,7 @@ def test_reset_agent_knowledge_with_crew_and_agent_knowledge(researcher, writer)
|
||||
researcher.knowledge = mock_ks_research
|
||||
writer.knowledge = mock_ks_writer
|
||||
|
||||
with patch.object(Crew, "reset_knowledge") as mock_reset_agent_knowledge:
|
||||
with patch.object(Crew,'reset_knowledge') as mock_reset_agent_knowledge:
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
process=Process.sequential,
|
||||
@@ -4622,23 +4538,21 @@ def test_reset_agent_knowledge_with_crew_and_agent_knowledge(researcher, writer)
|
||||
Task(description="Task 1", expected_output="output", agent=researcher),
|
||||
Task(description="Task 2", expected_output="output", agent=writer),
|
||||
],
|
||||
knowledge=mock_ks_crew,
|
||||
knowledge=mock_ks_crew
|
||||
)
|
||||
|
||||
crew.reset_memories(command_type="agent_knowledge")
|
||||
mock_reset_agent_knowledge.assert_called_once_with(
|
||||
[mock_ks_research, mock_ks_writer]
|
||||
)
|
||||
crew.reset_memories(command_type='agent_knowledge')
|
||||
mock_reset_agent_knowledge.assert_called_once_with([mock_ks_research,mock_ks_writer])
|
||||
|
||||
|
||||
def test_reset_agent_knowledge_with_only_agent_knowledge(researcher, writer):
|
||||
def test_reset_agent_knowledge_with_only_agent_knowledge(researcher,writer):
|
||||
mock_ks_research = MagicMock(spec=Knowledge)
|
||||
mock_ks_writer = MagicMock(spec=Knowledge)
|
||||
|
||||
researcher.knowledge = mock_ks_research
|
||||
writer.knowledge = mock_ks_writer
|
||||
|
||||
with patch.object(Crew, "reset_knowledge") as mock_reset_agent_knowledge:
|
||||
with patch.object(Crew,'reset_knowledge') as mock_reset_agent_knowledge:
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
process=Process.sequential,
|
||||
@@ -4648,7 +4562,7 @@ def test_reset_agent_knowledge_with_only_agent_knowledge(researcher, writer):
|
||||
],
|
||||
)
|
||||
|
||||
crew.reset_memories(command_type="agent_knowledge")
|
||||
mock_reset_agent_knowledge.assert_called_once_with(
|
||||
[mock_ks_research, mock_ks_writer]
|
||||
)
|
||||
crew.reset_memories(command_type='agent_knowledge')
|
||||
mock_reset_agent_knowledge.assert_called_once_with([mock_ks_research,mock_ks_writer])
|
||||
|
||||
|
||||
|
||||
258 tests/reasoning_interval_test.py (new file)
@@ -0,0 +1,258 @@
|
||||
"""Tests for reasoning interval and adaptive reasoning in agents."""
|
||||
|
||||
import pytest
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
from crewai import Agent, Task
|
||||
from crewai.agents.crew_agent_executor import CrewAgentExecutor
|
||||
from crewai.utilities.reasoning_handler import AgentReasoning
|
||||
|
||||
|
||||
def test_agent_with_reasoning_interval():
|
||||
"""Ensure that the agent triggers mid-execution reasoning based on the fixed interval."""
|
||||
|
||||
# Use a mock LLM to avoid real network calls
|
||||
llm = MagicMock()
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="To test the reasoning interval feature",
|
||||
backstory="I am a test agent created to verify the reasoning interval feature works correctly.",
|
||||
llm=llm,
|
||||
reasoning=True,
|
||||
reasoning_interval=2, # Reason every 2 steps
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Multi-step task that requires periodic reasoning.",
|
||||
expected_output="The task should be completed with periodic reasoning.",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
# Create a mock executor that will be injected into the agent
|
||||
mock_executor = MagicMock()
|
||||
mock_executor.steps_since_reasoning = 0
|
||||
|
||||
def mock_invoke(*args, **kwargs):
|
||||
return mock_executor._invoke_loop()
|
||||
|
||||
def mock_invoke_loop():
|
||||
assert not mock_executor._should_trigger_reasoning()
|
||||
mock_executor.steps_since_reasoning += 1
|
||||
|
||||
mock_executor.steps_since_reasoning = 2
|
||||
assert mock_executor._should_trigger_reasoning()
|
||||
mock_executor._handle_mid_execution_reasoning()
|
||||
|
||||
return {"output": "Task completed successfully."}
|
||||
|
||||
mock_executor.invoke = MagicMock(side_effect=mock_invoke)
|
||||
mock_executor._invoke_loop = MagicMock(side_effect=mock_invoke_loop)
|
||||
mock_executor._should_trigger_reasoning = MagicMock(side_effect=lambda: mock_executor.steps_since_reasoning >= 2)
|
||||
mock_executor._handle_mid_execution_reasoning = MagicMock()
|
||||
|
||||
# Monkey-patch create_agent_executor so that it sets our mock_executor
|
||||
def _fake_create_agent_executor(self, tools=None, task=None): # noqa: D401,E501
|
||||
"""Replace the real executor with the mock while preserving behaviour."""
|
||||
self.agent_executor = mock_executor
|
||||
return mock_executor
|
||||
|
||||
with patch.object(Agent, "create_agent_executor", _fake_create_agent_executor):
|
||||
result = agent.execute_task(task)
|
||||
|
||||
# Validate results and that reasoning happened when expected
|
||||
assert result == "Task completed successfully."
|
||||
mock_executor._invoke_loop.assert_called_once()
|
||||
mock_executor._handle_mid_execution_reasoning.assert_called_once()
|
||||
|
||||
|
||||
def test_agent_with_adaptive_reasoning():
|
||||
"""Test agent with adaptive reasoning."""
|
||||
# Create a mock agent with adaptive reasoning
|
||||
agent = MagicMock()
|
||||
agent.reasoning = True
|
||||
agent.reasoning_interval = None
|
||||
agent.adaptive_reasoning = True
|
||||
agent.role = "Test Agent"
|
||||
|
||||
# Create a mock task
|
||||
task = MagicMock()
|
||||
|
||||
executor = CrewAgentExecutor(
|
||||
llm=MagicMock(),
|
||||
task=task,
|
||||
crew=MagicMock(),
|
||||
agent=agent,
|
||||
prompt={},
|
||||
max_iter=10,
|
||||
tools=[],
|
||||
tools_names="",
|
||||
stop_words=[],
|
||||
tools_description="",
|
||||
tools_handler=MagicMock()
|
||||
)
|
||||
|
||||
def mock_invoke_loop():
|
||||
assert executor._should_adaptive_reason()
|
||||
executor._handle_mid_execution_reasoning()
|
||||
return {"output": "Task completed with adaptive reasoning."}
|
||||
|
||||
executor._invoke_loop = MagicMock(side_effect=mock_invoke_loop)
|
||||
executor._should_adaptive_reason = MagicMock(return_value=True)
|
||||
executor._handle_mid_execution_reasoning = MagicMock()
|
||||
|
||||
result = executor._invoke_loop()
|
||||
|
||||
assert result["output"] == "Task completed with adaptive reasoning."
|
||||
executor._should_adaptive_reason.assert_called_once()
|
||||
executor._handle_mid_execution_reasoning.assert_called_once()
|
||||
|
||||
|
||||
def test_mid_execution_reasoning_handler():
|
||||
"""Test the mid-execution reasoning handler."""
|
||||
llm = MagicMock()
|
||||
llm.call.return_value = "Based on progress, I'll adjust my approach.\n\nREADY: I am ready to continue executing the task."
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="To test the mid-execution reasoning handler",
|
||||
backstory="I am a test agent created to verify the mid-execution reasoning handler works correctly.",
|
||||
llm=llm,
|
||||
reasoning=True,
|
||||
verbose=True
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Task to test mid-execution reasoning handler.",
|
||||
expected_output="The mid-execution reasoning handler should work correctly.",
|
||||
agent=agent
|
||||
)
|
||||
|
||||
agent.llm.call = MagicMock(return_value="Based on progress, I'll adjust my approach.\n\nREADY: I am ready to continue executing the task.")
|
||||
|
||||
reasoning_handler = AgentReasoning(task=task, agent=agent)
|
||||
|
||||
result = reasoning_handler.handle_mid_execution_reasoning(
|
||||
current_steps=3,
|
||||
tools_used=["search_tool", "calculator_tool"],
|
||||
current_progress="Made progress on steps 1-3",
|
||||
iteration_messages=[
|
||||
{"role": "assistant", "content": "I'll search for information."},
|
||||
{"role": "system", "content": "Search results: ..."},
|
||||
{"role": "assistant", "content": "I'll calculate the answer."},
|
||||
{"role": "system", "content": "Calculation result: 42"}
|
||||
]
|
||||
)
|
||||
|
||||
assert result is not None
|
||||
assert hasattr(result, 'plan')
|
||||
assert hasattr(result.plan, 'plan')
|
||||
assert hasattr(result.plan, 'ready')
|
||||
assert result.plan.ready is True
|
||||
|
||||
|
||||
def test_should_trigger_reasoning_interval():
|
||||
"""Test the _should_trigger_reasoning method with interval-based reasoning."""
|
||||
agent = MagicMock()
|
||||
agent.reasoning = True
|
||||
agent.reasoning_interval = 3
|
||||
agent.adaptive_reasoning = False
|
||||
|
||||
executor = CrewAgentExecutor(
|
||||
llm=MagicMock(),
|
||||
task=MagicMock(),
|
||||
crew=MagicMock(),
|
||||
agent=agent,
|
||||
prompt={},
|
||||
max_iter=10,
|
||||
tools=[],
|
||||
tools_names="",
|
||||
stop_words=[],
|
||||
tools_description="",
|
||||
tools_handler=MagicMock()
|
||||
)
|
||||
|
||||
executor.steps_since_reasoning = 0
|
||||
assert executor._should_trigger_reasoning() is False
|
||||
|
||||
executor.steps_since_reasoning = 2
|
||||
assert executor._should_trigger_reasoning() is False
|
||||
|
||||
executor.steps_since_reasoning = 3
|
||||
assert executor._should_trigger_reasoning() is True
|
||||
|
||||
executor.steps_since_reasoning = 4
|
||||
assert executor._should_trigger_reasoning() is True
|
||||
|
||||
|
||||
def test_should_trigger_adaptive_reasoning():
|
||||
"""Test the _should_adaptive_reason method."""
|
||||
agent = MagicMock()
|
||||
agent.reasoning = True
|
||||
agent.reasoning_interval = None
|
||||
agent.adaptive_reasoning = True
|
||||
|
||||
executor = CrewAgentExecutor(
|
||||
llm=MagicMock(),
|
||||
task=MagicMock(),
|
||||
crew=MagicMock(),
|
||||
agent=agent,
|
||||
prompt={},
|
||||
max_iter=10,
|
||||
tools=[],
|
||||
tools_names="",
|
||||
stop_words=[],
|
||||
tools_description="",
|
||||
tools_handler=MagicMock()
|
||||
)
|
||||
|
||||
with patch('crewai.utilities.reasoning_handler.AgentReasoning.should_adaptive_reason_llm', return_value=True):
|
||||
assert executor._should_adaptive_reason() is True
|
||||
|
||||
executor.messages = [
|
||||
{"role": "assistant", "content": "I'll try this approach."},
|
||||
{"role": "system", "content": "Error: Failed to execute the command."},
|
||||
{"role": "assistant", "content": "Let me try something else."}
|
||||
]
|
||||
assert executor._should_adaptive_reason() is True
|
||||
|
||||
executor.messages = [
|
||||
{"role": "assistant", "content": "I'll try this approach."},
|
||||
{"role": "system", "content": "Command executed successfully."},
|
||||
{"role": "assistant", "content": "Let me continue with the next step."}
|
||||
]
|
||||
with patch('crewai.utilities.reasoning_handler.AgentReasoning.should_adaptive_reason_llm', return_value=False):
|
||||
assert executor._should_adaptive_reason() is False
|
||||
|
||||
|
||||
@pytest.mark.parametrize("interval,steps,should_reason", [
|
||||
(None, 5, False),
|
||||
(3, 2, False),
|
||||
(3, 3, True),
|
||||
(1, 1, True),
|
||||
(5, 10, True),
|
||||
])
|
||||
def test_reasoning_interval_scenarios(interval, steps, should_reason):
|
||||
"""Test various reasoning interval scenarios."""
|
||||
agent = MagicMock()
|
||||
agent.reasoning = True
|
||||
agent.reasoning_interval = interval
|
||||
agent.adaptive_reasoning = False
|
||||
|
||||
executor = CrewAgentExecutor(
|
||||
llm=MagicMock(),
|
||||
task=MagicMock(),
|
||||
crew=MagicMock(),
|
||||
agent=agent,
|
||||
prompt={},
|
||||
max_iter=10,
|
||||
tools=[],
|
||||
tools_names="",
|
||||
stop_words=[],
|
||||
tools_description="",
|
||||
tools_handler=MagicMock()
|
||||
)
|
||||
|
||||
executor.steps_since_reasoning = steps
|
||||
assert executor._should_trigger_reasoning() is should_reason
|
@@ -9,14 +9,6 @@ from crewai.telemetry import Telemetry
|
||||
from opentelemetry import trace
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def cleanup_telemetry():
|
||||
"""Automatically clean up Telemetry singleton between tests."""
|
||||
Telemetry._instance = None
|
||||
yield
|
||||
Telemetry._instance = None
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"env_var,value,expected_ready",
|
||||
[
|
||||
|
||||
@@ -1,19 +1,11 @@
|
||||
import os
|
||||
from unittest.mock import patch, MagicMock
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai.telemetry import Telemetry
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def cleanup_telemetry():
|
||||
"""Automatically clean up Telemetry singleton between tests."""
|
||||
Telemetry._instance = None
|
||||
yield
|
||||
Telemetry._instance = None
|
||||
|
||||
|
||||
@pytest.mark.parametrize("env_var,value,expected_ready", [
|
||||
("OTEL_SDK_DISABLED", "true", False),
|
||||
("OTEL_SDK_DISABLED", "TRUE", False),
|
||||
@@ -36,59 +28,3 @@ def test_telemetry_enabled_by_default():
|
||||
with patch("crewai.telemetry.telemetry.TracerProvider"):
|
||||
telemetry = Telemetry()
|
||||
assert telemetry.ready is True
|
||||
|
||||
|
||||
def test_telemetry_disable_after_singleton_creation():
|
||||
"""Test that telemetry operations are disabled when env var is set after singleton creation."""
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
with patch("crewai.telemetry.telemetry.TracerProvider"):
|
||||
telemetry = Telemetry()
|
||||
assert telemetry.ready is True
|
||||
|
||||
mock_operation = MagicMock()
|
||||
telemetry._safe_telemetry_operation(mock_operation)
|
||||
mock_operation.assert_called_once()
|
||||
|
||||
mock_operation.reset_mock()
|
||||
|
||||
os.environ['CREWAI_DISABLE_TELEMETRY'] = 'true'
|
||||
|
||||
telemetry._safe_telemetry_operation(mock_operation)
|
||||
mock_operation.assert_not_called()
|
||||
|
||||
|
||||
def test_telemetry_disable_with_multiple_instances():
|
||||
"""Test that multiple telemetry instances respect dynamically changed env vars."""
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
with patch("crewai.telemetry.telemetry.TracerProvider"):
|
||||
telemetry1 = Telemetry()
|
||||
assert telemetry1.ready is True
|
||||
|
||||
os.environ['CREWAI_DISABLE_TELEMETRY'] = 'true'
|
||||
|
||||
telemetry2 = Telemetry()
|
||||
assert telemetry2 is telemetry1
|
||||
assert telemetry2.ready is True
|
||||
|
||||
mock_operation = MagicMock()
|
||||
telemetry2._safe_telemetry_operation(mock_operation)
|
||||
mock_operation.assert_not_called()
|
||||
|
||||
|
||||
def test_telemetry_otel_sdk_disabled_after_creation():
|
||||
"""Test that OTEL_SDK_DISABLED also works when set after singleton creation."""
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
with patch("crewai.telemetry.telemetry.TracerProvider"):
|
||||
telemetry = Telemetry()
|
||||
assert telemetry.ready is True
|
||||
|
||||
mock_operation = MagicMock()
|
||||
telemetry._safe_telemetry_operation(mock_operation)
|
||||
mock_operation.assert_called_once()
|
||||
|
||||
mock_operation.reset_mock()
|
||||
|
||||
os.environ['OTEL_SDK_DISABLED'] = 'true'
|
||||
|
||||
telemetry._safe_telemetry_operation(mock_operation)
|
||||
mock_operation.assert_not_called()
|
||||
|
||||
215 tests/test_acceptance_criteria_validation.py (new file)
@@ -0,0 +1,215 @@
|
||||
"""Unit tests for acceptance criteria validation feature at task level."""
|
||||
|
||||
import pytest
|
||||
from unittest.mock import MagicMock, patch, call
|
||||
from typing import List, Tuple
|
||||
|
||||
from crewai.agents.crew_agent_executor import CrewAgentExecutor
|
||||
from crewai.agents.agent_state import AgentState
|
||||
from crewai.tools.agent_tools.scratchpad_tool import ScratchpadTool
|
||||
from crewai.agents.parser import AgentFinish
|
||||
from crewai.utilities import Printer
|
||||
from crewai.llm import LLM
|
||||
|
||||
|
||||
class TestAcceptanceCriteriaValidation:
|
||||
"""Test suite for task-level acceptance criteria validation functionality."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Set up test fixtures."""
|
||||
self.mock_llm = MagicMock(spec=LLM)
|
||||
self.mock_agent = MagicMock()
|
||||
self.mock_task = MagicMock()
|
||||
self.mock_crew = MagicMock()
|
||||
self.mock_tools_handler = MagicMock()
|
||||
|
||||
# Set up agent attributes
|
||||
self.mock_agent.role = "Test Agent"
|
||||
self.mock_agent.reasoning = True
|
||||
self.mock_agent.verbose = False
|
||||
self.mock_agent.reasoning_interval = None
|
||||
self.mock_agent.adaptive_reasoning = False
|
||||
|
||||
# Create executor
|
||||
self.executor = CrewAgentExecutor(
|
||||
llm=self.mock_llm,
|
||||
task=self.mock_task,
|
||||
crew=self.mock_crew,
|
||||
agent=self.mock_agent,
|
||||
prompt={},
|
||||
max_iter=10,
|
||||
tools=[],
|
||||
tools_names="",
|
||||
stop_words=[],
|
||||
tools_description="",
|
||||
tools_handler=self.mock_tools_handler,
|
||||
callbacks=[]
|
||||
)
|
||||
|
||||
# Set up agent state with acceptance criteria
|
||||
self.executor.agent_state = AgentState(task_id="test-task-id")
|
||||
self.executor.agent_state.acceptance_criteria = [
|
||||
"Include all required information",
|
||||
"Format output properly",
|
||||
"Provide complete analysis"
|
||||
]
|
||||
|
||||
# Mock printer
|
||||
self.executor._printer = MagicMock(spec=Printer)
|
||||
|
||||
def test_validate_acceptance_criteria_all_met(self):
|
||||
"""Test validation when all acceptance criteria are met."""
|
||||
output = "Complete output with all information, properly formatted, with full analysis"
|
||||
|
||||
# Configure LLM to return all criteria met
|
||||
self.mock_llm.call.return_value = '''{
|
||||
"1": "MET",
|
||||
"2": "MET",
|
||||
"3": "MET"
|
||||
}'''
|
||||
|
||||
is_valid, unmet_criteria = self.executor._validate_acceptance_criteria(output)
|
||||
|
||||
assert is_valid is True
|
||||
assert unmet_criteria == []
|
||||
assert self.mock_llm.call.call_count == 1
|
||||
|
||||
def test_validate_acceptance_criteria_some_unmet(self):
|
||||
"""Test validation when some criteria are not met."""
|
||||
output = "Partial output missing formatting"
|
||||
|
||||
# Configure LLM to return mixed results
|
||||
self.mock_llm.call.return_value = '''{
|
||||
"1": "MET",
|
||||
"2": "NOT MET: Missing proper formatting",
|
||||
"3": "NOT MET: Analysis incomplete"
|
||||
}'''
|
||||
|
||||
is_valid, unmet_criteria = self.executor._validate_acceptance_criteria(output)
|
||||
|
||||
assert is_valid is False
|
||||
assert len(unmet_criteria) == 2
|
||||
assert "Format output properly" in unmet_criteria
|
||||
assert "Provide complete analysis" in unmet_criteria
|
||||
|
||||
def test_create_criteria_retry_prompt_with_scratchpad(self):
|
||||
"""Test retry prompt creation when scratchpad has data."""
|
||||
# Set up scratchpad tool with data
|
||||
self.executor.scratchpad_tool = ScratchpadTool()
|
||||
self.executor.agent_state.scratchpad = {
|
||||
"research_data": {"key": "value"},
|
||||
"analysis_results": ["item1", "item2"]
|
||||
}
|
||||
|
||||
# Set up task details
|
||||
self.mock_task.description = "Analyze research data and provide insights"
|
||||
self.mock_task.expected_output = "A comprehensive report with analysis and recommendations"
|
||||
|
||||
unmet_criteria = ["Include specific examples", "Add recommendations"]
|
||||
|
||||
prompt = self.executor._create_criteria_retry_prompt(unmet_criteria)
|
||||
|
||||
# Verify prompt content with new format
|
||||
assert "VALIDATION FAILED" in prompt
|
||||
assert "YOU CANNOT PROVIDE A FINAL ANSWER YET" in prompt
|
||||
assert "ORIGINAL TASK:" in prompt
|
||||
assert "Analyze research data" in prompt
|
||||
assert "EXPECTED OUTPUT:" in prompt
|
||||
assert "comprehensive report" in prompt
|
||||
assert "Include specific examples" in prompt
|
||||
assert "Add recommendations" in prompt
|
||||
assert "Access Scratchpad Memory" in prompt
|
||||
assert "'research_data'" in prompt
|
||||
assert "'analysis_results'" in prompt
|
||||
assert "Action:" in prompt
|
||||
assert "Action Input:" in prompt
|
||||
assert "CONTINUE WITH TOOL USAGE NOW" in prompt
|
||||
assert "DO NOT ATTEMPT ANOTHER FINAL ANSWER" in prompt
|
||||
|
||||
def test_create_criteria_retry_prompt_without_scratchpad(self):
|
||||
"""Test retry prompt creation when no scratchpad data exists."""
|
||||
unmet_criteria = ["Add more detail"]
|
||||
|
||||
prompt = self.executor._create_criteria_retry_prompt(unmet_criteria)
|
||||
|
||||
assert "Add more detail" in prompt
|
||||
assert "VALIDATION FAILED" in prompt
|
||||
assert "📦 YOUR SCRATCHPAD CONTAINS DATA" not in prompt
|
||||
|
||||
@patch('crewai.agents.crew_agent_executor.get_llm_response')
|
||||
@patch('crewai.agents.crew_agent_executor.process_llm_response')
|
||||
def test_invoke_loop_blocks_incomplete_final_answer(self, mock_process, mock_get_response):
|
||||
"""Test that invoke loop blocks incomplete final answers."""
|
||||
# Set up conditions
|
||||
self.executor.agent_state.acceptance_criteria = ["Complete all sections"]
|
||||
|
||||
# First attempt returns incomplete final answer
|
||||
incomplete_answer = AgentFinish(
|
||||
thought="Done",
|
||||
output="Exploring potential follow-up tasks!",
|
||||
text="Final Answer: Exploring potential follow-up tasks!"
|
||||
)
|
||||
|
||||
# After retry, return complete answer
|
||||
complete_answer = AgentFinish(
|
||||
thought="Done with all sections",
|
||||
output="Complete output with all sections addressed",
|
||||
text="Final Answer: Complete output with all sections addressed"
|
||||
)
|
||||
|
||||
# Configure mocks
|
||||
mock_process.side_effect = [incomplete_answer, complete_answer]
|
||||
mock_get_response.return_value = "response"
|
||||
|
||||
# Configure validation
|
||||
self.mock_llm.call.side_effect = [
|
||||
'{"1": "NOT MET: Missing required sections"}', # First validation fails
|
||||
'{"1": "MET"}' # Second validation passes
|
||||
]
|
||||
|
||||
# Execute
|
||||
result = self.executor._invoke_loop()
|
||||
|
||||
# Verify
|
||||
assert result == complete_answer
|
||||
assert self.mock_llm.call.call_count == 2 # Two validation attempts
|
||||
assert mock_process.call_count == 2 # Two processing attempts
|
||||
|
||||
# Verify error message was shown
|
||||
self._verify_validation_messages_shown()
|
||||
|
||||
def test_validation_happens_on_every_final_answer_attempt(self):
|
||||
"""Test that validation happens on every AgentFinish attempt."""
|
||||
self.executor.agent_state.acceptance_criteria = ["Complete all sections"]
|
||||
|
||||
# Configure LLM to always return criteria not met
|
||||
self.mock_llm.call.return_value = '{"1": "NOT MET: Missing required sections"}'
|
||||
|
||||
output = "Incomplete output"
|
||||
|
||||
# Validate multiple times - each should trigger validation
|
||||
for _ in range(3):
|
||||
is_valid, unmet_criteria = self.executor._validate_acceptance_criteria(output)
|
||||
assert is_valid is False
|
||||
assert len(unmet_criteria) == 1
|
||||
|
||||
# Verify validation was called every time
|
||||
assert self.mock_llm.call.call_count == 3
|
||||
|
||||
def _verify_validation_messages_shown(self):
|
||||
"""Helper to verify validation messages were displayed."""
|
||||
print_calls = self.executor._printer.print.call_args_list
|
||||
|
||||
# Check for validation message
|
||||
validation_msg_shown = any(
|
||||
"Validating acceptance criteria" in str(call)
|
||||
for call in print_calls
|
||||
)
|
||||
|
||||
# Check for failure message
|
||||
failure_msg_shown = any(
|
||||
"Cannot finalize" in str(call)
|
||||
for call in print_calls
|
||||
)
|
||||
|
||||
assert validation_msg_shown or failure_msg_shown
|
||||
@@ -1,167 +0,0 @@
|
||||
import pytest
|
||||
from unittest.mock import patch, MagicMock
|
||||
from crewai.utilities.events.event_listener import event_listener
|
||||
|
||||
|
||||
class TestFlowHumanInputIntegration:
|
||||
"""Test integration between Flow execution and human input functionality."""
|
||||
|
||||
def test_console_formatter_pause_resume_methods(self):
|
||||
"""Test that ConsoleFormatter pause/resume methods work correctly."""
|
||||
formatter = event_listener.formatter
|
||||
|
||||
original_paused_state = formatter._live_paused
|
||||
|
||||
try:
|
||||
formatter._live_paused = False
|
||||
|
||||
formatter.pause_live_updates()
|
||||
assert formatter._live_paused
|
||||
|
||||
formatter.resume_live_updates()
|
||||
assert not formatter._live_paused
|
||||
finally:
|
||||
formatter._live_paused = original_paused_state
|
||||
|
||||
@patch('builtins.input', return_value='')
|
||||
def test_human_input_pauses_flow_updates(self, mock_input):
|
||||
"""Test that human input pauses Flow status updates."""
|
||||
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
|
||||
|
||||
executor = CrewAgentExecutorMixin()
|
||||
executor.crew = MagicMock()
|
||||
executor.crew._train = False
|
||||
executor._printer = MagicMock()
|
||||
|
||||
formatter = event_listener.formatter
|
||||
|
||||
original_paused_state = formatter._live_paused
|
||||
|
||||
try:
|
||||
formatter._live_paused = False
|
||||
|
||||
with patch.object(formatter, 'pause_live_updates') as mock_pause, \
|
||||
patch.object(formatter, 'resume_live_updates') as mock_resume:
|
||||
|
||||
result = executor._ask_human_input("Test result")
|
||||
|
||||
mock_pause.assert_called_once()
|
||||
mock_resume.assert_called_once()
|
||||
mock_input.assert_called_once()
|
||||
assert result == ''
|
||||
finally:
|
||||
formatter._live_paused = original_paused_state
|
||||
|
||||
@patch('builtins.input', side_effect=['feedback', ''])
|
||||
def test_multiple_human_input_rounds(self, mock_input):
|
||||
"""Test multiple rounds of human input with Flow status management."""
|
||||
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
|
||||
|
||||
executor = CrewAgentExecutorMixin()
|
||||
executor.crew = MagicMock()
|
||||
executor.crew._train = False
|
||||
executor._printer = MagicMock()
|
||||
|
||||
formatter = event_listener.formatter
|
||||
|
||||
original_paused_state = formatter._live_paused
|
||||
|
||||
try:
|
||||
pause_calls = []
|
||||
resume_calls = []
|
||||
|
||||
def track_pause():
|
||||
pause_calls.append(True)
|
||||
|
||||
def track_resume():
|
||||
resume_calls.append(True)
|
||||
|
||||
with patch.object(formatter, 'pause_live_updates', side_effect=track_pause), \
|
||||
patch.object(formatter, 'resume_live_updates', side_effect=track_resume):
|
||||
|
||||
result1 = executor._ask_human_input("Test result 1")
|
||||
assert result1 == 'feedback'
|
||||
|
||||
result2 = executor._ask_human_input("Test result 2")
|
||||
assert result2 == ''
|
||||
|
||||
assert len(pause_calls) == 2
|
||||
assert len(resume_calls) == 2
|
||||
finally:
|
||||
formatter._live_paused = original_paused_state
|
||||
|
||||
def test_pause_resume_with_no_live_session(self):
|
||||
"""Test pause/resume methods handle case when no Live session exists."""
|
||||
formatter = event_listener.formatter
|
||||
|
||||
original_live = formatter._live
|
||||
original_paused_state = formatter._live_paused
|
||||
|
||||
try:
|
||||
formatter._live = None
|
||||
formatter._live_paused = False
|
||||
|
||||
formatter.pause_live_updates()
|
||||
formatter.resume_live_updates()
|
||||
|
||||
assert not formatter._live_paused
|
||||
finally:
|
||||
formatter._live = original_live
|
||||
formatter._live_paused = original_paused_state
|
||||
|
||||
def test_pause_resume_exception_handling(self):
|
||||
"""Test that resume is called even if exception occurs during human input."""
|
||||
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
|
||||
|
||||
executor = CrewAgentExecutorMixin()
|
||||
executor.crew = MagicMock()
|
||||
executor.crew._train = False
|
||||
executor._printer = MagicMock()
|
||||
|
||||
formatter = event_listener.formatter
|
||||
|
||||
original_paused_state = formatter._live_paused
|
||||
|
||||
try:
|
||||
with patch.object(formatter, 'pause_live_updates') as mock_pause, \
|
||||
patch.object(formatter, 'resume_live_updates') as mock_resume, \
|
||||
patch('builtins.input', side_effect=KeyboardInterrupt("Test exception")):
|
||||
|
||||
with pytest.raises(KeyboardInterrupt):
|
||||
executor._ask_human_input("Test result")
|
||||
|
||||
mock_pause.assert_called_once()
|
||||
mock_resume.assert_called_once()
|
||||
finally:
|
||||
formatter._live_paused = original_paused_state
|
||||
|
||||
def test_training_mode_human_input(self):
|
||||
"""Test human input in training mode."""
|
||||
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
|
||||
|
||||
executor = CrewAgentExecutorMixin()
|
||||
executor.crew = MagicMock()
|
||||
executor.crew._train = True
|
||||
executor._printer = MagicMock()
|
||||
|
||||
formatter = event_listener.formatter
|
||||
|
||||
original_paused_state = formatter._live_paused
|
||||
|
||||
try:
|
||||
with patch.object(formatter, 'pause_live_updates') as mock_pause, \
|
||||
patch.object(formatter, 'resume_live_updates') as mock_resume, \
|
||||
patch('builtins.input', return_value='training feedback'):
|
||||
|
||||
result = executor._ask_human_input("Test result")
|
||||
|
||||
mock_pause.assert_called_once()
|
||||
mock_resume.assert_called_once()
|
||||
assert result == 'training feedback'
|
||||
|
||||
executor._printer.print.assert_called()
|
||||
call_args = [call[1]['content'] for call in executor._printer.print.call_args_list]
|
||||
training_prompt_found = any('TRAINING MODE' in content for content in call_args)
|
||||
assert training_prompt_found
|
||||
finally:
|
||||
formatter._live_paused = original_paused_state
|
||||
@@ -1,4 +1,4 @@
|
||||
from collections import defaultdict
|
||||
import asyncio
|
||||
from typing import cast
|
||||
from unittest.mock import Mock
|
||||
|
||||
@@ -313,108 +313,5 @@ def test_sets_parent_flow_when_inside_flow():
|
||||
nonlocal captured_agent
|
||||
captured_agent = source
|
||||
|
||||
flow.kickoff()
|
||||
result = flow.kickoff()
|
||||
assert captured_agent.parent_flow is flow
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_guardrail_is_called_using_string():
|
||||
guardrail_events = defaultdict(list)
|
||||
from crewai.utilities.events import LLMGuardrailCompletedEvent, LLMGuardrailStartedEvent
|
||||
with crewai_event_bus.scoped_handlers():
|
||||
@crewai_event_bus.on(LLMGuardrailStartedEvent)
|
||||
def capture_guardrail_started(source, event):
|
||||
guardrail_events["started"].append(event)
|
||||
|
||||
@crewai_event_bus.on(LLMGuardrailCompletedEvent)
|
||||
def capture_guardrail_completed(source, event):
|
||||
guardrail_events["completed"].append(event)
|
||||
|
||||
agent = Agent(
|
||||
role="Sports Analyst",
|
||||
goal="Gather information about the best soccer players",
|
||||
backstory="""You are an expert at gathering and organizing information. You carefully collect details and present them in a structured way.""",
|
||||
guardrail="""Only include Brazilian players, both women and men""",
|
||||
)
|
||||
|
||||
result = agent.kickoff(messages="Top 10 best players in the world?")
|
||||
|
||||
assert len(guardrail_events['started']) == 2
|
||||
assert len(guardrail_events['completed']) == 2
|
||||
assert not guardrail_events['completed'][0].success
|
||||
assert guardrail_events['completed'][1].success
|
||||
assert "Here are the top 10 best soccer players in the world, focusing exclusively on Brazilian players" in result.raw
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_guardrail_is_called_using_callable():
|
||||
guardrail_events = defaultdict(list)
|
||||
from crewai.utilities.events import LLMGuardrailCompletedEvent, LLMGuardrailStartedEvent
|
||||
with crewai_event_bus.scoped_handlers():
|
||||
@crewai_event_bus.on(LLMGuardrailStartedEvent)
|
||||
def capture_guardrail_started(source, event):
|
||||
guardrail_events["started"].append(event)
|
||||
|
||||
@crewai_event_bus.on(LLMGuardrailCompletedEvent)
|
||||
def capture_guardrail_completed(source, event):
|
||||
guardrail_events["completed"].append(event)
|
||||
|
||||
agent = Agent(
|
||||
role="Sports Analyst",
|
||||
goal="Gather information about the best soccer players",
|
||||
backstory="""You are an expert at gathering and organizing information. You carefully collect details and present them in a structured way.""",
|
||||
guardrail=lambda output: (True, "Pelé - Santos, 1958"),
|
||||
)
|
||||
|
||||
result = agent.kickoff(messages="Top 1 best players in the world?")
|
||||
|
||||
assert len(guardrail_events['started']) == 1
|
||||
assert len(guardrail_events['completed']) == 1
|
||||
assert guardrail_events['completed'][0].success
|
||||
assert "Pelé - Santos, 1958" in result.raw
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_guardrail_reached_attempt_limit():
|
||||
guardrail_events = defaultdict(list)
|
||||
from crewai.utilities.events import LLMGuardrailCompletedEvent, LLMGuardrailStartedEvent
|
||||
with crewai_event_bus.scoped_handlers():
|
||||
@crewai_event_bus.on(LLMGuardrailStartedEvent)
|
||||
def capture_guardrail_started(source, event):
|
||||
guardrail_events["started"].append(event)
|
||||
|
||||
@crewai_event_bus.on(LLMGuardrailCompletedEvent)
|
||||
def capture_guardrail_completed(source, event):
|
||||
guardrail_events["completed"].append(event)
|
||||
|
||||
agent = Agent(
|
||||
role="Sports Analyst",
|
||||
goal="Gather information about the best soccer players",
|
||||
backstory="""You are an expert at gathering and organizing information. You carefully collect details and present them in a structured way.""",
|
||||
guardrail=lambda output: (False, "You are not allowed to include Brazilian players"),
|
||||
guardrail_max_retries=2,
|
||||
)
|
||||
|
||||
with pytest.raises(Exception, match="Agent's guardrail failed validation after 2 retries"):
|
||||
agent.kickoff(messages="Top 10 best players in the world?")
|
||||
|
||||
assert len(guardrail_events['started']) == 3 # 2 retries + 1 initial call
|
||||
assert len(guardrail_events['completed']) == 3 # 2 retries + 1 initial call
|
||||
assert not guardrail_events['completed'][0].success
|
||||
assert not guardrail_events['completed'][1].success
|
||||
assert not guardrail_events['completed'][2].success
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_agent_output_when_guardrail_returns_base_model():
|
||||
class Player(BaseModel):
|
||||
name: str
|
||||
country: str
|
||||
|
||||
agent = Agent(
|
||||
role="Sports Analyst",
|
||||
goal="Gather information about the best soccer players",
|
||||
backstory="""You are an expert at gathering and organizing information. You carefully collect details and present them in a structured way.""",
|
||||
guardrail=lambda output: (True, Player(name="Lionel Messi", country="Argentina")),
|
||||
)
|
||||
|
||||
result = agent.kickoff(messages="Top 10 best players in the world?")
|
||||
|
||||
assert result.pydantic == Player(name="Lionel Messi", country="Argentina")
|
||||
|
||||
@@ -1,189 +0,0 @@
|
||||
"""Test that crewAI lite installation works with minimal dependencies."""
|
||||
|
||||
import pytest
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch, Mock
|
||||
|
||||
|
||||
def test_core_imports_work_without_optional_deps():
|
||||
"""Test that core crewAI functionality can be imported without optional dependencies."""
|
||||
|
||||
try:
|
||||
from crewai import Agent, Crew, Task, LLM
|
||||
from crewai.lite_agent import LiteAgent
|
||||
from crewai.process import Process
|
||||
assert Agent is not None
|
||||
assert Crew is not None
|
||||
assert Task is not None
|
||||
assert LLM is not None
|
||||
assert LiteAgent is not None
|
||||
assert Process is not None
|
||||
except ImportError as e:
|
||||
pytest.fail(f"Core imports should work without optional dependencies: {e}")
|
||||
|
||||
|
||||
def test_optional_memory_import_error():
|
||||
"""Test that memory functionality raises helpful error without chromadb."""
|
||||
with patch.dict('sys.modules', {'chromadb': None}):
|
||||
with patch('crewai.memory.storage.rag_storage.CHROMADB_AVAILABLE', False):
|
||||
from crewai.memory.storage.rag_storage import RAGStorage
|
||||
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
RAGStorage("test")
|
||||
|
||||
assert "ChromaDB is required" in str(exc_info.value)
|
||||
assert "crewai[memory]" in str(exc_info.value)
|
||||
|
||||
|
||||
def test_optional_knowledge_import_error():
|
||||
"""Test that knowledge functionality raises helpful error without dependencies."""
|
||||
with patch.dict('sys.modules', {'chromadb': None}):
|
||||
with patch('crewai.knowledge.storage.knowledge_storage.CHROMADB_AVAILABLE', False):
|
||||
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
|
||||
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
KnowledgeStorage()
|
||||
|
||||
assert "ChromaDB is required" in str(exc_info.value)
|
||||
assert "crewai[knowledge]" in str(exc_info.value)
|
||||
|
||||
|
||||
def test_optional_pdf_import_error():
|
||||
"""Test that PDF knowledge source raises helpful error without pdfplumber."""
|
||||
with patch.dict('sys.modules', {'pdfplumber': None}):
|
||||
with patch('crewai.knowledge.source.pdf_knowledge_source.PDFPLUMBER_AVAILABLE', False):
|
||||
from crewai.knowledge.source.pdf_knowledge_source import PDFKnowledgeSource
|
||||
|
||||
knowledge_dir = Path("knowledge/tmp")
|
||||
knowledge_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
test_file = knowledge_dir / "test.pdf"
|
||||
test_file.touch()
|
||||
|
||||
try:
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
PDFKnowledgeSource(file_paths=["tmp/test.pdf"])
|
||||
|
||||
assert "pdfplumber is required" in str(exc_info.value)
|
||||
assert "crewai[knowledge]" in str(exc_info.value)
|
||||
finally:
|
||||
if test_file.exists():
|
||||
test_file.unlink()
|
||||
if knowledge_dir.exists() and not any(knowledge_dir.iterdir()):
|
||||
knowledge_dir.rmdir()
|
||||
|
||||
|
||||
def test_optional_visualization_import_error():
|
||||
"""Test that flow visualization raises helpful error without pyvis."""
|
||||
with patch.dict('sys.modules', {'pyvis': None}):
|
||||
with patch('crewai.flow.flow_visualizer.PYVIS_AVAILABLE', False):
|
||||
from crewai.flow.flow_visualizer import plot_flow
|
||||
|
||||
mock_flow = Mock()
|
||||
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
plot_flow(mock_flow)
|
||||
|
||||
assert "Pyvis is required" in str(exc_info.value)
|
||||
assert "crewai[visualization]" in str(exc_info.value)
|
||||
|
||||
|
||||
def test_telemetry_disabled_without_opentelemetry():
|
||||
"""Test that telemetry is disabled gracefully without opentelemetry."""
|
||||
from crewai.telemetry.telemetry import Telemetry
|
||||
|
||||
telemetry = Telemetry()
|
||||
|
||||
assert isinstance(telemetry.ready, bool)
|
||||
assert isinstance(telemetry._is_telemetry_disabled(), bool)
|
||||
|
||||
|
||||
def test_lite_agent_works_without_optional_deps():
|
||||
"""Test that LiteAgent can be created and used without optional dependencies."""
|
||||
from crewai.lite_agent import LiteAgent
|
||||
from crewai import LLM
|
||||
from unittest.mock import Mock
|
||||
|
||||
mock_llm = Mock(spec=LLM)
|
||||
mock_llm.call.return_value = "Test response"
|
||||
mock_llm.model = "test-model"
|
||||
|
||||
agent = LiteAgent(
|
||||
role="Test Agent",
|
||||
goal="Test Goal",
|
||||
backstory="Test Backstory",
|
||||
llm=mock_llm,
|
||||
verbose=False
|
||||
)
|
||||
|
||||
assert agent.role == "Test Agent"
|
||||
assert agent.goal == "Test Goal"
|
||||
assert agent.backstory == "Test Backstory"
|
||||
|
||||
|
||||
def test_basic_crew_creation_without_optional_deps():
|
||||
"""Test that basic Crew can be created without optional dependencies."""
|
||||
from crewai import Agent, Crew, Task, LLM
|
||||
from unittest.mock import Mock
|
||||
|
||||
mock_llm = Mock(spec=LLM)
|
||||
mock_llm.call.return_value = "Test response"
|
||||
mock_llm.model = "test-model"
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test Goal",
|
||||
backstory="Test Backstory",
|
||||
llm=mock_llm,
|
||||
verbose=False
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Test task",
|
||||
agent=agent,
|
||||
expected_output="Test output"
|
||||
)
|
||||
|
||||
crew = Crew(
|
||||
agents=[agent],
|
||||
tasks=[task],
|
||||
verbose=False
|
||||
)
|
||||
|
||||
assert crew.agents[0].role == "Test Agent"
|
||||
assert crew.tasks[0].description == "Test task"
|
||||
|
||||
|
||||
def test_core_functionality_without_optional_deps():
|
||||
"""Test that core crewAI functionality works without optional dependencies."""
|
||||
from crewai import Agent, Task, Crew, LLM
|
||||
from unittest.mock import Mock
|
||||
|
||||
mock_llm = Mock(spec=LLM)
|
||||
mock_llm.call.return_value = "Test response"
|
||||
mock_llm.model = "test-model"
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test Goal",
|
||||
backstory="Test Backstory",
|
||||
llm=mock_llm,
|
||||
verbose=False
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Test task description",
|
||||
agent=agent,
|
||||
expected_output="Test expected output"
|
||||
)
|
||||
|
||||
crew = Crew(
|
||||
agents=[agent],
|
||||
tasks=[task],
|
||||
verbose=False
|
||||
)
|
||||
|
||||
assert agent.role == "Test Agent"
|
||||
assert task.description == "Test task description"
|
||||
assert len(crew.agents) == 1
|
||||
assert len(crew.tasks) == 1
|
||||
@@ -1,135 +0,0 @@
|
||||
"""Test optional dependency handling for crewAI lite version."""
|
||||
|
||||
import pytest
|
||||
from unittest.mock import patch, Mock
|
||||
|
||||
|
||||
class TestOptionalDependencies:
|
||||
"""Test that optional dependencies are handled gracefully."""
|
||||
|
||||
def test_chromadb_import_error_memory(self):
|
||||
"""Test that memory functionality raises helpful error without chromadb."""
|
||||
with patch.dict('sys.modules', {'chromadb': None}):
|
||||
with patch('crewai.memory.storage.rag_storage.CHROMADB_AVAILABLE', False):
|
||||
from crewai.memory.storage.rag_storage import RAGStorage
|
||||
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
RAGStorage("test")
|
||||
|
||||
assert "ChromaDB is required" in str(exc_info.value)
|
||||
assert "crewai[memory]" in str(exc_info.value)
|
||||
|
||||
def test_chromadb_import_error_knowledge(self):
|
||||
"""Test that knowledge functionality raises helpful error without chromadb."""
|
||||
with patch.dict('sys.modules', {'chromadb': None}):
|
||||
with patch('crewai.knowledge.storage.knowledge_storage.CHROMADB_AVAILABLE', False):
|
||||
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
|
||||
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
KnowledgeStorage()
|
||||
|
||||
assert "ChromaDB is required" in str(exc_info.value)
|
||||
assert "crewai[knowledge]" in str(exc_info.value)
|
||||
|
||||
def test_pdfplumber_import_error(self):
|
||||
"""Test that PDF knowledge source raises helpful error without pdfplumber."""
|
||||
with patch.dict('sys.modules', {'pdfplumber': None}):
|
||||
with patch('crewai.knowledge.source.pdf_knowledge_source.PDFPLUMBER_AVAILABLE', False):
|
||||
from crewai.knowledge.source.pdf_knowledge_source import PDFKnowledgeSource
|
||||
|
||||
from pathlib import Path
|
||||
knowledge_dir = Path("knowledge")
|
||||
knowledge_dir.mkdir(exist_ok=True)
|
||||
test_file = knowledge_dir / "test.pdf"
|
||||
test_file.touch()
|
||||
|
||||
try:
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
PDFKnowledgeSource(file_paths=["test.pdf"])
|
||||
|
||||
assert "pdfplumber is required" in str(exc_info.value)
|
||||
assert "crewai[knowledge]" in str(exc_info.value)
|
||||
finally:
|
||||
if test_file.exists():
|
||||
test_file.unlink()
|
||||
if knowledge_dir.exists() and not any(knowledge_dir.iterdir()):
|
||||
knowledge_dir.rmdir()
|
||||
|
||||
def test_pyvis_import_error(self):
|
||||
"""Test that flow visualization raises helpful error without pyvis."""
|
||||
with patch.dict('sys.modules', {'pyvis': None}):
|
||||
with patch('crewai.flow.flow_visualizer.PYVIS_AVAILABLE', False):
|
||||
from crewai.flow.flow_visualizer import plot_flow
|
||||
|
||||
mock_flow = Mock()
|
||||
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
plot_flow(mock_flow)
|
||||
|
||||
assert "Pyvis is required" in str(exc_info.value)
|
||||
assert "crewai[visualization]" in str(exc_info.value)
|
||||
|
||||
def test_auth0_import_error(self):
|
||||
"""Test that authentication raises helpful error without auth0."""
|
||||
with patch.dict('sys.modules', {'auth0': None}):
|
||||
with patch('crewai.cli.authentication.utils.AUTH0_AVAILABLE', False):
|
||||
from crewai.cli.authentication.utils import validate_token
|
||||
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
validate_token("fake_token")
|
||||
|
||||
assert "Auth0 is required" in str(exc_info.value)
|
||||
assert "crewai[auth]" in str(exc_info.value)
|
||||
|
||||
def test_aisuite_import_error(self):
|
||||
"""Test that AISuite LLM raises helpful error without aisuite."""
|
||||
with patch.dict('sys.modules', {'aisuite': None}):
|
||||
with patch('crewai.llms.third_party.ai_suite.AISUITE_AVAILABLE', False):
|
||||
from crewai.llms.third_party.ai_suite import AISuiteLLM
|
||||
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
AISuiteLLM("test-model")
|
||||
|
||||
assert "AISuite is required" in str(exc_info.value)
|
||||
assert "crewai[llm-integrations]" in str(exc_info.value)
|
||||
|
||||
def test_opentelemetry_graceful_degradation(self):
|
||||
"""Test that telemetry degrades gracefully without opentelemetry."""
|
||||
from crewai.telemetry.telemetry import Telemetry
|
||||
Telemetry._reset_instance()
|
||||
|
||||
with patch.dict('sys.modules', {'opentelemetry': None}):
|
||||
with patch('crewai.telemetry.telemetry.OPENTELEMETRY_AVAILABLE', False):
|
||||
from crewai.telemetry.telemetry import Telemetry
|
||||
|
||||
telemetry = Telemetry()
|
||||
|
||||
assert not telemetry.ready
|
||||
assert telemetry._is_telemetry_disabled()
|
||||
assert not telemetry._should_execute_telemetry()
|
||||
|
||||
def test_embedding_configurator_import_error(self):
|
||||
"""Test that embedding configurator raises helpful error without chromadb."""
|
||||
with patch.dict('sys.modules', {'chromadb': None}):
|
||||
with patch('crewai.utilities.embedding_configurator.CHROMADB_AVAILABLE', False):
|
||||
from crewai.utilities.embedding_configurator import EmbeddingConfigurator
|
||||
|
||||
configurator = EmbeddingConfigurator()
|
||||
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
configurator.configure_embedder(None)
|
||||
|
||||
assert "ChromaDB is required" in str(exc_info.value)
|
||||
assert "crewai[memory]" in str(exc_info.value) or "crewai[knowledge]" in str(exc_info.value)
|
||||
|
||||
def test_docling_import_error(self):
|
||||
"""Test that docling knowledge source raises helpful error without docling."""
|
||||
with patch.dict('sys.modules', {'docling': None}):
|
||||
with patch('crewai.knowledge.source.crew_docling_source.DOCLING_AVAILABLE', False):
|
||||
from crewai.knowledge.source.crew_docling_source import CrewDoclingSource
|
||||
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
CrewDoclingSource()
|
||||
|
||||
assert "docling package is required" in str(exc_info.value)
|
||||
assert "uv add docling" in str(exc_info.value)
|
||||
tests/tools/agent_tools/test_scratchpad_tool.py (new file, 176 lines)
@@ -0,0 +1,176 @@
|
||||
"""Unit tests for the ScratchpadTool."""
|
||||
|
||||
import pytest
|
||||
from crewai.tools.agent_tools.scratchpad_tool import ScratchpadTool, ScratchpadToolSchema
|
||||
|
||||
|
||||
class TestScratchpadTool:
|
||||
"""Test suite for the ScratchpadTool functionality."""
|
||||
|
||||
def test_schema_description(self):
|
||||
"""Test that the schema has helpful description."""
|
||||
schema = ScratchpadToolSchema
|
||||
key_field = schema.model_fields['key']
|
||||
|
||||
assert "Example:" in key_field.description
|
||||
assert '{"key":' in key_field.description
|
||||
|
||||
def test_empty_scratchpad_error_message(self):
|
||||
"""Test error message when scratchpad is empty."""
|
||||
tool = ScratchpadTool()
|
||||
result = tool._run(key="nonexistent")
|
||||
|
||||
assert "❌ SCRATCHPAD IS EMPTY" in result
|
||||
assert "does not contain any data yet" in result
|
||||
assert "Try executing other tools first" in result
|
||||
assert "💡 TIP:" in result
|
||||
assert "search, read, or fetch operations" in result
|
||||
|
||||
def test_key_not_found_error_message(self):
|
||||
"""Test error message when key is not found."""
|
||||
tool = ScratchpadTool(scratchpad_data={
|
||||
"existing_key": "value",
|
||||
"another_key": {"data": "test"}
|
||||
})
|
||||
|
||||
result = tool._run(key="wrong_key")
|
||||
|
||||
assert "❌ KEY NOT FOUND: 'wrong_key'" in result
|
||||
assert "📦 AVAILABLE KEYS IN SCRATCHPAD:" in result
|
||||
assert "- 'existing_key'" in result
|
||||
assert "- 'another_key'" in result
|
||||
assert '✅ CORRECT USAGE EXAMPLE:' in result
|
||||
assert 'Action: Access Scratchpad Memory' in result
|
||||
assert 'Action Input: {"key": "existing_key"}' in result
|
||||
assert "⚠️ IMPORTANT:" in result
|
||||
assert "Keys are case-sensitive and must match EXACTLY" in result
|
||||
|
||||
def test_successful_retrieval_string(self):
|
||||
"""Test successful retrieval of string data."""
|
||||
tool = ScratchpadTool(scratchpad_data={
|
||||
"message": "Hello, World!"
|
||||
})
|
||||
|
||||
result = tool._run(key="message")
|
||||
assert "✅ Successfully retrieved data for key 'message':" in result
|
||||
assert "Hello, World!" in result
|
||||
|
||||
def test_successful_retrieval_dict(self):
|
||||
"""Test successful retrieval of dictionary data."""
|
||||
test_dict = {"name": "John", "age": 30}
|
||||
tool = ScratchpadTool(scratchpad_data={
|
||||
"user_data": test_dict
|
||||
})
|
||||
|
||||
result = tool._run(key="user_data")
|
||||
assert "✅ Successfully retrieved data for key 'user_data':" in result
|
||||
assert '"name": "John"' in result
|
||||
assert '"age": 30' in result
|
||||
|
||||
def test_successful_retrieval_list(self):
|
||||
"""Test successful retrieval of list data."""
|
||||
test_list = ["item1", "item2", "item3"]
|
||||
tool = ScratchpadTool(scratchpad_data={
|
||||
"items": test_list
|
||||
})
|
||||
|
||||
result = tool._run(key="items")
|
||||
assert "✅ Successfully retrieved data for key 'items':" in result
|
||||
assert '"item1"' in result
|
||||
assert '"item2"' in result
|
||||
assert '"item3"' in result
|
||||
|
||||
def test_tool_description_empty(self):
|
||||
"""Test tool description when scratchpad is empty."""
|
||||
tool = ScratchpadTool()
|
||||
|
||||
assert "HOW TO USE THIS TOOL:" in tool.description
|
||||
assert 'Example: {"key": "email_data"}' in tool.description
|
||||
assert "📝 STATUS: Scratchpad is currently empty" in tool.description
|
||||
|
||||
def test_tool_description_with_data(self):
|
||||
"""Test tool description when scratchpad has data."""
|
||||
tool = ScratchpadTool(scratchpad_data={
|
||||
"emails": ["email1@test.com", "email2@test.com"],
|
||||
"results": {"count": 5, "status": "success"},
|
||||
"api_key": "secret_key_123"
|
||||
})
|
||||
|
||||
desc = tool.description
|
||||
|
||||
# Check basic structure
|
||||
assert "HOW TO USE THIS TOOL:" in desc
|
||||
assert "📦 AVAILABLE DATA IN SCRATCHPAD:" in desc
|
||||
assert "💡 EXAMPLE USAGE:" in desc
|
||||
|
||||
# Check key listings
|
||||
assert "📌 'emails': list of 2 items" in desc
|
||||
assert "📌 'results': dict with 2 items" in desc
|
||||
assert "📌 'api_key': string (14 chars)" in desc
|
||||
|
||||
# Check example uses first key
|
||||
assert 'Action Input: {"key": "emails"}' in desc
|
||||
|
||||
def test_update_scratchpad(self):
|
||||
"""Test updating scratchpad data."""
|
||||
tool = ScratchpadTool()
|
||||
|
||||
# Initially empty
|
||||
assert not tool.scratchpad_data
|
||||
|
||||
# Update with data
|
||||
new_data = {"test": "value"}
|
||||
tool.update_scratchpad(new_data)
|
||||
|
||||
assert tool.scratchpad_data == new_data
|
||||
assert "📌 'test': string (5 chars)" in tool.description
|
||||
|
||||
def test_complex_data_preview(self):
|
||||
"""Test preview generation for complex data structures."""
|
||||
tool = ScratchpadTool(scratchpad_data={
|
||||
"nested_dict": {
|
||||
"data": ["item1", "item2", "item3"]
|
||||
},
|
||||
"empty_list": [],
|
||||
"boolean_value": True,
|
||||
"number": 42
|
||||
})
|
||||
|
||||
desc = tool.description
|
||||
|
||||
# Special case for dict with 'data' key containing list
|
||||
assert "📌 'nested_dict': list of 3 items" in desc
|
||||
assert "📌 'empty_list': list of 0 items" in desc
|
||||
assert "📌 'boolean_value': bool" in desc
|
||||
assert "📌 'number': int" in desc
|
||||
|
||||
def test_similar_key_suggestion(self):
|
||||
"""Test that similar keys are suggested when a wrong key is used."""
|
||||
tool = ScratchpadTool(scratchpad_data={
|
||||
"email_search_results": ["email1", "email2"],
|
||||
"email_details": {"id": "123"},
|
||||
"user_preferences": {"theme": "dark"}
|
||||
})
|
||||
|
||||
# Test partial match
|
||||
result = tool._run(key="email")
|
||||
assert "🔍 Did you mean one of these?" in result
|
||||
|
||||
# Check that similar keys are in the suggestions
|
||||
# Extract just the "Did you mean" section
|
||||
did_you_mean_section = result.split("🔍 Did you mean one of these?")[1].split("✅ CORRECT USAGE EXAMPLE:")[0]
|
||||
assert "- 'email_search_results'" in did_you_mean_section
|
||||
assert "- 'email_details'" in did_you_mean_section
|
||||
assert "- 'user_preferences'" not in did_you_mean_section
|
||||
|
||||
# But user_preferences should still be in the full list
|
||||
assert "- 'user_preferences'" in result
|
||||
|
||||
# Test case-insensitive match
|
||||
result = tool._run(key="EMAIL_DETAILS")
|
||||
assert "🔍 Did you mean one of these?" in result
|
||||
assert "- 'email_details'" in result
|
||||
|
||||
# Test no similar keys
|
||||
result = tool._run(key="completely_different")
|
||||
assert "🔍 Did you mean one of these?" not in result
|
||||
@@ -25,206 +25,122 @@ def schema_class():
|
||||
return TestSchema
|
||||
|
||||
|
||||
def test_initialization(basic_function, schema_class):
|
||||
"""Test basic initialization of CrewStructuredTool"""
|
||||
tool = CrewStructuredTool(
|
||||
name="test_tool",
|
||||
description="Test tool description",
|
||||
func=basic_function,
|
||||
args_schema=schema_class,
|
||||
)
|
||||
class InternalCrewStructuredTool:
|
||||
def test_initialization(self, basic_function, schema_class):
|
||||
"""Test basic initialization of CrewStructuredTool"""
|
||||
tool = CrewStructuredTool(
|
||||
name="test_tool",
|
||||
description="Test tool description",
|
||||
func=basic_function,
|
||||
args_schema=schema_class,
|
||||
)
|
||||
|
||||
assert tool.name == "test_tool"
|
||||
assert tool.description == "Test tool description"
|
||||
assert tool.func == basic_function
|
||||
assert tool.args_schema == schema_class
|
||||
assert tool.name == "test_tool"
|
||||
assert tool.description == "Test tool description"
|
||||
assert tool.func == basic_function
|
||||
assert tool.args_schema == schema_class
|
||||
|
||||
def test_from_function(basic_function):
|
||||
"""Test creating tool from function"""
|
||||
tool = CrewStructuredTool.from_function(
|
||||
func=basic_function, name="test_tool", description="Test description"
|
||||
)
|
||||
def test_from_function(self, basic_function):
|
||||
"""Test creating tool from function"""
|
||||
tool = CrewStructuredTool.from_function(
|
||||
func=basic_function, name="test_tool", description="Test description"
|
||||
)
|
||||
|
||||
assert tool.name == "test_tool"
|
||||
assert tool.description == "Test description"
|
||||
assert tool.func == basic_function
|
||||
assert isinstance(tool.args_schema, type(BaseModel))
|
||||
assert tool.name == "test_tool"
|
||||
assert tool.description == "Test description"
|
||||
assert tool.func == basic_function
|
||||
assert isinstance(tool.args_schema, type(BaseModel))
|
||||
|
||||
def test_validate_function_signature(basic_function, schema_class):
|
||||
"""Test function signature validation"""
|
||||
tool = CrewStructuredTool(
|
||||
name="test_tool",
|
||||
description="Test tool",
|
||||
func=basic_function,
|
||||
args_schema=schema_class,
|
||||
)
|
||||
def test_validate_function_signature(self, basic_function, schema_class):
|
||||
"""Test function signature validation"""
|
||||
tool = CrewStructuredTool(
|
||||
name="test_tool",
|
||||
description="Test tool",
|
||||
func=basic_function,
|
||||
args_schema=schema_class,
|
||||
)
|
||||
|
||||
# Should not raise any exceptions
|
||||
tool._validate_function_signature()
|
||||
# Should not raise any exceptions
|
||||
tool._validate_function_signature()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ainvoke(basic_function):
|
||||
"""Test asynchronous invocation"""
|
||||
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
|
||||
@pytest.mark.asyncio
|
||||
async def test_ainvoke(self, basic_function):
|
||||
"""Test asynchronous invocation"""
|
||||
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
|
||||
|
||||
result = await tool.ainvoke(input={"param1": "test"})
|
||||
assert result == "test 0"
|
||||
result = await tool.ainvoke(input={"param1": "test"})
|
||||
assert result == "test 0"
|
||||
|
||||
def test_parse_args_dict(basic_function):
|
||||
"""Test parsing dictionary arguments"""
|
||||
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
|
||||
def test_parse_args_dict(self, basic_function):
|
||||
"""Test parsing dictionary arguments"""
|
||||
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
|
||||
|
||||
parsed = tool._parse_args({"param1": "test", "param2": 42})
|
||||
assert parsed["param1"] == "test"
|
||||
assert parsed["param2"] == 42
|
||||
parsed = tool._parse_args({"param1": "test", "param2": 42})
|
||||
assert parsed["param1"] == "test"
|
||||
assert parsed["param2"] == 42
|
||||
|
||||
def test_parse_args_string(basic_function):
|
||||
"""Test parsing string arguments"""
|
||||
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
|
||||
def test_parse_args_string(self, basic_function):
|
||||
"""Test parsing string arguments"""
|
||||
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
|
||||
|
||||
parsed = tool._parse_args('{"param1": "test", "param2": 42}')
|
||||
assert parsed["param1"] == "test"
|
||||
assert parsed["param2"] == 42
|
||||
parsed = tool._parse_args('{"param1": "test", "param2": 42}')
|
||||
assert parsed["param1"] == "test"
|
||||
assert parsed["param2"] == 42
|
||||
|
||||
def test_complex_types():
|
||||
"""Test handling of complex parameter types"""
|
||||
def test_complex_types(self):
|
||||
"""Test handling of complex parameter types"""
|
||||
|
||||
def complex_func(nested: dict, items: list) -> str:
|
||||
"""Process complex types."""
|
||||
return f"Processed {len(items)} items with {len(nested)} nested keys"
|
||||
def complex_func(nested: dict, items: list) -> str:
|
||||
"""Process complex types."""
|
||||
return f"Processed {len(items)} items with {len(nested)} nested keys"
|
||||
|
||||
tool = CrewStructuredTool.from_function(
|
||||
func=complex_func, name="test_tool", description="Test complex types"
|
||||
)
|
||||
result = tool.invoke({"nested": {"key": "value"}, "items": [1, 2, 3]})
|
||||
assert result == "Processed 3 items with 1 nested keys"
|
||||
tool = CrewStructuredTool.from_function(
|
||||
func=complex_func, name="test_tool", description="Test complex types"
|
||||
)
|
||||
result = tool.invoke({"nested": {"key": "value"}, "items": [1, 2, 3]})
|
||||
assert result == "Processed 3 items with 1 nested keys"
|
||||
|
||||
def test_schema_inheritance():
|
||||
"""Test tool creation with inherited schema"""
|
||||
def test_schema_inheritance(self):
|
||||
"""Test tool creation with inherited schema"""
|
||||
|
||||
def extended_func(base_param: str, extra_param: int) -> str:
|
||||
"""Test function with inherited schema."""
|
||||
return f"{base_param} {extra_param}"
|
||||
def extended_func(base_param: str, extra_param: int) -> str:
|
||||
"""Test function with inherited schema."""
|
||||
return f"{base_param} {extra_param}"
|
||||
|
||||
class BaseSchema(BaseModel):
|
||||
base_param: str
|
||||
class BaseSchema(BaseModel):
|
||||
base_param: str
|
||||
|
||||
class ExtendedSchema(BaseSchema):
|
||||
extra_param: int
|
||||
class ExtendedSchema(BaseSchema):
|
||||
extra_param: int
|
||||
|
||||
tool = CrewStructuredTool.from_function(
|
||||
func=extended_func, name="test_tool", args_schema=ExtendedSchema
|
||||
)
|
||||
tool = CrewStructuredTool.from_function(
|
||||
func=extended_func, name="test_tool", args_schema=ExtendedSchema
|
||||
)
|
||||
|
||||
result = tool.invoke({"base_param": "test", "extra_param": 42})
|
||||
assert result == "test 42"
|
||||
result = tool.invoke({"base_param": "test", "extra_param": 42})
|
||||
assert result == "test 42"
|
||||
|
||||
def test_default_values_in_schema():
|
||||
"""Test handling of default values in schema"""
|
||||
def test_default_values_in_schema(self):
|
||||
"""Test handling of default values in schema"""
|
||||
|
||||
def default_func(
|
||||
required_param: str,
|
||||
optional_param: str = "default",
|
||||
nullable_param: Optional[int] = None,
|
||||
) -> str:
|
||||
"""Test function with default values."""
|
||||
return f"{required_param} {optional_param} {nullable_param}"
|
||||
def default_func(
|
||||
required_param: str,
|
||||
optional_param: str = "default",
|
||||
nullable_param: Optional[int] = None,
|
||||
) -> str:
|
||||
"""Test function with default values."""
|
||||
return f"{required_param} {optional_param} {nullable_param}"
|
||||
|
||||
tool = CrewStructuredTool.from_function(
|
||||
func=default_func, name="test_tool", description="Test defaults"
|
||||
)
|
||||
tool = CrewStructuredTool.from_function(
|
||||
func=default_func, name="test_tool", description="Test defaults"
|
||||
)
|
||||
|
||||
# Test with minimal parameters
|
||||
result = tool.invoke({"required_param": "test"})
|
||||
assert result == "test default None"
|
||||
# Test with minimal parameters
|
||||
result = tool.invoke({"required_param": "test"})
|
||||
assert result == "test default None"
|
||||
|
||||
# Test with all parameters
|
||||
result = tool.invoke(
|
||||
{"required_param": "test", "optional_param": "custom", "nullable_param": 42}
|
||||
)
|
||||
assert result == "test custom 42"
|
||||
|
||||
@pytest.fixture
|
||||
def custom_tool_decorator():
|
||||
from crewai.tools import tool
|
||||
|
||||
@tool("custom_tool", result_as_answer=True)
|
||||
async def custom_tool():
|
||||
"""This is a tool that does something"""
|
||||
return "Hello World from Custom Tool"
|
||||
|
||||
return custom_tool
|
||||
|
||||
@pytest.fixture
|
||||
def custom_tool():
|
||||
from crewai.tools import BaseTool
|
||||
|
||||
class CustomTool(BaseTool):
|
||||
name: str = "my_tool"
|
||||
description: str = "This is a tool that does something"
|
||||
result_as_answer: bool = True
|
||||
|
||||
async def _run(self):
|
||||
return "Hello World from Custom Tool"
|
||||
|
||||
return CustomTool()
|
||||
|
||||
def build_simple_crew(tool):
|
||||
from crewai import Agent, Task, Crew
|
||||
|
||||
agent1 = Agent(role="Simple role", goal="Simple goal", backstory="Simple backstory", tools=[tool])
|
||||
|
||||
say_hi_task = Task(
|
||||
description="Use the custom tool result as answer.", agent=agent1, expected_output="Use the tool result"
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent1], tasks=[say_hi_task])
|
||||
return crew
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_async_tool_using_within_isolated_crew(custom_tool):
|
||||
crew = build_simple_crew(custom_tool)
|
||||
result = crew.kickoff()
|
||||
|
||||
assert result.raw == "Hello World from Custom Tool"
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_async_tool_using_decorator_within_isolated_crew(custom_tool_decorator):
|
||||
crew = build_simple_crew(custom_tool_decorator)
|
||||
result = crew.kickoff()
|
||||
|
||||
assert result.raw == "Hello World from Custom Tool"
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_async_tool_within_flow(custom_tool):
|
||||
from crewai.flow.flow import Flow
|
||||
|
||||
class StructuredExampleFlow(Flow):
|
||||
from crewai.flow.flow import start
|
||||
|
||||
@start()
|
||||
async def start(self):
|
||||
crew = build_simple_crew(custom_tool)
|
||||
result = await crew.kickoff_async()
|
||||
return result
|
||||
|
||||
flow = StructuredExampleFlow()
|
||||
result = flow.kickoff()
|
||||
assert result.raw == "Hello World from Custom Tool"
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_async_tool_using_decorator_within_flow(custom_tool_decorator):
|
||||
from crewai.flow.flow import Flow
|
||||
|
||||
class StructuredExampleFlow(Flow):
|
||||
from crewai.flow.flow import start
|
||||
@start()
|
||||
async def start(self):
|
||||
crew = build_simple_crew(custom_tool_decorator)
|
||||
result = await crew.kickoff_async()
|
||||
return result
|
||||
|
||||
flow = StructuredExampleFlow()
|
||||
result = flow.kickoff()
|
||||
assert result.raw == "Hello World from Custom Tool"
|
||||
# Test with all parameters
|
||||
result = tool.invoke(
|
||||
{"required_param": "test", "optional_param": "custom", "nullable_param": 42}
|
||||
)
|
||||
assert result == "test custom 42"
|
||||
|
||||
@@ -1,116 +0,0 @@
|
||||
from unittest.mock import MagicMock, patch
from rich.tree import Tree
from rich.live import Live
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter


class TestConsoleFormatterPauseResume:
    """Test ConsoleFormatter pause/resume functionality."""

    def test_pause_live_updates_with_active_session(self):
        """Test pausing when Live session is active."""
        formatter = ConsoleFormatter()

        mock_live = MagicMock(spec=Live)
        formatter._live = mock_live
        formatter._live_paused = False

        formatter.pause_live_updates()

        mock_live.stop.assert_called_once()
        assert formatter._live_paused

    def test_pause_live_updates_when_already_paused(self):
        """Test pausing when already paused does nothing."""
        formatter = ConsoleFormatter()

        mock_live = MagicMock(spec=Live)
        formatter._live = mock_live
        formatter._live_paused = True

        formatter.pause_live_updates()

        mock_live.stop.assert_not_called()
        assert formatter._live_paused

    def test_pause_live_updates_with_no_session(self):
        """Test pausing when no Live session exists."""
        formatter = ConsoleFormatter()

        formatter._live = None
        formatter._live_paused = False

        formatter.pause_live_updates()

        assert formatter._live_paused

    def test_resume_live_updates_when_paused(self):
        """Test resuming when paused."""
        formatter = ConsoleFormatter()

        formatter._live_paused = True

        formatter.resume_live_updates()

        assert not formatter._live_paused

    def test_resume_live_updates_when_not_paused(self):
        """Test resuming when not paused does nothing."""
        formatter = ConsoleFormatter()

        formatter._live_paused = False

        formatter.resume_live_updates()

        assert not formatter._live_paused

    def test_print_after_resume_restarts_live_session(self):
        """Test that printing a Tree after resume creates new Live session."""
        formatter = ConsoleFormatter()

        formatter._live_paused = True
        formatter._live = None

        formatter.resume_live_updates()
        assert not formatter._live_paused

        tree = Tree("Test")

        with patch('crewai.utilities.events.utils.console_formatter.Live') as mock_live_class:
            mock_live_instance = MagicMock()
            mock_live_class.return_value = mock_live_instance

            formatter.print(tree)

            mock_live_class.assert_called_once()
            mock_live_instance.start.assert_called_once()
            assert formatter._live == mock_live_instance

    def test_multiple_pause_resume_cycles(self):
        """Test multiple pause/resume cycles work correctly."""
        formatter = ConsoleFormatter()

        mock_live = MagicMock(spec=Live)
        formatter._live = mock_live
        formatter._live_paused = False

        formatter.pause_live_updates()
        assert formatter._live_paused
        mock_live.stop.assert_called_once()
        assert formatter._live is None  # Live session should be cleared

        formatter.resume_live_updates()
        assert not formatter._live_paused

        formatter.pause_live_updates()
        assert formatter._live_paused

        formatter.resume_live_updates()
        assert not formatter._live_paused

    def test_pause_resume_state_initialization(self):
        """Test that _live_paused is properly initialized."""
        formatter = ConsoleFormatter()

        assert hasattr(formatter, '_live_paused')
        assert not formatter._live_paused
uv.lock (generated file, 422 lines changed)
@@ -1,37 +1,96 @@
|
||||
version = 1
|
||||
requires-python = ">=3.10, <3.14"
|
||||
resolution-markers = [
|
||||
"python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_version < '0'",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'",
|
||||
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'",
|
||||
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'",
|
||||
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'",
|
||||
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version < '3.11' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'",
|
||||
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version < '3.11' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version < '3.11' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'",
|
||||
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version < '3.11' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
|
||||
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
|
||||
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
|
||||
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
|
||||
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
|
||||
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
|
||||
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
|
||||
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
|
||||
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
|
||||
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
|
||||
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
|
||||
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
|
||||
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
|
||||
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
|
||||
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
|
||||
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
|
||||
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version < '3.11' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
|
||||
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version < '3.11' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
|
||||
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
|
||||
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version < '3.11' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
|
||||
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version < '3.11' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
|
||||
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
|
||||
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -354,7 +413,7 @@ name = "build"
|
||||
version = "1.2.2.post1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "colorama", marker = "(os_name == 'nt' and platform_machine != 'aarch64' and sys_platform == 'linux') or (os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux')" },
|
||||
{ name = "colorama", marker = "os_name == 'nt'" },
|
||||
{ name = "importlib-metadata", marker = "python_full_version < '3.10.2'" },
|
||||
{ name = "packaging" },
|
||||
{ name = "pyproject-hooks" },
|
||||
@@ -615,7 +674,7 @@ name = "click"
|
||||
version = "8.1.8"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||
{ name = "colorama", marker = "platform_system == 'Windows'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 }
|
||||
wheels = [
|
||||
@@ -666,21 +725,31 @@ wheels = [
[[package]]
name = "crewai"
version = "0.130.0"
version = "0.126.0"
source = { editable = "." }
dependencies = [
{ name = "appdirs" },
{ name = "auth0-python" },
{ name = "blinker" },
{ name = "chromadb" },
{ name = "click" },
{ name = "instructor" },
{ name = "json-repair" },
{ name = "json5" },
{ name = "jsonref" },
{ name = "litellm" },
{ name = "onnxruntime" },
{ name = "openai" },
{ name = "openpyxl" },
{ name = "opentelemetry-api" },
{ name = "opentelemetry-exporter-otlp-proto-http" },
{ name = "opentelemetry-sdk" },
{ name = "pdfplumber" },
{ name = "pydantic" },
{ name = "python-dotenv" },
{ name = "pyvis" },
{ name = "regex" },
{ name = "tokenizers" },
{ name = "tomli" },
{ name = "tomli-w" },
{ name = "uv" },
@@ -693,54 +762,15 @@ agentops = [
aisuite = [
{ name = "aisuite" },
]
all = [
{ name = "agentops" },
{ name = "aisuite" },
{ name = "auth0-python" },
{ name = "chromadb" },
{ name = "crewai-tools" },
{ name = "docling" },
{ name = "mem0ai" },
{ name = "onnxruntime" },
{ name = "openpyxl" },
{ name = "opentelemetry-api" },
{ name = "opentelemetry-exporter-otlp-proto-http" },
{ name = "opentelemetry-sdk" },
{ name = "pandas" },
{ name = "pdfplumber" },
{ name = "pyvis" },
{ name = "tiktoken" },
{ name = "tokenizers" },
]
auth = [
{ name = "auth0-python" },
]
docling = [
{ name = "docling" },
]
embeddings = [
{ name = "tiktoken" },
]
knowledge = [
{ name = "chromadb" },
{ name = "docling" },
{ name = "openpyxl" },
{ name = "pdfplumber" },
{ name = "tiktoken" },
]
llm-integrations = [
{ name = "aisuite" },
{ name = "onnxruntime" },
]
mem0 = [
{ name = "mem0ai" },
]
memory = [
{ name = "chromadb" },
{ name = "mem0ai" },
{ name = "tiktoken" },
{ name = "tokenizers" },
]
openpyxl = [
{ name = "openpyxl" },
]
@@ -750,17 +780,9 @@ pandas = [
pdfplumber = [
{ name = "pdfplumber" },
]
telemetry = [
{ name = "opentelemetry-api" },
{ name = "opentelemetry-exporter-otlp-proto-http" },
{ name = "opentelemetry-sdk" },
]
tools = [
{ name = "crewai-tools" },
]
visualization = [
{ name = "pyvis" },
]

[package.dev-dependencies]
dev = [
@@ -786,59 +808,36 @@ dev = [
[package.metadata]
requires-dist = [
{ name = "agentops", marker = "extra == 'agentops'", specifier = ">=0.3.0" },
{ name = "agentops", marker = "extra == 'all'", specifier = ">=0.3.0" },
{ name = "aisuite", marker = "extra == 'aisuite'", specifier = ">=0.1.10" },
{ name = "aisuite", marker = "extra == 'all'", specifier = ">=0.1.10" },
{ name = "aisuite", marker = "extra == 'llm-integrations'", specifier = ">=0.1.10" },
{ name = "appdirs", specifier = ">=1.4.4" },
{ name = "auth0-python", marker = "extra == 'all'", specifier = ">=4.7.1" },
{ name = "auth0-python", marker = "extra == 'auth'", specifier = ">=4.7.1" },
{ name = "auth0-python", specifier = ">=4.7.1" },
{ name = "blinker", specifier = ">=1.9.0" },
{ name = "chromadb", marker = "extra == 'all'", specifier = ">=0.5.23" },
{ name = "chromadb", marker = "extra == 'knowledge'", specifier = ">=0.5.23" },
{ name = "chromadb", marker = "extra == 'memory'", specifier = ">=0.5.23" },
{ name = "chromadb", specifier = ">=0.5.23" },
{ name = "click", specifier = ">=8.1.7" },
{ name = "crewai-tools", marker = "extra == 'all'", specifier = "~=0.47.1" },
{ name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.47.1" },
{ name = "docling", marker = "extra == 'all'", specifier = ">=2.12.0" },
{ name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.46.0" },
{ name = "docling", marker = "extra == 'docling'", specifier = ">=2.12.0" },
{ name = "docling", marker = "extra == 'knowledge'", specifier = ">=2.12.0" },
{ name = "instructor", specifier = ">=1.3.3" },
{ name = "json-repair", specifier = ">=0.25.2" },
{ name = "json5", specifier = ">=0.10.0" },
{ name = "jsonref", specifier = ">=1.1.0" },
{ name = "litellm", specifier = "==1.72.0" },
{ name = "mem0ai", marker = "extra == 'all'", specifier = ">=0.1.94" },
{ name = "litellm", specifier = "==1.68.0" },
{ name = "mem0ai", marker = "extra == 'mem0'", specifier = ">=0.1.94" },
{ name = "mem0ai", marker = "extra == 'memory'", specifier = ">=0.1.94" },
{ name = "onnxruntime", marker = "extra == 'all'", specifier = "==1.22.0" },
{ name = "onnxruntime", marker = "extra == 'llm-integrations'", specifier = "==1.22.0" },
{ name = "onnxruntime", specifier = "==1.22.0" },
{ name = "openai", specifier = ">=1.13.3" },
{ name = "openpyxl", marker = "extra == 'all'", specifier = ">=3.1.5" },
{ name = "openpyxl", marker = "extra == 'knowledge'", specifier = ">=3.1.5" },
{ name = "openpyxl", specifier = ">=3.1.5" },
{ name = "openpyxl", marker = "extra == 'openpyxl'", specifier = ">=3.1.5" },
{ name = "opentelemetry-api", marker = "extra == 'all'", specifier = ">=1.30.0" },
{ name = "opentelemetry-api", marker = "extra == 'telemetry'", specifier = ">=1.30.0" },
{ name = "opentelemetry-exporter-otlp-proto-http", marker = "extra == 'all'", specifier = ">=1.30.0" },
{ name = "opentelemetry-exporter-otlp-proto-http", marker = "extra == 'telemetry'", specifier = ">=1.30.0" },
{ name = "opentelemetry-sdk", marker = "extra == 'all'", specifier = ">=1.30.0" },
{ name = "opentelemetry-sdk", marker = "extra == 'telemetry'", specifier = ">=1.30.0" },
{ name = "pandas", marker = "extra == 'all'", specifier = ">=2.2.3" },
{ name = "opentelemetry-api", specifier = ">=1.30.0" },
{ name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },
{ name = "opentelemetry-sdk", specifier = ">=1.30.0" },
{ name = "pandas", marker = "extra == 'pandas'", specifier = ">=2.2.3" },
{ name = "pdfplumber", marker = "extra == 'all'", specifier = ">=0.11.4" },
{ name = "pdfplumber", marker = "extra == 'knowledge'", specifier = ">=0.11.4" },
{ name = "pdfplumber", specifier = ">=0.11.4" },
{ name = "pdfplumber", marker = "extra == 'pdfplumber'", specifier = ">=0.11.4" },
{ name = "pydantic", specifier = ">=2.4.2" },
{ name = "python-dotenv", specifier = ">=1.0.0" },
{ name = "pyvis", marker = "extra == 'all'", specifier = ">=0.3.2" },
{ name = "pyvis", marker = "extra == 'visualization'", specifier = ">=0.3.2" },
{ name = "pyvis", specifier = ">=0.3.2" },
{ name = "regex", specifier = ">=2024.9.11" },
{ name = "tiktoken", marker = "extra == 'all'", specifier = "~=0.8.0" },
{ name = "tiktoken", marker = "extra == 'embeddings'", specifier = "~=0.8.0" },
{ name = "tiktoken", marker = "extra == 'knowledge'", specifier = "~=0.8.0" },
{ name = "tiktoken", marker = "extra == 'memory'", specifier = "~=0.8.0" },
{ name = "tokenizers", marker = "extra == 'all'", specifier = ">=0.20.3" },
{ name = "tokenizers", marker = "extra == 'memory'", specifier = ">=0.20.3" },
{ name = "tokenizers", specifier = ">=0.20.3" },
{ name = "tomli", specifier = ">=2.0.2" },
{ name = "tomli-w", specifier = ">=1.1.0" },
{ name = "uv", specifier = ">=0.4.25" },
@@ -867,7 +866,7 @@ dev = [
|
||||
|
||||
[[package]]
|
||||
name = "crewai-tools"
|
||||
version = "0.47.1"
|
||||
version = "0.46.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "chromadb" },
|
||||
@@ -881,11 +880,10 @@ dependencies = [
|
||||
{ name = "pyright" },
|
||||
{ name = "pytube" },
|
||||
{ name = "requests" },
|
||||
{ name = "tiktoken" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0e/cd/fc5a96be8c108febcc2c767714e3ec9b70cca9be8e6b29bba7c1874fb6d2/crewai_tools-0.47.1.tar.gz", hash = "sha256:4de5ebf320aeae317ffabe2e4704b98b5d791f663196871fb5ad2e7bbea14a82", size = 921418 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0d/9e/69109f5d5b398b2edeccec1055e93cdceac3becd04407bcce97de6557180/crewai_tools-0.46.0.tar.gz", hash = "sha256:c8f89247199d528c77db4b450a1ca781b5d32405982467baf516ede4b2045bd1", size = 913715 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e3/2c/05d9fa584d9d814c0c8c4c3793df572222417695fe3d716f14bc274376d6/crewai_tools-0.47.1-py3-none-any.whl", hash = "sha256:4dc9bb0a08e3afa33c6b9efb163e47181801a7906be7241977426e6d3dec0a05", size = 606305 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ab/62/0b68637ce820fbb0385495bd6d75ceb27de53f060df5417f293419826481/crewai_tools-0.46.0-py3-none-any.whl", hash = "sha256:f8e60723869ca36ede7b43dcc1491ebefc93410a972d97b7b0ce59c4bd7a826b", size = 606190 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2247,7 +2245,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "litellm"
|
||||
version = "1.72.0"
|
||||
version = "1.68.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "aiohttp" },
|
||||
@@ -2262,9 +2260,9 @@ dependencies = [
|
||||
{ name = "tiktoken" },
|
||||
{ name = "tokenizers" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/55/d3/f1a8c9c9ffdd3bab1bc410254c8140b1346f05a01b8c6b37c48b56abb4b0/litellm-1.72.0.tar.gz", hash = "sha256:135022b9b8798f712ffa84e71ac419aa4310f1d0a70f79dd2007f7ef3a381e43", size = 8082337 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ba/22/138545b646303ca3f4841b69613c697b9d696322a1386083bb70bcbba60b/litellm-1.68.0.tar.gz", hash = "sha256:9fb24643db84dfda339b64bafca505a2eef857477afbc6e98fb56512c24dbbfa", size = 7314051 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c2/98/bec08f5a3e504013db6f52b5fd68375bd92b463c91eb454d5a6460e957af/litellm-1.72.0-py3-none-any.whl", hash = "sha256:88360a7ae9aa9c96278ae1bb0a459226f909e711c5d350781296d0640386a824", size = 7979630 },
|
||||
{ url = "https://files.pythonhosted.org/packages/10/af/1e344bc8aee41445272e677d802b774b1f8b34bdc3bb5697ba30f0fb5d52/litellm-1.68.0-py3-none-any.whl", hash = "sha256:3bca38848b1a5236b11aa6b70afa4393b60880198c939e582273f51a542d4759", size = 7684460 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2515,7 +2513,7 @@ version = "1.6.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "click" },
|
||||
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||
{ name = "colorama", marker = "platform_system == 'Windows'" },
|
||||
{ name = "ghp-import" },
|
||||
{ name = "jinja2" },
|
||||
{ name = "markdown" },
|
||||
@@ -2696,7 +2694,7 @@ version = "2.10.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pygments" },
|
||||
{ name = "pywin32", marker = "sys_platform == 'win32'" },
|
||||
{ name = "pywin32", marker = "platform_system == 'Windows'" },
|
||||
{ name = "tqdm" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/3a/93/80ac75c20ce54c785648b4ed363c88f148bf22637e10c9863db4fbe73e74/mpire-2.10.2.tar.gz", hash = "sha256:f66a321e93fadff34585a4bfa05e95bd946cf714b442f51c529038eb45773d97", size = 271270 }
|
||||
@@ -2972,7 +2970,7 @@ name = "nvidia-cudnn-cu12"
|
||||
version = "9.5.1.17"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
|
||||
{ name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/99/93/a201a12d3ec1caa8c6ac34c1c2f9eeb696b886f0c36ff23c638b46603bd0/nvidia_cudnn_cu12-9.5.1.17-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:9fd4584468533c61873e5fda8ca41bac3a38bcb2d12350830c69b0a96a7e4def", size = 570523509 },
|
||||
@@ -2984,7 +2982,7 @@ name = "nvidia-cufft-cu12"
|
||||
version = "11.3.0.4"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/1f/37/c50d2b2f2c07e146776389e3080f4faf70bcc4fa6e19d65bb54ca174ebc3/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d16079550df460376455cba121db6564089176d9bac9e4f360493ca4741b22a6", size = 200164144 },
|
||||
@@ -3018,9 +3016,9 @@ name = "nvidia-cusolver-cu12"
|
||||
version = "11.7.1.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
|
||||
{ name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
|
||||
{ name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
|
||||
{ name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/93/17/dbe1aa865e4fdc7b6d4d0dd308fdd5aaab60f939abfc0ea1954eac4fb113/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0ce237ef60acde1efc457335a2ddadfd7610b892d94efee7b776c64bb1cac9e0", size = 157833628 },
|
||||
@@ -3034,7 +3032,7 @@ name = "nvidia-cusparse-cu12"
|
||||
version = "12.5.4.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/eb/eb/6681efd0aa7df96b4f8067b3ce7246833dd36830bb4cec8896182773db7d/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d25b62fb18751758fe3c93a4a08eff08effedfe4edf1c6bb5afd0890fe88f887", size = 216451147 },
|
||||
@@ -3125,7 +3123,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "openai"
|
||||
version = "1.78.0"
|
||||
version = "1.68.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
@@ -3137,9 +3135,9 @@ dependencies = [
|
||||
{ name = "tqdm" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d1/7c/7c48bac9be52680e41e99ae7649d5da3a0184cd94081e028897f9005aa03/openai-1.78.0.tar.gz", hash = "sha256:254aef4980688468e96cbddb1f348ed01d274d02c64c6c69b0334bf001fb62b3", size = 442652 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/3f/6b/6b002d5d38794645437ae3ddb42083059d556558493408d39a0fcea608bc/openai-1.68.2.tar.gz", hash = "sha256:b720f0a95a1dbe1429c0d9bb62096a0d98057bcda82516f6e8af10284bdd5b19", size = 413429 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cc/41/d64a6c56d0ec886b834caff7a07fc4d43e1987895594b144757e7a6b90d7/openai-1.78.0-py3-none-any.whl", hash = "sha256:1ade6a48cd323ad8a7715e7e1669bb97a17e1a5b8a916644261aaef4bf284778", size = 680407 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fd/34/cebce15f64eb4a3d609a83ac3568d43005cc9a1cba9d7fde5590fd415423/openai-1.68.2-py3-none-any.whl", hash = "sha256:24484cb5c9a33b58576fdc5acf0e5f92603024a4e39d0b99793dfa1eb14c2b36", size = 606073 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3595,7 +3593,7 @@ name = "portalocker"
|
||||
version = "2.10.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pywin32", marker = "sys_platform == 'win32'" },
|
||||
{ name = "pywin32", marker = "platform_system == 'Windows'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ed/d3/c6c64067759e87af98cc668c1cc75171347d0f1577fab7ca3749134e3cd4/portalocker-2.10.1.tar.gz", hash = "sha256:ef1bf844e878ab08aee7e40184156e1151f228f103aa5c6bd0724cc330960f8f", size = 40891 }
|
||||
wheels = [
|
||||
@@ -5250,23 +5248,23 @@ dependencies = [
|
||||
{ name = "fsspec" },
|
||||
{ name = "jinja2" },
|
||||
{ name = "networkx" },
|
||||
{ name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cufile-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-cufile-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "setuptools", marker = "python_full_version >= '3.12'" },
|
||||
{ name = "sympy" },
|
||||
{ name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "triton", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
wheels = [
|
||||
@@ -5329,7 +5327,7 @@ name = "tqdm"
|
||||
version = "4.66.5"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||
{ name = "colorama", marker = "platform_system == 'Windows'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/58/83/6ba9844a41128c62e810fddddd72473201f3eacde02046066142a2d96cc5/tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad", size = 169504 }
|
||||
wheels = [
|
||||
@@ -5371,7 +5369,7 @@ name = "triton"
|
||||
version = "3.3.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "setuptools", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
|
||||
{ name = "setuptools", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/76/04/d54d3a6d077c646624dc9461b0059e23fd5d30e0dbe67471e3654aec81f9/triton-3.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fad99beafc860501d7fcc1fb7045d9496cbe2c882b1674640304949165a916e7", size = 156441993 },
|
||||
@@ -5535,21 +5533,51 @@ name = "vcrpy"
|
||||
version = "5.1.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
resolution-markers = [
|
||||
"python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'",
|
||||
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'",
|
||||
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version < '3.11' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
|
||||
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'",
|
||||
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version < '3.11' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
|
||||
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
|
||||
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
|
||||
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version < '3.11' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_python_implementation == 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_python_implementation == 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
dependencies = [
{ name = "pyyaml", marker = "platform_python_implementation == 'PyPy'" },
@@ -5566,21 +5594,51 @@ name = "vcrpy"
version = "7.0.0"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'",
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'",
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'",
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version < '3.11' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'darwin'",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'darwin'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version < '3.11' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_python_implementation != 'PyPy' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_python_implementation != 'PyPy' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
dependencies = [
{ name = "pyyaml", marker = "platform_python_implementation != 'PyPy'" },
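The `resolution-markers` entries above are standard PEP 508 environment markers: uv records, for each locked entry, the interpreter and platform combinations its resolution applies to (here, one `vcrpy` entry for PyPy and one for all other implementations). As a rough, self-contained sketch (not part of the lock file or of uv itself, and assuming the third-party `packaging` library is available), such a marker can be evaluated against a concrete environment like this:

```python
# Minimal sketch: evaluating one of the PEP 508 markers recorded above with the
# `packaging` library. The marker string is copied from the vcrpy entry; the
# environment values passed below are illustrative, not taken from the lock file.
from packaging.markers import Marker

marker = Marker(
    "python_full_version < '3.11' and platform_python_implementation != 'PyPy' "
    "and sys_platform == 'darwin'"
)

# With no argument, evaluate() checks the marker against the running interpreter.
print(marker.evaluate())

# Passing a partial environment overrides the corresponding defaults, which is
# handy for checking how a marker would resolve on another platform.
print(marker.evaluate({
    "python_full_version": "3.10.14",
    "platform_python_implementation": "CPython",
    "sys_platform": "darwin",
}))  # -> True: all three clauses hold for this environment
```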