Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-02 04:38:29 +00:00)

Compare commits: update-llm...bugfix/flo (7 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 38735cba99 |  |
|  | cde67882b4 |  |
|  | d3df545f1e |  |
|  | b5067a2689 |  |
|  | 362b20f052 |  |
|  | d5408ec461 |  |
|  | 6677c9c192 |  |
@@ -4,7 +4,7 @@ description: View the latest updates and changes to CrewAI
 icon: timeline
 ---

-<Update label="2025-03-17" description="v0.108.0">
+<Update label="2024-03-17" description="v0.108.0">
 **Features**
 - Converted tabs to spaces in `crew.py` template
 - Enhanced LLM Streaming Response Handling and Event System
@@ -24,7 +24,7 @@ icon: timeline
 - Added documentation for `ApifyActorsTool`
 </Update>

-<Update label="2025-03-10" description="v0.105.0">
+<Update label="2024-03-10" description="v0.105.0">
 **Core Improvements & Fixes**
 - Fixed issues with missing template variables and user memory configuration
 - Improved async flow support and addressed agent response formatting
@@ -45,7 +45,7 @@ icon: timeline
 - Fixed typos in prompts and updated Amazon Bedrock model listings
 </Update>

-<Update label="2025-02-12" description="v0.102.0">
+<Update label="2024-02-12" description="v0.102.0">
 **Core Improvements & Fixes**
 - Enhanced LLM Support: Improved structured LLM output, parameter handling, and formatting for Anthropic models
 - Crew & Agent Stability: Fixed issues with cloning agents/crews using knowledge sources, multiple task outputs in conditional tasks, and ignored Crew task callbacks
@@ -65,7 +65,7 @@ icon: timeline
 - Fixed Various Typos & Formatting Issues
 </Update>

-<Update label="2025-01-28" description="v0.100.0">
+<Update label="2024-01-28" description="v0.100.0">
 **Features**
 - Add Composio docs
 - Add SageMaker as a LLM provider
@@ -80,7 +80,7 @@ icon: timeline
 - Improve formatting and clarity in CLI and Composio Tool docs
 </Update>

-<Update label="2025-01-20" description="v0.98.0">
+<Update label="2024-01-20" description="v0.98.0">
 **Features**
 - Conversation crew v1
 - Add unique ID to flow states
@@ -101,7 +101,7 @@ icon: timeline
 - Fixed typos, nested pydantic model issue, and docling issues
 </Update>

-<Update label="2025-01-04" description="v0.95.0">
+<Update label="2024-01-04" description="v0.95.0">
 **New Features**
 - Adding Multimodal Abilities to Crew
 - Programatic Guardrails
@@ -131,7 +131,7 @@ icon: timeline
 - Suppressed userWarnings from litellm pydantic issues
 </Update>

-<Update label="2024-12-05" description="v0.86.0">
+<Update label="2023-12-05" description="v0.86.0">
 **Changes**
 - Remove all references to pipeline and pipeline router
 - Add Nvidia NIM as provider in Custom LLM
@@ -141,7 +141,7 @@ icon: timeline
 - Simplify template crew
 </Update>

-<Update label="2024-12-04" description="v0.85.0">
+<Update label="2023-12-04" description="v0.85.0">
 **Features**
 - Added knowledge to agent level
 - Feat/remove langchain
@@ -161,7 +161,7 @@ icon: timeline
 - Improvements to LLM Configuration and Usage
 </Update>

-<Update label="2024-11-25" description="v0.83.0">
+<Update label="2023-11-25" description="v0.83.0">
 **New Features**
 - New before_kickoff and after_kickoff crew callbacks
 - Support to pre-seed agents with Knowledge
@@ -178,7 +178,7 @@ icon: timeline
 - Update Docs
 </Update>

-<Update label="2024-11-13" description="v0.80.0">
+<Update label="2023-11-13" description="v0.80.0">
 **Fixes**
 - Fixing Tokens callback replacement bug
 - Fixing Step callback issue
@@ -1,7 +1,6 @@
---
title: 'Event Listeners'
description: 'Tap into CrewAI events to build custom integrations and monitoring'
icon: spinner
---

# Event Listeners
@@ -97,19 +97,13 @@
         "how-to/kickoff-async",
         "how-to/kickoff-for-each",
         "how-to/replay-tasks-from-latest-crew-kickoff",
         "how-to/conditional-tasks"
       ]
     },
     {
       "group": "Agent Monitoring & Observability",
       "pages": [
-        "how-to/weave-integration",
-        "how-to/conditional-tasks",
         "how-to/agentops-observability",
-        "how-to/langfuse-observability",
         "how-to/langtrace-observability",
         "how-to/mlflow-observability",
         "how-to/openlit-observability",
-        "how-to/portkey-observability"
+        "how-to/portkey-observability",
+        "how-to/langfuse-observability"
       ]
     },
     {
@@ -117,8 +111,6 @@
       "pages": [
         "tools/aimindtool",
         "tools/apifyactorstool",
-        "tools/bedrockinvokeagenttool",
-        "tools/bedrockkbretriever",
         "tools/bravesearchtool",
         "tools/browserbaseloadtool",
         "tools/codedocssearchtool",
@@ -1,5 +1,5 @@
 ---
-title: AgentOps Integration
+title: Agent Monitoring with AgentOps
 description: Understanding and logging your agent performance with AgentOps.
 icon: paperclip
 ---
@@ -39,7 +39,8 @@ analysis_crew = Crew(
     agents=[coding_agent],
     tasks=[data_analysis_task],
     verbose=True,
-    memory=False
+    memory=False,
+    respect_context_window=True # enable by default
 )

 datasets = [
@@ -1,7 +1,7 @@
 ---
-title: Langfuse Integration
+title: Agent Monitoring with Langfuse
 description: Learn how to integrate Langfuse with CrewAI via OpenTelemetry using OpenLit
-icon: vials
+icon: magnifying-glass-chart
 ---

 # Integrate Langfuse with CrewAI
@@ -1,5 +1,5 @@
 ---
-title: Langtrace Integration
+title: Agent Monitoring with Langtrace
 description: How to monitor cost, latency, and performance of CrewAI Agents using Langtrace, an external observability tool.
 icon: chart-line
 ---
@@ -1,5 +1,5 @@
 ---
-title: MLflow Integration
+title: Agent Monitoring with MLflow
 description: Quickly start monitoring your Agents with MLflow.
 icon: bars-staggered
 ---
@@ -1,5 +1,5 @@
 ---
-title: OpenLIT Integration
+title: Agent Monitoring with OpenLIT
 description: Quickly start monitoring your Agents in just a single line of code with OpenTelemetry.
 icon: magnifying-glass-chart
 ---
@@ -1,5 +1,5 @@
 ---
-title: Portkey Integration
+title: Agent Monitoring with Portkey
 description: How to use Portkey with CrewAI
 icon: key
 ---
@@ -1,124 +0,0 @@
---
title: Weave Integration
description: Learn how to use Weights & Biases (W&B) Weave to track, experiment with, evaluate, and improve your CrewAI applications.
icon: radar
---

# Weave Overview

[Weights & Biases (W&B) Weave](https://weave-docs.wandb.ai/) is a framework for tracking, experimenting with, evaluating, deploying, and improving LLM-based applications.

Weave provides comprehensive support for every stage of your CrewAI application development:

- **Tracing & Monitoring**: Automatically track LLM calls and application logic to debug and analyze production systems
- **Systematic Iteration**: Refine and iterate on prompts, datasets, and models
- **Evaluation**: Use custom or pre-built scorers to systematically assess and enhance agent performance
- **Guardrails**: Protect your agents with pre- and post-safeguards for content moderation and prompt safety

Weave automatically captures traces for your CrewAI applications, enabling you to monitor and analyze your agents' performance, interactions, and execution flow. This helps you build better evaluation datasets and optimize your agent workflows.

## Setup Instructions

<Steps>
  <Step title="Install required packages">
    ```shell
    pip install crewai weave
    ```
  </Step>
  <Step title="Set up W&B Account">
    Sign up for a [Weights & Biases account](https://wandb.ai) if you haven't already. You'll need this to view your traces and metrics.
  </Step>
  <Step title="Initialize Weave in Your Application">
    Add the following code to your application:

    ```python
    import weave

    # Initialize Weave with your project name
    weave.init(project_name="crewai_demo")
    ```

    After initialization, Weave will provide a URL where you can view your traces and metrics.
  </Step>
  <Step title="Create your Crews/Flows">
    ```python
    from crewai import Agent, Task, Crew, LLM, Process

    # Create an LLM with a temperature of 0 to ensure deterministic outputs
    llm = LLM(model="gpt-4o", temperature=0)

    # Create agents
    researcher = Agent(
        role='Research Analyst',
        goal='Find and analyze the best investment opportunities',
        backstory='Expert in financial analysis and market research',
        llm=llm,
        verbose=True,
        allow_delegation=False,
    )

    writer = Agent(
        role='Report Writer',
        goal='Write clear and concise investment reports',
        backstory='Experienced in creating detailed financial reports',
        llm=llm,
        verbose=True,
        allow_delegation=False,
    )

    # Create tasks
    research_task = Task(
        description='Deep research on the {topic}',
        expected_output='Comprehensive market data including key players, market size, and growth trends.',
        agent=researcher
    )

    writing_task = Task(
        description='Write a detailed report based on the research',
        expected_output='The report should be easy to read and understand. Use bullet points where applicable.',
        agent=writer
    )

    # Create a crew
    crew = Crew(
        agents=[researcher, writer],
        tasks=[research_task, writing_task],
        verbose=True,
        process=Process.sequential,
    )

    # Run the crew
    result = crew.kickoff(inputs={"topic": "AI in material science"})
    print(result)
    ```
  </Step>
  <Step title="View Traces in Weave">
    After running your CrewAI application, visit the Weave URL provided during initialization to view:
    - LLM calls and their metadata
    - Agent interactions and task execution flow
    - Performance metrics like latency and token usage
    - Any errors or issues that occurred during execution

    <Frame caption="Weave Tracing Dashboard">
      <img src="/images/weave-tracing.png" alt="Weave tracing example with CrewAI" />
    </Frame>
  </Step>
</Steps>

## Features

- Weave automatically captures all CrewAI operations: agent interactions and task executions; LLM calls with metadata and token usage; tool usage and results.
- The integration supports all CrewAI execution methods: `kickoff()`, `kickoff_for_each()`, `kickoff_async()`, and `kickoff_for_each_async()`.
- Automatic tracing of all [crewAI-tools](https://github.com/crewAIInc/crewAI-tools).
- Flow feature support with decorator patching (`@start`, `@listen`, `@router`, `@or_`, `@and_`).
- Track custom guardrails passed to CrewAI `Task` with `@weave.op()`.

For detailed information on what's supported, visit the [Weave CrewAI documentation](https://weave-docs.wandb.ai/guides/integrations/crewai/#getting-started-with-flow).
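The guardrail bullet above is the one integration point the setup steps don't show; a minimal sketch, assuming the programmatic guardrail signature (a `TaskOutput` in, a `(passed, data)` tuple out) and an invented `no_empty_output` validator:

```python
import weave
from crewai import Agent, Task

weave.init(project_name="crewai_demo")

# Hypothetical guardrail; @weave.op() makes every invocation appear
# as a traced call in the Weave dashboard.
@weave.op()
def no_empty_output(task_output):
    text = task_output.raw.strip()
    if not text:
        return (False, "Output was empty; ask the agent to retry.")
    return (True, text)

writer = Agent(
    role='Report Writer',
    goal='Write clear and concise investment reports',
    backstory='Experienced in creating detailed financial reports',
)

writing_task = Task(
    description='Write a detailed report based on the research',
    expected_output='A short, readable report.',
    agent=writer,
    guardrail=no_empty_output,  # traced alongside the task execution
)
```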

## Resources

- [📘 Weave Documentation](https://weave-docs.wandb.ai)
- [📊 Example Weave x CrewAI dashboard](https://wandb.ai/ayut/crewai_demo/weave/traces?cols=%7B%22wb_run_id%22%3Afalse%2C%22attributes.weave.client_version%22%3Afalse%2C%22attributes.weave.os_name%22%3Afalse%2C%22attributes.weave.os_release%22%3Afalse%2C%22attributes.weave.os_version%22%3Afalse%2C%22attributes.weave.source%22%3Afalse%2C%22attributes.weave.sys_version%22%3Afalse%7D&peekPath=%2Fayut%2Fcrewai_demo%2Fcalls%2F0195c838-38cb-71a2-8a15-651ecddf9d89)
- [🐦 X](https://x.com/weave_wb)
Binary file not shown (before: 13 MiB).
Binary file not shown (before: 693 KiB).
@@ -1,187 +0,0 @@
---
title: Bedrock Invoke Agent Tool
description: Enables CrewAI agents to invoke Amazon Bedrock Agents and leverage their capabilities within your workflows
icon: aws
---

# `BedrockInvokeAgentTool`

The `BedrockInvokeAgentTool` enables CrewAI agents to invoke Amazon Bedrock Agents and leverage their capabilities within your workflows.

## Installation

```bash
uv pip install 'crewai[tools]'
```

## Requirements

- AWS credentials configured (either through environment variables or AWS CLI)
- `boto3` and `python-dotenv` packages
- Access to Amazon Bedrock Agents

## Usage

Here's how to use the tool with a CrewAI agent:

```python {2, 4-8}
from crewai import Agent, Task, Crew
from crewai_tools.aws.bedrock.agents.invoke_agent_tool import BedrockInvokeAgentTool

# Initialize the tool
agent_tool = BedrockInvokeAgentTool(
    agent_id="your-agent-id",
    agent_alias_id="your-agent-alias-id"
)

# Create a CrewAI agent that uses the tool
aws_expert = Agent(
    role='AWS Service Expert',
    goal='Help users understand AWS services and quotas',
    backstory='I am an expert in AWS services and can provide detailed information about them.',
    tools=[agent_tool],
    verbose=True
)

# Create a task for the agent
quota_task = Task(
    description="Find out the current service quotas for EC2 in us-west-2 and explain any recent changes.",
    agent=aws_expert
)

# Create a crew with the agent
crew = Crew(
    agents=[aws_expert],
    tasks=[quota_task],
    verbose=2
)

# Run the crew
result = crew.kickoff()
print(result)
```

## Tool Arguments

| Argument | Type | Required | Default | Description |
|:---------|:-----|:---------|:--------|:------------|
| **agent_id** | `str` | Yes | None | The unique identifier of the Bedrock agent |
| **agent_alias_id** | `str` | Yes | None | The unique identifier of the agent alias |
| **session_id** | `str` | No | timestamp | The unique identifier of the session |
| **enable_trace** | `bool` | No | False | Whether to enable trace for debugging |
| **end_session** | `bool` | No | False | Whether to end the session after invocation |
| **description** | `str` | No | None | Custom description for the tool |

## Environment Variables

```bash
BEDROCK_AGENT_ID=your-agent-id              # Alternative to passing agent_id
BEDROCK_AGENT_ALIAS_ID=your-agent-alias-id  # Alternative to passing agent_alias_id
AWS_REGION=your-aws-region                  # Defaults to us-west-2
AWS_ACCESS_KEY_ID=your-access-key           # Required for AWS authentication
AWS_SECRET_ACCESS_KEY=your-secret-key       # Required for AWS authentication
```
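Since the requirements list `python-dotenv`, one way to wire these variables up is to load them from a local `.env` file; a minimal sketch (the no-argument construction relies on the environment-variable alternatives documented above):

```python
from dotenv import load_dotenv
from crewai_tools.aws.bedrock.agents.invoke_agent_tool import BedrockInvokeAgentTool

# Pull BEDROCK_AGENT_ID, BEDROCK_AGENT_ALIAS_ID, and AWS_* from a .env file
load_dotenv()

# With the variables set, agent_id / agent_alias_id need not be passed explicitly
agent_tool = BedrockInvokeAgentTool()
```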

## Advanced Usage

### Multi-Agent Workflow with Session Management

```python {2, 4-22}
from crewai import Agent, Task, Crew, Process
from crewai_tools.aws.bedrock.agents.invoke_agent_tool import BedrockInvokeAgentTool

# Initialize tools with session management
initial_tool = BedrockInvokeAgentTool(
    agent_id="your-agent-id",
    agent_alias_id="your-agent-alias-id",
    session_id="custom-session-id"
)

followup_tool = BedrockInvokeAgentTool(
    agent_id="your-agent-id",
    agent_alias_id="your-agent-alias-id",
    session_id="custom-session-id"
)

final_tool = BedrockInvokeAgentTool(
    agent_id="your-agent-id",
    agent_alias_id="your-agent-alias-id",
    session_id="custom-session-id",
    end_session=True
)

# Create agents for different stages
researcher = Agent(
    role='AWS Service Researcher',
    goal='Gather information about AWS services',
    backstory='I am specialized in finding detailed AWS service information.',
    tools=[initial_tool]
)

analyst = Agent(
    role='Service Compatibility Analyst',
    goal='Analyze service compatibility and requirements',
    backstory='I analyze AWS services for compatibility and integration possibilities.',
    tools=[followup_tool]
)

summarizer = Agent(
    role='Technical Documentation Writer',
    goal='Create clear technical summaries',
    backstory='I specialize in creating clear, concise technical documentation.',
    tools=[final_tool]
)

# Create tasks
research_task = Task(
    description="Find all available AWS services in us-west-2 region.",
    agent=researcher
)

analysis_task = Task(
    description="Analyze which services support IPv6 and their implementation requirements.",
    agent=analyst
)

summary_task = Task(
    description="Create a summary of IPv6-compatible services and their key features.",
    agent=summarizer
)

# Create a crew with the agents and tasks
crew = Crew(
    agents=[researcher, analyst, summarizer],
    tasks=[research_task, analysis_task, summary_task],
    process=Process.sequential,
    verbose=2
)

# Run the crew
result = crew.kickoff()
```

## Use Cases

### Hybrid Multi-Agent Collaborations
- Create workflows where CrewAI agents collaborate with managed Bedrock agents running as services in AWS
- Enable scenarios where sensitive data processing happens within your AWS environment while other agents operate externally
- Bridge on-premises CrewAI agents with cloud-based Bedrock agents for distributed intelligence workflows

### Data Sovereignty and Compliance
- Keep data-sensitive agentic workflows within your AWS environment while allowing external CrewAI agents to orchestrate tasks
- Maintain compliance with data residency requirements by processing sensitive information only within your AWS account
- Enable secure multi-agent collaborations where some agents cannot access your organization's private data

### Seamless AWS Service Integration
- Access any AWS service through Amazon Bedrock Actions without writing complex integration code
- Enable CrewAI agents to interact with AWS services through natural language requests
- Leverage pre-built Bedrock agent capabilities to interact with AWS services like Bedrock Knowledge Bases, Lambda, and more

### Scalable Hybrid Agent Architectures
- Offload computationally intensive tasks to managed Bedrock agents while lightweight tasks run in CrewAI
- Scale agent processing by distributing workloads between local CrewAI agents and cloud-based Bedrock agents

### Cross-Organizational Agent Collaboration
- Enable secure collaboration between your organization's CrewAI agents and partner organizations' Bedrock agents
- Create workflows where external expertise from Bedrock agents can be incorporated without exposing sensitive data
- Build agent ecosystems that span organizational boundaries while maintaining security and data control
@@ -1,165 +0,0 @@
---
title: 'Bedrock Knowledge Base Retriever'
description: 'Retrieve information from Amazon Bedrock Knowledge Bases using natural language queries'
icon: aws
---

# `BedrockKBRetrieverTool`

The `BedrockKBRetrieverTool` enables CrewAI agents to retrieve information from Amazon Bedrock Knowledge Bases using natural language queries.

## Installation

```bash
uv pip install 'crewai[tools]'
```

## Requirements

- AWS credentials configured (either through environment variables or AWS CLI)
- `boto3` and `python-dotenv` packages
- Access to Amazon Bedrock Knowledge Base

## Usage

Here's how to use the tool with a CrewAI agent:

```python {2, 4-17}
from crewai import Agent, Task, Crew
from crewai_tools.aws.bedrock.knowledge_base.retriever_tool import BedrockKBRetrieverTool

# Initialize the tool
kb_tool = BedrockKBRetrieverTool(
    knowledge_base_id="your-kb-id",
    number_of_results=5
)

# Create a CrewAI agent that uses the tool
researcher = Agent(
    role='Knowledge Base Researcher',
    goal='Find information about company policies',
    backstory='I am a researcher specialized in retrieving and analyzing company documentation.',
    tools=[kb_tool],
    verbose=True
)

# Create a task for the agent
research_task = Task(
    description="Find our company's remote work policy and summarize the key points.",
    agent=researcher
)

# Create a crew with the agent
crew = Crew(
    agents=[researcher],
    tasks=[research_task],
    verbose=2
)

# Run the crew
result = crew.kickoff()
print(result)
```

## Tool Arguments

| Argument | Type | Required | Default | Description |
|:---------|:-----|:---------|:---------|:-------------|
| **knowledge_base_id** | `str` | Yes | None | The unique identifier of the knowledge base (0-10 alphanumeric characters) |
| **number_of_results** | `int` | No | 5 | Maximum number of results to return |
| **retrieval_configuration** | `dict` | No | None | Custom configurations for the knowledge base query |
| **guardrail_configuration** | `dict` | No | None | Content filtering settings |
| **next_token** | `str` | No | None | Token for pagination |

## Environment Variables

```bash
BEDROCK_KB_ID=your-knowledge-base-id  # Alternative to passing knowledge_base_id
AWS_REGION=your-aws-region            # Defaults to us-east-1
AWS_ACCESS_KEY_ID=your-access-key     # Required for AWS authentication
AWS_SECRET_ACCESS_KEY=your-secret-key # Required for AWS authentication
```

## Response Format

The tool returns results in JSON format:

```json
{
  "results": [
    {
      "content": "Retrieved text content",
      "content_type": "text",
      "source_type": "S3",
      "source_uri": "s3://bucket/document.pdf",
      "score": 0.95,
      "metadata": {
        "additional": "metadata"
      }
    }
  ],
  "nextToken": "pagination-token",
  "guardrailAction": "NONE"
}
```
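Because the payload is plain JSON, downstream code can post-process the tool's output directly; a small sketch (the helper name is invented; field names follow the example above):

```python
import json

def top_sources(raw_output: str, min_score: float = 0.8) -> list[str]:
    """Return source URIs of high-confidence results from the tool's JSON output."""
    payload = json.loads(raw_output)
    return [
        result["source_uri"]
        for result in payload.get("results", [])
        if result.get("score", 0.0) >= min_score
    ]

# raw_output: the JSON string produced by BedrockKBRetrieverTool
```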

## Advanced Usage

### Custom Retrieval Configuration

```python
kb_tool = BedrockKBRetrieverTool(
    knowledge_base_id="your-kb-id",
    retrieval_configuration={
        "vectorSearchConfiguration": {
            "numberOfResults": 10,
            "overrideSearchType": "HYBRID"
        }
    }
)

policy_expert = Agent(
    role='Policy Expert',
    goal='Analyze company policies in detail',
    backstory='I am an expert in corporate policy analysis with deep knowledge of regulatory requirements.',
    tools=[kb_tool]
)
```

## Supported Data Sources

- Amazon S3
- Confluence
- Salesforce
- SharePoint
- Web pages
- Custom document locations
- Amazon Kendra
- SQL databases

## Use Cases

### Enterprise Knowledge Integration
- Enable CrewAI agents to access your organization's proprietary knowledge without exposing sensitive data
- Allow agents to make decisions based on your company's specific policies, procedures, and documentation
- Create agents that can answer questions based on your internal documentation while maintaining data security

### Specialized Domain Knowledge
- Connect CrewAI agents to domain-specific knowledge bases (legal, medical, technical) without retraining models
- Leverage existing knowledge repositories that are already maintained in your AWS environment
- Combine CrewAI's reasoning with domain-specific information from your knowledge bases

### Data-Driven Decision Making
- Ground CrewAI agent responses in your actual company data rather than general knowledge
- Ensure agents provide recommendations based on your specific business context and documentation
- Reduce hallucinations by retrieving factual information from your knowledge bases

### Scalable Information Access
- Access terabytes of organizational knowledge without embedding it all into your models
- Dynamically query only the relevant information needed for specific tasks
- Leverage AWS's scalable infrastructure to handle large knowledge bases efficiently

### Compliance and Governance
- Ensure CrewAI agents provide responses that align with your company's approved documentation
- Create auditable trails of information sources used by your agents
- Maintain control over what information sources your agents can access
@@ -25,7 +25,6 @@ from crewai.tools.base_tool import BaseTool, Tool
 from crewai.utilities import I18N, Logger, RPMController
 from crewai.utilities.config import process_config
 from crewai.utilities.converter import Converter
-from crewai.utilities.string_utils import interpolate_only

 T = TypeVar("T", bound="BaseAgent")

@@ -334,15 +333,9 @@ class BaseAgent(ABC, BaseModel):
         self._original_backstory = self.backstory

         if inputs:
-            self.role = interpolate_only(
-                input_string=self._original_role, inputs=inputs
-            )
-            self.goal = interpolate_only(
-                input_string=self._original_goal, inputs=inputs
-            )
-            self.backstory = interpolate_only(
-                input_string=self._original_backstory, inputs=inputs
-            )
+            self.role = self._original_role.format(**inputs)
+            self.goal = self._original_goal.format(**inputs)
+            self.backstory = self._original_backstory.format(**inputs)

     def set_cache_handler(self, cache_handler: CacheHandler) -> None:
         """Set the cache handler for the agent.
@@ -8,45 +8,45 @@ from pydantic import BaseModel

 class FlowPersistence(abc.ABC):
     """Abstract base class for flow state persistence.

     This class defines the interface that all persistence implementations must follow.
     It supports both structured (Pydantic BaseModel) and unstructured (dict) states.
     """

     @abc.abstractmethod
     def init_db(self) -> None:
         """Initialize the persistence backend.

         This method should handle any necessary setup, such as:
         - Creating tables
         - Establishing connections
         - Setting up indexes
         """
         pass

     @abc.abstractmethod
     def save_state(
         self,
         flow_uuid: str,
         method_name: str,
-        state_data: Union[Dict[str, Any], BaseModel]
+        state_data: Union[Dict[str, Any], BaseModel],
     ) -> None:
         """Persist the flow state after method completion.

         Args:
             flow_uuid: Unique identifier for the flow instance
             method_name: Name of the method that just completed
             state_data: Current state data (either dict or Pydantic model)
         """
         pass

     @abc.abstractmethod
     def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
         """Load the most recent state for a given flow UUID.

         Args:
             flow_uuid: Unique identifier for the flow instance

         Returns:
             The most recent state as a dictionary, or None if no state exists
         """
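The three abstract methods above are the whole contract; for orientation, a minimal sketch of a custom backend implementing it (an in-memory store, with the class name assumed and not part of this changeset):

```python
from typing import Any, Dict, Optional, Union

from pydantic import BaseModel

from crewai.flow.persistence.base import FlowPersistence


class InMemoryFlowPersistence(FlowPersistence):
    """Illustrative backend: keeps the latest state per flow in a dict."""

    def __init__(self) -> None:
        self._states: Dict[str, Dict[str, Any]] = {}

    def init_db(self) -> None:
        # Nothing to create for an in-process store; just start clean
        self._states.clear()

    def save_state(
        self,
        flow_uuid: str,
        method_name: str,
        state_data: Union[Dict[str, Any], BaseModel],
    ) -> None:
        # Normalize Pydantic models to plain dicts before storing
        if isinstance(state_data, BaseModel):
            state_data = state_data.model_dump()
        self._states[flow_uuid] = dict(state_data)

    def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
        return self._states.get(flow_uuid)
```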
@@ -11,6 +11,7 @@ from typing import Any, Dict, Optional, Union
 from pydantic import BaseModel

 from crewai.flow.persistence.base import FlowPersistence
+from crewai.flow.state_utils import to_serializable


 class SQLiteFlowPersistence(FlowPersistence):
@@ -78,34 +79,53 @@ class SQLiteFlowPersistence(FlowPersistence):
             flow_uuid: Unique identifier for the flow instance
             method_name: Name of the method that just completed
             state_data: Current state data (either dict or Pydantic model)
-        """
-        # Convert state_data to dict, handling both Pydantic and dict cases
-        if isinstance(state_data, BaseModel):
-            state_dict = dict(state_data)  # Use dict() for better type compatibility
-        elif isinstance(state_data, dict):
-            state_dict = state_data
-        else:
-            raise ValueError(
-                f"state_data must be either a Pydantic BaseModel or dict, got {type(state_data)}"
-            )
-
-        with sqlite3.connect(self.db_path) as conn:
-            conn.execute(
-                """
-                INSERT INTO flow_states (
-                    flow_uuid,
-                    method_name,
-                    timestamp,
-                    state_json
-                ) VALUES (?, ?, ?, ?)
-                """,
-                (
-                    flow_uuid,
-                    method_name,
-                    datetime.now(timezone.utc).isoformat(),
-                    json.dumps(state_dict),
-                ),
-            )
+        Raises:
+            ValueError: If state_data is neither a dict nor a BaseModel
+            RuntimeError: If database operations fail
+            TypeError: If JSON serialization fails
+        """
+        try:
+            # Convert state_data to a JSON-serializable dict using the helper method
+            state_dict = to_serializable(state_data)
+
+            # Try to serialize to JSON to catch any serialization issues early
+            try:
+                state_json = json.dumps(state_dict)
+            except (TypeError, ValueError, OverflowError) as json_err:
+                raise TypeError(
+                    f"Failed to serialize state to JSON: {json_err}"
+                ) from json_err
+
+            # Perform database operation with error handling
+            try:
+                with sqlite3.connect(self.db_path) as conn:
+                    conn.execute(
+                        """
+                        INSERT INTO flow_states (
+                            flow_uuid,
+                            method_name,
+                            timestamp,
+                            state_json
+                        ) VALUES (?, ?, ?, ?)
+                        """,
+                        (
+                            flow_uuid,
+                            method_name,
+                            datetime.now(timezone.utc).isoformat(),
+                            state_json,
+                        ),
+                    )
+            except sqlite3.Error as db_err:
+                raise RuntimeError(f"Database operation failed: {db_err}") from db_err
+
+        except Exception as e:
+            # Log the error but don't crash the application
+            import logging
+
+            logging.error(f"Failed to save flow state: {e}")
+            # Re-raise to allow caller to handle or ignore
+            raise

     def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
         """Load the most recent state for a given flow UUID.
@@ -1,36 +1,16 @@
 import json
 from datetime import date, datetime
+from enum import Enum
 from typing import Any, Dict, List, Union

 from pydantic import BaseModel

-from crewai.flow import Flow
-
 SerializablePrimitive = Union[str, int, float, bool, None]
 Serializable = Union[
     SerializablePrimitive, List["Serializable"], Dict[str, "Serializable"]
 ]


-def export_state(flow: Flow) -> dict[str, Serializable]:
-    """Exports the Flow's internal state as JSON-compatible data structures.
-
-    Performs a one-way transformation of a Flow's state into basic Python types
-    that can be safely serialized to JSON. To prevent infinite recursion with
-    circular references, the conversion is limited to a depth of 5 levels.
-
-    Args:
-        flow: The Flow object whose state needs to be exported
-
-    Returns:
-        dict[str, Any]: The transformed state using JSON-compatible Python
-            types.
-    """
-    result = to_serializable(flow._state)
-    assert isinstance(result, dict)
-    return result
-
-
 def to_serializable(
     obj: Any, max_depth: int = 5, _current_depth: int = 0
 ) -> Serializable:
@@ -52,6 +32,8 @@ def to_serializable(

     if isinstance(obj, (str, int, float, bool, type(None))):
         return obj
+    elif isinstance(obj, Enum):
+        return obj.value
     elif isinstance(obj, (date, datetime)):
         return obj.isoformat()
     elif isinstance(obj, (list, tuple, set)):
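The added `Enum` branch rounds out the conversions the docstring describes; a small usage sketch (values invented) of what `to_serializable` yields under the branches shown above:

```python
from datetime import date
from enum import Enum

from crewai.flow.state_utils import to_serializable


class Status(Enum):
    DONE = "done"


state = {
    "status": Status.DONE,        # Enum -> its value ("done")
    "started": date(2025, 3, 17), # date -> ISO string
    "steps": ("plan", "act"),     # tuple -> list
}

print(to_serializable(state))
# Expected, per the branches above:
# {'status': 'done', 'started': '2025-03-17', 'steps': ['plan', 'act']}
```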
@@ -2,7 +2,6 @@ import datetime
 import inspect
 import json
 import logging
 import re
 import threading
 import uuid
 from concurrent.futures import Future
@@ -50,7 +49,6 @@ from crewai.utilities.events import (
 from crewai.utilities.events.crewai_event_bus import crewai_event_bus
 from crewai.utilities.i18n import I18N
 from crewai.utilities.printer import Printer
-from crewai.utilities.string_utils import interpolate_only


 class Task(BaseModel):
@@ -509,9 +507,7 @@ class Task(BaseModel):
             return

         try:
-            self.description = interpolate_only(
-                input_string=self._original_description, inputs=inputs
-            )
+            self.description = self._original_description.format(**inputs)
         except KeyError as e:
             raise ValueError(
                 f"Missing required template variable '{e.args[0]}' in description"
@@ -520,7 +516,7 @@
             raise ValueError(f"Error interpolating description: {str(e)}") from e

         try:
-            self.expected_output = interpolate_only(
+            self.expected_output = self.interpolate_only(
                 input_string=self._original_expected_output, inputs=inputs
             )
         except (KeyError, ValueError) as e:
@@ -528,7 +524,7 @@

         if self.output_file is not None:
             try:
-                self.output_file = interpolate_only(
+                self.output_file = self.interpolate_only(
                     input_string=self._original_output_file, inputs=inputs
                 )
             except (KeyError, ValueError) as e:
@@ -559,6 +555,72 @@
                 f"\n\n{conversation_instruction}\n\n{conversation_history}"
             )

+    def interpolate_only(
+        self,
+        input_string: Optional[str],
+        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]],
+    ) -> str:
+        """Interpolate placeholders (e.g., {key}) in a string while leaving JSON untouched.
+
+        Args:
+            input_string: The string containing template variables to interpolate.
+                Can be None or empty, in which case an empty string is returned.
+            inputs: Dictionary mapping template variables to their values.
+                Supported value types are strings, integers, floats, and dicts/lists
+                containing only these types and other nested dicts/lists.
+
+        Returns:
+            The interpolated string with all template variables replaced with their values.
+            Empty string if input_string is None or empty.
+
+        Raises:
+            ValueError: If a value contains unsupported types
+        """
+
+        # Validation function for recursive type checking
+        def validate_type(value: Any) -> None:
+            if value is None:
+                return
+            if isinstance(value, (str, int, float, bool)):
+                return
+            if isinstance(value, (dict, list)):
+                for item in value.values() if isinstance(value, dict) else value:
+                    validate_type(item)
+                return
+            raise ValueError(
+                f"Unsupported type {type(value).__name__} in inputs. "
+                "Only str, int, float, bool, dict, and list are allowed."
+            )
+
+        # Validate all input values
+        for key, value in inputs.items():
+            try:
+                validate_type(value)
+            except ValueError as e:
+                raise ValueError(f"Invalid value for key '{key}': {str(e)}") from e
+
+        if input_string is None or not input_string:
+            return ""
+        if "{" not in input_string and "}" not in input_string:
+            return input_string
+        if not inputs:
+            raise ValueError(
+                "Inputs dictionary cannot be empty when interpolating variables"
+            )
+        try:
+            escaped_string = input_string.replace("{", "{{").replace("}", "}}")
+
+            for key in inputs.keys():
+                escaped_string = escaped_string.replace(f"{{{{{key}}}}}", f"{{{key}}}")
+
+            return escaped_string.format(**inputs)
+        except KeyError as e:
+            raise KeyError(
+                f"Template variable '{e.args[0]}' not found in inputs dictionary"
+            ) from e
+        except ValueError as e:
+            raise ValueError(f"Error during string interpolation: {str(e)}") from e
+
     def increment_tools_errors(self) -> None:
         """Increment the tools errors counter."""
         self.tools_errors += 1
@@ -1,12 +1,10 @@
 import re
-from typing import TYPE_CHECKING, List
+from typing import List

-if TYPE_CHECKING:
-    from crewai.task import Task
-    from crewai.tasks.task_output import TaskOutput
+from crewai.task import Task
+from crewai.tasks.task_output import TaskOutput


-def aggregate_raw_outputs_from_task_outputs(task_outputs: List["TaskOutput"]) -> str:
+def aggregate_raw_outputs_from_task_outputs(task_outputs: List[TaskOutput]) -> str:
     """Generate string context from the task outputs."""
     dividers = "\n\n----------\n\n"
@@ -15,7 +13,7 @@ def aggregate_raw_outputs_from_task_outputs(task_outputs: List["TaskOutput"]) -> str:
     return context


-def aggregate_raw_outputs_from_tasks(tasks: List["Task"]) -> str:
+def aggregate_raw_outputs_from_tasks(tasks: List[Task]) -> str:
     """Generate string context from the tasks."""
     task_outputs = [task.output for task in tasks if task.output is not None]
@@ -1,82 +0,0 @@
import re
from typing import Any, Dict, List, Optional, Union


def interpolate_only(
    input_string: Optional[str],
    inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]],
) -> str:
    """Interpolate placeholders (e.g., {key}) in a string while leaving JSON untouched.
    Only interpolates placeholders that follow the pattern {variable_name} where
    variable_name starts with a letter/underscore and contains only letters, numbers, and underscores.

    Args:
        input_string: The string containing template variables to interpolate.
            Can be None or empty, in which case an empty string is returned.
        inputs: Dictionary mapping template variables to their values.
            Supported value types are strings, integers, floats, and dicts/lists
            containing only these types and other nested dicts/lists.

    Returns:
        The interpolated string with all template variables replaced with their values.
        Empty string if input_string is None or empty.

    Raises:
        ValueError: If a value contains unsupported types or a template variable is missing
    """

    # Validation function for recursive type checking
    def validate_type(value: Any) -> None:
        if value is None:
            return
        if isinstance(value, (str, int, float, bool)):
            return
        if isinstance(value, (dict, list)):
            for item in value.values() if isinstance(value, dict) else value:
                validate_type(item)
            return
        raise ValueError(
            f"Unsupported type {type(value).__name__} in inputs. "
            "Only str, int, float, bool, dict, and list are allowed."
        )

    # Validate all input values
    for key, value in inputs.items():
        try:
            validate_type(value)
        except ValueError as e:
            raise ValueError(f"Invalid value for key '{key}': {str(e)}") from e

    if input_string is None or not input_string:
        return ""
    if "{" not in input_string and "}" not in input_string:
        return input_string
    if not inputs:
        raise ValueError(
            "Inputs dictionary cannot be empty when interpolating variables"
        )

    # The regex pattern to find valid variable placeholders
    # Matches {variable_name} where variable_name starts with a letter/underscore
    # and contains only letters, numbers, and underscores
    pattern = r"\{([A-Za-z_][A-Za-z0-9_]*)\}"

    # Find all matching variables in the input string
    variables = re.findall(pattern, input_string)
    result = input_string

    # Check if all variables exist in inputs
    missing_vars = [var for var in variables if var not in inputs]
    if missing_vars:
        raise KeyError(
            f"Template variable '{missing_vars[0]}' not found in inputs dictionary"
        )

    # Replace each variable with its value
    for var in variables:
        if var in inputs:
            placeholder = "{" + var + "}"
            value = str(inputs[var])
            result = result.replace(placeholder, value)

    return result
@@ -15,7 +15,6 @@ from crewai import Agent, Crew, Process, Task
 from crewai.tasks.conditional_task import ConditionalTask
 from crewai.tasks.task_output import TaskOutput
 from crewai.utilities.converter import Converter
-from crewai.utilities.string_utils import interpolate_only


 def test_task_tool_reflect_agent_tools():
@@ -823,7 +822,7 @@ def test_interpolate_only():

     # Test JSON structure preservation
     json_string = '{"info": "Look at {placeholder}", "nested": {"val": "{nestedVal}"}}'
-    result = interpolate_only(
+    result = task.interpolate_only(
         input_string=json_string,
         inputs={"placeholder": "the data", "nestedVal": "something else"},
     )
@@ -834,18 +833,20 @@

     # Test normal string interpolation
     normal_string = "Hello {name}, welcome to {place}!"
-    result = interpolate_only(
+    result = task.interpolate_only(
         input_string=normal_string, inputs={"name": "John", "place": "CrewAI"}
     )
     assert result == "Hello John, welcome to CrewAI!"

     # Test empty string
-    result = interpolate_only(input_string="", inputs={"unused": "value"})
+    result = task.interpolate_only(input_string="", inputs={"unused": "value"})
     assert result == ""

     # Test string with no placeholders
     no_placeholders = "Hello, this is a test"
-    result = interpolate_only(input_string=no_placeholders, inputs={"unused": "value"})
+    result = task.interpolate_only(
+        input_string=no_placeholders, inputs={"unused": "value"}
+    )
     assert result == no_placeholders
@@ -857,7 +858,7 @@ def test_interpolate_only_with_dict_inside_expected_output():
     )

     json_string = '{"questions": {"main_question": "What is the user\'s name?", "secondary_question": "What is the user\'s age?"}}'
-    result = interpolate_only(
+    result = task.interpolate_only(
         input_string=json_string,
         inputs={
             "questions": {
@@ -871,16 +872,18 @@
     assert result == json_string

     normal_string = "Hello {name}, welcome to {place}!"
-    result = interpolate_only(
+    result = task.interpolate_only(
         input_string=normal_string, inputs={"name": "John", "place": "CrewAI"}
     )
     assert result == "Hello John, welcome to CrewAI!"

-    result = interpolate_only(input_string="", inputs={"unused": "value"})
+    result = task.interpolate_only(input_string="", inputs={"unused": "value"})
     assert result == ""

     no_placeholders = "Hello, this is a test"
-    result = interpolate_only(input_string=no_placeholders, inputs={"unused": "value"})
+    result = task.interpolate_only(
+        input_string=no_placeholders, inputs={"unused": "value"}
+    )
     assert result == no_placeholders
@@ -1082,12 +1085,12 @@ def test_interpolate_with_list_of_strings():
     # Test simple list of strings
     input_str = "Available items: {items}"
     inputs = {"items": ["apple", "banana", "cherry"]}
-    result = interpolate_only(input_str, inputs)
+    result = task.interpolate_only(input_str, inputs)
     assert result == f"Available items: {inputs['items']}"

     # Test empty list
     empty_list_input = {"items": []}
-    result = interpolate_only(input_str, empty_list_input)
+    result = task.interpolate_only(input_str, empty_list_input)
     assert result == "Available items: []"
@@ -1103,7 +1106,7 @@ def test_interpolate_with_list_of_dicts():
             {"name": "Bob", "age": 25, "skills": ["Java", "Cloud"]},
         ]
     }
-    result = interpolate_only("{people}", input_data)
+    result = task.interpolate_only("{people}", input_data)

     parsed_result = eval(result)
     assert isinstance(parsed_result, list)
@@ -1135,7 +1138,7 @@ def test_interpolate_with_nested_structures():
             ],
         }
     }
-    result = interpolate_only("{company}", input_data)
+    result = task.interpolate_only("{company}", input_data)
     parsed = eval(result)

     assert parsed["name"] == "TechCorp"
@@ -1158,7 +1161,7 @@ def test_interpolate_with_special_characters():
             "empty": "",
         }
     }
-    result = interpolate_only("{special_data}", input_data)
+    result = task.interpolate_only("{special_data}", input_data)
     parsed = eval(result)

     assert parsed["quotes"] == """This has "double" and 'single' quotes"""
@@ -1185,7 +1188,7 @@ def test_interpolate_mixed_types():
         },
     }
-    result = interpolate_only("{data}", input_data)
+    result = task.interpolate_only("{data}", input_data)
     parsed = eval(result)

     assert parsed["name"] == "Test Dataset"
@@ -1213,7 +1216,7 @@ def test_interpolate_complex_combination():
         },
     ]
-    result = interpolate_only("{report}", input_data)
+    result = task.interpolate_only("{report}", input_data)
     parsed = eval(result)

     assert len(parsed) == 2
@@ -1230,7 +1233,7 @@ def test_interpolate_invalid_type_validation():

     # Test with invalid top-level type
     with pytest.raises(ValueError) as excinfo:
-        interpolate_only("{data}", {"data": set()})  # type: ignore we are purposely testing this failure
+        task.interpolate_only("{data}", {"data": set()})  # type: ignore we are purposely testing this failure

     assert "Unsupported type set" in str(excinfo.value)
@@ -1243,7 +1246,7 @@
         }
     }
     with pytest.raises(ValueError) as excinfo:
-        interpolate_only("{data}", {"data": invalid_nested})
+        task.interpolate_only("{data}", {"data": invalid_nested})
     assert "Unsupported type set" in str(excinfo.value)
@@ -1262,22 +1265,24 @@ def test_interpolate_custom_object_validation():

     # Test with custom object at top level
     with pytest.raises(ValueError) as excinfo:
-        interpolate_only("{obj}", {"obj": CustomObject(5)})  # type: ignore we are purposely testing this failure
+        task.interpolate_only("{obj}", {"obj": CustomObject(5)})  # type: ignore we are purposely testing this failure
     assert "Unsupported type CustomObject" in str(excinfo.value)

     # Test with nested custom object in dictionary
     with pytest.raises(ValueError) as excinfo:
-        interpolate_only("{data}", {"data": {"valid": 1, "invalid": CustomObject(5)}})
+        task.interpolate_only(
+            "{data}", {"data": {"valid": 1, "invalid": CustomObject(5)}}
+        )
     assert "Unsupported type CustomObject" in str(excinfo.value)

     # Test with nested custom object in list
     with pytest.raises(ValueError) as excinfo:
-        interpolate_only("{data}", {"data": [1, "valid", CustomObject(5)]})
+        task.interpolate_only("{data}", {"data": [1, "valid", CustomObject(5)]})
     assert "Unsupported type CustomObject" in str(excinfo.value)

     # Test with deeply nested custom object
     with pytest.raises(ValueError) as excinfo:
-        interpolate_only(
+        task.interpolate_only(
             "{data}", {"data": {"level1": {"level2": [{"level3": CustomObject(5)}]}}}
         )
     assert "Unsupported type CustomObject" in str(excinfo.value)
@@ -1301,7 +1306,7 @@ def test_interpolate_valid_complex_types():
     }

     # Should not raise any errors
-    result = interpolate_only("{data}", {"data": valid_data})
+    result = task.interpolate_only("{data}", {"data": valid_data})
     parsed = eval(result)
     assert parsed["name"] == "Valid Dataset"
     assert parsed["stats"]["nested"]["deeper"]["b"] == 2.5
@@ -1314,16 +1319,16 @@ def test_interpolate_edge_cases():
     )

     # Test empty dict and list
-    assert interpolate_only("{}", {"data": {}}) == "{}"
-    assert interpolate_only("[]", {"data": []}) == "[]"
+    assert task.interpolate_only("{}", {"data": {}}) == "{}"
+    assert task.interpolate_only("[]", {"data": []}) == "[]"

     # Test numeric types
-    assert interpolate_only("{num}", {"num": 42}) == "42"
-    assert interpolate_only("{num}", {"num": 3.14}) == "3.14"
+    assert task.interpolate_only("{num}", {"num": 42}) == "42"
+    assert task.interpolate_only("{num}", {"num": 3.14}) == "3.14"

     # Test boolean values (valid JSON types)
-    assert interpolate_only("{flag}", {"flag": True}) == "True"
-    assert interpolate_only("{flag}", {"flag": False}) == "False"
+    assert task.interpolate_only("{flag}", {"flag": True}) == "True"
+    assert task.interpolate_only("{flag}", {"flag": False}) == "False"


 def test_interpolate_valid_types():
@@ -1341,7 +1346,7 @@ def test_interpolate_valid_types():
         "nested": {"flag": True, "empty": None},
     }

-    result = interpolate_only("{data}", {"data": valid_data})
+    result = task.interpolate_only("{data}", {"data": valid_data})
     parsed = eval(result)

     assert parsed["active"] is True
@@ -1,35 +1,17 @@
|
||||
import json
|
||||
import os
|
||||
from typing import Dict, List, Optional
|
||||
from datetime import date, datetime
|
||||
from enum import Enum
|
||||
from typing import Any, Dict, List, Optional, Union, cast
|
||||
from unittest.mock import MagicMock, Mock, patch
|
||||
|
||||
import pytest
|
||||
from pydantic import BaseModel
|
||||
|
||||
from crewai.llm import LLM
|
||||
from crewai.utilities.converter import (
|
||||
Converter,
|
||||
ConverterError,
|
||||
convert_to_model,
|
||||
convert_with_instructions,
|
||||
create_converter,
|
||||
generate_model_description,
|
||||
get_conversion_instructions,
|
||||
handle_partial_json,
|
||||
validate_model,
|
||||
)
|
||||
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
|
||||
from crewai.flow.state_utils import _to_serializable_key, to_serializable, to_string
|
||||
|
||||
|
||||
# Sample Pydantic models for testing
|
||||
class EmailResponse(BaseModel):
|
||||
previous_message_content: str
|
||||
|
||||
|
||||
class EmailResponses(BaseModel):
|
||||
responses: list[EmailResponse]
|
||||
|
||||
|
||||
class SimpleModel(BaseModel):
|
||||
name: str
|
||||
age: int
|
||||
@@ -52,560 +34,190 @@ class Person(BaseModel):
|
||||
address: Address
|
||||
|
||||
|
||||
class CustomConverter(Converter):
|
||||
pass
|
||||
class Color(Enum):
|
||||
RED = "red"
|
||||
GREEN = "green"
|
||||
BLUE = "blue"
|
||||
|
||||
|
||||
# Fixtures
|
||||
@pytest.fixture
|
||||
def mock_agent():
|
||||
agent = Mock()
|
||||
agent.function_calling_llm = None
|
||||
    agent.llm = Mock()
    return agent


class EnumModel(BaseModel):
    name: str
    color: Color


class OptionalModel(BaseModel):
    name: str
    age: Optional[int]


class ListModel(BaseModel):
    items: List[int]


class UnionModel(BaseModel):
    field: Union[int, str, None]


# Tests for convert_to_model
def test_convert_to_model_with_valid_json():
    result = '{"name": "John", "age": 30}'
    output = convert_to_model(result, SimpleModel, None, None)
    assert isinstance(output, SimpleModel)
    assert output.name == "John"
    assert output.age == 30


def test_convert_to_model_with_invalid_json():
    result = '{"name": "John", "age": "thirty"}'
    with patch("crewai.utilities.converter.handle_partial_json") as mock_handle:
        mock_handle.return_value = "Fallback result"
        output = convert_to_model(result, SimpleModel, None, None)
        assert output == "Fallback result"


def test_convert_to_model_with_no_model():
    result = "Plain text"
    output = convert_to_model(result, None, None, None)
    assert output == "Plain text"


def test_convert_to_model_with_special_characters():
    json_string_test = """
    {
        "responses": [
            {
                "previous_message_content": "Hi Tom,\r\n\r\nNiamh has chosen the Mika phonics on"
            }
        ]
    }
    """
    output = convert_to_model(json_string_test, EmailResponses, None, None)
    assert isinstance(output, EmailResponses)
    assert len(output.responses) == 1
    assert (
        output.responses[0].previous_message_content
        == "Hi Tom,\r\n\r\nNiamh has chosen the Mika phonics on"
    )


def test_convert_to_model_with_escaped_special_characters():
    json_string_test = json.dumps(
        {
            "responses": [
                {
                    "previous_message_content": "Hi Tom,\r\n\r\nNiamh has chosen the Mika phonics on"
                }
            ]
        }
    )
    output = convert_to_model(json_string_test, EmailResponses, None, None)
    assert isinstance(output, EmailResponses)
    assert len(output.responses) == 1
    assert (
        output.responses[0].previous_message_content
        == "Hi Tom,\r\n\r\nNiamh has chosen the Mika phonics on"
    )


def test_convert_to_model_with_multiple_special_characters():
    json_string_test = """
    {
        "responses": [
            {
                "previous_message_content": "Line 1\r\nLine 2\tTabbed\nLine 3\r\n\rEscaped newline"
            }
        ]
    }
    """
    output = convert_to_model(json_string_test, EmailResponses, None, None)
    assert isinstance(output, EmailResponses)
    assert len(output.responses) == 1
    assert (
        output.responses[0].previous_message_content
        == "Line 1\r\nLine 2\tTabbed\nLine 3\r\n\rEscaped newline"
    )


# Tests for to_serializable function
def test_to_serializable_primitives():
    """Test serialization of primitive types."""
    assert to_serializable("test string") == "test string"
    assert to_serializable(42) == 42
    assert to_serializable(3.14) == 3.14
    assert to_serializable(True) == True
    assert to_serializable(None) is None


def test_to_serializable_dates():
    """Test serialization of date and datetime objects."""
    test_date = date(2023, 1, 15)
    test_datetime = datetime(2023, 1, 15, 10, 30, 45)

    assert to_serializable(test_date) == "2023-01-15"
    assert to_serializable(test_datetime) == "2023-01-15T10:30:45"


def test_to_serializable_collections():
    """Test serialization of lists, tuples, and sets."""
    test_list = [1, "two", 3.0]
    test_tuple = (4, "five", 6.0)
    test_set = {7, "eight", 9.0}

    assert to_serializable(test_list) == [1, "two", 3.0]
    assert to_serializable(test_tuple) == [4, "five", 6.0]

    # For sets, we can't rely on order, so we'll verify differently
    serialized_set = to_serializable(test_set)
    assert isinstance(serialized_set, list)
    assert len(serialized_set) == 3
    assert 7 in serialized_set
    assert "eight" in serialized_set
    assert 9.0 in serialized_set


def test_to_serializable_dict():
    """Test serialization of dictionaries."""
    test_dict = {"a": 1, "b": "two", "c": [3, 4, 5]}

    assert to_serializable(test_dict) == {"a": 1, "b": "two", "c": [3, 4, 5]}


def test_to_serializable_pydantic_models():
    """Test serialization of Pydantic models."""
    simple = SimpleModel(name="John", age=30)

    assert to_serializable(simple) == {"name": "John", "age": 30}


def test_to_serializable_nested_models():
    """Test serialization of nested Pydantic models."""
    simple = SimpleModel(name="John", age=30)
    nested = NestedModel(id=1, data=simple)

    assert to_serializable(nested) == {"id": 1, "data": {"name": "John", "age": 30}}


def test_to_serializable_complex_model():
    """Test serialization of a complex model with nested structures."""
    person = Person(
        name="Jane",
        age=28,
        address=Address(street="123 Main St", city="Anytown", zip_code="12345"),
    )

    assert to_serializable(person) == {
        "name": "Jane",
        "age": 28,
        "address": {"street": "123 Main St", "city": "Anytown", "zip_code": "12345"},
    }


def test_to_serializable_enum():
    """Test serialization of Enum values."""
    model = EnumModel(name="ColorTest", color=Color.RED)

    assert to_serializable(model) == {"name": "ColorTest", "color": "red"}
def test_to_serializable_optional_fields():
    """Test serialization of models with optional fields."""
    model_with_age = OptionalModel(name="WithAge", age=25)
    model_without_age = OptionalModel(name="WithoutAge", age=None)

    assert to_serializable(model_with_age) == {"name": "WithAge", "age": 25}
    assert to_serializable(model_without_age) == {"name": "WithoutAge", "age": None}


def test_to_serializable_list_field():
    """Test serialization of models with list fields."""
    model = ListModel(items=[1, 2, 3, 4, 5])

    assert to_serializable(model) == {"items": [1, 2, 3, 4, 5]}


def test_to_serializable_union_field():
    """Test serialization of models with union fields."""
    model_int = UnionModel(field=42)
    model_str = UnionModel(field="test")
    model_none = UnionModel(field=None)

    assert to_serializable(model_int) == {"field": 42}
    assert to_serializable(model_str) == {"field": "test"}
    assert to_serializable(model_none) == {"field": None}


# Tests for validate_model
def test_validate_model_pydantic_output():
    result = '{"name": "Alice", "age": 25}'
    output = validate_model(result, SimpleModel, False)
    assert isinstance(output, SimpleModel)
    assert output.name == "Alice"
    assert output.age == 25


def test_validate_model_json_output():
    result = '{"name": "Bob", "age": 40}'
    output = validate_model(result, SimpleModel, True)
    assert isinstance(output, dict)
    assert output == {"name": "Bob", "age": 40}


# Tests for handle_partial_json
def test_handle_partial_json_with_valid_partial():
    result = 'Some text {"name": "Charlie", "age": 35} more text'
    output = handle_partial_json(result, SimpleModel, False, None)
    assert isinstance(output, SimpleModel)
    assert output.name == "Charlie"
    assert output.age == 35


def test_handle_partial_json_with_invalid_partial(mock_agent):
    result = "No valid JSON here"
    with patch("crewai.utilities.converter.convert_with_instructions") as mock_convert:
        mock_convert.return_value = "Converted result"
        output = handle_partial_json(result, SimpleModel, False, mock_agent)
        assert output == "Converted result"
# Tests for convert_with_instructions
@patch("crewai.utilities.converter.create_converter")
@patch("crewai.utilities.converter.get_conversion_instructions")
def test_convert_with_instructions_success(
    mock_get_instructions, mock_create_converter, mock_agent
):
    mock_get_instructions.return_value = "Instructions"
    mock_converter = Mock()
    mock_converter.to_pydantic.return_value = SimpleModel(name="David", age=50)
    mock_create_converter.return_value = mock_converter

    result = "Some text to convert"
    output = convert_with_instructions(result, SimpleModel, False, mock_agent)

    assert isinstance(output, SimpleModel)
    assert output.name == "David"
    assert output.age == 50


@patch("crewai.utilities.converter.create_converter")
@patch("crewai.utilities.converter.get_conversion_instructions")
def test_convert_with_instructions_failure(
    mock_get_instructions, mock_create_converter, mock_agent
):
    mock_get_instructions.return_value = "Instructions"
    mock_converter = Mock()
    mock_converter.to_pydantic.return_value = ConverterError("Conversion failed")
    mock_create_converter.return_value = mock_converter

    result = "Some text to convert"
    with patch("crewai.utilities.converter.Printer") as mock_printer:
        output = convert_with_instructions(result, SimpleModel, False, mock_agent)
        assert output == result
        mock_printer.return_value.print.assert_called_once()


def test_to_serializable_max_depth():
    """Test max depth parameter to prevent infinite recursion."""
    # Create recursive structure
    a: Dict[str, Any] = {"name": "a"}
    b: Dict[str, Any] = {"name": "b", "ref": a}
    a["ref"] = b  # Create circular reference

    result = to_serializable(a, max_depth=3)

    assert isinstance(result, dict)
    assert "name" in result
    assert "ref" in result
    assert isinstance(result["ref"], dict)
    assert "ref" in result["ref"]
    assert isinstance(result["ref"]["ref"], dict)
    # At depth 3, it should convert to string
    assert isinstance(result["ref"]["ref"]["ref"], str)


def test_to_serializable_non_serializable():
    """Test serialization of objects that aren't directly JSON serializable."""

    class CustomObject:
        def __repr__(self):
            return "CustomObject()"

    obj = CustomObject()

    # Should convert to string representation
    assert to_serializable(obj) == "CustomObject()"


# Tests for get_conversion_instructions
def test_get_conversion_instructions_gpt():
    llm = LLM(model="gpt-4o-mini")
    with patch.object(LLM, "supports_function_calling") as supports_function_calling:
        supports_function_calling.return_value = True
        instructions = get_conversion_instructions(SimpleModel, llm)
        model_schema = PydanticSchemaParser(model=SimpleModel).get_schema()
        expected_instructions = (
            "Please convert the following text into valid JSON.\n\n"
            "Output ONLY the valid JSON and nothing else.\n\n"
            "The JSON must follow this schema exactly:\n```json\n"
            f"{model_schema}\n```"
        )
        assert instructions == expected_instructions
def test_get_conversion_instructions_non_gpt():
    llm = LLM(model="ollama/llama3.1", base_url="http://localhost:11434")
    with patch.object(LLM, "supports_function_calling", return_value=False):
        instructions = get_conversion_instructions(SimpleModel, llm)
        assert '"name": str' in instructions
        assert '"age": int' in instructions


def test_to_string_conversion():
    """Test the to_string function."""
    test_dict = {"name": "Test", "values": [1, 2, 3]}

    # Should convert to a JSON string
    assert to_string(test_dict) == '{"name": "Test", "values": [1, 2, 3]}'

    # None should return None
    assert to_string(None) is None


# Tests for is_gpt
def test_supports_function_calling_true():
    llm = LLM(model="gpt-4o")
    assert llm.supports_function_calling() is True


def test_supports_function_calling_false():
    llm = LLM(model="non-existent-model")
    assert llm.supports_function_calling() is False


def test_to_serializable_key():
    """Test serialization of dictionary keys."""
    # String and int keys are converted to strings
    assert _to_serializable_key("test") == "test"
    assert _to_serializable_key(42) == "42"

    # Complex objects are converted to a unique string
    obj = object()
    key_str = _to_serializable_key(obj)
    assert isinstance(key_str, str)
    assert "key_" in key_str
    assert "object" in key_str
def test_create_converter_with_mock_agent():
    mock_agent = MagicMock()
    mock_agent.get_output_converter.return_value = MagicMock(spec=Converter)

    converter = create_converter(
        agent=mock_agent,
        llm=Mock(),
        text="Sample",
        model=SimpleModel,
        instructions="Convert",
    )

    assert isinstance(converter, Converter)
    mock_agent.get_output_converter.assert_called_once()


def test_create_converter_with_custom_converter():
    converter = create_converter(
        converter_cls=CustomConverter,
        llm=LLM(model="gpt-4o-mini"),
        text="Sample",
        model=SimpleModel,
        instructions="Convert",
    )

    assert isinstance(converter, CustomConverter)


def test_create_converter_fails_without_agent_or_converter_cls():
    with pytest.raises(
        ValueError, match="Either agent or converter_cls must be provided"
    ):
        create_converter(
            llm=Mock(), text="Sample", model=SimpleModel, instructions="Convert"
        )


def test_generate_model_description_simple_model():
    description = generate_model_description(SimpleModel)
    expected_description = '{\n "name": str,\n "age": int\n}'
    assert description == expected_description


def test_generate_model_description_nested_model():
    description = generate_model_description(NestedModel)
    expected_description = (
        '{\n "id": int,\n "data": {\n "name": str,\n "age": int\n}\n}'
    )
    assert description == expected_description


def test_generate_model_description_optional_field():
    class ModelWithOptionalField(BaseModel):
        name: Optional[str]
        age: int

    description = generate_model_description(ModelWithOptionalField)
    expected_description = '{\n "name": Optional[str],\n "age": int\n}'
    assert description == expected_description


def test_generate_model_description_list_field():
    class ModelWithListField(BaseModel):
        items: List[int]

    description = generate_model_description(ModelWithListField)
    expected_description = '{\n "items": List[int]\n}'
    assert description == expected_description


def test_generate_model_description_dict_field():
    class ModelWithDictField(BaseModel):
        attributes: Dict[str, int]

    description = generate_model_description(ModelWithDictField)
    expected_description = '{\n "attributes": Dict[str, int]\n}'
    assert description == expected_description


@pytest.mark.vcr(filter_headers=["authorization"])
def test_convert_with_instructions():
    llm = LLM(model="gpt-4o-mini")
    sample_text = "Name: Alice, Age: 30"

    instructions = get_conversion_instructions(SimpleModel, llm)
    converter = Converter(
        llm=llm,
        text=sample_text,
        model=SimpleModel,
        instructions=instructions,
    )

    # Act
    output = converter.to_pydantic()

    # Assert
    assert isinstance(output, SimpleModel)
    assert output.name == "Alice"
    assert output.age == 30
# Skip tests that call external APIs when running in CI/CD
skip_external_api = pytest.mark.skipif(
    os.getenv("CI") is not None, reason="Skipping tests that call external API in CI/CD"
)


@skip_external_api
@pytest.mark.vcr(filter_headers=["authorization"], record_mode="once")
def test_converter_with_llama3_2_model():
    llm = LLM(model="ollama/llama3.2:3b", base_url="http://localhost:11434")
    sample_text = "Name: Alice Llama, Age: 30"
    instructions = get_conversion_instructions(SimpleModel, llm)
    converter = Converter(
        llm=llm,
        text=sample_text,
        model=SimpleModel,
        instructions=instructions,
    )
    output = converter.to_pydantic()
    assert isinstance(output, SimpleModel)
    assert output.name == "Alice Llama"
    assert output.age == 30


@skip_external_api
@pytest.mark.vcr(filter_headers=["authorization"], record_mode="once")
def test_converter_with_llama3_1_model():
    llm = LLM(model="ollama/llama3.1", base_url="http://localhost:11434")
    sample_text = "Name: Alice Llama, Age: 30"
    instructions = get_conversion_instructions(SimpleModel, llm)
    converter = Converter(
        llm=llm,
        text=sample_text,
        model=SimpleModel,
        instructions=instructions,
    )
    output = converter.to_pydantic()
    assert isinstance(output, SimpleModel)
    assert output.name == "Alice Llama"
    assert output.age == 30
@skip_external_api
@pytest.mark.vcr(filter_headers=["authorization"])
def test_converter_with_nested_model():
    llm = LLM(model="gpt-4o-mini")
    sample_text = "Name: John Doe\nAge: 30\nAddress: 123 Main St, Anytown, 12345"

    instructions = get_conversion_instructions(Person, llm)
    converter = Converter(
        llm=llm,
        text=sample_text,
        model=Person,
        instructions=instructions,
    )

    output = converter.to_pydantic()

    assert isinstance(output, Person)
    assert output.name == "John Doe"
    assert output.age == 30
    assert isinstance(output.address, Address)
    assert output.address.street == "123 Main St"
    assert output.address.city == "Anytown"
    assert output.address.zip_code == "12345"


# Tests for error handling
def test_converter_error_handling():
    llm = Mock(spec=LLM)
    llm.supports_function_calling.return_value = False
    llm.call.return_value = "Invalid JSON"
    sample_text = "Name: Alice, Age: 30"

    instructions = get_conversion_instructions(SimpleModel, llm)
    converter = Converter(
        llm=llm,
        text=sample_text,
        model=SimpleModel,
        instructions=instructions,
    )

    with pytest.raises(ConverterError) as exc_info:
        output = converter.to_pydantic()

    assert "Failed to convert text into a Pydantic model" in str(exc_info.value)


# Tests for retry logic
def test_converter_retry_logic():
    llm = Mock(spec=LLM)
    llm.supports_function_calling.return_value = False
    llm.call.side_effect = [
        "Invalid JSON",
        "Still invalid",
        '{"name": "Retry Alice", "age": 30}',
    ]
    sample_text = "Name: Retry Alice, Age: 30"

    instructions = get_conversion_instructions(SimpleModel, llm)
    converter = Converter(
        llm=llm,
        text=sample_text,
        model=SimpleModel,
        instructions=instructions,
        max_attempts=3,
    )

    output = converter.to_pydantic()

    assert isinstance(output, SimpleModel)
    assert output.name == "Retry Alice"
    assert output.age == 30
    assert llm.call.call_count == 3


# Tests for optional fields
def test_converter_with_optional_fields():
    class OptionalModel(BaseModel):
        name: str
        age: Optional[int]

    llm = Mock(spec=LLM)
    llm.supports_function_calling.return_value = False
    # Simulate the LLM's response with 'age' explicitly set to null
    llm.call.return_value = '{"name": "Bob", "age": null}'
    sample_text = "Name: Bob, age: None"

    instructions = get_conversion_instructions(OptionalModel, llm)
    converter = Converter(
        llm=llm,
        text=sample_text,
        model=OptionalModel,
        instructions=instructions,
    )

    output = converter.to_pydantic()

    assert isinstance(output, OptionalModel)
    assert output.name == "Bob"
    assert output.age is None


# Tests for list fields
def test_converter_with_list_field():
    class ListModel(BaseModel):
        items: List[int]

    llm = Mock(spec=LLM)
    llm.supports_function_calling.return_value = False
    llm.call.return_value = '{"items": [1, 2, 3]}'
    sample_text = "Items: 1, 2, 3"

    instructions = get_conversion_instructions(ListModel, llm)
    converter = Converter(
        llm=llm,
        text=sample_text,
        model=ListModel,
        instructions=instructions,
    )

    output = converter.to_pydantic()

    assert isinstance(output, ListModel)
    assert output.items == [1, 2, 3]


# Tests for enums
from enum import Enum


def test_converter_with_enum():
    class Color(Enum):
        RED = "red"
        GREEN = "green"
        BLUE = "blue"

    class EnumModel(BaseModel):
        name: str
        color: Color

    llm = Mock(spec=LLM)
    llm.supports_function_calling.return_value = False
    llm.call.return_value = '{"name": "Alice", "color": "red"}'
    sample_text = "Name: Alice, Color: Red"

    instructions = get_conversion_instructions(EnumModel, llm)
    converter = Converter(
        llm=llm,
        text=sample_text,
        model=EnumModel,
        instructions=instructions,
    )

    output = converter.to_pydantic()

    assert isinstance(output, EnumModel)
    assert output.name == "Alice"
    assert output.color == Color.RED


# Tests for ambiguous input
def test_converter_with_ambiguous_input():
    llm = Mock(spec=LLM)
    llm.supports_function_calling.return_value = False
    llm.call.return_value = '{"name": "Charlie", "age": "Not an age"}'
    sample_text = "Charlie is thirty years old"

    instructions = get_conversion_instructions(SimpleModel, llm)
    converter = Converter(
        llm=llm,
        text=sample_text,
        model=SimpleModel,
        instructions=instructions,
    )

    with pytest.raises(ConverterError) as exc_info:
        output = converter.to_pydantic()

    assert "failed to convert text into a pydantic model" in str(exc_info.value).lower()


# Tests for function calling support
def test_converter_with_function_calling():
    llm = Mock(spec=LLM)
    llm.supports_function_calling.return_value = True

    instructor = Mock()
    instructor.to_pydantic.return_value = SimpleModel(name="Eve", age=35)

    converter = Converter(
        llm=llm,
        text="Name: Eve, Age: 35",
        model=SimpleModel,
        instructions="Convert this text.",
    )
    converter._create_instructor = Mock(return_value=instructor)

    output = converter.to_pydantic()

    assert isinstance(output, SimpleModel)
    assert output.name == "Eve"
    assert output.age == 35
    instructor.to_pydantic.assert_called_once()
def test_generate_model_description_union_field():
    class UnionModel(BaseModel):
        field: int | str | None

    description = generate_model_description(UnionModel)
    expected_description = '{\n "field": int | str | None\n}'
    assert description == expected_description

@@ -1,187 +0,0 @@
from typing import Any, Dict, List, Union

import pytest

from crewai.utilities.string_utils import interpolate_only


class TestInterpolateOnly:
    """Tests for the interpolate_only function in string_utils.py."""

    def test_basic_variable_interpolation(self):
        """Test basic variable interpolation works correctly."""
        template = "Hello, {name}! Welcome to {company}."
        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
            "name": "Alice",
            "company": "CrewAI",
        }

        result = interpolate_only(template, inputs)

        assert result == "Hello, Alice! Welcome to CrewAI."

    def test_multiple_occurrences_of_same_variable(self):
        """Test that multiple occurrences of the same variable are replaced."""
        template = "{name} is using {name}'s account."
        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
            "name": "Bob"
        }

        result = interpolate_only(template, inputs)

        assert result == "Bob is using Bob's account."

    def test_json_structure_preservation(self):
        """Test that JSON structures are preserved and not interpolated incorrectly."""
        template = """
        Instructions for {agent}:

        Please return the following object:

        {"name": "person's name", "age": 25, "skills": ["coding", "testing"]}
        """
        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
            "agent": "DevAgent"
        }

        result = interpolate_only(template, inputs)

        assert "Instructions for DevAgent:" in result
        assert (
            '{"name": "person\'s name", "age": 25, "skills": ["coding", "testing"]}'
            in result
        )

    def test_complex_nested_json(self):
        """Test with complex JSON structures containing curly braces."""
        template = """
        {agent} needs to process:
        {
            "config": {
                "nested": {
                    "value": 42
                },
                "arrays": [1, 2, {"inner": "value"}]
            }
        }
        """
        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
            "agent": "DataProcessor"
        }

        result = interpolate_only(template, inputs)

        assert "DataProcessor needs to process:" in result
        assert '"nested": {' in result
        assert '"value": 42' in result
        assert '[1, 2, {"inner": "value"}]' in result

    def test_missing_variable(self):
        """Test that an error is raised when a required variable is missing."""
        template = "Hello, {name}!"
        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
            "not_name": "Alice"
        }

        with pytest.raises(KeyError) as excinfo:
            interpolate_only(template, inputs)

        assert "template variable" in str(excinfo.value).lower()
        assert "name" in str(excinfo.value)

    def test_invalid_input_types(self):
        """Test that an error is raised with invalid input types."""
        template = "Hello, {name}!"
        # Using Any for this test since we're intentionally testing an invalid type
        inputs: Dict[str, Any] = {"name": object()}  # Object is not a valid input type

        with pytest.raises(ValueError) as excinfo:
            interpolate_only(template, inputs)

        assert "unsupported type" in str(excinfo.value).lower()

    def test_empty_input_string(self):
        """Test handling of empty or None input string."""
        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
            "name": "Alice"
        }

        assert interpolate_only("", inputs) == ""
        assert interpolate_only(None, inputs) == ""

    def test_no_variables_in_template(self):
        """Test a template with no variables to replace."""
        template = "This is a static string with no variables."
        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
            "name": "Alice"
        }

        result = interpolate_only(template, inputs)

        assert result == template

    def test_variable_name_starting_with_underscore(self):
        """Test variables starting with underscore are replaced correctly."""
        template = "Variable: {_special_var}"
        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
            "_special_var": "Special Value"
        }

        result = interpolate_only(template, inputs)

        assert result == "Variable: Special Value"

    def test_preserves_non_matching_braces(self):
        """Test that non-matching braces patterns are preserved."""
        template = (
            "This {123} and {!var} should not be replaced but {valid_var} should."
        )
        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
            "valid_var": "works"
        }

        result = interpolate_only(template, inputs)

        assert (
            result == "This {123} and {!var} should not be replaced but works should."
        )

    def test_complex_mixed_scenario(self):
        """Test a complex scenario with both valid variables and JSON structures."""
        template = """
        {agent_name} is working on task {task_id}.

        Instructions:
        1. Process the data
        2. Return results as:

        {
            "taskId": "{task_id}",
            "results": {
                "processed_by": "agent_name",
                "status": "complete",
                "values": [1, 2, 3]
            }
        }
        """
        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]] = {
            "agent_name": "AnalyticsAgent",
            "task_id": "T-12345",
        }

        result = interpolate_only(template, inputs)

        assert "AnalyticsAgent is working on task T-12345" in result
        assert '"taskId": "T-12345"' in result
        assert '"processed_by": "agent_name"' in result  # This shouldn't be replaced
        assert '"values": [1, 2, 3]' in result

    def test_empty_inputs_dictionary(self):
        """Test that an error is raised with empty inputs dictionary."""
        template = "Hello, {name}!"
        inputs: Dict[str, Any] = {}

        with pytest.raises(ValueError) as excinfo:
            interpolate_only(template, inputs)

        assert "inputs dictionary cannot be empty" in str(excinfo.value).lower()
146 uv.lock generated
@@ -715,9 +715,9 @@ requires-dist = [
    { name = "openai", specifier = ">=1.13.3" },
    { name = "openpyxl", specifier = ">=3.1.5" },
    { name = "openpyxl", marker = "extra == 'openpyxl'", specifier = ">=3.1.5" },
    { name = "opentelemetry-api", specifier = ">=1.30.0" },
    { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },
    { name = "opentelemetry-sdk", specifier = ">=1.30.0" },
    { name = "opentelemetry-api", specifier = ">=1.22.0" },
    { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.22.0" },
    { name = "opentelemetry-sdk", specifier = ">=1.22.0" },
    { name = "pandas", marker = "extra == 'pandas'", specifier = ">=2.2.3" },
    { name = "pdfplumber", specifier = ">=0.11.4" },
    { name = "pdfplumber", marker = "extra == 'pdfplumber'", specifier = ">=0.11.4" },
@@ -1617,42 +1617,39 @@ wheels = [

[[package]]
name = "grpcio-tools"
version = "1.67.0"
version = "1.62.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "grpcio" },
    { name = "protobuf" },
    { name = "setuptools" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e7/f8/62e15867651b72f6f95313e21d81f5f1c210b69a4cc664aecf52ec4c8a53/grpcio_tools-1.67.0.tar.gz", hash = "sha256:181b3d4e61b83142c182ec366f3079b0023509743986e54c9465ca38cac255f8", size = 5159163 }
sdist = { url = "https://files.pythonhosted.org/packages/54/fa/b69bd8040eafc09b88bb0ec0fea59e8aacd1a801e688af087cead213b0d0/grpcio-tools-1.62.3.tar.gz", hash = "sha256:7c7136015c3d62c3eef493efabaf9e3380e3e66d24ee8e94c01cb71377f57833", size = 4538520 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/91/9d/7608eb89b41433a49dbf96f56d9c05b3a5ba08951702d54c368d370ab6aa/grpcio_tools-1.67.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:12aa38af76b5ef00a55808c7c374ed18d5dc7cc8081b717e56da3c50df1776e2", size = 2308120 },
    { url = "https://files.pythonhosted.org/packages/93/f2/d8cbc35e63bba98e4352427d01c64801fef9e9d9cd7fc5eea0538128e0e6/grpcio_tools-1.67.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b0b03d055127bbc7c629454804b53b5cad2cedfcf904576d159a8a04c22b8e66", size = 5500124 },
    { url = "https://files.pythonhosted.org/packages/eb/b5/131d0eac92205d0ae3d3f7eecf655884ef7746aecac5a93520fb83d907d0/grpcio_tools-1.67.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:02b0b50c59a8f7428326197027a2f586d216c46138c547f861533c46bff78bfe", size = 2282058 },
    { url = "https://files.pythonhosted.org/packages/3f/3f/5e4de8d7fe38e8e42567a49a39f77d67e2905b00c69165e2e88f9d3005ac/grpcio_tools-1.67.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2afdfe151ed9edbd4a3fd646716f83b58010769c57f9c0aa1cf4c3bfb1240a8", size = 2617363 },
    { url = "https://files.pythonhosted.org/packages/eb/53/3eb4eb7c178a229676d1ff0bcda640ebc0a104d12cdbd884f6796d118c56/grpcio_tools-1.67.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3eeb87575b2b360c5ef5aef22eb76cfdd6a255d2f628a48ab0e5a61a0039fb", size = 2416026 },
    { url = "https://files.pythonhosted.org/packages/a6/9a/9c584d21ed1fb8f7adac6135a569c9b3b1378b6b467fba8d94d14de70606/grpcio_tools-1.67.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ead78089c4771605a1ff8894e47f2267440693f1beeee06fd5a788aede83370f", size = 3224904 },
    { url = "https://files.pythonhosted.org/packages/93/6a/dab92a7aa1bae0d2e0735462fbde778011916e5124d7ee9b52d214f42552/grpcio_tools-1.67.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0671dcdccef09ca4eb415c1d6f470a857c6486733c146676f6810a3ade1d42cb", size = 2870381 },
    { url = "https://files.pythonhosted.org/packages/49/be/3f2c958ef65161f3eeae5a1013358ca3c2eab25174ec4fc8d46b6d6146c8/grpcio_tools-1.67.0-cp310-cp310-win32.whl", hash = "sha256:a7398d90b8c7da479aec8f853d3664d5a93c209f8ac3bd41cb7ae4e8677a45c6", size = 941140 },
    { url = "https://files.pythonhosted.org/packages/17/e9/461db9af08badc647659fa4a382ab546981ebccb413fc625e4b7c0413305/grpcio_tools-1.67.0-cp310-cp310-win_amd64.whl", hash = "sha256:f7e7d70a74df7e07be7cceaa694b7e8e5f3bef8e0299906f60885ecf7a40adb4", size = 1091151 },
    { url = "https://files.pythonhosted.org/packages/cd/0d/88f181eecef84c9c8c009fa4d49ce812a5717539b75aacea4a7be8b9587c/grpcio_tools-1.67.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:655716bf931a22a090134d87953710033640996d31e36f5f9b0106ff5f552d8e", size = 2307990 },
    { url = "https://files.pythonhosted.org/packages/de/22/94855e18588800c96eca95af3be918249f635e4635e3e46895949b0ca27e/grpcio_tools-1.67.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:484ae782f9d3ff58e0bbb2f4cad14d5f5d9132fc701835b1dffd2c2a06f73ba6", size = 5526488 },
    { url = "https://files.pythonhosted.org/packages/c3/c7/086f6c287fed85c2a5e19cb457a42a0eae2df9534666ed252947170daf8e/grpcio_tools-1.67.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:f3e34de876efe1273f91e25ef241e449ed7f9411472dd9ff56d2039618017c30", size = 2282139 },
    { url = "https://files.pythonhosted.org/packages/40/1a/d8e2171ef7b5b1fda54fa2dc82807725c9e31dd6b4878e9d68ab8f3c48b7/grpcio_tools-1.67.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8301719edde2c3d388995703cdd962f558b76e9750405f772dce61402e4c3d0", size = 2617333 },
    { url = "https://files.pythonhosted.org/packages/08/e8/e2b0a3e5890ad650d0cc9d92227f87a407784a9fc110438b85d01cf1ec71/grpcio_tools-1.67.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1629ea246044ccd473d9ac4c9f137a440d830b5e08d35225e1b354dbbb15b75d", size = 2415805 },
    { url = "https://files.pythonhosted.org/packages/6a/43/a1731299e1662c24d89795a8ae4bb725f4a8a0c8e2dc6e12d3276eb96e14/grpcio_tools-1.67.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d77a3c5cec0065267ca1a0b2589ececd1277ce25aa67f13ec50c816ee6f26f7f", size = 3224764 },
    { url = "https://files.pythonhosted.org/packages/5d/03/968dd4b8de9ec4c6d287a8ba8b844f515a2cfcb350acefdb1fcb6f2945d9/grpcio_tools-1.67.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9bf992bcc7d9e6eaa20705056e1b955593092a38cec1746fef389d873ab2056", size = 2870440 },
    { url = "https://files.pythonhosted.org/packages/9f/ea/e6bb028fec6f37aace620bd0a68e7c369bc975ece940dd3de08a2ef66edc/grpcio_tools-1.67.0-cp311-cp311-win32.whl", hash = "sha256:7e6e3db119c38629e0767cdb2ee18726ecc87e2249117d4c9e7ce06ea8c894ea", size = 940888 },
    { url = "https://files.pythonhosted.org/packages/e5/26/b6f98fc9c1e6b8fa5b676bbb07e2bc70f388d4c513140fa38ffa9a15b934/grpcio_tools-1.67.0-cp311-cp311-win_amd64.whl", hash = "sha256:c6c27aec301a0e6cf231f9ee1c467c64002af51170fa7c0f3bb10bbfcd03fee7", size = 1091094 },
    { url = "https://files.pythonhosted.org/packages/d6/b6/57e67c0244db8d7c0c312041293b806bfb1c9d66c26159e6faf39cc10356/grpcio_tools-1.67.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:dca7f053cbdb26a587d4410ddb893877c585fb60a31f22fdd128e4f7c4dab27c", size = 2307646 },
    { url = "https://files.pythonhosted.org/packages/52/43/837f08b85b04ac225aebe1d7da1a7a79fc313f231306c865b5112cef7dc4/grpcio_tools-1.67.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:de8c4f68ffa690769d84329c17c7fdd5fbe4c61b8f8a0de03f1ad8ef8bb06963", size = 5525447 },
    { url = "https://files.pythonhosted.org/packages/3e/5f/adb8b87f5c403ba53529b6645184beddfa63abf2c524a6dabaa430e6bab3/grpcio_tools-1.67.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:6e4ecb24c27a78f09fead45d4ed873805d6026124ccb6793b6fb93a490b78ddf", size = 2281767 },
    { url = "https://files.pythonhosted.org/packages/6e/cd/3d6a7971e28b96cb618abb281325517443744ecfe48aa03f27a17cd5d4e1/grpcio_tools-1.67.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:004d6ef1b5f724480f05c0bdc904bf8c78c43d633c537d99abe51b52ce0cadeb", size = 2617363 },
    { url = "https://files.pythonhosted.org/packages/2d/a9/b8f1eae3db0f1b6f9548bd2032f48cb6f1ec9bc6781436d52dff4b352fab/grpcio_tools-1.67.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9dd257072c86eb9b36791b3674a513a215ba76bbdd38fc228f0e8c6dc5ce3524", size = 2415322 },
    { url = "https://files.pythonhosted.org/packages/9b/fc/0045bf2e5c97a5ffe0ff2c9a4e4a62894402e8d7094162c2084a809c9d1c/grpcio_tools-1.67.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a8cca551317ed26e17d13b6ee27b2bd62f5fe9b3842b4e88389deb984f995848", size = 3225044 },
    { url = "https://files.pythonhosted.org/packages/dc/73/eaf40958dd648dd98a0fbd30df2b51c5beb7ee24127c1f0bb99ea44fd435/grpcio_tools-1.67.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a7ac3b4f837c693142f6688b629d1f6408f6ab250d927159b572555f5339fe25", size = 2870418 },
    { url = "https://files.pythonhosted.org/packages/b4/77/e307e91816123444ff657bbae2269cb912f31a9390118ed371bde9d0c1f3/grpcio_tools-1.67.0-cp312-cp312-win32.whl", hash = "sha256:95feec33388e2a8f72c360a68efe6f0bfed9c771e94d21b7f2359d0010f60219", size = 940540 },
    { url = "https://files.pythonhosted.org/packages/be/2a/0c1a64e88fbc17235b68d3178be6cf4a69aea5bd1deed683c0bbd2f5e9f9/grpcio_tools-1.67.0-cp312-cp312-win_amd64.whl", hash = "sha256:50a31d035193ebe7154181eac84734e25bdcdb36adba849d3b2adf1c3b0c382b", size = 1090427 },
    { url = "https://files.pythonhosted.org/packages/ff/eb/eb0a3aa9480c3689d31fd2ad536df6a828e97a60f667c8a93d05bdf07150/grpcio_tools-1.62.3-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:2f968b049c2849540751ec2100ab05e8086c24bead769ca734fdab58698408c1", size = 5117556 },
    { url = "https://files.pythonhosted.org/packages/f3/fb/8be3dda485f7fab906bfa02db321c3ecef953a87cdb5f6572ca08b187bcb/grpcio_tools-1.62.3-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:0a8c0c4724ae9c2181b7dbc9b186df46e4f62cb18dc184e46d06c0ebeccf569e", size = 2719330 },
    { url = "https://files.pythonhosted.org/packages/63/de/6978f8d10066e240141cd63d1fbfc92818d96bb53427074f47a8eda921e1/grpcio_tools-1.62.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5782883a27d3fae8c425b29a9d3dcf5f47d992848a1b76970da3b5a28d424b26", size = 3070818 },
    { url = "https://files.pythonhosted.org/packages/74/34/bb8f816893fc73fd6d830e895e8638d65d13642bb7a434f9175c5ca7da11/grpcio_tools-1.62.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3d812daffd0c2d2794756bd45a353f89e55dc8f91eb2fc840c51b9f6be62667", size = 2804993 },
    { url = "https://files.pythonhosted.org/packages/78/60/b2198d7db83293cdb9760fc083f077c73e4c182da06433b3b157a1567d06/grpcio_tools-1.62.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b47d0dda1bdb0a0ba7a9a6de88e5a1ed61f07fad613964879954961e36d49193", size = 3684915 },
    { url = "https://files.pythonhosted.org/packages/61/20/56dbdc4ecb14d42a03cd164ff45e6e84572bbe61ee59c50c39f4d556a8d5/grpcio_tools-1.62.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ca246dffeca0498be9b4e1ee169b62e64694b0f92e6d0be2573e65522f39eea9", size = 3297482 },
    { url = "https://files.pythonhosted.org/packages/4a/dc/e417a313c905744ce8cedf1e1edd81c41dc45ff400ae1c45080e18f26712/grpcio_tools-1.62.3-cp310-cp310-win32.whl", hash = "sha256:6a56d344b0bab30bf342a67e33d386b0b3c4e65868ffe93c341c51e1a8853ca5", size = 909793 },
    { url = "https://files.pythonhosted.org/packages/d9/69/75e7ebfd8d755d3e7be5c6d1aa6d13220f5bba3a98965e4b50c329046777/grpcio_tools-1.62.3-cp310-cp310-win_amd64.whl", hash = "sha256:710fecf6a171dcbfa263a0a3e7070e0df65ba73158d4c539cec50978f11dad5d", size = 1052459 },
    { url = "https://files.pythonhosted.org/packages/23/52/2dfe0a46b63f5ebcd976570aa5fc62f793d5a8b169e211c6a5aede72b7ae/grpcio_tools-1.62.3-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:703f46e0012af83a36082b5f30341113474ed0d91e36640da713355cd0ea5d23", size = 5147623 },
    { url = "https://files.pythonhosted.org/packages/f0/2e/29fdc6c034e058482e054b4a3c2432f84ff2e2765c1342d4f0aa8a5c5b9a/grpcio_tools-1.62.3-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:7cc83023acd8bc72cf74c2edbe85b52098501d5b74d8377bfa06f3e929803492", size = 2719538 },
    { url = "https://files.pythonhosted.org/packages/f9/60/abe5deba32d9ec2c76cdf1a2f34e404c50787074a2fee6169568986273f1/grpcio_tools-1.62.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ff7d58a45b75df67d25f8f144936a3e44aabd91afec833ee06826bd02b7fbe7", size = 3070964 },
    { url = "https://files.pythonhosted.org/packages/bc/ad/e2b066684c75f8d9a48508cde080a3a36618064b9cadac16d019ca511444/grpcio_tools-1.62.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f2483ea232bd72d98a6dc6d7aefd97e5bc80b15cd909b9e356d6f3e326b6e43", size = 2805003 },
    { url = "https://files.pythonhosted.org/packages/9c/3f/59bf7af786eae3f9d24ee05ce75318b87f541d0950190ecb5ffb776a1a58/grpcio_tools-1.62.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:962c84b4da0f3b14b3cdb10bc3837ebc5f136b67d919aea8d7bb3fd3df39528a", size = 3685154 },
    { url = "https://files.pythonhosted.org/packages/f1/79/4dd62478b91e27084c67b35a2316ce8a967bd8b6cb8d6ed6c86c3a0df7cb/grpcio_tools-1.62.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8ad0473af5544f89fc5a1ece8676dd03bdf160fb3230f967e05d0f4bf89620e3", size = 3297942 },
    { url = "https://files.pythonhosted.org/packages/b8/cb/86449ecc58bea056b52c0b891f26977afc8c4464d88c738f9648da941a75/grpcio_tools-1.62.3-cp311-cp311-win32.whl", hash = "sha256:db3bc9fa39afc5e4e2767da4459df82b095ef0cab2f257707be06c44a1c2c3e5", size = 910231 },
    { url = "https://files.pythonhosted.org/packages/45/a4/9736215e3945c30ab6843280b0c6e1bff502910156ea2414cd77fbf1738c/grpcio_tools-1.62.3-cp311-cp311-win_amd64.whl", hash = "sha256:e0898d412a434e768a0c7e365acabe13ff1558b767e400936e26b5b6ed1ee51f", size = 1052496 },
    { url = "https://files.pythonhosted.org/packages/2a/a5/d6887eba415ce318ae5005e8dfac3fa74892400b54b6d37b79e8b4f14f5e/grpcio_tools-1.62.3-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d102b9b21c4e1e40af9a2ab3c6d41afba6bd29c0aa50ca013bf85c99cdc44ac5", size = 5147690 },
    { url = "https://files.pythonhosted.org/packages/8a/7c/3cde447a045e83ceb4b570af8afe67ffc86896a2fe7f59594dc8e5d0a645/grpcio_tools-1.62.3-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:0a52cc9444df978438b8d2332c0ca99000521895229934a59f94f37ed896b133", size = 2720538 },
    { url = "https://files.pythonhosted.org/packages/88/07/f83f2750d44ac4f06c07c37395b9c1383ef5c994745f73c6bfaf767f0944/grpcio_tools-1.62.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141d028bf5762d4a97f981c501da873589df3f7e02f4c1260e1921e565b376fa", size = 3071571 },
    { url = "https://files.pythonhosted.org/packages/37/74/40175897deb61e54aca716bc2e8919155b48f33aafec8043dda9592d8768/grpcio_tools-1.62.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47a5c093ab256dec5714a7a345f8cc89315cb57c298b276fa244f37a0ba507f0", size = 2806207 },
    { url = "https://files.pythonhosted.org/packages/ec/ee/d8de915105a217cbcb9084d684abdc032030dcd887277f2ef167372287fe/grpcio_tools-1.62.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f6831fdec2b853c9daa3358535c55eed3694325889aa714070528cf8f92d7d6d", size = 3685815 },
    { url = "https://files.pythonhosted.org/packages/fd/d9/4360a6c12be3d7521b0b8c39e5d3801d622fbb81cc2721dbd3eee31e28c8/grpcio_tools-1.62.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e02d7c1a02e3814c94ba0cfe43d93e872c758bd8fd5c2797f894d0c49b4a1dfc", size = 3298378 },
    { url = "https://files.pythonhosted.org/packages/29/3b/7cdf4a9e5a3e0a35a528b48b111355cd14da601413a4f887aa99b6da468f/grpcio_tools-1.62.3-cp312-cp312-win32.whl", hash = "sha256:b881fd9505a84457e9f7e99362eeedd86497b659030cf57c6f0070df6d9c2b9b", size = 910416 },
    { url = "https://files.pythonhosted.org/packages/6c/66/dd3ec249e44c1cc15e902e783747819ed41ead1336fcba72bf841f72c6e9/grpcio_tools-1.62.3-cp312-cp312-win_amd64.whl", hash = "sha256:11c625eebefd1fd40a228fc8bae385e448c7e32a6ae134e43cf13bbc23f902b7", size = 1052856 },
]

[[package]]
@@ -3119,32 +3116,32 @@ wheels = [

[[package]]
name = "opentelemetry-api"
version = "1.31.1"
version = "1.27.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "deprecated" },
    { name = "importlib-metadata" },
]
sdist = { url = "https://files.pythonhosted.org/packages/8a/cf/db26ab9d748bf50d6edf524fb863aa4da616ba1ce46c57a7dff1112b73fb/opentelemetry_api-1.31.1.tar.gz", hash = "sha256:137ad4b64215f02b3000a0292e077641c8611aab636414632a9b9068593b7e91", size = 64059 }
sdist = { url = "https://files.pythonhosted.org/packages/c9/83/93114b6de85a98963aec218a51509a52ed3f8de918fe91eb0f7299805c3f/opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342", size = 62693 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/6c/c8/86557ff0da32f3817bc4face57ea35cfdc2f9d3bcefd42311ef860dcefb7/opentelemetry_api-1.31.1-py3-none-any.whl", hash = "sha256:1511a3f470c9c8a32eeea68d4ea37835880c0eed09dd1a0187acc8b1301da0a1", size = 65197 },
    { url = "https://files.pythonhosted.org/packages/fb/1f/737dcdbc9fea2fa96c1b392ae47275165a7c641663fbb08a8d252968eed2/opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7", size = 63970 },
]

[[package]]
name = "opentelemetry-exporter-otlp-proto-common"
version = "1.31.1"
version = "1.27.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "opentelemetry-proto" },
]
sdist = { url = "https://files.pythonhosted.org/packages/53/e5/48662d9821d28f05ab8350a9a986ab99d9c0e8b23f8ff391c8df82742a9c/opentelemetry_exporter_otlp_proto_common-1.31.1.tar.gz", hash = "sha256:c748e224c01f13073a2205397ba0e415dcd3be9a0f95101ba4aace5fc730e0da", size = 20627 }
sdist = { url = "https://files.pythonhosted.org/packages/cd/2e/7eaf4ba595fb5213cf639c9158dfb64aacb2e4c7d74bfa664af89fa111f4/opentelemetry_exporter_otlp_proto_common-1.27.0.tar.gz", hash = "sha256:159d27cf49f359e3798c4c3eb8da6ef4020e292571bd8c5604a2a573231dd5c8", size = 17860 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/82/70/134282413000a3fc02e6b4e301b8c5d7127c43b50bd23cddbaf406ab33ff/opentelemetry_exporter_otlp_proto_common-1.31.1-py3-none-any.whl", hash = "sha256:7cadf89dbab12e217a33c5d757e67c76dd20ce173f8203e7370c4996f2e9efd8", size = 18823 },
    { url = "https://files.pythonhosted.org/packages/41/27/4610ab3d9bb3cde4309b6505f98b3aabca04a26aa480aa18cede23149837/opentelemetry_exporter_otlp_proto_common-1.27.0-py3-none-any.whl", hash = "sha256:675db7fffcb60946f3a5c43e17d1168a3307a94a930ecf8d2ea1f286f3d4f79a", size = 17848 },
]

[[package]]
name = "opentelemetry-exporter-otlp-proto-grpc"
version = "1.31.1"
version = "1.27.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "deprecated" },
@@ -3155,14 +3152,14 @@ dependencies = [
    { name = "opentelemetry-proto" },
    { name = "opentelemetry-sdk" },
]
sdist = { url = "https://files.pythonhosted.org/packages/0f/37/6ce465827ac69c52543afb5534146ccc40f54283a3a8a71ef87c91eb8933/opentelemetry_exporter_otlp_proto_grpc-1.31.1.tar.gz", hash = "sha256:c7f66b4b333c52248dc89a6583506222c896c74824d5d2060b818ae55510939a", size = 26620 }
sdist = { url = "https://files.pythonhosted.org/packages/a1/d0/c1e375b292df26e0ffebf194e82cd197e4c26cc298582bda626ce3ce74c5/opentelemetry_exporter_otlp_proto_grpc-1.27.0.tar.gz", hash = "sha256:af6f72f76bcf425dfb5ad11c1a6d6eca2863b91e63575f89bb7b4b55099d968f", size = 26244 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/ee/25/9974fa3a431d7499bd9d179fb9bd7daaa3ad9eba3313f72da5226b6d02df/opentelemetry_exporter_otlp_proto_grpc-1.31.1-py3-none-any.whl", hash = "sha256:f4055ad2c9a2ea3ae00cbb927d6253233478b3b87888e197d34d095a62305fae", size = 18588 },
    { url = "https://files.pythonhosted.org/packages/8d/80/32217460c2c64c0568cea38410124ff680a9b65f6732867bbf857c4d8626/opentelemetry_exporter_otlp_proto_grpc-1.27.0-py3-none-any.whl", hash = "sha256:56b5bbd5d61aab05e300d9d62a6b3c134827bbd28d0b12f2649c2da368006c9e", size = 18541 },
]

[[package]]
name = "opentelemetry-exporter-otlp-proto-http"
version = "1.31.1"
version = "1.27.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "deprecated" },
@@ -3173,29 +3170,28 @@ dependencies = [
    { name = "opentelemetry-sdk" },
    { name = "requests" },
]
sdist = { url = "https://files.pythonhosted.org/packages/6d/9c/d8718fce3d14042beab5a41c8e17be1864c48d2067be3a99a5652d2414a3/opentelemetry_exporter_otlp_proto_http-1.31.1.tar.gz", hash = "sha256:723bd90eb12cfb9ae24598641cb0c92ca5ba9f1762103902f6ffee3341ba048e", size = 15140 }
sdist = { url = "https://files.pythonhosted.org/packages/31/0a/f05c55e8913bf58a033583f2580a0ec31a5f4cf2beacc9e286dcb74d6979/opentelemetry_exporter_otlp_proto_http-1.27.0.tar.gz", hash = "sha256:2103479092d8eb18f61f3fbff084f67cc7f2d4a7d37e75304b8b56c1d09ebef5", size = 15059 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/f2/19/5041dbfdd0b2a6ab340596693759bfa7dcfa8f30b9fa7112bb7117358571/opentelemetry_exporter_otlp_proto_http-1.31.1-py3-none-any.whl", hash = "sha256:5dee1f051f096b13d99706a050c39b08e3f395905f29088bfe59e54218bd1cf4", size = 17257 },
    { url = "https://files.pythonhosted.org/packages/2d/8d/4755884afc0b1db6000527cac0ca17273063b6142c773ce4ecd307a82e72/opentelemetry_exporter_otlp_proto_http-1.27.0-py3-none-any.whl", hash = "sha256:688027575c9da42e179a69fe17e2d1eba9b14d81de8d13553a21d3114f3b4d75", size = 17203 },
]

[[package]]
name = "opentelemetry-instrumentation"
version = "0.52b1"
version = "0.48b0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "opentelemetry-api" },
    { name = "opentelemetry-semantic-conventions" },
    { name = "packaging" },
    { name = "setuptools" },
    { name = "wrapt" },
]
sdist = { url = "https://files.pythonhosted.org/packages/49/c9/c52d444576b0776dbee71d2a4485be276cf46bec0123a5ba2f43f0cf7cde/opentelemetry_instrumentation-0.52b1.tar.gz", hash = "sha256:739f3bfadbbeec04dd59297479e15660a53df93c131d907bb61052e3d3c1406f", size = 28406 }
sdist = { url = "https://files.pythonhosted.org/packages/04/0e/d9394839af5d55c8feb3b22cd11138b953b49739b20678ca96289e30f904/opentelemetry_instrumentation-0.48b0.tar.gz", hash = "sha256:94929685d906380743a71c3970f76b5f07476eea1834abd5dd9d17abfe23cc35", size = 24724 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/61/dd/a2b35078170941990e7a5194b9600fa75868958a9a2196a752da0e7b97a0/opentelemetry_instrumentation-0.52b1-py3-none-any.whl", hash = "sha256:8c0059c4379d77bbd8015c8d8476020efe873c123047ec069bb335e4b8717477", size = 31036 },
    { url = "https://files.pythonhosted.org/packages/0a/7f/405c41d4f359121376c9d5117dcf68149b8122d3f6c718996d037bd4d800/opentelemetry_instrumentation-0.48b0-py3-none-any.whl", hash = "sha256:a69750dc4ba6a5c3eb67986a337185a25b739966d80479befe37b546fc870b44", size = 29449 },
]

[[package]]
name = "opentelemetry-instrumentation-asgi"
version = "0.52b1"
version = "0.48b0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "asgiref" },
@@ -3204,14 +3200,14 @@ dependencies = [
    { name = "opentelemetry-semantic-conventions" },
    { name = "opentelemetry-util-http" },
]
sdist = { url = "https://files.pythonhosted.org/packages/bc/db/79bdc2344b38e60fecc7e99159a3f5b4c0e1acec8de305fba0a713cc3692/opentelemetry_instrumentation_asgi-0.52b1.tar.gz", hash = "sha256:a6dbce9cb5b2c2f45ce4817ad21f44c67fd328358ad3ab911eb46f0be67f82ec", size = 24203 }
sdist = { url = "https://files.pythonhosted.org/packages/44/ac/fd3d40bab3234ec3f5c052a815100676baaae1832fa1067935f11e5c59c6/opentelemetry_instrumentation_asgi-0.48b0.tar.gz", hash = "sha256:04c32174b23c7fa72ddfe192dad874954968a6a924608079af9952964ecdf785", size = 23435 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/19/de/39ec078ae94a365d2f434b7e25886c267864aca5695b48fa5b60f80fbfb3/opentelemetry_instrumentation_asgi-0.52b1-py3-none-any.whl", hash = "sha256:f7179f477ed665ba21871972f979f21e8534edb971232e11920c8a22f4759236", size = 16338 },
    { url = "https://files.pythonhosted.org/packages/db/74/a0e0d38622856597dd8e630f2bd793760485eb165708e11b8be1696bbb5a/opentelemetry_instrumentation_asgi-0.48b0-py3-none-any.whl", hash = "sha256:ddb1b5fc800ae66e85a4e2eca4d9ecd66367a8c7b556169d9e7b57e10676e44d", size = 15958 },
]

[[package]]
name = "opentelemetry-instrumentation-fastapi"
version = "0.52b1"
version = "0.48b0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "opentelemetry-api" },
@@ -3220,57 +3216,57 @@ dependencies = [
    { name = "opentelemetry-semantic-conventions" },
    { name = "opentelemetry-util-http" },
]
sdist = { url = "https://files.pythonhosted.org/packages/30/01/d159829077f2795c716445df6f8edfdd33391e82d712ba4613fb62b99dc5/opentelemetry_instrumentation_fastapi-0.52b1.tar.gz", hash = "sha256:d26ab15dc49e041301d5c2571605b8f5c3a6ee4a85b60940338f56c120221e98", size = 19247 }
sdist = { url = "https://files.pythonhosted.org/packages/58/20/43477da5850ef2cd3792715d442aecd051e885e0603b6ee5783b2104ba8f/opentelemetry_instrumentation_fastapi-0.48b0.tar.gz", hash = "sha256:21a72563ea412c0b535815aeed75fc580240f1f02ebc72381cfab672648637a2", size = 18497 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/23/89/acef7f625b218523873e32584dc5243d95ffa4facba737fd8b854c049c58/opentelemetry_instrumentation_fastapi-0.52b1-py3-none-any.whl", hash = "sha256:73c8804f053c5eb2fd2c948218bff9561f1ef65e89db326a6ab0b5bf829969f4", size = 12114 },
    { url = "https://files.pythonhosted.org/packages/ee/50/745ab075a3041b7a5f29a579d2c28eaad54f64b4589d8f9fd364c62cf0f3/opentelemetry_instrumentation_fastapi-0.48b0-py3-none-any.whl", hash = "sha256:afeb820a59e139d3e5d96619600f11ce0187658b8ae9e3480857dd790bc024f2", size = 11777 },
]

[[package]]
name = "opentelemetry-proto"
version = "1.31.1"
version = "1.27.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "protobuf" },
]
sdist = { url = "https://files.pythonhosted.org/packages/5b/b0/e763f335b9b63482f1f31f46f9299c4d8388e91fc12737aa14fdb5d124ac/opentelemetry_proto-1.31.1.tar.gz", hash = "sha256:d93e9c2b444e63d1064fb50ae035bcb09e5822274f1683886970d2734208e790", size = 34363 }
sdist = { url = "https://files.pythonhosted.org/packages/9a/59/959f0beea798ae0ee9c979b90f220736fbec924eedbefc60ca581232e659/opentelemetry_proto-1.27.0.tar.gz", hash = "sha256:33c9345d91dafd8a74fc3d7576c5a38f18b7fdf8d02983ac67485386132aedd6", size = 34749 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/b6/f1/3baee86eab4f1b59b755f3c61a9b5028f380c88250bb9b7f89340502dbba/opentelemetry_proto-1.31.1-py3-none-any.whl", hash = "sha256:1398ffc6d850c2f1549ce355744e574c8cd7c1dba3eea900d630d52c41d07178", size = 55854 },
    { url = "https://files.pythonhosted.org/packages/94/56/3d2d826834209b19a5141eed717f7922150224d1a982385d19a9444cbf8d/opentelemetry_proto-1.27.0-py3-none-any.whl", hash = "sha256:b133873de5581a50063e1e4b29cdcf0c5e253a8c2d8dc1229add20a4c3830ace", size = 52464 },
]

[[package]]
name = "opentelemetry-sdk"
version = "1.31.1"
version = "1.27.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "opentelemetry-api" },
    { name = "opentelemetry-semantic-conventions" },
    { name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/63/d9/4fe159908a63661e9e635e66edc0d0d816ed20cebcce886132b19ae87761/opentelemetry_sdk-1.31.1.tar.gz", hash = "sha256:c95f61e74b60769f8ff01ec6ffd3d29684743404603df34b20aa16a49dc8d903", size = 159523 }
sdist = { url = "https://files.pythonhosted.org/packages/0d/9a/82a6ac0f06590f3d72241a587cb8b0b751bd98728e896cc4cbd4847248e6/opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f", size = 145019 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/bc/36/758e5d3746bc86a2af20aa5e2236a7c5aa4264b501dc0e9f40efd9078ef0/opentelemetry_sdk-1.31.1-py3-none-any.whl", hash = "sha256:882d021321f223e37afaca7b4e06c1d8bbc013f9e17ff48a7aa017460a8e7dae", size = 118866 },
    { url = "https://files.pythonhosted.org/packages/c1/bd/a6602e71e315055d63b2ff07172bd2d012b4cba2d4e00735d74ba42fc4d6/opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d", size = 110505 },
]

[[package]]
name = "opentelemetry-semantic-conventions"
version = "0.52b1"
version = "0.48b0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "deprecated" },
    { name = "opentelemetry-api" },
]
sdist = { url = "https://files.pythonhosted.org/packages/06/8c/599f9f27cff097ec4d76fbe9fe6d1a74577ceec52efe1a999511e3c42ef5/opentelemetry_semantic_conventions-0.52b1.tar.gz", hash = "sha256:7b3d226ecf7523c27499758a58b542b48a0ac8d12be03c0488ff8ec60c5bae5d", size = 111275 }
sdist = { url = "https://files.pythonhosted.org/packages/0a/89/1724ad69f7411772446067cdfa73b598694c8c91f7f8c922e344d96d81f9/opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a", size = 89445 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/98/be/d4ba300cfc1d4980886efbc9b48ee75242b9fcf940d9c4ccdc9ef413a7cf/opentelemetry_semantic_conventions-0.52b1-py3-none-any.whl", hash = "sha256:72b42db327e29ca8bb1b91e8082514ddf3bbf33f32ec088feb09526ade4bc77e", size = 183409 },
    { url = "https://files.pythonhosted.org/packages/b7/7a/4f0063dbb0b6c971568291a8bc19a4ca70d3c185db2d956230dd67429dfc/opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f", size = 149685 },
]

[[package]]
name = "opentelemetry-util-http"
version = "0.52b1"
version = "0.48b0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/23/3f/16a4225a953bbaae7d800140ed99813f092ea3071ba7780683299a87049b/opentelemetry_util_http-0.52b1.tar.gz", hash = "sha256:c03c8c23f1b75fadf548faece7ead3aecd50761c5593a2b2831b48730eee5b31", size = 8044 }
sdist = { url = "https://files.pythonhosted.org/packages/d6/d7/185c494754340e0a3928fd39fde2616ee78f2c9d66253affaad62d5b7935/opentelemetry_util_http-0.48b0.tar.gz", hash = "sha256:60312015153580cc20f322e5cdc3d3ecad80a71743235bdb77716e742814623c", size = 7863 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/2c/00/1591b397c9efc0e4215d223553a1cb9090c8499888a4447f842443077d31/opentelemetry_util_http-0.52b1-py3-none-any.whl", hash = "sha256:6a6ab6bfa23fef96f4995233e874f67602adf9d224895981b4ab9d4dde23de78", size = 7305 },
    { url = "https://files.pythonhosted.org/packages/ad/2e/36097c0a4d0115b8c7e377c90bab7783ac183bc5cb4071308f8959454311/opentelemetry_util_http-0.48b0-py3-none-any.whl", hash = "sha256:76f598af93aab50328d2a69c786beaedc8b6a7770f7a818cc307eb353debfffb", size = 6946 },
]

[[package]]
@@ -3632,16 +3628,16 @@ wheels = [

[[package]]
name = "protobuf"
version = "5.29.4"
version = "4.25.5"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/17/7d/b9dca7365f0e2c4fa7c193ff795427cfa6290147e5185ab11ece280a18e7/protobuf-5.29.4.tar.gz", hash = "sha256:4f1dfcd7997b31ef8f53ec82781ff434a28bf71d9102ddde14d076adcfc78c99", size = 424902 }
sdist = { url = "https://files.pythonhosted.org/packages/67/dd/48d5fdb68ec74d70fabcc252e434492e56f70944d9f17b6a15e3746d2295/protobuf-4.25.5.tar.gz", hash = "sha256:7f8249476b4a9473645db7f8ab42b02fe1488cbe5fb72fddd445e0665afd8584", size = 380315 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/b2/043a1a1a20edd134563699b0e91862726a0dc9146c090743b6c44d798e75/protobuf-5.29.4-cp310-abi3-win32.whl", hash = "sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7", size = 422709 },
|
||||
{ url = "https://files.pythonhosted.org/packages/79/fc/2474b59570daa818de6124c0a15741ee3e5d6302e9d6ce0bdfd12e98119f/protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d", size = 434506 },
|
||||
{ url = "https://files.pythonhosted.org/packages/46/de/7c126bbb06aa0f8a7b38aaf8bd746c514d70e6a2a3f6dd460b3b7aad7aae/protobuf-5.29.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:307ecba1d852ec237e9ba668e087326a67564ef83e45a0189a772ede9e854dd0", size = 417826 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a2/b5/bade14ae31ba871a139aa45e7a8183d869efe87c34a4850c87b936963261/protobuf-5.29.4-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:aec4962f9ea93c431d5714ed1be1c93f13e1a8618e70035ba2b0564d9e633f2e", size = 319574 },
|
||||
{ url = "https://files.pythonhosted.org/packages/46/88/b01ed2291aae68b708f7d334288ad5fb3e7aa769a9c309c91a0d55cb91b0/protobuf-5.29.4-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:d7d3f7d1d5a66ed4942d4fefb12ac4b14a29028b209d4bfb25c68ae172059922", size = 319672 },
|
||||
{ url = "https://files.pythonhosted.org/packages/12/fb/a586e0c973c95502e054ac5f81f88394f24ccc7982dac19c515acd9e2c93/protobuf-5.29.4-py3-none-any.whl", hash = "sha256:3fde11b505e1597f71b875ef2fc52062b6a9740e5f7c8997ce878b6009145862", size = 172551 },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/35/1b3c5a5e6107859c4ca902f4fbb762e48599b78129a05d20684fef4a4d04/protobuf-4.25.5-cp310-abi3-win32.whl", hash = "sha256:5e61fd921603f58d2f5acb2806a929b4675f8874ff5f330b7d6f7e2e784bbcd8", size = 392457 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/ad/bf3f358e90b7e70bf7fb520702cb15307ef268262292d3bdb16ad8ebc815/protobuf-4.25.5-cp310-abi3-win_amd64.whl", hash = "sha256:4be0571adcbe712b282a330c6e89eae24281344429ae95c6d85e79e84780f5ea", size = 413449 },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/49/d110f0a43beb365758a252203c43eaaad169fe7749da918869a8c991f726/protobuf-4.25.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:b2fde3d805354df675ea4c7c6338c1aecd254dfc9925e88c6d31a2bcb97eb173", size = 394248 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c6/ab/0f384ca0bc6054b1a7b6009000ab75d28a5506e4459378b81280ae7fd358/protobuf-4.25.5-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:919ad92d9b0310070f8356c24b855c98df2b8bd207ebc1c0c6fcc9ab1e007f3d", size = 293717 },
|
||||
{ url = "https://files.pythonhosted.org/packages/05/a6/094a2640be576d760baa34c902dcb8199d89bce9ed7dd7a6af74dcbbd62d/protobuf-4.25.5-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fe14e16c22be926d3abfcb500e60cab068baf10b542b8c858fa27e098123e331", size = 294635 },
|
||||
{ url = "https://files.pythonhosted.org/packages/33/90/f198a61df8381fb43ae0fe81b3d2718e8dcc51ae8502c7657ab9381fbc4f/protobuf-4.25.5-py3-none-any.whl", hash = "sha256:0aebecb809cae990f8129ada5ca273d9d670b76d9bfc9b1809f0a9c02b7dbf41", size = 156467 },
|
||||
]
|
||||
|
||||
[[package]]