Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-24 00:08:29 +00:00.

Compare commits: `brandon/fi...devin/1739` (24 commits)
| SHA1 |
|---|
| 60a2b9842d |
| b1860cbb12 |
| 1b488b6da7 |
| d3b398ed52 |
| d52fd09602 |
| d6800d8957 |
| 2fd7506ed9 |
| 161084aff2 |
| b145cb3247 |
| 1adbcf697d |
| e51355200a |
| 47818f4f41 |
| 9b10fd47b0 |
| c408368267 |
| 90b3145e92 |
| fbd0e015d5 |
| 17e25fb842 |
| d6d98ee969 |
| e0600e3bb9 |
| a79d77dfd7 |
| 56ec9bc224 |
| 8eef02739a |
| 6f4ad532e6 |
| 74a1de8550 |
README.md (16 lines changed)
@@ -1,10 +1,18 @@
 <div align="center">

 ![Logo of CrewAI, two people rowing on a boat](./docs/crewai_logo.png)

 # **CrewAI**

-🤖 **CrewAI**: Production-grade framework for orchestrating sophisticated AI agent systems. From simple automations to complex real-world applications, CrewAI provides precise control and deep customization. By fostering collaborative intelligence through flexible, production-ready architecture, CrewAI empowers agents to work together seamlessly, tackling complex business challenges with predictable, consistent results.
+**CrewAI**: Production-grade framework for orchestrating sophisticated AI agent systems. From simple automations to complex real-world applications, CrewAI provides precise control and deep customization. By fostering collaborative intelligence through flexible, production-ready architecture, CrewAI empowers agents to work together seamlessly, tackling complex business challenges with predictable, consistent results.
+
+**CrewAI Enterprise**
+Want to plan, build (+ no code), deploy, monitor and iterate your agents: [CrewAI Enterprise](https://www.crewai.com/enterprise). Designed for complex, real-world applications, our enterprise solution offers:
+
+- **Seamless Integrations**
+- **Scalable & Secure Deployment**
+- **Actionable Insights**
+- **24/7 Support**

 <h3>
@@ -392,7 +400,7 @@ class AdvancedAnalysisFlow(Flow[MarketState]):
             goal="Gather and validate supporting market data",
             backstory="You excel at finding and correlating multiple data sources"
         )

         analysis_task = Task(
             description="Analyze {sector} sector data for the past {timeframe}",
             expected_output="Detailed market analysis with confidence score",
@@ -403,7 +411,7 @@ class AdvancedAnalysisFlow(Flow[MarketState]):
             expected_output="Corroborating evidence and potential contradictions",
             agent=researcher
         )

         # Demonstrate crew autonomy
         analysis_crew = Crew(
             agents=[analyst, researcher],
@@ -91,7 +91,7 @@ result = crew.kickoff(inputs={"question": "What city does John live in and how o
 ```

-Here's another example with the `CrewDoclingSource`. The CrewDoclingSource is actually quite versatile and can handle multiple file formats including TXT, PDF, DOCX, HTML, and more.
+Here's another example with the `CrewDoclingSource`. The CrewDoclingSource is actually quite versatile and can handle multiple file formats including MD, PDF, DOCX, HTML, and more.

 <Note>
 You need to install `docling` for the following example to work: `uv add docling`
@@ -152,10 +152,10 @@ Here are examples of how to use different types of knowledge sources:

 ### Text File Knowledge Source
 ```python
-from crewai.knowledge.source.crew_docling_source import CrewDoclingSource
+from crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource

 # Create a text file knowledge source
-text_source = CrewDoclingSource(
+text_source = TextFileKnowledgeSource(
     file_paths=["document.txt", "another.txt"]
 )
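The hunk ends here; as orientation, a hedged sketch of how such a source is consumed, assuming the `knowledge_sources` parameter on `Crew` and files placed in the default knowledge directory (names below are placeholders, not part of the diff):

```python
from crewai import Agent, Crew, Process, Task
from crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource

# Same source as above; the files are expected inside the knowledge directory.
text_source = TextFileKnowledgeSource(file_paths=["document.txt", "another.txt"])

agent = Agent(
    role="Researcher",
    goal="Answer questions using the provided documents",
    backstory="You ground every answer in the attached knowledge.",
)
task = Task(
    description="Answer the question: {question}",
    expected_output="A short answer grounded in the documents",
    agent=agent,
)
crew = Crew(
    agents=[agent],
    tasks=[task],
    process=Process.sequential,
    knowledge_sources=[text_source],  # attach the source so tasks can query it
)
result = crew.kickoff(inputs={"question": "What does the document say?"})
```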
@@ -463,26 +463,32 @@ Learn how to get the most out of your LLM configuration:

 <Accordion title="Google">
 ```python Code
-# Option 1. Gemini accessed with an API key.
+# Option 1: Gemini accessed with an API key.
 # https://ai.google.dev/gemini-api/docs/api-key
 GEMINI_API_KEY=<your-api-key>

-# Option 2. Vertex AI IAM credentials for Gemini, Anthropic, and anything in the Model Garden.
+# Option 2: Vertex AI IAM credentials for Gemini, Anthropic, and Model Garden.
 # https://cloud.google.com/vertex-ai/generative-ai/docs/overview
 ```

-## GET CREDENTIALS
+Get credentials:
 ```python Code
 import json

 file_path = 'path/to/vertex_ai_service_account.json'

 # Load the JSON file
 with open(file_path, 'r') as file:
     vertex_credentials = json.load(file)

-# Convert to JSON string
+# Convert the credentials to a JSON string
 vertex_credentials_json = json.dumps(vertex_credentials)
 ```

 Example usage:
 ```python Code
 from crewai import LLM

 llm = LLM(
     model="gemini/gemini-1.5-pro-latest",
     temperature=0.7,
@@ -58,41 +58,107 @@ my_crew = Crew(
 ### Example: Use Custom Memory Instances e.g. FAISS as the VectorDB

 ```python Code
-from crewai import Crew, Agent, Task, Process
+from crewai import Crew, Process
 from crewai.memory import LongTermMemory, ShortTermMemory, EntityMemory
 from crewai.memory.storage import LTMSQLiteStorage, RAGStorage
 from typing import List, Optional

 # Assemble your crew with memory capabilities
-my_crew = Crew(
-    agents=[...],
-    tasks=[...],
-    process="Process.sequential",
-    memory=True,
-    long_term_memory=EnhanceLongTermMemory(
+my_crew: Crew = Crew(
+    agents = [...],
+    tasks = [...],
+    process = Process.sequential,
+    memory = True,
+    # Long-term memory for persistent storage across sessions
+    long_term_memory = LongTermMemory(
         storage=LTMSQLiteStorage(
-            db_path="/my_data_dir/my_crew1/long_term_memory_storage.db"
+            db_path="/my_crew1/long_term_memory_storage.db"
         )
     ),
-    short_term_memory=EnhanceShortTermMemory(
-        storage=CustomRAGStorage(
-            crew_name="my_crew",
-            storage_type="short_term",
-            data_dir="//my_data_dir",
-            model=embedder["model"],
-            dimension=embedder["dimension"],
-        ),
-    ),
-    entity_memory=EnhanceEntityMemory(
-        storage=CustomRAGStorage(
-            crew_name="my_crew",
-            storage_type="entities",
-            data_dir="//my_data_dir",
-            model=embedder["model"],
-            dimension=embedder["dimension"],
-        ),
-    ),
+    # Short-term memory for current context using RAG
+    short_term_memory = ShortTermMemory(
+        storage = RAGStorage(
+            embedder_config={
+                "provider": "openai",
+                "config": {
+                    "model": 'text-embedding-3-small'
+                }
+            },
+            type="short_term",
+            path="/my_crew1/"
+        )
+    ),
+    # Entity memory for tracking key information about entities
+    entity_memory = EntityMemory(
+        storage=RAGStorage(
+            embedder_config={
+                "provider": "openai",
+                "config": {
+                    "model": 'text-embedding-3-small'
+                }
+            },
+            type="short_term",
+            path="/my_crew1/"
+        )
+    ),
     verbose=True,
 )
 ```
 ## Security Considerations

 When configuring memory storage:
 - Use environment variables for storage paths (e.g., `CREWAI_STORAGE_DIR`)
 - Never hardcode sensitive information like database credentials
 - Consider access permissions for storage directories
 - Use relative paths when possible to maintain portability

 Example using environment variables:
 ```python
 import os
 from crewai import Crew
 from crewai.memory import LongTermMemory
 from crewai.memory.storage import LTMSQLiteStorage

 # Configure storage path using environment variable
 storage_path = os.getenv("CREWAI_STORAGE_DIR", "./storage")
 crew = Crew(
     memory=True,
     long_term_memory=LongTermMemory(
         storage=LTMSQLiteStorage(
             db_path="{storage_path}/memory.db".format(storage_path=storage_path)
         )
     )
 )
 ```

 ## Configuration Examples

 ### Basic Memory Configuration
 ```python
 from crewai import Crew
 from crewai.memory import LongTermMemory

 # Simple memory configuration
 crew = Crew(memory=True)  # Uses default storage locations
 ```

 ### Custom Storage Configuration
 ```python
 from crewai import Crew
 from crewai.memory import LongTermMemory
 from crewai.memory.storage import LTMSQLiteStorage

 # Configure custom storage paths
 crew = Crew(
     memory=True,
     long_term_memory=LongTermMemory(
         storage=LTMSQLiteStorage(db_path="./memory.db")
     )
 )
 ```

 ## Integrating Mem0 for Enhanced User Memory

 [Mem0](https://mem0.ai/) is a self-improving memory layer for LLM applications, enabling personalized AI experiences.
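The hunk cuts off mid-section; for orientation, a hedged sketch of the Mem0 wiring this section goes on to describe (the `memory_config` shape and the `MEM0_API_KEY` variable are assumptions based on the Mem0 provider docs, not part of this diff):

```python
import os
from crewai import Crew, Process

# Hypothetical key; Mem0 requires an API key from https://mem0.ai
os.environ["MEM0_API_KEY"] = "m0-..."

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    # Route user-scoped memory through Mem0 instead of the default storage
    memory_config={
        "provider": "mem0",
        "config": {"user_id": "john"},
    },
)
```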
@@ -216,6 +282,19 @@ my_crew = Crew(

 ### Using Google AI embeddings

 #### Prerequisites
 Before using Google AI embeddings, ensure you have:
 - Access to the Gemini API
 - The necessary API keys and permissions

 You will need to update your *pyproject.toml* dependencies:
 ```toml
 dependencies = [
     "google-generativeai>=0.8.4", # main version in January/2025 - crewai v0.100.0 and crewai-tools 0.33.0
     "crewai[tools]>=0.100.0,<1.0.0"
 ]
 ```

 ```python Code
 from crewai import Crew, Agent, Task, Process
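The Python block above is truncated by the hunk; as a hedged sketch, a Google embedder configuration typically looks like the following (the parameter names under `config` and the model name are assumptions, not part of the diff):

```python
import os
from crewai import Crew, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    embedder={
        "provider": "google",
        "config": {
            # Assumed keys: supply your Gemini API key and a Google
            # embedding model such as text-embedding-004.
            "api_key": os.environ["GEMINI_API_KEY"],
            "model": "models/text-embedding-004",
        },
    },
)
```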
@@ -368,6 +447,38 @@ my_crew = Crew(
 )
 ```

 ### Using Amazon Bedrock embeddings

 ```python Code
 # Note: Ensure you have installed `boto3` for Bedrock embeddings to work.

 import os
 import boto3
 from crewai import Crew, Agent, Task, Process

 boto3_session = boto3.Session(
     region_name=os.environ.get("AWS_REGION_NAME"),
     aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
     aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY")
 )

 my_crew = Crew(
     agents=[...],
     tasks=[...],
     process=Process.sequential,
     memory=True,
     embedder={
         "provider": "bedrock",
         "config": {
             "session": boto3_session,
             "model": "amazon.titan-embed-text-v2:0",
             "vector_dimension": 1024
         }
     },
     verbose=True
 )
 ```

 ### Adding Custom Embedding Function

 ```python Code
@@ -268,7 +268,7 @@ analysis_task = Task(

 Task guardrails provide a way to validate and transform task outputs before they
 are passed to the next task. This feature helps ensure data quality and provides
-efeedback to agents when their output doesn't meet specific criteria.
+feedback to agents when their output doesn't meet specific criteria.

 ### Using Task Guardrails
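The hunk ends before the usage example; as a hedged illustration, a minimal guardrail sketch (assuming the documented `guardrail` parameter, which takes a callable returning a `(bool, Any)` tuple):

```python
from typing import Any, Tuple

from crewai import Task
from crewai.tasks.task_output import TaskOutput

def validate_word_count(result: TaskOutput) -> Tuple[bool, Any]:
    """Pass the output through only if it is under 200 words."""
    if len(result.raw.split()) > 200:
        # (False, feedback) is sent back to the agent for a retry
        return (False, "Output exceeds 200 words; please shorten it.")
    return (True, result.raw)

task = Task(
    description="Summarize the article in under 200 words",
    expected_output="A concise summary",
    guardrail=validate_word_count,  # validated before the next task runs
)
```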
docs/how-to/langfuse-observability.mdx (new file, 98 lines)

@@ -0,0 +1,98 @@
---
title: Agent Monitoring with Langfuse
description: Learn how to integrate Langfuse with CrewAI via OpenTelemetry using OpenLit
icon: magnifying-glass-chart
---

# Integrate Langfuse with CrewAI

This notebook demonstrates how to integrate **Langfuse** with **CrewAI** using OpenTelemetry via the **OpenLit** SDK. By the end of this notebook, you will be able to trace your CrewAI applications with Langfuse for improved observability and debugging.

> **What is Langfuse?** [Langfuse](https://langfuse.com) is an open-source LLM engineering platform. It provides tracing and monitoring capabilities for LLM applications, helping developers debug, analyze, and optimize their AI systems. Langfuse integrates with various tools and frameworks via native integrations, OpenTelemetry, and APIs/SDKs.

## Get Started

We'll walk through a simple example of using CrewAI and integrating it with Langfuse via OpenTelemetry using OpenLit.

### Step 1: Install Dependencies

```python
%pip install langfuse openlit crewai crewai_tools
```

### Step 2: Set Up Environment Variables

Set your Langfuse API keys and configure OpenTelemetry export settings to send traces to Langfuse. Please refer to the [Langfuse OpenTelemetry Docs](https://langfuse.com/docs/opentelemetry/get-started) for more information on the Langfuse OpenTelemetry endpoint `/api/public/otel` and authentication.

```python
import os
import base64

LANGFUSE_PUBLIC_KEY="pk-lf-..."
LANGFUSE_SECRET_KEY="sk-lf-..."
LANGFUSE_AUTH=base64.b64encode(f"{LANGFUSE_PUBLIC_KEY}:{LANGFUSE_SECRET_KEY}".encode()).decode()

os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://cloud.langfuse.com/api/public/otel" # EU data region
# os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://us.cloud.langfuse.com/api/public/otel" # US data region
os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"Authorization=Basic {LANGFUSE_AUTH}"

# your openai key
os.environ["OPENAI_API_KEY"] = "sk-..."
```

### Step 3: Initialize OpenLit

Initialize the OpenLit OpenTelemetry instrumentation SDK to start capturing OpenTelemetry traces.

```python
import openlit

openlit.init()
```

### Step 4: Create a Simple CrewAI Application

We'll create a simple CrewAI application where multiple agents collaborate to answer a user's question.

```python
from crewai import Agent, Task, Crew

from crewai_tools import (
    WebsiteSearchTool
)

web_rag_tool = WebsiteSearchTool()

writer = Agent(
    role="Writer",
    goal="You make math engaging and understandable for young children through poetry",
    backstory="You're an expert in writing haikus but you know nothing of math.",
    tools=[web_rag_tool],
)

task = Task(description=("What is {multiplication}?"),
            expected_output=("Compose a haiku that includes the answer."),
            agent=writer)

crew = Crew(
    agents=[writer],
    tasks=[task],
    share_crew=False
)
```
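The hunk does not show the cell that actually runs the crew; a minimal sketch of that step (the exact invocation is an assumption) would be:

```python
# Hypothetical run cell: "{multiplication}" in the task description is
# interpolated from the inputs dict at kickoff time.
result = crew.kickoff(inputs={"multiplication": "3 * 3"})
print(result)
```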
### Step 5: See Traces in Langfuse

After running the agent, you can view the traces generated by your CrewAI application in [Langfuse](https://cloud.langfuse.com). You should see detailed steps of the LLM interactions, which can help you debug and optimize your AI agent.

![Example trace in Langfuse](https://langfuse.com/images/cookbook/integration_crewai/crewai_example_trace.png)

_[Public example trace in Langfuse](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/e2cf380ffc8d47d28da98f136140642b?timestamp=2025-02-05T15%3A12%3A02.717Z&observation=3b32338ee6a5d9af)_

## References

- [Langfuse OpenTelemetry Docs](https://langfuse.com/docs/opentelemetry/get-started)
@@ -1,211 +0,0 @@
# Portkey Integration with CrewAI

<img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-CrewAI.png" alt="Portkey CrewAI Header Image" width="70%" />

[Portkey](https://portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai) is a 2-line upgrade to make your CrewAI agents reliable, cost-efficient, and fast.

Portkey adds 4 core production capabilities to any CrewAI agent:
1. Routing to **200+ LLMs**
2. Making each LLM call more robust
3. Full-stack tracing & cost, performance analytics
4. Real-time guardrails to enforce behavior

## Getting Started

1. **Install Required Packages:**

```bash
pip install -qU crewai portkey-ai
```

2. **Configure the LLM Client:**

To build CrewAI Agents with Portkey, you'll need two keys:
- **Portkey API Key**: Sign up on the [Portkey app](https://app.portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai) and copy your API key
- **Virtual Key**: Virtual Keys securely manage your LLM API keys in one place. Store your LLM provider API keys securely in Portkey's vault

```python
from crewai import LLM
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

gpt_llm = LLM(
    model="gpt-4",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",  # We are using Virtual key
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_VIRTUAL_KEY",  # Enter your Virtual key from Portkey
    )
)
```

3. **Create and Run Your First Agent:**

```python
from crewai import Agent, Task, Crew

# Define your agents with roles and goals
coder = Agent(
    role='Software developer',
    goal='Write clear, concise code on demand',
    backstory='An expert coder with a keen eye for software trends.',
    llm=gpt_llm
)

# Create tasks for your agents
task1 = Task(
    description="Define the HTML for making a simple website with heading- Hello World! Portkey is working!",
    expected_output="A clear and concise HTML code",
    agent=coder
)

# Instantiate your crew
crew = Crew(
    agents=[coder],
    tasks=[task1],
)

result = crew.kickoff()
print(result)
```

## Key Features

| Feature | Description |
|---------|-------------|
| 🌐 Multi-LLM Support | Access OpenAI, Anthropic, Gemini, Azure, and 250+ providers through a unified interface |
| 🛡️ Production Reliability | Implement retries, timeouts, load balancing, and fallbacks |
| 📊 Advanced Observability | Track 40+ metrics including costs, tokens, latency, and custom metadata |
| 🔍 Comprehensive Logging | Debug with detailed execution traces and function call logs |
| 🚧 Security Controls | Set budget limits and implement role-based access control |
| 🔄 Performance Analytics | Capture and analyze feedback for continuous improvement |
| 💾 Intelligent Caching | Reduce costs and latency with semantic or simple caching |

## Production Features with Portkey Configs

All features mentioned below are through Portkey's Config system. Portkey's Config system allows you to define routing strategies using simple JSON objects in your LLM API calls. You can create and manage Configs directly in your code or through the Portkey Dashboard. Each Config has a unique ID for easy reference.

<Frame>
    <img src="https://raw.githubusercontent.com/Portkey-AI/docs-core/refs/heads/main/images/libraries/libraries-3.avif"/>
</Frame>

### 1. Use 250+ LLMs
Access various LLMs like Anthropic, Gemini, Mistral, Azure OpenAI, and more with minimal code changes. Switch between providers or use them together seamlessly. [Learn more about Universal API](https://portkey.ai/docs/product/ai-gateway/universal-api)

Easily switch between different LLM providers:

```python
# Anthropic Configuration
anthropic_llm = LLM(
    model="claude-3-5-sonnet-latest",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_ANTHROPIC_VIRTUAL_KEY",  # You don't need provider when using Virtual keys
        trace_id="anthropic_agent"
    )
)

# Azure OpenAI Configuration
azure_llm = LLM(
    model="gpt-4",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_AZURE_VIRTUAL_KEY",  # You don't need provider when using Virtual keys
        trace_id="azure_agent"
    )
)
```

### 2. Caching
Improve response times and reduce costs with two powerful caching modes:
- **Simple Cache**: Perfect for exact matches
- **Semantic Cache**: Matches responses for requests that are semantically similar
[Learn more about Caching](https://portkey.ai/docs/product/ai-gateway/cache-simple-and-semantic)

```py
config = {
    "cache": {
        "mode": "semantic",  # or "simple" for exact matching
    }
}
```
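This deleted page never showed the config object being attached; a minimal sketch, assuming `createHeaders` accepts a `config` argument as described in Portkey's Config docs:

```py
from crewai import LLM
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

# Hypothetical: pass the caching config through the Portkey headers.
config = {"cache": {"mode": "semantic"}}

cached_llm = LLM(
    model="gpt-4",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",  # the real key lives in Portkey's vault (Virtual Key)
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_VIRTUAL_KEY",
        config=config,  # assumed parameter; see Portkey's Config docs
    ),
)
```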
### 3. Production Reliability
Portkey provides comprehensive reliability features:
- **Automatic Retries**: Handle temporary failures gracefully
- **Request Timeouts**: Prevent hanging operations
- **Conditional Routing**: Route requests based on specific conditions
- **Fallbacks**: Set up automatic provider failovers
- **Load Balancing**: Distribute requests efficiently

[Learn more about Reliability Features](https://portkey.ai/docs/product/ai-gateway/)

### 4. Metrics

Agent runs are complex. Portkey automatically logs **40+ comprehensive metrics** for your AI agents, including cost, tokens used, latency, etc. Whether you need a broad overview or granular insights into your agent runs, Portkey's customizable filters provide the metrics you need.

- Cost per agent interaction
- Response times and latency
- Token usage and efficiency
- Success/failure rates
- Cache hit rates

<img src="https://github.com/siddharthsambharia-portkey/Portkey-Product-Images/blob/main/Portkey-Dashboard.png?raw=true" width="70%" alt="Portkey Dashboard" />

### 5. Detailed Logging
Logs are essential for understanding agent behavior, diagnosing issues, and improving performance. They provide a detailed record of agent activities and tool use, which is crucial for debugging and optimizing processes.

Access a dedicated section to view records of agent executions, including parameters, outcomes, function calls, and errors. Filter logs based on multiple parameters such as trace ID, model, tokens used, and metadata.

<details>
  <summary><b>Traces</b></summary>
  <img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-Traces.png" alt="Portkey Traces" width="70%" />
</details>

<details>
  <summary><b>Logs</b></summary>
  <img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-Logs.png" alt="Portkey Logs" width="70%" />
</details>

### 6. Enterprise Security Features
- Set budget limits and rate limits per Virtual Key (disposable API keys)
- Implement role-based access control
- Track system changes with audit logs
- Configure data retention policies

For detailed information on creating and managing Configs, visit the [Portkey documentation](https://docs.portkey.ai/product/ai-gateway/configs).

## Resources

- [📘 Portkey Documentation](https://docs.portkey.ai)
- [📊 Portkey Dashboard](https://app.portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai)
- [🐦 Twitter](https://twitter.com/portkeyai)
- [💬 Discord Community](https://discord.gg/DD7vgKK299)
@@ -1,5 +1,5 @@
 ---
-title: Portkey Observability and Guardrails
+title: Agent Monitoring with Portkey
 description: How to use Portkey with CrewAI
 icon: key
 ---
@@ -103,7 +103,8 @@
             "how-to/langtrace-observability",
             "how-to/mlflow-observability",
             "how-to/openlit-observability",
-            "how-to/portkey-observability"
+            "how-to/portkey-observability",
+            "how-to/langfuse-observability"
           ]
         },
         {
@@ -8,9 +8,9 @@ icon: file-pen

 ## Description

-The `FileWriterTool` is a component of the crewai_tools package, designed to simplify the process of writing content to files.
+The `FileWriterTool` is a component of the crewai_tools package, designed to simplify the process of writing content to files with cross-platform compatibility (Windows, Linux, macOS).
 It is particularly useful in scenarios such as generating reports, saving logs, creating configuration files, and more.
-This tool supports creating new directories if they don't exist, making it easier to organize your output.
+This tool handles path differences across operating systems, supports UTF-8 encoding, and automatically creates directories if they don't exist, making it easier to organize your output reliably across different platforms.

 ## Installation

@@ -43,6 +43,8 @@ print(result)

 ## Conclusion

-By integrating the `FileWriterTool` into your crews, the agents can execute the process of writing content to files and creating directories.
-This tool is essential for tasks that require saving output data, creating structured file systems, and more. By adhering to the setup and usage guidelines provided,
-incorporating this tool into projects is straightforward and efficient.
+By integrating the `FileWriterTool` into your crews, the agents can reliably write content to files across different operating systems.
+This tool is essential for tasks that require saving output data, creating structured file systems, and handling cross-platform file operations.
+It's particularly recommended for Windows users who may encounter file writing issues with standard Python file operations.
+
+By adhering to the setup and usage guidelines provided, incorporating this tool into projects is straightforward and ensures consistent file writing behavior across all platforms.
@@ -17,12 +17,51 @@ The SeleniumScrapingTool is crafted for high-efficiency web scraping tasks.
 It allows for precise extraction of content from web pages by using CSS selectors to target specific elements.
 Its design caters to a wide range of scraping needs, offering flexibility to work with any provided website URL.

+## Prerequisites
+
+- Python 3.7 or higher
+- Chrome browser installed (for ChromeDriver)
+
 ## Installation

-To get started with the SeleniumScrapingTool, install the crewai_tools package using pip:
+### Option 1: All-in-one installation
+```shell
+pip install 'crewai[tools]' selenium>=4.0.0 webdriver-manager>=3.8.0
+```
+
+### Option 2: Step-by-step installation
+```shell
+pip install 'crewai[tools]'
+pip install selenium>=4.0.0
+pip install webdriver-manager>=3.8.0
+```
+
+### Common Installation Issues
+
+1. If you encounter WebDriver issues, ensure your Chrome browser is up-to-date
+2. For Linux users, you might need to install additional system packages:
+```shell
+sudo apt-get install chromium-chromedriver
+```
+
+## Basic Usage
+
+Here's a simple example to get you started with error handling:
+
+```python
+from crewai_tools import SeleniumScrapingTool
+
+try:
+    # Initialize the tool with a specific website
+    tool = SeleniumScrapingTool(website_url='https://example.com')
+
+    # Extract content
+    content = tool.run()
+    print(content)
+except Exception as e:
+    print(f"Error during scraping: {str(e)}")
+    # Ensure proper cleanup in case of errors
+    tool.cleanup()
+```

 ## Usage Examples
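The hunk ends at this heading; the following sketch illustrates the selector-based scraping the tool is designed for, with the `css_element` and `wait_time` parameter names taken as assumptions from the tool's documentation rather than from this diff:

```python
from crewai_tools import SeleniumScrapingTool

# Scrape only the nodes matching a CSS selector, waiting for the page to settle.
tool = SeleniumScrapingTool(
    website_url='https://example.com',
    css_element='.main-content',  # target element(s) to extract
    wait_time=3,                  # seconds to wait before reading the page
)
print(tool.run())
```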
@@ -1,6 +1,6 @@
 [project]
 name = "crewai"
-version = "0.100.1"
+version = "0.102.0"
 description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
 readme = "README.md"
 requires-python = ">=3.10,<3.13"
@@ -45,7 +45,7 @@ Documentation = "https://docs.crewai.com"
 Repository = "https://github.com/crewAIInc/crewAI"

 [project.optional-dependencies]
-tools = ["crewai-tools>=0.32.1"]
+tools = ["crewai-tools>=0.36.0"]
 embeddings = [
     "tiktoken~=0.7.0"
 ]
@@ -14,7 +14,7 @@ warnings.filterwarnings(
     category=UserWarning,
     module="pydantic.main",
 )
-__version__ = "0.100.1"
+__version__ = "0.102.0"
 __all__ = [
     "Agent",
     "Crew",
@@ -16,7 +16,6 @@ from crewai.memory.contextual.contextual_memory import ContextualMemory
 from crewai.task import Task
 from crewai.tools import BaseTool
 from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.tools.base_tool import Tool
 from crewai.utilities import Converter, Prompts
 from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
 from crewai.utilities.converter import generate_model_description
@@ -146,7 +145,7 @@ class Agent(BaseAgent):
     def _set_knowledge(self):
         try:
             if self.knowledge_sources:
-                full_pattern = re.compile(r'[^a-zA-Z0-9\-_\r\n]|(\.\.)')
+                full_pattern = re.compile(r"[^a-zA-Z0-9\-_\r\n]|(\.\.)")
                 knowledge_agent_name = f"{re.sub(full_pattern, '_', self.role)}"
                 if isinstance(self.knowledge_sources, list) and all(
                     isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
@@ -3,11 +3,6 @@ import subprocess
 import click

 from crewai.cli.utils import get_crew
-from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
-from crewai.memory.entity.entity_memory import EntityMemory
-from crewai.memory.long_term.long_term_memory import LongTermMemory
-from crewai.memory.short_term.short_term_memory import ShortTermMemory
-from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler


 def reset_memories_command(
@@ -56,7 +56,8 @@ def test():
     Test the crew execution and returns the results.
     """
     inputs = {
-        "topic": "AI LLMs"
+        "topic": "AI LLMs",
+        "current_year": str(datetime.now().year)
     }
     try:
         {{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.13"
 dependencies = [
-    "crewai[tools]>=0.100.1,<1.0.0"
+    "crewai[tools]>=0.102.0,<1.0.0"
 ]

 [project.scripts]
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.13"
 dependencies = [
-    "crewai[tools]>=0.100.1,<1.0.0",
+    "crewai[tools]>=0.102.0,<1.0.0",
 ]

 [project.scripts]
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
 readme = "README.md"
 requires-python = ">=3.10,<3.13"
 dependencies = [
-    "crewai[tools]>=0.100.1"
+    "crewai[tools]>=0.102.0"
 ]

 [tool.crewai]
@@ -1,7 +1,6 @@
 import asyncio
 import json
 import re
 import sys
 import uuid
 import warnings
 from concurrent.futures import Future
@@ -381,6 +380,22 @@ class Crew(BaseModel):

         return self

+    @model_validator(mode="after")
+    def validate_must_have_non_conditional_task(self) -> "Crew":
+        """Ensure that a crew has at least one non-conditional task."""
+        if not self.tasks:
+            return self
+        non_conditional_count = sum(
+            1 for task in self.tasks if not isinstance(task, ConditionalTask)
+        )
+        if non_conditional_count == 0:
+            raise PydanticCustomError(
+                "only_conditional_tasks",
+                "Crew must include at least one non-conditional task",
+                {},
+            )
+        return self
+
     @model_validator(mode="after")
     def validate_first_task(self) -> "Crew":
         """Ensure the first task is not a ConditionalTask."""
@@ -441,6 +456,7 @@ class Crew(BaseModel):
         return self

+
     @property
     def key(self) -> str:
         source = [agent.key for agent in self.agents] + [
@@ -743,6 +759,7 @@ class Crew(BaseModel):
                 task, task_outputs, futures, task_index, was_replayed
             )
             if skipped_task_output:
+                task_outputs.append(skipped_task_output)
                 continue

             if task.async_execution:
@@ -766,7 +783,7 @@ class Crew(BaseModel):
                     context=context,
                     tools=tools_for_task,
                 )
-                task_outputs = [task_output]
+                task_outputs.append(task_output)
                 self._process_task_result(task, task_output)
                 self._store_execution_log(task, task_output, task_index, was_replayed)

@@ -787,7 +804,7 @@ class Crew(BaseModel):
             task_outputs = self._process_async_tasks(futures, was_replayed)
             futures.clear()

-        previous_output = task_outputs[task_index - 1] if task_outputs else None
+        previous_output = task_outputs[-1] if task_outputs else None
         if previous_output is not None and not task.should_execute(previous_output):
             self._logger.log(
                 "debug",
@@ -909,11 +926,15 @@ class Crew(BaseModel):
         )

     def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
-        if len(task_outputs) != 1:
-            raise ValueError(
-                "Something went wrong. Kickoff should return only one task output."
-            )
-        final_task_output = task_outputs[0]
+        if not task_outputs:
+            raise ValueError("No task outputs available to create crew output.")
+
+        # Filter out empty outputs and get the last valid one as the main output
+        valid_outputs = [t for t in task_outputs if t.raw]
+        if not valid_outputs:
+            raise ValueError("No valid task outputs available to create crew output.")
+        final_task_output = valid_outputs[-1]
+
         final_string_output = final_task_output.raw
         self._finish_execution(final_string_output)
         token_usage = self.calculate_usage_metrics()
@@ -922,7 +943,7 @@ class Crew(BaseModel):
             raw=final_task_output.raw,
             pydantic=final_task_output.pydantic,
             json_dict=final_task_output.json_dict,
-            tasks_output=[task.output for task in self.tasks if task.output],
+            tasks_output=task_outputs,
             token_usage=token_usage,
         )
@@ -1,4 +1,5 @@
 import asyncio
+import copy
 import inspect
 import logging
 from typing import (
@@ -394,7 +395,6 @@ class FlowMeta(type):
                 or hasattr(attr_value, "__trigger_methods__")
                 or hasattr(attr_value, "__is_router__")
             ):
-
                 # Register start methods
                 if hasattr(attr_value, "__is_start_method__"):
                     start_methods.append(attr_name)
@@ -569,6 +569,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
                 f"Initial state must be dict or BaseModel, got {type(self.initial_state)}"
             )

+    def _copy_state(self) -> T:
+        return copy.deepcopy(self._state)
+
     @property
     def state(self) -> T:
         return self._state
@@ -740,6 +743,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
             event=FlowStartedEvent(
                 type="flow_started",
                 flow_name=self.__class__.__name__,
+                inputs=inputs,
             ),
         )
         self._log_flow_event(
@@ -803,6 +807,18 @@ class Flow(Generic[T], metaclass=FlowMeta):
     async def _execute_method(
         self, method_name: str, method: Callable, *args: Any, **kwargs: Any
     ) -> Any:
+        dumped_params = {f"_{i}": arg for i, arg in enumerate(args)} | (kwargs or {})
+        self.event_emitter.send(
+            self,
+            event=MethodExecutionStartedEvent(
+                type="method_execution_started",
+                method_name=method_name,
+                flow_name=self.__class__.__name__,
+                params=dumped_params,
+                state=self._copy_state(),
+            ),
+        )
+
         result = (
             await method(*args, **kwargs)
             if asyncio.iscoroutinefunction(method)
@@ -812,6 +828,18 @@ class Flow(Generic[T], metaclass=FlowMeta):
         self._method_execution_counts[method_name] = (
             self._method_execution_counts.get(method_name, 0) + 1
         )

+        self.event_emitter.send(
+            self,
+            event=MethodExecutionFinishedEvent(
+                type="method_execution_finished",
+                method_name=method_name,
+                flow_name=self.__class__.__name__,
+                state=self._copy_state(),
+                result=result,
+            ),
+        )
+
         return result

     async def _execute_listeners(self, trigger_method: str, result: Any) -> None:
@@ -950,16 +978,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
         """
         try:
             method = self._methods[listener_name]

-            self.event_emitter.send(
-                self,
-                event=MethodExecutionStartedEvent(
-                    type="method_execution_started",
-                    method_name=listener_name,
-                    flow_name=self.__class__.__name__,
-                ),
-            )
-
             sig = inspect.signature(method)
             params = list(sig.parameters.values())
             method_params = [p for p in params if p.name != "self"]
@@ -971,15 +989,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
             else:
                 listener_result = await self._execute_method(listener_name, method)

-            self.event_emitter.send(
-                self,
-                event=MethodExecutionFinishedEvent(
-                    type="method_execution_finished",
-                    method_name=listener_name,
-                    flow_name=self.__class__.__name__,
-                ),
-            )
-
             # Execute listeners (and possibly routers) of this listener
             await self._execute_listeners(listener_name, listener_result)
@@ -1,6 +1,8 @@
 from dataclasses import dataclass, field
 from datetime import datetime
-from typing import Any, Optional
+from typing import Any, Dict, Optional, Union
+
+from pydantic import BaseModel


 @dataclass
@@ -15,17 +17,21 @@ class Event:

 @dataclass
 class FlowStartedEvent(Event):
-    pass
+    inputs: Optional[Dict[str, Any]] = None


 @dataclass
 class MethodExecutionStartedEvent(Event):
     method_name: str
+    state: Union[Dict[str, Any], BaseModel]
+    params: Optional[Dict[str, Any]] = None


 @dataclass
 class MethodExecutionFinishedEvent(Event):
     method_name: str
+    state: Union[Dict[str, Any], BaseModel]
+    result: Any = None


 @dataclass
@@ -1,28 +1,138 @@
 from pathlib import Path
-from typing import Dict, List
+from typing import Dict, Iterator, List, Optional, Union
+from urllib.parse import urlparse

-from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource
+from pydantic import Field, field_validator
+
+from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
+from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
+from crewai.utilities.logger import Logger


-class ExcelKnowledgeSource(BaseFileKnowledgeSource):
+class ExcelKnowledgeSource(BaseKnowledgeSource):
     """A knowledge source that stores and queries Excel file content using embeddings."""

-    def load_content(self) -> Dict[Path, str]:
-        """Load and preprocess Excel file content."""
-        pd = self._import_dependencies()
+    # override content to be a dict of file paths to sheet names to csv content
+
+    _logger: Logger = Logger(verbose=True)
+
+    file_path: Optional[Union[Path, List[Path], str, List[str]]] = Field(
+        default=None,
+        description="[Deprecated] The path to the file. Use file_paths instead.",
+    )
+    file_paths: Optional[Union[Path, List[Path], str, List[str]]] = Field(
+        default_factory=list, description="The path to the file"
+    )
+    chunks: List[str] = Field(default_factory=list)
+    content: Dict[Path, Dict[str, str]] = Field(default_factory=dict)
+    safe_file_paths: List[Path] = Field(default_factory=list)
+
+    @field_validator("file_path", "file_paths", mode="before")
+    def validate_file_path(cls, v, info):
+        """Validate that at least one of file_path or file_paths is provided."""
+        # Single check if both are None, O(1) instead of nested conditions
+        if (
+            v is None
+            and info.data.get(
+                "file_path" if info.field_name == "file_paths" else "file_paths"
+            )
+            is None
+        ):
+            raise ValueError("Either file_path or file_paths must be provided")
+        return v
+
+    def _process_file_paths(self) -> List[Path]:
+        """Convert file_path to a list of Path objects."""
+        if hasattr(self, "file_path") and self.file_path is not None:
+            self._logger.log(
+                "warning",
+                "The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.",
+                color="yellow",
+            )
+            self.file_paths = self.file_path
+
+        if self.file_paths is None:
+            raise ValueError("Your source must be provided with a file_paths: []")
+
+        # Convert single path to list
+        path_list: List[Union[Path, str]] = (
+            [self.file_paths]
+            if isinstance(self.file_paths, (str, Path))
+            else list(self.file_paths)
+            if isinstance(self.file_paths, list)
+            else []
+        )
+
+        if not path_list:
+            raise ValueError(
+                "file_path/file_paths must be a Path, str, or a list of these types"
+            )
+
+        return [self.convert_to_path(path) for path in path_list]
+
+    def validate_content(self):
+        """Validate the paths."""
+        for path in self.safe_file_paths:
+            if not path.exists():
+                self._logger.log(
+                    "error",
+                    f"File not found: {path}. Try adding sources to the knowledge directory. If it's inside the knowledge directory, use the relative path.",
+                    color="red",
+                )
+                raise FileNotFoundError(f"File not found: {path}")
+            if not path.is_file():
+                self._logger.log(
+                    "error",
+                    f"Path is not a file: {path}",
+                    color="red",
+                )
+
+    def model_post_init(self, _) -> None:
+        if self.file_path:
+            self._logger.log(
+                "warning",
+                "The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.",
+                color="yellow",
+            )
+            self.file_paths = self.file_path
+        self.safe_file_paths = self._process_file_paths()
+        self.validate_content()
+        self.content = self._load_content()
+
+    def _load_content(self) -> Dict[Path, Dict[str, str]]:
+        """Load and preprocess Excel file content from multiple sheets.
+
+        Each sheet's content is converted to CSV format and stored.
+
+        Returns:
+            Dict[Path, Dict[str, str]]: A mapping of file paths to their respective sheet contents.
+
+        Raises:
+            ImportError: If required dependencies are missing.
+            FileNotFoundError: If the specified Excel file cannot be opened.
+        """
+        pd = self._import_dependencies()
         content_dict = {}
         for file_path in self.safe_file_paths:
             file_path = self.convert_to_path(file_path)
-            df = pd.read_excel(file_path)
-            content = df.to_csv(index=False)
-            content_dict[file_path] = content
+            with pd.ExcelFile(file_path) as xl:
+                sheet_dict = {
+                    str(sheet_name): str(
+                        pd.read_excel(xl, sheet_name).to_csv(index=False)
+                    )
+                    for sheet_name in xl.sheet_names
+                }
+                content_dict[file_path] = sheet_dict
         return content_dict

+    def convert_to_path(self, path: Union[Path, str]) -> Path:
+        """Convert a path to a Path object."""
+        return Path(KNOWLEDGE_DIRECTORY + "/" + path) if isinstance(path, str) else path
+
     def _import_dependencies(self):
         """Dynamically import dependencies."""
         try:
             import openpyxl  # noqa
             import pandas as pd

             return pd
@@ -38,10 +148,14 @@ class ExcelKnowledgeSource(BaseFileKnowledgeSource):
         and save the embeddings.
         """
-        # Convert dictionary values to a single string if content is a dictionary
-        if isinstance(self.content, dict):
-            content_str = "\n".join(str(value) for value in self.content.values())
-        else:
-            content_str = str(self.content)
+        # Updated to account for .xlsx workbooks with multiple tabs/sheets
+        content_str = ""
+        for value in self.content.values():
+            if isinstance(value, dict):
+                for sheet_value in value.values():
+                    content_str += str(sheet_value) + "\n"
+            else:
+                content_str += str(value) + "\n"

         new_chunks = self._chunk_text(content_str)
         self.chunks.extend(new_chunks)
@@ -164,6 +164,7 @@ class LLM:
         self.context_window_size = 0
         self.reasoning_effort = reasoning_effort
         self.additional_params = kwargs
+        self.is_anthropic = self._is_anthropic_model(model)

         litellm.drop_params = True

@@ -178,42 +179,62 @@ class LLM:
         self.set_callbacks(callbacks)
         self.set_env_callbacks()

+    def _is_anthropic_model(self, model: str) -> bool:
+        """Determine if the model is from Anthropic provider.
+
+        Args:
+            model: The model identifier string.
+
+        Returns:
+            bool: True if the model is from Anthropic, False otherwise.
+        """
+        ANTHROPIC_PREFIXES = ('anthropic/', 'claude-', 'claude/')
+        return any(prefix in model.lower() for prefix in ANTHROPIC_PREFIXES)
+
     def call(
         self,
         messages: Union[str, List[Dict[str, str]]],
         tools: Optional[List[dict]] = None,
         callbacks: Optional[List[Any]] = None,
         available_functions: Optional[Dict[str, Any]] = None,
-    ) -> str:
-        """
-        High-level llm call method that:
-          1) Accepts either a string or a list of messages
-          2) Converts string input to the required message format
-          3) Calls litellm.completion
-          4) Handles function/tool calls if any
-          5) Returns the final text response or tool result
-
-        Parameters:
-        - messages (Union[str, List[Dict[str, str]]]): The input messages for the LLM.
-          - If a string is provided, it will be converted into a message list with a single entry.
-          - If a list of dictionaries is provided, each dictionary should have 'role' and 'content' keys.
-        - tools (Optional[List[dict]]): A list of tool schemas for function calling.
-        - callbacks (Optional[List[Any]]): A list of callback functions to be executed.
-        - available_functions (Optional[Dict[str, Any]]): A dictionary mapping function names to actual Python functions.
-
-        Returns:
-        - str: The final text response from the LLM or the result of a tool function call.
-
-        Examples:
-        ---------
-        # Example 1: Using a string input
-        response = llm.call("Return the name of a random city in the world.")
-        print(response)
-
-        # Example 2: Using a list of messages
-        messages = [{"role": "user", "content": "What is the capital of France?"}]
-        response = llm.call(messages)
-        print(response)
+    ) -> Union[str, Any]:
+        """High-level LLM call method.
+
+        Args:
+            messages: Input messages for the LLM.
+                Can be a string or list of message dictionaries.
+                If string, it will be converted to a single user message.
+                If list, each dict must have 'role' and 'content' keys.
+            tools: Optional list of tool schemas for function calling.
+                Each tool should define its name, description, and parameters.
+            callbacks: Optional list of callback functions to be executed
+                during and after the LLM call.
+            available_functions: Optional dict mapping function names to callables
+                that can be invoked by the LLM.
+
+        Returns:
+            Union[str, Any]: Either a text response from the LLM (str) or
+                the result of a tool function call (Any).
+
+        Raises:
+            TypeError: If messages format is invalid
+            ValueError: If response format is not supported
+            LLMContextLengthExceededException: If input exceeds model's context limit
+
+        Examples:
+            # Example 1: Simple string input
+            >>> response = llm.call("Return the name of a random city.")
+            >>> print(response)
+            "Paris"
+
+            # Example 2: Message list with system and user messages
+            >>> messages = [
+            ...     {"role": "system", "content": "You are a geography expert"},
+            ...     {"role": "user", "content": "What is France's capital?"}
+            ... ]
+            >>> response = llm.call(messages)
+            >>> print(response)
+            "The capital of France is Paris."
         """
         # Validate parameters before proceeding with the call.
         self._validate_call_params()
@@ -233,10 +254,13 @@ class LLM:
             self.set_callbacks(callbacks)

         try:
-            # --- 1) Prepare the parameters for the completion call
+            # --- 1) Format messages according to provider requirements
+            formatted_messages = self._format_messages_for_provider(messages)
+
+            # --- 2) Prepare the parameters for the completion call
             params = {
                 "model": self.model,
-                "messages": messages,
+                "messages": formatted_messages,
                 "timeout": self.timeout,
                 "temperature": self.temperature,
                 "top_p": self.top_p,
@@ -324,6 +348,38 @@ class LLM:
             logging.error(f"LiteLLM call failed: {str(e)}")
             raise

+    def _format_messages_for_provider(self, messages: List[Dict[str, str]]) -> List[Dict[str, str]]:
+        """Format messages according to provider requirements.
+
+        Args:
+            messages: List of message dictionaries with 'role' and 'content' keys.
+                Can be empty or None.
+
+        Returns:
+            List of formatted messages according to provider requirements.
+            For Anthropic models, ensures first message has 'user' role.
+
+        Raises:
+            TypeError: If messages is None or contains invalid message format.
+        """
+        if messages is None:
+            raise TypeError("Messages cannot be None")
+
+        # Validate message format first
+        for msg in messages:
+            if not isinstance(msg, dict) or "role" not in msg or "content" not in msg:
+                raise TypeError("Invalid message format. Each message must be a dict with 'role' and 'content' keys")
+
+        if not self.is_anthropic:
+            return messages
+
+        # Anthropic requires messages to start with 'user' role
+        if not messages or messages[0]["role"] == "system":
+            # If first message is system or empty, add a placeholder user message
+            return [{"role": "user", "content": "."}, *messages]
+
+        return messages
+
     def _get_custom_llm_provider(self) -> str:
         """
         Derives the custom_llm_provider from the model string.
@@ -1,4 +1,4 @@
-from typing import Any, Optional
+from typing import Optional

 from pydantic import PrivateAttr
@@ -1,9 +1,7 @@
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional

 from pydantic import BaseModel

-from crewai.memory.storage.rag_storage import RAGStorage
-

 class Memory(BaseModel):
     """
@@ -674,19 +674,32 @@ class Task(BaseModel):
             return OutputFormat.PYDANTIC
         return OutputFormat.RAW

-    def _save_file(self, result: Any) -> None:
+    def _save_file(self, result: Union[Dict, str, Any]) -> None:
         """Save task output to a file.

+        Note:
+            For cross-platform file writing, especially on Windows, consider using FileWriterTool
+            from the crewai_tools package:
+                pip install 'crewai[tools]'
+                from crewai_tools import FileWriterTool
+
         Args:
             result: The result to save to the file. Can be a dict or any stringifiable object.

         Raises:
             ValueError: If output_file is not set
-            RuntimeError: If there is an error writing to the file
+            RuntimeError: If there is an error writing to the file. For cross-platform
+                compatibility, especially on Windows, use FileWriterTool from crewai_tools
+                package.
         """
         if self.output_file is None:
             raise ValueError("output_file is not set.")

+        FILEWRITER_RECOMMENDATION = (
+            "For cross-platform file writing, especially on Windows, "
+            "use FileWriterTool from crewai_tools package."
+        )
+
         try:
             resolved_path = Path(self.output_file).expanduser().resolve()
             directory = resolved_path.parent
@@ -702,7 +715,12 @@ class Task(BaseModel):
             else:
                 file.write(str(result))
         except (OSError, IOError) as e:
-            raise RuntimeError(f"Failed to save output file: {e}")
+            raise RuntimeError(
+                "\n".join([
+                    f"Failed to save output file: {e}",
+                    FILEWRITER_RECOMMENDATION
+                ])
+            )
         return None

     def __repr__(self):
@@ -49,6 +49,41 @@ writer = Agent(
|
||||
)
|
||||
|
||||
|
||||
def test_crew_with_only_conditional_tasks_raises_error():
|
||||
"""Test that creating a crew with only conditional tasks raises an error."""
|
||||
|
||||
def condition_func(task_output: TaskOutput) -> bool:
|
||||
return True
|
||||
|
||||
conditional1 = ConditionalTask(
|
||||
description="Conditional task 1",
|
||||
expected_output="Output 1",
|
||||
agent=researcher,
|
||||
condition=condition_func,
|
||||
)
|
||||
conditional2 = ConditionalTask(
|
||||
description="Conditional task 2",
|
||||
expected_output="Output 2",
|
||||
agent=researcher,
|
||||
condition=condition_func,
|
||||
)
|
||||
conditional3 = ConditionalTask(
|
||||
description="Conditional task 3",
|
||||
expected_output="Output 3",
|
||||
agent=researcher,
|
||||
condition=condition_func,
|
||||
)
|
||||
|
||||
with pytest.raises(
|
||||
pydantic_core._pydantic_core.ValidationError,
|
||||
match="Crew must include at least one non-conditional task",
|
||||
):
|
||||
Crew(
|
||||
agents=[researcher],
|
||||
tasks=[conditional1, conditional2, conditional3],
|
||||
)
|
||||
|
||||
|
||||
def test_crew_config_conditional_requirement():
|
||||
with pytest.raises(ValueError):
|
||||
Crew(process=Process.sequential)
|
||||
@@ -556,12 +591,12 @@ def test_crew_with_delegating_agents_should_not_override_task_tools():
|
||||
_, kwargs = mock_execute_sync.call_args
|
||||
tools = kwargs["tools"]
|
||||
|
||||
assert any(isinstance(tool, TestTool) for tool in tools), (
|
||||
"TestTool should be present"
|
||||
)
|
||||
assert any("delegate" in tool.name.lower() for tool in tools), (
|
||||
"Delegation tool should be present"
|
||||
)
|
||||
assert any(
|
||||
isinstance(tool, TestTool) for tool in tools
|
||||
), "TestTool should be present"
|
||||
assert any(
|
||||
"delegate" in tool.name.lower() for tool in tools
|
||||
), "Delegation tool should be present"
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -620,12 +655,12 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():
    _, kwargs = mock_execute_sync.call_args
    tools = kwargs["tools"]

    assert any(
        isinstance(tool, TestTool) for tool in new_ceo.tools
    ), "TestTool should be present"
    assert any(
        "delegate" in tool.name.lower() for tool in tools
    ), "Delegation tool should be present"


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -749,17 +784,17 @@ def test_task_tools_override_agent_tools_with_allow_delegation():
    used_tools = kwargs["tools"]

    # Confirm AnotherTestTool is present but TestTool is not
    assert any(
        isinstance(tool, AnotherTestTool) for tool in used_tools
    ), "AnotherTestTool should be present"
    assert not any(
        isinstance(tool, TestTool) for tool in used_tools
    ), "TestTool should not be present among used tools"

    # Confirm delegation tool(s) are present
    assert any(
        "delegate" in tool.name.lower() for tool in used_tools
    ), "Delegation tool should be present"

    # Finally, make sure the agent's original tools remain unchanged
    assert len(researcher_with_delegation.tools) == 1
@@ -1560,9 +1595,9 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():

    # Verify that exactly one tool was used and it was a CodeInterpreterTool
    assert len(used_tools) == 1, "Should have exactly one tool"
    assert isinstance(
        used_tools[0], CodeInterpreterTool
    ), "Tool should be CodeInterpreterTool"


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -1919,6 +1954,7 @@ def test_task_callback_on_crew():

def test_task_callback_both_on_task_and_crew():
    from unittest.mock import MagicMock, patch

    mock_callback_on_task = MagicMock()
    mock_callback_on_crew = MagicMock()
@@ -2060,6 +2096,210 @@ def test_tools_with_custom_caching():
    assert result.raw == "3"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_conditional_task_uses_last_output():
    """Test that conditional tasks use the last task output for condition evaluation."""
    task1 = Task(
        description="First task",
        expected_output="First output",
        agent=researcher,
    )

    def condition_fails(task_output: TaskOutput) -> bool:
        # This condition will never be met
        return "never matches" in task_output.raw.lower()

    def condition_succeeds(task_output: TaskOutput) -> bool:
        # This condition will match the first task's output
        return "first success" in task_output.raw.lower()

    conditional_task1 = ConditionalTask(
        description="Second task - conditional that fails condition",
        expected_output="Second output",
        agent=researcher,
        condition=condition_fails,
    )

    conditional_task2 = ConditionalTask(
        description="Third task - conditional that succeeds using first task output",
        expected_output="Third output",
        agent=writer,
        condition=condition_succeeds,
    )

    crew = Crew(
        agents=[researcher, writer],
        tasks=[task1, conditional_task1, conditional_task2],
    )

    # Mock outputs for tasks
    mock_first = TaskOutput(
        description="First task output",
        raw="First success output",  # Will be used by third task's condition
        agent=researcher.role,
    )
    mock_third = TaskOutput(
        description="Third task output",
        raw="Third task executed",  # Output when condition succeeds using first task output
        agent=writer.role,
    )

    # Set up mocks for task execution and conditional logic
    with patch.object(ConditionalTask, "should_execute") as mock_should_execute:
        # First conditional fails, second succeeds
        mock_should_execute.side_effect = [False, True]
        with patch.object(Task, "execute_sync") as mock_execute:
            mock_execute.side_effect = [mock_first, mock_third]
            result = crew.kickoff()

    # Verify execution behavior
    assert mock_execute.call_count == 2  # Only first and third tasks execute
    assert mock_should_execute.call_count == 2  # Both conditionals checked

    # Verify outputs collection: the first executed task's output, followed by an
    # automatically generated (skipped) output, then the conditional execution
    assert len(result.tasks_output) == 3
    assert (
        result.tasks_output[0].raw == "First success output"
    )  # First task succeeded
    assert (
        result.tasks_output[1].raw == ""
    )  # Second task skipped (condition failed)
    assert (
        result.tasks_output[2].raw == "Third task executed"
    )  # Third task used first task's output
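The empty `raw` string asserted for the skipped task comes from the skip path: when a condition evaluates to `False`, the crew records a placeholder output instead of executing the task. A hedged sketch of that behavior; `get_skipped_task_output()` is the helper the crew appears to use in the source these tests target, so treat the name as an assumption:

```python
# Sketch of the skip path; assumes ConditionalTask.get_skipped_task_output()
# exists as in the crewai source these tests were written against.
skipped = conditional_task1.get_skipped_task_output()
assert skipped.raw == ""  # matches result.tasks_output[1].raw above
```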
@pytest.mark.vcr(filter_headers=["authorization"])
def test_conditional_tasks_result_collection():
    """Test that task outputs are properly collected based on execution status."""
    task1 = Task(
        description="Normal task that always executes",
        expected_output="First output",
        agent=researcher,
    )

    def condition_never_met(task_output: TaskOutput) -> bool:
        return "never matches" in task_output.raw.lower()

    def condition_always_met(task_output: TaskOutput) -> bool:
        return "success" in task_output.raw.lower()

    task2 = ConditionalTask(
        description="Conditional task that never executes",
        expected_output="Second output",
        agent=researcher,
        condition=condition_never_met,
    )

    task3 = ConditionalTask(
        description="Conditional task that always executes",
        expected_output="Third output",
        agent=writer,
        condition=condition_always_met,
    )

    crew = Crew(
        agents=[researcher, writer],
        tasks=[task1, task2, task3],
    )

    # Mock outputs for different execution paths
    mock_success = TaskOutput(
        description="Success output",
        raw="Success output",  # Triggers third task's condition
        agent=researcher.role,
    )
    mock_conditional = TaskOutput(
        description="Conditional output",
        raw="Conditional task executed",
        agent=writer.role,
    )

    # Set up mocks for task execution and conditional logic
    with patch.object(ConditionalTask, "should_execute") as mock_should_execute:
        # First conditional fails, second succeeds
        mock_should_execute.side_effect = [False, True]
        with patch.object(Task, "execute_sync") as mock_execute:
            mock_execute.side_effect = [mock_success, mock_conditional]
            result = crew.kickoff()

    # Verify execution behavior
    assert mock_execute.call_count == 2  # Only first and third tasks execute
    assert mock_should_execute.call_count == 2  # Both conditionals checked

    # Verify task output collection: there should be three outputs -- the normal
    # task, the skipped conditional task (empty output), and the conditional
    # task that executed.
    assert len(result.tasks_output) == 3
    assert result.tasks_output[0].raw == "Success output"  # Normal task executed
    assert result.tasks_output[1].raw == ""  # Second task skipped
    assert (
        result.tasks_output[2].raw == "Conditional task executed"
    )  # Third task executed
@pytest.mark.vcr(filter_headers=["authorization"])
def test_multiple_conditional_tasks():
    """Test that having multiple conditional tasks in sequence works correctly."""
    task1 = Task(
        description="Initial research task",
        expected_output="Research output",
        agent=researcher,
    )

    def condition1(task_output: TaskOutput) -> bool:
        return "success" in task_output.raw.lower()

    def condition2(task_output: TaskOutput) -> bool:
        return "proceed" in task_output.raw.lower()

    task2 = ConditionalTask(
        description="First conditional task",
        expected_output="Conditional output 1",
        agent=writer,
        condition=condition1,
    )

    task3 = ConditionalTask(
        description="Second conditional task",
        expected_output="Conditional output 2",
        agent=writer,
        condition=condition2,
    )

    crew = Crew(
        agents=[researcher, writer],
        tasks=[task1, task2, task3],
    )

    # Mock different task outputs to test conditional logic
    mock_success = TaskOutput(
        description="Mock success",
        raw="Success and proceed output",
        agent=researcher.role,
    )

    # Set up mocks for task execution
    with patch.object(Task, "execute_sync", return_value=mock_success) as mock_execute:
        result = crew.kickoff()
        # Verify all tasks were executed (no IndexError)
        assert mock_execute.call_count == 3
        assert len(result.tasks_output) == 3
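All three tests lean on the same contract: a `ConditionalTask` decides whether to run by calling its `condition` on the most recent task output. Any plain callable over `TaskOutput` works; a minimal sketch, with import paths assumed from this test module:

```python
# Minimal condition sketch; import paths are assumptions based on this module.
from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput


def needs_follow_up(previous: TaskOutput) -> bool:
    # Run the follow-up only when the prior output flags low confidence.
    return "low confidence" in previous.raw.lower()


follow_up = ConditionalTask(
    description="Re-check the analysis with additional sources",
    expected_output="A corrected analysis",
    agent=researcher,  # an existing Agent, as in the tests above
    condition=needs_follow_up,
)
```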
@pytest.mark.vcr(filter_headers=["authorization"])
def test_using_contextual_memory():
    from unittest.mock import patch
@@ -3178,9 +3418,9 @@ def test_fetch_inputs():
    expected_placeholders = {"role_detail", "topic", "field"}
    actual_placeholders = crew.fetch_inputs()

    assert (
        actual_placeholders == expected_placeholders
    ), f"Expected {expected_placeholders}, but got {actual_placeholders}"


def test_task_tools_preserve_code_execution_tools():
@@ -3253,20 +3493,20 @@ def test_task_tools_preserve_code_execution_tools():
    used_tools = kwargs["tools"]

    # Verify all expected tools are present
    assert any(
        isinstance(tool, TestTool) for tool in used_tools
    ), "Task's TestTool should be present"
    assert any(
        isinstance(tool, CodeInterpreterTool) for tool in used_tools
    ), "CodeInterpreterTool should be present"
    assert any(
        "delegate" in tool.name.lower() for tool in used_tools
    ), "Delegation tool should be present"

    # Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools)
    assert (
        len(used_tools) == 4
    ), "Should have TestTool, CodeInterpreter, and 2 delegation tools"


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -3310,9 +3550,9 @@ def test_multimodal_flag_adds_multimodal_tools():
    used_tools = kwargs["tools"]

    # Check that the multimodal tool was added
    assert any(
        isinstance(tool, AddImageTool) for tool in used_tools
    ), "AddImageTool should be present when agent is multimodal"

    # Verify we have exactly one tool (just the AddImageTool)
    assert len(used_tools) == 1, "Should only have the AddImageTool"
@@ -3538,9 +3778,9 @@ def test_crew_guardrail_feedback_in_context():
    assert len(execution_contexts) > 1, "Task should have been executed multiple times"

    # Verify that the second execution included the guardrail feedback
    assert (
        "Output must contain the keyword 'IMPORTANT'" in execution_contexts[1]
    ), "Guardrail feedback should be included in retry context"

    # Verify final output meets guardrail requirements
    assert "IMPORTANT" in result.raw, "Final output should contain required keyword"
@@ -1,11 +1,18 @@
"""Test Flow creation and execution basic functionality."""

import asyncio
from datetime import datetime

import pytest
from pydantic import BaseModel

from crewai.flow.flow import Flow, and_, listen, or_, router, start
from crewai.flow.flow_events import (
    FlowFinishedEvent,
    FlowStartedEvent,
    MethodExecutionFinishedEvent,
    MethodExecutionStartedEvent,
)


def test_simple_sequential_flow():
@@ -398,3 +405,218 @@ def test_router_with_multiple_conditions():

    # final_step should run after router_and
    assert execution_order.index("log_final_step") > execution_order.index("router_and")
def test_unstructured_flow_event_emission():
    """Test that the correct events are emitted during unstructured flow
    execution with all fields validated."""

    class PoemFlow(Flow):
        @start()
        def prepare_flower(self):
            self.state["flower"] = "roses"
            return "foo"

        @start()
        def prepare_color(self):
            self.state["color"] = "red"
            return "bar"

        @listen(prepare_color)
        def write_first_sentence(self):
            return f"{self.state['flower']} are {self.state['color']}"

        @listen(write_first_sentence)
        def finish_poem(self, first_sentence):
            separator = self.state.get("separator", "\n")
            return separator.join([first_sentence, "violets are blue"])

        @listen(finish_poem)
        def save_poem_to_database(self):
            # A method without args/kwargs to ensure events are sent correctly
            pass

    event_log = []

    def handle_event(_, event):
        event_log.append(event)

    flow = PoemFlow()
    flow.event_emitter.connect(handle_event)
    flow.kickoff(inputs={"separator": ", "})

    assert isinstance(event_log[0], FlowStartedEvent)
    assert event_log[0].flow_name == "PoemFlow"
    assert event_log[0].inputs == {"separator": ", "}
    assert isinstance(event_log[0].timestamp, datetime)

    # Assert the concurrent start-method executions in a loop, since ordering
    # cannot be guaranteed across asynchronous executions
    for i in range(1, 5):
        event = event_log[i]
        assert isinstance(event.state, dict)
        assert isinstance(event.state["id"], str)

        if event.method_name == "prepare_flower":
            if isinstance(event, MethodExecutionStartedEvent):
                assert event.params == {}
                assert event.state["separator"] == ", "
            elif isinstance(event, MethodExecutionFinishedEvent):
                assert event.result == "foo"
                assert event.state["flower"] == "roses"
                assert event.state["separator"] == ", "
            else:
                assert False, "Unexpected event type for prepare_flower"
        elif event.method_name == "prepare_color":
            if isinstance(event, MethodExecutionStartedEvent):
                assert event.params == {}
                assert event.state["separator"] == ", "
            elif isinstance(event, MethodExecutionFinishedEvent):
                assert event.result == "bar"
                assert event.state["color"] == "red"
                assert event.state["separator"] == ", "
            else:
                assert False, "Unexpected event type for prepare_color"
        else:
            assert False, f"Unexpected method {event.method_name} in prepare events"

    assert isinstance(event_log[5], MethodExecutionStartedEvent)
    assert event_log[5].method_name == "write_first_sentence"
    assert event_log[5].params == {}
    assert isinstance(event_log[5].state, dict)
    assert event_log[5].state["flower"] == "roses"
    assert event_log[5].state["color"] == "red"
    assert event_log[5].state["separator"] == ", "

    assert isinstance(event_log[6], MethodExecutionFinishedEvent)
    assert event_log[6].method_name == "write_first_sentence"
    assert event_log[6].result == "roses are red"

    assert isinstance(event_log[7], MethodExecutionStartedEvent)
    assert event_log[7].method_name == "finish_poem"
    assert event_log[7].params == {"_0": "roses are red"}
    assert isinstance(event_log[7].state, dict)
    assert event_log[7].state["flower"] == "roses"
    assert event_log[7].state["color"] == "red"

    assert isinstance(event_log[8], MethodExecutionFinishedEvent)
    assert event_log[8].method_name == "finish_poem"
    assert event_log[8].result == "roses are red, violets are blue"

    assert isinstance(event_log[9], MethodExecutionStartedEvent)
    assert event_log[9].method_name == "save_poem_to_database"
    assert event_log[9].params == {}
    assert isinstance(event_log[9].state, dict)
    assert event_log[9].state["flower"] == "roses"
    assert event_log[9].state["color"] == "red"

    assert isinstance(event_log[10], MethodExecutionFinishedEvent)
    assert event_log[10].method_name == "save_poem_to_database"
    assert event_log[10].result is None

    assert isinstance(event_log[11], FlowFinishedEvent)
    assert event_log[11].flow_name == "PoemFlow"
    assert event_log[11].result is None
    assert isinstance(event_log[11].timestamp, datetime)
def test_structured_flow_event_emission():
    """Test that the correct events are emitted during structured flow
    execution with all fields validated."""

    class OnboardingState(BaseModel):
        name: str = ""
        sent: bool = False

    class OnboardingFlow(Flow[OnboardingState]):
        @start()
        def user_signs_up(self):
            self.state.sent = False

        @listen(user_signs_up)
        def send_welcome_message(self):
            self.state.sent = True
            return f"Welcome, {self.state.name}!"

    event_log = []

    def handle_event(_, event):
        event_log.append(event)

    flow = OnboardingFlow()
    flow.event_emitter.connect(handle_event)
    flow.kickoff(inputs={"name": "Anakin"})

    assert isinstance(event_log[0], FlowStartedEvent)
    assert event_log[0].flow_name == "OnboardingFlow"
    assert event_log[0].inputs == {"name": "Anakin"}
    assert isinstance(event_log[0].timestamp, datetime)

    assert isinstance(event_log[1], MethodExecutionStartedEvent)
    assert event_log[1].method_name == "user_signs_up"

    assert isinstance(event_log[2], MethodExecutionFinishedEvent)
    assert event_log[2].method_name == "user_signs_up"

    assert isinstance(event_log[3], MethodExecutionStartedEvent)
    assert event_log[3].method_name == "send_welcome_message"
    assert event_log[3].params == {}
    assert getattr(event_log[3].state, "sent") is False

    assert isinstance(event_log[4], MethodExecutionFinishedEvent)
    assert event_log[4].method_name == "send_welcome_message"
    assert getattr(event_log[4].state, "sent") is True
    assert event_log[4].result == "Welcome, Anakin!"

    assert isinstance(event_log[5], FlowFinishedEvent)
    assert event_log[5].flow_name == "OnboardingFlow"
    assert event_log[5].result == "Welcome, Anakin!"
    assert isinstance(event_log[5].timestamp, datetime)
def test_stateless_flow_event_emission():
    """Test that the correct events are emitted during stateless flow execution
    with all fields validated."""

    class StatelessFlow(Flow):
        @start()
        def init(self):
            pass

        @listen(init)
        def process(self):
            return "Deeds will not be less valiant because they are unpraised."

    event_log = []

    def handle_event(_, event):
        event_log.append(event)

    flow = StatelessFlow()
    flow.event_emitter.connect(handle_event)
    flow.kickoff()

    assert isinstance(event_log[0], FlowStartedEvent)
    assert event_log[0].flow_name == "StatelessFlow"
    assert event_log[0].inputs is None
    assert isinstance(event_log[0].timestamp, datetime)

    assert isinstance(event_log[1], MethodExecutionStartedEvent)
    assert event_log[1].method_name == "init"

    assert isinstance(event_log[2], MethodExecutionFinishedEvent)
    assert event_log[2].method_name == "init"

    assert isinstance(event_log[3], MethodExecutionStartedEvent)
    assert event_log[3].method_name == "process"

    assert isinstance(event_log[4], MethodExecutionFinishedEvent)
    assert event_log[4].method_name == "process"

    assert isinstance(event_log[5], FlowFinishedEvent)
    assert event_log[5].flow_name == "StatelessFlow"
    assert (
        event_log[5].result
        == "Deeds will not be less valiant because they are unpraised."
    )
    assert isinstance(event_log[5].timestamp, datetime)
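Each flow test uses the same hook: connect a handler to `flow.event_emitter` before `kickoff()` and collect whatever arrives. Outside of tests the same hook can drive lightweight logging; a minimal sketch (the print format is illustrative, not part of the API):

```python
# Minimal logging hook using the event_emitter API the tests above exercise.
from crewai.flow.flow import Flow, listen, start


class GreetFlow(Flow):
    @start()
    def begin(self):
        return "hello"

    @listen(begin)
    def shout(self, word):
        return word.upper()


def log_event(_, event):
    # Method-level events carry method_name; flow-level events do not.
    print(type(event).__name__, getattr(event, "method_name", ""))


flow = GreetFlow()
flow.event_emitter.connect(log_event)
flow.kickoff()  # prints one line per emitted event
```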
@@ -286,6 +286,79 @@ def test_o3_mini_reasoning_effort_medium():


@pytest.fixture
def anthropic_llm():
    """Fixture providing an Anthropic LLM instance."""
    return LLM(model="anthropic/claude-3-sonnet")


@pytest.fixture
def system_message():
    """Fixture providing a system message."""
    return {"role": "system", "content": "test"}


@pytest.fixture
def user_message():
    """Fixture providing a user message."""
    return {"role": "user", "content": "test"}


def test_anthropic_message_formatting_edge_cases(anthropic_llm):
    """Test edge cases for Anthropic message formatting."""
    # Test None messages
    with pytest.raises(TypeError, match="Messages cannot be None"):
        anthropic_llm._format_messages_for_provider(None)

    # Test empty message list
    formatted = anthropic_llm._format_messages_for_provider([])
    assert len(formatted) == 1
    assert formatted[0]["role"] == "user"
    assert formatted[0]["content"] == "."

    # Test invalid message format
    with pytest.raises(TypeError, match="Invalid message format"):
        anthropic_llm._format_messages_for_provider([{"invalid": "message"}])


def test_anthropic_model_detection():
    """Test Anthropic model detection with various formats."""
    models = [
        ("anthropic/claude-3", True),
        ("claude-instant", True),
        ("claude/v1", True),
        ("gpt-4", False),
        ("", False),
        ("anthropomorphic", False),  # Should not match partial words
    ]

    for model, expected in models:
        llm = LLM(model=model)
        assert llm.is_anthropic == expected, f"Failed for model: {model}"


def test_anthropic_message_formatting(anthropic_llm, system_message, user_message):
    """Test Anthropic message formatting with fixtures."""
    # Test when first message is system
    formatted = anthropic_llm._format_messages_for_provider([system_message])
    assert len(formatted) == 2
    assert formatted[0]["role"] == "user"
    assert formatted[0]["content"] == "."
    assert formatted[1] == system_message

    # Test when first message is already user
    formatted = anthropic_llm._format_messages_for_provider([user_message])
    assert len(formatted) == 1
    assert formatted[0] == user_message

    # Test with empty message list
    formatted = anthropic_llm._format_messages_for_provider([])
    assert len(formatted) == 1
    assert formatted[0]["role"] == "user"
    assert formatted[0]["content"] == "."

    # Test with non-Anthropic model (should not modify messages)
    non_anthropic_llm = LLM(model="gpt-4")
    formatted = non_anthropic_llm._format_messages_for_provider([system_message])
    assert len(formatted) == 1
    assert formatted[0] == system_message


def test_deepseek_r1_with_open_router():
    if not os.getenv("OPEN_ROUTER_API_KEY"):
        pytest.skip("OPEN_ROUTER_API_KEY not set; skipping test.")
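The rule these tests pin down: Anthropic models require the conversation to open with a user turn, so the formatter prepends a placeholder `{"role": "user", "content": "."}` when the message list is empty or starts with a system message. A standalone sketch of that rule — an illustration only, not the library's actual implementation, which lives in `LLM._format_messages_for_provider`:

```python
# Illustrative re-implementation of the formatting rule described by the tests.
def format_for_anthropic(messages):
    if messages is None:
        raise TypeError("Messages cannot be None")
    if any("role" not in m or "content" not in m for m in messages):
        raise TypeError("Invalid message format")
    if not messages or messages[0]["role"] != "user":
        # Anthropic requires the first message to come from the user.
        return [{"role": "user", "content": "."}] + list(messages)
    return list(messages)


assert format_for_anthropic([]) == [{"role": "user", "content": "."}]
```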
uv.lock (generated) — 419 changes
@@ -198,15 +198,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/39/e3/893e8757be2612e6c266d9bb58ad2e3651524b5b40cf56761e985a28b13e/asgiref-3.8.1-py3-none-any.whl", hash = "sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47", size = 23828 },
]

[[package]]
name = "asn1crypto"
version = "1.5.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/de/cf/d547feed25b5244fcb9392e288ff9fdc3280b10260362fc45d37a798a6ee/asn1crypto-1.5.1.tar.gz", hash = "sha256:13ae38502be632115abf8a24cbe5f4da52e3b5231990aff31123c805306ccb9c", size = 121080 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/c9/7f/09065fd9e27da0eda08b4d6897f1c13535066174cc023af248fc2a8d5e5a/asn1crypto-1.5.1-py2.py3-none-any.whl", hash = "sha256:db4e40728b728508912cbb3d44f19ce188f218e9eba635821bb4b68564f8fd67", size = 105045 },
]

[[package]]
name = "asttokens"
version = "2.4.1"
@@ -228,15 +219,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/a7/fa/e01228c2938de91d47b307831c62ab9e4001e747789d0b05baf779a6488c/async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028", size = 5721 },
]

[[package]]
name = "atpublic"
version = "5.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/5d/18/b1d247792440378abeeb0853f9daa2a127284b68776af6815990be7fcdb0/atpublic-5.0.tar.gz", hash = "sha256:d5cb6cbabf00ec1d34e282e8ce7cbc9b74ba4cb732e766c24e2d78d1ad7f723f", size = 14646 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/6b/03/2cb0e5326e19b7d877bc9c3a7ef436a30a06835b638580d1f5e21a0409ed/atpublic-5.0-py3-none-any.whl", hash = "sha256:b651dcd886666b1042d1e38158a22a4f2c267748f4e97fde94bc492a4a28a3f3", size = 5207 },
]

[[package]]
name = "attrs"
version = "24.2.0"
@@ -262,18 +244,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/e4/0e/38cb7b781371e79e9c697fb78f3ccd18fda8bd547d0a2e76e616561a3792/auth0_python-4.7.2-py3-none-any.whl", hash = "sha256:df2224f9b1e170b3aa12d8bc7ff02eadb7cc229307a09ec6b8a55fd1e0e05dc8", size = 131834 },
]

[[package]]
name = "authlib"
version = "1.3.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "cryptography" },
]
sdist = { url = "https://files.pythonhosted.org/packages/09/47/df70ecd34fbf86d69833fe4e25bb9ecbaab995c8e49df726dd416f6bb822/authlib-1.3.1.tar.gz", hash = "sha256:7ae843f03c06c5c0debd63c9db91f9fda64fa62a42a77419fa15fbb7e7a58917", size = 146074 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/87/1f/bc95e43ffb57c05b8efcc376dd55a0240bf58f47ddf5a0f92452b6457b75/Authlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:d35800b973099bbadc49b42b256ecb80041ad56b7fe1216a362c7943c088f377", size = 223827 },
]

[[package]]
name = "autoflake"
version = "2.3.1"
@@ -595,14 +565,14 @@ wheels = [

[[package]]
name = "click"
version = "8.1.7"
version = "8.1.8"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "colorama", marker = "platform_system == 'Windows'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de", size = 336121 }
sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", size = 97941 },
    { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188 },
]

[[package]]
@@ -649,7 +619,7 @@ wheels = [

[[package]]
name = "crewai"
version = "0.100.1"
version = "0.102.0"
source = { editable = "." }
dependencies = [
    { name = "appdirs" },
@@ -733,7 +703,7 @@ requires-dist = [
    { name = "blinker", specifier = ">=1.9.0" },
    { name = "chromadb", specifier = ">=0.5.23" },
    { name = "click", specifier = ">=8.1.7" },
    { name = "crewai-tools", marker = "extra == 'tools'", specifier = ">=0.32.1" },
    { name = "crewai-tools", marker = "extra == 'tools'", specifier = ">=0.36.0" },
    { name = "docling", marker = "extra == 'docling'", specifier = ">=2.12.0" },
    { name = "fastembed", marker = "extra == 'fastembed'", specifier = ">=0.4.1" },
    { name = "instructor", specifier = ">=1.3.3" },
@@ -782,33 +752,24 @@ dev = [

[[package]]
name = "crewai-tools"
version = "0.32.1"
version = "0.36.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "beautifulsoup4" },
    { name = "chromadb" },
    { name = "click" },
    { name = "crewai" },
    { name = "docker" },
    { name = "docx2txt" },
    { name = "embedchain" },
    { name = "lancedb" },
    { name = "linkup-sdk" },
    { name = "openai" },
    { name = "patronus" },
    { name = "pydantic" },
    { name = "pyright" },
    { name = "pytube" },
    { name = "requests" },
    { name = "scrapegraph-py" },
    { name = "selenium" },
    { name = "serpapi" },
    { name = "snowflake" },
    { name = "spider-client" },
    { name = "weaviate-client" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e9/e7/fb07f0089028f7c9003770641d21f5844d4fa22bf5cc4c4b3676bfa0e1fe/crewai_tools-0.32.1.tar.gz", hash = "sha256:41acea9243b17a463f355d48dfe7d73bd59738c8862a8da780eae008e0136414", size = 887378 }
sdist = { url = "https://files.pythonhosted.org/packages/4d/e1/d65778cf4aea106f3f60a4208521f04bc7f1d26be4e34eeb63cae6297d50/crewai_tools-0.36.0.tar.gz", hash = "sha256:761b396ee6a4019a988720dd6a14e1409f5de9d0cdc2a8662b487d87efb1a6bf", size = 900178 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/36/f0/8f98f1a2b90b9b989bd01cf48b5e3bb2d842be2062bfd3177a77561e7b61/crewai_tools-0.32.1-py3-none-any.whl", hash = "sha256:6cb436dc66e19e35285a4fce501158a13bce99b244370574f568ec33c5513351", size = 537264 },
    { url = "https://files.pythonhosted.org/packages/bd/b6/533632a6c2a2e623fc4a1677458aff3539413a196fb220a7fece4ead3f71/crewai_tools-0.36.0-py3-none-any.whl", hash = "sha256:dbd0d95a080acfb281e105f4376e1e98576dae6d53d94f7b883c57af893668b3", size = 545937 },
]

[[package]]
@@ -1099,12 +1060,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/d5/7c/e9fcff7623954d86bdc17782036cbf715ecab1bec4847c008557affe1ca8/docstring_parser-0.16-py3-none-any.whl", hash = "sha256:bf0a1387354d3691d102edef7ec124f219ef639982d096e26e3b60aeffa90637", size = 36533 },
]

[[package]]
name = "docx2txt"
version = "0.8"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/7d/7d/60ee3f2b16d9bfdfa72e8599470a2c1a5b759cb113c6fe1006be28359327/docx2txt-0.8.tar.gz", hash = "sha256:2c06d98d7cfe2d3947e5760a57d924e3ff07745b379c8737723922e7009236e5", size = 2814 }

[[package]]
name = "durationpy"
version = "0.9"
@@ -1646,19 +1601,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/1d/1f/acf03ee901313446d52c3916d527d4981de9f6f3edc69267d05509dcfa7b/grpcio-1.67.0-cp312-cp312-win_amd64.whl", hash = "sha256:985b2686f786f3e20326c4367eebdaed3e7aa65848260ff0c6644f817042cb15", size = 4343545 },
]

[[package]]
name = "grpcio-health-checking"
version = "1.62.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "grpcio" },
    { name = "protobuf" },
]
sdist = { url = "https://files.pythonhosted.org/packages/eb/9f/09df9b02fc8eafa3031d878c8a4674a0311293c8c6f1c942cdaeec204126/grpcio-health-checking-1.62.3.tar.gz", hash = "sha256:5074ba0ce8f0dcfe328408ec5c7551b2a835720ffd9b69dade7fa3e0dc1c7a93", size = 15640 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/40/4c/ee3173906196b741ac6ba55a9788ba9ebf2cd05f91715a49b6c3bfbb9d73/grpcio_health_checking-1.62.3-py3-none-any.whl", hash = "sha256:f29da7dd144d73b4465fe48f011a91453e9ff6c8af0d449254cf80021cab3e0d", size = 18547 },
]

[[package]]
name = "grpcio-status"
version = "1.62.3"
@@ -1870,52 +1812,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 },
]

[[package]]
name = "ijson"
version = "3.3.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/6c/83/28e9e93a3a61913e334e3a2e78ea9924bb9f9b1ac45898977f9d9dd6133f/ijson-3.3.0.tar.gz", hash = "sha256:7f172e6ba1bee0d4c8f8ebd639577bfe429dee0f3f96775a067b8bae4492d8a0", size = 60079 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/ad/89/96e3608499b4a500b9bc27aa8242704e675849dd65bdfa8682b00a92477e/ijson-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7f7a5250599c366369fbf3bc4e176f5daa28eb6bc7d6130d02462ed335361675", size = 85009 },
    { url = "https://files.pythonhosted.org/packages/e4/7e/1098503500f5316c5f7912a51c91aca5cbc609c09ce4ecd9c4809983c560/ijson-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f87a7e52f79059f9c58f6886c262061065eb6f7554a587be7ed3aa63e6b71b34", size = 57796 },
    { url = "https://files.pythonhosted.org/packages/78/f7/27b8c27a285628719ff55b68507581c86b551eb162ce810fe51e3e1a25f2/ijson-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b73b493af9e947caed75d329676b1b801d673b17481962823a3e55fe529c8b8b", size = 57218 },
    { url = "https://files.pythonhosted.org/packages/0c/c5/1698094cb6a336a223c30e1167cc1b15cdb4bfa75399c1a2eb82fa76cc3c/ijson-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5576415f3d76290b160aa093ff968f8bf6de7d681e16e463a0134106b506f49", size = 117153 },
    { url = "https://files.pythonhosted.org/packages/4b/21/c206dda0945bd832cc9b0894596b0efc2cb1819a0ac61d8be1429ac09494/ijson-3.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e9ffe358d5fdd6b878a8a364e96e15ca7ca57b92a48f588378cef315a8b019e", size = 110781 },
    { url = "https://files.pythonhosted.org/packages/f4/f5/2d733e64577109a9b255d14d031e44a801fa20df9ccc58b54a31e8ecf9e6/ijson-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8643c255a25824ddd0895c59f2319c019e13e949dc37162f876c41a283361527", size = 114527 },
    { url = "https://files.pythonhosted.org/packages/8d/a8/78bfee312aa23417b86189a65f30b0edbceaee96dc6a616cc15f611187d1/ijson-3.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:df3ab5e078cab19f7eaeef1d5f063103e1ebf8c26d059767b26a6a0ad8b250a3", size = 116824 },
    { url = "https://files.pythonhosted.org/packages/5d/a4/aff410f7d6aa1a77ee2ab2d6a2d2758422726270cb149c908a9baf33cf58/ijson-3.3.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3dc1fb02c6ed0bae1b4bf96971258bf88aea72051b6e4cebae97cff7090c0607", size = 112647 },
    { url = "https://files.pythonhosted.org/packages/77/ee/2b5122dc4713f5a954267147da36e7156240ca21b04ed5295bc0cabf0fbe/ijson-3.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e9afd97339fc5a20f0542c971f90f3ca97e73d3050cdc488d540b63fae45329a", size = 114156 },
    { url = "https://files.pythonhosted.org/packages/b3/d7/ad3b266490b60c6939e8a07fd8e4b7e2002aea08eaa9572a016c3e3a9129/ijson-3.3.0-cp310-cp310-win32.whl", hash = "sha256:844c0d1c04c40fd1b60f148dc829d3f69b2de789d0ba239c35136efe9a386529", size = 48931 },
    { url = "https://files.pythonhosted.org/packages/0b/68/b9e1c743274c8a23dddb12d2ed13b5f021f6d21669d51ff7fa2e9e6c19df/ijson-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:d654d045adafdcc6c100e8e911508a2eedbd2a1b5f93f930ba13ea67d7704ee9", size = 50965 },
    { url = "https://files.pythonhosted.org/packages/fd/df/565ba72a6f4b2c833d051af8e2228cfa0b1fef17bb44995c00ad27470c52/ijson-3.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:501dce8eaa537e728aa35810656aa00460a2547dcb60937c8139f36ec344d7fc", size = 85041 },
    { url = "https://files.pythonhosted.org/packages/f0/42/1361eaa57ece921d0239881bae6a5e102333be5b6e0102a05ec3caadbd5a/ijson-3.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:658ba9cad0374d37b38c9893f4864f284cdcc7d32041f9808fba8c7bcaadf134", size = 57829 },
    { url = "https://files.pythonhosted.org/packages/f5/b0/143dbfe12e1d1303ea8d8cd6f40e95cea8f03bcad5b79708614a7856c22e/ijson-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2636cb8c0f1023ef16173f4b9a233bcdb1df11c400c603d5f299fac143ca8d70", size = 57217 },
    { url = "https://files.pythonhosted.org/packages/0d/80/b3b60c5e5be2839365b03b915718ca462c544fdc71e7a79b7262837995ef/ijson-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd174b90db68c3bcca273e9391934a25d76929d727dc75224bf244446b28b03b", size = 121878 },
    { url = "https://files.pythonhosted.org/packages/8d/eb/7560fafa4d40412efddf690cb65a9bf2d3429d6035e544103acbf5561dc4/ijson-3.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:97a9aea46e2a8371c4cf5386d881de833ed782901ac9f67ebcb63bb3b7d115af", size = 115620 },
    { url = "https://files.pythonhosted.org/packages/51/2b/5a34c7841388dce161966e5286931518de832067cd83e6f003d93271e324/ijson-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c594c0abe69d9d6099f4ece17763d53072f65ba60b372d8ba6de8695ce6ee39e", size = 119200 },
    { url = "https://files.pythonhosted.org/packages/3e/b7/1d64fbec0d0a7b0c02e9ad988a89614532028ead8bb52a2456c92e6ee35a/ijson-3.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8e0ff16c224d9bfe4e9e6bd0395826096cda4a3ef51e6c301e1b61007ee2bd24", size = 121107 },
    { url = "https://files.pythonhosted.org/packages/d4/b9/01044f09850bc545ffc85b35aaec473d4f4ca2b6667299033d252c1b60dd/ijson-3.3.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0015354011303175eae7e2ef5136414e91de2298e5a2e9580ed100b728c07e51", size = 116658 },
    { url = "https://files.pythonhosted.org/packages/fb/0d/53856b61f3d952d299d1695c487e8e28058d01fa2adfba3d6d4b4660c242/ijson-3.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:034642558afa57351a0ffe6de89e63907c4cf6849070cc10a3b2542dccda1afe", size = 118186 },
    { url = "https://files.pythonhosted.org/packages/95/2d/5bd86e2307dd594840ee51c4e32de953fee837f028acf0f6afb08914cd06/ijson-3.3.0-cp311-cp311-win32.whl", hash = "sha256:192e4b65495978b0bce0c78e859d14772e841724d3269fc1667dc6d2f53cc0ea", size = 48938 },
    { url = "https://files.pythonhosted.org/packages/55/e1/4ba2b65b87f67fb19d698984d92635e46d9ce9dd748ce7d009441a586710/ijson-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:72e3488453754bdb45c878e31ce557ea87e1eb0f8b4fc610373da35e8074ce42", size = 50972 },
    { url = "https://files.pythonhosted.org/packages/8a/4d/3992f7383e26a950e02dc704bc6c5786a080d5c25fe0fc5543ef477c1883/ijson-3.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:988e959f2f3d59ebd9c2962ae71b97c0df58323910d0b368cc190ad07429d1bb", size = 84550 },
    { url = "https://files.pythonhosted.org/packages/1b/cc/3d4372e0d0b02a821b982f1fdf10385512dae9b9443c1597719dd37769a9/ijson-3.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b2f73f0d0fce5300f23a1383d19b44d103bb113b57a69c36fd95b7c03099b181", size = 57572 },
    { url = "https://files.pythonhosted.org/packages/02/de/970d48b1ff9da5d9513c86fdd2acef5cb3415541c8069e0d92a151b84adb/ijson-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0ee57a28c6bf523d7cb0513096e4eb4dac16cd935695049de7608ec110c2b751", size = 56902 },
    { url = "https://files.pythonhosted.org/packages/5e/a0/4537722c8b3b05e82c23dfe09a3a64dd1e44a013a5ca58b1e77dfe48b2f1/ijson-3.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0155a8f079c688c2ccaea05de1ad69877995c547ba3d3612c1c336edc12a3a5", size = 127400 },
    { url = "https://files.pythonhosted.org/packages/b2/96/54956062a99cf49f7a7064b573dcd756da0563ce57910dc34e27a473d9b9/ijson-3.3.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ab00721304af1ae1afa4313ecfa1bf16b07f55ef91e4a5b93aeaa3e2bd7917c", size = 118786 },
    { url = "https://files.pythonhosted.org/packages/07/74/795319531c5b5504508f595e631d592957f24bed7ff51a15bc4c61e7b24c/ijson-3.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40ee3821ee90be0f0e95dcf9862d786a7439bd1113e370736bfdf197e9765bfb", size = 126288 },
    { url = "https://files.pythonhosted.org/packages/69/6a/e0cec06fbd98851d5d233b59058c1dc2ea767c9bb6feca41aa9164fff769/ijson-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3b6987a0bc3e6d0f721b42c7a0198ef897ae50579547b0345f7f02486898f5", size = 129569 },
    { url = "https://files.pythonhosted.org/packages/2a/4f/82c0d896d8dcb175f99ced7d87705057bcd13523998b48a629b90139a0dc/ijson-3.3.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:63afea5f2d50d931feb20dcc50954e23cef4127606cc0ecf7a27128ed9f9a9e6", size = 121508 },
    { url = "https://files.pythonhosted.org/packages/2b/b6/8973474eba4a917885e289d9e138267d3d1f052c2d93b8c968755661a42d/ijson-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b5c3e285e0735fd8c5a26d177eca8b52512cdd8687ca86ec77a0c66e9c510182", size = 127896 },
    { url = "https://files.pythonhosted.org/packages/94/25/00e66af887adbbe70002e0479c3c2340bdfa17a168e25d4ab5a27b53582d/ijson-3.3.0-cp312-cp312-win32.whl", hash = "sha256:907f3a8674e489abdcb0206723e5560a5cb1fa42470dcc637942d7b10f28b695", size = 49272 },
    { url = "https://files.pythonhosted.org/packages/25/a2/e187beee237808b2c417109ae0f4f7ee7c81ecbe9706305d6ac2a509cc45/ijson-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:8f890d04ad33262d0c77ead53c85f13abfb82f2c8f078dfbf24b78f59534dfdd", size = 51272 },
    { url = "https://files.pythonhosted.org/packages/c3/28/2e1cf00abe5d97aef074e7835b86a94c9a06be4629a0e2c12600792b51ba/ijson-3.3.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2af323a8aec8a50fa9effa6d640691a30a9f8c4925bd5364a1ca97f1ac6b9b5c", size = 54308 },
    { url = "https://files.pythonhosted.org/packages/04/d2/8c541c28da4f931bac8177e251efe2b6902f7c486d2d4bdd669eed4ff5c0/ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f64f01795119880023ba3ce43072283a393f0b90f52b66cc0ea1a89aa64a9ccb", size = 66010 },
    { url = "https://files.pythonhosted.org/packages/d0/02/8fec0b9037a368811dba7901035e8e0973ebda308f57f30c42101a16a5f7/ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a716e05547a39b788deaf22725490855337fc36613288aa8ae1601dc8c525553", size = 66770 },
    { url = "https://files.pythonhosted.org/packages/47/23/90c61f978c83647112460047ea0137bde9c7fe26600ce255bb3e17ea7a21/ijson-3.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:473f5d921fadc135d1ad698e2697025045cd8ed7e5e842258295012d8a3bc702", size = 64159 },
    { url = "https://files.pythonhosted.org/packages/20/af/aab1a36072590af62d848f03981f1c587ca40a391fc61e418e388d8b0d46/ijson-3.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd26b396bc3a1e85f4acebeadbf627fa6117b97f4c10b177d5779577c6607744", size = 51095 },
]

[[package]]
name = "imageio"
version = "2.36.1"
@@ -2359,19 +2255,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/83/60/d497a310bde3f01cb805196ac61b7ad6dc5dcf8dce66634dc34364b20b4f/lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc", size = 12097 },
]

[[package]]
name = "linkup-sdk"
version = "0.2.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "httpx" },
    { name = "pydantic" },
]
sdist = { url = "https://files.pythonhosted.org/packages/2e/ba/b06e8f2ca2f0ce255a40ee4505637536acfe83ec997cd8b61bd5cd031513/linkup_sdk-0.2.1.tar.gz", hash = "sha256:b00ba7cb0117358e975d50196501ac49b247509fd236121e40abe40e6a2a3e9a", size = 8918 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/4f/90/2903b9e2eba501ceb6c6b4fc57bbeddde7e8964921a05d424f5a6125cbd0/linkup_sdk-0.2.1-py3-none-any.whl", hash = "sha256:bf50c88e659c6d9291cbd5e3e99b6a20a14c9b1eb2dc7acca763a3ae6f84b26e", size = 7961 },
]

[[package]]
name = "litellm"
version = "1.60.2"
@@ -3424,18 +3307,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/9f/8a/ce7c28e4ea337f6d95261345d7c61322f8561c52f57b263a3ad7025984f4/orjson-3.10.10-cp312-none-win_amd64.whl", hash = "sha256:384cd13579a1b4cd689d218e329f459eb9ddc504fa48c5a83ef4889db7fd7a4f", size = 139389 },
]

[[package]]
name = "outcome"
version = "1.3.0.post0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "attrs" },
]
sdist = { url = "https://files.pythonhosted.org/packages/98/df/77698abfac98571e65ffeb0c1fba8ffd692ab8458d617a0eed7d9a8d38f2/outcome-1.3.0.post0.tar.gz", hash = "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8", size = 21060 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/55/8b/5ab7257531a5d830fc8000c476e63c935488d74609b50f9384a643ec0a62/outcome-1.3.0.post0-py2.py3-none-any.whl", hash = "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b", size = 10692 },
]

[[package]]
name = "overrides"
version = "7.7.0"
@@ -3525,24 +3396,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191 },
]

[[package]]
name = "patronus"
version = "0.0.17"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "httpx" },
    { name = "pandas" },
    { name = "pydantic" },
    { name = "pydantic-settings" },
    { name = "pyyaml" },
    { name = "tqdm" },
    { name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c5/a0/d5218ff6f2eab18c5a90266d21cdac673c85070e82e3f8aba538b3200f54/patronus-0.0.17.tar.gz", hash = "sha256:7298f770d4f6774b955806fb319c2c872fda3551bd7fa63d975bbeedc14b28de", size = 27377 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/0e/9e/717c4508d675549ff081a7fecf25af7d70f9d7ad87ea0d4825e02de3b801/patronus-0.0.17-py3-none-any.whl", hash = "sha256:1f322eeee838974515fdb7cbf8530ad25c6c59686abbcb28c1fdbf23d34eb10d", size = 31516 },
]

[[package]]
name = "pdfminer-six"
version = "20231228"
@@ -4103,18 +3956,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/c2/35/c0edf199257ef0a7d407d29cd51c4e70d1dad4370a5f44deb65a7a5475e2/pymdown_extensions-10.11.2-py3-none-any.whl", hash = "sha256:41cdde0a77290e480cf53892f5c5e50921a7ee3e5cd60ba91bf19837b33badcf", size = 259044 },
]

[[package]]
name = "pyopenssl"
version = "24.3.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "cryptography" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c1/d4/1067b82c4fc674d6f6e9e8d26b3dff978da46d351ca3bac171544693e085/pyopenssl-24.3.0.tar.gz", hash = "sha256:49f7a019577d834746bc55c5fce6ecbcec0f2b4ec5ce1cf43a9a173b8138bb36", size = 178944 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/42/22/40f9162e943f86f0fc927ebc648078be87def360d9d8db346619fb97df2b/pyOpenSSL-24.3.0-py3-none-any.whl", hash = "sha256:e474f5a473cd7f92221cc04976e48f4d11502804657a08a989fb3be5514c904a", size = 56111 },
]

[[package]]
name = "pypdf"
version = "5.0.1"
@@ -4192,15 +4033,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/48/0a/c99fb7d7e176f8b176ef19704a32e6a9c6aafdf19ef75a187f701fc15801/pysbd-0.3.4-py3-none-any.whl", hash = "sha256:cd838939b7b0b185fcf86b0baf6636667dfb6e474743beeff878e9f42e022953", size = 71082 },
]

[[package]]
name = "pysocks"
version = "1.7.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/bd/11/293dd436aea955d45fc4e8a35b6ae7270f5b8e00b53cf6c024c83b657a11/PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0", size = 284429 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/8d/59/b4572118e098ac8e46e399a1dd0f2d85403ce8bbaad9ec79373ed6badaf9/PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5", size = 16725 },
]

[[package]]
name = "pytest"
version = "8.3.3"
@@ -4860,39 +4692,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/aa/7d/43ab67228ef98c6b5dd42ab386eae2d7877036970a0d7e3dd3eb47a0d530/scipy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:2ff38e22128e6c03ff73b6bb0f85f897d2362f8c052e3b8ad00532198fbdae3f", size = 44521212 },
]

[[package]]
name = "scrapegraph-py"
version = "1.8.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "aiohttp" },
    { name = "beautifulsoup4" },
    { name = "pydantic" },
    { name = "python-dotenv" },
    { name = "requests" },
]
sdist = { url = "https://files.pythonhosted.org/packages/33/90/2388754061394a6c95fd5ad48cf4550208ce081c99cbc883672d52ccc360/scrapegraph_py-1.8.0.tar.gz", hash = "sha256:e075f6e6012a14a038537d0664609229069d9d2c2956bcbf9362f0c5c48de786", size = 108112 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/f7/80/14aeb7ba092cfc6928844a6726855f0c33489107f344e71dd8071f6433ed/scrapegraph_py-1.8.0-py3-none-any.whl", hash = "sha256:279176c972a770bac37a284e0bc25e34793797f30ff24dfba8fbcbfda79c8c88", size = 14460 },
]

[[package]]
name = "selenium"
version = "4.25.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "certifi" },
    { name = "trio" },
    { name = "trio-websocket" },
    { name = "typing-extensions" },
    { name = "urllib3", extra = ["socks"] },
    { name = "websocket-client" },
]
sdist = { url = "https://files.pythonhosted.org/packages/0e/5a/d3735b189b91715fd0f5a9b8d55e2605061309849470e96ab830f02cba40/selenium-4.25.0.tar.gz", hash = "sha256:95d08d3b82fb353f3c474895154516604c7f0e6a9a565ae6498ef36c9bac6921", size = 957765 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/aa/85/fa44f23dd5d5066a72f7c4304cce4b5ff9a6e7fd92431a48b2c63fbf63ec/selenium-4.25.0-py3-none-any.whl", hash = "sha256:3798d2d12b4a570bc5790163ba57fef10b2afee958bf1d80f2a3cf07c4141f33", size = 9693127 },
]

[[package]]
name = "semchunk"
version = "2.2.0"
@@ -4906,18 +4705,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/f8/85/3940bb4c586e10603d169d13ffccd59ed32fcb8d1b8104c3aef0e525b3b2/semchunk-2.2.0-py3-none-any.whl", hash = "sha256:7db19ca90ddb48f99265e789e07a7bb111ae25185f9cc3d44b94e1e61b9067fc", size = 10243 },
]

[[package]]
name = "serpapi"
version = "0.1.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "requests" },
]
sdist = { url = "https://files.pythonhosted.org/packages/f0/fa/3fd8809287f3977a3e752bb88610e918d49cb1038b14f4bc51e13e594197/serpapi-0.1.5.tar.gz", hash = "sha256:b9707ed54750fdd2f62dc3a17c6a3fb7fa421dc37902fd65b2263c0ac765a1a5", size = 14191 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/df/6a/21deade04100d64844e494353a5d65e7971fbdfddf78eb1f248423593ad0/serpapi-0.1.5-py2.py3-none-any.whl", hash = "sha256:6467b6adec1231059f754ccaa952b229efeaa8b9cae6e71f879703ec9e5bb3d1", size = 10966 },
]

[[package]]
name = "setuptools"
version = "75.2.0"
@@ -4983,96 +4770,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 },
]

[[package]]
name = "snowflake"
version = "1.0.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "snowflake-core" },
    { name = "snowflake-legacy" },
]
sdist = { url = "https://files.pythonhosted.org/packages/80/d1/830929fb7b54586f4ee601f409e80343e16f32b9b579246cd6fa9984bcff/snowflake-1.0.2.tar.gz", hash = "sha256:4009e59af24e444de4a9e9d340fff0979cca8a02a4feee4665da97eb9c76d958", size = 6033 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/b6/25/4cbba4da3f9b333d132680a66221d1a101309cce330fa8be38b674ceafd0/snowflake-1.0.2-py3-none-any.whl", hash = "sha256:6bb0fc70aa10234769202861ccb4b091f5e9fb1bbc61a1e708db93baa3f221f4", size = 5623 },
]

[[package]]
name = "snowflake-connector-python"
version = "3.12.4"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "asn1crypto" },
    { name = "certifi" },
    { name = "cffi" },
    { name = "charset-normalizer" },
    { name = "cryptography" },
    { name = "filelock" },
    { name = "idna" },
    { name = "packaging" },
    { name = "platformdirs" },
    { name = "pyjwt" },
    { name = "pyopenssl" },
    { name = "pytz" },
    { name = "requests" },
    { name = "sortedcontainers" },
    { name = "tomlkit" },
    { name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/6b/de/f43d9c827ccc1974696ffd3c0495e2d4e98b0414b2353b7de932621f23dd/snowflake_connector_python-3.12.4.tar.gz", hash = "sha256:289e0691dfbf8ec8b7a8f58bcbb95a819890fe5e5b278fdbfc885059a63a946f", size = 743445 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/53/6c/edc8909e424654a7a3c18cbf804d8a35c17a65a2131f866a87ed8e762bd0/snowflake_connector_python-3.12.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f141c159e3244bd660279f87f32e39351b2845fcb75f8138f31d2219f983b05", size = 958038 },
    { url = "https://files.pythonhosted.org/packages/93/a3/34c5082dfb9b555c914f4233224b8bc1f2c4d5668bc71bb587680b8dcd73/snowflake_connector_python-3.12.4-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:091458ba777c24adff659c5c28f0f5bb0bcca8a9b6ecc5641ae25b7c20a8f43d", size = 970665 },
    { url = "https://files.pythonhosted.org/packages/f8/87/9eceaaba58b2ec4f9094fc3a04d953bbabbfdcc05a6b14ef12610c1039f9/snowflake_connector_python-3.12.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23049d341da681ec7131cead71cdf7b1761ae5bcc08bcbdb931dcef6c25e8a5f", size = 2496731 },
    { url = "https://files.pythonhosted.org/packages/66/0a/e35e9e0a142f3779007b0246166a245305858b198ed0dd3a41a3d2405512/snowflake_connector_python-3.12.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc88a09d77a8ce7e445094b2409b606ddb208b5fc9f7c7a379d0255a8d566e9d", size = 2520041 },
    { url = "https://files.pythonhosted.org/packages/79/77/9a238c153600adff8fbd1136d9f4be1e42cb827cbe1865924bfe84653e85/snowflake_connector_python-3.12.4-cp310-cp310-win_amd64.whl", hash = "sha256:3c33fbba036805c1767ea48eb40ffc3fb79d61f2a4bb4e77b571ea6f6a998be8", size = 918272 },
    { url = "https://files.pythonhosted.org/packages/0d/95/e8aac28d6913e4b59f96e6d361f31b9576b5f0abe4d2c4f7decf9f075932/snowflake_connector_python-3.12.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ec5cfaa1526084cf4d0e7849d5ace601245cb4ad9675ab3cd7d799b3abea481", size = 958125 },
    { url = "https://files.pythonhosted.org/packages/67/b6/a847a94e03bdf39010048feacd57f250a91a655eed333d7d32b165f65201/snowflake_connector_python-3.12.4-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:ff225824b3a0fa5e822442de72172f97028f04ae183877f1305d538d8d6c5d11", size = 970770 },
    { url = "https://files.pythonhosted.org/packages/0e/91/f97812ae9946944bcd9bfe1965af1cb9b1844919da879d90b90dfd3e5086/snowflake_connector_python-3.12.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9beced2789dc75e8f1e749aa637e7ec9b03302b4ed4b793ae0f1ff32823370e", size = 2519875 },
    { url = "https://files.pythonhosted.org/packages/37/52/500d72079bfb322ebdf3892180ecf3dc73c117b3a966ee8d4bb1378882b2/snowflake_connector_python-3.12.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ea47450a04ff713f3adf28053e34103bd990291e62daee9721c76597af4b2b5", size = 2542320 },
|
||||
{ url = "https://files.pythonhosted.org/packages/59/92/74ead6bee8dd29fe372002ce59477221e04b9da96ad7aafe584afce02937/snowflake_connector_python-3.12.4-cp311-cp311-win_amd64.whl", hash = "sha256:748f9125854dca07ea471bb2bb3c5bb932a53f9b8a77ba348b50b738c77203ce", size = 918363 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a5/a3/1cbe0b52b810f069bdc96c372b2d91ac51aeac32986c2832aa3fe0b0b0e5/snowflake_connector_python-3.12.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4bcd0371b20d199f15e6a3c0b489bf18e27f2a88c84cf3194b2569ca039fa7d1", size = 957561 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f4/05/8a5e16bd908a89f36d59686d356890c4bd6a976a487f86274181010f4b49/snowflake_connector_python-3.12.4-cp312-cp312-macosx_11_0_x86_64.whl", hash = "sha256:7900d82a450b206fa2ed6c42cd65d9b3b9fd4547eca1696937175fac2a03ba37", size = 969045 },
|
||||
{ url = "https://files.pythonhosted.org/packages/79/1b/8f5ab15d224d7bf76533c55cfd8ce73b185ce94d84241f0e900739ce3f37/snowflake_connector_python-3.12.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:300f0562aeea55e40ee03b45205dbef7b78f5ba2f1787a278c7b807e7d8db22c", size = 2533969 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6e/d9/2e2fd72e0251691b5c54a219256c455141a2d3c104e411b82de598c62553/snowflake_connector_python-3.12.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6762a00948f003be55d7dc5de9de690315d01951a94371ec3db069d9303daba", size = 2558052 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e8/cb/e0ab230ad5adc9932e595bdbec693b2499d446666daf6cb9cae306a41dd2/snowflake_connector_python-3.12.4-cp312-cp312-win_amd64.whl", hash = "sha256:83ca896790a7463b6c8cd42e1a29b8ea197cc920839ae6ee96a467475eab4ec2", size = 916627 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "snowflake-core"
|
||||
version = "1.0.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "atpublic" },
|
||||
{ name = "pydantic" },
|
||||
{ name = "python-dateutil" },
|
||||
{ name = "pyyaml" },
|
||||
{ name = "requests" },
|
||||
{ name = "snowflake-connector-python" },
|
||||
{ name = "urllib3" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/1d/cf/6f91e5b2daaf3df9ae666a65f5ba3938f11a40784e4ada5218ecf154b29a/snowflake_core-1.0.2.tar.gz", hash = "sha256:8bf267ff1efcd17f157432c6e24f6d2eb6c2aeed66f43ab34b215aa76d8edf02", size = 1092618 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/75/3c/ec228b7325b32781081c72254dd0ef793943e853d82616e862e231909c6c/snowflake_core-1.0.2-py3-none-any.whl", hash = "sha256:55c37cf526a0d78dd3359ad96b9ecd7130bbbbc2f5a2fec77bb3da0dac2dc688", size = 1555690 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "snowflake-legacy"
|
||||
version = "1.0.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/94/41/a6211bd2109913eee1506d37865ab13cf9a8cc2faa41833da3d1ffec654b/snowflake_legacy-1.0.0.tar.gz", hash = "sha256:2044661c79ba01841ab279c5e74b994532244c9d103224eba16eb159c8ed6033", size = 4043 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/aa/8c/64f9b5ee0c3f376a733584c480b31addbf2baff7bb41f655e5e3f3719d3b/snowflake_legacy-1.0.0-py3-none-any.whl", hash = "sha256:25f9678f180d7d5f5b60d17f8112f0ee8a7a77b82c67fd599ed6e27bd502be5a", size = 3059 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sortedcontainers"
|
||||
version = "2.4.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e8/c4/ba2f8066cceb6f23394729afe52f3bf7adec04bf9ed2c820b39e19299111/sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", size = 30594 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/32/46/9cb0e58b2deb7f82b84065f37f3bffeb12413f947f9388e4cac22c4621ce/sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0", size = 29575 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "soupsieve"
|
||||
version = "2.6"
|
||||
@@ -5082,18 +4779,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d1/c2/fe97d779f3ef3b15f05c94a2f1e3d21732574ed441687474db9d342a7315/soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9", size = 36186 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "spider-client"
|
||||
version = "0.1.25"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "aiohttp" },
|
||||
{ name = "ijson" },
|
||||
{ name = "requests" },
|
||||
{ name = "tenacity" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b8/f2/06d89322f0054ea72e8d5580199f580e29df23476cb3cfe83a70a2a58a1b/spider-client-0.1.25.tar.gz", hash = "sha256:92ca4ce1d9d715dd8db52684ea417653940d8f3bbc13383d78683bc4fbb899a2", size = 15412 }
|
||||
|
||||
[[package]]
|
||||
name = "sqlalchemy"
|
||||
version = "2.0.36"
|
||||
@@ -5325,15 +5010,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/ac/ce90573ba446a9bbe65838ded066a805234d159b4446ae9f8ec5bbd36cbd/tomli_w-1.1.0-py3-none-any.whl", hash = "sha256:1403179c78193e3184bfaade390ddbd071cba48a32a2e62ba11aae47490c63f7", size = 6440 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tomlkit"
|
||||
version = "0.13.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b1/09/a439bec5888f00a54b8b9f05fa94d7f901d6735ef4e55dcec9bc37b5d8fa/tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79", size = 192885 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/b6/a447b5e4ec71e13871be01ba81f5dfc9d0af7e473da256ff46bc0e24026f/tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde", size = 37955 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "torch"
|
||||
version = "2.4.1"
|
||||
@@ -5439,38 +5115,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/51/51/b87caa939fedf307496e4dbf412f4b909af3d9ca8b189fc3b65c1faa456f/transformers-4.46.3-py3-none-any.whl", hash = "sha256:a12ef6f52841fd190a3e5602145b542d03507222f2c64ebb7ee92e8788093aef", size = 10034536 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "trio"
|
||||
version = "0.27.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "attrs" },
|
||||
{ name = "cffi", marker = "implementation_name != 'pypy' and os_name == 'nt'" },
|
||||
{ name = "exceptiongroup", marker = "python_full_version < '3.11'" },
|
||||
{ name = "idna" },
|
||||
{ name = "outcome" },
|
||||
{ name = "sniffio" },
|
||||
{ name = "sortedcontainers" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/17/d1/a83dee5be404da7afe5a71783a33b8907bacb935a6dc8c69ab785e4a3eed/trio-0.27.0.tar.gz", hash = "sha256:1dcc95ab1726b2da054afea8fd761af74bad79bd52381b84eae408e983c76831", size = 568064 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/3c/83/ec3196c360afffbc5b342ead48d1eb7393dd74fa70bca75d33905a86f211/trio-0.27.0-py3-none-any.whl", hash = "sha256:68eabbcf8f457d925df62da780eff15ff5dc68fd6b367e2dde59f7aaf2a0b884", size = 481734 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "trio-websocket"
|
||||
version = "0.11.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "exceptiongroup", marker = "python_full_version < '3.11'" },
|
||||
{ name = "trio" },
|
||||
{ name = "wsproto" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/dd/36/abad2385853077424a11b818d9fd8350d249d9e31d583cb9c11cd4c85eda/trio-websocket-0.11.1.tar.gz", hash = "sha256:18c11793647703c158b1f6e62de638acada927344d534e3c7628eedcb746839f", size = 26511 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/48/be/a9ae5f50cad5b6f85bd2574c2c923730098530096e170c1ce7452394d7aa/trio_websocket-0.11.1-py3-none-any.whl", hash = "sha256:520d046b0d030cf970b8b2b2e00c4c2245b3807853ecd44214acd33d74581638", size = 17408 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "triton"
|
||||
version = "3.0.0"
|
||||
@@ -5551,11 +5195,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", size = 126338 },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
socks = [
|
||||
{ name = "pysocks" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uv"
|
||||
version = "0.4.26"
|
||||
@@ -5632,15 +5271,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/eb/f7032be105877bcf924709c97b1bf3b90255b4ec251f9340cef912559f28/uvloop-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:183aef7c8730e54c9a3ee3227464daed66e37ba13040bb3f350bc2ddc040f22f", size = 4659022 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "validators"
|
||||
version = "0.34.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/64/07/91582d69320f6f6daaf2d8072608a4ad8884683d4840e7e4f3a9dbdcc639/validators-0.34.0.tar.gz", hash = "sha256:647fe407b45af9a74d245b943b18e6a816acf4926974278f6dd617778e1e781f", size = 70955 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/6e/78/36828a4d857b25896f9774c875714ba4e9b3bc8a92d2debe3f4df3a83d4f/validators-0.34.0-py3-none-any.whl", hash = "sha256:c804b476e3e6d3786fa07a30073a4ef694e617805eb1946ceee3fe5a9b8b1321", size = 43536 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "vcrpy"
|
||||
version = "5.1.0"
|
||||
@@ -5760,25 +5390,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "weaviate-client"
|
||||
version = "4.9.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "authlib" },
|
||||
{ name = "grpcio" },
|
||||
{ name = "grpcio-health-checking" },
|
||||
{ name = "grpcio-tools" },
|
||||
{ name = "httpx" },
|
||||
{ name = "pydantic" },
|
||||
{ name = "requests" },
|
||||
{ name = "validators" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/5d/7d/3894d12065d006743271b0b6bcc3bf911910473e91179d5966966816d694/weaviate_client-4.9.6.tar.gz", hash = "sha256:56d67c40fc94b0d53e81e0aa4477baaebbf3646fbec26551df66e396a72adcb6", size = 696813 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/2f/40/e3550e743b92ddd8dc69ebfd69cceb6de45b7d9a1cd439995454b499e9a3/weaviate_client-4.9.6-py3-none-any.whl", hash = "sha256:1d3b551939c0f7314f25e417cbcf4cf34e7adf942627993eef36ae6b4a044673", size = 386998 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "webencodings"
|
||||
version = "0.5.1"
|
||||
@@ -5893,18 +5504,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/21/abdedb4cdf6ff41ebf01a74087740a709e2edb146490e4d9beea054b0b7a/wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1", size = 23362 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wsproto"
|
||||
version = "1.2.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "h11" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/c9/4a/44d3c295350d776427904d73c189e10aeae66d7f555bb2feee16d1e4ba5a/wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065", size = 53425 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/78/58/e860788190eba3bcce367f74d29c4675466ce8dddfba85f7827588416f01/wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736", size = 24226 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "xlsxwriter"
|
||||
version = "3.2.0"
|
||||
|
||||