mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-04-20 18:02:37 +00:00
Compare commits
23 Commits
pr-2174
...
feat/funct
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
abddf77fbe | ||
|
|
53067f8b92 | ||
|
|
d3a09c3180 | ||
|
|
4d7aacb5f2 | ||
|
|
6b1cf78e41 | ||
|
|
80f1a88b63 | ||
|
|
32da76a2ca | ||
|
|
3aa48dcd58 | ||
|
|
03f1d57463 | ||
|
|
4725d0de0d | ||
|
|
b766af75f2 | ||
|
|
b2c8779f4c | ||
|
|
df266bda01 | ||
|
|
2155acb3a3 | ||
|
|
794574957e | ||
|
|
66b19311a7 | ||
|
|
9fc84fc1ac | ||
|
|
f8f9df6d1d | ||
|
|
6e94edb777 | ||
|
|
bbe896d48c | ||
|
|
9298054436 | ||
|
|
90b7937796 | ||
|
|
520933b4c5 |
187
docs/changelog.mdx
Normal file
187
docs/changelog.mdx
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
---
|
||||||
|
title: Changelog
|
||||||
|
description: View the latest updates and changes to CrewAI
|
||||||
|
icon: timeline
|
||||||
|
---
|
||||||
|
|
||||||
|
<Update label="2024-03-17" description="v0.108.0">
|
||||||
|
**Features**
|
||||||
|
- Converted tabs to spaces in `crew.py` template
|
||||||
|
- Enhanced LLM Streaming Response Handling and Event System
|
||||||
|
- Included `model_name`
|
||||||
|
- Enhanced Event Listener with rich visualization and improved logging
|
||||||
|
- Added fingerprints
|
||||||
|
|
||||||
|
**Bug Fixes**
|
||||||
|
- Fixed Mistral issues
|
||||||
|
- Fixed a bug in documentation
|
||||||
|
- Fixed type check error in fingerprint property
|
||||||
|
|
||||||
|
**Documentation Updates**
|
||||||
|
- Improved tool documentation
|
||||||
|
- Updated installation guide for the `uv` tool package
|
||||||
|
- Added instructions for upgrading crewAI with the `uv` tool
|
||||||
|
- Added documentation for `ApifyActorsTool`
|
||||||
|
</Update>
|
||||||
|
|
||||||
|
<Update label="2024-03-10" description="v0.105.0">
|
||||||
|
**Core Improvements & Fixes**
|
||||||
|
- Fixed issues with missing template variables and user memory configuration
|
||||||
|
- Improved async flow support and addressed agent response formatting
|
||||||
|
- Enhanced memory reset functionality and fixed CLI memory commands
|
||||||
|
- Fixed type issues, tool calling properties, and telemetry decoupling
|
||||||
|
|
||||||
|
**New Features & Enhancements**
|
||||||
|
- Added Flow state export and improved state utilities
|
||||||
|
- Enhanced agent knowledge setup with optional crew embedder
|
||||||
|
- Introduced event emitter for better observability and LLM call tracking
|
||||||
|
- Added support for Python 3.10 and ChatOllama from langchain_ollama
|
||||||
|
- Integrated context window size support for the o3-mini model
|
||||||
|
- Added support for multiple router calls
|
||||||
|
|
||||||
|
**Documentation & Guides**
|
||||||
|
- Improved documentation layout and hierarchical structure
|
||||||
|
- Added QdrantVectorSearchTool guide and clarified event listener usage
|
||||||
|
- Fixed typos in prompts and updated Amazon Bedrock model listings
|
||||||
|
</Update>
|
||||||
|
|
||||||
|
<Update label="2024-02-12" description="v0.102.0">
|
||||||
|
**Core Improvements & Fixes**
|
||||||
|
- Enhanced LLM Support: Improved structured LLM output, parameter handling, and formatting for Anthropic models
|
||||||
|
- Crew & Agent Stability: Fixed issues with cloning agents/crews using knowledge sources, multiple task outputs in conditional tasks, and ignored Crew task callbacks
|
||||||
|
- Memory & Storage Fixes: Fixed short-term memory handling with Bedrock, ensured correct embedder initialization, and added a reset memories function in the crew class
|
||||||
|
- Training & Execution Reliability: Fixed broken training and interpolation issues with dict and list input types
|
||||||
|
|
||||||
|
**New Features & Enhancements**
|
||||||
|
- Advanced Knowledge Management: Improved naming conventions and enhanced embedding configuration with custom embedder support
|
||||||
|
- Expanded Logging & Observability: Added JSON format support for logging and integrated MLflow tracing documentation
|
||||||
|
- Data Handling Improvements: Updated excel_knowledge_source.py to process multi-tab files
|
||||||
|
- General Performance & Codebase Clean-Up: Streamlined enterprise code alignment and resolved linting issues
|
||||||
|
- Adding new tool: `QdrantVectorSearchTool`
|
||||||
|
|
||||||
|
**Documentation & Guides**
|
||||||
|
- Updated AI & Memory Docs: Improved Bedrock, Google AI, and long-term memory documentation
|
||||||
|
- Task & Workflow Clarity: Added "Human Input" row to Task Attributes, Langfuse guide, and FileWriterTool documentation
|
||||||
|
- Fixed Various Typos & Formatting Issues
|
||||||
|
</Update>
|
||||||
|
|
||||||
|
<Update label="2024-01-28" description="v0.100.0">
|
||||||
|
**Features**
|
||||||
|
- Add Composio docs
|
||||||
|
- Add SageMaker as a LLM provider
|
||||||
|
|
||||||
|
**Fixes**
|
||||||
|
- Overall LLM connection issues
|
||||||
|
- Using safe accessors on training
|
||||||
|
- Add version check to crew_chat.py
|
||||||
|
|
||||||
|
**Documentation**
|
||||||
|
- New docs for crewai chat
|
||||||
|
- Improve formatting and clarity in CLI and Composio Tool docs
|
||||||
|
</Update>
|
||||||
|
|
||||||
|
<Update label="2024-01-20" description="v0.98.0">
|
||||||
|
**Features**
|
||||||
|
- Conversation crew v1
|
||||||
|
- Add unique ID to flow states
|
||||||
|
- Add @persist decorator with FlowPersistence interface
|
||||||
|
|
||||||
|
**Integrations**
|
||||||
|
- Add SambaNova integration
|
||||||
|
- Add NVIDIA NIM provider in cli
|
||||||
|
- Introducing VoyageAI
|
||||||
|
|
||||||
|
**Fixes**
|
||||||
|
- Fix API Key Behavior and Entity Handling in Mem0 Integration
|
||||||
|
- Fixed core invoke loop logic and relevant tests
|
||||||
|
- Make tool inputs actual objects and not strings
|
||||||
|
- Add important missing parts to creating tools
|
||||||
|
- Drop litellm version to prevent windows issue
|
||||||
|
- Before kickoff if inputs are none
|
||||||
|
- Fixed typos, nested pydantic model issue, and docling issues
|
||||||
|
</Update>
|
||||||
|
|
||||||
|
<Update label="2024-01-04" description="v0.95.0">
|
||||||
|
**New Features**
|
||||||
|
- Adding Multimodal Abilities to Crew
|
||||||
|
- Programmatic Guardrails
|
||||||
|
- HITL multiple rounds
|
||||||
|
- Gemini 2.0 Support
|
||||||
|
- CrewAI Flows Improvements
|
||||||
|
- Add Workflow Permissions
|
||||||
|
- Add support for langfuse with litellm
|
||||||
|
- Portkey Integration with CrewAI
|
||||||
|
- Add interpolate_only method and improve error handling
|
||||||
|
- Docling Support
|
||||||
|
- Weaviate Support
|
||||||
|
|
||||||
|
**Fixes**
|
||||||
|
- output_file not respecting system path
|
||||||
|
- disk I/O error when resetting short-term memory
|
||||||
|
- CrewJSONEncoder now accepts enums
|
||||||
|
- Python max version
|
||||||
|
- Interpolation for output_file in Task
|
||||||
|
- Handle coworker role name case/whitespace properly
|
||||||
|
- Add tiktoken as explicit dependency and document Rust requirement
|
||||||
|
- Include agent knowledge in planning process
|
||||||
|
- Change storage initialization to None for KnowledgeStorage
|
||||||
|
- Fix optional storage checks
|
||||||
|
- include event emitter in flows
|
||||||
|
- Docstring, Error Handling, and Type Hints Improvements
|
||||||
|
- Suppressed userWarnings from litellm pydantic issues
|
||||||
|
</Update>
|
||||||
|
|
||||||
|
<Update label="2023-12-05" description="v0.86.0">
|
||||||
|
**Changes**
|
||||||
|
- Remove all references to pipeline and pipeline router
|
||||||
|
- Add Nvidia NIM as provider in Custom LLM
|
||||||
|
- Add knowledge demo + improve knowledge docs
|
||||||
|
- Add HITL multiple rounds of followup
|
||||||
|
- New docs about yaml crew with decorators
|
||||||
|
- Simplify template crew
|
||||||
|
</Update>
|
||||||
|
|
||||||
|
<Update label="2023-12-04" description="v0.85.0">
|
||||||
|
**Features**
|
||||||
|
- Added knowledge to agent level
|
||||||
|
- Feat/remove langchain
|
||||||
|
- Improve typed task outputs
|
||||||
|
- Log in to Tool Repository on crewai login
|
||||||
|
|
||||||
|
**Fixes**
|
||||||
|
- Fixes issues with result as answer not properly exiting LLM loop
|
||||||
|
- Fix missing key name when running with ollama provider
|
||||||
|
- Fix spelling issue found
|
||||||
|
|
||||||
|
**Documentation**
|
||||||
|
- Update readme for running mypy
|
||||||
|
- Add knowledge to mint.json
|
||||||
|
- Update Github actions
|
||||||
|
- Update Agents docs to include two approaches for creating an agent
|
||||||
|
- Improvements to LLM Configuration and Usage
|
||||||
|
</Update>
|
||||||
|
|
||||||
|
<Update label="2023-11-25" description="v0.83.0">
|
||||||
|
**New Features**
|
||||||
|
- New before_kickoff and after_kickoff crew callbacks
|
||||||
|
- Support to pre-seed agents with Knowledge
|
||||||
|
- Add support for retrieving user preferences and memories using Mem0
|
||||||
|
|
||||||
|
**Fixes**
|
||||||
|
- Fix Async Execution
|
||||||
|
- Upgrade chroma and adjust embedder function generator
|
||||||
|
- Update CLI Watson supported models + docs
|
||||||
|
- Reduce level for Bandit
|
||||||
|
- Fixing all tests
|
||||||
|
|
||||||
|
**Documentation**
|
||||||
|
- Update Docs
|
||||||
|
</Update>
|
||||||
|
|
||||||
|
<Update label="2023-11-13" description="v0.80.0">
|
||||||
|
**Fixes**
|
||||||
|
- Fixing Tokens callback replacement bug
|
||||||
|
- Fixing Step callback issue
|
||||||
|
- Add cached prompt tokens info on usage metrics
|
||||||
|
- Fix crew_train_success test
|
||||||
|
</Update>
|
||||||
@@ -150,6 +150,8 @@ result = crew.kickoff(
|
|||||||
|
|
||||||
Here are examples of how to use different types of knowledge sources:
|
Here are examples of how to use different types of knowledge sources:
|
||||||
|
|
||||||
|
Note: Please ensure that you create the ./knowledge folder. All source files (e.g., .txt, .pdf, .xlsx, .json) should be placed in this folder for centralized management.
|
||||||
|
|
||||||
### Text File Knowledge Source
|
### Text File Knowledge Source
|
||||||
```python
|
```python
|
||||||
from crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource
|
from crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource
|
||||||
@@ -460,12 +462,12 @@ class SpaceNewsKnowledgeSource(BaseKnowledgeSource):
|
|||||||
data = response.json()
|
data = response.json()
|
||||||
articles = data.get('results', [])
|
articles = data.get('results', [])
|
||||||
|
|
||||||
formatted_data = self._format_articles(articles)
|
formatted_data = self.validate_content(articles)
|
||||||
return {self.api_endpoint: formatted_data}
|
return {self.api_endpoint: formatted_data}
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
raise ValueError(f"Failed to fetch space news: {str(e)}")
|
raise ValueError(f"Failed to fetch space news: {str(e)}")
|
||||||
|
|
||||||
def _format_articles(self, articles: list) -> str:
|
def validate_content(self, articles: list) -> str:
|
||||||
"""Format articles into readable text."""
|
"""Format articles into readable text."""
|
||||||
formatted = "Space News Articles:\n\n"
|
formatted = "Space News Articles:\n\n"
|
||||||
for article in articles:
|
for article in articles:
|
||||||
|
|||||||
@@ -158,7 +158,11 @@ In this section, you'll find detailed examples that help you select, configure,
|
|||||||
|
|
||||||
<Accordion title="Anthropic">
|
<Accordion title="Anthropic">
|
||||||
```toml Code
|
```toml Code
|
||||||
|
# Required
|
||||||
ANTHROPIC_API_KEY=sk-ant-...
|
ANTHROPIC_API_KEY=sk-ant-...
|
||||||
|
|
||||||
|
# Optional
|
||||||
|
ANTHROPIC_API_BASE=<custom-base-url>
|
||||||
```
|
```
|
||||||
|
|
||||||
Example usage in your CrewAI project:
|
Example usage in your CrewAI project:
|
||||||
@@ -250,6 +254,40 @@ In this section, you'll find detailed examples that help you select, configure,
|
|||||||
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
|
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Before using Amazon Bedrock, make sure you have boto3 installed in your environment
|
||||||
|
|
||||||
|
[Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/models-regions.html) is a managed service that provides access to multiple foundation models from top AI companies through a unified API, enabling secure and responsible AI application development.
|
||||||
|
|
||||||
|
| Model | Context Window | Best For |
|
||||||
|
|-------------------------|----------------------|-------------------------------------------------------------------|
|
||||||
|
| Amazon Nova Pro | Up to 300k tokens | High-performance, model balancing accuracy, speed, and cost-effectiveness across diverse tasks. |
|
||||||
|
| Amazon Nova Micro | Up to 128k tokens | High-performance, cost-effective text-only model optimized for lowest latency responses. |
|
||||||
|
| Amazon Nova Lite | Up to 300k tokens | High-performance, affordable multimodal processing for images, video, and text with real-time capabilities. |
|
||||||
|
| Claude 3.7 Sonnet | Up to 128k tokens | High-performance, best for complex reasoning, coding & AI agents |
|
||||||
|
| Claude 3.5 Sonnet v2 | Up to 200k tokens | State-of-the-art model specialized in software engineering, agentic capabilities, and computer interaction at optimized cost. |
|
||||||
|
| Claude 3.5 Sonnet | Up to 200k tokens | High-performance model delivering superior intelligence and reasoning across diverse tasks with optimal speed-cost balance. |
|
||||||
|
| Claude 3.5 Haiku | Up to 200k tokens | Fast, compact multimodal model optimized for quick responses and seamless human-like interactions |
|
||||||
|
| Claude 3 Sonnet | Up to 200k tokens | Multimodal model balancing intelligence and speed for high-volume deployments. |
|
||||||
|
| Claude 3 Haiku | Up to 200k tokens | Compact, high-speed multimodal model optimized for quick responses and natural conversational interactions |
|
||||||
|
| Claude 3 Opus | Up to 200k tokens | Most advanced multimodal model excelling at complex tasks with human-like reasoning and superior contextual understanding. |
|
||||||
|
| Claude 2.1 | Up to 200k tokens | Enhanced version with expanded context window, improved reliability, and reduced hallucinations for long-form and RAG applications |
|
||||||
|
| Claude | Up to 100k tokens | Versatile model excelling in sophisticated dialogue, creative content, and precise instruction following. |
|
||||||
|
| Claude Instant | Up to 100k tokens | Fast, cost-effective model for everyday tasks like dialogue, analysis, summarization, and document Q&A |
|
||||||
|
| Llama 3.1 405B Instruct | Up to 128k tokens | Advanced LLM for synthetic data generation, distillation, and inference for chatbots, coding, and domain-specific tasks. |
|
||||||
|
| Llama 3.1 70B Instruct | Up to 128k tokens | Powers complex conversations with superior contextual understanding, reasoning and text generation. |
|
||||||
|
| Llama 3.1 8B Instruct | Up to 128k tokens | Advanced state-of-the-art model with language understanding, superior reasoning, and text generation. |
|
||||||
|
| Llama 3 70B Instruct | Up to 8k tokens | Powers complex conversations with superior contextual understanding, reasoning and text generation. |
|
||||||
|
| Llama 3 8B Instruct | Up to 8k tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. |
|
||||||
|
| Titan Text G1 - Lite | Up to 4k tokens | Lightweight, cost-effective model optimized for English tasks and fine-tuning with focus on summarization and content generation. |
|
||||||
|
| Titan Text G1 - Express | Up to 8k tokens | Versatile model for general language tasks, chat, and RAG applications with support for English and 100+ languages. |
|
||||||
|
| Cohere Command | Up to 4k tokens | Model specialized in following user commands and delivering practical enterprise solutions. |
|
||||||
|
| Jurassic-2 Mid | Up to 8,191 tokens | Cost-effective model balancing quality and affordability for diverse language tasks like Q&A, summarization, and content generation. |
|
||||||
|
| Jurassic-2 Ultra | Up to 8,191 tokens | Model for advanced text generation and comprehension, excelling in complex tasks like analysis and content creation. |
|
||||||
|
| Jamba-Instruct | Up to 256k tokens | Model with extended context window optimized for cost-effective text generation, summarization, and Q&A. |
|
||||||
|
| Mistral 7B Instruct | Up to 32k tokens | This LLM follows instructions, completes requests, and generates creative text. |
|
||||||
|
| Mistral 8x7B Instruct | Up to 32k tokens | An MOE LLM that follows instructions, completes requests, and generates creative text. |
|
||||||
|
|
||||||
</Accordion>
|
</Accordion>
|
||||||
|
|
||||||
<Accordion title="Amazon SageMaker">
|
<Accordion title="Amazon SageMaker">
|
||||||
@@ -368,6 +406,46 @@ In this section, you'll find detailed examples that help you select, configure,
|
|||||||
| baichuan-inc/baichuan2-13b-chat | 4,096 tokens | Support Chinese and English chat, coding, math, instruction following, solving quizzes |
|
| baichuan-inc/baichuan2-13b-chat | 4,096 tokens | Support Chinese and English chat, coding, math, instruction following, solving quizzes |
|
||||||
</Accordion>
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Local NVIDIA NIM Deployed using WSL2">
|
||||||
|
|
||||||
|
NVIDIA NIM enables you to run powerful LLMs locally on your Windows machine using WSL2 (Windows Subsystem for Linux).
|
||||||
|
This approach allows you to leverage your NVIDIA GPU for private, secure, and cost-effective AI inference without relying on cloud services.
|
||||||
|
Perfect for development, testing, or production scenarios where data privacy or offline capabilities are required.
|
||||||
|
|
||||||
|
Here is a step-by-step guide to setting up a local NVIDIA NIM model:
|
||||||
|
|
||||||
|
1. Follow installation instructions from [NVIDIA Website](https://docs.nvidia.com/nim/wsl2/latest/getting-started.html)
|
||||||
|
|
||||||
|
2. Install the local model. For Llama 3.1-8b follow [instructions](https://build.nvidia.com/meta/llama-3_1-8b-instruct/deploy)
|
||||||
|
|
||||||
|
3. Configure your crewai local models:
|
||||||
|
|
||||||
|
```python Code
|
||||||
|
from crewai.llm import LLM
|
||||||
|
|
||||||
|
local_nvidia_nim_llm = LLM(
|
||||||
|
model="openai/meta/llama-3.1-8b-instruct", # it's an openai-api compatible model
|
||||||
|
base_url="http://localhost:8000/v1",
|
||||||
|
api_key="<your_api_key|any text if you have not configured it>", # api_key is required, but you can use any text
|
||||||
|
)
|
||||||
|
|
||||||
|
# Then you can use it in your crew:
|
||||||
|
|
||||||
|
@CrewBase
|
||||||
|
class MyCrew():
|
||||||
|
# ...
|
||||||
|
|
||||||
|
@agent
|
||||||
|
def researcher(self) -> Agent:
|
||||||
|
return Agent(
|
||||||
|
config=self.agents_config['researcher'],
|
||||||
|
llm=local_nvidia_nim_llm
|
||||||
|
)
|
||||||
|
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
<Accordion title="Groq">
|
<Accordion title="Groq">
|
||||||
Set the following environment variables in your `.env` file:
|
Set the following environment variables in your `.env` file:
|
||||||
|
|
||||||
@@ -708,5 +786,5 @@ Learn how to get the most out of your LLM configuration:
|
|||||||
<Tip>
|
<Tip>
|
||||||
Use larger context models for extensive tasks
|
Use larger context models for extensive tasks
|
||||||
</Tip>
|
</Tip>
|
||||||
|
</Tab>
|
||||||
```
|
</Tabs>
|
||||||
|
|||||||
@@ -60,7 +60,8 @@ my_crew = Crew(
|
|||||||
```python Code
|
```python Code
|
||||||
from crewai import Crew, Process
|
from crewai import Crew, Process
|
||||||
from crewai.memory import LongTermMemory, ShortTermMemory, EntityMemory
|
from crewai.memory import LongTermMemory, ShortTermMemory, EntityMemory
|
||||||
from crewai.memory.storage import LTMSQLiteStorage, RAGStorage
|
from crewai.memory.storage.rag_storage import RAGStorage
|
||||||
|
from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage
|
||||||
from typing import List, Optional
|
from typing import List, Optional
|
||||||
|
|
||||||
# Assemble your crew with memory capabilities
|
# Assemble your crew with memory capabilities
|
||||||
@@ -119,7 +120,7 @@ Example using environment variables:
|
|||||||
import os
|
import os
|
||||||
from crewai import Crew
|
from crewai import Crew
|
||||||
from crewai.memory import LongTermMemory
|
from crewai.memory import LongTermMemory
|
||||||
from crewai.memory.storage import LTMSQLiteStorage
|
from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage
|
||||||
|
|
||||||
# Configure storage path using environment variable
|
# Configure storage path using environment variable
|
||||||
storage_path = os.getenv("CREWAI_STORAGE_DIR", "./storage")
|
storage_path = os.getenv("CREWAI_STORAGE_DIR", "./storage")
|
||||||
@@ -148,7 +149,7 @@ crew = Crew(memory=True) # Uses default storage locations
|
|||||||
```python
|
```python
|
||||||
from crewai import Crew
|
from crewai import Crew
|
||||||
from crewai.memory import LongTermMemory
|
from crewai.memory import LongTermMemory
|
||||||
from crewai.memory.storage import LTMSQLiteStorage
|
from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage
|
||||||
|
|
||||||
# Configure custom storage paths
|
# Configure custom storage paths
|
||||||
crew = Crew(
|
crew = Crew(
|
||||||
|
|||||||
223
docs/docs.json
Normal file
223
docs/docs.json
Normal file
@@ -0,0 +1,223 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://mintlify.com/docs.json",
|
||||||
|
"theme": "palm",
|
||||||
|
"name": "CrewAI",
|
||||||
|
"colors": {
|
||||||
|
"primary": "#EB6658",
|
||||||
|
"light": "#F3A78B",
|
||||||
|
"dark": "#C94C3C"
|
||||||
|
},
|
||||||
|
"favicon": "favicon.svg",
|
||||||
|
"navigation": {
|
||||||
|
"tabs": [
|
||||||
|
{
|
||||||
|
"tab": "Get Started",
|
||||||
|
"groups": [
|
||||||
|
{
|
||||||
|
"group": "Get Started",
|
||||||
|
"pages": [
|
||||||
|
"introduction",
|
||||||
|
"installation",
|
||||||
|
"quickstart",
|
||||||
|
"changelog"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "Guides",
|
||||||
|
"pages": [
|
||||||
|
{
|
||||||
|
"group": "Concepts",
|
||||||
|
"pages": [
|
||||||
|
"guides/concepts/evaluating-use-cases"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "Agents",
|
||||||
|
"pages": [
|
||||||
|
"guides/agents/crafting-effective-agents"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "Crews",
|
||||||
|
"pages": [
|
||||||
|
"guides/crews/first-crew"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "Flows",
|
||||||
|
"pages": [
|
||||||
|
"guides/flows/first-flow",
|
||||||
|
"guides/flows/mastering-flow-state"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "Advanced",
|
||||||
|
"pages": [
|
||||||
|
"guides/advanced/customizing-prompts",
|
||||||
|
"guides/advanced/fingerprinting"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "Core Concepts",
|
||||||
|
"pages": [
|
||||||
|
"concepts/agents",
|
||||||
|
"concepts/tasks",
|
||||||
|
"concepts/crews",
|
||||||
|
"concepts/flows",
|
||||||
|
"concepts/knowledge",
|
||||||
|
"concepts/llms",
|
||||||
|
"concepts/processes",
|
||||||
|
"concepts/collaboration",
|
||||||
|
"concepts/training",
|
||||||
|
"concepts/memory",
|
||||||
|
"concepts/planning",
|
||||||
|
"concepts/testing",
|
||||||
|
"concepts/cli",
|
||||||
|
"concepts/tools",
|
||||||
|
"concepts/event-listener",
|
||||||
|
"concepts/langchain-tools",
|
||||||
|
"concepts/llamaindex-tools"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "How to Guides",
|
||||||
|
"pages": [
|
||||||
|
"how-to/create-custom-tools",
|
||||||
|
"how-to/sequential-process",
|
||||||
|
"how-to/hierarchical-process",
|
||||||
|
"how-to/custom-manager-agent",
|
||||||
|
"how-to/llm-connections",
|
||||||
|
"how-to/customizing-agents",
|
||||||
|
"how-to/multimodal-agents",
|
||||||
|
"how-to/coding-agents",
|
||||||
|
"how-to/force-tool-output-as-result",
|
||||||
|
"how-to/human-input-on-execution",
|
||||||
|
"how-to/kickoff-async",
|
||||||
|
"how-to/kickoff-for-each",
|
||||||
|
"how-to/replay-tasks-from-latest-crew-kickoff",
|
||||||
|
"how-to/conditional-tasks",
|
||||||
|
"how-to/agentops-observability",
|
||||||
|
"how-to/langtrace-observability",
|
||||||
|
"how-to/mlflow-observability",
|
||||||
|
"how-to/openlit-observability",
|
||||||
|
"how-to/portkey-observability",
|
||||||
|
"how-to/langfuse-observability"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "Tools",
|
||||||
|
"pages": [
|
||||||
|
"tools/aimindtool",
|
||||||
|
"tools/apifyactorstool",
|
||||||
|
"tools/bravesearchtool",
|
||||||
|
"tools/browserbaseloadtool",
|
||||||
|
"tools/codedocssearchtool",
|
||||||
|
"tools/codeinterpretertool",
|
||||||
|
"tools/composiotool",
|
||||||
|
"tools/csvsearchtool",
|
||||||
|
"tools/dalletool",
|
||||||
|
"tools/directorysearchtool",
|
||||||
|
"tools/directoryreadtool",
|
||||||
|
"tools/docxsearchtool",
|
||||||
|
"tools/exasearchtool",
|
||||||
|
"tools/filereadtool",
|
||||||
|
"tools/filewritetool",
|
||||||
|
"tools/firecrawlcrawlwebsitetool",
|
||||||
|
"tools/firecrawlscrapewebsitetool",
|
||||||
|
"tools/firecrawlsearchtool",
|
||||||
|
"tools/githubsearchtool",
|
||||||
|
"tools/hyperbrowserloadtool",
|
||||||
|
"tools/linkupsearchtool",
|
||||||
|
"tools/llamaindextool",
|
||||||
|
"tools/serperdevtool",
|
||||||
|
"tools/s3readertool",
|
||||||
|
"tools/s3writertool",
|
||||||
|
"tools/scrapegraphscrapetool",
|
||||||
|
"tools/scrapeelementfromwebsitetool",
|
||||||
|
"tools/jsonsearchtool",
|
||||||
|
"tools/mdxsearchtool",
|
||||||
|
"tools/mysqltool",
|
||||||
|
"tools/multiontool",
|
||||||
|
"tools/nl2sqltool",
|
||||||
|
"tools/patronustools",
|
||||||
|
"tools/pdfsearchtool",
|
||||||
|
"tools/pgsearchtool",
|
||||||
|
"tools/qdrantvectorsearchtool",
|
||||||
|
"tools/ragtool",
|
||||||
|
"tools/scrapewebsitetool",
|
||||||
|
"tools/scrapflyscrapetool",
|
||||||
|
"tools/seleniumscrapingtool",
|
||||||
|
"tools/snowflakesearchtool",
|
||||||
|
"tools/spidertool",
|
||||||
|
"tools/txtsearchtool",
|
||||||
|
"tools/visiontool",
|
||||||
|
"tools/weaviatevectorsearchtool",
|
||||||
|
"tools/websitesearchtool",
|
||||||
|
"tools/xmlsearchtool",
|
||||||
|
"tools/youtubechannelsearchtool",
|
||||||
|
"tools/youtubevideosearchtool"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "Telemetry",
|
||||||
|
"pages": [
|
||||||
|
"telemetry"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"tab": "Examples",
|
||||||
|
"groups": [
|
||||||
|
{
|
||||||
|
"group": "Examples",
|
||||||
|
"pages": [
|
||||||
|
"examples/example"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"global": {
|
||||||
|
"anchors": [
|
||||||
|
{
|
||||||
|
"anchor": "Community",
|
||||||
|
"href": "https://community.crewai.com",
|
||||||
|
"icon": "discourse"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"logo": {
|
||||||
|
"light": "crew_only_logo.png",
|
||||||
|
"dark": "crew_only_logo.png"
|
||||||
|
},
|
||||||
|
"appearance": {
|
||||||
|
"default": "dark",
|
||||||
|
"strict": false
|
||||||
|
},
|
||||||
|
"navbar": {
|
||||||
|
"primary": {
|
||||||
|
"type": "github",
|
||||||
|
"href": "https://github.com/crewAIInc/crewAI"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"search": {
|
||||||
|
"prompt": "Search CrewAI docs"
|
||||||
|
},
|
||||||
|
"seo": {
|
||||||
|
"indexing": "navigable"
|
||||||
|
},
|
||||||
|
"footer": {
|
||||||
|
"socials": {
|
||||||
|
"website": "https://crewai.com",
|
||||||
|
"x": "https://x.com/crewAIInc",
|
||||||
|
"github": "https://github.com/crewAIInc/crewAI",
|
||||||
|
"linkedin": "https://www.linkedin.com/company/crewai-inc",
|
||||||
|
"youtube": "https://youtube.com/@crewAIInc",
|
||||||
|
"reddit": "https://www.reddit.com/r/crewAIInc/"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,4 +1,5 @@
|
|||||||
---title: Customizing Prompts
|
---
|
||||||
|
title: Customizing Prompts
|
||||||
description: Dive deeper into low-level prompt customization for CrewAI, enabling super custom and complex use cases for different models and languages.
|
description: Dive deeper into low-level prompt customization for CrewAI, enabling super custom and complex use cases for different models and languages.
|
||||||
icon: message-pen
|
icon: message-pen
|
||||||
---
|
---
|
||||||
|
|||||||
225
docs/mint.json
225
docs/mint.json
@@ -1,225 +0,0 @@
|
|||||||
{
|
|
||||||
"name": "CrewAI",
|
|
||||||
"theme": "venus",
|
|
||||||
"logo": {
|
|
||||||
"dark": "crew_only_logo.png",
|
|
||||||
"light": "crew_only_logo.png"
|
|
||||||
},
|
|
||||||
"favicon": "favicon.svg",
|
|
||||||
"colors": {
|
|
||||||
"primary": "#EB6658",
|
|
||||||
"light": "#F3A78B",
|
|
||||||
"dark": "#C94C3C",
|
|
||||||
"anchors": {
|
|
||||||
"from": "#737373",
|
|
||||||
"to": "#EB6658"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"seo": {
|
|
||||||
"indexHiddenPages": false
|
|
||||||
},
|
|
||||||
"modeToggle": {
|
|
||||||
"default": "dark",
|
|
||||||
"isHidden": false
|
|
||||||
},
|
|
||||||
"feedback": {
|
|
||||||
"suggestEdit": true,
|
|
||||||
"raiseIssue": true,
|
|
||||||
"thumbsRating": true
|
|
||||||
},
|
|
||||||
"topbarCtaButton": {
|
|
||||||
"type": "github",
|
|
||||||
"url": "https://github.com/crewAIInc/crewAI"
|
|
||||||
},
|
|
||||||
"primaryTab": {
|
|
||||||
"name": "Get Started"
|
|
||||||
},
|
|
||||||
"tabs": [
|
|
||||||
{
|
|
||||||
"name": "Examples",
|
|
||||||
"url": "examples"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"anchors": [
|
|
||||||
{
|
|
||||||
"name": "Community",
|
|
||||||
"icon": "discourse",
|
|
||||||
"url": "https://community.crewai.com"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "Changelog",
|
|
||||||
"icon": "timeline",
|
|
||||||
"url": "https://github.com/crewAIInc/crewAI/releases"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"navigation": [
|
|
||||||
{
|
|
||||||
"group": "Get Started",
|
|
||||||
"pages": [
|
|
||||||
"introduction",
|
|
||||||
"installation",
|
|
||||||
"quickstart"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"group": "Guides",
|
|
||||||
"pages": [
|
|
||||||
{
|
|
||||||
"group": "Concepts",
|
|
||||||
"pages": [
|
|
||||||
"guides/concepts/evaluating-use-cases"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"group": "Agents",
|
|
||||||
"pages": [
|
|
||||||
"guides/agents/crafting-effective-agents"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"group": "Crews",
|
|
||||||
"pages": [
|
|
||||||
"guides/crews/first-crew"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"group": "Flows",
|
|
||||||
"pages": [
|
|
||||||
"guides/flows/first-flow",
|
|
||||||
"guides/flows/mastering-flow-state"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"group": "Advanced",
|
|
||||||
"pages": [
|
|
||||||
"guides/advanced/customizing-prompts",
|
|
||||||
"guides/advanced/fingerprinting"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"group": "Core Concepts",
|
|
||||||
"pages": [
|
|
||||||
"concepts/agents",
|
|
||||||
"concepts/tasks",
|
|
||||||
"concepts/crews",
|
|
||||||
"concepts/flows",
|
|
||||||
"concepts/knowledge",
|
|
||||||
"concepts/llms",
|
|
||||||
"concepts/processes",
|
|
||||||
"concepts/collaboration",
|
|
||||||
"concepts/training",
|
|
||||||
"concepts/memory",
|
|
||||||
"concepts/planning",
|
|
||||||
"concepts/testing",
|
|
||||||
"concepts/cli",
|
|
||||||
"concepts/tools",
|
|
||||||
"concepts/event-listener",
|
|
||||||
"concepts/langchain-tools",
|
|
||||||
"concepts/llamaindex-tools"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"group": "How to Guides",
|
|
||||||
"pages": [
|
|
||||||
"how-to/create-custom-tools",
|
|
||||||
"how-to/sequential-process",
|
|
||||||
"how-to/hierarchical-process",
|
|
||||||
"how-to/custom-manager-agent",
|
|
||||||
"how-to/llm-connections",
|
|
||||||
"how-to/customizing-agents",
|
|
||||||
"how-to/multimodal-agents",
|
|
||||||
"how-to/coding-agents",
|
|
||||||
"how-to/force-tool-output-as-result",
|
|
||||||
"how-to/human-input-on-execution",
|
|
||||||
"how-to/kickoff-async",
|
|
||||||
"how-to/kickoff-for-each",
|
|
||||||
"how-to/replay-tasks-from-latest-crew-kickoff",
|
|
||||||
"how-to/conditional-tasks",
|
|
||||||
"how-to/agentops-observability",
|
|
||||||
"how-to/langtrace-observability",
|
|
||||||
"how-to/mlflow-observability",
|
|
||||||
"how-to/openlit-observability",
|
|
||||||
"how-to/portkey-observability",
|
|
||||||
"how-to/langfuse-observability"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"group": "Examples",
|
|
||||||
"pages": [
|
|
||||||
"examples/example"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"group": "Tools",
|
|
||||||
"pages": [
|
|
||||||
"tools/aimindtool",
|
|
||||||
"tools/apifyactorstool",
|
|
||||||
"tools/bravesearchtool",
|
|
||||||
"tools/browserbaseloadtool",
|
|
||||||
"tools/codedocssearchtool",
|
|
||||||
"tools/codeinterpretertool",
|
|
||||||
"tools/composiotool",
|
|
||||||
"tools/csvsearchtool",
|
|
||||||
"tools/dalletool",
|
|
||||||
"tools/directorysearchtool",
|
|
||||||
"tools/directoryreadtool",
|
|
||||||
"tools/docxsearchtool",
|
|
||||||
"tools/exasearchtool",
|
|
||||||
"tools/filereadtool",
|
|
||||||
"tools/filewritetool",
|
|
||||||
"tools/firecrawlcrawlwebsitetool",
|
|
||||||
"tools/firecrawlscrapewebsitetool",
|
|
||||||
"tools/firecrawlsearchtool",
|
|
||||||
"tools/githubsearchtool",
|
|
||||||
"tools/hyperbrowserloadtool",
|
|
||||||
"tools/linkupsearchtool",
|
|
||||||
"tools/llamaindextool",
|
|
||||||
"tools/serperdevtool",
|
|
||||||
"tools/s3readertool",
|
|
||||||
"tools/s3writertool",
|
|
||||||
"tools/scrapegraphscrapetool",
|
|
||||||
"tools/scrapeelementfromwebsitetool",
|
|
||||||
"tools/jsonsearchtool",
|
|
||||||
"tools/mdxsearchtool",
|
|
||||||
"tools/mysqltool",
|
|
||||||
"tools/multiontool",
|
|
||||||
"tools/nl2sqltool",
|
|
||||||
"tools/patronustools",
|
|
||||||
"tools/pdfsearchtool",
|
|
||||||
"tools/pgsearchtool",
|
|
||||||
"tools/qdrantvectorsearchtool",
|
|
||||||
"tools/ragtool",
|
|
||||||
"tools/scrapewebsitetool",
|
|
||||||
"tools/scrapflyscrapetool",
|
|
||||||
"tools/seleniumscrapingtool",
|
|
||||||
"tools/snowflakesearchtool",
|
|
||||||
"tools/spidertool",
|
|
||||||
"tools/txtsearchtool",
|
|
||||||
"tools/visiontool",
|
|
||||||
"tools/weaviatevectorsearchtool",
|
|
||||||
"tools/websitesearchtool",
|
|
||||||
"tools/xmlsearchtool",
|
|
||||||
"tools/youtubechannelsearchtool",
|
|
||||||
"tools/youtubevideosearchtool"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"group": "Telemetry",
|
|
||||||
"pages": [
|
|
||||||
"telemetry"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"search": {
|
|
||||||
"prompt": "Search CrewAI docs"
|
|
||||||
},
|
|
||||||
"footerSocials": {
|
|
||||||
"website": "https://crewai.com",
|
|
||||||
"x": "https://x.com/crewAIInc",
|
|
||||||
"github": "https://github.com/crewAIInc/crewAI",
|
|
||||||
"linkedin": "https://www.linkedin.com/company/crewai-inc",
|
|
||||||
"youtube": "https://youtube.com/@crewAIInc"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -300,7 +300,7 @@ email_summarizer:
|
|||||||
```
|
```
|
||||||
|
|
||||||
<Tip>
|
<Tip>
|
||||||
Note how we use the same name for the agent in the `tasks.yaml` (`email_summarizer_task`) file as the method name in the `crew.py` (`email_summarizer_task`) file.
|
Note how we use the same name for the task in the `tasks.yaml` (`email_summarizer_task`) file as the method name in the `crew.py` (`email_summarizer_task`) file.
|
||||||
</Tip>
|
</Tip>
|
||||||
|
|
||||||
```yaml tasks.yaml
|
```yaml tasks.yaml
|
||||||
|
|||||||
@@ -7,8 +7,10 @@ icon: file-code
|
|||||||
# `JSONSearchTool`
|
# `JSONSearchTool`
|
||||||
|
|
||||||
<Note>
|
<Note>
|
||||||
The JSONSearchTool is currently in an experimental phase. This means the tool is under active development, and users might encounter unexpected behavior or changes.
|
The JSONSearchTool is currently in an experimental phase. This means the tool
|
||||||
We highly encourage feedback on any issues or suggestions for improvements.
|
is under active development, and users might encounter unexpected behavior or
|
||||||
|
changes. We highly encourage feedback on any issues or suggestions for
|
||||||
|
improvements.
|
||||||
</Note>
|
</Note>
|
||||||
|
|
||||||
## Description
|
## Description
|
||||||
@@ -60,7 +62,7 @@ tool = JSONSearchTool(
|
|||||||
# stream=true,
|
# stream=true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"embedder": {
|
"embedding_model": {
|
||||||
"provider": "google", # or openai, ollama, ...
|
"provider": "google", # or openai, ollama, ...
|
||||||
"config": {
|
"config": {
|
||||||
"model": "models/embedding-001",
|
"model": "models/embedding-001",
|
||||||
@@ -70,4 +72,4 @@ tool = JSONSearchTool(
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ icon: vector-square
|
|||||||
|
|
||||||
## Description
|
## Description
|
||||||
|
|
||||||
The `RagTool` is designed to answer questions by leveraging the power of Retrieval-Augmented Generation (RAG) through EmbedChain.
|
The `RagTool` is designed to answer questions by leveraging the power of Retrieval-Augmented Generation (RAG) through EmbedChain.
|
||||||
It provides a dynamic knowledge base that can be queried to retrieve relevant information from various data sources.
|
It provides a dynamic knowledge base that can be queried to retrieve relevant information from various data sources.
|
||||||
This tool is particularly useful for applications that require access to a vast array of information and need to provide contextually relevant answers.
|
This tool is particularly useful for applications that require access to a vast array of information and need to provide contextually relevant answers.
|
||||||
|
|
||||||
## Example
|
## Example
|
||||||
@@ -138,7 +138,7 @@ config = {
|
|||||||
"model": "gpt-4",
|
"model": "gpt-4",
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"embedder": {
|
"embedding_model": {
|
||||||
"provider": "openai",
|
"provider": "openai",
|
||||||
"config": {
|
"config": {
|
||||||
"model": "text-embedding-ada-002"
|
"model": "text-embedding-ada-002"
|
||||||
@@ -151,4 +151,4 @@ rag_tool = RagTool(config=config, summarize=True)
|
|||||||
|
|
||||||
## Conclusion
|
## Conclusion
|
||||||
|
|
||||||
The `RagTool` provides a powerful way to create and query knowledge bases from various data sources. By leveraging Retrieval-Augmented Generation, it enables agents to access and retrieve relevant information efficiently, enhancing their ability to provide accurate and contextually appropriate responses.
|
The `RagTool` provides a powerful way to create and query knowledge bases from various data sources. By leveraging Retrieval-Augmented Generation, it enables agents to access and retrieve relevant information efficiently, enhancing their ability to provide accurate and contextually appropriate responses.
|
||||||
|
|||||||
@@ -17,9 +17,9 @@ dependencies = [
|
|||||||
"pdfplumber>=0.11.4",
|
"pdfplumber>=0.11.4",
|
||||||
"regex>=2024.9.11",
|
"regex>=2024.9.11",
|
||||||
# Telemetry and Monitoring
|
# Telemetry and Monitoring
|
||||||
"opentelemetry-api>=1.22.0",
|
"opentelemetry-api>=1.30.0",
|
||||||
"opentelemetry-sdk>=1.22.0",
|
"opentelemetry-sdk>=1.30.0",
|
||||||
"opentelemetry-exporter-otlp-proto-http>=1.22.0",
|
"opentelemetry-exporter-otlp-proto-http>=1.30.0",
|
||||||
# Data Handling
|
# Data Handling
|
||||||
"chromadb>=0.5.23",
|
"chromadb>=0.5.23",
|
||||||
"openpyxl>=3.1.5",
|
"openpyxl>=3.1.5",
|
||||||
|
|||||||
@@ -20,7 +20,6 @@ from crewai.tools.agent_tools.agent_tools import AgentTools
|
|||||||
from crewai.utilities import Converter, Prompts
|
from crewai.utilities import Converter, Prompts
|
||||||
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
|
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
|
||||||
from crewai.utilities.converter import generate_model_description
|
from crewai.utilities.converter import generate_model_description
|
||||||
from crewai.utilities.embedding_configurator import EmbeddingConfig
|
|
||||||
from crewai.utilities.events.agent_events import (
|
from crewai.utilities.events.agent_events import (
|
||||||
AgentExecutionCompletedEvent,
|
AgentExecutionCompletedEvent,
|
||||||
AgentExecutionErrorEvent,
|
AgentExecutionErrorEvent,
|
||||||
@@ -109,7 +108,7 @@ class Agent(BaseAgent):
|
|||||||
default="safe",
|
default="safe",
|
||||||
description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
|
description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
|
||||||
)
|
)
|
||||||
embedder: Optional[EmbeddingConfig] = Field(
|
embedder: Optional[Dict[str, Any]] = Field(
|
||||||
default=None,
|
default=None,
|
||||||
description="Embedder configuration for the agent.",
|
description="Embedder configuration for the agent.",
|
||||||
)
|
)
|
||||||
@@ -135,7 +134,7 @@ class Agent(BaseAgent):
|
|||||||
self.cache_handler = CacheHandler()
|
self.cache_handler = CacheHandler()
|
||||||
self.set_cache_handler(self.cache_handler)
|
self.set_cache_handler(self.cache_handler)
|
||||||
|
|
||||||
def set_knowledge(self, crew_embedder: Optional[EmbeddingConfig] = None):
|
def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
|
||||||
try:
|
try:
|
||||||
if self.embedder is None and crew_embedder:
|
if self.embedder is None and crew_embedder:
|
||||||
self.embedder = crew_embedder
|
self.embedder = crew_embedder
|
||||||
|
|||||||
@@ -25,7 +25,6 @@ from crewai.tools.base_tool import BaseTool, Tool
|
|||||||
from crewai.utilities import I18N, Logger, RPMController
|
from crewai.utilities import I18N, Logger, RPMController
|
||||||
from crewai.utilities.config import process_config
|
from crewai.utilities.config import process_config
|
||||||
from crewai.utilities.converter import Converter
|
from crewai.utilities.converter import Converter
|
||||||
from crewai.utilities.embedding_configurator import EmbeddingConfig
|
|
||||||
|
|
||||||
T = TypeVar("T", bound="BaseAgent")
|
T = TypeVar("T", bound="BaseAgent")
|
||||||
|
|
||||||
@@ -363,5 +362,5 @@ class BaseAgent(ABC, BaseModel):
|
|||||||
self._rpm_controller = rpm_controller
|
self._rpm_controller = rpm_controller
|
||||||
self.create_agent_executor()
|
self.create_agent_executor()
|
||||||
|
|
||||||
def set_knowledge(self, crew_embedder: Optional[EmbeddingConfig] = None):
|
def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
|
||||||
pass
|
pass
|
||||||
|
|||||||
@@ -124,9 +124,9 @@ class CrewAgentParser:
|
|||||||
)
|
)
|
||||||
|
|
||||||
def _extract_thought(self, text: str) -> str:
|
def _extract_thought(self, text: str) -> str:
|
||||||
thought_index = text.find("\n\nAction")
|
thought_index = text.find("\nAction")
|
||||||
if thought_index == -1:
|
if thought_index == -1:
|
||||||
thought_index = text.find("\n\nFinal Answer")
|
thought_index = text.find("\nFinal Answer")
|
||||||
if thought_index == -1:
|
if thought_index == -1:
|
||||||
return ""
|
return ""
|
||||||
thought = text[:thought_index].strip()
|
thought = text[:thought_index].strip()
|
||||||
@@ -136,7 +136,7 @@ class CrewAgentParser:
|
|||||||
|
|
||||||
def _clean_action(self, text: str) -> str:
|
def _clean_action(self, text: str) -> str:
|
||||||
"""Clean action string by removing non-essential formatting characters."""
|
"""Clean action string by removing non-essential formatting characters."""
|
||||||
return re.sub(r"^\s*\*+\s*|\s*\*+\s*$", "", text).strip()
|
return text.strip().strip("*").strip()
|
||||||
|
|
||||||
def _safe_repair_json(self, tool_input: str) -> str:
|
def _safe_repair_json(self, tool_input: str) -> str:
|
||||||
UNABLE_TO_REPAIR_JSON_RESULTS = ['""', "{}"]
|
UNABLE_TO_REPAIR_JSON_RESULTS = ['""', "{}"]
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
import subprocess
|
import subprocess
|
||||||
|
from functools import lru_cache
|
||||||
|
|
||||||
|
|
||||||
class Repository:
|
class Repository:
|
||||||
@@ -35,6 +36,7 @@ class Repository:
|
|||||||
encoding="utf-8",
|
encoding="utf-8",
|
||||||
).strip()
|
).strip()
|
||||||
|
|
||||||
|
@lru_cache(maxsize=None)
|
||||||
def is_git_repo(self) -> bool:
|
def is_git_repo(self) -> bool:
|
||||||
"""Check if the current directory is a git repository."""
|
"""Check if the current directory is a git repository."""
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ dependencies = [
|
|||||||
|
|
||||||
[project.scripts]
|
[project.scripts]
|
||||||
kickoff = "{{folder_name}}.main:kickoff"
|
kickoff = "{{folder_name}}.main:kickoff"
|
||||||
|
run_crew = "{{folder_name}}.main:kickoff"
|
||||||
plot = "{{folder_name}}.main:plot"
|
plot = "{{folder_name}}.main:plot"
|
||||||
|
|
||||||
[build-system]
|
[build-system]
|
||||||
|
|||||||
@@ -41,7 +41,6 @@ from crewai.tools.base_tool import Tool
|
|||||||
from crewai.types.usage_metrics import UsageMetrics
|
from crewai.types.usage_metrics import UsageMetrics
|
||||||
from crewai.utilities import I18N, FileHandler, Logger, RPMController
|
from crewai.utilities import I18N, FileHandler, Logger, RPMController
|
||||||
from crewai.utilities.constants import TRAINING_DATA_FILE
|
from crewai.utilities.constants import TRAINING_DATA_FILE
|
||||||
from crewai.utilities.embedding_configurator import EmbeddingConfig
|
|
||||||
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
|
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
|
||||||
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
|
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
|
||||||
from crewai.utilities.events.crew_events import (
|
from crewai.utilities.events.crew_events import (
|
||||||
@@ -146,7 +145,7 @@ class Crew(BaseModel):
|
|||||||
default=None,
|
default=None,
|
||||||
description="An instance of the UserMemory to be used by the Crew to store/fetch memories of a specific user.",
|
description="An instance of the UserMemory to be used by the Crew to store/fetch memories of a specific user.",
|
||||||
)
|
)
|
||||||
embedder: Optional[EmbeddingConfig] = Field(
|
embedder: Optional[dict] = Field(
|
||||||
default=None,
|
default=None,
|
||||||
description="Configuration for the embedder to be used for the crew.",
|
description="Configuration for the embedder to be used for the crew.",
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ from pydantic import BaseModel, ConfigDict, Field
|
|||||||
|
|
||||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||||
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
|
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
|
||||||
from crewai.utilities.embedding_configurator import EmbeddingConfig
|
|
||||||
|
|
||||||
os.environ["TOKENIZERS_PARALLELISM"] = "false" # removes logging from fastembed
|
os.environ["TOKENIZERS_PARALLELISM"] = "false" # removes logging from fastembed
|
||||||
|
|
||||||
@@ -22,14 +21,14 @@ class Knowledge(BaseModel):
|
|||||||
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
|
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
|
||||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||||
storage: Optional[KnowledgeStorage] = Field(default=None)
|
storage: Optional[KnowledgeStorage] = Field(default=None)
|
||||||
embedder: Optional[EmbeddingConfig] = None
|
embedder: Optional[Dict[str, Any]] = None
|
||||||
collection_name: Optional[str] = None
|
collection_name: Optional[str] = None
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
collection_name: str,
|
collection_name: str,
|
||||||
sources: List[BaseKnowledgeSource],
|
sources: List[BaseKnowledgeSource],
|
||||||
embedder: Optional[EmbeddingConfig] = None,
|
embedder: Optional[Dict[str, Any]] = None,
|
||||||
storage: Optional[KnowledgeStorage] = None,
|
storage: Optional[KnowledgeStorage] = None,
|
||||||
**data,
|
**data,
|
||||||
):
|
):
|
||||||
|
|||||||
@@ -15,7 +15,6 @@ from chromadb.config import Settings
|
|||||||
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
|
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
|
||||||
from crewai.utilities import EmbeddingConfigurator
|
from crewai.utilities import EmbeddingConfigurator
|
||||||
from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
|
from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
|
||||||
from crewai.utilities.embedding_configurator import EmbeddingConfig
|
|
||||||
from crewai.utilities.logger import Logger
|
from crewai.utilities.logger import Logger
|
||||||
from crewai.utilities.paths import db_storage_path
|
from crewai.utilities.paths import db_storage_path
|
||||||
|
|
||||||
@@ -49,7 +48,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
|||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
embedder: Optional[EmbeddingConfig] = None,
|
embedder: Optional[Dict[str, Any]] = None,
|
||||||
collection_name: Optional[str] = None,
|
collection_name: Optional[str] = None,
|
||||||
):
|
):
|
||||||
self.collection_name = collection_name
|
self.collection_name = collection_name
|
||||||
@@ -188,7 +187,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
|||||||
api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
|
api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
|
||||||
)
|
)
|
||||||
|
|
||||||
def _set_embedder_config(self, embedder: Optional[EmbeddingConfig] = None) -> None:
|
def _set_embedder_config(self, embedder: Optional[Dict[str, Any]] = None) -> None:
|
||||||
"""Set the embedding configuration for the knowledge storage.
|
"""Set the embedding configuration for the knowledge storage.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
import os
|
import os
|
||||||
from typing import Any, Dict, List
|
from typing import Any, Dict, List
|
||||||
|
|
||||||
from mem0 import MemoryClient
|
from mem0 import Memory, MemoryClient
|
||||||
|
|
||||||
from crewai.memory.storage.interface import Storage
|
from crewai.memory.storage.interface import Storage
|
||||||
|
|
||||||
@@ -32,13 +32,16 @@ class Mem0Storage(Storage):
|
|||||||
mem0_org_id = config.get("org_id")
|
mem0_org_id = config.get("org_id")
|
||||||
mem0_project_id = config.get("project_id")
|
mem0_project_id = config.get("project_id")
|
||||||
|
|
||||||
# Initialize MemoryClient with available parameters
|
# Initialize MemoryClient or Memory based on the presence of the mem0_api_key
|
||||||
if mem0_org_id and mem0_project_id:
|
if mem0_api_key:
|
||||||
self.memory = MemoryClient(
|
if mem0_org_id and mem0_project_id:
|
||||||
api_key=mem0_api_key, org_id=mem0_org_id, project_id=mem0_project_id
|
self.memory = MemoryClient(
|
||||||
)
|
api_key=mem0_api_key, org_id=mem0_org_id, project_id=mem0_project_id
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.memory = MemoryClient(api_key=mem0_api_key)
|
||||||
else:
|
else:
|
||||||
self.memory = MemoryClient(api_key=mem0_api_key)
|
self.memory = Memory() # Fallback to Memory if no Mem0 API key is provided
|
||||||
|
|
||||||
def _sanitize_role(self, role: str) -> str:
|
def _sanitize_role(self, role: str) -> str:
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -19,6 +19,8 @@ from typing import (
|
|||||||
Tuple,
|
Tuple,
|
||||||
Type,
|
Type,
|
||||||
Union,
|
Union,
|
||||||
|
get_args,
|
||||||
|
get_origin,
|
||||||
)
|
)
|
||||||
|
|
||||||
from pydantic import (
|
from pydantic import (
|
||||||
@@ -178,15 +180,29 @@ class Task(BaseModel):
|
|||||||
"""
|
"""
|
||||||
if v is not None:
|
if v is not None:
|
||||||
sig = inspect.signature(v)
|
sig = inspect.signature(v)
|
||||||
if len(sig.parameters) != 1:
|
positional_args = [
|
||||||
|
param
|
||||||
|
for param in sig.parameters.values()
|
||||||
|
if param.default is inspect.Parameter.empty
|
||||||
|
]
|
||||||
|
if len(positional_args) != 1:
|
||||||
raise ValueError("Guardrail function must accept exactly one parameter")
|
raise ValueError("Guardrail function must accept exactly one parameter")
|
||||||
|
|
||||||
# Check return annotation if present, but don't require it
|
# Check return annotation if present, but don't require it
|
||||||
return_annotation = sig.return_annotation
|
return_annotation = sig.return_annotation
|
||||||
if return_annotation != inspect.Signature.empty:
|
if return_annotation != inspect.Signature.empty:
|
||||||
|
|
||||||
|
return_annotation_args = get_args(return_annotation)
|
||||||
if not (
|
if not (
|
||||||
return_annotation == Tuple[bool, Any]
|
get_origin(return_annotation) is tuple
|
||||||
or str(return_annotation) == "Tuple[bool, Any]"
|
and len(return_annotation_args) == 2
|
||||||
|
and return_annotation_args[0] is bool
|
||||||
|
and (
|
||||||
|
return_annotation_args[1] is Any
|
||||||
|
or return_annotation_args[1] is str
|
||||||
|
or return_annotation_args[1] is TaskOutput
|
||||||
|
or return_annotation_args[1] == Union[str, TaskOutput]
|
||||||
|
)
|
||||||
):
|
):
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
"If return type is annotated, it must be Tuple[bool, Any]"
|
"If return type is annotated, it must be Tuple[bool, Any]"
|
||||||
|
|||||||
@@ -281,8 +281,16 @@ class Telemetry:
|
|||||||
return self._safe_telemetry_operation(operation)
|
return self._safe_telemetry_operation(operation)
|
||||||
|
|
||||||
def task_ended(self, span: Span, task: Task, crew: Crew):
|
def task_ended(self, span: Span, task: Task, crew: Crew):
|
||||||
"""Records task execution in a crew."""
|
"""Records the completion of a task execution in a crew.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
span (Span): The OpenTelemetry span tracking the task execution
|
||||||
|
task (Task): The task that was completed
|
||||||
|
crew (Crew): The crew context in which the task was executed
|
||||||
|
|
||||||
|
Note:
|
||||||
|
If share_crew is enabled, this will also record the task output
|
||||||
|
"""
|
||||||
def operation():
|
def operation():
|
||||||
if crew.share_crew:
|
if crew.share_crew:
|
||||||
self._add_attribute(
|
self._add_attribute(
|
||||||
@@ -297,8 +305,13 @@ class Telemetry:
|
|||||||
self._safe_telemetry_operation(operation)
|
self._safe_telemetry_operation(operation)
|
||||||
|
|
||||||
def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int):
|
def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int):
|
||||||
"""Records the repeated usage 'error' of a tool by an agent."""
|
"""Records when a tool is used repeatedly, which might indicate an issue.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
llm (Any): The language model being used
|
||||||
|
tool_name (str): Name of the tool being repeatedly used
|
||||||
|
attempts (int): Number of attempts made with this tool
|
||||||
|
"""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Tool Repeated Usage")
|
span = tracer.start_span("Tool Repeated Usage")
|
||||||
@@ -317,8 +330,13 @@ class Telemetry:
|
|||||||
self._safe_telemetry_operation(operation)
|
self._safe_telemetry_operation(operation)
|
||||||
|
|
||||||
def tool_usage(self, llm: Any, tool_name: str, attempts: int):
|
def tool_usage(self, llm: Any, tool_name: str, attempts: int):
|
||||||
"""Records the usage of a tool by an agent."""
|
"""Records the usage of a tool by an agent.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
llm (Any): The language model being used
|
||||||
|
tool_name (str): Name of the tool being used
|
||||||
|
attempts (int): Number of attempts made with this tool
|
||||||
|
"""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Tool Usage")
|
span = tracer.start_span("Tool Usage")
|
||||||
@@ -337,8 +355,11 @@ class Telemetry:
|
|||||||
self._safe_telemetry_operation(operation)
|
self._safe_telemetry_operation(operation)
|
||||||
|
|
||||||
def tool_usage_error(self, llm: Any):
|
def tool_usage_error(self, llm: Any):
|
||||||
"""Records the usage of a tool by an agent."""
|
"""Records when a tool usage results in an error.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
llm (Any): The language model being used when the error occurred
|
||||||
|
"""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Tool Usage Error")
|
span = tracer.start_span("Tool Usage Error")
|
||||||
@@ -357,6 +378,14 @@ class Telemetry:
|
|||||||
def individual_test_result_span(
|
def individual_test_result_span(
|
||||||
self, crew: Crew, quality: float, exec_time: int, model_name: str
|
self, crew: Crew, quality: float, exec_time: int, model_name: str
|
||||||
):
|
):
|
||||||
|
"""Records individual test results for a crew execution.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
crew (Crew): The crew being tested
|
||||||
|
quality (float): Quality score of the execution
|
||||||
|
exec_time (int): Execution time in seconds
|
||||||
|
model_name (str): Name of the model used
|
||||||
|
"""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Crew Individual Test Result")
|
span = tracer.start_span("Crew Individual Test Result")
|
||||||
@@ -383,6 +412,14 @@ class Telemetry:
|
|||||||
inputs: dict[str, Any] | None,
|
inputs: dict[str, Any] | None,
|
||||||
model_name: str,
|
model_name: str,
|
||||||
):
|
):
|
||||||
|
"""Records the execution of a test suite for a crew.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
crew (Crew): The crew being tested
|
||||||
|
iterations (int): Number of test iterations
|
||||||
|
inputs (dict[str, Any] | None): Input parameters for the test
|
||||||
|
model_name (str): Name of the model used in testing
|
||||||
|
"""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Crew Test Execution")
|
span = tracer.start_span("Crew Test Execution")
|
||||||
@@ -408,6 +445,7 @@ class Telemetry:
|
|||||||
self._safe_telemetry_operation(operation)
|
self._safe_telemetry_operation(operation)
|
||||||
|
|
||||||
def deploy_signup_error_span(self):
|
def deploy_signup_error_span(self):
|
||||||
|
"""Records when an error occurs during the deployment signup process."""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Deploy Signup Error")
|
span = tracer.start_span("Deploy Signup Error")
|
||||||
@@ -417,6 +455,11 @@ class Telemetry:
|
|||||||
self._safe_telemetry_operation(operation)
|
self._safe_telemetry_operation(operation)
|
||||||
|
|
||||||
def start_deployment_span(self, uuid: Optional[str] = None):
|
def start_deployment_span(self, uuid: Optional[str] = None):
|
||||||
|
"""Records the start of a deployment process.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
uuid (Optional[str]): Unique identifier for the deployment
|
||||||
|
"""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Start Deployment")
|
span = tracer.start_span("Start Deployment")
|
||||||
@@ -428,6 +471,7 @@ class Telemetry:
|
|||||||
self._safe_telemetry_operation(operation)
|
self._safe_telemetry_operation(operation)
|
||||||
|
|
||||||
def create_crew_deployment_span(self):
|
def create_crew_deployment_span(self):
|
||||||
|
"""Records the creation of a new crew deployment."""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Create Crew Deployment")
|
span = tracer.start_span("Create Crew Deployment")
|
||||||
@@ -437,6 +481,12 @@ class Telemetry:
|
|||||||
self._safe_telemetry_operation(operation)
|
self._safe_telemetry_operation(operation)
|
||||||
|
|
||||||
def get_crew_logs_span(self, uuid: Optional[str], log_type: str = "deployment"):
|
def get_crew_logs_span(self, uuid: Optional[str], log_type: str = "deployment"):
|
||||||
|
"""Records the retrieval of crew logs.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
uuid (Optional[str]): Unique identifier for the crew
|
||||||
|
log_type (str, optional): Type of logs being retrieved. Defaults to "deployment".
|
||||||
|
"""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Get Crew Logs")
|
span = tracer.start_span("Get Crew Logs")
|
||||||
@@ -449,6 +499,11 @@ class Telemetry:
|
|||||||
self._safe_telemetry_operation(operation)
|
self._safe_telemetry_operation(operation)
|
||||||
|
|
||||||
def remove_crew_span(self, uuid: Optional[str] = None):
|
def remove_crew_span(self, uuid: Optional[str] = None):
|
||||||
|
"""Records the removal of a crew.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
uuid (Optional[str]): Unique identifier for the crew being removed
|
||||||
|
"""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Remove Crew")
|
span = tracer.start_span("Remove Crew")
|
||||||
@@ -574,6 +629,11 @@ class Telemetry:
|
|||||||
self._safe_telemetry_operation(operation)
|
self._safe_telemetry_operation(operation)
|
||||||
|
|
||||||
def flow_creation_span(self, flow_name: str):
|
def flow_creation_span(self, flow_name: str):
|
||||||
|
"""Records the creation of a new flow.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
flow_name (str): Name of the flow being created
|
||||||
|
"""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Flow Creation")
|
span = tracer.start_span("Flow Creation")
|
||||||
@@ -584,6 +644,12 @@ class Telemetry:
|
|||||||
self._safe_telemetry_operation(operation)
|
self._safe_telemetry_operation(operation)
|
||||||
|
|
||||||
def flow_plotting_span(self, flow_name: str, node_names: list[str]):
|
def flow_plotting_span(self, flow_name: str, node_names: list[str]):
|
||||||
|
"""Records flow visualization/plotting activity.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
flow_name (str): Name of the flow being plotted
|
||||||
|
node_names (list[str]): List of node names in the flow
|
||||||
|
"""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Flow Plotting")
|
span = tracer.start_span("Flow Plotting")
|
||||||
@@ -595,6 +661,12 @@ class Telemetry:
|
|||||||
self._safe_telemetry_operation(operation)
|
self._safe_telemetry_operation(operation)
|
||||||
|
|
||||||
def flow_execution_span(self, flow_name: str, node_names: list[str]):
|
def flow_execution_span(self, flow_name: str, node_names: list[str]):
|
||||||
|
"""Records the execution of a flow.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
flow_name (str): Name of the flow being executed
|
||||||
|
node_names (list[str]): List of nodes being executed in the flow
|
||||||
|
"""
|
||||||
def operation():
|
def operation():
|
||||||
tracer = trace.get_tracer("crewai.telemetry")
|
tracer = trace.get_tracer("crewai.telemetry")
|
||||||
span = tracer.start_span("Flow Execution")
|
span = tracer.start_span("Flow Execution")
|
||||||
|
|||||||
@@ -337,11 +337,23 @@ class ToolUsage:
|
|||||||
return "\n--\n".join(descriptions)
|
return "\n--\n".join(descriptions)
|
||||||
|
|
||||||
def _function_calling(self, tool_string: str):
|
def _function_calling(self, tool_string: str):
|
||||||
model = (
|
supports_function_calling = (
|
||||||
InstructorToolCalling
|
self.function_calling_llm.supports_function_calling()
|
||||||
if self.function_calling_llm.supports_function_calling()
|
|
||||||
else ToolCalling
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if not supports_function_calling:
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
warnings.warn(
|
||||||
|
"The model you're using doesn't natively support function calling. "
|
||||||
|
"CrewAI will attempt to use a workaround, but this may be less reliable. "
|
||||||
|
"Consider using a model with native function calling support for better results.",
|
||||||
|
UserWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
|
|
||||||
|
model = InstructorToolCalling if supports_function_calling else ToolCalling
|
||||||
|
|
||||||
converter = Converter(
|
converter = Converter(
|
||||||
text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal one of the options, use this text to inform the valid output schema:\n\n### TEXT \n{tool_string}",
|
text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal one of the options, use this text to inform the valid output schema:\n\n### TEXT \n{tool_string}",
|
||||||
llm=self.function_calling_llm,
|
llm=self.function_calling_llm,
|
||||||
|
|||||||
@@ -1,84 +1,8 @@
|
|||||||
import os
|
import os
|
||||||
from typing import Any, Callable, Literal, cast
|
from typing import Any, Dict, Optional, cast
|
||||||
|
|
||||||
from chromadb import Documents, EmbeddingFunction, Embeddings
|
from chromadb import Documents, EmbeddingFunction, Embeddings
|
||||||
from chromadb.api.types import validate_embedding_function
|
from chromadb.api.types import validate_embedding_function
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
|
|
||||||
class EmbeddingProviderConfig(BaseModel):
|
|
||||||
"""Configuration model for embedding providers.
|
|
||||||
|
|
||||||
Attributes:
|
|
||||||
# Core Model Configuration
|
|
||||||
model (str | None): The model identifier for embeddings, used across multiple providers
|
|
||||||
like OpenAI, Azure, Watson, etc.
|
|
||||||
embedder (str | Callable | None): Custom embedding function or callable for custom
|
|
||||||
embedding implementations.
|
|
||||||
|
|
||||||
# API Authentication & Configuration
|
|
||||||
api_key (str | None): Authentication key for various providers (OpenAI, VertexAI,
|
|
||||||
Google, Cohere, VoyageAI, Watson).
|
|
||||||
api_base (str | None): Base API URL override for OpenAI and Azure services.
|
|
||||||
api_type (str | None): API type specification, particularly used for Azure configuration.
|
|
||||||
api_version (str | None): API version for OpenAI and Azure services.
|
|
||||||
api_url (str | None): API endpoint URL, used by HuggingFace and Watson services.
|
|
||||||
url (str | None): Base URL for the embedding service, primarily used for Ollama and
|
|
||||||
HuggingFace endpoints.
|
|
||||||
|
|
||||||
# Service-Specific Configuration
|
|
||||||
project_id (str | None): Project identifier used by VertexAI and Watson services.
|
|
||||||
organization_id (str | None): Organization identifier for OpenAI and Azure services.
|
|
||||||
deployment_id (str | None): Deployment identifier for OpenAI and Azure services.
|
|
||||||
region (str | None): Geographic region for VertexAI services.
|
|
||||||
session (str | None): Session configuration for Amazon Bedrock embeddings.
|
|
||||||
|
|
||||||
# Request Configuration
|
|
||||||
task_type (str | None): Specifies the task type for Google Generative AI embeddings.
|
|
||||||
default_headers (str | None): Custom headers for OpenAI and Azure API requests.
|
|
||||||
dimensions (str | None): Output dimensions specification for OpenAI and Azure embeddings.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Core Model Configuration
|
|
||||||
model: str | None = None
|
|
||||||
embedder: str | Callable | None = None
|
|
||||||
|
|
||||||
# API Authentication & Configuration
|
|
||||||
api_key: str | None = None
|
|
||||||
api_base: str | None = None
|
|
||||||
api_type: str | None = None
|
|
||||||
api_version: str | None = None
|
|
||||||
api_url: str | None = None
|
|
||||||
url: str | None = None
|
|
||||||
|
|
||||||
# Service-Specific Configuration
|
|
||||||
project_id: str | None = None
|
|
||||||
organization_id: str | None = None
|
|
||||||
deployment_id: str | None = None
|
|
||||||
region: str | None = None
|
|
||||||
session: str | None = None
|
|
||||||
|
|
||||||
# Request Configuration
|
|
||||||
task_type: str | None = None
|
|
||||||
default_headers: str | None = None
|
|
||||||
dimensions: str | None = None
|
|
||||||
|
|
||||||
|
|
||||||
class EmbeddingConfig(BaseModel):
|
|
||||||
provider: Literal[
|
|
||||||
"openai",
|
|
||||||
"azure",
|
|
||||||
"ollama",
|
|
||||||
"vertexai",
|
|
||||||
"google",
|
|
||||||
"cohere",
|
|
||||||
"voyageai",
|
|
||||||
"bedrock",
|
|
||||||
"huggingface",
|
|
||||||
"watson",
|
|
||||||
"custom",
|
|
||||||
]
|
|
||||||
config: EmbeddingProviderConfig | None = None
|
|
||||||
|
|
||||||
|
|
||||||
class EmbeddingConfigurator:
|
class EmbeddingConfigurator:
|
||||||
@@ -99,19 +23,15 @@ class EmbeddingConfigurator:
|
|||||||
|
|
||||||
def configure_embedder(
|
def configure_embedder(
|
||||||
self,
|
self,
|
||||||
embedder_config: EmbeddingConfig | None = None,
|
embedder_config: Optional[Dict[str, Any]] = None,
|
||||||
) -> EmbeddingFunction:
|
) -> EmbeddingFunction:
|
||||||
"""Configures and returns an embedding function based on the provided config."""
|
"""Configures and returns an embedding function based on the provided config."""
|
||||||
if embedder_config is None:
|
if embedder_config is None:
|
||||||
return self._create_default_embedding_function()
|
return self._create_default_embedding_function()
|
||||||
|
|
||||||
provider = embedder_config.provider
|
provider = embedder_config.get("provider")
|
||||||
config = (
|
config = embedder_config.get("config", {})
|
||||||
embedder_config.config
|
model_name = config.get("model") if provider != "custom" else None
|
||||||
if embedder_config.config
|
|
||||||
else EmbeddingProviderConfig()
|
|
||||||
)
|
|
||||||
model_name = config.model if provider != "custom" else None
|
|
||||||
|
|
||||||
if provider not in self.embedding_functions:
|
if provider not in self.embedding_functions:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
@@ -136,123 +56,123 @@ class EmbeddingConfigurator:
|
|||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_openai(config: EmbeddingProviderConfig, model_name: str):
|
def _configure_openai(config, model_name):
|
||||||
from chromadb.utils.embedding_functions.openai_embedding_function import (
|
from chromadb.utils.embedding_functions.openai_embedding_function import (
|
||||||
OpenAIEmbeddingFunction,
|
OpenAIEmbeddingFunction,
|
||||||
)
|
)
|
||||||
|
|
||||||
return OpenAIEmbeddingFunction(
|
return OpenAIEmbeddingFunction(
|
||||||
api_key=config.api_key or os.getenv("OPENAI_API_KEY"),
|
api_key=config.get("api_key") or os.getenv("OPENAI_API_KEY"),
|
||||||
model_name=model_name,
|
model_name=model_name,
|
||||||
api_base=config.api_base,
|
api_base=config.get("api_base", None),
|
||||||
api_type=config.api_type,
|
api_type=config.get("api_type", None),
|
||||||
api_version=config.api_version,
|
api_version=config.get("api_version", None),
|
||||||
default_headers=config.default_headers,
|
default_headers=config.get("default_headers", None),
|
||||||
dimensions=config.dimensions,
|
dimensions=config.get("dimensions", None),
|
||||||
deployment_id=config.deployment_id,
|
deployment_id=config.get("deployment_id", None),
|
||||||
organization_id=config.organization_id,
|
organization_id=config.get("organization_id", None),
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_azure(config: EmbeddingProviderConfig, model_name: str):
|
def _configure_azure(config, model_name):
|
||||||
from chromadb.utils.embedding_functions.openai_embedding_function import (
|
from chromadb.utils.embedding_functions.openai_embedding_function import (
|
||||||
OpenAIEmbeddingFunction,
|
OpenAIEmbeddingFunction,
|
||||||
)
|
)
|
||||||
|
|
||||||
return OpenAIEmbeddingFunction(
|
return OpenAIEmbeddingFunction(
|
||||||
api_key=config.api_key,
|
api_key=config.get("api_key"),
|
||||||
api_base=config.api_base,
|
api_base=config.get("api_base"),
|
||||||
api_type=config.api_type if config.api_type else "azure",
|
api_type=config.get("api_type", "azure"),
|
||||||
api_version=config.api_version,
|
api_version=config.get("api_version"),
|
||||||
model_name=model_name,
|
model_name=model_name,
|
||||||
default_headers=config.default_headers,
|
default_headers=config.get("default_headers"),
|
||||||
dimensions=config.dimensions,
|
dimensions=config.get("dimensions"),
|
||||||
deployment_id=config.deployment_id,
|
deployment_id=config.get("deployment_id"),
|
||||||
organization_id=config.organization_id,
|
organization_id=config.get("organization_id"),
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_ollama(config: EmbeddingProviderConfig, model_name: str):
|
def _configure_ollama(config, model_name):
|
||||||
from chromadb.utils.embedding_functions.ollama_embedding_function import (
|
from chromadb.utils.embedding_functions.ollama_embedding_function import (
|
||||||
OllamaEmbeddingFunction,
|
OllamaEmbeddingFunction,
|
||||||
)
|
)
|
||||||
|
|
||||||
return OllamaEmbeddingFunction(
|
return OllamaEmbeddingFunction(
|
||||||
url=config.url if config.url else "http://localhost:11434/api/embeddings",
|
url=config.get("url", "http://localhost:11434/api/embeddings"),
|
||||||
model_name=model_name,
|
model_name=model_name,
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_vertexai(config: EmbeddingProviderConfig, model_name: str):
|
def _configure_vertexai(config, model_name):
|
||||||
from chromadb.utils.embedding_functions.google_embedding_function import (
|
from chromadb.utils.embedding_functions.google_embedding_function import (
|
||||||
GoogleVertexEmbeddingFunction,
|
GoogleVertexEmbeddingFunction,
|
||||||
)
|
)
|
||||||
|
|
||||||
return GoogleVertexEmbeddingFunction(
|
return GoogleVertexEmbeddingFunction(
|
||||||
model_name=model_name,
|
model_name=model_name,
|
||||||
api_key=config.api_key,
|
api_key=config.get("api_key"),
|
||||||
project_id=config.project_id,
|
project_id=config.get("project_id"),
|
||||||
region=config.region,
|
region=config.get("region"),
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_google(config: EmbeddingProviderConfig, model_name: str):
|
def _configure_google(config, model_name):
|
||||||
from chromadb.utils.embedding_functions.google_embedding_function import (
|
from chromadb.utils.embedding_functions.google_embedding_function import (
|
||||||
GoogleGenerativeAiEmbeddingFunction,
|
GoogleGenerativeAiEmbeddingFunction,
|
||||||
)
|
)
|
||||||
|
|
||||||
return GoogleGenerativeAiEmbeddingFunction(
|
return GoogleGenerativeAiEmbeddingFunction(
|
||||||
model_name=model_name,
|
model_name=model_name,
|
||||||
api_key=config.api_key,
|
api_key=config.get("api_key"),
|
||||||
task_type=config.task_type,
|
task_type=config.get("task_type"),
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_cohere(config: EmbeddingProviderConfig, model_name: str):
|
def _configure_cohere(config, model_name):
|
||||||
from chromadb.utils.embedding_functions.cohere_embedding_function import (
|
from chromadb.utils.embedding_functions.cohere_embedding_function import (
|
||||||
CohereEmbeddingFunction,
|
CohereEmbeddingFunction,
|
||||||
)
|
)
|
||||||
|
|
||||||
return CohereEmbeddingFunction(
|
return CohereEmbeddingFunction(
|
||||||
model_name=model_name,
|
model_name=model_name,
|
||||||
api_key=config.api_key,
|
api_key=config.get("api_key"),
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_voyageai(config: EmbeddingProviderConfig, model_name: str):
|
def _configure_voyageai(config, model_name):
|
||||||
from chromadb.utils.embedding_functions.voyageai_embedding_function import (
|
from chromadb.utils.embedding_functions.voyageai_embedding_function import (
|
||||||
VoyageAIEmbeddingFunction,
|
VoyageAIEmbeddingFunction,
|
||||||
)
|
)
|
||||||
|
|
||||||
return VoyageAIEmbeddingFunction(
|
return VoyageAIEmbeddingFunction(
|
||||||
model_name=model_name,
|
model_name=model_name,
|
||||||
api_key=config.api_key,
|
api_key=config.get("api_key"),
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_bedrock(config: EmbeddingProviderConfig, model_name: str):
|
def _configure_bedrock(config, model_name):
|
||||||
from chromadb.utils.embedding_functions.amazon_bedrock_embedding_function import (
|
from chromadb.utils.embedding_functions.amazon_bedrock_embedding_function import (
|
||||||
AmazonBedrockEmbeddingFunction,
|
AmazonBedrockEmbeddingFunction,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Allow custom model_name override with backwards compatibility
|
# Allow custom model_name override with backwards compatibility
|
||||||
kwargs = {"session": config.session}
|
kwargs = {"session": config.get("session")}
|
||||||
if model_name is not None:
|
if model_name is not None:
|
||||||
kwargs["model_name"] = model_name
|
kwargs["model_name"] = model_name
|
||||||
return AmazonBedrockEmbeddingFunction(**kwargs)
|
return AmazonBedrockEmbeddingFunction(**kwargs)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_huggingface(config: EmbeddingProviderConfig, model_name: str):
|
def _configure_huggingface(config, model_name):
|
||||||
from chromadb.utils.embedding_functions.huggingface_embedding_function import (
|
from chromadb.utils.embedding_functions.huggingface_embedding_function import (
|
||||||
HuggingFaceEmbeddingServer,
|
HuggingFaceEmbeddingServer,
|
||||||
)
|
)
|
||||||
|
|
||||||
return HuggingFaceEmbeddingServer(
|
return HuggingFaceEmbeddingServer(
|
||||||
url=config.api_url,
|
url=config.get("api_url"),
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_watson(config: EmbeddingProviderConfig, model_name: str):
|
def _configure_watson(config, model_name):
|
||||||
try:
|
try:
|
||||||
import ibm_watsonx_ai.foundation_models as watson_models
|
import ibm_watsonx_ai.foundation_models as watson_models
|
||||||
from ibm_watsonx_ai import Credentials
|
from ibm_watsonx_ai import Credentials
|
||||||
@@ -273,10 +193,12 @@ class EmbeddingConfigurator:
|
|||||||
}
|
}
|
||||||
|
|
||||||
embedding = watson_models.Embeddings(
|
embedding = watson_models.Embeddings(
|
||||||
model_id=config.model,
|
model_id=config.get("model"),
|
||||||
params=embed_params,
|
params=embed_params,
|
||||||
credentials=Credentials(api_key=config.api_key, url=config.api_url),
|
credentials=Credentials(
|
||||||
project_id=config.project_id,
|
api_key=config.get("api_key"), url=config.get("api_url")
|
||||||
|
),
|
||||||
|
project_id=config.get("project_id"),
|
||||||
)
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -289,8 +211,8 @@ class EmbeddingConfigurator:
|
|||||||
return WatsonEmbeddingFunction()
|
return WatsonEmbeddingFunction()
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _configure_custom(config: EmbeddingProviderConfig):
|
def _configure_custom(config):
|
||||||
custom_embedder = config.embedder
|
custom_embedder = config.get("embedder")
|
||||||
if isinstance(custom_embedder, EmbeddingFunction):
|
if isinstance(custom_embedder, EmbeddingFunction):
|
||||||
try:
|
try:
|
||||||
validate_embedding_function(custom_embedder)
|
validate_embedding_function(custom_embedder)
|
||||||
|
|||||||
@@ -67,15 +67,12 @@ class CrewAIEventsBus:
|
|||||||
source: The object emitting the event
|
source: The object emitting the event
|
||||||
event: The event instance to emit
|
event: The event instance to emit
|
||||||
"""
|
"""
|
||||||
event_type = type(event)
|
for event_type, handlers in self._handlers.items():
|
||||||
if event_type in self._handlers:
|
if isinstance(event, event_type):
|
||||||
for handler in self._handlers[event_type]:
|
for handler in handlers:
|
||||||
handler(source, event)
|
handler(source, event)
|
||||||
self._signal.send(source, event=event)
|
|
||||||
|
|
||||||
def clear_handlers(self) -> None:
|
self._signal.send(source, event=event)
|
||||||
"""Clear all registered event handlers - useful for testing"""
|
|
||||||
self._handlers.clear()
|
|
||||||
|
|
||||||
def register_handler(
|
def register_handler(
|
||||||
self, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
|
self, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
|
||||||
|
|||||||
@@ -96,6 +96,10 @@ class CrewPlanner:
|
|||||||
tasks_summary = []
|
tasks_summary = []
|
||||||
for idx, task in enumerate(self.tasks):
|
for idx, task in enumerate(self.tasks):
|
||||||
knowledge_list = self._get_agent_knowledge(task)
|
knowledge_list = self._get_agent_knowledge(task)
|
||||||
|
agent_tools = (
|
||||||
|
f"[{', '.join(str(tool) for tool in task.agent.tools)}]" if task.agent and task.agent.tools else '"agent has no tools"',
|
||||||
|
f',\n "agent_knowledge": "[\\"{knowledge_list[0]}\\"]"' if knowledge_list and str(knowledge_list) != "None" else ""
|
||||||
|
)
|
||||||
task_summary = f"""
|
task_summary = f"""
|
||||||
Task Number {idx + 1} - {task.description}
|
Task Number {idx + 1} - {task.description}
|
||||||
"task_description": {task.description}
|
"task_description": {task.description}
|
||||||
@@ -103,10 +107,7 @@ class CrewPlanner:
|
|||||||
"agent": {task.agent.role if task.agent else "None"}
|
"agent": {task.agent.role if task.agent else "None"}
|
||||||
"agent_goal": {task.agent.goal if task.agent else "None"}
|
"agent_goal": {task.agent.goal if task.agent else "None"}
|
||||||
"task_tools": {task.tools}
|
"task_tools": {task.tools}
|
||||||
"agent_tools": %s%s""" % (
|
"agent_tools": {"".join(agent_tools)}"""
|
||||||
f"[{', '.join(str(tool) for tool in task.agent.tools)}]" if task.agent and task.agent.tools else '"agent has no tools"',
|
|
||||||
f',\n "agent_knowledge": "[\\"{knowledge_list[0]}\\"]"' if knowledge_list and str(knowledge_list) != "None" else ""
|
|
||||||
)
|
|
||||||
|
|
||||||
tasks_summary.append(task_summary)
|
tasks_summary.append(task_summary)
|
||||||
return " ".join(tasks_summary)
|
return " ".join(tasks_summary)
|
||||||
|
|||||||
@@ -3,6 +3,8 @@
|
|||||||
import hashlib
|
import hashlib
|
||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
|
from functools import partial
|
||||||
|
from typing import Tuple, Union
|
||||||
from unittest.mock import MagicMock, patch
|
from unittest.mock import MagicMock, patch
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
@@ -215,6 +217,75 @@ def test_multiple_output_type_error():
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def test_guardrail_type_error():
|
||||||
|
desc = "Give me a list of 5 interesting ideas to explore for na article, what makes them unique and interesting."
|
||||||
|
expected_output = "Bullet point list of 5 interesting ideas."
|
||||||
|
# Lambda function
|
||||||
|
Task(
|
||||||
|
description=desc,
|
||||||
|
expected_output=expected_output,
|
||||||
|
guardrail=lambda x: (True, x),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Function
|
||||||
|
def guardrail_fn(x: TaskOutput) -> tuple[bool, TaskOutput]:
|
||||||
|
return (True, x)
|
||||||
|
|
||||||
|
Task(
|
||||||
|
description=desc,
|
||||||
|
expected_output=expected_output,
|
||||||
|
guardrail=guardrail_fn,
|
||||||
|
)
|
||||||
|
|
||||||
|
class Object:
|
||||||
|
def guardrail_fn(self, x: TaskOutput) -> tuple[bool, TaskOutput]:
|
||||||
|
return (True, x)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def guardrail_class_fn(cls, x: TaskOutput) -> tuple[bool, str]:
|
||||||
|
return (True, x)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def guardrail_static_fn(x: TaskOutput) -> tuple[bool, Union[str, TaskOutput]]:
|
||||||
|
return (True, x)
|
||||||
|
|
||||||
|
obj = Object()
|
||||||
|
# Method
|
||||||
|
Task(
|
||||||
|
description=desc,
|
||||||
|
expected_output=expected_output,
|
||||||
|
guardrail=obj.guardrail_fn,
|
||||||
|
)
|
||||||
|
# Class method
|
||||||
|
Task(
|
||||||
|
description=desc,
|
||||||
|
expected_output=expected_output,
|
||||||
|
guardrail=Object.guardrail_class_fn,
|
||||||
|
)
|
||||||
|
# Static method
|
||||||
|
Task(
|
||||||
|
description=desc,
|
||||||
|
expected_output=expected_output,
|
||||||
|
guardrail=Object.guardrail_static_fn,
|
||||||
|
)
|
||||||
|
|
||||||
|
def error_fn(x: TaskOutput, y: bool) -> Tuple[bool, TaskOutput]:
|
||||||
|
return (y, x)
|
||||||
|
|
||||||
|
Task(
|
||||||
|
description=desc,
|
||||||
|
expected_output=expected_output,
|
||||||
|
guardrail=partial(error_fn, y=True),
|
||||||
|
)
|
||||||
|
|
||||||
|
with pytest.raises(ValidationError):
|
||||||
|
Task(
|
||||||
|
description=desc,
|
||||||
|
expected_output=expected_output,
|
||||||
|
guardrail=error_fn,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||||
def test_output_pydantic_sequential():
|
def test_output_pydantic_sequential():
|
||||||
class ScoreOutput(BaseModel):
|
class ScoreOutput(BaseModel):
|
||||||
|
|||||||
34
tests/utilities/events/test_crewai_event_bus.py
Normal file
34
tests/utilities/events/test_crewai_event_bus.py
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
from unittest.mock import Mock
|
||||||
|
|
||||||
|
from crewai.utilities.events.base_events import CrewEvent
|
||||||
|
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
|
||||||
|
|
||||||
|
|
||||||
|
class TestEvent(CrewEvent):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def test_specific_event_handler():
|
||||||
|
mock_handler = Mock()
|
||||||
|
|
||||||
|
@crewai_event_bus.on(TestEvent)
|
||||||
|
def handler(source, event):
|
||||||
|
mock_handler(source, event)
|
||||||
|
|
||||||
|
event = TestEvent(type="test_event")
|
||||||
|
crewai_event_bus.emit("source_object", event)
|
||||||
|
|
||||||
|
mock_handler.assert_called_once_with("source_object", event)
|
||||||
|
|
||||||
|
|
||||||
|
def test_wildcard_event_handler():
|
||||||
|
mock_handler = Mock()
|
||||||
|
|
||||||
|
@crewai_event_bus.on(CrewEvent)
|
||||||
|
def handler(source, event):
|
||||||
|
mock_handler(source, event)
|
||||||
|
|
||||||
|
event = TestEvent(type="test_event")
|
||||||
|
crewai_event_bus.emit("source_object", event)
|
||||||
|
|
||||||
|
mock_handler.assert_called_once_with("source_object", event)
|
||||||
Reference in New Issue
Block a user