Mirror of https://github.com/crewAIInc/crewAI.git (synced 2025-12-29 18:58:30 +00:00)

Compare commits: lg-fix-env...lg-memory- (12 commits)
| SHA1 |
|---|
| df4754301a |
| ae57e5723c |
| ab39753a75 |
| 640e1a7bc2 |
| e544ff8ba3 |
| 49c0144154 |
| 2ab002a5bf |
| b7bf15681e |
| af9c01f5d3 |
| 5a12b51ba2 |
| 576b8ff836 |
| b35c3e8024 |
.cursorrules (new file, 1429 lines): file diff suppressed because it is too large.
```json
@@ -41,6 +41,11 @@
          "anchor": "Get Help",
          "href": "mailto:support@crewai.com",
          "icon": "headset"
        },
        {
          "anchor": "Releases",
          "href": "https://github.com/crewAIInc/crewAI/releases",
          "icon": "tag"
        }
      ]
    },
@@ -89,6 +94,7 @@
          "pages": [
            "en/guides/advanced/customizing-prompts",
            "en/guides/advanced/fingerprinting"

          ]
        }
      ]
```
```json
@@ -358,7 +364,7 @@
        },
        {
          "tab": "Examples",
          "groups": [
            {
              "group": "Examples",
              "pages": [
@@ -366,18 +372,8 @@
              ]
            }
          ]
        },
        {
          "tab": "Releases",
          "groups": [
            {
              "group": "Releases",
              "pages": [
                "en/changelog"
              ]
            }
          ]
        }

      ]
    },
    {
```
```json
@@ -403,6 +399,11 @@
          "anchor": "Obter Ajuda",
          "href": "mailto:support@crewai.com",
          "icon": "headset"
        },
        {
          "anchor": "Lançamentos",
          "href": "https://github.com/crewAIInc/crewAI/releases",
          "icon": "tag"
        }
      ]
    },
```
```json
@@ -720,7 +721,7 @@
        },
        {
          "tab": "Exemplos",
          "groups": [
            {
              "group": "Exemplos",
              "pages": [
@@ -728,18 +729,8 @@
              ]
            }
          ]
        },
        {
          "tab": "Lançamentos",
          "groups": [
            {
              "group": "Lançamentos",
              "pages": [
                "pt-BR/changelog"
              ]
            }
          ]
        }

      ]
    }
  ]
```
```json
@@ -777,6 +768,64 @@
  "seo": {
    "indexing": "all"
  },
  "redirects": [
    {
      "source": "/introduction",
      "destination": "/en/introduction"
    },
    {
      "source": "/installation",
      "destination": "/en/installation"
    },
    {
      "source": "/quickstart",
      "destination": "/en/quickstart"
    },
    {
      "source": "/changelog",
      "destination": "https://github.com/crewAIInc/crewAI/releases"
    },
    {
      "source": "/telemetry",
      "destination": "/en/telemetry"
    },
    {
      "source": "/concepts/:path*",
      "destination": "/en/concepts/:path*"
    },
    {
      "source": "/guides/:path*",
      "destination": "/en/guides/:path*"
    },
    {
      "source": "/tools/:path*",
      "destination": "/en/tools/:path*"
    },
    {
      "source": "/learn/:path*",
      "destination": "/en/learn/:path*"
    },
    {
      "source": "/mcp/:path*",
      "destination": "/en/mcp/:path*"
    },
    {
      "source": "/observability/:path*",
      "destination": "/en/observability/:path*"
    },
    {
      "source": "/enterprise/:path*",
      "destination": "/en/enterprise/:path*"
    },
    {
      "source": "/api-reference/:path*",
      "destination": "/en/api-reference/:path*"
    },
    {
      "source": "/examples/:path*",
      "destination": "/en/examples/:path*"
    }
  ],
  "errors": {
    "404": {
      "redirect": true

```
@@ -1,473 +0,0 @@
---
title: Changelog
description: View the latest updates and changes to CrewAI
icon: timeline
---

<Update label="2025-05-22" description="v0.121.0" tags={["Latest"]}>
## Release Highlights
<Frame>
  <img src="/images/releases/v01210.png" />
</Frame>

<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
  <a href="https://github.com/crewAIInc/crewAI/releases/tag/0.121.0">View on GitHub</a>
</div>

**Core Improvements & Fixes**
- Fixed encoding error when creating tools
- Fixed failing llama test
- Updated logging configuration for consistency
- Enhanced telemetry initialization and event handling

**New Features & Enhancements**
- Added **markdown attribute** to the Task class
- Added **reasoning attribute** to the Agent class
- Added **inject_date flag** to Agent for automatic date injection
- Implemented **HallucinationGuardrail** (no-op with test coverage)

**Documentation & Guides**
- Added documentation for **StagehandTool** and improved MDX structure
- Added documentation for **MCP integration** and updated enterprise docs
- Documented knowledge events and updated reasoning docs
- Added stop parameter documentation
- Fixed import references in doc examples (before_kickoff, after_kickoff)
- General docs updates and restructuring for clarity
</Update>

<Update label="2024-05-15" description="v0.120.1">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v01201.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.120.1">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**Core Improvements & Fixes**
|
||||
- Fixed **interpolation with hyphens**
|
||||
</Update>
|
||||
|
||||
<Update label="2024-05-14" description="v0.120.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v01200.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.120.0">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**Core Improvements & Fixes**
|
||||
- Enabled **full Ruff rule set** by default for stricter linting
|
||||
- Addressed race condition in FilteredStream using context managers
|
||||
- Fixed agent knowledge reset issue
|
||||
- Refactored agent fetching logic into utility module
|
||||
|
||||
**New Features & Enhancements**
|
||||
- Added support for **loading an Agent directly from a repository**
|
||||
- Enabled setting an empty context for Task
|
||||
- Enhanced Agent repository feedback and fixed Tool auto-import behavior
|
||||
- Introduced direct initialization of knowledge (bypassing knowledge_sources)
|
||||
|
||||
**Documentation & Guides**
|
||||
- Updated security.md for current security practices
|
||||
- Cleaned up Google setup section for clarity
|
||||
- Added link to AI Studio when entering Gemini key
|
||||
- Updated Arize Phoenix observability guide
|
||||
- Refreshed flow documentation
|
||||
</Update>
|
||||
|
||||
<Update label="2024-05-08" description="v0.119.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v01190.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.119.0">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**Core Improvements & Fixes**
|
||||
- Improved test reliability by enhancing pytest handling for flaky tests
|
||||
- Fixed memory reset crash when embedding dimensions mismatch
|
||||
- Enabled parent flow identification for Crew and LiteAgent
|
||||
- Prevented telemetry-related crashes when unavailable
|
||||
- Upgraded **LiteLLM version** for better compatibility
|
||||
- Fixed llama converter tests by removing skip_external_api
|
||||
|
||||
**New Features & Enhancements**
|
||||
- Introduced **knowledge retrieval prompt re-writing** in Agent for improved tracking and debugging
|
||||
- Made LLM setup and quickstart guides model-agnostic
|
||||
|
||||
**Documentation & Guides**
|
||||
- Added advanced configuration docs for the RAG tool
|
||||
- Updated Windows troubleshooting guide
|
||||
- Refined documentation examples for better clarity
|
||||
- Fixed typos across docs and config files
|
||||
</Update>
|
||||
|
||||
<Update label="2024-04-28" description="v0.118.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v01180.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.118.0">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**Core Improvements & Fixes**
|
||||
- Fixed issues with missing prompt or system templates
|
||||
- Removed global logging configuration to avoid unintended overrides
|
||||
- Renamed **TaskGuardrail to LLMGuardrail** for improved clarity
|
||||
- Downgraded litellm to version 1.167.1 for compatibility
|
||||
- Added missing init.py files to ensure proper module initialization
|
||||
|
||||
**New Features & Enhancements**
|
||||
- Added support for **no-code Guardrail creation** to simplify AI behavior controls
|
||||
|
||||
**Documentation & Guides**
|
||||
- Removed CrewStructuredTool from public documentation to reflect internal usage
|
||||
- Updated enterprise documentation and YouTube embed for improved onboarding experience
|
||||
</Update>
|
||||
|
||||
<Update label="2024-04-20" description="v0.117.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v01170.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.117.0">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**New Features & Enhancements**
|
||||
- Added `result_as_answer` parameter support in `@tool` decorator.
|
||||
- Introduced support for new language models: GPT-4.1, Gemini-2.0, and Gemini-2.5 Pro.
|
||||
- Enhanced knowledge management capabilities.
|
||||
- Added Huggingface provider option in CLI.
|
||||
- Improved compatibility and CI support for Python 3.10+.
|
||||
|
||||
**Core Improvements & Fixes**
|
||||
- Fixed issues with incorrect template parameters and missing inputs.
|
||||
- Improved asynchronous flow handling with coroutine condition checks.
|
||||
- Enhanced memory management with isolated configuration and correct memory object copying.
|
||||
- Fixed initialization of lite agents with correct references.
|
||||
- Addressed Python type hint issues and removed redundant imports.
|
||||
- Updated event placement for improved tool usage tracking.
|
||||
- Raised explicit exceptions when flows fail.
|
||||
- Removed unused code and redundant comments from various modules.
|
||||
- Updated GitHub App token action to v2.
|
||||
|
||||
**Documentation & Guides**
|
||||
- Enhanced documentation structure, including enterprise deployment instructions.
|
||||
- Automatically create output folders for documentation generation.
|
||||
- Fixed broken link in WeaviateVectorSearchTool documentation.
|
||||
- Fixed guardrail documentation usage and import paths for JSON search tools.
|
||||
- Updated documentation for CodeInterpreterTool.
|
||||
- Improved SEO, contextual navigation, and error handling for documentation pages.
|
||||
</Update>
|
||||
|
||||
<Update label="2024-04-25" description="v0.117.1">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v01171.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.117.1">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**Core Improvements & Fixes**
|
||||
- Upgraded **crewai-tools** to latest version
|
||||
- Upgraded **liteLLM** to latest version
|
||||
- Fixed **Mem0 OSS**
|
||||
</Update>
|
||||
|
||||
<Update label="2024-04-07" description="v0.114.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v01140.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.114.0">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**New Features & Enhancements**
|
||||
- Agents as an atomic unit. (`Agent(...).kickoff()`)
|
||||
- Support for [Custom LLM implementations](https://docs.crewai.com/guides/advanced/custom-llm).
|
||||
- Integrated External Memory and [Opik observability](https://docs.crewai.com/how-to/opik-observability).
|
||||
- Enhanced YAML extraction.
|
||||
- Multimodal agent validation.
|
||||
- Added Secure fingerprints for agents and crews.
|
||||
|
||||
**Core Improvements & Fixes**
|
||||
- Improved serialization, agent copying, and Python compatibility.
|
||||
- Added wildcard support to `emit()`
|
||||
- Added support for additional router calls and context window adjustments.
|
||||
- Fixed typing issues, validation, and import statements.
|
||||
- Improved method performance.
|
||||
- Enhanced agent task handling, event emissions, and memory management.
|
||||
- Fixed CLI issues, conditional tasks, cloning behavior, and tool outputs.
|
||||
|
||||
**Documentation & Guides**
|
||||
- Improved documentation structure, theme, and organization.
|
||||
- Added guides for Local NVIDIA NIM with WSL2, W&B Weave, and Arize Phoenix.
|
||||
- Updated tool configuration examples, prompts, and observability docs.
|
||||
- Guide on using singular agents within Flows.
|
||||
</Update>
|
||||
|
||||
<Update label="2024-03-17" description="v0.108.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v01080.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.108.0">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**New Features & Enhancements**
|
||||
- Converted tabs to spaces in `crew.py` template
|
||||
- Enhanced LLM Streaming Response Handling and Event System
|
||||
- Included `model_name`
|
||||
- Enhanced Event Listener with rich visualization and improved logging
|
||||
- Added fingerprints
|
||||
|
||||
**Bug Fixes**
|
||||
- Fixed Mistral issues
|
||||
- Fixed a bug in documentation
|
||||
- Fixed type check error in fingerprint property
|
||||
|
||||
**Documentation Updates**
|
||||
- Improved tool documentation
|
||||
- Updated installation guide for the `uv` tool package
|
||||
- Added instructions for upgrading crewAI with the `uv` tool
|
||||
- Added documentation for `ApifyActorsTool`
|
||||
</Update>
|
||||
|
||||
<Update label="2024-03-10" description="v0.105.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v01050.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.105.0">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**Core Improvements & Fixes**
|
||||
- Fixed issues with missing template variables and user memory configuration
|
||||
- Improved async flow support and addressed agent response formatting
|
||||
- Enhanced memory reset functionality and fixed CLI memory commands
|
||||
- Fixed type issues, tool calling properties, and telemetry decoupling
|
||||
|
||||
**New Features & Enhancements**
|
||||
- Added Flow state export and improved state utilities
|
||||
- Enhanced agent knowledge setup with optional crew embedder
|
||||
- Introduced event emitter for better observability and LLM call tracking
|
||||
- Added support for Python 3.10 and ChatOllama from langchain_ollama
|
||||
- Integrated context window size support for the o3-mini model
|
||||
- Added support for multiple router calls
|
||||
|
||||
**Documentation & Guides**
|
||||
- Improved documentation layout and hierarchical structure
|
||||
- Added QdrantVectorSearchTool guide and clarified event listener usage
|
||||
- Fixed typos in prompts and updated Amazon Bedrock model listings
|
||||
</Update>
|
||||
|
||||
<Update label="2024-02-12" description="v0.102.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v01020.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.102.0">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**Core Improvements & Fixes**
|
||||
- Enhanced LLM Support: Improved structured LLM output, parameter handling, and formatting for Anthropic models
|
||||
- Crew & Agent Stability: Fixed issues with cloning agents/crews using knowledge sources, multiple task outputs in conditional tasks, and ignored Crew task callbacks
|
||||
- Memory & Storage Fixes: Fixed short-term memory handling with Bedrock, ensured correct embedder initialization, and added a reset memories function in the crew class
|
||||
- Training & Execution Reliability: Fixed broken training and interpolation issues with dict and list input types
|
||||
|
||||
**New Features & Enhancements**
|
||||
- Advanced Knowledge Management: Improved naming conventions and enhanced embedding configuration with custom embedder support
|
||||
- Expanded Logging & Observability: Added JSON format support for logging and integrated MLflow tracing documentation
|
||||
- Data Handling Improvements: Updated excel_knowledge_source.py to process multi-tab files
|
||||
- General Performance & Codebase Clean-Up: Streamlined enterprise code alignment and resolved linting issues
|
||||
- Adding new tool: `QdrantVectorSearchTool`
|
||||
|
||||
**Documentation & Guides**
|
||||
- Updated AI & Memory Docs: Improved Bedrock, Google AI, and long-term memory documentation
|
||||
- Task & Workflow Clarity: Added "Human Input" row to Task Attributes, Langfuse guide, and FileWriterTool documentation
|
||||
- Fixed Various Typos & Formatting Issues
|
||||
</Update>
|
||||
|
||||
<Update label="2024-01-28" description="v0.100.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v01000.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.100.0">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**Features**
|
||||
- Add Composio docs
|
||||
- Add SageMaker as a LLM provider
|
||||
|
||||
**Fixes**
|
||||
- Overall LLM connection issues
|
||||
- Using safe accessors on training
|
||||
- Add version check to crew_chat.py
|
||||
|
||||
**Documentation**
|
||||
- New docs for crewai chat
|
||||
- Improve formatting and clarity in CLI and Composio Tool docs
|
||||
</Update>
|
||||
|
||||
<Update label="2024-01-20" description="v0.98.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v0980.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.98.0">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**Features**
|
||||
- Conversation crew v1
|
||||
- Add unique ID to flow states
|
||||
- Add @persist decorator with FlowPersistence interface
|
||||
|
||||
**Integrations**
|
||||
- Add SambaNova integration
|
||||
- Add NVIDIA NIM provider in cli
|
||||
- Introducing VoyageAI
|
||||
|
||||
**Fixes**
|
||||
- Fix API Key Behavior and Entity Handling in Mem0 Integration
|
||||
- Fixed core invoke loop logic and relevant tests
|
||||
- Make tool inputs actual objects and not strings
|
||||
- Add important missing parts to creating tools
|
||||
- Drop litellm version to prevent windows issue
|
||||
- Before kickoff if inputs are none
|
||||
- Fixed typos, nested pydantic model issue, and docling issues
|
||||
</Update>
|
||||
|
||||
<Update label="2024-01-04" description="v0.95.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v0950.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.95.0">View on GitHub</a>
|
||||
</div>
|
||||
|
||||
**New Features**
|
||||
- Adding Multimodal Abilities to Crew
|
||||
- Programatic Guardrails
|
||||
- HITL multiple rounds
|
||||
- Gemini 2.0 Support
|
||||
- CrewAI Flows Improvements
|
||||
- Add Workflow Permissions
|
||||
- Add support for langfuse with litellm
|
||||
- Portkey Integration with CrewAI
|
||||
- Add interpolate_only method and improve error handling
|
||||
- Docling Support
|
||||
- Weviate Support
|
||||
|
||||
**Fixes**
|
||||
- output_file not respecting system path
|
||||
- disk I/O error when resetting short-term memory
|
||||
- CrewJSONEncoder now accepts enums
|
||||
- Python max version
|
||||
- Interpolation for output_file in Task
|
||||
- Handle coworker role name case/whitespace properly
|
||||
- Add tiktoken as explicit dependency and document Rust requirement
|
||||
- Include agent knowledge in planning process
|
||||
- Change storage initialization to None for KnowledgeStorage
|
||||
- Fix optional storage checks
|
||||
- include event emitter in flows
|
||||
- Docstring, Error Handling, and Type Hints Improvements
|
||||
- Suppressed userWarnings from litellm pydantic issues
|
||||
</Update>
|
||||
|
||||
<Update label="2024-12-05" description="v0.86.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v0860.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.86.0">View on GitHub</a>
|
||||
</div>
|
||||
**Changes**
|
||||
- Remove all references to pipeline and pipeline router
|
||||
- Add Nvidia NIM as provider in Custom LLM
|
||||
- Add knowledge demo + improve knowledge docs
|
||||
- Add HITL multiple rounds of followup
|
||||
- New docs about yaml crew with decorators
|
||||
- Simplify template crew
|
||||
</Update>
|
||||
|
||||
<Update label="2024-12-04" description="v0.85.0">
|
||||
## Release Highlights
|
||||
<Frame>
|
||||
<img src="/images/releases/v0850.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.85.0">View on GitHub</a>
|
||||
</div>
|
||||
**Features**
|
||||
- Added knowledge to agent level
|
||||
- Feat/remove langchain
|
||||
- Improve typed task outputs
|
||||
- Log in to Tool Repository on crewai login
|
||||
|
||||
**Fixes**
|
||||
- Fixes issues with result as answer not properly exiting LLM loop
|
||||
- Fix missing key name when running with ollama provider
|
||||
- Fix spelling issue found
|
||||
|
||||
**Documentation**
|
||||
- Update readme for running mypy
|
||||
- Add knowledge to mint.json
|
||||
- Update Github actions
|
||||
- Update Agents docs to include two approaches for creating an agent
|
||||
- Improvements to LLM Configuration and Usage
|
||||
</Update>
|
||||
|
||||
<Update label="2024-11-25" description="v0.83.0">
|
||||
**New Features**
|
||||
- New before_kickoff and after_kickoff crew callbacks
|
||||
- Support to pre-seed agents with Knowledge
|
||||
- Add support for retrieving user preferences and memories using Mem0
|
||||
|
||||
**Fixes**
|
||||
- Fix Async Execution
|
||||
- Upgrade chroma and adjust embedder function generator
|
||||
- Update CLI Watson supported models + docs
|
||||
- Reduce level for Bandit
|
||||
- Fixing all tests
|
||||
|
||||
**Documentation**
|
||||
- Update Docs
|
||||
</Update>
|
||||
|
||||
<Update label="2024-11-13" description="v0.80.0">
|
||||
**Fixes**
|
||||
- Fixing Tokens callback replacement bug
|
||||
- Fixing Step callback issue
|
||||
- Add cached prompt tokens info on usage metrics
|
||||
- Fix crew_train_success test
|
||||
</Update>
|
||||
@@ -255,6 +255,17 @@ CrewAI provides a wide range of events that you can listen for:

- **LLMCallFailedEvent**: Emitted when an LLM call fails
- **LLMStreamChunkEvent**: Emitted for each chunk received during streaming LLM responses

### Memory Events

- **MemoryQueryStartedEvent**: Emitted when a memory query is started. Contains the query, limit, and optional score threshold.
- **MemoryQueryCompletedEvent**: Emitted when a memory query is completed successfully. Contains the query, results, limit, score threshold, and query execution time.
- **MemoryQueryFailedEvent**: Emitted when a memory query fails. Contains the query, limit, score threshold, and error message.
- **MemorySaveStartedEvent**: Emitted when a memory save operation is started. Contains the value to be saved, metadata, and optional agent role.
- **MemorySaveCompletedEvent**: Emitted when a memory save operation is completed successfully. Contains the saved value, metadata, agent role, and save execution time.
- **MemorySaveFailedEvent**: Emitted when a memory save operation fails. Contains the value, metadata, agent role, and error message.
- **MemoryRetrievalStartedEvent**: Emitted when memory retrieval for a task prompt starts. Contains the optional task ID.
- **MemoryRetrievalCompletedEvent**: Emitted when memory retrieval for a task prompt completes successfully. Contains the task ID, memory content, and retrieval execution time.

## Event Handler Structure

Each event handler receives two parameters: the `source` that emitted the event and the `event` instance itself.
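As a minimal sketch of that handler shape, modeled directly on the listener examples shown later in this page (the import paths come from those same examples):

```python
from crewai.utilities.events import MemoryQueryStartedEvent
from crewai.utilities.events.base_event_listener import BaseEventListener

class MinimalListener(BaseEventListener):
    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(MemoryQueryStartedEvent)
        def handle_query_started(source, event):
            # source: the object that emitted the event
            # event: the event instance carrying the payload (query, limit, ...)
            print(f"Query started: {event.query}")
```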
@@ -749,9 +749,58 @@ CrewAI supports streaming responses from LLMs, allowing your application to rece

<Tip>
[Click here](https://docs.crewai.com/concepts/event-listener#event-listeners) for more details
</Tip>
</Tab>

<Tab title="Agent & Task Tracking">
All LLM events in CrewAI include agent and task information, allowing you to track and filter LLM interactions by specific agents or tasks:

```python
from crewai import LLM, Agent, Task, Crew
from crewai.utilities.events import LLMStreamChunkEvent
from crewai.utilities.events.base_event_listener import BaseEventListener

class MyCustomListener(BaseEventListener):
    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(LLMStreamChunkEvent)
        def on_llm_stream_chunk(source, event):
            # `researcher` is defined below; the handler only runs once events
            # start flowing, after the agent exists.
            if researcher.id == event.agent_id:
                print("\n==============\n Got event:", event, "\n==============\n")


my_listener = MyCustomListener()

llm = LLM(model="gpt-4o-mini", temperature=0, stream=True)

researcher = Agent(
    role="About User",
    goal="You know everything about the user.",
    backstory="""You are a master at understanding people and their preferences.""",
    llm=llm,
)

search = Task(
    description="Answer the following questions about the user: {question}",
    expected_output="An answer to the question.",
    agent=researcher,
)

crew = Crew(agents=[researcher], tasks=[search])

result = crew.kickoff(
    inputs={"question": "..."}
)
```

<Info>
This feature is particularly useful for:
- Debugging specific agent behaviors
- Logging LLM usage by task type
- Auditing which agents are making what types of LLM calls
- Performance monitoring of specific tasks
</Info>
</Tab>
</Tabs>

## Structured LLM Calls
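The body of this section is elided by the diff. As a hedged sketch of the pattern the heading names, assuming the `response_format` parameter that current CrewAI `LLM` documentation describes for Pydantic-typed output:

```python
from pydantic import BaseModel
from crewai import LLM

class Dog(BaseModel):
    name: str
    age: int
    breed: str

# Assumption: passing a Pydantic model as response_format makes the
# call return output conforming to that schema.
llm = LLM(model="gpt-4o", response_format=Dog)
response = llm.call(
    "Meet Kona! She is 3 years old and is a black German Shepherd."
)
print(response)  # structured output with name, age, and breed
```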
@@ -847,7 +896,7 @@ Learn how to get the most out of your LLM configuration:

Remember to regularly monitor your token usage and adjust your configuration as needed to optimize costs and performance.
</Info>
</Accordion>

<Accordion title="Drop Additional Parameters">
CrewAI internally uses litellm for LLM calls, which allows you to drop additional parameters that are not needed for your specific use case. This can help simplify your code and reduce the complexity of your LLM configuration.
For example, if you don't need to send the <code>stop</code> parameter, you can simply omit it from your LLM call:
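The example block itself is cut off by the hunk boundary. A minimal sketch, assuming the `drop_params` and `additional_drop_params` options that CrewAI's `LLM` forwards to litellm:

```python
from crewai import LLM

# Assumption: drop_params/additional_drop_params are forwarded to litellm,
# which silently drops parameters the target model does not support.
llm = LLM(
    model="o3-mini",
    drop_params=True,
    additional_drop_params=["stop"],
)
```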
@@ -9,7 +9,7 @@ icon: database

The CrewAI framework provides a sophisticated memory system designed to significantly enhance AI agent capabilities. CrewAI offers **three distinct memory approaches** that serve different use cases:

1. **Basic Memory System** - Built-in short-term, long-term, and entity memory (see the sketch below)
2. **User Memory** - User-specific memory with Mem0 integration (legacy approach)
3. **External Memory** - Standalone external memory providers (new approach)
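For the basic system, memory is enabled with a single flag; a minimal sketch, using the `Crew(memory=True)` pattern shown later on this page:

```python
from crewai import Crew

# Basic Memory System: short-term, long-term, and entity memory
# are all enabled by the single memory flag.
crew = Crew(
    agents=[...],   # your agents
    tasks=[...],    # your tasks
    memory=True,
)
```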
## Memory System Components

@@ -62,7 +62,7 @@ By default, CrewAI uses the `appdirs` library to determine storage locations fol

```
~/Library/Application Support/CrewAI/{project_name}/
├── knowledge/                  # Knowledge base ChromaDB files
├── short_term_memory/          # Short-term memory ChromaDB files
├── long_term_memory/           # Long-term memory ChromaDB files
├── entities/                   # Entity memory ChromaDB files
└── long_term_memory_storage.db # SQLite database
```
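To see where this resolves on your own platform, a short sketch using `appdirs` directly; the exact arguments CrewAI passes are an assumption, with `"CrewAI"` mirroring the tree above:

```python
import appdirs

# Assumption: CrewAI derives its base directory from appdirs' user data dir;
# on macOS this resolves to ~/Library/Application Support/CrewAI.
base_dir = appdirs.user_data_dir(appname="CrewAI")
print(base_dir)
```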
```
@@ -252,7 +252,7 @@ chroma_path = os.path.join(storage_path, "knowledge")

if os.path.exists(chroma_path):
    client = chromadb.PersistentClient(path=chroma_path)
    collections = client.list_collections()

    print("ChromaDB Collections:")
    for collection in collections:
        print(f"  - {collection.name}: {collection.count()} documents")
```
```
@@ -269,7 +269,7 @@ crew = Crew(agents=[...], tasks=[...], memory=True)

# Reset specific memory types
crew.reset_memories(command_type='short')      # Short-term memory
crew.reset_memories(command_type='long')       # Long-term memory
crew.reset_memories(command_type='entity')     # Entity memory
crew.reset_memories(command_type='knowledge')  # Knowledge storage
```
```
@@ -596,7 +596,7 @@ providers_to_test = [
    {
        "name": "Ollama",
        "config": {
            "provider": "ollama",
            "config": {"model": "mxbai-embed-large"}
        }
    }
```
```
@@ -604,7 +604,7 @@

for provider in providers_to_test:
    print(f"\nTesting {provider['name']} embeddings...")

    # Create crew with specific embedder
    crew = Crew(
        agents=[...],
@@ -612,7 +612,7 @@ for provider in providers_to_test:
        memory=True,
        embedder=provider['config']
    )

    # Run your test and measure performance
    result = crew.kickoff()
    print(f"{provider['name']} completed successfully")
```
```
@@ -655,17 +655,17 @@ import time

def test_embedding_performance(embedder_config, test_text="This is a test document"):
    start_time = time.time()

    crew = Crew(
        agents=[...],
        tasks=[...],
        memory=True,
        embedder=embedder_config
    )

    # Simulate memory operation
    crew.kickoff()

    end_time = time.time()
    return end_time - start_time

@@ -676,7 +676,7 @@ openai_time = test_embedding_performance({
})

ollama_time = test_embedding_performance({
    "provider": "ollama",
    "config": {"model": "mxbai-embed-large"}
})
```
```
@@ -783,7 +783,7 @@ os.environ["MEM0_API_KEY"] = "your-api-key"

# Create external memory instance
external_memory = ExternalMemory(
    embedder_config={
        "provider": "mem0",
        "config": {"user_id": "U-123"}
    }
)
```
```
@@ -808,8 +808,8 @@ class CustomStorage(Storage):

    def save(self, value, metadata=None, agent=None):
        self.memories.append({
            "value": value,
            "metadata": metadata,
            "agent": agent
        })
```
@@ -986,7 +986,201 @@ crew = Crew(

- 🫡 **Enhanced Personalization:** Memory enables agents to remember user preferences and historical interactions, leading to personalized experiences.
- 🧠 **Improved Problem Solving:** Access to a rich memory store aids agents in making more informed decisions, drawing on past learnings and contextual insights.

## Memory Events

CrewAI's event system provides powerful insights into memory operations. By leveraging memory events, you can monitor, debug, and optimize your memory system's performance and behavior.

### Available Memory Events

CrewAI emits the following memory-related events:

| Event | Description | Key Properties |
| :---- | :---------- | :------------- |
| **MemoryQueryStartedEvent** | Emitted when a memory query begins | `query`, `limit`, `score_threshold` |
| **MemoryQueryCompletedEvent** | Emitted when a memory query completes successfully | `query`, `results`, `limit`, `score_threshold`, `query_time_ms` |
| **MemoryQueryFailedEvent** | Emitted when a memory query fails | `query`, `limit`, `score_threshold`, `error` |
| **MemorySaveStartedEvent** | Emitted when a memory save operation begins | `value`, `metadata`, `agent_role` |
| **MemorySaveCompletedEvent** | Emitted when a memory save operation completes successfully | `value`, `metadata`, `agent_role`, `save_time_ms` |
| **MemorySaveFailedEvent** | Emitted when a memory save operation fails | `value`, `metadata`, `agent_role`, `error` |
| **MemoryRetrievalStartedEvent** | Emitted when memory retrieval for a task prompt starts | `task_id` |
| **MemoryRetrievalCompletedEvent** | Emitted when memory retrieval completes successfully | `task_id`, `memory_content`, `retrieval_time_ms` |
### Practical Applications

#### 1. Memory Performance Monitoring

Track memory operation timing to optimize your application:

```python
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events import (
    MemoryQueryCompletedEvent,
    MemorySaveCompletedEvent
)
import time

class MemoryPerformanceMonitor(BaseEventListener):
    def __init__(self):
        super().__init__()
        self.query_times = []
        self.save_times = []

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(MemoryQueryCompletedEvent)
        def on_memory_query_completed(source, event: MemoryQueryCompletedEvent):
            self.query_times.append(event.query_time_ms)
            print(f"Memory query completed in {event.query_time_ms:.2f}ms. Query: '{event.query}'")
            print(f"Average query time: {sum(self.query_times)/len(self.query_times):.2f}ms")

        @crewai_event_bus.on(MemorySaveCompletedEvent)
        def on_memory_save_completed(source, event: MemorySaveCompletedEvent):
            self.save_times.append(event.save_time_ms)
            print(f"Memory save completed in {event.save_time_ms:.2f}ms")
            print(f"Average save time: {sum(self.save_times)/len(self.save_times):.2f}ms")

# Create an instance of your listener
memory_monitor = MemoryPerformanceMonitor()
```

#### 2. Memory Content Logging

Log memory operations for debugging and insights:

```python
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events import (
    MemorySaveStartedEvent,
    MemoryQueryStartedEvent,
    MemoryRetrievalCompletedEvent
)
import logging

# Configure logging
logger = logging.getLogger('memory_events')

class MemoryLogger(BaseEventListener):
    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(MemorySaveStartedEvent)
        def on_memory_save_started(source, event: MemorySaveStartedEvent):
            if event.agent_role:
                logger.info(f"Agent '{event.agent_role}' saving memory: {event.value[:50]}...")
            else:
                logger.info(f"Saving memory: {event.value[:50]}...")

        @crewai_event_bus.on(MemoryQueryStartedEvent)
        def on_memory_query_started(source, event: MemoryQueryStartedEvent):
            logger.info(f"Memory query started: '{event.query}' (limit: {event.limit})")

        @crewai_event_bus.on(MemoryRetrievalCompletedEvent)
        def on_memory_retrieval_completed(source, event: MemoryRetrievalCompletedEvent):
            if event.task_id:
                logger.info(f"Memory retrieved for task {event.task_id} in {event.retrieval_time_ms:.2f}ms")
            else:
                logger.info(f"Memory retrieved in {event.retrieval_time_ms:.2f}ms")
            logger.debug(f"Memory content: {event.memory_content}")

# Create an instance of your listener
memory_logger = MemoryLogger()
```

#### 3. Error Tracking and Notifications

Capture and respond to memory errors:

```python
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events import (
    MemorySaveFailedEvent,
    MemoryQueryFailedEvent
)
import logging
from typing import Optional

# Configure logging
logger = logging.getLogger('memory_errors')

class MemoryErrorTracker(BaseEventListener):
    def __init__(self, notify_email: Optional[str] = None):
        super().__init__()
        self.notify_email = notify_email
        self.error_count = 0

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(MemorySaveFailedEvent)
        def on_memory_save_failed(source, event: MemorySaveFailedEvent):
            self.error_count += 1
            agent_info = f"Agent '{event.agent_role}'" if event.agent_role else "Unknown agent"
            error_message = f"Memory save failed: {event.error}. {agent_info}"
            logger.error(error_message)

            if self.notify_email and self.error_count % 5 == 0:
                self._send_notification(error_message)

        @crewai_event_bus.on(MemoryQueryFailedEvent)
        def on_memory_query_failed(source, event: MemoryQueryFailedEvent):
            self.error_count += 1
            error_message = f"Memory query failed: {event.error}. Query: '{event.query}'"
            logger.error(error_message)

            if self.notify_email and self.error_count % 5 == 0:
                self._send_notification(error_message)

    def _send_notification(self, message):
        # Implement your notification system (email, Slack, etc.)
        print(f"[NOTIFICATION] Would send to {self.notify_email}: {message}")

# Create an instance of your listener
error_tracker = MemoryErrorTracker(notify_email="admin@example.com")
```

### Integrating with Analytics Platforms

Memory events can be forwarded to analytics and monitoring platforms to track performance metrics, detect anomalies, and visualize memory usage patterns:

```python
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events import (
    MemoryQueryCompletedEvent,
    MemorySaveCompletedEvent
)

class MemoryAnalyticsForwarder(BaseEventListener):
    def __init__(self, analytics_client):
        super().__init__()
        self.client = analytics_client

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(MemoryQueryCompletedEvent)
        def on_memory_query_completed(source, event: MemoryQueryCompletedEvent):
            # Forward query metrics to analytics platform
            self.client.track_metric({
                "event_type": "memory_query",
                "query": event.query,
                "duration_ms": event.query_time_ms,
                "result_count": len(event.results) if hasattr(event.results, "__len__") else 0,
                "timestamp": event.timestamp
            })

        @crewai_event_bus.on(MemorySaveCompletedEvent)
        def on_memory_save_completed(source, event: MemorySaveCompletedEvent):
            # Forward save metrics to analytics platform
            self.client.track_metric({
                "event_type": "memory_save",
                "agent_role": event.agent_role,
                "duration_ms": event.save_time_ms,
                "timestamp": event.timestamp
            })
```

### Best Practices for Memory Event Listeners

1. **Keep handlers lightweight**: Avoid complex processing in event handlers to prevent performance impacts
2. **Use appropriate logging levels**: Use INFO for normal operations, DEBUG for details, ERROR for issues
3. **Batch metrics when possible**: Accumulate metrics before sending to external systems (a sketch follows this list)
4. **Handle exceptions gracefully**: Ensure your event handlers don't crash due to unexpected data
5. **Consider memory consumption**: Be mindful of storing large amounts of event data
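For point 3, a listener can buffer metrics and flush them in batches rather than calling an external system once per event. In this sketch, `analytics_client`, `send_batch`, and `flush_every` are illustrative names, not CrewAI APIs:

```python
from crewai.utilities.events import MemoryQueryCompletedEvent
from crewai.utilities.events.base_event_listener import BaseEventListener

class BatchingMetricsListener(BaseEventListener):
    def __init__(self, analytics_client, flush_every: int = 20):
        super().__init__()
        self.client = analytics_client  # hypothetical external client
        self.flush_every = flush_every
        self.buffer = []

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(MemoryQueryCompletedEvent)
        def on_query_completed(source, event):
            self.buffer.append({"query": event.query, "ms": event.query_time_ms})
            if len(self.buffer) >= self.flush_every:
                try:
                    self.client.send_batch(self.buffer)  # hypothetical API
                except Exception:
                    pass  # never let a handler crash the event bus (point 4)
                self.buffer.clear()
```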
## Conclusion

Integrating CrewAI's memory system into your projects is straightforward. By leveraging the provided memory components and configurations,
you can quickly empower your agents with the ability to remember, reason, and learn from their interactions, unlocking new levels of intelligence and capability.
@@ -6,10 +6,10 @@ icon: dumbbell

## Overview

The training feature in CrewAI allows you to train your AI agents using the command-line interface (CLI).
By running the command `crewai train -n <n_iterations>`, you can specify the number of iterations for the training process.

During training, CrewAI utilizes techniques to optimize the performance of your agents along with human feedback.
This helps the agents improve their understanding, decision-making, and problem-solving abilities.

### Training Your Crew Using the CLI
```
@@ -42,8 +42,8 @@ filename = "your_model.pkl"

try:
    YourCrewName_Crew().crew().train(
        n_iterations=n_iterations,
        inputs=inputs,
        filename=filename
    )
```
@@ -64,4 +64,68 @@ Once the training is complete, your agents will be equipped with enhanced capabi

Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.

Happy training with CrewAI! 🚀

## Small Language Model Considerations

<Warning>
When using smaller language models (≤7B parameters) for training data evaluation, be aware that they may face challenges with generating structured outputs and following complex instructions.
</Warning>

### Limitations of Small Models in Training Evaluation

<CardGroup cols={2}>
  <Card title="JSON Output Accuracy" icon="triangle-exclamation">
    Smaller models often struggle with producing valid JSON responses needed for structured training evaluations, leading to parsing errors and incomplete data.
  </Card>
  <Card title="Evaluation Quality" icon="chart-line">
    Models under 7B parameters may provide less nuanced evaluations with limited reasoning depth compared to larger models.
  </Card>
  <Card title="Instruction Following" icon="list-check">
    Complex training evaluation criteria may not be fully followed or considered by smaller models.
  </Card>
  <Card title="Consistency" icon="rotate">
    Evaluations across multiple training iterations may lack consistency with smaller models.
  </Card>
</CardGroup>
### Recommendations for Training

<Tabs>
  <Tab title="Best Practice">
    For optimal training quality and reliable evaluations, we strongly recommend using models with at least 7B parameters or larger:

    ```python
    from crewai import Agent, Crew, Task, LLM

    # Recommended minimum for training evaluation
    llm = LLM(model="mistral/open-mistral-7b")

    # Better options for reliable training evaluation
    llm = LLM(model="anthropic/claude-3-sonnet-20240229-v1:0")
    llm = LLM(model="gpt-4o")

    # Use this LLM with your agents
    agent = Agent(
        role="Training Evaluator",
        goal="Provide accurate training feedback",
        llm=llm
    )
    ```

    <Tip>
      More powerful models provide higher quality feedback with better reasoning, leading to more effective training iterations.
    </Tip>
  </Tab>
  <Tab title="Small Model Usage">
    If you must use smaller models for training evaluation, be aware of these constraints:

    ```python
    # Using a smaller model (expect some limitations)
    llm = LLM(model="huggingface/microsoft/Phi-3-mini-4k-instruct")
    ```

    <Warning>
      While CrewAI includes optimizations for small models, expect less reliable and less nuanced evaluation results that may require more human intervention during training.
    </Warning>
  </Tab>
</Tabs>

@@ -72,7 +72,7 @@ If you haven't installed `uv` yet, follow **step 1** to quickly get it set up on
</Warning>

<Warning>
If you encounter the `chroma-hnswlib==0.7.6` build error (`fatal error C1083: Cannot open include file: 'float.h'`) on Windows, install [Visual Studio Build Tools](https://visualstudio.microsoft.com/downloads/) with *Desktop development with C++*.
</Warning>

- To verify that `crewai` is installed, run:
@@ -104,7 +104,6 @@ We recommend using the `YAML` template scaffolding for a structured approach to
```

- This creates a new project with the following structure:
<Frame>
```
my_project/
├── .gitignore
@@ -124,7 +123,6 @@
├── agents.yaml
└── tasks.yaml
```
</Frame>
</Step>

<Step title="Customize Your Project">
@@ -172,7 +170,7 @@ For teams and organizations, CrewAI offers enterprise deployment options that el

### CrewAI Factory (Self-hosted)
- Containerized deployment for your infrastructure
- Supports any hyperscaler including on prem deployments
- Integration with your existing security systems

<Card title="Explore Enterprise Options" icon="building" href="https://crewai.com/enterprise">

@@ -1,473 +0,0 @@
---
title: Changelog
description: View the latest updates and changes to CrewAI
icon: timeline
---

<Update label="2025-05-22" description="v0.121.0" tags={["Latest"]}>
## Release Highlights
<Frame>
  <img src="/images/releases/v01210.png" />
</Frame>

<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
  <a href="https://github.com/crewAIInc/crewAI/releases/tag/0.121.0">View on GitHub</a>
</div>

**Core Improvements & Fixes**
- Fixed an encoding error when creating tools
- Fixed the failing llama test
- Updated logging configuration for consistency
- Improved telemetry initialization and event handling

**New Features & Enhancements**
- Added **markdown attribute** to the Task class
- Added **reasoning attribute** to the Agent class
- Added the **inject_date flag** to Agent for automatic date injection
- Implemented **HallucinationGuardrail** (no-op, with test coverage)

**Documentation & Guides**
- Added documentation for **StagehandTool** and improved MDX structure
- Added documentation for **MCP integration** and updated enterprise docs
- Documented knowledge events and updated reasoning docs
- Added an explanation of the stop parameter
- Fixed import references in documentation examples (before_kickoff, after_kickoff)
- General docs updates and restructuring for clarity
</Update>

<Update label="2024-05-15" description="v0.120.1">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v01201.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.120.1">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Melhorias & Correções no Núcleo**
|
||||
- Corrigida **interpolação com hífens**
|
||||
</Update>
|
||||
|
||||
<Update label="2024-05-14" description="v0.120.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v01200.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.120.0">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Melhorias & Correções no Núcleo**
|
||||
- Conjunto completo de regras do Ruff habilitado por padrão para linting mais rigoroso
|
||||
- Corrigida condição de corrida no FilteredStream usando context managers
|
||||
- Corrigido problema de reset de conhecimento do agente
|
||||
- Lógica de busca de agente refatorada para módulo utilitário
|
||||
|
||||
**Novas Funcionalidades & Aprimoramentos**
|
||||
- Suporte adicionado para **carregar um Agent diretamente de um repositório**
|
||||
- Permitida configuração de contexto vazio em Task
|
||||
- Feedback do repositório de Agent aprimorado e correção no comportamento de auto-import de Tool
|
||||
- Inicialização direta de conhecimento introduzida (bypassando knowledge_sources)
|
||||
|
||||
**Documentação & Guias**
|
||||
- security.md atualizado para refletir práticas atuais de segurança
|
||||
- Seção de configuração do Google revisada para mais clareza
|
||||
- Adicionado link para AI Studio ao inserir chave Gemini
|
||||
- Guia de observabilidade do Arize Phoenix atualizado
|
||||
- Documentação de fluxo renovada
|
||||
</Update>
|
||||
|
||||
<Update label="2024-05-08" description="v0.119.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v01190.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.119.0">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Melhorias & Correções no Núcleo**
|
||||
- Confiabilidade de testes aprimorada melhorando o tratamento do pytest para testes instáveis
|
||||
- Corrigido crash no reset de memória por incompatibilidade de dimensões de embeddings
|
||||
- Identificação do fluxo pai habilitada para Crew e LiteAgent
|
||||
- Crashes de telemetria prevenidos quando indisponíveis
|
||||
- Atualização da versão do **LiteLLM** para melhor compatibilidade
|
||||
- Corrigidos testes do conversor de llama removendo skip_external_api
|
||||
|
||||
**Novas Funcionalidades & Aprimoramentos**
|
||||
- Introduzida **reescrita de prompt de recuperação de conhecimento** no Agent para melhor rastreamento e debug
|
||||
- Guias de configuração do LLM e início rápido tornados independentes do modelo
|
||||
|
||||
**Documentação & Guias**
|
||||
- Adicionada documentação de configuração avançada para o RAG tool
|
||||
- Guia de resolução de problemas no Windows atualizado
|
||||
- Exemplos na documentação refinados para maior clareza
|
||||
- Correção de erros ortográficos em docs e arquivos de configuração
|
||||
</Update>
|
||||
|
||||
<Update label="2024-04-28" description="v0.118.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v01180.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.118.0">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Melhorias & Correções no Núcleo**
|
||||
- Corrigidos problemas com prompt ou templates de sistema ausentes
|
||||
- Removida configuração global de logs para evitar sobreposição não intencional
|
||||
- Renomeado **TaskGuardrail para LLMGuardrail** para maior clareza
|
||||
- Versão do litellm rebaixada para 1.167.1 visando compatibilidade
|
||||
- Adicionados arquivos init.py ausentes para garantir inicialização correta dos módulos
|
||||
|
||||
**Novas Funcionalidades & Aprimoramentos**
|
||||
- Suporte adicionado para **criação de Guardrails sem código** facilitando o controle de comportamento da IA
|
||||
|
||||
**Documentação & Guias**
|
||||
- CrewStructuredTool removido da documentação pública para refletir uso interno
|
||||
- Documentação corporativa e embed do YouTube atualizados para melhor onboarding
|
||||
</Update>
|
||||
|
||||
<Update label="2024-04-20" description="v0.117.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v01170.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.117.0">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Novas Funcionalidades & Aprimoramentos**
|
||||
- Adicionado suporte ao parâmetro `result_as_answer` no decorator `@tool`.
|
||||
- Suporte a novos modelos de linguagem: GPT-4.1, Gemini-2.0 e Gemini-2.5 Pro.
|
||||
- Capacidades de gerenciamento de conhecimento aprimoradas.
|
||||
- Adicionado provedor Huggingface na CLI.
|
||||
- Compatibilidade e suporte CI melhorados para Python 3.10+.
|
||||
|
||||
**Melhorias & Correções no Núcleo**
|
||||
- Corrigidos problemas com parâmetros de template incorretos e entradas ausentes.
|
||||
- Fluxo assíncrono aprimorado com verificações de condição coroutine.
|
||||
- Gerenciamento de memória aprimorado com configuração isolada e cópia correta dos objetos.
|
||||
- Inicialização de lite agents corrigida com referências corretas.
|
||||
- Corrigidos problemas de type hint em Python e remoção de imports redundantes.
|
||||
- Atualização do posicionamento de eventos para rastreamento do uso de ferramentas.
|
||||
- Exceções explícitas lançadas quando fluxos falham.
|
||||
- Remoção de código e comentários redundantes em diversos módulos.
|
||||
- Atualização da ação de token do GitHub App para v2.
|
||||
|
||||
**Documentação & Guias**
|
||||
- Estrutura documental aprimorada, incluindo instruções para implantação corporativa.
|
||||
- Criação automática de pastas de saída para geração de documentação.
|
||||
- Link quebrado reparado na documentação do WeaviateVectorSearchTool.
|
||||
- Correções na documentação do guardrail e nos caminhos de import dos search tools para JSON.
|
||||
- Atualização na documentação do CodeInterpreterTool.
|
||||
- Aprimoramento de SEO, navegação contextual e tratamento de erros nas páginas de documentação.
|
||||
</Update>
|
||||
|
||||
<Update label="2024-04-25" description="v0.117.1">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v01171.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.117.1">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Melhorias & Correções no Núcleo**
|
||||
- Versão do **crewai-tools** atualizada para a mais recente
|
||||
- Versão do **liteLLM** atualizada para a mais recente
|
||||
- Correção no **Mem0 OSS**
|
||||
</Update>
|
||||
|
||||
<Update label="2024-04-07" description="v0.114.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v01140.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.114.0">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Novas Funcionalidades & Aprimoramentos**
|
||||
- Agents como unidade atômica. (`Agent(...).kickoff()`)
|
||||
- Suporte para [implementações Custom LLM](https://docs.crewai.com/guides/advanced/custom-llm).
|
||||
- Memória externa integrada e [Opik observability](https://docs.crewai.com/how-to/opik-observability).
|
||||
- Extração de YAML aprimorada.
|
||||
- Validação multimodal de agentes.
|
||||
- Impressões digitais seguras adicionadas para agents e crews.
|
||||
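
A minimal sketch of the standalone-agent entry point (the model name is an assumption; any supported LLM identifier works):

```python
from crewai import Agent

researcher = Agent(
    role="Researcher",
    goal="Summarize topics in one short paragraph",
    backstory="A concise technical analyst.",
    llm="gpt-4o-mini",  # assumed model identifier
)

# kickoff() runs the agent directly, with no Crew or Task wrapper.
result = researcher.kickoff("Why do teams adopt unit tests?")
print(result)
```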

**Core Improvements & Fixes**

- Improved serialization, agent copying, and Python compatibility.
- Added wildcard support to `emit()`.
- Support for additional router calls and context-window adjustments.
- Fixes to typing, validation, and imports.
- Improved method performance.
- Improved agent task handling, event emission, and memory management.
- Fixes to the CLI, conditional tasks, cloning behavior, and tool outputs.

**Documentation & Guides**

- Improved documentation structure, theme, and organization.
- Added guides for local NVIDIA NIM with WSL2, W&B Weave, and Arize Phoenix.
- Updated tool configuration examples, prompts, and observability docs.
- Guide to using standalone agents in Flows.

</Update>

<Update label="2024-03-17" description="v0.108.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v01080.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.108.0">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Novas Funcionalidades & Aprimoramentos**
|
||||
- Conversão de tabs para espaços no template `crew.py`
|
||||
- Streaming de respostas LLM e sistema de eventos aprimorados
|
||||
- Inclusão de `model_name`
|
||||
- Event Listener aprimorado com visualização rica e logs melhorados
|
||||
- Impressões digitais adicionadas
|
||||
|
||||
**Correções de Bugs**
|
||||
- Correções de problemas com Mistral
|
||||
- Correção de bug na documentação
|
||||
- Correção de erro de type check na propriedade fingerprint
|
||||
|
||||
**Atualizações em Documentação**
|
||||
- Documentação de ferramentas aprimorada
|
||||
- Guia de instalação atualizado para o pacote `uv` tool
|
||||
- Instruções adicionadas para upgrade do crewAI com o `uv` tool
|
||||
- Documentação para `ApifyActorsTool` incluída
|
||||
</Update>
|
||||
|
||||
<Update label="2024-03-10" description="v0.105.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v01050.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.105.0">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Melhorias & Correções no Núcleo**
|
||||
- Correção de variáveis de template ausentes e configuração de memória de usuário
|
||||
- Suporte a fluxo assíncrono melhorado e resolução para formatação de respostas do agente
|
||||
- Função de reset de memória aprimorada e correção nos comandos de memória da CLI
|
||||
- Correções em tipos, propriedades de chamadas de ferramenta e desacoplamento de telemetria
|
||||
|
||||
**Novas Funcionalidades & Aprimoramentos**
|
||||
- Exportação de estado do Flow e utilitários de estado melhorados
|
||||
- Configuração de conhecimento do agente aprimorada com embedder opcional para crew
|
||||
- Emissor de eventos adicionado para melhor observabilidade e rastreamento de chamadas LLM
|
||||
- Suporte para Python 3.10 e ChatOllama via langchain_ollama
|
||||
- Suporte ao tamanho da janela de contexto para o modelo o3-mini
|
||||
- Adicionada capacidade de múltiplas chamadas de roteador
|
||||
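
A hedged sketch of subscribing to the event emitter; the import paths follow the event-bus modules used elsewhere in the codebase:

```python
from crewai.utilities.events import LLMCallStartedEvent
from crewai.utilities.events.base_event_listener import BaseEventListener

class LLMCallLogger(BaseEventListener):
    def setup_listeners(self, crewai_event_bus):
        # Register a handler on the shared event bus.
        @crewai_event_bus.on(LLMCallStartedEvent)
        def on_llm_call_started(source, event):
            print(f"LLM call started from {type(source).__name__}")

logger = LLMCallLogger()  # instantiating registers the listeners
```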

**Documentation & Guides**

- Improved documentation layout and hierarchy
- Added a guide for QdrantVectorSearchTool and clarified event-listener usage
- Fixed prompt errors and updated the Amazon Bedrock model list

</Update>

<Update label="2024-02-12" description="v0.102.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v01020.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.102.0">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Melhorias & Correções no Núcleo**
|
||||
- Suporte LLM expandido: melhorar saída estruturada do LLM, manuseio de parâmetros e formatação para modelos Anthropic
|
||||
- Estabilidade de Crew & Agent: corrigido clonagem de agents/crews com knowledge sources, múltiplas saídas de task em tarefas condicionais e callbacks de tarefa de Crew ignorados
|
||||
- Correções de Memória & Armazenamento: melhora no gerenciamento de memória de curto prazo com Bedrock, inicialização correta do embedder e função reset memories adicionada na classe crew
|
||||
- Confiabilidade em Treinamento & Execução: corrigidos treinamento quebrado e questões de interpolação com tipos de entrada dict e list
|
||||
|
||||
**Novas Funcionalidades & Aprimoramentos**
|
||||
- Gerenciamento Avançado de Conhecimento: convencionamento de nomes melhorado e configuração de embedding aprimorada com suporte a embedder customizado
|
||||
- Logging & Observabilidade Expandidos: suporte ao formato JSON para logging e documentação de rastreamento no MLflow integrada
|
||||
- Melhorias no Tratamento de Dados: atualização do excel_knowledge_source.py para processar arquivos com múltipl abas
|
||||
- Desempenho Geral & Limpeza do Código: alinhamento de código corporativo e resolução de problemas de linting
|
||||
- Novo tool adicionado: `QdrantVectorSearchTool`
|
||||
|
||||
**Documentação & Guias**
|
||||
- Docs de AI & Memória atualizados: melhorias em documentação do Bedrock, Google AI e memória de longo prazo
|
||||
- Clareza em tarefas & fluxos: adicionada linha "Entrada Humana" em Task Attributes, guia para Langfuse e documentação para FileWriterTool
|
||||
- Diversas correções de ortografia & formatação
|
||||
</Update>
|
||||
|
||||
<Update label="2024-01-28" description="v0.100.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v01000.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.100.0">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Funcionalidades**
|
||||
- Adição dos docs Composio
|
||||
- Adicionado SageMaker como provedor LLM
|
||||
|
||||
**Correções**
|
||||
- Correção geral nas conexões LLM
|
||||
- Uso de acessores seguros no treinamento
|
||||
- Checagem de versão adicionada ao crew_chat.py
|
||||
|
||||
**Documentação**
|
||||
- Novos docs para crewai chat
|
||||
- Melhorias no formato e clareza nos docs da CLI e da Composio Tool
|
||||
</Update>
|
||||
|
||||
<Update label="2024-01-20" description="v0.98.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v0980.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.98.0">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Funcionalidades**
|
||||
- Conversation crew v1
|
||||
- Adição de ID único para estados de flow
|
||||
- Adição do decorator @persist com a interface FlowPersistence
|
||||
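
A hedged sketch of flow-state persistence with the new decorator; the import paths and default backend are assumptions based on the FlowPersistence interface:

```python
from crewai.flow.flow import Flow, start
from crewai.flow.persistence import persist

@persist()  # persists flow state between runs
class CounterFlow(Flow):
    @start()
    def bump(self):
        # Unstructured state is a dict; the flow's unique ID keys the storage.
        self.state["count"] = self.state.get("count", 0) + 1
        return self.state["count"]

print(CounterFlow().kickoff())
```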

**Integrations**

- Added the SambaNova integration
- New NVIDIA NIM provider in the CLI
- Introducing VoyageAI

**Fixes**

- Fixed API-key behavior and entity handling in the Mem0 integration
- Fixed the main invoke logic and its related tests
- Tool inputs are now real objects instead of strings
- Added important steps to the tool-creation process
- Downgraded the litellm version to prevent an issue on Windows
- Fixed a pre-execution check for null inputs
- Fixed errors, a nested pydantic model, and docling failures

</Update>

<Update label="2024-01-04" description="v0.95.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v0950.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.95.0">Ver no GitHub</a>
|
||||
</div>
|
||||
|
||||
**Novas Funcionalidades**
|
||||
- Adição de habilidades multimodais ao Crew
|
||||
- Guardrails programáticos
|
||||
- HITL com múltiplas rodadas
|
||||
- Suporte ao Gemini 2.0
|
||||
- Melhorias em CrewAI Flows
|
||||
- Permissões de workflow adicionadas
|
||||
- Suporte a langfuse com litellm
|
||||
- Integração Portkey com CrewAI
|
||||
- Método interpolate_only e melhorias em tratamento de erros
|
||||
- Suporte ao Docling
|
||||
- Suporte ao Weviate
|
||||
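
A minimal sketch of a programmatic guardrail, assuming the documented convention that a guardrail callable receives the task output and returns a `(success, payload)` tuple:

```python
from typing import Any, Tuple

from crewai import Task
from crewai.tasks.task_output import TaskOutput

def must_be_short(output: TaskOutput) -> Tuple[bool, Any]:
    """Reject answers longer than 100 words."""
    if len(output.raw.split()) <= 100:
        return True, output.raw
    return False, "Answer exceeded 100 words; please shorten it."

task = Task(
    description="Explain vector databases briefly.",
    expected_output="A short explanation.",
    guardrail=must_be_short,  # the task is retried with feedback on failure
)
```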

**Fixes**

- output_file was not respecting the system path
- Disk I/O error when resetting short-term memory
- CrewJSONEncoder now accepts enums
- Capped the maximum supported Python version
- output_file interpolation in Task
- Proper handling of coworker role-name casing and whitespace
- Added tiktoken as an explicit dependency and documented the Rust requirement
- Included agent knowledge in the planning process
- Storage initialization set to None in KnowledgeStorage
- Fixed optional storage checks
- Included the event emitter in flows
- Improvements to docstrings, error handling, and type hints
- Suppressed UserWarnings caused by pydantic issues in litellm

</Update>

<Update label="2024-12-05" description="v0.86.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v0860.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.86.0">Ver no GitHub</a>
|
||||
</div>
|
||||
**Alterações**
|
||||
- Removidas todas referências a pipeline e roteador de pipeline
|
||||
- Adicionado Nvidia NIM como provedor em Custom LLM
|
||||
- Adicionado demo de knowledge + melhorias nos docs de knowledge
|
||||
- Adicionado suporte a múltiplas rodadas de HITL
|
||||
- Novos docs sobre crew em yaml com decorators
|
||||
- Template de crew simplificado
|
||||
</Update>
|
||||
|
||||
<Update label="2024-12-04" description="v0.85.0">
|
||||
## Destaques da Versão
|
||||
<Frame>
|
||||
<img src="/images/releases/v0850.png" />
|
||||
</Frame>
|
||||
|
||||
<div style={{ textAlign: 'center', marginBottom: '1rem' }}>
|
||||
<a href="https://github.com/crewAIInc/crewAI/releases/tag/0.85.0">Ver no GitHub</a>
|
||||
</div>
|
||||
**Funcionalidades**
|
||||
- Adicionado conhecimento em nível de agent
|
||||
- Feat/remover langchain
|
||||
- Aprimoradas saídas tipadas das tasks
|
||||
- Login no Tool Repository ao fazer login no crewai
|
||||
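
A hedged sketch of agent-level knowledge; the string-source class path is an assumption based on the knowledge modules:

```python
from crewai import Agent
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

facts = StringKnowledgeSource(content="Our release cadence is weekly.")

support_agent = Agent(
    role="Support Analyst",
    goal="Answer questions about internal processes",
    backstory="Knows the team's conventions by heart.",
    knowledge_sources=[facts],  # knowledge scoped to this agent only
)
```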

**Fixes**

- Fixed result-as-answer issues not properly exiting the LLM loop
- Fixed a missing key name when running with the ollama provider
- Fixed an identified spelling error

**Documentation**

- Updated the readme for running mypy
- Added knowledge to mint.json
- Updated the GitHub Actions
- Updated the Agents documentation to include two approaches for creating an agent
- Improvements to LLM Configuration and Usage

</Update>

<Update label="2024-11-25" description="v0.83.0">
|
||||
**Novas Funcionalidades**
|
||||
- Novos callbacks before_kickoff e after_kickoff em crew
|
||||
- Suporte para pre-seed de agents com Knowledge
|
||||
- Adicionado suporte para recuperação de preferências de usuário e memórias usando Mem0
|
||||
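
A hedged fragment showing the new hooks, assuming the `before_kickoff`/`after_kickoff` decorators from `crewai.project` (the agent and task wiring of a full `@CrewBase` class is omitted):

```python
from crewai.project import CrewBase, after_kickoff, before_kickoff

@CrewBase
class ResearchCrew:
    @before_kickoff
    def prepare_inputs(self, inputs):
        inputs["audience"] = "executives"  # enrich inputs before the run
        return inputs

    @after_kickoff
    def log_result(self, result):
        print("Crew finished:", result.raw)
        return result
```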

**Fixes**

- Fixed asynchronous execution
- Upgraded chroma and adjusted the embedder function generator
- Updated the Watson CLI with supported models + docs
- Lowered the level for Bandit
- Fixed all tests

**Documentation**

- Updated documentation

</Update>

<Update label="2024-11-13" description="v0.80.0">
|
||||
**Correções**
|
||||
- Correção em bug de substituição do callback Tokens
|
||||
- Correção em issue de callback Step
|
||||
- Informação de prompt tokens em cache adicionada nas métricas de uso
|
||||
- Correção no teste crew_train_success
|
||||
</Update>
|
||||
16 docs/reo-tracking.js Normal file
@@ -0,0 +1,16 @@
|
||||
(function() {
|
||||
var clientID = 'e1256ea7e23318f';
|
||||
|
||||
var initReo = function() {
|
||||
Reo.init({
|
||||
clientID: clientID
|
||||
});
|
||||
};
|
||||
|
||||
var script = document.createElement('script');
|
||||
script.src = 'https://static.reo.dev/' + clientID + '/reo.js';
|
||||
script.defer = true;
|
||||
script.onload = initReo;
|
||||
|
||||
document.head.appendChild(script);
|
||||
})();
|
||||
@@ -11,7 +11,7 @@ dependencies = [
    # Core Dependencies
    "pydantic>=2.4.2",
    "openai>=1.13.3",
-   "litellm==1.72.0",
+   "litellm==1.72.6",
    "instructor>=1.3.3",
    # Text Processing
    "pdfplumber>=0.11.4",
@@ -1,4 +1,6 @@
import warnings
import threading
import urllib.request

from crewai.agent import Agent
from crewai.crew import Crew
@@ -11,6 +13,7 @@ from crewai.process import Process
from crewai.task import Task
from crewai.tasks.llm_guardrail import LLMGuardrail
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry.telemetry import Telemetry

warnings.filterwarnings(
    "ignore",
@@ -18,6 +21,39 @@ warnings.filterwarnings(
    category=UserWarning,
    module="pydantic.main",
)

_telemetry_submitted = False


def _track_install():
    """Track package installation/first-use via Scarf analytics."""
    global _telemetry_submitted

    if _telemetry_submitted or Telemetry._is_telemetry_disabled():
        return

    try:
        pixel_url = "https://api.scarf.sh/v2/packages/CrewAI/crewai/docs/00f2dad1-8334-4a39-934e-003b2e1146db"

        req = urllib.request.Request(pixel_url)
        req.add_header('User-Agent', f'CrewAI-Python/{__version__}')

        with urllib.request.urlopen(req, timeout=2):  # nosec B310
            _telemetry_submitted = True

    except Exception:
        pass


def _track_install_async():
    """Track installation in background thread to avoid blocking imports."""
    if not Telemetry._is_telemetry_disabled():
        thread = threading.Thread(target=_track_install, daemon=True)
        thread.start()


_track_install_async()

__version__ = "0.134.0"
__all__ = [
    "Agent",
@@ -1,5 +1,6 @@
import shutil
import subprocess
import time
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Type, Union

from pydantic import Field, InstanceOf, PrivateAttr, model_validator
@@ -32,6 +33,10 @@ from crewai.utilities.events.agent_events import (
    AgentExecutionStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import (
    MemoryRetrievalStartedEvent,
    MemoryRetrievalCompletedEvent,
)
from crewai.utilities.events.knowledge_events import (
    KnowledgeQueryCompletedEvent,
    KnowledgeQueryFailedEvent,
@@ -302,6 +307,15 @@ class Agent(BaseAgent):
            )

            if self._is_any_available_memory():
                crewai_event_bus.emit(
                    self,
                    event=MemoryRetrievalStartedEvent(
                        task_id=str(task.id) if task else None,
                        source_type="agent",
                    ),
                )

                start_time = time.time()
                contextual_memory = ContextualMemory(
                    self.crew.memory_config,
                    self.crew._short_term_memory,
@@ -313,6 +327,16 @@ class Agent(BaseAgent):
                memory = contextual_memory.build_context_for_task(task, context)
                if memory.strip() != "":
                    task_prompt += self.i18n.slice("memory").format(memory=memory)

                crewai_event_bus.emit(
                    self,
                    event=MemoryRetrievalCompletedEvent(
                        task_id=str(task.id) if task else None,
                        memory_content=memory,
                        retrieval_time_ms=(time.time() - start_time) * 1000,
                        source_type="agent",
                    ),
                )
        knowledge_config = (
            self.knowledge_config.model_dump() if self.knowledge_config else {}
        )
@@ -775,6 +799,7 @@ class Agent(BaseAgent):
            LiteAgentOutput: The result of the agent execution.
        """
        lite_agent = LiteAgent(
            id=self.id,
            role=self.role,
            goal=self.goal,
            backstory=self.backstory,
@@ -159,6 +159,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                messages=self.messages,
                callbacks=self.callbacks,
                printer=self._printer,
                from_task=self.task
            )
            formatted_answer = process_llm_response(answer, self.use_stop_words)
@@ -252,7 +252,7 @@ def write_env_file(folder_path, env_vars):
    env_file_path = folder_path / ".env"
    with open(env_file_path, "w") as file:
        for key, value in env_vars.items():
-           file.write(f"{key}={value}\n")
+           file.write(f"{key.upper()}={value}\n")


def get_crews(crew_path: str = "crew.py", require: bool = False) -> list[Crew]:
@@ -15,12 +15,14 @@ from typing import (
    get_origin,
)

try:
    from typing import Self
except ImportError:
    from typing_extensions import Self

from pydantic import (
    UUID4,
    BaseModel,
    Field,
    InstanceOf,
@@ -129,6 +131,7 @@ class LiteAgent(FlowTrackable, BaseModel):
    model_config = {"arbitrary_types_allowed": True}

    # Core Agent Properties
    id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
    role: str = Field(description="Role of the agent")
    goal: str = Field(description="Goal of the agent")
    backstory: str = Field(description="Backstory of the agent")
@@ -517,6 +520,7 @@ class LiteAgent(FlowTrackable, BaseModel):
                messages=self._messages,
                tools=None,
                callbacks=self._callbacks,
                from_agent=self,
            ),
        )

@@ -526,6 +530,7 @@ class LiteAgent(FlowTrackable, BaseModel):
                messages=self._messages,
                callbacks=self._callbacks,
                printer=self._printer,
                from_agent=self,
            )

            # Emit LLM call completed event
@@ -534,13 +539,14 @@ class LiteAgent(FlowTrackable, BaseModel):
                event=LLMCallCompletedEvent(
                    response=answer,
                    call_type=LLMCallType.LLM_CALL,
                    from_agent=self,
                ),
            )
        except Exception as e:
            # Emit LLM call failed event
            crewai_event_bus.emit(
                self,
-               event=LLMCallFailedEvent(error=str(e)),
+               event=LLMCallFailedEvent(error=str(e), from_agent=self),
            )
            raise e
@@ -419,6 +419,8 @@ class LLM(BaseLLM):
        params: Dict[str, Any],
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
        from_task: Optional[Any] = None,
        from_agent: Optional[Any] = None,
    ) -> str:
        """Handle a streaming response from the LLM.

@@ -426,6 +428,8 @@ class LLM(BaseLLM):
            params: Parameters for the completion call
            callbacks: Optional list of callback functions
            available_functions: Dict of available functions
            from_task: Optional task object
            from_agent: Optional agent object

        Returns:
            str: The complete response text
@@ -510,6 +514,8 @@ class LLM(BaseLLM):
                        tool_calls=tool_calls,
                        accumulated_tool_args=accumulated_tool_args,
                        available_functions=available_functions,
                        from_task=from_task,
                        from_agent=from_agent,
                    )
                    if result is not None:
                        chunk_content = result
@@ -527,7 +533,7 @@ class LLM(BaseLLM):
                    assert hasattr(crewai_event_bus, "emit")
                    crewai_event_bus.emit(
                        self,
-                       event=LLMStreamChunkEvent(chunk=chunk_content),
+                       event=LLMStreamChunkEvent(chunk=chunk_content, from_task=from_task, from_agent=from_agent),
                    )
            # --- 4) Fallback to non-streaming if no content received
            if not full_response.strip() and chunk_count == 0:
@@ -540,7 +546,7 @@ class LLM(BaseLLM):
                    "stream_options", None
                )  # Remove stream_options for non-streaming call
                return self._handle_non_streaming_response(
-                   non_streaming_params, callbacks, available_functions
+                   non_streaming_params, callbacks, available_functions, from_task, from_agent
                )

            # --- 5) Handle empty response with chunks
@@ -625,7 +631,7 @@ class LLM(BaseLLM):
                # Log token usage if available in streaming mode
                self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
                # Emit completion event and return response
-               self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL)
+               self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task, from_agent)
                return full_response

            # --- 9) Handle tool calls if present
@@ -637,7 +643,7 @@ class LLM(BaseLLM):
            self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)

            # --- 11) Emit completion event and return response
-           self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL)
+           self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task, from_agent)
            return full_response

        except ContextWindowExceededError as e:
@@ -649,14 +655,14 @@ class LLM(BaseLLM):
            logging.error(f"Error in streaming response: {str(e)}")
            if full_response.strip():
                logging.warning(f"Returning partial response despite error: {str(e)}")
-               self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL)
+               self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task, from_agent)
                return full_response

            # Emit failed event and re-raise the exception
            assert hasattr(crewai_event_bus, "emit")
            crewai_event_bus.emit(
                self,
-               event=LLMCallFailedEvent(error=str(e)),
+               event=LLMCallFailedEvent(error=str(e), from_task=from_task, from_agent=from_agent),
            )
            raise Exception(f"Failed to get streaming response: {str(e)}")
@@ -665,6 +671,8 @@ class LLM(BaseLLM):
        tool_calls: List[ChatCompletionDeltaToolCall],
        accumulated_tool_args: DefaultDict[int, AccumulatedToolArgs],
        available_functions: Optional[Dict[str, Any]] = None,
        from_task: Optional[Any] = None,
        from_agent: Optional[Any] = None,
    ) -> None | str:
        for tool_call in tool_calls:
            current_tool_accumulator = accumulated_tool_args[tool_call.index]
@@ -682,6 +690,8 @@ class LLM(BaseLLM):
                        event=LLMStreamChunkEvent(
                            tool_call=tool_call.to_dict(),
                            chunk=tool_call.function.arguments,
                            from_task=from_task,
                            from_agent=from_agent,
                        ),
                    )

@@ -748,6 +758,8 @@ class LLM(BaseLLM):
        params: Dict[str, Any],
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
        from_task: Optional[Any] = None,
        from_agent: Optional[Any] = None,
    ) -> str:
        """Handle a non-streaming response from the LLM.

@@ -755,6 +767,8 @@ class LLM(BaseLLM):
            params: Parameters for the completion call
            callbacks: Optional list of callback functions
            available_functions: Dict of available functions
            from_task: Optional Task that invoked the LLM
            from_agent: Optional Agent that invoked the LLM

        Returns:
            str: The response text
@@ -795,7 +809,7 @@ class LLM(BaseLLM):

        # --- 5) If no tool calls or no available functions, return the text response directly
        if not tool_calls or not available_functions:
-           self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL)
+           self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL, from_task, from_agent)
            return text_response

        # --- 6) Handle tool calls if present
@@ -804,7 +818,7 @@ class LLM(BaseLLM):
            return tool_result

        # --- 7) If tool call handling didn't return a result, emit completion event and return text response
-       self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL)
+       self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL, from_task, from_agent)
        return text_response
    def _handle_tool_call(
@@ -889,6 +903,8 @@ class LLM(BaseLLM):
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
        from_task: Optional[Any] = None,
        from_agent: Optional[Any] = None,
    ) -> Union[str, Any]:
        """High-level LLM call method.

@@ -903,6 +919,8 @@ class LLM(BaseLLM):
                during and after the LLM call.
            available_functions: Optional dict mapping function names to callables
                that can be invoked by the LLM.
            from_task: Optional Task that invoked the LLM
            from_agent: Optional Agent that invoked the LLM

        Returns:
            Union[str, Any]: Either a text response from the LLM (str) or
@@ -922,6 +940,8 @@ class LLM(BaseLLM):
                tools=tools,
                callbacks=callbacks,
                available_functions=available_functions,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

@@ -950,11 +970,11 @@ class LLM(BaseLLM):
            # --- 7) Make the completion call and handle response
            if self.stream:
                return self._handle_streaming_response(
-                   params, callbacks, available_functions
+                   params, callbacks, available_functions, from_task, from_agent
                )
            else:
                return self._handle_non_streaming_response(
-                   params, callbacks, available_functions
+                   params, callbacks, available_functions, from_task, from_agent
                )

        except LLMContextLengthExceededException:
@@ -966,12 +986,12 @@ class LLM(BaseLLM):
            assert hasattr(crewai_event_bus, "emit")
            crewai_event_bus.emit(
                self,
-               event=LLMCallFailedEvent(error=str(e)),
+               event=LLMCallFailedEvent(error=str(e), from_task=from_task, from_agent=from_agent),
            )
            logging.error(f"LiteLLM call failed: {str(e)}")
            raise

-   def _handle_emit_call_events(self, response: Any, call_type: LLMCallType):
+   def _handle_emit_call_events(self, response: Any, call_type: LLMCallType, from_task: Optional[Any] = None, from_agent: Optional[Any] = None):
        """Handle the events for the LLM call.

        Args:
@@ -981,7 +1001,7 @@ class LLM(BaseLLM):
        assert hasattr(crewai_event_bus, "emit")
        crewai_event_bus.emit(
            self,
-           event=LLMCallCompletedEvent(response=response, call_type=call_type),
+           event=LLMCallCompletedEvent(response=response, call_type=call_type, from_task=from_task, from_agent=from_agent),
        )

    def _format_messages_for_provider(
@@ -1,5 +1,5 @@
from abc import ABC, abstractmethod
-from typing import Any, Callable, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union


class BaseLLM(ABC):
@@ -47,6 +47,8 @@ class BaseLLM(ABC):
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
        from_task: Optional[Any] = None,
        from_agent: Optional[Any] = None,
    ) -> Union[str, Any]:
        """Call the LLM with the given messages.

@@ -61,6 +63,7 @@ class BaseLLM(ABC):
                during and after the LLM call.
            available_functions: Optional dict mapping function names to callables
                that can be invoked by the LLM.
            from_task: Optional task caller to be used for the LLM call.

        Returns:
            Either a text response from the LLM (str) or
2 src/crewai/llms/third_party/ai_suite.py (vendored)
@@ -16,6 +16,8 @@ class AISuiteLLM(BaseLLM):
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
        from_task: Optional[Any] = None,
        from_agent: Optional[Any] = None,
    ) -> Union[str, Any]:
        completion_params = self._prepare_completion_params(messages, tools)
        response = self.client.chat.completions.create(**completion_params)
@@ -1,10 +1,20 @@
from typing import Optional
import time

from pydantic import PrivateAttr

from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.rag_storage import RAGStorage
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import (
    MemoryQueryStartedEvent,
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
)


class EntityMemory(Memory):
@@ -48,16 +58,96 @@ class EntityMemory(Memory):

    def save(self, item: EntityMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
        """Saves an entity item into the SQLite storage."""
        if self._memory_provider == "mem0":
            data = f"""
            Remember details about the following entity:
            Name: {item.name}
            Type: {item.type}
            Entity Description: {item.description}
            """
        else:
            data = f"{item.name}({item.type}): {item.description}"
        super().save(data, item.metadata)
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                metadata=item.metadata,
                source_type="entity_memory",
            ),
        )

        start_time = time.time()
        try:
            if self._memory_provider == "mem0":
                data = f"""
                Remember details about the following entity:
                Name: {item.name}
                Type: {item.type}
                Entity Description: {item.description}
                """
            else:
                data = f"{item.name}({item.type}): {item.description}"

            super().save(data, item.metadata)

            # Emit memory save completed event
            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=data,
                    metadata=item.metadata,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="entity_memory",
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    metadata=item.metadata,
                    error=str(e),
                    source_type="entity_memory",
                ),
            )
            raise

    def search(
        self,
        query: str,
        limit: int = 3,
        score_threshold: float = 0.35,
    ):
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=query,
                limit=limit,
                score_threshold=score_threshold,
                source_type="entity_memory",
            ),
        )

        start_time = time.time()
        try:
            results = super().search(
                query=query, limit=limit, score_threshold=score_threshold
            )

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=score_threshold,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="entity_memory",
                ),
            )

            return results
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=score_threshold,
                    error=str(e),
                    source_type="entity_memory",
                ),
            )
            raise

    def reset(self) -> None:
        try:
97 src/crewai/memory/external/external_memory.py (vendored)
@@ -1,8 +1,18 @@
from typing import TYPE_CHECKING, Any, Dict, Optional
import time

from crewai.memory.external.external_memory_item import ExternalMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.interface import Storage
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import (
    MemoryQueryStartedEvent,
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
)

if TYPE_CHECKING:
    from crewai.memory.storage.mem0_storage import Mem0Storage
@@ -46,8 +56,91 @@ class ExternalMemory(Memory):
        agent: Optional[str] = None,
    ) -> None:
        """Saves a value into the external storage."""
        item = ExternalMemoryItem(value=value, metadata=metadata, agent=agent)
        super().save(value=item.value, metadata=item.metadata, agent=item.agent)
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                value=value,
                metadata=metadata,
                agent_role=agent,
                source_type="external_memory",
            ),
        )

        start_time = time.time()
        try:
            item = ExternalMemoryItem(value=value, metadata=metadata, agent=agent)
            super().save(value=item.value, metadata=item.metadata, agent=item.agent)

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=value,
                    metadata=metadata,
                    agent_role=agent,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="external_memory",
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    value=value,
                    metadata=metadata,
                    agent_role=agent,
                    error=str(e),
                    source_type="external_memory",
                ),
            )
            raise

    def search(
        self,
        query: str,
        limit: int = 3,
        score_threshold: float = 0.35,
    ):
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=query,
                limit=limit,
                score_threshold=score_threshold,
                source_type="external_memory",
            ),
        )

        start_time = time.time()
        try:
            results = super().search(
                query=query, limit=limit, score_threshold=score_threshold
            )

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=score_threshold,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="external_memory",
                ),
            )

            return results
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=score_threshold,
                    error=str(e),
                    source_type="external_memory",
                ),
            )
            raise

    def reset(self) -> None:
        self.storage.reset()
@@ -1,7 +1,17 @@
from typing import Any, Dict, List
import time

from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
from crewai.memory.memory import Memory
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import (
    MemoryQueryStartedEvent,
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
)
from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage


@@ -20,17 +30,87 @@ class LongTermMemory(Memory):
        super().__init__(storage=storage)

    def save(self, item: LongTermMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
        metadata = item.metadata
        metadata.update({"agent": item.agent, "expected_output": item.expected_output})
        self.storage.save(  # type: ignore # BUG?: Unexpected keyword argument "task_description","score","datetime" for "save" of "Storage"
            task_description=item.task,
            score=metadata["quality"],
            metadata=metadata,
            datetime=item.datetime,
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                value=item.task,
                metadata=item.metadata,
                agent_role=item.agent,
                source_type="long_term_memory",
            ),
        )

        start_time = time.time()
        try:
            metadata = item.metadata
            metadata.update({"agent": item.agent, "expected_output": item.expected_output})
            self.storage.save(  # type: ignore # BUG?: Unexpected keyword argument "task_description","score","datetime" for "save" of "Storage"
                task_description=item.task,
                score=metadata["quality"],
                metadata=metadata,
                datetime=item.datetime,
            )

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=item.task,
                    metadata=item.metadata,
                    agent_role=item.agent,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="long_term_memory",
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    value=item.task,
                    metadata=item.metadata,
                    agent_role=item.agent,
                    error=str(e),
                    source_type="long_term_memory",
                ),
            )
            raise

    def search(self, task: str, latest_n: int = 3) -> List[Dict[str, Any]]:  # type: ignore # signature of "search" incompatible with supertype "Memory"
        return self.storage.load(task, latest_n)  # type: ignore # BUG?: "Storage" has no attribute "load"
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=task,
                limit=latest_n,
                source_type="long_term_memory",
            ),
        )

        start_time = time.time()
        try:
            results = self.storage.load(task, latest_n)  # type: ignore # BUG?: "Storage" has no attribute "load"

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=task,
                    results=results,
                    limit=latest_n,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="long_term_memory",
                ),
            )

            return results
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=task,
                    limit=latest_n,
                    error=str(e),
                    source_type="long_term_memory",
                ),
            )
            raise

    def reset(self) -> None:
        self.storage.reset()
@@ -1,10 +1,20 @@
from typing import Any, Dict, Optional
import time

from pydantic import PrivateAttr

from crewai.memory.memory import Memory
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
from crewai.memory.storage.rag_storage import RAGStorage
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import (
    MemoryQueryStartedEvent,
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
)


class ShortTermMemory(Memory):
@@ -52,11 +62,46 @@ class ShortTermMemory(Memory):
        metadata: Optional[Dict[str, Any]] = None,
        agent: Optional[str] = None,
    ) -> None:
        item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
        if self._memory_provider == "mem0":
            item.data = f"Remember the following insights from Agent run: {item.data}"
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                value=value,
                metadata=metadata,
                agent_role=agent,
                source_type="short_term_memory",
            ),
        )

        super().save(value=item.data, metadata=item.metadata, agent=item.agent)
        start_time = time.time()
        try:
            item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
            if self._memory_provider == "mem0":
                item.data = f"Remember the following insights from Agent run: {item.data}"

            super().save(value=item.data, metadata=item.metadata, agent=item.agent)

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=value,
                    metadata=metadata,
                    agent_role=agent,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="short_term_memory",
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    value=value,
                    metadata=metadata,
                    agent_role=agent,
                    error=str(e),
                    source_type="short_term_memory",
                ),
            )
            raise

    def search(
        self,
@@ -64,9 +109,47 @@ class ShortTermMemory(Memory):
        limit: int = 3,
        score_threshold: float = 0.35,
    ):
        return self.storage.search(
            query=query, limit=limit, score_threshold=score_threshold
        )  # type: ignore # BUG? The reference is to the parent class, but the parent class does not have this parameters
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=query,
                limit=limit,
                score_threshold=score_threshold,
                source_type="short_term_memory",
            ),
        )

        start_time = time.time()
        try:
            results = self.storage.search(
                query=query, limit=limit, score_threshold=score_threshold
            )  # type: ignore # BUG? The reference is to the parent class, but the parent class does not have this parameters

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=score_threshold,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="short_term_memory",
                ),
            )

            return results
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=score_threshold,
                    error=str(e),
                    source_type="short_term_memory",
                ),
            )
            raise

    def reset(self) -> None:
        try:
@@ -71,14 +71,10 @@ class RAGStorage(BaseRAGStorage):

        self.app = chroma_client

-       try:
-           self.collection = self.app.get_collection(
-               name=self.type, embedding_function=self.embedder_config
-           )
-       except Exception:
-           self.collection = self.app.create_collection(
-               name=self.type, embedding_function=self.embedder_config
-           )
+       self.collection = self.app.get_or_create_collection(
+           name=self.type, embedding_function=self.embedder_config
+       )
+       logging.info(f"Collection found or created: {self.collection}")

    def _sanitize_role(self, role: str) -> str:
        """
@@ -111,11 +111,13 @@ class Telemetry:
                raise  # Re-raise the exception to not interfere with system signals
            self.ready = False

-   def _is_telemetry_disabled(self) -> bool:
+   @classmethod
+   def _is_telemetry_disabled(cls) -> bool:
        """Check if telemetry should be disabled based on environment variables."""
        return (
            os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true"
            or os.getenv("CREWAI_DISABLE_TELEMETRY", "false").lower() == "true"
            or os.getenv("CREWAI_DISABLE_TRACKING", "false").lower() == "true"
        )

    def _should_execute_telemetry(self) -> bool:
@@ -145,12 +145,16 @@ def get_llm_response(
    messages: List[Dict[str, str]],
    callbacks: List[Any],
    printer: Printer,
    from_task: Optional[Any] = None,
    from_agent: Optional[Any] = None,
) -> str:
    """Call the LLM and return the response, handling any invalid responses."""
    try:
        answer = llm.call(
            messages,
            callbacks=callbacks,
            from_task=from_task,
            from_agent=from_agent,
        )
    except Exception as e:
        printer.print(
@@ -5,6 +5,7 @@ from pydantic import BaseModel, Field
from crewai.utilities import Converter
from crewai.utilities.events import TaskEvaluationEvent, crewai_event_bus
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
from crewai.utilities.training_converter import TrainingConverter


class Entity(BaseModel):
@@ -133,7 +134,7 @@ class TaskEvaluator:
        ).get_schema()
        instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"

-       converter = Converter(
+       converter = TrainingConverter(
            llm=self.llm,
            text=evaluation_query,
            model=TrainingTaskEvaluation,
@@ -51,6 +51,71 @@ from .llm_events import (
    LLMStreamChunkEvent,
)

from .memory_events import (
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
    MemoryQueryStartedEvent,
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemoryRetrievalStartedEvent,
    MemoryRetrievalCompletedEvent,
)

# events
from .event_listener import EventListener
from .third_party.agentops_listener import agentops_listener

__all__ = [
    "EventListener",
    "agentops_listener",
    "CrewAIEventsBus",
    "crewai_event_bus",
    "AgentExecutionStartedEvent",
    "AgentExecutionCompletedEvent",
    "AgentExecutionErrorEvent",
    "TaskStartedEvent",
    "TaskCompletedEvent",
    "TaskFailedEvent",
    "TaskEvaluationEvent",
    "FlowCreatedEvent",
    "FlowStartedEvent",
    "FlowFinishedEvent",
    "FlowPlotEvent",
    "MethodExecutionStartedEvent",
    "MethodExecutionFinishedEvent",
    "MethodExecutionFailedEvent",
    "LLMCallCompletedEvent",
    "LLMCallFailedEvent",
    "LLMCallStartedEvent",
    "LLMCallType",
    "LLMStreamChunkEvent",
    "MemorySaveStartedEvent",
    "MemorySaveCompletedEvent",
    "MemorySaveFailedEvent",
    "MemoryQueryStartedEvent",
    "MemoryQueryCompletedEvent",
    "MemoryQueryFailedEvent",
    "MemoryRetrievalStartedEvent",
    "MemoryRetrievalCompletedEvent",
    "EventListener",
    "agentops_listener",
    "CrewKickoffStartedEvent",
    "CrewKickoffCompletedEvent",
    "CrewKickoffFailedEvent",
    "CrewTrainStartedEvent",
    "CrewTrainCompletedEvent",
    "CrewTrainFailedEvent",
    "CrewTestStartedEvent",
    "CrewTestCompletedEvent",
    "CrewTestFailedEvent",
    "LLMGuardrailCompletedEvent",
    "LLMGuardrailStartedEvent",
    "ToolUsageFinishedEvent",
    "ToolUsageErrorEvent",
    "ToolUsageStartedEvent",
    "ToolExecutionErrorEvent",
    "ToolSelectionErrorEvent",
    "ToolUsageEvent",
    "ToolValidateInputErrorEvent",
]
@@ -12,7 +12,7 @@ class BaseEvent(BaseModel):
    timestamp: datetime = Field(default_factory=datetime.now)
    type: str
    source_fingerprint: Optional[str] = None  # UUID string of the source entity
-   source_type: Optional[str] = None  # "agent", "task", "crew"
+   source_type: Optional[str] = None  # "agent", "task", "crew", "memory", "entity_memory", "short_term_memory", "long_term_memory", "external_memory"
    fingerprint_metadata: Optional[Dict[str, Any]] = None  # Any relevant metadata

    def to_json(self, exclude: set[str] | None = None):
@@ -65,6 +65,8 @@ from .reasoning_events import (
    AgentReasoningFailedEvent,
)

from .listeners.memory_listener import MemoryListener


class EventListener(BaseEventListener):
    _instance = None
@@ -91,6 +93,8 @@ class EventListener(BaseEventListener):
            self._initialized = True
            self.formatter = ConsoleFormatter(verbose=True)

            MemoryListener(formatter=self.formatter)

    # ----------- CREW EVENTS -----------

    def setup_listeners(self, crewai_event_bus):
@@ -57,6 +57,17 @@ from .knowledge_events import (
    KnowledgeSearchQueryFailedEvent,
)

from .memory_events import (
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
    MemoryQueryStartedEvent,
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemoryRetrievalStartedEvent,
    MemoryRetrievalCompletedEvent,
)

EventTypes = Union[
    CrewKickoffStartedEvent,
    CrewKickoffCompletedEvent,
@@ -96,4 +107,12 @@ EventTypes = Union[
    KnowledgeQueryCompletedEvent,
    KnowledgeQueryFailedEvent,
    KnowledgeSearchQueryFailedEvent,
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
    MemoryQueryStartedEvent,
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemoryRetrievalStartedEvent,
    MemoryRetrievalCompletedEvent,
]
110 src/crewai/utilities/events/listeners/memory_listener.py Normal file
@@ -0,0 +1,110 @@
|
||||
from crewai.utilities.events.base_event_listener import BaseEventListener
|
||||
from crewai.utilities.events.memory_events import (
|
||||
MemoryRetrievalCompletedEvent,
|
||||
MemoryRetrievalStartedEvent,
|
||||
MemoryQueryFailedEvent,
|
||||
MemoryQueryCompletedEvent,
|
||||
MemorySaveStartedEvent,
|
||||
MemorySaveCompletedEvent,
|
||||
MemorySaveFailedEvent,
|
||||
)
|
||||
|
||||
class MemoryListener(BaseEventListener):
|
||||
|
||||
def __init__(self, formatter):
|
||||
super().__init__()
|
||||
self.formatter = formatter
|
||||
self.memory_retrieval_in_progress = False
|
||||
self.memory_save_in_progress = False
|
||||
|
||||
def setup_listeners(self, crewai_event_bus):
|
||||
@crewai_event_bus.on(MemoryRetrievalStartedEvent)
|
||||
def on_memory_retrieval_started(
|
||||
source, event: MemoryRetrievalStartedEvent
|
||||
):
|
||||
if self.memory_retrieval_in_progress:
|
||||
return
|
||||
|
||||
self.memory_retrieval_in_progress = True
|
||||
|
||||
self.formatter.handle_memory_retrieval_started(
|
||||
self.formatter.current_agent_branch,
|
||||
self.formatter.current_crew_tree,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MemoryRetrievalCompletedEvent)
|
||||
def on_memory_retrieval_completed(
|
||||
source, event: MemoryRetrievalCompletedEvent
|
||||
):
|
||||
if not self.memory_retrieval_in_progress:
|
||||
return
|
||||
|
||||
self.memory_retrieval_in_progress = False
|
||||
self.formatter.handle_memory_retrieval_completed(
|
||||
self.formatter.current_agent_branch,
|
||||
self.formatter.current_crew_tree,
|
||||
event.memory_content,
|
||||
event.retrieval_time_ms
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MemoryQueryCompletedEvent)
|
||||
def on_memory_query_completed(source, event: MemoryQueryCompletedEvent):
|
||||
if not self.memory_retrieval_in_progress:
|
||||
return
|
||||
|
||||
self.formatter.handle_memory_query_completed(
|
||||
self.formatter.current_agent_branch,
|
||||
event.source_type,
|
||||
event.query_time_ms,
|
||||
self.formatter.current_crew_tree,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MemoryQueryFailedEvent)
|
||||
def on_memory_query_failed(source, event: MemoryQueryFailedEvent):
|
||||
if not self.memory_retrieval_in_progress:
|
||||
return
|
||||
|
||||
self.formatter.handle_memory_query_failed(
|
||||
self.formatter.current_agent_branch,
|
||||
self.formatter.current_crew_tree,
|
||||
event.error,
|
||||
event.source_type,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MemorySaveStartedEvent)
|
||||
def on_memory_save_started(source, event: MemorySaveStartedEvent):
|
||||
if self.memory_save_in_progress:
|
||||
return
|
||||
|
||||
self.memory_save_in_progress = True
|
||||
|
||||
self.formatter.handle_memory_save_started(
|
||||
self.formatter.current_agent_branch,
|
||||
self.formatter.current_crew_tree,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MemorySaveCompletedEvent)
|
||||
def on_memory_save_completed(source, event: MemorySaveCompletedEvent):
|
||||
if not self.memory_save_in_progress:
|
||||
return
|
||||
|
||||
self.memory_save_in_progress = False
|
||||
|
||||
self.formatter.handle_memory_save_completed(
|
||||
self.formatter.current_agent_branch,
|
||||
self.formatter.current_crew_tree,
|
||||
event.save_time_ms,
|
||||
event.source_type,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MemorySaveFailedEvent)
|
||||
def on_memory_save_failed(source, event: MemorySaveFailedEvent):
|
||||
if not self.memory_save_in_progress:
|
||||
return
|
||||
|
||||
self.formatter.handle_memory_save_failed(
|
||||
self.formatter.current_agent_branch,
|
||||
event.error,
|
||||
event.source_type,
|
||||
self.formatter.current_crew_tree,
|
||||
)
|
||||
@@ -5,6 +5,32 @@ from pydantic import BaseModel

from crewai.utilities.events.base_events import BaseEvent


class LLMEventBase(BaseEvent):
    task_name: Optional[str] = None
    task_id: Optional[str] = None

    agent_id: Optional[str] = None
    agent_role: Optional[str] = None

    def __init__(self, **data):
        super().__init__(**data)
        self._set_agent_params(data)
        self._set_task_params(data)

    def _set_agent_params(self, data: Dict[str, Any]):
        task = data.get("from_task", None)
        agent = task.agent if task else data.get("from_agent", None)

        if not agent:
            return

        self.agent_id = agent.id
        self.agent_role = agent.role

    def _set_task_params(self, data: Dict[str, Any]):
        if "from_task" in data and (task := data["from_task"]):
            self.task_id = task.id
            self.task_name = task.name


class LLMCallType(Enum):
    """Type of LLM call being made"""
@@ -13,7 +39,7 @@ class LLMCallType(Enum):
    LLM_CALL = "llm_call"


-class LLMCallStartedEvent(BaseEvent):
+class LLMCallStartedEvent(LLMEventBase):
    """Event emitted when a LLM call starts

    Attributes:
@@ -28,7 +54,7 @@ class LLMCallStartedEvent(BaseEvent):
    available_functions: Optional[Dict[str, Any]] = None


-class LLMCallCompletedEvent(BaseEvent):
+class LLMCallCompletedEvent(LLMEventBase):
    """Event emitted when a LLM call completes"""

    type: str = "llm_call_completed"
@@ -36,7 +62,7 @@ class LLMCallCompletedEvent(BaseEvent):
    call_type: LLMCallType


-class LLMCallFailedEvent(BaseEvent):
+class LLMCallFailedEvent(LLMEventBase):
    """Event emitted when a LLM call fails"""

    error: str
@@ -55,7 +81,7 @@ class ToolCall(BaseModel):
    index: int


-class LLMStreamChunkEvent(BaseEvent):
+class LLMStreamChunkEvent(LLMEventBase):
    """Event emitted when a streaming chunk is received"""

    type: str = "llm_stream_chunk"
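With `LLMEventBase` in place, task and agent metadata ride along on every LLM event. A minimal listener sketch, not part of this diff, assuming the emitter passes `from_task`/`from_agent` so the new fields get populated:

from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.llm_events import LLMCallStartedEvent


@crewai_event_bus.on(LLMCallStartedEvent)
def log_llm_call(source, event):
    # agent_role and task_name come from LLMEventBase; both may be None
    # when a call is not tied to a task or agent.
    print(f"LLM call started by {event.agent_role or 'unknown'} "
          f"for task {event.task_name or 'n/a'}")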
src/crewai/utilities/events/memory_events.py (new file, 78 lines)
@@ -0,0 +1,78 @@
from typing import Any, Dict, Optional

from crewai.utilities.events.base_events import BaseEvent


class MemoryQueryStartedEvent(BaseEvent):
    """Event emitted when a memory query is started"""

    type: str = "memory_query_started"
    query: str
    limit: int
    score_threshold: Optional[float] = None


class MemoryQueryCompletedEvent(BaseEvent):
    """Event emitted when a memory query is completed successfully"""

    type: str = "memory_query_completed"
    query: str
    results: Any
    limit: int
    score_threshold: Optional[float] = None
    query_time_ms: float


class MemoryQueryFailedEvent(BaseEvent):
    """Event emitted when a memory query fails"""

    type: str = "memory_query_failed"
    query: str
    limit: int
    score_threshold: Optional[float] = None
    error: str


class MemorySaveStartedEvent(BaseEvent):
    """Event emitted when a memory save operation is started"""

    type: str = "memory_save_started"
    value: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None
    agent_role: Optional[str] = None


class MemorySaveCompletedEvent(BaseEvent):
    """Event emitted when a memory save operation is completed successfully"""

    type: str = "memory_save_completed"
    value: str
    metadata: Optional[Dict[str, Any]] = None
    agent_role: Optional[str] = None
    save_time_ms: float


class MemorySaveFailedEvent(BaseEvent):
    """Event emitted when a memory save operation fails"""

    type: str = "memory_save_failed"
    value: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None
    agent_role: Optional[str] = None
    error: str


class MemoryRetrievalStartedEvent(BaseEvent):
    """Event emitted when memory retrieval for a task prompt starts"""

    type: str = "memory_retrieval_started"
    task_id: Optional[str] = None


class MemoryRetrievalCompletedEvent(BaseEvent):
    """Event emitted when memory retrieval for a task prompt completes successfully"""

    type: str = "memory_retrieval_completed"
    task_id: Optional[str] = None
    memory_content: str
    retrieval_time_ms: float
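These event classes are plain `BaseEvent` payloads, so observability code can subscribe to them on the shared bus. A small sketch, assuming only what the tests further down also rely on (`crewai_event_bus` and its `scoped_handlers` context manager):

from collections import defaultdict

from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.memory_events import (
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
)

query_times = defaultdict(list)

with crewai_event_bus.scoped_handlers():
    @crewai_event_bus.on(MemoryQueryCompletedEvent)
    def record_query_time(source, event):
        # source_type distinguishes short_term/long_term/entity/external memory
        query_times[event.source_type].append(event.query_time_ms)

    @crewai_event_bus.on(MemoryQueryFailedEvent)
    def record_query_failure(source, event):
        print(f"{event.source_type} query failed: {event.error}")

    # ... kick off a crew or call memory.search(...) here ...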
@@ -1454,3 +1454,250 @@ class ConsoleFormatter:
        )
        self.print(finish_panel)
        self.print()

    def handle_memory_retrieval_started(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
    ) -> Optional[Tree]:
        if not self.verbose:
            return None

        branch_to_use = agent_branch or self.current_lite_agent_branch
        tree_to_use = branch_to_use or crew_tree

        if branch_to_use is None or tree_to_use is None:
            if crew_tree is not None:
                branch_to_use = tree_to_use = crew_tree
            else:
                return None

        memory_branch = branch_to_use.add("")
        self.update_tree_label(
            memory_branch, "🧠", "Memory Retrieval Started", "blue"
        )

        self.print(tree_to_use)
        self.print()
        return memory_branch

    def handle_memory_retrieval_completed(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
        memory_content: str,
        retrieval_time_ms: float,
    ) -> None:
        if not self.verbose:
            return None

        branch_to_use = self.current_lite_agent_branch or agent_branch
        tree_to_use = branch_to_use or crew_tree

        if branch_to_use is None and tree_to_use is not None:
            branch_to_use = tree_to_use

        def add_panel():
            memory_text = str(memory_content)
            if len(memory_text) > 500:
                memory_text = memory_text[:497] + "..."

            memory_panel = Panel(
                Text(memory_text, style="white"),
                title="🧠 Retrieved Memory",
                subtitle=f"Retrieval Time: {retrieval_time_ms:.2f}ms",
                border_style="green",
                padding=(1, 2),
            )
            self.print(memory_panel)
            self.print()

        if branch_to_use is None or tree_to_use is None:
            add_panel()
            return None

        memory_branch_found = False
        for child in branch_to_use.children:
            if "Memory Retrieval Started" in str(child.label):
                self.update_tree_label(
                    child, "✅", "Memory Retrieval Completed", "green"
                )
                memory_branch_found = True
                break

        if not memory_branch_found:
            for child in branch_to_use.children:
                if (
                    "Memory Retrieval" in str(child.label)
                    and "Started" not in str(child.label)
                    and "Completed" not in str(child.label)
                ):
                    self.update_tree_label(
                        child, "✅", "Memory Retrieval Completed", "green"
                    )
                    memory_branch_found = True
                    break

        if not memory_branch_found:
            memory_branch = branch_to_use.add("")
            self.update_tree_label(
                memory_branch, "✅", "Memory Retrieval Completed", "green"
            )

        self.print(tree_to_use)

        if memory_content:
            add_panel()

    def handle_memory_query_completed(
        self,
        agent_branch: Optional[Tree],
        source_type: str,
        query_time_ms: float,
        crew_tree: Optional[Tree],
    ) -> None:
        if not self.verbose:
            return None

        branch_to_use = self.current_lite_agent_branch or agent_branch
        tree_to_use = branch_to_use or crew_tree

        if branch_to_use is None and tree_to_use is not None:
            branch_to_use = tree_to_use

        if branch_to_use is None:
            return None

        memory_type = source_type.replace("_", " ").title()

        for child in branch_to_use.children:
            if "Memory Retrieval" in str(child.label):
                for child in child.children:
                    sources_branch = child
                    if "Sources Used" in str(child.label):
                        sources_branch.add(f"✅ {memory_type} ({query_time_ms:.2f}ms)")
                        break
                else:
                    sources_branch = child.add("Sources Used")
                    sources_branch.add(f"✅ {memory_type} ({query_time_ms:.2f}ms)")
                break

    def handle_memory_query_failed(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
        error: str,
        source_type: str,
    ) -> None:
        if not self.verbose:
            return None

        branch_to_use = self.current_lite_agent_branch or agent_branch
        tree_to_use = branch_to_use or crew_tree

        if branch_to_use is None and tree_to_use is not None:
            branch_to_use = tree_to_use

        if branch_to_use is None:
            return None

        memory_type = source_type.replace("_", " ").title()

        for child in branch_to_use.children:
            if "Memory Retrieval" in str(child.label):
                for child in child.children:
                    sources_branch = child
                    if "Sources Used" in str(child.label):
                        sources_branch.add(f"❌ {memory_type} - Error: {error}")
                        break
                else:
                    sources_branch = child.add("🧠 Sources Used")
                    sources_branch.add(f"❌ {memory_type} - Error: {error}")
                break

    def handle_memory_save_started(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree]
    ) -> None:
        if not self.verbose:
            return None

        branch_to_use = agent_branch or self.current_lite_agent_branch
        tree_to_use = branch_to_use or crew_tree

        if tree_to_use is None:
            return None

        for child in tree_to_use.children:
            if "Memory Update" in str(child.label):
                break
        else:
            memory_branch = tree_to_use.add("")
            self.update_tree_label(
                memory_branch, "🧠", "Memory Update Overall", "white"
            )

            self.print(tree_to_use)
            self.print()

    def handle_memory_save_completed(
        self,
        agent_branch: Optional[Tree],
        crew_tree: Optional[Tree],
        save_time_ms: float,
        source_type: str,
    ) -> None:
        if not self.verbose:
            return None

        branch_to_use = agent_branch or self.current_lite_agent_branch
        tree_to_use = branch_to_use or crew_tree

        if tree_to_use is None:
            return None

        memory_type = source_type.replace("_", " ").title()
        content = f"✅ {memory_type} Memory Saved ({save_time_ms:.2f}ms)"

        for child in tree_to_use.children:
            if "Memory Update" in str(child.label):
                child.add(content)
                break
        else:
            memory_branch = tree_to_use.add("")
            memory_branch.add(content)

        self.print(tree_to_use)
        self.print()

    def handle_memory_save_failed(
        self,
        agent_branch: Optional[Tree],
        error: str,
        source_type: str,
        crew_tree: Optional[Tree],
    ) -> None:
        if not self.verbose:
            return None

        branch_to_use = agent_branch or self.current_lite_agent_branch
        tree_to_use = branch_to_use or crew_tree

        if branch_to_use is None or tree_to_use is None:
            return None

        memory_type = source_type.replace("_", " ").title()
        content = f"❌ {memory_type} Memory Save Failed"
        for child in branch_to_use.children:
            if "Memory Update" in str(child.label):
                child.add(content)
                break
        else:
            memory_branch = branch_to_use.add("")
            memory_branch.add(content)

        self.print(tree_to_use)
        self.print()
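These handlers render with Rich primitives (`Tree`, `Panel`, `Text`). A standalone sketch of the shapes they produce, using only the public Rich API and none of crewAI's internals; the labels and timings are illustrative:

from rich.console import Console
from rich.panel import Panel
from rich.text import Text
from rich.tree import Tree

console = Console()

# Tree branches mirror what handle_memory_retrieval_started/query_completed build.
tree = Tree("Crew Execution")
agent_branch = tree.add("Agent: Researcher")
memory_branch = agent_branch.add("🧠 Memory Retrieval Started")
sources = memory_branch.add("Sources Used")
sources.add("✅ Short Term Memory (12.34ms)")
console.print(tree)

# The completion panel mirrors add_panel() above.
console.print(
    Panel(
        Text("...retrieved memory snippet...", style="white"),
        title="🧠 Retrieved Memory",
        subtitle="Retrieval Time: 12.34ms",
        border_style="green",
        padding=(1, 2),
    )
)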
src/crewai/utilities/training_converter.py (new file, 89 lines)
@@ -0,0 +1,89 @@
import json
import re
from typing import Any, get_origin

from pydantic import BaseModel, ValidationError

from crewai.utilities.converter import Converter, ConverterError


class TrainingConverter(Converter):
    """
    A specialized converter for smaller LLMs (up to 7B parameters) that handles validation errors
    by breaking down the model into individual fields and querying the LLM for each field separately.
    """

    def to_pydantic(self, current_attempt=1) -> BaseModel:
        try:
            return super().to_pydantic(current_attempt)
        except ConverterError:
            return self._convert_field_by_field()

    def _convert_field_by_field(self) -> BaseModel:
        field_values = {}

        for field_name, field_info in self.model.model_fields.items():
            field_description = field_info.description
            field_type = field_info.annotation

            response = self._ask_llm_for_field(field_name, field_description)
            value = self._process_field_value(response, field_type)
            field_values[field_name] = value

        try:
            return self.model(**field_values)
        except ValidationError as e:
            raise ConverterError(f"Failed to create model from individually collected fields: {e}")

    def _ask_llm_for_field(self, field_name: str, field_description: str) -> str:
        prompt = f"""
Based on the following information:
{self.text}

Please provide ONLY the {field_name} field value as described:
"{field_description}"

Respond with ONLY the requested information, nothing else.
"""
        return self.llm.call([
            {"role": "system", "content": f"Extract the {field_name} from the previous information."},
            {"role": "user", "content": prompt}
        ])

    def _process_field_value(self, response: str, field_type: Any) -> Any:
        response = response.strip()
        origin = get_origin(field_type)

        if origin is list:
            return self._parse_list(response)

        if field_type is float:
            return self._parse_float(response)

        if field_type is str:
            return response

        return response

    def _parse_list(self, response: str) -> list:
        try:
            if response.startswith('['):
                return json.loads(response)

            items = [item.strip() for item in response.split('\n') if item.strip()]
            return [self._strip_bullet(item) for item in items]

        except json.JSONDecodeError:
            return [response]

    def _parse_float(self, response: str) -> float:
        try:
            match = re.search(r'(\d+(\.\d+)?)', response)
            return float(match.group(1)) if match else 0.0
        except Exception:
            return 0.0

    def _strip_bullet(self, item: str) -> str:
        if item.startswith(('- ', '* ')):
            return item[2:].strip()
        return item.strip()
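A minimal usage sketch for the fallback path, assuming `Converter`'s usual `llm`/`text`/`model`/`instructions` fields; the `LessonPlan` schema, the model choice, and `raw_output` are hypothetical:

from pydantic import BaseModel, Field

from crewai import LLM
from crewai.utilities.training_converter import TrainingConverter


class LessonPlan(BaseModel):
    topic: str = Field(description="The math topic to teach")
    examples: list = Field(description="Concrete examples for a six-year-old")
    quality: float = Field(description="Self-assessed quality score")


llm = LLM(model="gpt-4o-mini")  # hypothetical model choice
raw_output = "Topic: counting\nExamples:\n- apples\n- blocks\nQuality: 8.5"

converter = TrainingConverter(
    llm=llm,
    text=raw_output,
    model=LessonPlan,
    instructions="Convert the text into the LessonPlan schema.",
)

# Tries the normal structured conversion first; on ConverterError it falls
# back to querying the LLM once per field and reassembling the model.
lesson = converter.to_pydantic()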
File diff suppressed because one or more lines are too long (3 files)

tests/cassettes/test_memory_events_are_emitted.yaml (new file, 1969 lines)
File diff suppressed because one or more lines are too long
@@ -9,7 +9,6 @@ from click.testing import CliRunner

from crewai.cli.create_crew import create_crew, create_folder_structure


@pytest.fixture
def runner():
    return CliRunner()
@@ -25,7 +24,7 @@ def temp_dir():
def test_create_folder_structure_strips_single_trailing_slash():
    with tempfile.TemporaryDirectory() as temp_dir:
        folder_path, folder_name, class_name = create_folder_structure("hello/", parent_folder=temp_dir)

        assert folder_name == "hello"
        assert class_name == "Hello"
        assert folder_path.name == "hello"
@@ -36,7 +35,7 @@ def test_create_folder_structure_strips_single_trailing_slash():
def test_create_folder_structure_strips_multiple_trailing_slashes():
    with tempfile.TemporaryDirectory() as temp_dir:
        folder_path, folder_name, class_name = create_folder_structure("hello///", parent_folder=temp_dir)

        assert folder_name == "hello"
        assert class_name == "Hello"
        assert folder_path.name == "hello"
@@ -47,7 +46,7 @@ def test_create_folder_structure_strips_multiple_trailing_slashes():
def test_create_folder_structure_handles_complex_name_with_trailing_slash():
    with tempfile.TemporaryDirectory() as temp_dir:
        folder_path, folder_name, class_name = create_folder_structure("my-awesome_project/", parent_folder=temp_dir)

        assert folder_name == "my_awesome_project"
        assert class_name == "MyAwesomeProject"
        assert folder_path.name == "my_awesome_project"
@@ -58,7 +57,7 @@ def test_create_folder_structure_handles_complex_name_with_trailing_slash():
def test_create_folder_structure_normal_name_unchanged():
    with tempfile.TemporaryDirectory() as temp_dir:
        folder_path, folder_name, class_name = create_folder_structure("hello", parent_folder=temp_dir)

        assert folder_name == "hello"
        assert class_name == "Hello"
        assert folder_path.name == "hello"
@@ -73,9 +72,9 @@ def test_create_folder_structure_with_parent_folder():
    with tempfile.TemporaryDirectory() as temp_dir:
        parent_path = Path(temp_dir) / "parent"
        parent_path.mkdir()

        folder_path, folder_name, class_name = create_folder_structure("child/", parent_folder=parent_path)

        assert folder_name == "child"
        assert class_name == "Child"
        assert folder_path.name == "child"
@@ -88,18 +87,18 @@ def test_create_folder_structure_with_parent_folder():
@mock.patch("crewai.cli.create_crew.load_env_vars")
def test_create_crew_with_trailing_slash_creates_valid_project(mock_load_env, mock_write_env, mock_copy_template, temp_dir):
    mock_load_env.return_value = {}

    with tempfile.TemporaryDirectory() as work_dir:
        with mock.patch("crewai.cli.create_crew.create_folder_structure") as mock_create_folder:
            mock_folder_path = Path(work_dir) / "test_project"
            mock_create_folder.return_value = (mock_folder_path, "test_project", "TestProject")

            create_crew("test-project/", skip_provider=True)

            mock_create_folder.assert_called_once_with("test-project/", None)
            mock_copy_template.assert_called()
            copy_calls = mock_copy_template.call_args_list

            for call in copy_calls:
                args = call[0]
                if len(args) >= 5:
@@ -112,14 +111,14 @@ def test_create_crew_with_trailing_slash_creates_valid_project(mock_load_env, mo
@mock.patch("crewai.cli.create_crew.load_env_vars")
def test_create_crew_with_multiple_trailing_slashes(mock_load_env, mock_write_env, mock_copy_template, temp_dir):
    mock_load_env.return_value = {}

    with tempfile.TemporaryDirectory() as work_dir:
        with mock.patch("crewai.cli.create_crew.create_folder_structure") as mock_create_folder:
            mock_folder_path = Path(work_dir) / "test_project"
            mock_create_folder.return_value = (mock_folder_path, "test_project", "TestProject")

            create_crew("test-project///", skip_provider=True)

            mock_create_folder.assert_called_once_with("test-project///", None)

@@ -128,21 +127,21 @@ def test_create_crew_with_multiple_trailing_slashes(mock_load_en
@mock.patch("crewai.cli.create_crew.load_env_vars")
def test_create_crew_normal_name_still_works(mock_load_env, mock_write_env, mock_copy_template, temp_dir):
    mock_load_env.return_value = {}

    with tempfile.TemporaryDirectory() as work_dir:
        with mock.patch("crewai.cli.create_crew.create_folder_structure") as mock_create_folder:
            mock_folder_path = Path(work_dir) / "normal_project"
            mock_create_folder.return_value = (mock_folder_path, "normal_project", "NormalProject")

            create_crew("normal-project", skip_provider=True)

            mock_create_folder.assert_called_once_with("normal-project", None)


def test_create_folder_structure_handles_spaces_and_dashes_with_slash():
    with tempfile.TemporaryDirectory() as temp_dir:
        folder_path, folder_name, class_name = create_folder_structure("My Cool-Project/", parent_folder=temp_dir)

        assert folder_name == "my_cool_project"
        assert class_name == "MyCoolProject"
        assert folder_path.name == "my_cool_project"
@@ -155,7 +154,7 @@ def test_create_folder_structure_raises_error_for_invalid_names():
        invalid_cases = [
            ("123project/", "cannot start with a digit"),
            ("True/", "reserved Python keyword"),
            ("False/", "reserved Python keyword"),
            ("False/", "reserved Python keyword"),
            ("None/", "reserved Python keyword"),
            ("class/", "reserved Python keyword"),
            ("def/", "reserved Python keyword"),
@@ -163,7 +162,7 @@ def test_create_folder_structure_raises_error_for_invalid_names():
            ("", "empty or contain only whitespace"),
            ("@#$/", "contains no valid characters"),
        ]

        for invalid_name, expected_error in invalid_cases:
            with pytest.raises(ValueError, match=expected_error):
                create_folder_structure(invalid_name, parent_folder=temp_dir)
@@ -179,20 +178,20 @@ def test_create_folder_structure_validates_names():
            ("hello.world/", "helloworld", "HelloWorld"),
            ("hello@world/", "helloworld", "HelloWorld"),
        ]

        for valid_name, expected_folder, expected_class in valid_cases:
            folder_path, folder_name, class_name = create_folder_structure(valid_name, parent_folder=temp_dir)
            assert folder_name == expected_folder
            assert class_name == expected_class

            assert folder_name.isidentifier(), f"folder_name '{folder_name}' should be valid Python identifier"
            assert not keyword.iskeyword(folder_name), f"folder_name '{folder_name}' should not be Python keyword"
            assert not folder_name[0].isdigit(), f"folder_name '{folder_name}' should not start with digit"

            assert class_name.isidentifier(), f"class_name '{class_name}' should be valid Python identifier"
            assert not keyword.iskeyword(class_name), f"class_name '{class_name}' should not be Python keyword"
            assert folder_path.parent == Path(temp_dir)

            if folder_path.exists():
                shutil.rmtree(folder_path)

@@ -202,13 +201,13 @@ def test_create_folder_structure_validates_names():
@mock.patch("crewai.cli.create_crew.load_env_vars")
def test_create_crew_with_parent_folder_and_trailing_slash(mock_load_env, mock_write_env, mock_copy_template, temp_dir):
    mock_load_env.return_value = {}

    with tempfile.TemporaryDirectory() as work_dir:
        parent_path = Path(work_dir) / "parent"
        parent_path.mkdir()

        create_crew("child-crew/", skip_provider=True, parent_folder=parent_path)

        crew_path = parent_path / "child_crew"
        assert crew_path.exists()
        assert not (crew_path / "src").exists()
@@ -224,23 +223,56 @@ def test_create_folder_structure_folder_name_validation():
            ("for/", "reserved Python keyword"),
            ("@#$invalid/", "contains no valid characters.*Python module name"),
        ]

        for invalid_name, expected_error in folder_invalid_cases:
            with pytest.raises(ValueError, match=expected_error):
                create_folder_structure(invalid_name, parent_folder=temp_dir)

        valid_cases = [
            ("hello-world/", "hello_world"),
            ("my.project/", "myproject"),
            ("test@123/", "test123"),
            ("valid_name/", "valid_name"),
        ]

        for valid_name, expected_folder in valid_cases:
            folder_path, folder_name, class_name = create_folder_structure(valid_name, parent_folder=temp_dir)
            assert folder_name == expected_folder
            assert folder_name.isidentifier()
            assert not keyword.iskeyword(folder_name)

            if folder_path.exists():
                shutil.rmtree(folder_path)

@mock.patch("crewai.cli.create_crew.create_folder_structure")
@mock.patch("crewai.cli.create_crew.copy_template")
@mock.patch("crewai.cli.create_crew.load_env_vars")
@mock.patch("crewai.cli.create_crew.get_provider_data")
@mock.patch("crewai.cli.create_crew.select_provider")
@mock.patch("crewai.cli.create_crew.select_model")
@mock.patch("click.prompt")
def test_env_vars_are_uppercased_in_env_file(
    mock_prompt,
    mock_select_model,
    mock_select_provider,
    mock_get_provider_data,
    mock_load_env_vars,
    mock_copy_template,
    mock_create_folder_structure,
    tmp_path
):
    crew_path = tmp_path / "test_crew"
    crew_path.mkdir()
    mock_create_folder_structure.return_value = (crew_path, "test_crew", "TestCrew")

    mock_load_env_vars.return_value = {}
    mock_get_provider_data.return_value = {"openai": ["gpt-4"]}
    mock_select_provider.return_value = "azure"
    mock_select_model.return_value = "azure/openai"
    mock_prompt.return_value = "fake-api-key"

    create_crew("Test Crew")

    env_file_path = crew_path / ".env"
    content = env_file_path.read_text()
    assert "MODEL=" in content
@@ -5,6 +5,7 @@ import json
from concurrent.futures import Future
from unittest import mock
from unittest.mock import ANY, MagicMock, patch
from collections import defaultdict

import pydantic_core
import pytest
@@ -40,6 +41,16 @@ from crewai.utilities.events.event_listener import EventListener
from crewai.utilities.rpm_controller import RPMController
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler

from crewai.utilities.events.memory_events import (
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
    MemoryQueryStartedEvent,
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemoryRetrievalStartedEvent,
    MemoryRetrievalCompletedEvent,
)

@pytest.fixture
def ceo():
@@ -2478,11 +2489,79 @@ def test_using_contextual_memory():
        memory=True,
    )

-    with patch.object(ContextualMemory, "build_context_for_task") as contextual_mem:
+    with patch.object(ContextualMemory, "build_context_for_task", return_value="") as contextual_mem:
        crew.kickoff()
        contextual_mem.assert_called_once()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_memory_events_are_emitted():
    events = defaultdict(list)

    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(MemorySaveStartedEvent)
        def handle_memory_save_started(source, event):
            events["MemorySaveStartedEvent"].append(event)

        @crewai_event_bus.on(MemorySaveCompletedEvent)
        def handle_memory_save_completed(source, event):
            events["MemorySaveCompletedEvent"].append(event)

        @crewai_event_bus.on(MemorySaveFailedEvent)
        def handle_memory_save_failed(source, event):
            events["MemorySaveFailedEvent"].append(event)

        @crewai_event_bus.on(MemoryQueryStartedEvent)
        def handle_memory_query_started(source, event):
            events["MemoryQueryStartedEvent"].append(event)

        @crewai_event_bus.on(MemoryQueryCompletedEvent)
        def handle_memory_query_completed(source, event):
            events["MemoryQueryCompletedEvent"].append(event)

        @crewai_event_bus.on(MemoryQueryFailedEvent)
        def handle_memory_query_failed(source, event):
            events["MemoryQueryFailedEvent"].append(event)

        @crewai_event_bus.on(MemoryRetrievalStartedEvent)
        def handle_memory_retrieval_started(source, event):
            events["MemoryRetrievalStartedEvent"].append(event)

        @crewai_event_bus.on(MemoryRetrievalCompletedEvent)
        def handle_memory_retrieval_completed(source, event):
            events["MemoryRetrievalCompletedEvent"].append(event)

        math_researcher = Agent(
            role="Researcher",
            goal="You research about math.",
            backstory="You're an expert in research and you love to learn new things.",
            allow_delegation=False,
        )

        task1 = Task(
            description="Research a topic to teach a kid aged 6 about math.",
            expected_output="A topic, explanation, angle, and examples.",
            agent=math_researcher,
        )

        crew = Crew(
            agents=[math_researcher],
            tasks=[task1],
            memory=True,
        )

        crew.kickoff()

        assert len(events["MemorySaveStartedEvent"]) == 6
        assert len(events["MemorySaveCompletedEvent"]) == 6
        assert len(events["MemorySaveFailedEvent"]) == 0
        assert len(events["MemoryQueryStartedEvent"]) == 3
        assert len(events["MemoryQueryCompletedEvent"]) == 3
        assert len(events["MemoryQueryFailedEvent"]) == 0
        assert len(events["MemoryRetrievalStartedEvent"]) == 1
        assert len(events["MemoryRetrievalCompletedEvent"]) == 1

@pytest.mark.vcr(filter_headers=["authorization"])
def test_using_contextual_memory_with_long_term_memory():
    from unittest.mock import patch
@@ -2506,7 +2585,7 @@ def test_using_contextual_memory_with_long_term_memory():
        long_term_memory=LongTermMemory(),
    )

-    with patch.object(ContextualMemory, "build_context_for_task") as contextual_mem:
+    with patch.object(ContextualMemory, "build_context_for_task", return_value="") as contextual_mem:
        crew.kickoff()
        contextual_mem.assert_called_once()
        assert crew.memory is False
@@ -2607,7 +2686,7 @@ def test_using_contextual_memory_with_short_term_memory():
        short_term_memory=ShortTermMemory(),
    )

-    with patch.object(ContextualMemory, "build_context_for_task") as contextual_mem:
+    with patch.object(ContextualMemory, "build_context_for_task", return_value="") as contextual_mem:
        crew.kickoff()
        contextual_mem.assert_called_once()
        assert crew.memory is False
@@ -2636,7 +2715,7 @@ def test_disabled_memory_using_contextual_memory():
        memory=False,
    )

-    with patch.object(ContextualMemory, "build_context_for_task") as contextual_mem:
+    with patch.object(ContextualMemory, "build_context_for_task", return_value="") as contextual_mem:
        crew.kickoff()
        contextual_mem.assert_not_called()
@@ -1,5 +1,4 @@
from typing import Any, Dict, List, Optional, Union
from unittest.mock import Mock

import pytest

@@ -31,6 +30,8 @@ class CustomLLM(BaseLLM):
        tools=None,
        callbacks=None,
        available_functions=None,
        from_task=None,
        from_agent=None,
    ):
        """
        Mock LLM call that returns a predefined response.
@@ -1,5 +1,12 @@
-from unittest.mock import MagicMock, patch
+from unittest.mock import MagicMock, patch, ANY
from collections import defaultdict
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.memory_events import (
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
    MemoryQueryStartedEvent,
    MemoryQueryCompletedEvent,
)
import pytest
from mem0.memory.main import Memory

@@ -10,7 +17,6 @@ from crewai.memory.external.external_memory_item import ExternalMemoryItem
from crewai.memory.storage.interface import Storage
from crewai.task import Task


@pytest.fixture
def mock_mem0_memory():
    mock_memory = MagicMock(spec=Memory)
@@ -188,7 +194,8 @@ def test_crew_external_memory_save_using_crew_without_memory_flag(
    assert mock_method.call_count > 0


-def test_external_memory_custom_storage(crew_with_external_memory):
+@pytest.fixture
+def custom_storage():
    class CustomStorage(Storage):
        def __init__(self):
            self.memories = []
@@ -203,6 +210,9 @@ def test_external_memory_custom_storage(crew_with_external_memory):
            self.memories = []

    custom_storage = CustomStorage()
    return custom_storage

def test_external_memory_custom_storage(custom_storage, crew_with_external_memory):
    external_memory = ExternalMemory(storage=custom_storage)

    # by ensuring the crew is set, we can test that the storage is used
@@ -221,3 +231,101 @@ def test_external_memory_custom_storage(crew_with_external_memory):
    external_memory.reset()
    results = external_memory.search("test")
    assert len(results) == 0


def test_external_memory_search_events(custom_storage, external_memory_with_mocked_config):
    events = defaultdict(list)

    external_memory_with_mocked_config.storage = custom_storage
    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(MemoryQueryStartedEvent)
        def on_search_started(source, event):
            events["MemoryQueryStartedEvent"].append(event)

        @crewai_event_bus.on(MemoryQueryCompletedEvent)
        def on_search_completed(source, event):
            events["MemoryQueryCompletedEvent"].append(event)

        external_memory_with_mocked_config.search(
            query="test value",
            limit=3,
            score_threshold=0.35,
        )

        assert len(events["MemoryQueryStartedEvent"]) == 1
        assert len(events["MemoryQueryCompletedEvent"]) == 1
        assert len(events["MemoryQueryFailedEvent"]) == 0

        assert dict(events["MemoryQueryStartedEvent"][0]) == {
            'timestamp': ANY,
            'type': 'memory_query_started',
            'source_fingerprint': None,
            'source_type': 'external_memory',
            'fingerprint_metadata': None,
            'query': 'test value',
            'limit': 3,
            'score_threshold': 0.35
        }

        assert dict(events["MemoryQueryCompletedEvent"][0]) == {
            'timestamp': ANY,
            'type': 'memory_query_completed',
            'source_fingerprint': None,
            'source_type': 'external_memory',
            'fingerprint_metadata': None,
            'query': 'test value',
            'results': [],
            'limit': 3,
            'score_threshold': 0.35,
            'query_time_ms': ANY
        }


def test_external_memory_save_events(custom_storage, external_memory_with_mocked_config):
    events = defaultdict(list)

    external_memory_with_mocked_config.storage = custom_storage

    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(MemorySaveStartedEvent)
        def on_save_started(source, event):
            events["MemorySaveStartedEvent"].append(event)

        @crewai_event_bus.on(MemorySaveCompletedEvent)
        def on_save_completed(source, event):
            events["MemorySaveCompletedEvent"].append(event)

        external_memory_with_mocked_config.save(
            value="saving value",
            metadata={"task": "test_task"},
            agent="test_agent",
        )

        assert len(events["MemorySaveStartedEvent"]) == 1
        assert len(events["MemorySaveCompletedEvent"]) == 1
        assert len(events["MemorySaveFailedEvent"]) == 0

        assert dict(events["MemorySaveStartedEvent"][0]) == {
            'timestamp': ANY,
            'type': 'memory_save_started',
            'source_fingerprint': None,
            'source_type': 'external_memory',
            'fingerprint_metadata': None,
            'value': 'saving value',
            'metadata': {'task': 'test_task'},
            'agent_role': "test_agent"
        }

        assert dict(events["MemorySaveCompletedEvent"][0]) == {
            'timestamp': ANY,
            'type': 'memory_save_completed',
            'source_fingerprint': None,
            'source_type': 'external_memory',
            'fingerprint_metadata': None,
            'value': 'saving value',
            'metadata': {'task': 'test_task', 'agent': 'test_agent'},
            'agent_role': "test_agent",
            'save_time_ms': ANY
        }
@@ -1,8 +1,15 @@
import pytest

from unittest.mock import ANY
from collections import defaultdict
from crewai.utilities.events import crewai_event_bus
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem

from crewai.utilities.events.memory_events import (
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
    MemoryQueryStartedEvent,
    MemoryQueryCompletedEvent,
)

@pytest.fixture
def long_term_memory():
@@ -10,6 +17,103 @@ def long_term_memory():
    return LongTermMemory()


def test_long_term_memory_save_events(long_term_memory):
    events = defaultdict(list)

    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(MemorySaveStartedEvent)
        def on_save_started(source, event):
            events["MemorySaveStartedEvent"].append(event)

        @crewai_event_bus.on(MemorySaveCompletedEvent)
        def on_save_completed(source, event):
            events["MemorySaveCompletedEvent"].append(event)

        memory = LongTermMemoryItem(
            agent="test_agent",
            task="test_task",
            expected_output="test_output",
            datetime="test_datetime",
            quality=0.5,
            metadata={"task": "test_task", "quality": 0.5},
        )
        long_term_memory.save(memory)

        assert len(events["MemorySaveStartedEvent"]) == 1
        assert len(events["MemorySaveCompletedEvent"]) == 1
        assert len(events["MemorySaveFailedEvent"]) == 0

        assert dict(events["MemorySaveStartedEvent"][0]) == {
            "timestamp": ANY,
            "type": "memory_save_started",
            "source_fingerprint": None,
            "source_type": "long_term_memory",
            "fingerprint_metadata": None,
            "value": "test_task",
            "metadata": {"task": "test_task", "quality": 0.5},
            "agent_role": "test_agent",
        }
        assert dict(events["MemorySaveCompletedEvent"][0]) == {
            "timestamp": ANY,
            "type": "memory_save_completed",
            "source_fingerprint": None,
            "source_type": "long_term_memory",
            "fingerprint_metadata": None,
            "value": "test_task",
            "metadata": {"task": "test_task", "quality": 0.5, "agent": "test_agent", "expected_output": "test_output"},
            "agent_role": "test_agent",
            "save_time_ms": ANY,
        }


def test_long_term_memory_search_events(long_term_memory):
    events = defaultdict(list)

    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(MemoryQueryStartedEvent)
        def on_search_started(source, event):
            events["MemoryQueryStartedEvent"].append(event)

        @crewai_event_bus.on(MemoryQueryCompletedEvent)
        def on_search_completed(source, event):
            events["MemoryQueryCompletedEvent"].append(event)

        test_query = "test query"

        long_term_memory.search(
            test_query,
            latest_n=5
        )

        assert len(events["MemoryQueryStartedEvent"]) == 1
        assert len(events["MemoryQueryCompletedEvent"]) == 1
        assert len(events["MemoryQueryFailedEvent"]) == 0

        assert dict(events["MemoryQueryStartedEvent"][0]) == {
            'timestamp': ANY,
            'type': 'memory_query_started',
            'source_fingerprint': None,
            'source_type': 'long_term_memory',
            'fingerprint_metadata': None,
            'query': 'test query',
            'limit': 5,
            'score_threshold': None
        }

        assert dict(events["MemoryQueryCompletedEvent"][0]) == {
            'timestamp': ANY,
            'type': 'memory_query_completed',
            'source_fingerprint': None,
            'source_type': 'long_term_memory',
            'fingerprint_metadata': None,
            'query': 'test query',
            'results': None,
            'limit': 5,
            'score_threshold': None,
            'query_time_ms': ANY
        }


def test_save_and_search(long_term_memory):
    memory = LongTermMemoryItem(
        agent="test_agent",
@@ -1,5 +1,5 @@
-from unittest.mock import patch
+from unittest.mock import patch, ANY
from collections import defaultdict
import pytest

from crewai.agent import Agent
@@ -7,6 +7,13 @@ from crewai.crew import Crew
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
from crewai.task import Task
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.memory_events import (
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
    MemoryQueryStartedEvent,
    MemoryQueryCompletedEvent,
)


@pytest.fixture
@@ -28,6 +35,98 @@ def short_term_memory():
    return ShortTermMemory(crew=Crew(agents=[agent], tasks=[task]))


def test_short_term_memory_search_events(short_term_memory):
    events = defaultdict(list)

    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(MemoryQueryStartedEvent)
        def on_search_started(source, event):
            events["MemoryQueryStartedEvent"].append(event)

        @crewai_event_bus.on(MemoryQueryCompletedEvent)
        def on_search_completed(source, event):
            events["MemoryQueryCompletedEvent"].append(event)

        # Call the search method
        short_term_memory.search(
            query="test value",
            limit=3,
            score_threshold=0.35,
        )

        assert len(events["MemoryQueryStartedEvent"]) == 1
        assert len(events["MemoryQueryCompletedEvent"]) == 1
        assert len(events["MemoryQueryFailedEvent"]) == 0

        assert dict(events["MemoryQueryStartedEvent"][0]) == {
            'timestamp': ANY,
            'type': 'memory_query_started',
            'source_fingerprint': None,
            'source_type': 'short_term_memory',
            'fingerprint_metadata': None,
            'query': 'test value',
            'limit': 3,
            'score_threshold': 0.35
        }

        assert dict(events["MemoryQueryCompletedEvent"][0]) == {
            'timestamp': ANY,
            'type': 'memory_query_completed',
            'source_fingerprint': None,
            'source_type': 'short_term_memory',
            'fingerprint_metadata': None,
            'query': 'test value',
            'results': [],
            'limit': 3,
            'score_threshold': 0.35,
            'query_time_ms': ANY
        }


def test_short_term_memory_save_events(short_term_memory):
    events = defaultdict(list)
    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(MemorySaveStartedEvent)
        def on_save_started(source, event):
            events["MemorySaveStartedEvent"].append(event)

        @crewai_event_bus.on(MemorySaveCompletedEvent)
        def on_save_completed(source, event):
            events["MemorySaveCompletedEvent"].append(event)

        short_term_memory.save(
            value="test value",
            metadata={"task": "test_task"},
            agent="test_agent",
        )

        assert len(events["MemorySaveStartedEvent"]) == 1
        assert len(events["MemorySaveCompletedEvent"]) == 1
        assert len(events["MemorySaveFailedEvent"]) == 0

        assert dict(events["MemorySaveStartedEvent"][0]) == {
            'timestamp': ANY,
            'type': 'memory_save_started',
            'source_fingerprint': None,
            'source_type': 'short_term_memory',
            'fingerprint_metadata': None,
            'value': 'test value',
            'metadata': {'task': 'test_task'},
            'agent_role': "test_agent"
        }

        assert dict(events["MemorySaveCompletedEvent"][0]) == {
            'timestamp': ANY,
            'type': 'memory_save_completed',
            'source_fingerprint': None,
            'source_type': 'short_term_memory',
            'fingerprint_metadata': None,
            'value': 'test value',
            'metadata': {'task': 'test_task', 'agent': 'test_agent'},
            'agent_role': "test_agent",
            'save_time_ms': ANY
        }

def test_save_and_search(short_term_memory):
    memory = ShortTermMemoryItem(
        data="""test value test value test value test value test value test value
@@ -0,0 +1,171 @@
interactions:
- request:
    body: '{"messages": [{"role": "system", "content": "You are Speaker. You are a
      helpful assistant that just says hi\nYour personal goal is: Just say hi\n\nTo
      give my best complete final answer to the task respond using the exact following
      format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
      answer must be the great and the most complete as possible, it must be outcome
      described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
      "content": "say hi!"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"],
      "stream": true, "stream_options": {"include_usage": true}}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '602'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.78.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.78.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-read-timeout:
      - '600.0'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.12
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" \n"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" Hi"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[],"usage":{"prompt_tokens":99,"completion_tokens":15,"total_tokens":114,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}}}


        data: [DONE]


        '
    headers:
      CF-RAY:
      - 9580b92adce5e838-GRU
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Mon, 30 Jun 2025 21:23:12 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=nhFmL5HNobQWdbf2Sd9Z8X9ad5zXKG7Ln7MlzuiuwP8-1751318592-1.0.1.1-5qDyF6nVC5d8PDerEmHSOgyWEYdzMdgyFRXqgiJB3FSyWWnvzL4PyVp6LGx9z0P5iTX8PNbxfUOEOYX.7bFaK6p.CyxLaXK7WpnQ3zeasG8;
        path=/; expires=Mon, 30-Jun-25 21:53:12 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=APKo781sOKEk.HlN5nFBT1Mkid8Lj04kw6JPleI78bU-1751318592001-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '321'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-envoy-upstream-service-time:
      - '326'
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999896'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_0b0f668953604810c182b1e83e9709fe
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are base_agent. You are
|
||||
a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Just say hi\n\nThis is the expected criteria for
|
||||
your final answer: hi\nyou MUST return the actual complete content as the final
|
||||
answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
|
||||
available and give your best Final Answer, your job depends on it!\n\nThought:"}],
|
||||
"model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '838'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.78.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.78.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.12
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4xSTW/bMAy9+1cQOsdDnI9m861ZMGAbBuyyHbYWBiMxtjaZEiS5aVHkvw+y09jd
|
||||
OqAXA+bje3qP5GMGILQSJQjZYJStM/nW7j6z+vb9R739+mm707vj7sv2gffq/v2VEbPEsPtfJOMT
|
||||
6420rTMUteUBlp4wUlItNutiOd8s15seaK0ik2i1i/nK5q1mnS/mi1U+3+TF2zO7sVpSECX8zAAA
|
||||
Hvtv8smK7kUJ89lTpaUQsCZRXpoAhLcmVQSGoENEjmI2gtJyJO6tfwS2R5DIUOs7AoQ62QbkcCQP
|
||||
cMMfNKOB6/6/hEZPdTwduoApC3fGTABkthHTLPoEt2fkdPFsbO283Ye/qOKgWYem8oTBcvIXonWi
|
||||
R08ZwG0/m+5ZXOG8bV2sov1N/XPFVTHoiXElE3RxBqONaCb1zXL2gl6lKKI2YTJdIVE2pEbquArs
|
||||
lLYTIJuk/tfNS9pDcs31a+RHQEpykVTlPCktnyce2zyli/1f22XKvWERyN9pSVXU5NMmFB2wM8Md
|
||||
ifAQIrXVQXNN3nk9HNPBVcsVrldI75ZSZKfsDwAAAP//AwBulbOoWgMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 957fa6e91a22023d-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 30 Jun 2025 18:15:58 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=9WXNY0u6p0Nlyb1G36cXHDgtwb1538JzaUNoS4tgrpo-1751307358-1.0.1.1-BAvg6Fgqsv3ITFxrC3z3E42AqgSZcGq4Gr1Wrjx56TOsljYynqCePNzQ79oAncT9KXehFnUMxA6JSf2lAfQOeSLVREY3_P6GjPkbcwIsVXw;
|
||||
path=/; expires=Mon, 30-Jun-25 18:45:58 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=N5kr6p8e26f8scPW5s2uVOatzh2RYjlQb13eQUBsrts-1751307358295-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '308'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '310'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999823'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_78bb0375ac6e0939c8e05f66869e0137
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
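The YAML blocks in this range are VCR cassettes: recorded OpenAI chat-completions exchanges that the test suite replays offline, which is why the tests further down are marked @pytest.mark.vcr(filter_headers=["authorization"]) and why no Authorization header appears in the recording. A minimal sketch of replaying a cassette with vcrpy directly; the cassette path here is hypothetical:

    import vcr

    # Any request to api.openai.com inside this block is answered from the
    # recording above instead of the network; the authorization header is
    # stripped before anything is persisted.
    with vcr.use_cassette(
        "tests/cassettes/just_say_hi.yaml",  # hypothetical path
        filter_headers=["authorization"],
    ):
        ...  # code under test issues its chat-completions call here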
@@ -0,0 +1,165 @@
interactions:
- request:
    body: '{"messages": [{"role": "system", "content": "You are TestAgent. You are
      a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo
      give my best complete final answer to the task respond using the exact following
      format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
      answer must be the great and the most complete as possible, it must be outcome
      described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
      "content": "\nCurrent Task: Just say hi\n\nThis is the expected criteria for
      your final answer: hi\nyou MUST return the actual complete content as the final
      answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
      available and give your best Final Answer, your job depends on it!\n\nThought:"}],
      "model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream": true, "stream_options":
      {"include_usage": true}}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '896'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.78.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.78.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-read-timeout:
      - '600.0'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.12
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        now"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        can"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        give"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        a"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        great"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        answer"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" \n"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        Answer"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        hi"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null}


        data: {"id":"chatcmpl-BoFTMrRBBaVsju5o285dR7JBeVVvS","object":"chat.completion.chunk","created":1751315576,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[],"usage":{"prompt_tokens":161,"completion_tokens":12,"total_tokens":173,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}}}


        data: [DONE]


        '
    headers:
      CF-RAY:
      - 95806f8e3fc2f213-GRU
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Mon, 30 Jun 2025 20:32:56 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=dxb.Rn1CsTQLjW9eU0KWonEuID9KkkVRJ1FaBBsW5gQ-1751315576-1.0.1.1-R7bLnCfrjJwtHYzbKEE9in7ilYfymelWgYg1OcPqSEllAFA9_R2ctsY0f7Mrv7i0dXaynAooDpLs9hGIzfgyBR9EgkRjqoaHbByPXjxy_5s;
        path=/; expires=Mon, 30-Jun-25 21:02:56 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=MwdjLsfFXJDWzbJseVfA4MIpVAqLa7envAu7EAkdK4o-1751315576696-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '238'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-envoy-upstream-service-time:
      - '241'
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999824'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_394b055696dfcdc221b5ecd0fba49e97
    status:
      code: 200
      message: OK
version: 1
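This second cassette records the same exchange with "stream": true, so the response body is a raw text/event-stream transcript: each data: line carries one chunk, chunks are separated by blank lines, and data: [DONE] closes the stream. A small sketch of walking such a recorded body (assumes the cassette has already been loaded, so the body is a plain string; the helper name is illustrative):

    import json

    def iter_stream_payloads(sse_body: str):
        # Yield the JSON payload of every `data:` event in the transcript.
        for line in sse_body.splitlines():
            line = line.strip()
            if not line.startswith("data:"):
                continue  # blank separator lines between events
            payload = line[len("data:"):].strip()
            if payload == "[DONE]":  # OpenAI's end-of-stream sentinel
                return
            yield json.loads(payload)

Concatenating the content deltas of the chunks above reconstructs the full completion, "I now can give a great answer \nFinal Answer: hi", which is what the streaming tests further down observe as individual LLMStreamChunkEvents.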
@@ -1,13 +1,15 @@
from unittest import mock
from unittest.mock import MagicMock, patch


from crewai.utilities.evaluators.task_evaluator import (
    TaskEvaluator,
    TrainingTaskEvaluation,
)
from crewai.utilities.converter import ConverterError


@patch("crewai.utilities.evaluators.task_evaluator.Converter")
@patch("crewai.utilities.evaluators.task_evaluator.TrainingConverter")
def test_evaluate_training_data(converter_mock):
    training_data = {
        "agent_id": {
@@ -63,3 +65,39 @@ def test_evaluate_training_data(converter_mock):
            mock.call().to_pydantic(),
        ]
    )

@patch("crewai.utilities.converter.Converter.to_pydantic")
@patch("crewai.utilities.training_converter.TrainingConverter._convert_field_by_field")
def test_training_converter_fallback_mechanism(convert_field_by_field_mock, to_pydantic_mock):
    training_data = {
        "agent_id": {
            "data1": {
                "initial_output": "Initial output 1",
                "human_feedback": "Human feedback 1",
                "improved_output": "Improved output 1",
            },
            "data2": {
                "initial_output": "Initial output 2",
                "human_feedback": "Human feedback 2",
                "improved_output": "Improved output 2",
            },
        }
    }
    agent_id = "agent_id"
    to_pydantic_mock.side_effect = ConverterError("Failed to convert directly")

    expected_result = TrainingTaskEvaluation(
        suggestions=["Fallback suggestion"],
        quality=6.5,
        final_summary="Fallback summary"
    )
    convert_field_by_field_mock.return_value = expected_result

    original_agent = MagicMock()
    result = TaskEvaluator(original_agent=original_agent).evaluate_training_data(
        training_data, agent_id
    )

    assert result == expected_result
    to_pydantic_mock.assert_called_once()
    convert_field_by_field_mock.assert_called_once()
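The fallback test above pins down the behaviour under test: TaskEvaluator.evaluate_training_data builds a TrainingConverter, and when the ordinary one-shot Converter.to_pydantic conversion raises ConverterError, the converter is expected to recover by querying the LLM one field at a time. A hedged sketch of that control flow, using the names the patches target; the body is an assumption, not the shipped implementation:

    from crewai.utilities.converter import Converter, ConverterError


    class TrainingConverter(Converter):
        def to_pydantic(self):
            try:
                # Happy path: one LLM call converts the whole model at once.
                return super().to_pydantic()
            except ConverterError:
                # Fallback the test exercises: ask the LLM for each field of
                # the Pydantic model separately and reassemble the instance.
                return self._convert_field_by_field()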
@@ -57,23 +57,28 @@ def vcr_config(request) -> dict:
    }


base_agent = Agent(
    role="base_agent",
    llm="gpt-4o-mini",
    goal="Just say hi",
    backstory="You are a helpful assistant that just says hi",
@pytest.fixture(scope="module")
def base_agent():
    return Agent(
        role="base_agent",
        llm="gpt-4o-mini",
        goal="Just say hi",
        backstory="You are a helpful assistant that just says hi",
    )

base_task = Task(
    description="Just say hi",
    expected_output="hi",
    agent=base_agent,
)
@pytest.fixture(scope="module")
def base_task(base_agent):
    return Task(
        description="Just say hi",
        expected_output="hi",
        agent=base_agent,
    )

event_listener = EventListener()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_start_kickoff_event():
def test_crew_emits_start_kickoff_event(base_agent, base_task):
    received_events = []
    mock_span = Mock()
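The hunk above replaces the module-level base_agent/base_task objects with module-scoped pytest fixtures, so the Agent and Task are built lazily, once per test module, and handed to every test that names them as a parameter. A sketch of how a consumer looks (the test name here is illustrative; base_task itself requests base_agent, so both resolve to the same Agent instance):

    @pytest.mark.vcr(filter_headers=["authorization"])
    def test_uses_shared_fixtures(base_agent, base_task):
        # pytest resolves the parameters by fixture name.
        crew = Crew(agents=[base_agent], tasks=[base_task])
        crew.kickoff()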
@@ -101,7 +106,7 @@ def test_crew_emits_start_kickoff_event():


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_end_kickoff_event():
def test_crew_emits_end_kickoff_event(base_agent, base_task):
    received_events = []

    @crewai_event_bus.on(CrewKickoffCompletedEvent)
@@ -119,7 +124,7 @@ def test_crew_emits_end_kickoff_event():


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_test_kickoff_type_event():
def test_crew_emits_test_kickoff_type_event(base_agent, base_task):
    received_events = []
    mock_span = Mock()
@@ -165,7 +170,7 @@ def test_crew_emits_test_kickoff_type_event():


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_kickoff_failed_event():
def test_crew_emits_kickoff_failed_event(base_agent, base_task):
    received_events = []

    with crewai_event_bus.scoped_handlers():
@@ -190,7 +195,7 @@ def test_crew_emits_kickoff_failed_event():


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_start_task_event():
def test_crew_emits_start_task_event(base_agent, base_task):
    received_events = []

    @crewai_event_bus.on(TaskStartedEvent)
@@ -207,7 +212,7 @@ def test_crew_emits_start_task_event():


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_end_task_event():
def test_crew_emits_end_task_event(base_agent, base_task):
    received_events = []

    @crewai_event_bus.on(TaskCompletedEvent)
@@ -235,7 +240,7 @@ def test_crew_emits_end_task_event():


@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_emits_failed_event_on_execution_error():
def test_task_emits_failed_event_on_execution_error(base_agent, base_task):
    received_events = []
    received_sources = []
@@ -272,7 +277,7 @@ def test_task_emits_failed_event_on_execution_error():


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_emits_execution_started_and_completed_events():
def test_agent_emits_execution_started_and_completed_events(base_agent, base_task):
    received_events = []

    @crewai_event_bus.on(AgentExecutionStartedEvent)
@@ -301,7 +306,7 @@ def test_agent_emits_execution_started_and_completed_events():


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_emits_execution_error_event():
def test_agent_emits_execution_error_event(base_agent, base_task):
    received_events = []

    @crewai_event_bus.on(AgentExecutionErrorEvent)
@@ -501,7 +506,7 @@ def test_flow_emits_method_execution_started_event():


@pytest.mark.vcr(filter_headers=["authorization"])
def test_register_handler_adds_new_handler():
def test_register_handler_adds_new_handler(base_agent, base_task):
    received_events = []

    def custom_handler(source, event):
@@ -519,7 +524,7 @@ def test_register_handler_adds_new_handler():


@pytest.mark.vcr(filter_headers=["authorization"])
def test_multiple_handlers_for_same_event():
def test_multiple_handlers_for_same_event(base_agent, base_task):
    received_events_1 = []
    received_events_2 = []
@@ -613,6 +618,11 @@ def test_llm_emits_call_started_event():
    assert received_events[0].type == "llm_call_started"
    assert received_events[1].type == "llm_call_completed"

    assert received_events[0].task_name is None
    assert received_events[0].agent_role is None
    assert received_events[0].agent_id is None
    assert received_events[0].task_id is None


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_call_failed_event():
@@ -632,6 +642,10 @@ def test_llm_emits_call_failed_event():
    assert len(received_events) == 1
    assert received_events[0].type == "llm_call_failed"
    assert received_events[0].error == error_message
    assert received_events[0].task_name is None
    assert received_events[0].agent_role is None
    assert received_events[0].agent_id is None
    assert received_events[0].task_id is None


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -742,7 +756,6 @@ def test_streaming_empty_response_handling():
    received_chunks = []

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(LLMStreamChunkEvent)
        def handle_stream_chunk(source, event):
            received_chunks.append(event.chunk)
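The is-None assertions added above make explicit that an LLM call issued outside any crew or task context carries no task or agent metadata on its events. A hedged sketch of such a bare call (model name taken from the cassettes; LLM.call accepting a plain string is assumed from the surrounding tests):

    llm = LLM(model="gpt-4o-mini")
    llm.call("Just say hi")
    # The emitted LLMCallStartedEvent / LLMCallCompletedEvent then have
    # task_name, agent_role, agent_id and task_id all equal to None.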
@@ -779,3 +792,167 @@ def test_streaming_empty_response_handling():
    finally:
        # Restore the original method
        llm.call = original_call

@pytest.mark.vcr(filter_headers=["authorization"])
def test_stream_llm_emits_event_with_task_and_agent_info():
    completed_event = []
    failed_event = []
    started_event = []
    stream_event = []

    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(LLMCallFailedEvent)
        def handle_llm_failed(source, event):
            failed_event.append(event)

        @crewai_event_bus.on(LLMCallStartedEvent)
        def handle_llm_started(source, event):
            started_event.append(event)

        @crewai_event_bus.on(LLMCallCompletedEvent)
        def handle_llm_completed(source, event):
            completed_event.append(event)

        @crewai_event_bus.on(LLMStreamChunkEvent)
        def handle_llm_stream_chunk(source, event):
            stream_event.append(event)

        agent = Agent(
            role="TestAgent",
            llm=LLM(model="gpt-4o-mini", stream=True),
            goal="Just say hi",
            backstory="You are a helpful assistant that just says hi",
        )
        task = Task(
            description="Just say hi",
            expected_output="hi",
            llm=LLM(model="gpt-4o-mini", stream=True),
            agent=agent
        )

        crew = Crew(agents=[agent], tasks=[task])
        crew.kickoff()

        assert len(completed_event) == 1
        assert len(failed_event) == 0
        assert len(started_event) == 1
        assert len(stream_event) == 12

        all_events = completed_event + failed_event + started_event + stream_event
        all_agent_roles = [event.agent_role for event in all_events]
        all_agent_id = [event.agent_id for event in all_events]
        all_task_id = [event.task_id for event in all_events]
        all_task_name = [event.task_name for event in all_events]

        # ensure all events have the agent + task props set
        assert len(all_agent_roles) == 14
        assert len(all_agent_id) == 14
        assert len(all_task_id) == 14
        assert len(all_task_name) == 14

        assert set(all_agent_roles) == {agent.role}
        assert set(all_agent_id) == {agent.id}
        assert set(all_task_id) == {task.id}
        assert set(all_task_name) == {task.name}

@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_event_with_task_and_agent_info(base_agent, base_task):
    completed_event = []
    failed_event = []
    started_event = []
    stream_event = []

    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(LLMCallFailedEvent)
        def handle_llm_failed(source, event):
            failed_event.append(event)

        @crewai_event_bus.on(LLMCallStartedEvent)
        def handle_llm_started(source, event):
            started_event.append(event)

        @crewai_event_bus.on(LLMCallCompletedEvent)
        def handle_llm_completed(source, event):
            completed_event.append(event)

        @crewai_event_bus.on(LLMStreamChunkEvent)
        def handle_llm_stream_chunk(source, event):
            stream_event.append(event)

        crew = Crew(agents=[base_agent], tasks=[base_task])
        crew.kickoff()

        assert len(completed_event) == 1
        assert len(failed_event) == 0
        assert len(started_event) == 1
        assert len(stream_event) == 0

        all_events = completed_event + failed_event + started_event + stream_event
        all_agent_roles = [event.agent_role for event in all_events]
        all_agent_id = [event.agent_id for event in all_events]
        all_task_id = [event.task_id for event in all_events]
        all_task_name = [event.task_name for event in all_events]

        # ensure all events have the agent + task props set
        assert len(all_agent_roles) == 2
        assert len(all_agent_id) == 2
        assert len(all_task_id) == 2
        assert len(all_task_name) == 2

        assert set(all_agent_roles) == {base_agent.role}
        assert set(all_agent_id) == {base_agent.id}
        assert set(all_task_id) == {base_task.id}
        assert set(all_task_name) == {base_task.name}

@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_event_with_lite_agent():
    completed_event = []
    failed_event = []
    started_event = []
    stream_event = []

    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(LLMCallFailedEvent)
        def handle_llm_failed(source, event):
            failed_event.append(event)

        @crewai_event_bus.on(LLMCallStartedEvent)
        def handle_llm_started(source, event):
            started_event.append(event)

        @crewai_event_bus.on(LLMCallCompletedEvent)
        def handle_llm_completed(source, event):
            completed_event.append(event)

        @crewai_event_bus.on(LLMStreamChunkEvent)
        def handle_llm_stream_chunk(source, event):
            stream_event.append(event)

        agent = Agent(
            role="Speaker",
            llm=LLM(model="gpt-4o-mini", stream=True),
            goal="Just say hi",
            backstory="You are a helpful assistant that just says hi",
        )
        agent.kickoff(messages=[{"role": "user", "content": "say hi!"}])

        assert len(completed_event) == 2
        assert len(failed_event) == 0
        assert len(started_event) == 2
        assert len(stream_event) == 15

        all_events = completed_event + failed_event + started_event + stream_event
        all_agent_roles = [event.agent_role for event in all_events]
        all_agent_id = [event.agent_id for event in all_events]
        all_task_id = [event.task_id for event in all_events if event.task_id]
        all_task_name = [event.task_name for event in all_events if event.task_name]

        # ensure all events have the agent props set; a lite agent runs
        # without a Task, so the task props are expected to stay unset
        assert len(all_agent_roles) == 19
        assert len(all_agent_id) == 19
        assert len(all_task_id) == 0
        assert len(all_task_name) == 0

        assert set(all_agent_roles) == {agent.role}
        assert set(all_agent_id) == {agent.id}
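The magic numbers in the streaming test trace directly back to the second cassette above (a worked count; the convention that only non-empty content deltas become LLMStreamChunkEvents is inferred from the recording):

    # 15 `data:` events precede [DONE]: 1 role-priming chunk with empty
    # content, 12 text deltas, 1 "stop" chunk, 1 usage-only chunk.
    # Only the 12 text deltas surface as stream events, so the test sees
    #   1 started + 1 completed + 0 failed + 12 stream = 14 events,
    # which is why every metadata list is asserted to have length 14.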
97
tests/utilities/test_training_converter.py
Normal file
@@ -0,0 +1,97 @@
from unittest.mock import MagicMock, patch

from pydantic import BaseModel, Field
from typing import List

from crewai.utilities.converter import ConverterError
from crewai.utilities.training_converter import TrainingConverter


class TestModel(BaseModel):
    string_field: str = Field(description="A simple string field")
    list_field: List[str] = Field(description="A list of strings")
    number_field: float = Field(description="A number field")


class TestTrainingConverter:

    def setup_method(self):
        self.llm_mock = MagicMock()
        self.test_text = "Sample text for evaluation"
        self.test_instructions = "Convert to JSON format"
        self.converter = TrainingConverter(
            llm=self.llm_mock,
            text=self.test_text,
            model=TestModel,
            instructions=self.test_instructions
        )

    @patch("crewai.utilities.converter.Converter.to_pydantic")
    def test_fallback_to_field_by_field(self, parent_to_pydantic_mock):
        parent_to_pydantic_mock.side_effect = ConverterError("Failed to convert directly")

        llm_responses = {
            "string_field": "test string value",
            "list_field": "- item1\n- item2\n- item3",
            "number_field": "8.5"
        }

        def llm_side_effect(messages):
            prompt = messages[1]["content"]
            if "string_field" in prompt:
                return llm_responses["string_field"]
            elif "list_field" in prompt:
                return llm_responses["list_field"]
            elif "number_field" in prompt:
                return llm_responses["number_field"]
            return "unknown field"

        self.llm_mock.call.side_effect = llm_side_effect

        result = self.converter.to_pydantic()

        assert result.string_field == "test string value"
        assert result.list_field == ["item1", "item2", "item3"]
        assert result.number_field == 8.5

        parent_to_pydantic_mock.assert_called_once()
        assert self.llm_mock.call.call_count == 3

    def test_ask_llm_for_field(self):
        field_name = "test_field"
        field_description = "This is a test field description"
        expected_response = "Test response"
        self.llm_mock.call.return_value = expected_response
        response = self.converter._ask_llm_for_field(field_name, field_description)

        assert response == expected_response
        self.llm_mock.call.assert_called_once()

        call_args = self.llm_mock.call.call_args[0][0]
        assert call_args[0]["role"] == "system"
        assert f"Extract the {field_name}" in call_args[0]["content"]
        assert call_args[1]["role"] == "user"
        assert field_name in call_args[1]["content"]
        assert field_description in call_args[1]["content"]

    def test_process_field_value_string(self):
        response = " This is a string with extra whitespace "
        result = self.converter._process_field_value(response, str)
        assert result == "This is a string with extra whitespace"

    def test_process_field_value_list_with_bullet_points(self):
        response = "- Item 1\n- Item 2\n- Item 3"
        result = self.converter._process_field_value(response, List[str])
        assert result == ["Item 1", "Item 2", "Item 3"]

    def test_process_field_value_list_with_json(self):
        response = '["Item 1", "Item 2", "Item 3"]'
        with patch("crewai.utilities.training_converter.json.loads") as json_mock:
            json_mock.return_value = ["Item 1", "Item 2", "Item 3"]
            result = self.converter._process_field_value(response, List[str])
            assert result == ["Item 1", "Item 2", "Item 3"]

    def test_process_field_value_float(self):
        response = "The quality score is 8.5 out of 10"
        result = self.converter._process_field_value(response, float)
        assert result == 8.5
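Taken together, the _process_field_value tests above pin down a small coercion contract: strings are stripped, bullet lists are split into items, JSON arrays are parsed, and floats are extracted from surrounding prose. A hedged reconstruction of that contract, shown as a standalone function; the real method may differ:

    import json
    import re
    from typing import List


    def process_field_value(response: str, annotation):
        if annotation is float:
            # "The quality score is 8.5 out of 10" -> 8.5
            match = re.search(r"-?\d+(?:\.\d+)?", response)
            return float(match.group()) if match else None
        if annotation in (List[str], list):
            text = response.strip()
            if text.startswith("["):
                return json.loads(text)  # '["Item 1", "Item 2"]'
            # "- Item 1\n- Item 2" -> ["Item 1", "Item 2"]
            return [line.lstrip("- ").strip()
                    for line in text.splitlines() if line.strip()]
        return response.strip()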