Compare commits


48 Commits

Author SHA1 Message Date
Lorenze Jay
007ea5e839 Merge branch 'main' into feat/individual-react-agent 2025-04-02 08:47:13 -07:00
Lorenze Jay
52aeac4b75 Remove redundant error handling for action and final answer in CrewAgentParser. Update tests to reflect this change by deleting the corresponding test case. 2025-04-01 14:10:37 -07:00
Lorenze Jay
e33532796b Merge branch 'feat/individual-react-agent' of github.com:crewAIInc/crewAI into feat/individual-react-agent 2025-04-01 12:38:32 -07:00
Lorenze Jay
7f0d80d427 Add Excalidraw diagram file for visual representation of input-output flow
Created a new Excalidraw file that includes a diagram illustrating the input box, database, and output box with connecting arrows. This visual aid enhances understanding of the data flow within the application.
2025-04-01 12:38:27 -07:00
Lorenze Jay
e4df956bea Merge branch 'main' into feat/individual-react-agent 2025-04-01 12:08:40 -07:00
Lorenze Jay
01ebf802e3 Refactor agent tests by removing outdated test cases and updating YAML cassettes to reflect changes in tool usage and response formats. Adjusted request and response headers, including user agent and content length, for improved accuracy in testing. Enhanced interaction formats for consistency across tests. 2025-03-31 17:00:54 -07:00
Lorenze Jay
4856c4a685 Update tool usage logging to store tool arguments as native types instead of strings, enhancing data integrity and usability. 2025-03-31 16:51:18 -07:00
Lorenze Jay
bfcc228bc3 Update YAML cassette for LLM tests to reflect changes in response structure and model version. Adjusted request and response headers, including updated content length and user agent. Enhanced token limits and request counts for improved testing accuracy. 2025-03-31 16:46:39 -07:00
Lorenze Jay
802a4d079f Update tool usage logging to ensure tool arguments are consistently formatted as strings. Adjust agent test cases to reflect changes in maximum iterations and expected outputs, enhancing clarity in assertions. Update YAML cassettes to align with new response formats and improve overall consistency across tests. 2025-03-31 16:42:51 -07:00
Lorenze Jay
570c7845fa Refactor agent tests to update model versions and improve response formatting in YAML cassettes. Changed model references from 'o1-preview' to 'o3-mini' and adjusted interaction formats for consistency. Enhanced error handling in context length tests and refined mock setups for better clarity. 2025-03-31 16:17:25 -07:00
Lorenze Jay
8f5d6f720d Update agent tests to reflect changes in expected call counts and improve response formatting in YAML cassette. Adjusted mock call count from 2 to 3 and refined interaction formats for clarity and consistency. 2025-03-31 15:34:13 -07:00
Lorenze Jay
99614f83e0 Enhance tests in crew_test.py by verifying cache behavior in test_tools_with_custom_caching and ensuring proper agent initialization with added commas in test_crew_kickoff_for_each_works_with_manager_agent_copy. 2025-03-31 15:29:07 -07:00
Lorenze Jay
288afba5fa Refactor agent_utils.py by removing unused event imports and adding missing commas in function definitions. Update test_events.py to reflect changes in expected event counts and adjust assertions accordingly. Modify test_tools_emits_error_events.yaml to include new headers and update response content for consistency with recent API changes. 2025-03-31 15:21:44 -07:00
Lorenze Jay
b8d871e795 Merge branch 'main' of github.com:crewAIInc/crewAI into feat/individual-react-agent 2025-03-31 12:21:41 -07:00
Lorenze Jay
4deff49b70 Add initialization for 'result' variable in ToolUsage class to resolve type-checker warnings 2025-03-31 12:20:16 -07:00
Lorenze Jay
08f0fc2285 Remove unused variable 'result' from ToolUsage class to clean up code. 2025-03-31 12:16:11 -07:00
Lorenze Jay
7107224fa9 Remove deprecated test files and examples for LiteAgent; add comprehensive tests for LiteAgent functionality, including tool usage and structured output handling. 2025-03-31 12:13:25 -07:00
Lorenze Jay
a00eaa4732 Remove unused parameters from ToolUsage instantiation in tests and clean up debug print statement in CrewAgentParser. 2025-03-31 11:52:20 -07:00
Lorenze Jay
347ff85180 Add optional agent parameter to CrewAgentParser and enhance action handling logic 2025-03-31 10:29:04 -07:00
Lorenze Jay
48b10600d3 Fix type-checker issue by adding type ignore comment for cache read in ToolUsage class 2025-03-31 10:01:02 -07:00
Lorenze Jay
a02f637155 fix type-checker 2025-03-31 09:50:42 -07:00
Lorenze Jay
996bbad0d3 fix fingerprinting issues 2025-03-31 09:36:30 -07:00
Lorenze Jay
859139016e Update LiteAgent documentation for clarity and consistency; replace WebsiteSearchTool with SerperDevTool, and improve formatting in examples. 2025-03-31 08:12:08 -07:00
Lorenze Jay
f99d374609 Merge branch 'main' into feat/individual-react-agent 2025-03-28 15:19:44 -07:00
Brandon Hancock
3c9058a45f More type checking fixes 2025-03-28 15:31:44 -04:00
Brandon Hancock
f8f9063d9e more type checker fixes 2025-03-28 13:59:16 -04:00
Brandon Hancock
a380bc076b Trying to fix CI issues 2025-03-28 13:03:37 -04:00
Brandon Hancock
f2927fc266 Fix errors 2025-03-28 12:58:23 -04:00
Brandon Hancock
594784473b Merge branch 'main' into feat/individual-react-agent 2025-03-28 12:52:40 -04:00
Brandon Hancock
4bfb71d749 Clean up 2025-03-28 10:55:31 -04:00
Brandon Hancock
1daeaa4a81 docs 2025-03-28 10:51:54 -04:00
Brandon Hancock
4857777a9c drop hard coded examples 2025-03-28 10:28:31 -04:00
Brandon Hancock
b8c8640f22 cleanup 2025-03-28 10:28:15 -04:00
Brandon Hancock
0ec3c37912 99% done. Need to make docs match new example 2025-03-27 17:18:33 -04:00
Brandon Hancock
30aa5cc3b9 Update logger 2025-03-27 16:21:31 -04:00
Brandon Hancock
0fca721b11 cleaning up 2025-03-27 13:30:37 -04:00
Brandon Hancock
e11c7d1fd8 WIP 2025-03-26 14:51:53 -04:00
Brandon Hancock
e6b90699a8 more testing 2025-03-26 10:54:00 -04:00
Brandon Hancock
fa62df7d18 Usage metrics fixed 2025-03-25 14:56:27 -04:00
Brandon Hancock
0785d596f0 output type works now 2025-03-25 14:42:50 -04:00
Brandon Hancock
06854fff86 Its working but needs a massive clean up 2025-03-25 13:38:52 -04:00
Brandon Hancock
998afcd498 More WIP 2025-03-25 10:50:53 -04:00
Brandon Hancock
fa15c5eb1d WIP 2025-03-25 10:36:59 -04:00
Brandon Hancock
defb0c55e6 wip 2025-03-20 08:00:32 -04:00
Brandon Hancock
e9fa9c5700 wip 2025-03-17 13:03:57 -04:00
Brandon Hancock
8e3af76252 WIP 2025-03-17 11:50:06 -04:00
Brandon Hancock
84716d4037 Merge branch 'main' into feat/individual-react-agent 2025-03-17 10:52:14 -04:00
Brandon Hancock
33192237a5 WIP 2025-03-13 09:16:18 -04:00
58 changed files with 465 additions and 7384 deletions

View File

@@ -12,9 +12,6 @@ jobs:
tests:
runs-on: ubuntu-latest
timeout-minutes: 15
strategy:
matrix:
python-version: ['3.10', '3.11', '3.12']
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -24,8 +21,9 @@ jobs:
with:
enable-cache: true
- name: Set up Python ${{ matrix.python-version }}
run: uv python install ${{ matrix.python-version }}
- name: Set up Python
run: uv python install 3.12.8
- name: Install the project
run: uv sync --dev --all-extras

3
.gitignore vendored
View File

@@ -25,5 +25,4 @@ agentops.log
test_flow.html
crewairules.mdc
plan.md
conceptual_plan.md
build_image
conceptual_plan.md

View File

@@ -18,18 +18,6 @@ In the CrewAI framework, an `Agent` is an autonomous unit that can:
Think of an agent as a specialized team member with specific skills, expertise, and responsibilities. For example, a `Researcher` agent might excel at gathering and analyzing information, while a `Writer` agent might be better at creating content.
</Tip>
<Note type="info" title="Enterprise Enhancement: Visual Agent Builder">
CrewAI Enterprise includes a Visual Agent Builder that simplifies agent creation and configuration without writing code. Design your agents visually and test them in real-time.
![Visual Agent Builder Screenshot](../images/enterprise/crew-studio-quickstart)
The Visual Agent Builder enables:
- Intuitive agent configuration with form-based interfaces
- Real-time testing and validation
- Template library with pre-configured agent types
- Easy customization of agent attributes and behaviors
</Note>
## Agent Attributes
| Attribute | Parameter | Type | Description |
@@ -245,7 +233,7 @@ custom_agent = Agent(
#### Code Execution
- `allow_code_execution`: Must be True to run code
- `code_execution_mode`:
- `code_execution_mode`:
- `"safe"`: Uses Docker (recommended for production)
- `"unsafe"`: Direct execution (use only in trusted environments)

View File

@@ -23,7 +23,8 @@ The `Crew` class has been enriched with several attributes to support advanced f
| **Process Flow** (`process`) | Defines execution logic (e.g., sequential, hierarchical) for task distribution. |
| **Verbose Logging** (`verbose`) | Provides detailed logging for monitoring and debugging. Accepts integer and boolean values to control verbosity level. |
| **Rate Limiting** (`max_rpm`) | Limits requests per minute to optimize resource usage. Setting guidelines depend on task complexity and load. |
| **Internationalization / Customization** (`prompt_file`) | Supports prompt customization for global usability. [Example of file](https://github.com/joaomdmoura/crewAI/blob/main/src/crewai/translations/en.json) |
| **Internationalization / Customization** (`language`, `prompt_file`) | Supports prompt customization for global usability. [Example of file](https://github.com/joaomdmoura/crewAI/blob/main/src/crewai/translations/en.json) |
| **Execution and Output Handling** (`full_output`) | Controls output granularity, distinguishing between full and final outputs. |
| **Callback and Telemetry** (`step_callback`, `task_callback`) | Enables step-wise and task-level execution monitoring and telemetry for performance analytics. |
| **Crew Sharing** (`share_crew`) | Allows sharing crew data with CrewAI for model improvement. Privacy implications and benefits should be considered. |
| **Usage Metrics** (`usage_metrics`) | Logs all LLM usage metrics during task execution for performance insights. |
@@ -48,4 +49,4 @@ Consider a crew with a researcher agent tasked with data gathering and a writer
## Conclusion
The integration of advanced attributes and functionalities into the CrewAI framework significantly enriches the agent collaboration ecosystem. These enhancements not only simplify interactions but also offer unprecedented flexibility and control, paving the way for sophisticated AI-driven solutions capable of tackling complex tasks through intelligent collaboration and delegation.
The integration of advanced attributes and functionalities into the CrewAI framework significantly enriches the agent collaboration ecosystem. These enhancements not only simplify interactions but also offer unprecedented flexibility and control, paving the way for sophisticated AI-driven solutions capable of tackling complex tasks through intelligent collaboration and delegation.

View File

@@ -20,10 +20,13 @@ A crew in crewAI represents a collaborative group of agents working together to
| **Function Calling LLM** _(optional)_ | `function_calling_llm` | If passed, the crew will use this LLM to do function calling for tools for all agents in the crew. Each agent can have its own LLM, which overrides the crew's LLM for function calling. |
| **Config** _(optional)_ | `config` | Optional configuration settings for the crew, in `Json` or `Dict[str, Any]` format. |
| **Max RPM** _(optional)_ | `max_rpm` | Maximum requests per minute the crew adheres to during execution. Defaults to `None`. |
| **Language** _(optional)_ | `language` | Language used for the crew, defaults to English. |
| **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
| **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
| **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
| **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
| **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all tasks outputs or just the final output. Defaults to `False`. |
| **Step Callback** _(optional)_ | `step_callback` | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
| **Task Callback** _(optional)_ | `task_callback` | A function that is called after the completion of each task. Useful for monitoring or additional operations post-task execution. |
| **Share Crew** _(optional)_ | `share_crew` | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
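Read together, the rows above map directly onto the constructor; a minimal sketch, assuming `researcher`, `writer`, and their tasks are defined elsewhere:
```python
from crewai import Crew, Process

# Keyword arguments correspond to the attribute table above.
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, write_task],
    process=Process.sequential,
    memory=True,        # short-term, long-term, and entity memory
    cache=True,         # cache tool results (the default)
    max_rpm=10,         # cap LLM requests per minute
    full_output=False,  # return only the final output
)
```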

View File

@@ -18,20 +18,6 @@ CrewAI uses an event bus architecture to emit events throughout the execution li
When specific actions occur in CrewAI (like a Crew starting execution, an Agent completing a task, or a tool being used), the system emits corresponding events. You can register handlers for these events to execute custom code when they occur.
<Note type="info" title="Enterprise Enhancement: Prompt Tracing">
CrewAI Enterprise provides a built-in Prompt Tracing feature that leverages the event system to track, store, and visualize all prompts, completions, and associated metadata. This provides powerful debugging capabilities and transparency into your agent operations.
![Prompt Tracing Dashboard](../images/enterprise/prompt-tracing.png)
With Prompt Tracing you can:
- View the complete history of all prompts sent to your LLM
- Track token usage and costs
- Debug agent reasoning failures
- Share prompt sequences with your team
- Compare different prompt strategies
- Export traces for compliance and auditing
</Note>
## Creating a Custom Event Listener
To create a custom event listener, you need to:
@@ -54,17 +40,17 @@ from crewai.utilities.events.base_event_listener import BaseEventListener
class MyCustomListener(BaseEventListener):
def __init__(self):
super().__init__()
def setup_listeners(self, crewai_event_bus):
@crewai_event_bus.on(CrewKickoffStartedEvent)
def on_crew_started(source, event):
print(f"Crew '{event.crew_name}' has started execution!")
@crewai_event_bus.on(CrewKickoffCompletedEvent)
def on_crew_completed(source, event):
print(f"Crew '{event.crew_name}' has completed execution!")
print(f"Output: {event.output}")
@crewai_event_bus.on(AgentExecutionCompletedEvent)
def on_agent_execution_completed(source, event):
print(f"Agent '{event.agent.role}' completed task")
@@ -97,7 +83,7 @@ my_listener = MyCustomListener()
class MyCustomCrew:
# Your crew implementation...
def crew(self):
return Crew(
agents=[...],
@@ -120,7 +106,7 @@ my_listener = MyCustomListener()
class MyCustomFlow(Flow):
# Your flow implementation...
@start()
def first_step(self):
# ...
@@ -338,9 +324,9 @@ with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffStartedEvent)
def temp_handler(source, event):
print("This handler only exists within this context")
# Do something that emits events
# Outside the context, the temporary handler is removed
```

View File

@@ -545,20 +545,16 @@ The `third_method` and `fourth_method` listen to the output of the `second_metho
When you run this Flow, the output will change based on the random boolean value generated by the `start_method`.
## Adding Agents to Flows
## Adding LiteAgent to Flows
Agents can be seamlessly integrated into your flows, providing a lightweight alternative to full Crews when you need simpler, focused task execution. Here's an example of how to use an Agent within a flow to perform market research:
LiteAgents can be seamlessly integrated into your flows, providing a lightweight alternative to full Crews when you need simpler, focused task execution. Here's an example of how to use a LiteAgent within a flow to perform market research:
```python
import asyncio
from typing import Any, Dict, List
from crewai_tools import SerperDevTool
from typing import List, cast
from crewai_tools.tools.website_search.website_search_tool import WebsiteSearchTool
from pydantic import BaseModel, Field
from crewai.agent import Agent
from crewai.flow.flow import Flow, listen, start
from crewai.lite_agent import LiteAgent
# Define a structured output format
class MarketAnalysis(BaseModel):
@@ -566,30 +562,28 @@ class MarketAnalysis(BaseModel):
market_size: str = Field(description="Estimated market size")
competitors: List[str] = Field(description="Major competitors in the space")
# Define flow state
class MarketResearchState(BaseModel):
product: str = ""
analysis: MarketAnalysis | None = None
# Create a flow class
class MarketResearchFlow(Flow[MarketResearchState]):
@start()
def initialize_research(self) -> Dict[str, Any]:
def initialize_research(self):
print(f"Starting market research for {self.state.product}")
return {"product": self.state.product}
@listen(initialize_research)
async def analyze_market(self) -> Dict[str, Any]:
# Create an Agent for market research
analyst = Agent(
def analyze_market(self):
# Create a LiteAgent for market research
analyst = LiteAgent(
role="Market Research Analyst",
goal=f"Analyze the market for {self.state.product}",
backstory="You are an experienced market analyst with expertise in "
"identifying market trends and opportunities.",
tools=[SerperDevTool()],
llm="gpt-4o",
tools=[WebsiteSearchTool()],
verbose=True,
response_format=MarketAnalysis,
)
# Define the research query
@@ -598,65 +592,49 @@ class MarketResearchFlow(Flow[MarketResearchState]):
1. Key market trends
2. Market size
3. Major competitors
Format your response according to the specified structure.
"""
# Execute the analysis with structured output format
result = await analyst.kickoff_async(query, response_format=MarketAnalysis)
if result.pydantic:
print("result", result.pydantic)
else:
print("result", result)
# Return the analysis to update the state
return {"analysis": result.pydantic}
# Execute the analysis
result = analyst.kickoff(query)
self.state.analysis = cast(MarketAnalysis, result.pydantic)
return result.pydantic
@listen(analyze_market)
def present_results(self, analysis) -> None:
def present_results(self):
analysis = self.state.analysis
if analysis is None:
print("No analysis results available")
return
print("\nMarket Analysis Results")
print("=====================")
if isinstance(analysis, dict):
# If we got a dict with 'analysis' key, extract the actual analysis object
market_analysis = analysis.get("analysis")
else:
market_analysis = analysis
print("\nKey Market Trends:")
for trend in analysis.key_trends:
print(f"- {trend}")
if market_analysis and isinstance(market_analysis, MarketAnalysis):
print("\nKey Market Trends:")
for trend in market_analysis.key_trends:
print(f"- {trend}")
print(f"\nMarket Size: {market_analysis.market_size}")
print("\nMajor Competitors:")
for competitor in market_analysis.competitors:
print(f"- {competitor}")
else:
print("No structured analysis data available.")
print("Raw analysis:", analysis)
print(f"\nMarket Size: {analysis.market_size}")
print("\nMajor Competitors:")
for competitor in analysis.competitors:
print(f"- {competitor}")
# Usage example
async def run_flow():
flow = MarketResearchFlow()
result = await flow.kickoff_async(inputs={"product": "AI-powered chatbots"})
return result
# Run the flow
if __name__ == "__main__":
asyncio.run(run_flow())
flow = MarketResearchFlow()
result = flow.kickoff(inputs={"product": "AI-powered chatbots"})
```
This example demonstrates several key features of using Agents in flows:
This example demonstrates several key features of using LiteAgents in flows:
1. **Structured Output**: Using Pydantic models to define the expected output format (`MarketAnalysis`) ensures type safety and structured data throughout the flow.
2. **State Management**: The flow state (`MarketResearchState`) maintains context between steps and stores both inputs and outputs.
3. **Tool Integration**: Agents can use tools (like `WebsiteSearchTool`) to enhance their capabilities.
3. **Tool Integration**: LiteAgents can use tools (like `WebsiteSearchTool`) to enhance their capabilities.
If you want to learn more about LiteAgents, check out the [LiteAgent](/concepts/lite-agent) page.
## Adding Crews to Flows

View File

@@ -0,0 +1,242 @@
---
title: LiteAgent
description: A lightweight, single-purpose agent for simple autonomous tasks within the CrewAI framework.
icon: feather
---
## Overview
A `LiteAgent` is a streamlined version of CrewAI's Agent, designed for simpler, standalone tasks that don't require the full complexity of a crew-based workflow. It's perfect for quick automations, single-purpose tasks, or when you need a lightweight solution.
<Tip>
Think of a LiteAgent as a specialized worker that excels at individual tasks.
While regular Agents are team players in a crew, LiteAgents are solo
performers optimized for specific operations.
</Tip>
## LiteAgent Attributes
| Attribute | Parameter | Type | Description |
| :------------------------------- | :---------------- | :--------------------- | :-------------------------------------------------------------- |
| **Role** | `role` | `str` | Defines the agent's function and expertise. |
| **Goal** | `goal` | `str` | The specific objective that guides the agent's actions. |
| **Backstory** | `backstory` | `str` | Provides context and personality to the agent. |
| **LLM** _(optional)_ | `llm` | `Union[str, LLM, Any]` | Language model powering the agent. Defaults to "gpt-4". |
| **Tools** _(optional)_ | `tools` | `List[BaseTool]` | Capabilities available to the agent. Defaults to an empty list. |
| **Verbose** _(optional)_ | `verbose` | `bool` | Enable detailed execution logs. Default is False. |
| **Response Format** _(optional)_ | `response_format` | `Type[BaseModel]` | Pydantic model for structured output. Optional. |
## Creating a LiteAgent
Here's a simple example of creating and using a standalone LiteAgent:
```python
from typing import List, cast
from crewai_tools import SerperDevTool
from pydantic import BaseModel, Field
from crewai.lite_agent import LiteAgent
# Define a structured output format
class MovieReview(BaseModel):
title: str = Field(description="The title of the movie")
rating: float = Field(description="Rating out of 10")
pros: List[str] = Field(description="List of positive aspects")
cons: List[str] = Field(description="List of negative aspects")
# Create a LiteAgent
critic = LiteAgent(
role="Movie Critic",
goal="Provide insightful movie reviews",
backstory="You are an experienced film critic known for balanced, thoughtful reviews.",
tools=[SerperDevTool()],
verbose=True,
response_format=MovieReview,
)
# Use the agent
query = """
Review the movie 'Inception'. Include:
1. Your rating out of 10
2. Key positive aspects
3. Areas that could be improved
"""
result = critic.kickoff(query)
# Access the structured output
review = cast(MovieReview, result.pydantic)
print(f"\nMovie Review: {review.title}")
print(f"Rating: {review.rating}/10")
print("\nPros:")
for pro in review.pros:
print(f"- {pro}")
print("\nCons:")
for con in review.cons:
print(f"- {con}")
```
This example demonstrates the core features of a LiteAgent:
- Structured output using Pydantic models
- Tool integration with tools like `SerperDevTool`
- Simple execution with `kickoff()`
- Easy access to both raw and structured results
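The last point deserves a quick sketch, continuing from the `result` returned above:
```python
# Both views of the same execution are always available.
print(result.raw)         # unstructured LLM text
review = result.pydantic  # parsed MovieReview instance (or None)
if review is not None:
    print(review.rating)
```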
## Using LiteAgent in a Flow
For more complex scenarios, you can integrate LiteAgents into a Flow. Here's an example of a market research flow:
```python
from typing import List
from pydantic import BaseModel, Field
from crewai.flow.flow import Flow, start, listen
from crewai.lite_agent import LiteAgent
from crewai_tools import WebsiteSearchTool
# Define a structured output format
class MarketAnalysis(BaseModel):
key_trends: List[str] = Field(description="List of identified market trends")
market_size: str = Field(description="Estimated market size")
competitors: List[str] = Field(description="Major competitors in the space")
# Define flow state
class MarketResearchState(BaseModel):
product: str = ""
analysis: MarketAnalysis | None = None
# Create a flow class
class MarketResearchFlow(Flow[MarketResearchState]):
@start()
def initialize_research(self):
print(f"Starting market research for {self.state.product}")
@listen(initialize_research)
async def analyze_market(self):
# Create a LiteAgent for market research
analyst = LiteAgent(
role="Market Research Analyst",
goal=f"Analyze the market for {self.state.product}",
backstory="You are an experienced market analyst with expertise in "
"identifying market trends and opportunities.",
tools=[WebsiteSearchTool()],
verbose=True,
response_format=MarketAnalysis
)
# Define the research query
query = f"""
Research the market for {self.state.product}. Include:
1. Key market trends
2. Market size
3. Major competitors
Format your response according to the specified structure.
"""
# Execute the analysis
result = await analyst.kickoff_async(query)
self.state.analysis = result.pydantic
return result.pydantic
@listen(analyze_market)
def present_results(self):
analysis = self.state.analysis
print("\nMarket Analysis Results")
print("=====================")
print("\nKey Market Trends:")
for trend in analysis.key_trends:
print(f"- {trend}")
print(f"\nMarket Size: {analysis.market_size}")
print("\nMajor Competitors:")
for competitor in analysis.competitors:
print(f"- {competitor}")
# Usage example
import asyncio
async def run_flow():
flow = MarketResearchFlow()
result = await flow.kickoff_async(inputs={"product": "AI-powered chatbots"})
return result
# Run the flow
if __name__ == "__main__":
asyncio.run(run_flow())
```
## Key Features
### 1. Simplified Setup
Unlike regular Agents, LiteAgents are designed for quick setup and standalone operation. They don't require crew configuration or task management.
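A minimal sketch of that standalone flow, with the role, goal, and backstory invented for illustration:
```python
from crewai.lite_agent import LiteAgent

# No Crew or Task objects are involved; kickoff() runs the agent directly.
helper = LiteAgent(
    role="Summarizer",
    goal="Summarize text concisely",
    backstory="You write crisp, accurate summaries.",
)
result = helper.kickoff("Summarize the benefits of unit testing.")
print(result.raw)
```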
### 2. Structured Output
LiteAgents support Pydantic models for response formatting, making it easy to get structured, type-safe data from your agent's operations.
### 3. Tool Integration
Just like regular Agents, LiteAgents can use tools to enhance their capabilities:
```python
from crewai_tools import SerperDevTool
agent = LiteAgent(
role="Research Assistant",
goal="Find and analyze information",
backstory="You are a thorough researcher who checks facts carefully.",
tools=[SerperDevTool()],
verbose=True
)
```
### 4. Async Support
LiteAgents support asynchronous execution through the `kickoff_async` method, making them suitable for non-blocking operations in your application.
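A short sketch of what that looks like, reusing the `critic` agent from the earlier example:
```python
import asyncio

async def main():
    # kickoff_async awaits the LLM call without blocking the event loop,
    # so other coroutines can make progress in the meantime.
    result = await critic.kickoff_async("Review the movie 'Inception'.")
    print(result.raw)

asyncio.run(main())
```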
## Response Formatting
LiteAgents support structured output through Pydantic models using the `response_format` parameter. This feature ensures type safety and consistent output structure, making it easier to work with agent responses in your application.
### Basic Usage
```python
from pydantic import BaseModel, Field
class SearchResult(BaseModel):
title: str = Field(description="The title of the found content")
summary: str = Field(description="A brief summary of the content")
relevance_score: float = Field(description="Relevance score from 0 to 1")
agent = LiteAgent(
role="Search Specialist",
goal="Find and summarize relevant information",
backstory="You are a meticulous researcher who distills findings clearly.",
response_format=SearchResult
)
result = await agent.kickoff_async("Find information about quantum computing")
print(f"Title: {result.pydantic.title}")
print(f"Summary: {result.pydantic.summary}")
print(f"Relevance: {result.pydantic.relevance_score}")
```
### Handling Responses
When using `response_format`, the agent's response will be available in two forms:
1. **Raw Response**: Access the unstructured string response
```python
result = await agent.kickoff_async("Analyze the market")
print(result.raw) # Original LLM response
```
2. **Structured Response**: Access the parsed Pydantic model
```python
print(result.pydantic) # Parsed response as Pydantic model
print(result.pydantic.dict()) # Convert to dictionary
```

View File

@@ -18,8 +18,7 @@ reason, and learn from past interactions.
| **Long-Term Memory** | Preserves valuable insights and learnings from past executions, allowing agents to build and refine their knowledge over time. |
| **Entity Memory** | Captures and organizes information about entities (people, places, concepts) encountered during tasks, facilitating deeper understanding and relationship mapping. Uses `RAG` for storing entity information. |
| **Contextual Memory**| Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
| **External Memory** | Enables integration with external memory systems and providers (like Mem0), allowing for specialized memory storage and retrieval across different applications. Supports custom storage implementations for flexible memory management. |
| **User Memory** | ⚠️ **DEPRECATED**: This component is deprecated and will be removed in a future version. Please use [External Memory](#using-external-memory) instead. |
| **User Memory** | Stores user-specific information and preferences, enhancing personalization and user experience. |
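Enabling the built-in systems is a single flag on the crew; a minimal sketch, assuming `agent` and `task` already exist:
```python
from crewai import Crew

# memory=True wires up short-term, long-term, and entity memory with the
# default storage; the embedder powers the RAG-backed lookups.
crew = Crew(
    agents=[agent],
    tasks=[task],
    memory=True,
    embedder={"provider": "openai"},
)
```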
## How Memory Systems Empower Agents
@@ -275,102 +274,6 @@ crew = Crew(
)
```
### Using External Memory
External Memory is a powerful feature that allows you to integrate external memory systems with your CrewAI applications. This is particularly useful when you want to use specialized memory providers or maintain memory across different applications.
#### Basic Usage with Mem0
The most common way to use External Memory is with Mem0 as the provider:
```python
from crewai import Agent, Crew, Process, Task
from crewai.memory.external.external_memory import ExternalMemory
agent = Agent(
role="You are a helpful assistant",
goal="Plan a vacation for the user",
backstory="You are a helpful assistant that can plan a vacation for the user",
verbose=True,
)
task = Task(
description="Give things related to the user's vacation",
expected_output="A plan for the vacation",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
process=Process.sequential,
memory=True,
external_memory=ExternalMemory(
embedder_config={"provider": "mem0", "config": {"user_id": "U-123"}} # you can provide an entire Mem0 configuration
),
)
crew.kickoff(
inputs={"question": "which destination is better for a beach vacation?"}
)
```
#### Using External Memory with Custom Storage
You can also create custom storage implementations for External Memory. Here's an example of how to create a custom storage:
```python
from crewai import Agent, Crew, Process, Task
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.storage.interface import Storage
class CustomStorage(Storage):
def __init__(self):
self.memories = []
def save(self, value, metadata=None, agent=None):
self.memories.append({"value": value, "metadata": metadata, "agent": agent})
def search(self, query, limit=10, score_threshold=0.5):
# Implement your search logic here
return []
def reset(self):
self.memories = []
# Create external memory with custom storage
external_memory = ExternalMemory(
storage=CustomStorage(),
embedder_config={"provider": "mem0", "config": {"user_id": "U-123"}},
)
agent = Agent(
role="You are a helpful assistant",
goal="Plan a vacation for the user",
backstory="You are a helpful assistant that can plan a vacation for the user",
verbose=True,
)
task = Task(
description="Give things related to the user's vacation",
expected_output="A plan for the vacation",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
process=Process.sequential,
memory=True,
external_memory=external_memory,
)
crew.kickoff(
inputs={"question": "which destination is better for a beach vacation?"}
)
```
## Additional Embedding Providers

View File

@@ -12,18 +12,6 @@ Tasks provide all necessary details for execution, such as a description, the ag
Tasks within CrewAI can be collaborative, requiring multiple agents to work together. This is managed through the task properties and orchestrated by the Crew's process, enhancing teamwork and efficiency.
<Note type="info" title="Enterprise Enhancement: Visual Task Builder">
CrewAI Enterprise includes a Visual Task Builder in Crew Studio that simplifies complex task creation and chaining. Design your task flows visually and test them in real-time without writing code.
![Task Builder Screenshot](../images/enterprise/crew-studio-quickstart.png)
The Visual Task Builder enables:
- Drag-and-drop task creation
- Visual task dependencies and flow
- Real-time testing and validation
- Easy sharing and collaboration
</Note>
### Task Execution Flow
Tasks can be executed in two ways:
@@ -426,7 +414,7 @@ It's also important to note that the output of the final task of a crew becomes
### Using `output_pydantic`
The `output_pydantic` property allows you to define a Pydantic model that the task output should conform to. This ensures that the output is not only structured but also validated according to the Pydantic model.
Here's an example demonstrating how to use output_pydantic:
Here’s an example demonstrating how to use output_pydantic:
```python Code
import json
@@ -507,7 +495,7 @@ In this example:
### Using `output_json`
The `output_json` property allows you to define the expected output in JSON format. This ensures that the task's output is a valid JSON structure that can be easily parsed and used in your application.
Here's an example demonstrating how to use `output_json`:
Here’s an example demonstrating how to use `output_json`:
```python Code
import json

View File

@@ -15,18 +15,6 @@ A tool in CrewAI is a skill or function that agents can utilize to perform vario
This includes tools from the [CrewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and [LangChain Tools](https://python.langchain.com/docs/integrations/tools),
enabling everything from simple searches to complex interactions and effective teamwork among agents.
<Note type="info" title="Enterprise Enhancement: Tools Repository">
CrewAI Enterprise provides a comprehensive Tools Repository with pre-built integrations for common business systems and APIs. Deploy agents with enterprise tools in minutes instead of days.
![Tools Repository Screenshot](../images/enterprise/tools-repository.png)
The Enterprise Tools Repository includes:
- Pre-built connectors for popular enterprise systems
- Custom tool creation interface
- Version control and sharing capabilities
- Security and compliance features
</Note>
## Key Characteristics of Tools
- **Utility**: Crafted for tasks such as web searching, data analysis, content generation, and agent collaboration.
@@ -91,7 +79,7 @@ research = Task(
)
write = Task(
description='Write an engaging blog post about the AI industry, based on the research analyst's summary. Draw inspiration from the latest blog posts in the directory.',
description='Write an engaging blog post about the AI industry, based on the research analyst’s summary. Draw inspiration from the latest blog posts in the directory.',
expected_output='A 4-paragraph blog post formatted in markdown with engaging, informative, and accessible content, avoiding complex jargon.',
agent=writer,
output_file='blog-posts/new_post.md' # The final blog post will be saved here
@@ -153,7 +141,7 @@ Here is a list of the available tools and their descriptions:
## Creating your own Tools
<Tip>
Developers can craft `custom tools` tailored for their agent's needs or
Developers can craft `custom tools` tailored for their agent’s needs or
utilize pre-built options.
</Tip>

View File

@@ -66,6 +66,7 @@
"concepts/tasks",
"concepts/crews",
"concepts/flows",
"concepts/lite-agent",
"concepts/knowledge",
"concepts/llms",
"concepts/processes",
@@ -76,7 +77,9 @@
"concepts/testing",
"concepts/cli",
"concepts/tools",
"concepts/event-listener"
"concepts/event-listener",
"concepts/langchain-tools",
"concepts/llamaindex-tools"
]
},
{
@@ -95,21 +98,19 @@
"how-to/kickoff-async",
"how-to/kickoff-for-each",
"how-to/replay-tasks-from-latest-crew-kickoff",
"how-to/conditional-tasks",
"how-to/langchain-tools",
"how-to/llamaindex-tools"
"how-to/conditional-tasks"
]
},
{
"group": "Agent Monitoring & Observability",
"pages": [
"how-to/agentops-observability",
"how-to/arize-phoenix-observability",
"how-to/langfuse-observability",
"how-to/langtrace-observability",
"how-to/mlflow-observability",
"how-to/openlit-observability",
"how-to/opik-observability",
"how-to/phoenix-observability",
"how-to/portkey-observability",
"how-to/weave-integration"
]
@@ -196,11 +197,6 @@
"anchor": "Community",
"href": "https://community.crewai.com",
"icon": "discourse"
},
{
"anchor": "Tutorials",
"href": "https://www.youtube.com/@crewAIInc",
"icon": "youtube"
}
]
}
@@ -235,4 +231,4 @@
"reddit": "https://www.reddit.com/r/crewAIInc/"
}
}
}
}

View File

@@ -1,10 +1,10 @@
---
title: Arize Phoenix
description: Arize Phoenix integration for CrewAI with OpenTelemetry and OpenInference
title: Agent Monitoring with Arize Phoenix
description: Learn how to integrate Arize Phoenix with CrewAI via OpenTelemetry using OpenInference
icon: magnifying-glass-chart
---
# Arize Phoenix Integration
# Integrate Arize Phoenix with CrewAI
This guide demonstrates how to integrate **Arize Phoenix** with **CrewAI** using OpenTelemetry via the [OpenInference](https://github.com/openinference/openinference) SDK. By the end of this guide, you will be able to trace your CrewAI agents and easily debug your agents.

View File

@@ -4,29 +4,14 @@ description: Get started with CrewAI - Install, configure, and build your first
icon: wrench
---
## Video Tutorial
Watch this video tutorial for a step-by-step demonstration of the installation process:
<iframe
width="100%"
height="400"
src="https://www.youtube.com/embed/-kSOTtYzgEw"
title="CrewAI Installation Guide"
frameborder="0"
style={{ borderRadius: '10px' }}
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
allowfullscreen
></iframe>
## Text Tutorial
<Note>
**Python Version Requirements**
CrewAI requires `Python >=3.10 and <3.13`. Here's how to check your version:
```bash
python3 --version
```
If you need to update Python, visit [python.org/downloads](https://python.org/downloads)
</Note>
@@ -155,27 +140,6 @@ We recommend using the `YAML` template scaffolding for a structured approach to
</Step>
</Steps>
## Enterprise Installation Options
<Note type="info">
For teams and organizations, CrewAI offers enterprise deployment options that eliminate setup complexity:
### CrewAI Enterprise (SaaS)
- Zero installation required - just sign up for free at [app.crewai.com](https://app.crewai.com)
- Automatic updates and maintenance
- Managed infrastructure and scaling
- Build Crews with no Code
### CrewAI Factory (Self-hosted)
- Containerized deployment for your infrastructure
- Supports any hyperscaler including on-prem deployments
- Integration with your existing security systems
<Card title="Explore Enterprise Options" icon="building" href="https://crewai.com/enterprise">
Learn about CrewAI's enterprise offerings and schedule a demo
</Card>
</Note>
## Next Steps
<CardGroup cols={2}>

View File

@@ -15,7 +15,6 @@ CrewAI empowers developers with both high-level simplicity and precise low-level
With over 100,000 developers certified through our community courses, CrewAI is rapidly becoming the standard for enterprise-ready AI automation.
## How Crews Work
<Note>

View File

@@ -200,22 +200,6 @@ Follow the steps below to get Crewing! 🚣‍♂️
```
</CodeGroup>
</Step>
<Step title="Enterprise Alternative: Create in Crew Studio">
For CrewAI Enterprise users, you can create the same crew without writing code:
1. Log in to your CrewAI Enterprise account (create a free account at [app.crewai.com](https://app.crewai.com))
2. Open Crew Studio
3. Type what automation you're trying to build
4. Create your tasks visually and connect them in sequence
5. Configure your inputs and click "Download Code" or "Deploy"
![Crew Studio Quickstart](../images/enterprise/crew-studio-quickstart.png)
<Card title="Try CrewAI Enterprise" icon="rocket" href="https://app.crewai.com">
Start your free account at CrewAI Enterprise
</Card>
</Step>
<Step title="View your final report">
You should see the output in the console and the `report.md` file should be created in the root of your project with the final report.
@@ -287,7 +271,7 @@ Follow the steps below to get Crewing! 🚣‍♂️
</Steps>
<Check>
Congratulations!
Congratulations!
You have successfully set up your crew project and are ready to start building your own agentic workflows!
</Check>

View File

@@ -22,16 +22,7 @@ usage of tools, API calls, responses, any data processed by the agents, or secre
When the `share_crew` feature is enabled, detailed data including task descriptions, agents' backstories or goals, and other specific attributes are collected
to provide deeper insights. This expanded data collection may include personal information if users have incorporated it into their crews or tasks.
Users should carefully consider the content of their crews and tasks before enabling `share_crew`.
Users can disable telemetry by setting the environment variable `CREWAI_DISABLE_TELEMETRY` to `true` or by setting `OTEL_SDK_DISABLED` to `true` (note that the latter disables all OpenTelemetry instrumentation globally).
### Examples:
```python
# Disable CrewAI telemetry only
os.environ['CREWAI_DISABLE_TELEMETRY'] = 'true'
# Disable all OpenTelemetry (including CrewAI)
os.environ['OTEL_SDK_DISABLED'] = 'true'
```
Users can disable telemetry by setting the environment variable `OTEL_SDK_DISABLED` to `true`.
### Data Explanation:
| Defaulted | Data | Reason and Specifics |
@@ -64,4 +55,4 @@ This enables a deeper insight into usage patterns.
<Warning>
If you enable `share_crew`, the collected data may include personal information if it has been incorporated into crew configurations, task descriptions, or outputs.
Users should carefully review their data and ensure compliance with GDPR and other applicable privacy regulations before enabling this feature.
</Warning>
</Warning>

View File

@@ -1,6 +1,6 @@
[project]
name = "crewai"
version = "0.114.0"
version = "0.108.0"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<3.13"
@@ -45,7 +45,7 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = ["crewai-tools~=0.40.1"]
tools = ["crewai-tools~=0.38.0"]
embeddings = [
"tiktoken~=0.7.0"
]

View File

@@ -2,14 +2,12 @@ import warnings
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
warnings.filterwarnings(
"ignore",
@@ -17,16 +15,14 @@ warnings.filterwarnings(
category=UserWarning,
module="pydantic.main",
)
__version__ = "0.114.0"
__version__ = "0.108.0"
__all__ = [
"Agent",
"Crew",
"CrewOutput",
"Process",
"Task",
"LLM",
"BaseLLM",
"Flow",
"Knowledge",
"TaskOutput",
]

View File

@@ -1,6 +1,7 @@
import re
import shutil
import subprocess
from typing import Any, Dict, List, Literal, Optional, Sequence, Type, Union
from typing import Any, Dict, List, Literal, Optional, Sequence, Union
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
@@ -10,7 +11,6 @@ from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
from crewai.lite_agent import LiteAgent, LiteAgentOutput
from crewai.llm import BaseLLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.security import Fingerprint
@@ -207,7 +207,6 @@ class Agent(BaseAgent):
self.crew._long_term_memory,
self.crew._entity_memory,
self.crew._user_memory,
self.crew._external_memory,
)
memory = contextual_memory.build_context_for_task(task, context)
if memory.strip() != "":
@@ -449,74 +448,3 @@ class Agent(BaseAgent):
def set_fingerprint(self, fingerprint: Fingerprint):
self.security_config.fingerprint = fingerprint
def kickoff(
self,
messages: Union[str, List[Dict[str, str]]],
response_format: Optional[Type[Any]] = None,
) -> LiteAgentOutput:
"""
Execute the agent with the given messages using a LiteAgent instance.
This method is useful when you want to use the Agent configuration but
with the simpler and more direct execution flow of LiteAgent.
Args:
messages: Either a string query or a list of message dictionaries.
If a string is provided, it will be converted to a user message.
If a list is provided, each dict should have 'role' and 'content' keys.
response_format: Optional Pydantic model for structured output.
Returns:
LiteAgentOutput: The result of the agent execution.
"""
lite_agent = LiteAgent(
role=self.role,
goal=self.goal,
backstory=self.backstory,
llm=self.llm,
tools=self.tools or [],
max_iterations=self.max_iter,
max_execution_time=self.max_execution_time,
respect_context_window=self.respect_context_window,
verbose=self.verbose,
response_format=response_format,
i18n=self.i18n,
)
return lite_agent.kickoff(messages)
async def kickoff_async(
self,
messages: Union[str, List[Dict[str, str]]],
response_format: Optional[Type[Any]] = None,
) -> LiteAgentOutput:
"""
Execute the agent asynchronously with the given messages using a LiteAgent instance.
This is the async version of the kickoff method.
Args:
messages: Either a string query or a list of message dictionaries.
If a string is provided, it will be converted to a user message.
If a list is provided, each dict should have 'role' and 'content' keys.
response_format: Optional Pydantic model for structured output.
Returns:
LiteAgentOutput: The result of the agent execution.
"""
lite_agent = LiteAgent(
role=self.role,
goal=self.goal,
backstory=self.backstory,
llm=self.llm,
tools=self.tools or [],
max_iterations=self.max_iter,
max_execution_time=self.max_execution_time,
respect_context_window=self.respect_context_window,
verbose=self.verbose,
response_format=response_format,
i18n=self.i18n,
)
return await lite_agent.kickoff_async(messages)
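Given the docstrings above, calling either entry point is straightforward; a hedged sketch, with `agent` an existing `Agent` instance and `Findings` a hypothetical response model:
```python
from pydantic import BaseModel

class Findings(BaseModel):  # placeholder structured-output model
    summary: str

# kickoff() builds a LiteAgent from this Agent's configuration and runs it.
output = agent.kickoff("Summarize recent AI news", response_format=Findings)
print(output.pydantic.summary if output.pydantic else output.raw)
```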

View File

@@ -47,27 +47,6 @@ class CrewAgentExecutorMixin:
print(f"Failed to add to short term memory: {e}")
pass
def _create_external_memory(self, output) -> None:
"""Create and save a external-term memory item if conditions are met."""
if (
self.crew
and self.agent
and self.task
and hasattr(self.crew, "_external_memory")
and self.crew._external_memory
):
try:
self.crew._external_memory.save(
value=output.text,
metadata={
"description": self.task.description,
},
agent=self.agent.role,
)
except Exception as e:
print(f"Failed to add to external memory: {e}")
pass
def _create_long_term_memory(self, output) -> None:
"""Create and save long-term and entity memory items based on evaluation."""
if (

View File

@@ -129,7 +129,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self._create_short_term_memory(formatted_answer)
self._create_long_term_memory(formatted_answer)
self._create_external_memory(formatted_answer)
return {"output": formatted_answer.output}
def _invoke_loop(self) -> AgentFinish:

View File

@@ -3,10 +3,6 @@ import subprocess
import click
# Be mindful about changing this.
# On some environments we don't use this command and run `uv sync` directly instead,
# so if you expect this to support more things you will need to replicate it there.
# Ask @joaomdmoura if you are unsure.
def install_crew(proxy_options: list[str]) -> None:
"""
Install the crew by running the UV command to lock and install.

View File

@@ -60,7 +60,7 @@ def test():
"current_year": str(datetime.now().year)
}
try:
{{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), eval_llm=sys.argv[2], inputs=inputs)
{{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
except Exception as e:
raise Exception(f"An error occurred while testing the crew: {e}")

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.13"
dependencies = [
"crewai[tools]>=0.114.0,<1.0.0"
"crewai[tools]>=0.108.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.13"
dependencies = [
"crewai[tools]>=0.114.0,<1.0.0",
"crewai[tools]>=0.108.0,<1.0.0",
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.13"
dependencies = [
"crewai[tools]>=0.114.0"
"crewai[tools]>=0.108.0"
]
[tool.crewai]

View File

@@ -28,7 +28,6 @@ from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.llm import LLM, BaseLLM
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.memory.user.user_memory import UserMemory
@@ -106,7 +105,6 @@ class Crew(BaseModel):
_long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
_entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr()
_user_memory: Optional[InstanceOf[UserMemory]] = PrivateAttr()
_external_memory: Optional[InstanceOf[ExternalMemory]] = PrivateAttr()
_train: Optional[bool] = PrivateAttr(default=False)
_train_iteration: Optional[int] = PrivateAttr()
_inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)
@@ -147,10 +145,6 @@ class Crew(BaseModel):
default=None,
description="An instance of the UserMemory to be used by the Crew to store/fetch memories of a specific user.",
)
external_memory: Optional[InstanceOf[ExternalMemory]] = Field(
default=None,
description="An Instance of the ExternalMemory to be used by the Crew",
)
embedder: Optional[dict] = Field(
default=None,
description="Configuration for the embedder to be used for the crew.",
@@ -295,12 +289,6 @@ class Crew(BaseModel):
if self.entity_memory
else EntityMemory(crew=self, embedder_config=self.embedder)
)
self._external_memory = (
# External memory doesn't support a default value since it was designed to be managed entirely externally
self.external_memory.set_crew(self)
if self.external_memory
else None
)
if (
self.memory_config
and "user_memory" in self.memory_config
@@ -1179,7 +1167,6 @@ class Crew(BaseModel):
"_short_term_memory",
"_long_term_memory",
"_entity_memory",
"_external_memory",
"_telemetry",
"agents",
"tasks",
@@ -1326,15 +1313,7 @@ class Crew(BaseModel):
RuntimeError: If memory reset operation fails.
"""
VALID_TYPES = frozenset(
[
"long",
"short",
"entity",
"knowledge",
"kickoff_outputs",
"all",
"external",
]
["long", "short", "entity", "knowledge", "kickoff_outputs", "all"]
)
if command_type not in VALID_TYPES:
@@ -1360,7 +1339,6 @@ class Crew(BaseModel):
memory_systems = [
("short term", getattr(self, "_short_term_memory", None)),
("entity", getattr(self, "_entity_memory", None)),
("external", getattr(self, "_external_memory", None)),
("long term", getattr(self, "_long_term_memory", None)),
("task output", getattr(self, "_task_output_handler", None)),
("knowledge", getattr(self, "knowledge", None)),
@@ -1388,7 +1366,6 @@ class Crew(BaseModel):
"entity": (self._entity_memory, "entity"),
"knowledge": (self.knowledge, "knowledge"),
"kickoff_outputs": (self._task_output_handler, "task output"),
"external": (self._external_memory, "external"),
}
memory_system, name = reset_functions[memory_type]

View File

@@ -1043,7 +1043,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
import traceback
traceback.print_exc()
raise
def _log_flow_event(
self, message: str, color: str = "yellow", level: str = "info"

View File

@@ -1,4 +1,6 @@
import asyncio
import json
import re
import uuid
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Type, Union, cast

View File

@@ -2,12 +2,5 @@ from .entity.entity_memory import EntityMemory
from .long_term.long_term_memory import LongTermMemory
from .short_term.short_term_memory import ShortTermMemory
from .user.user_memory import UserMemory
from .external.external_memory import ExternalMemory
__all__ = [
"UserMemory",
"EntityMemory",
"LongTermMemory",
"ShortTermMemory",
"ExternalMemory",
]
__all__ = ["UserMemory", "EntityMemory", "LongTermMemory", "ShortTermMemory"]

View File

@@ -1,12 +1,6 @@
from typing import Any, Dict, Optional
from crewai.memory import (
EntityMemory,
ExternalMemory,
LongTermMemory,
ShortTermMemory,
UserMemory,
)
from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory, UserMemory
class ContextualMemory:
@@ -17,7 +11,6 @@ class ContextualMemory:
ltm: LongTermMemory,
em: EntityMemory,
um: UserMemory,
exm: ExternalMemory,
):
if memory_config is not None:
self.memory_provider = memory_config.get("provider")
@@ -27,7 +20,6 @@ class ContextualMemory:
self.ltm = ltm
self.em = em
self.um = um
self.exm = exm
def build_context_for_task(self, task, context) -> str:
"""
@@ -43,7 +35,6 @@ class ContextualMemory:
context.append(self._fetch_ltm_context(task.description))
context.append(self._fetch_stm_context(query))
context.append(self._fetch_entity_context(query))
context.append(self._fetch_external_context(query))
if self.memory_provider == "mem0":
context.append(self._fetch_user_context(query))
return "\n".join(filter(None, context))
@@ -115,24 +106,3 @@ class ContextualMemory:
f"- {result['memory']}" for result in user_memories
)
return f"User memories/preferences:\n{formatted_memories}"
def _fetch_external_context(self, query: str) -> str:
"""
Fetches and formats relevant information from External Memory.
Args:
query (str): The search query to find relevant information.
Returns:
str: Formatted information as bullet points, or an empty string if none found.
"""
if self.exm is None:
return ""
external_memories = self.exm.search(query)
if not external_memories:
return ""
formatted_memories = "\n".join(
f"- {result['memory']}" for result in external_memories
)
return f"External memories:\n{formatted_memories}"

View File

@@ -1,61 +0,0 @@
from typing import TYPE_CHECKING, Any, Dict, Optional
from crewai.memory.external.external_memory_item import ExternalMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.interface import Storage
if TYPE_CHECKING:
from crewai.memory.storage.mem0_storage import Mem0Storage
class ExternalMemory(Memory):
def __init__(self, storage: Optional[Storage] = None, **data: Any):
super().__init__(storage=storage, **data)
@staticmethod
def _configure_mem0(crew: Any, config: Dict[str, Any]) -> "Mem0Storage":
from crewai.memory.storage.mem0_storage import Mem0Storage
return Mem0Storage(type="external", crew=crew, config=config)
@staticmethod
def external_supported_storages() -> Dict[str, Any]:
return {
"mem0": ExternalMemory._configure_mem0,
}
@staticmethod
def create_storage(crew: Any, embedder_config: Optional[Dict[str, Any]]) -> Storage:
if not embedder_config:
raise ValueError("embedder_config is required")
if "provider" not in embedder_config:
raise ValueError("embedder_config must include a 'provider' key")
provider = embedder_config["provider"]
supported_storages = ExternalMemory.external_supported_storages()
if provider not in supported_storages:
raise ValueError(f"Provider {provider} not supported")
return supported_storages[provider](crew, embedder_config.get("config", {}))
def save(
self,
value: Any,
metadata: Optional[Dict[str, Any]] = None,
agent: Optional[str] = None,
) -> None:
"""Saves a value into the external storage."""
item = ExternalMemoryItem(value=value, metadata=metadata, agent=agent)
super().save(value=item.value, metadata=item.metadata, agent=item.agent)
def reset(self) -> None:
self.storage.reset()
def set_crew(self, crew: Any) -> "ExternalMemory":
super().set_crew(crew)
if not self.storage:
self.storage = self.create_storage(crew, self.embedder_config)
return self
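
A minimal usage sketch of the ExternalMemory API in the hunk above, assuming a mem0 embedder config (the provider key selects the storage factory; the crew wiring is left commented because it assumes an existing Crew and a reachable mem0 backend):

from crewai.memory.external.external_memory import ExternalMemory

embedder_config = {"provider": "mem0", "config": {"user_id": "demo"}}
external_memory = ExternalMemory(embedder_config=embedder_config)

# Storage is created lazily once a crew is attached (see set_crew above):
# external_memory.set_crew(crew)
# external_memory.save("User prefers concise answers", metadata={"task": "demo"})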

View File

@@ -1,13 +0,0 @@
from typing import Any, Dict, Optional
class ExternalMemoryItem:
def __init__(
self,
value: Any,
metadata: Optional[Dict[str, Any]] = None,
agent: Optional[str] = None,
):
self.value = value
self.metadata = metadata
self.agent = agent

View File

@@ -9,7 +9,6 @@ class Memory(BaseModel):
"""
embedder_config: Optional[Dict[str, Any]] = None
crew: Optional[Any] = None
storage: Any
@@ -37,7 +36,3 @@ class Memory(BaseModel):
return self.storage.search(
query=query, limit=limit, score_threshold=score_threshold
)
def set_crew(self, crew: Any) -> "Memory":
self.crew = crew
return self

View File

@@ -11,20 +11,15 @@ class Mem0Storage(Storage):
Extends Storage to handle embedding and searching across entities using Mem0.
"""
def __init__(self, type, crew=None, config=None):
def __init__(self, type, crew=None):
super().__init__()
supported_types = ["user", "short_term", "long_term", "entities", "external"]
if type not in supported_types:
raise ValueError(
f"Invalid type '{type}' for Mem0Storage. Must be one of: "
+ ", ".join(supported_types)
)
if type not in ["user", "short_term", "long_term", "entities"]:
raise ValueError("Invalid type for Mem0Storage. Must be 'user' or 'agent'.")
self.memory_type = type
self.crew = crew
self.config = config or {}
# TODO: memory_config will be removed in the future; the config will be passed as a parameter instead
self.memory_config = self.config or getattr(crew, "memory_config", {}) or {}
self.memory_config = crew.memory_config
# User ID is required for user memory type "user" since it's used as a unique identifier for the user.
user_id = self._get_user_id()
@@ -32,7 +27,7 @@ class Mem0Storage(Storage):
raise ValueError("User ID is required for user memory type")
# API key in memory config overrides the environment variable
config = self._get_config()
config = self.memory_config.get("config", {})
mem0_api_key = config.get("api_key") or os.getenv("MEM0_API_KEY")
mem0_org_id = config.get("org_id")
mem0_project_id = config.get("project_id")
@@ -61,34 +56,26 @@ class Mem0Storage(Storage):
def save(self, value: Any, metadata: Dict[str, Any]) -> None:
user_id = self._get_user_id()
agent_name = self._get_agent_name()
params = None
if self.memory_type == "short_term":
params = {
"agent_id": agent_name,
"infer": False,
"metadata": {"type": "short_term", **metadata},
}
if self.memory_type == "user":
self.memory.add(value, user_id=user_id, metadata={**metadata})
elif self.memory_type == "short_term":
agent_name = self._get_agent_name()
self.memory.add(
value, agent_id=agent_name, metadata={"type": "short_term", **metadata}
)
elif self.memory_type == "long_term":
params = {
"agent_id": agent_name,
"infer": False,
"metadata": {"type": "long_term", **metadata},
}
agent_name = self._get_agent_name()
self.memory.add(
value,
agent_id=agent_name,
infer=False,
metadata={"type": "long_term", **metadata},
)
elif self.memory_type == "entities":
params = {
"agent_id": agent_name,
"infer": False,
"metadata": {"type": "entity", **metadata},
}
elif self.memory_type == "external":
params = {
"user_id": user_id,
"agent_id": agent_name,
"metadata": {"type": "external", **metadata},
}
if params:
self.memory.add(value, **params | {"output_format": "v1.1"})
entity_name = self._get_agent_name()
self.memory.add(
value, user_id=entity_name, metadata={"type": "entity", **metadata}
)
def search(
self,
@@ -97,43 +84,41 @@ class Mem0Storage(Storage):
score_threshold: float = 0.35,
) -> List[Any]:
params = {"query": query, "limit": limit}
if user_id := self._get_user_id():
if self.memory_type == "user":
user_id = self._get_user_id()
params["user_id"] = user_id
agent_name = self._get_agent_name()
if self.memory_type == "short_term":
elif self.memory_type == "short_term":
agent_name = self._get_agent_name()
params["agent_id"] = agent_name
params["metadata"] = {"type": "short_term"}
elif self.memory_type == "long_term":
agent_name = self._get_agent_name()
params["agent_id"] = agent_name
params["metadata"] = {"type": "long_term"}
elif self.memory_type == "entities":
agent_name = self._get_agent_name()
params["agent_id"] = agent_name
params["metadata"] = {"type": "entity"}
elif self.memory_type == "external":
params["agent_id"] = agent_name
params["metadata"] = {"type": "external"}
# Discard the filters for now since we create the filters
# automatically when the crew is created.
results = self.memory.search(**params)
return [r for r in results if r["score"] >= score_threshold]
def _get_user_id(self) -> str:
return self._get_config().get("user_id", "")
def _get_user_id(self):
if self.memory_type == "user":
if hasattr(self, "memory_config") and self.memory_config is not None:
return self.memory_config.get("config", {}).get("user_id")
else:
return None
return None
def _get_agent_name(self) -> str:
if not self.crew:
return ""
agents = self.crew.agents
def _get_agent_name(self):
agents = self.crew.agents if self.crew else []
agents = [self._sanitize_role(agent.role) for agent in agents]
agents = "_".join(agents)
return agents
def _get_config(self) -> Dict[str, Any]:
return self.config or getattr(self, "memory_config", {}).get("config", {}) or {}
def reset(self):
if self.memory:
self.memory.reset()
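
A hedged sketch of the two configuration paths the updated constructor supports, mirroring the test fixtures further down; my_crew is a placeholder for an existing Crew and the api_key value is fake:

from crewai.memory.storage.mem0_storage import Mem0Storage

# 1) An explicit config passed to the storage takes precedence:
explicit = Mem0Storage(
    type="external",
    crew=my_crew,  # my_crew: placeholder for an existing Crew instance
    config={"provider": "mem0", "config": {"api_key": "ABCDEFGH"}},
)

# 2) With no explicit config, it falls back to crew.memory_config, then {}:
fallback = Mem0Storage(type="short_term", crew=my_crew)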

View File

@@ -1,4 +1,3 @@
import warnings
from typing import Any, Dict, Optional
from crewai.memory.memory import Memory
@@ -13,12 +12,6 @@ class UserMemory(Memory):
"""
def __init__(self, crew=None):
warnings.warn(
"UserMemory is deprecated and will be removed in a future version. "
"Please use ExternalMemory instead.",
DeprecationWarning,
stacklevel=2,
)
try:
from crewai.memory.storage.mem0_storage import Mem0Storage
except ImportError:
@@ -55,4 +48,6 @@ class UserMemory(Memory):
try:
self.storage.reset()
except Exception as e:
raise Exception(f"An error occurred while resetting the user memory: {e}")
raise Exception(
f"An error occurred while resetting the user memory: {e}"
)
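
The deprecation shim above follows the standard-library warnings pattern; a self-contained illustration (this local UserMemory is a stand-in, not the crewai class):

import warnings

class UserMemory:  # stand-in class for illustration only
    def __init__(self):
        warnings.warn(
            "UserMemory is deprecated and will be removed in a future version. "
            "Please use ExternalMemory instead.",
            DeprecationWarning,
            stacklevel=2,  # attribute the warning to the caller, not __init__
        )

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    UserMemory()
assert issubclass(caught[0].category, DeprecationWarning)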

View File

@@ -137,11 +137,13 @@ def CrewBase(cls: T) -> T:
all_functions, "is_cache_handler"
)
callbacks = self._filter_functions(all_functions, "is_callback")
agents = self._filter_functions(all_functions, "is_agent")
for agent_name, agent_info in self.agents_config.items():
self._map_agent_variables(
agent_name,
agent_info,
agents,
llms,
tool_functions,
cache_handler_functions,
@@ -152,6 +154,7 @@ def CrewBase(cls: T) -> T:
self,
agent_name: str,
agent_info: Dict[str, Any],
agents: Dict[str, Callable],
llms: Dict[str, Callable],
tool_functions: Dict[str, Callable],
cache_handler_functions: Dict[str, Callable],
@@ -169,10 +172,9 @@ def CrewBase(cls: T) -> T:
]
if function_calling_llm := agent_info.get("function_calling_llm"):
try:
self.agents_config[agent_name]["function_calling_llm"] = llms[function_calling_llm]()
except KeyError:
self.agents_config[agent_name]["function_calling_llm"] = function_calling_llm
self.agents_config[agent_name]["function_calling_llm"] = agents[
function_calling_llm
]()
if step_callback := agent_info.get("step_callback"):
self.agents_config[agent_name]["step_callback"] = callbacks[
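
A hedged sketch of how a YAML-declared function_calling_llm name is meant to resolve against an @llm-decorated method, mirroring the config and test diffs further down (the YAML path and model values are placeholders):

from crewai import Agent, LLM
from crewai.project import CrewBase, agent, llm

@CrewBase
class DemoCrew:
    # config/agents.yaml is assumed to contain, under researcher:
    #   function_calling_llm: "local_llm"
    agents_config = "config/agents.yaml"

    @llm
    def local_llm(self):
        return LLM(model="openai/model_name", api_key="None",
                   base_url="http://xxx.xxx.xxx.xxx:8000/v1")

    @agent
    def researcher(self):
        return Agent(config=self.agents_config["researcher"])

# crew = DemoCrew()
# crew.researcher().function_calling_llm is crew.local_llm()  # per the test below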

View File

@@ -45,10 +45,10 @@ class Telemetry:
"""
def __init__(self):
self.ready: bool = False
self.trace_set: bool = False
self.ready = False
self.trace_set = False
if self._is_telemetry_disabled():
if os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true":
return
try:
@@ -75,13 +75,6 @@ class Telemetry:
):
raise # Re-raise the exception to not interfere with system signals
self.ready = False
def _is_telemetry_disabled(self) -> bool:
"""Check if telemetry should be disabled based on environment variables."""
return (
os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true" or
os.getenv("CREWAI_DISABLE_TELEMETRY", "false").lower() == "true"
)
def set_tracer(self):
if self.ready and not self.trace_set:
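
The environment-variable check shown above is easy to exercise in isolation; a minimal sketch:

import os

def is_telemetry_disabled() -> bool:
    # Either variable set to any casing of "true" disables telemetry.
    return (
        os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true"
        or os.getenv("CREWAI_DISABLE_TELEMETRY", "false").lower() == "true"
    )

os.environ["CREWAI_DISABLE_TELEMETRY"] = "TRUE"
assert is_telemetry_disabled()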

View File

@@ -244,13 +244,9 @@ def to_langchain(
return [t.to_structured_tool() if isinstance(t, BaseTool) else t for t in tools]
def tool(*args, result_as_answer=False):
def tool(*args):
"""
Decorator to create a tool from a function.
Args:
*args: Positional arguments, either the function to decorate or the tool name.
result_as_answer: Flag to indicate if the tool result should be used as the final agent answer.
"""
def _make_with_name(tool_name: str) -> Callable:
@@ -276,7 +272,6 @@ def tool(*args, result_as_answer=False):
description=f.__doc__,
func=f,
args_schema=args_schema,
result_as_answer=result_as_answer,
)
return _make_tool
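
A hedged usage sketch of the result_as_answer flag from the hunk above (the import path and tool body are assumptions; compare the decorator test near the end of this diff):

from crewai.tools import tool  # import path assumed

@tool("Echo tool", result_as_answer=True)
def echo(question: str) -> str:
    """Echo the input; its result is used directly as the final agent answer."""
    return question

assert echo.result_as_answer is True
assert echo.to_structured_tool().result_as_answer is True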

View File

@@ -1,486 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
and should NEVER make up tools that are not listed here:\n\nTool Name: search_web\nTool
Arguments: {''query'': {''description'': None, ''type'': ''str''}}\nTool Description:
Search the web for information about a topic.\nTool Name: calculate\nTool Arguments:
{''expression'': {''description'': None, ''type'': ''str''}}\nTool Description:
Calculate the result of a mathematical expression.\n\nIMPORTANT: Use the following
format in your response:\n\n```\nThought: you should always think about what
to do\nAction: the action to take, only one name of [search_web, calculate],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "Test query"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1220'
content-type:
- application/json
cookie:
- __cf_bm=skEg5DBE_nQ5gLAsfGzhjTetiNUJ_Y2bXWLMsvjIi7s-1744222695-1.0.1.1-qyjwnTgJKwF54pRhf0YHxW_BUw6p7SC60kwFsF9XTq4i2u2mnFKVq4WbsgvQDeuDEIxyaNb.ngWUVOU1GIX1O2Hcxcdn6TSaJ8NXTQw28F8;
_cfuvid=Hwvd7n4RVfOZLGiOKPaHmYJC7h8rCQmlmnBgBsKqy4Y-1744222695443-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-BKUIMCbxAr4MO0Ku8tDYBgJ30LGXi\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222714,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I need more information
to understand what specific query to search for.\\nAction: search_web\\nAction
Input: {\\\"query\\\":\\\"Test query\\\"}\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 242,\n \"completion_tokens\":
31,\n \"total_tokens\": 273,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-RAY:
- 92dc01f9bd96cf41-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 09 Apr 2025 18:18:34 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '749'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999732'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_99e3ad4ee98371cc1c55a2f5c6ae3962
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
and should NEVER make up tools that are not listed here:\n\nTool Name: search_web\nTool
Arguments: {''query'': {''description'': None, ''type'': ''str''}}\nTool Description:
Search the web for information about a topic.\nTool Name: calculate\nTool Arguments:
{''expression'': {''description'': None, ''type'': ''str''}}\nTool Description:
Calculate the result of a mathematical expression.\n\nIMPORTANT: Use the following
format in your response:\n\n```\nThought: you should always think about what
to do\nAction: the action to take, only one name of [search_web, calculate],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "Test query"}, {"role": "assistant", "content":
"```\nThought: I need more information to understand what specific query to
search for.\nAction: search_web\nAction Input: {\"query\":\"Test query\"}\nObservation:
Found information about Test query: This is a simulated search result for demonstration
purposes."}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1518'
content-type:
- application/json
cookie:
- __cf_bm=skEg5DBE_nQ5gLAsfGzhjTetiNUJ_Y2bXWLMsvjIi7s-1744222695-1.0.1.1-qyjwnTgJKwF54pRhf0YHxW_BUw6p7SC60kwFsF9XTq4i2u2mnFKVq4WbsgvQDeuDEIxyaNb.ngWUVOU1GIX1O2Hcxcdn6TSaJ8NXTQw28F8;
_cfuvid=Hwvd7n4RVfOZLGiOKPaHmYJC7h8rCQmlmnBgBsKqy4Y-1744222695443-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-BKUINDYiGwrVyJU7wUoXCw3hft7yF\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222715,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
Answer: This is a simulated search result for demonstration purposes.\\n```\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
295,\n \"completion_tokens\": 26,\n \"total_tokens\": 321,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-RAY:
- 92dc02003c9ecf41-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 09 Apr 2025 18:18:35 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '531'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999667'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_dd9052c40d5d61ecc5eb141f49df3abe
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
and should NEVER make up tools that are not listed here:\n\nTool Name: search_web\nTool
Arguments: {''query'': {''description'': None, ''type'': ''str''}}\nTool Description:
Search the web for information about a topic.\nTool Name: calculate\nTool Arguments:
{''expression'': {''description'': None, ''type'': ''str''}}\nTool Description:
Calculate the result of a mathematical expression.\n\nIMPORTANT: Use the following
format in your response:\n\n```\nThought: you should always think about what
to do\nAction: the action to take, only one name of [search_web, calculate],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```\nIMPORTANT:
Your final answer MUST contain all the information requested in the following
format: {\n \"test_field\": str\n}\n\nIMPORTANT: Ensure the final output does
not include any code block markers like ```json or ```python."}, {"role": "user",
"content": "Test query"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1451'
content-type:
- application/json
cookie:
- __cf_bm=skEg5DBE_nQ5gLAsfGzhjTetiNUJ_Y2bXWLMsvjIi7s-1744222695-1.0.1.1-qyjwnTgJKwF54pRhf0YHxW_BUw6p7SC60kwFsF9XTq4i2u2mnFKVq4WbsgvQDeuDEIxyaNb.ngWUVOU1GIX1O2Hcxcdn6TSaJ8NXTQw28F8;
_cfuvid=Hwvd7n4RVfOZLGiOKPaHmYJC7h8rCQmlmnBgBsKqy4Y-1744222695443-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-BKUIN3xeM6JBgLjV5HQA8MTI2Uuem\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222715,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I need to clarify what
specific information or topic the test query is targeting.\\nAction: search_web\\nAction
Input: {\\\"query\\\":\\\"What is the purpose of a test query in data retrieval?\\\"}\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
288,\n \"completion_tokens\": 43,\n \"total_tokens\": 331,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-RAY:
- 92dc0204d91ccf41-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 09 Apr 2025 18:18:36 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '728'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999675'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_e792e993009ddfe84cfbb503560d88cf
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
and should NEVER make up tools that are not listed here:\n\nTool Name: search_web\nTool
Arguments: {''query'': {''description'': None, ''type'': ''str''}}\nTool Description:
Search the web for information about a topic.\nTool Name: calculate\nTool Arguments:
{''expression'': {''description'': None, ''type'': ''str''}}\nTool Description:
Calculate the result of a mathematical expression.\n\nIMPORTANT: Use the following
format in your response:\n\n```\nThought: you should always think about what
to do\nAction: the action to take, only one name of [search_web, calculate],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```\nIMPORTANT:
Your final answer MUST contain all the information requested in the following
format: {\n \"test_field\": str\n}\n\nIMPORTANT: Ensure the final output does
not include any code block markers like ```json or ```python."}, {"role": "user",
"content": "Test query"}, {"role": "assistant", "content": "```\nThought: I
need to clarify what specific information or topic the test query is targeting.\nAction:
search_web\nAction Input: {\"query\":\"What is the purpose of a test query in
data retrieval?\"}\nObservation: Found information about What is the purpose
of a test query in data retrieval?: This is a simulated search result for demonstration
purposes."}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1846'
content-type:
- application/json
cookie:
- __cf_bm=skEg5DBE_nQ5gLAsfGzhjTetiNUJ_Y2bXWLMsvjIi7s-1744222695-1.0.1.1-qyjwnTgJKwF54pRhf0YHxW_BUw6p7SC60kwFsF9XTq4i2u2mnFKVq4WbsgvQDeuDEIxyaNb.ngWUVOU1GIX1O2Hcxcdn6TSaJ8NXTQw28F8;
_cfuvid=Hwvd7n4RVfOZLGiOKPaHmYJC7h8rCQmlmnBgBsKqy4Y-1744222695443-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-BKUIOqyLDCIZv6YIz1hlaW479SIzg\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222716,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
Answer: {\\n \\\"test_field\\\": \\\"A test query is utilized to evaluate the
functionality, performance, and accuracy of data retrieval systems, ensuring
they return expected results.\\\"\\n}\\n```\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 362,\n \"completion_tokens\":
49,\n \"total_tokens\": 411,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-RAY:
- 92dc020a3defcf41-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 09 Apr 2025 18:18:37 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '805'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999588'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_3b6c80fd3066b9e0054d0d2280bc4c98
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -1,249 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Research Assistant.
You are a helpful research assistant who can search for information about the
population of Tokyo.\nYour personal goal is: Find information about the population
of Tokyo\n\nYou ONLY have access to the following tools, and should NEVER make
up tools that are not listed here:\n\nTool Name: search_web\nTool Arguments:
{''query'': {''description'': None, ''type'': ''str''}}\nTool Description: Search
the web for information about a topic.\n\nIMPORTANT: Use the following format
in your response:\n\n```\nThought: you should always think about what to do\nAction:
the action to take, only one name of [search_web], just the name, exactly as
it''s written.\nAction Input: the input to the action, just a simple JSON object,
enclosed in curly braces, using \" to wrap keys and values.\nObservation: the
result of the action\n```\n\nOnce all necessary information is gathered, return
the following format:\n\n```\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n```"}, {"role": "user", "content":
"What is the population of Tokyo? Return your strucutred output in JSON format
with the following fields: summary, confidence"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1290'
content-type:
- application/json
cookie:
- _cfuvid=u769MG.poap6iEjFpbByMFUC0FygMEqYSurr5DfLbas-1743447969501-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-BKUM5MZbz4TG6qmUtTrgKo8gI48FO\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222945,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I need to find the current
population of Tokyo.\\nAction: search_web\\nAction Input: {\\\"query\\\":\\\"current
population of Tokyo 2023\\\"}\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 248,\n \"completion_tokens\":
33,\n \"total_tokens\": 281,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-RAY:
- 92dc079f8e5a7ab0-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 09 Apr 2025 18:22:26 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=1F.UUVSjZyp8QMRT0dTQXUJc5WlGpC3xAx4FY7KCQbs-1744222946-1.0.1.1-vcXIZcokSjfxyFeoTTUAWmBGmJpv0ss9iFqt5EJVZGE1PvSV2ov0erCS.KIo0xItBMuX_MtCgDSaYMPI3L9QDsLatWqfUFieHiFh0CrX4h8;
path=/; expires=Wed, 09-Apr-25 18:52:26 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=RbJuVW8hReYElyyghEbAFletdnJZ2mk5rn9D8EGuyNk-1744222946580-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1282'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999713'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_845ed875afd48dee3d88f33cbab88cc2
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are Research Assistant.
You are a helpful research assistant who can search for information about the
population of Tokyo.\nYour personal goal is: Find information about the population
of Tokyo\n\nYou ONLY have access to the following tools, and should NEVER make
up tools that are not listed here:\n\nTool Name: search_web\nTool Arguments:
{''query'': {''description'': None, ''type'': ''str''}}\nTool Description: Search
the web for information about a topic.\n\nIMPORTANT: Use the following format
in your response:\n\n```\nThought: you should always think about what to do\nAction:
the action to take, only one name of [search_web], just the name, exactly as
it''s written.\nAction Input: the input to the action, just a simple JSON object,
enclosed in curly braces, using \" to wrap keys and values.\nObservation: the
result of the action\n```\n\nOnce all necessary information is gathered, return
the following format:\n\n```\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n```"}, {"role": "user", "content":
"What is the population of Tokyo? Return your strucutred output in JSON format
with the following fields: summary, confidence"}, {"role": "assistant", "content":
"```\nThought: I need to find the current population of Tokyo.\nAction: search_web\nAction
Input: {\"query\":\"current population of Tokyo 2023\"}\nObservation: Tokyo''s
population in 2023 was approximately 21 million people in the city proper, and
37 million in the greater metropolitan area."}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1619'
content-type:
- application/json
cookie:
- _cfuvid=RbJuVW8hReYElyyghEbAFletdnJZ2mk5rn9D8EGuyNk-1744222946580-0.0.1.1-604800000;
__cf_bm=1F.UUVSjZyp8QMRT0dTQXUJc5WlGpC3xAx4FY7KCQbs-1744222946-1.0.1.1-vcXIZcokSjfxyFeoTTUAWmBGmJpv0ss9iFqt5EJVZGE1PvSV2ov0erCS.KIo0xItBMuX_MtCgDSaYMPI3L9QDsLatWqfUFieHiFh0CrX4h8
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-BKUM69pnk6VLn5rpDjGdg21mOxFke\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222946,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
Answer: {\\\"summary\\\":\\\"The population of Tokyo is approximately 21 million
in the city proper and 37 million in the greater metropolitan area as of 2023.\\\",\\\"confidence\\\":\\\"high\\\"}\\n```\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
315,\n \"completion_tokens\": 51,\n \"total_tokens\": 366,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-RAY:
- 92dc07a8ac9f7ab0-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 09 Apr 2025 18:22:27 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1024'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999642'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_d72860d8629025988b1170e939bc1f20
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -8,7 +8,6 @@ researcher:
developments in {topic}. Known for your ability to find the most relevant
information and present it in a clear and concise manner.
verbose: true
function_calling_llm: "local_llm"
reporting_analyst:
role: >
@@ -19,5 +18,4 @@ reporting_analyst:
You're a meticulous analyst with a keen eye for detail. You're known for
your ability to turn complex data into clear and concise reports, making
it easy for others to understand and act on the information you provide.
verbose: true
function_calling_llm: "online_llm"
verbose: true

View File

@@ -1,15 +0,0 @@
"""Test that all public API classes are properly importable."""
def test_task_output_import():
"""Test that TaskOutput can be imported from crewai."""
from crewai import TaskOutput
assert TaskOutput is not None
def test_crew_output_import():
"""Test that CrewOutput can be imported from crewai."""
from crewai import CrewOutput
assert CrewOutput is not None

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1,180 +0,0 @@
from unittest.mock import MagicMock, patch
import pytest
from mem0.memory.main import Memory
from crewai.agent import Agent
from crewai.crew import Crew, Process
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.external.external_memory_item import ExternalMemoryItem
from crewai.memory.storage.interface import Storage
from crewai.task import Task
@pytest.fixture
def mock_mem0_memory():
mock_memory = MagicMock(spec=Memory)
return mock_memory
@pytest.fixture
def patch_configure_mem0(mock_mem0_memory):
with patch(
"crewai.memory.external.external_memory.ExternalMemory._configure_mem0",
return_value=mock_mem0_memory,
) as mocked:
yield mocked
@pytest.fixture
def external_memory_with_mocked_config(patch_configure_mem0):
embedder_config = {"provider": "mem0"}
external_memory = ExternalMemory(embedder_config=embedder_config)
return external_memory
@pytest.fixture
def crew_with_external_memory(external_memory_with_mocked_config, patch_configure_mem0):
agent = Agent(
role="Researcher",
goal="Search relevant data and provide results",
backstory="You are a researcher at a leading tech think tank.",
tools=[],
verbose=True,
)
task = Task(
description="Perform a search on specific topics.",
expected_output="A list of relevant URLs based on the search query.",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
process=Process.sequential,
memory=True,
external_memory=external_memory_with_mocked_config,
)
return crew
def test_external_memory_initialization(external_memory_with_mocked_config):
assert external_memory_with_mocked_config is not None
assert isinstance(external_memory_with_mocked_config, ExternalMemory)
def test_external_memory_save(external_memory_with_mocked_config):
memory_item = ExternalMemoryItem(
value="test value", metadata={"task": "test_task"}, agent="test_agent"
)
with patch.object(ExternalMemory, "save") as mock_save:
external_memory_with_mocked_config.save(
value=memory_item.value,
metadata=memory_item.metadata,
agent=memory_item.agent,
)
mock_save.assert_called_once_with(
value=memory_item.value,
metadata=memory_item.metadata,
agent=memory_item.agent,
)
def test_external_memory_reset(external_memory_with_mocked_config):
with patch(
"crewai.memory.external.external_memory.ExternalMemory.reset"
) as mock_reset:
external_memory_with_mocked_config.reset()
mock_reset.assert_called_once()
def test_external_memory_supported_storages():
supported_storages = ExternalMemory.external_supported_storages()
assert "mem0" in supported_storages
assert callable(supported_storages["mem0"])
def test_external_memory_create_storage_invalid_provider():
embedder_config = {"provider": "invalid_provider", "config": {}}
with pytest.raises(ValueError, match="Provider invalid_provider not supported"):
ExternalMemory.create_storage(None, embedder_config)
def test_external_memory_create_storage_missing_provider():
embedder_config = {"config": {}}
with pytest.raises(
ValueError, match="embedder_config must include a 'provider' key"
):
ExternalMemory.create_storage(None, embedder_config)
def test_external_memory_create_storage_missing_config():
with pytest.raises(ValueError, match="embedder_config is required"):
ExternalMemory.create_storage(None, None)
def test_crew_with_external_memory_initialization(crew_with_external_memory):
assert crew_with_external_memory._external_memory is not None
assert isinstance(crew_with_external_memory._external_memory, ExternalMemory)
assert crew_with_external_memory._external_memory.crew == crew_with_external_memory
@pytest.mark.parametrize("mem_type", ["external", "all"])
def test_crew_external_memory_reset(mem_type, crew_with_external_memory):
with patch(
"crewai.memory.external.external_memory.ExternalMemory.reset"
) as mock_reset:
crew_with_external_memory.reset_memories(mem_type)
mock_reset.assert_called_once()
@pytest.mark.parametrize("mem_method", ["search", "save"])
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_external_memory_save(mem_method, crew_with_external_memory):
with patch(
f"crewai.memory.external.external_memory.ExternalMemory.{mem_method}"
) as mock_method:
crew_with_external_memory.kickoff()
assert mock_method.call_count > 0
def test_external_memory_custom_storage(crew_with_external_memory):
class CustomStorage(Storage):
def __init__(self):
self.memories = []
def save(self, value, metadata=None, agent=None):
self.memories.append({"value": value, "metadata": metadata, "agent": agent})
def search(self, query, limit=10, score_threshold=0.5):
return self.memories
def reset(self):
self.memories = []
custom_storage = CustomStorage()
external_memory = ExternalMemory(storage=custom_storage)
# by ensuring the crew is set, we can test that the storage is used
external_memory.set_crew(crew_with_external_memory)
test_value = "test value"
test_metadata = {"source": "test"}
test_agent = "test_agent"
external_memory.save(value=test_value, metadata=test_metadata, agent=test_agent)
results = external_memory.search("test")
assert len(results) == 1
assert results[0]["value"] == test_value
assert results[0]["metadata"] == test_metadata | {"agent": test_agent}
external_memory.reset()
results = external_memory.search("test")
assert len(results) == 0

View File

@@ -2,16 +2,7 @@ import pytest
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.llm import LLM
from crewai.project import (
CrewBase,
after_kickoff,
agent,
before_kickoff,
crew,
llm,
task,
)
from crewai.project import CrewBase, after_kickoff, agent, before_kickoff, crew, task
from crewai.task import Task
@@ -40,13 +31,6 @@ class InternalCrew:
agents_config = "config/agents.yaml"
tasks_config = "config/tasks.yaml"
@llm
def local_llm(self):
return LLM(
model='openai/model_name',
api_key="None",
base_url="http://xxx.xxx.xxx.xxx:8000/v1")
@agent
def researcher(self):
return Agent(config=self.agents_config["researcher"])
@@ -121,20 +105,6 @@ def test_task_name():
), "Custom task name is not being set as expected"
def test_agent_function_calling_llm():
crew = InternalCrew()
llm = crew.local_llm()
obj_llm_agent = crew.researcher()
assert (
obj_llm_agent.function_calling_llm is llm
), "agent's function_calling_llm is incorrect"
str_llm_agent = crew.reporting_analyst()
assert (
str_llm_agent.function_calling_llm.model == "online_llm"
), "agent's function_calling_llm is incorrect"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_before_kickoff_modification():
crew = InternalCrew()

View File

@@ -29,32 +29,41 @@ def mem0_storage_with_mocked_config(mock_mem0_memory):
"""Fixture to create a Mem0Storage instance with mocked dependencies"""
# Patch the Memory class to return our mock
with patch("mem0.memory.main.Memory.from_config", return_value=mock_mem0_memory):
with patch('mem0.memory.main.Memory.from_config', return_value=mock_mem0_memory):
config = {
"vector_store": {
"provider": "mock_vector_store",
"config": {"host": "localhost", "port": 6333},
"config": {
"host": "localhost",
"port": 6333
}
},
"llm": {
"provider": "mock_llm",
"config": {"api_key": "mock-api-key", "model": "mock-model"},
"config": {
"api_key": "mock-api-key",
"model": "mock-model"
}
},
"embedder": {
"provider": "mock_embedder",
"config": {"api_key": "mock-api-key", "model": "mock-model"},
"config": {
"api_key": "mock-api-key",
"model": "mock-model"
}
},
"graph_store": {
"provider": "mock_graph_store",
"config": {
"url": "mock-url",
"username": "mock-user",
"password": "mock-password",
},
"password": "mock-password"
}
},
"history_db_path": "/mock/path",
"version": "test-version",
"custom_fact_extraction_prompt": "mock prompt 1",
"custom_update_memory_prompt": "mock prompt 2",
"custom_update_memory_prompt": "mock prompt 2"
}
# Instantiate the class with memory_config
@@ -83,73 +92,23 @@ def mock_mem0_memory_client():
@pytest.fixture
def mem0_storage_with_memory_client_using_config_from_crew(mock_mem0_memory_client):
def mem0_storage_with_memory_client(mock_mem0_memory_client):
"""Fixture to create a Mem0Storage instance with mocked dependencies"""
# We need to patch the MemoryClient before it's instantiated
with patch.object(MemoryClient, "__new__", return_value=mock_mem0_memory_client):
crew = MockCrew(
memory_config={
"provider": "mem0",
"config": {
"user_id": "test_user",
"api_key": "ABCDEFGH",
"org_id": "my_org_id",
"project_id": "my_project_id",
},
}
)
with patch.object(MemoryClient, '__new__', return_value=mock_mem0_memory_client):
crew = MockCrew(
memory_config={
"provider": "mem0",
"config": {"user_id": "test_user", "api_key": "ABCDEFGH", "org_id": "my_org_id", "project_id": "my_project_id"},
}
)
mem0_storage = Mem0Storage(type="short_term", crew=crew)
return mem0_storage
mem0_storage = Mem0Storage(type="short_term", crew=crew)
return mem0_storage
@pytest.fixture
def mem0_storage_with_memory_client_using_explictly_config(mock_mem0_memory_client):
"""Fixture to create a Mem0Storage instance with mocked dependencies"""
# We need to patch the MemoryClient before it's instantiated
with patch.object(MemoryClient, "__new__", return_value=mock_mem0_memory_client):
crew = MockCrew(
memory_config={
"provider": "mem0",
"config": {
"user_id": "test_user",
"api_key": "ABCDEFGH",
"org_id": "my_org_id",
"project_id": "my_project_id",
},
}
)
new_config = {"provider": "mem0", "config": {"api_key": "new-api-key"}}
mem0_storage = Mem0Storage(type="short_term", crew=crew, config=new_config)
return mem0_storage
def test_mem0_storage_with_memory_client_initialization(
mem0_storage_with_memory_client_using_config_from_crew, mock_mem0_memory_client
):
def test_mem0_storage_with_memory_client_initialization(mem0_storage_with_memory_client, mock_mem0_memory_client):
"""Test Mem0Storage initialization with MemoryClient"""
assert (
mem0_storage_with_memory_client_using_config_from_crew.memory_type
== "short_term"
)
assert (
mem0_storage_with_memory_client_using_config_from_crew.memory
is mock_mem0_memory_client
)
def test_mem0_storage_with_explict_config(
mem0_storage_with_memory_client_using_explictly_config,
):
expected_config = {"provider": "mem0", "config": {"api_key": "new-api-key"}}
assert (
mem0_storage_with_memory_client_using_explictly_config.config == expected_config
)
assert (
mem0_storage_with_memory_client_using_explictly_config.memory_config
== expected_config
)
assert mem0_storage_with_memory_client.memory_type == "short_term"
assert mem0_storage_with_memory_client.memory is mock_mem0_memory_client

View File

@@ -1,30 +0,0 @@
import os
from unittest.mock import patch
import pytest
from crewai.telemetry import Telemetry
@pytest.mark.parametrize("env_var,value,expected_ready", [
("OTEL_SDK_DISABLED", "true", False),
("OTEL_SDK_DISABLED", "TRUE", False),
("CREWAI_DISABLE_TELEMETRY", "true", False),
("CREWAI_DISABLE_TELEMETRY", "TRUE", False),
("OTEL_SDK_DISABLED", "false", True),
("CREWAI_DISABLE_TELEMETRY", "false", True),
])
def test_telemetry_environment_variables(env_var, value, expected_ready):
"""Test telemetry state with different environment variable configurations."""
with patch.dict(os.environ, {env_var: value}):
with patch("crewai.telemetry.telemetry.TracerProvider"):
telemetry = Telemetry()
assert telemetry.ready is expected_ready
def test_telemetry_enabled_by_default():
"""Test that telemetry is enabled by default."""
with patch.dict(os.environ, {}, clear=True):
with patch("crewai.telemetry.telemetry.TracerProvider"):
telemetry = Telemetry()
assert telemetry.ready is True

View File

@@ -4,8 +4,8 @@ from typing import cast
import pytest
from pydantic import BaseModel, Field
from crewai import LLM, Agent
from crewai.lite_agent import LiteAgent, LiteAgentOutput
from crewai import LLM
from crewai.lite_agent import LiteAgent
from crewai.tools import BaseTool
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
@@ -63,74 +63,12 @@ class ResearchResult(BaseModel):
sources: list[str] = Field(description="List of sources used")
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.parametrize("verbose", [True, False])
def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose):
"""Test that LiteAgent is created with the correct parameters when Agent.kickoff() is called."""
# Create a test agent with specific parameters
llm = LLM(model="gpt-4o-mini")
custom_tools = [WebSearchTool(), CalculatorTool()]
max_iter = 10
max_execution_time = 300
agent = Agent(
role="Test Agent",
goal="Test Goal",
backstory="Test Backstory",
llm=llm,
tools=custom_tools,
max_iter=max_iter,
max_execution_time=max_execution_time,
verbose=verbose,
)
# Create a mock to capture the created LiteAgent
created_lite_agent = None
original_lite_agent = LiteAgent
# Define a mock LiteAgent class that captures its arguments
class MockLiteAgent(original_lite_agent):
def __init__(self, **kwargs):
nonlocal created_lite_agent
created_lite_agent = kwargs
super().__init__(**kwargs)
# Patch the LiteAgent class
monkeypatch.setattr("crewai.agent.LiteAgent", MockLiteAgent)
# Call kickoff to create the LiteAgent
agent.kickoff("Test query")
# Verify all parameters were passed correctly
assert created_lite_agent is not None
assert created_lite_agent["role"] == "Test Agent"
assert created_lite_agent["goal"] == "Test Goal"
assert created_lite_agent["backstory"] == "Test Backstory"
assert created_lite_agent["llm"] == llm
assert len(created_lite_agent["tools"]) == 2
assert isinstance(created_lite_agent["tools"][0], WebSearchTool)
assert isinstance(created_lite_agent["tools"][1], CalculatorTool)
assert created_lite_agent["max_iterations"] == max_iter
assert created_lite_agent["max_execution_time"] == max_execution_time
assert created_lite_agent["verbose"] == verbose
assert created_lite_agent["response_format"] is None
# Test with a response_format
monkeypatch.setattr("crewai.agent.LiteAgent", MockLiteAgent)
class TestResponse(BaseModel):
test_field: str
agent.kickoff("Test query", response_format=TestResponse)
assert created_lite_agent["response_format"] == TestResponse
@pytest.mark.vcr(filter_headers=["authorization"])
def test_lite_agent_with_tools():
"""Test that Agent can use tools."""
"""Test that LiteAgent can use tools."""
# Create a LiteAgent with tools
llm = LLM(model="gpt-4o-mini")
agent = Agent(
agent = LiteAgent(
role="Research Assistant",
goal="Find information about the population of Tokyo",
backstory="You are a helpful research assistant who can search for information about the population of Tokyo.",
@@ -168,7 +106,7 @@ def test_lite_agent_with_tools():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_lite_agent_structured_output():
"""Test that Agent can return a simple structured output."""
"""Test that LiteAgent can return a simple structured output."""
class SimpleOutput(BaseModel):
"""Simple structure for agent outputs."""
@@ -179,18 +117,18 @@ def test_lite_agent_structured_output():
web_search_tool = WebSearchTool()
llm = LLM(model="gpt-4o-mini")
agent = Agent(
agent = LiteAgent(
role="Info Gatherer",
goal="Provide brief information",
backstory="You gather and summarize information quickly.",
llm=llm,
tools=[web_search_tool],
verbose=True,
response_format=SimpleOutput,
)
result = agent.kickoff(
"What is the population of Tokyo? Return your strucutred output in JSON format with the following fields: summary, confidence",
response_format=SimpleOutput,
"What is the population of Tokyo? Return your strucutred output in JSON format with the following fields: summary, confidence"
)
print(f"\n=== Agent Result Type: {type(result)}")
@@ -217,7 +155,7 @@ def test_lite_agent_structured_output():
def test_lite_agent_returns_usage_metrics():
"""Test that LiteAgent returns usage metrics."""
llm = LLM(model="gpt-4o-mini")
agent = Agent(
agent = LiteAgent(
role="Research Assistant",
goal="Find information about the population of Tokyo",
backstory="You are a helpful research assistant who can search for information about the population of Tokyo.",
@@ -232,26 +170,3 @@ def test_lite_agent_returns_usage_metrics():
assert result.usage_metrics is not None
assert result.usage_metrics["total_tokens"] > 0
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_lite_agent_returns_usage_metrics_async():
"""Test that LiteAgent returns usage metrics when run asynchronously."""
llm = LLM(model="gpt-4o-mini")
agent = Agent(
role="Research Assistant",
goal="Find information about the population of Tokyo",
backstory="You are a helpful research assistant who can search for information about the population of Tokyo.",
llm=llm,
tools=[WebSearchTool()],
verbose=True,
)
result = await agent.kickoff_async(
"What is the population of Tokyo? Return your strucutred output in JSON format with the following fields: summary, confidence"
)
assert isinstance(result, LiteAgentOutput)
assert "21 million" in result.raw or "37 million" in result.raw
assert result.usage_metrics is not None
assert result.usage_metrics["total_tokens"] > 0
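
A hedged sketch of the Agent.kickoff() path these tests exercise (running it requires an OpenAI key; the role, prompt, and model are taken from the tests above):

from pydantic import BaseModel
from crewai import Agent, LLM
from crewai.lite_agent import LiteAgentOutput

class SimpleOutput(BaseModel):
    summary: str
    confidence: str

agent = Agent(
    role="Info Gatherer",
    goal="Provide brief information",
    backstory="You gather and summarize information quickly.",
    llm=LLM(model="gpt-4o-mini"),
)
result = agent.kickoff(
    "What is the population of Tokyo? Return your structured output in JSON "
    "format with the following fields: summary, confidence",
    response_format=SimpleOutput,
)
assert isinstance(result, LiteAgentOutput)  # carries raw text plus usage_metrics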

View File

@@ -100,25 +100,3 @@ def test_default_cache_function_is_true():
my_tool = MyCustomTool()
# Assert all the right attributes were defined
assert my_tool.cache_function()
def test_result_as_answer_in_tool_decorator():
@tool("Tool with result as answer", result_as_answer=True)
def my_tool_with_result_as_answer(question: str) -> str:
"""This tool will return its result as the final answer."""
return question
assert my_tool_with_result_as_answer.result_as_answer is True
converted_tool = my_tool_with_result_as_answer.to_structured_tool()
assert converted_tool.result_as_answer is True
@tool("Tool with default result_as_answer")
def my_tool_with_default(question: str) -> str:
"""This tool uses the default result_as_answer value."""
return question
assert my_tool_with_default.result_as_answer is False
converted_tool = my_tool_with_default.to_structured_tool()
assert converted_tool.result_as_answer is False

uv.lock generated
View File

@@ -1,42 +1,19 @@
version = 1
revision = 1
requires-python = ">=3.10, <3.13"
resolution-markers = [
"python_full_version < '3.11' and platform_system == 'Darwin' and sys_platform == 'darwin'",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_system == 'Linux' and sys_platform == 'darwin'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version < '3.11' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11' and platform_machine == 'aarch64' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.11.*' and platform_system == 'Darwin' and sys_platform == 'darwin'",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_system == 'Linux' and sys_platform == 'darwin'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_system == 'Darwin' and sys_platform == 'darwin'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux' and sys_platform == 'darwin'",
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12.4' and platform_system == 'Darwin' and sys_platform == 'darwin'",
"python_full_version >= '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux' and sys_platform == 'darwin'",
"(python_full_version >= '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin' and sys_platform == 'darwin') or (python_full_version >= '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'darwin')",
"python_full_version >= '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Darwin' and sys_platform == 'linux'",
"python_full_version >= '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux' and sys_platform == 'linux'",
"python_full_version >= '3.12.4' and platform_machine == 'aarch64' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform == 'linux'",
"(python_full_version >= '3.12.4' and platform_machine != 'aarch64' and platform_system == 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12.4' and platform_system == 'Darwin' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux'",
"(python_full_version >= '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin' and sys_platform != 'darwin') or (python_full_version >= '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11' and sys_platform == 'darwin'",
"python_full_version < '3.11' and platform_machine == 'aarch64' and sys_platform == 'linux'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.11.*' and sys_platform == 'darwin'",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and sys_platform == 'linux'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and sys_platform == 'darwin'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and sys_platform == 'linux'",
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12.4' and sys_platform == 'darwin'",
"python_full_version >= '3.12.4' and platform_machine == 'aarch64' and sys_platform == 'linux'",
"(python_full_version >= '3.12.4' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12.4' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
[[package]]
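Editor's note: the removed markers above mixed platform_system with sys_platform, while the replacement list keys only on python_full_version, platform_machine, and sys_platform. These are standard PEP 508 environment markers; a minimal sketch of evaluating one against the running interpreter, assuming the third-party packaging library is installed:

    from packaging.markers import Marker

    # One of the simplified markers from the new resolution-markers list.
    marker = Marker("python_full_version >= '3.12.4' and sys_platform == 'linux'")

    # evaluate() substitutes values such as python_full_version and
    # sys_platform from the current interpreter and returns a bool.
    print(marker.evaluate())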
@@ -344,7 +321,7 @@ name = "build"
version = "1.2.2.post1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "os_name == 'nt'" },
{ name = "colorama", marker = "(os_name == 'nt' and platform_machine != 'aarch64' and sys_platform == 'linux') or (os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "importlib-metadata", marker = "python_full_version < '3.10.2'" },
{ name = "packaging" },
{ name = "pyproject-hooks" },
@@ -579,7 +556,7 @@ name = "click"
version = "8.1.8"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "platform_system == 'Windows'" },
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 }
wheels = [
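Editor's note: the colorama marker swap above (repeated below for mkdocs, mpire, portalocker, and tqdm) replaces platform_system == 'Windows' with sys_platform == 'win32'. Per PEP 508 these are backed by platform.system() and sys.platform respectively; a small stdlib sketch of the two values the markers compare against on Windows:

    import platform
    import sys

    print(platform.system())  # 'Windows' on Windows -> matches the old marker
    print(sys.platform)       # 'win32' on Windows   -> matches the new marker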
@@ -630,7 +607,7 @@ wheels = [
[[package]]
name = "crewai"
version = "0.114.0"
version = "0.108.0"
source = { editable = "." }
dependencies = [
{ name = "appdirs" },
@@ -718,7 +695,7 @@ requires-dist = [
{ name = "blinker", specifier = ">=1.9.0" },
{ name = "chromadb", specifier = ">=0.5.23" },
{ name = "click", specifier = ">=8.1.7" },
{ name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.40.1" },
{ name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.38.0" },
{ name = "docling", marker = "extra == 'docling'", specifier = ">=2.12.0" },
{ name = "fastembed", marker = "extra == 'fastembed'", specifier = ">=0.4.1" },
{ name = "instructor", specifier = ">=1.3.3" },
@@ -745,6 +722,7 @@ requires-dist = [
{ name = "tomli-w", specifier = ">=1.1.0" },
{ name = "uv", specifier = ">=0.4.25" },
]
+provides-extras = ["tools", "embeddings", "agentops", "fastembed", "pdfplumber", "pandas", "openpyxl", "mem0", "docling", "aisuite"]
[package.metadata.requires-dev]
dev = [
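Editor's note: in requires-dist above, crewai-tools is guarded by extra == 'tools', and the added provides-extras line declares the extras the package exposes. A sketch of how an extra-guarded marker evaluates, again assuming the packaging library; the explicit environment override mirrors how resolvers commonly model extras and is an assumption for illustration, not crewai-specific behavior:

    from packaging.markers import Marker

    m = Marker("extra == 'tools'")
    print(m.evaluate({"extra": "tools"}))  # True: resolving crewai[tools]
    print(m.evaluate({"extra": ""}))       # False: resolving the base package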
@@ -767,7 +745,7 @@ dev = [
[[package]]
name = "crewai-tools"
version = "0.40.1"
version = "0.38.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "chromadb" },
@@ -782,9 +760,9 @@ dependencies = [
{ name = "pytube" },
{ name = "requests" },
]
sdist = { url = "https://files.pythonhosted.org/packages/16/ff/0c16c9943ec1501b12fc72aca7815f191ffe94d5f1fe4e9c353ee8c4ad1d/crewai_tools-0.40.1.tar.gz", hash = "sha256:6af5040b2277df8fd592238a17bf584f95dcc9ef7766236534999c8a9e9d0b52", size = 744094 }
sdist = { url = "https://files.pythonhosted.org/packages/85/3f/d3b5697b4c6756cec65316c9ea9ccd9054f7b73670d1580befd3632ba031/crewai_tools-0.38.1.tar.gz", hash = "sha256:6abe75b3b339d53a9cf4e2d80124d863ff62a82b36753c30bec64318881876b2", size = 737620 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/35/05/619c00bae2dda038f0d218dd5197120c938e9c9ccef1b9e50cfb037486f6/crewai_tools-0.40.1-py3-none-any.whl", hash = "sha256:8f459f74dee64364bfdbc524c815c4afcfb9ed532b51e6b8b4f616398d46cf1e", size = 573286 },
{ url = "https://files.pythonhosted.org/packages/2b/2b/a6c9007647ffbb6a3c204b3ef26806030d6b041e3e012d4cec43c21335d6/crewai_tools-0.38.1-py3-none-any.whl", hash = "sha256:d9d3a88060f1f30c8f4ea044f6dd564a50d0a22b8a018a6fcec202b36246b9d8", size = 561414 },
]
[[package]]
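Editor's note: the sdist and wheel entries above pin exact SHA-256 hashes, so the 0.40.1 -> 0.38.1 change also swaps the pinned artifacts. A hypothetical sketch of checking a downloaded archive against the lock entry; the local filename is an assumption for illustration:

    import hashlib

    def sha256_of(path: str) -> str:
        # Stream the file so large archives do not load fully into memory.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(8192), b""):
                h.update(chunk)
        return h.hexdigest()

    expected = "6abe75b3b339d53a9cf4e2d80124d863ff62a82b36753c30bec64318881876b2"
    print(sha256_of("crewai_tools-0.38.1.tar.gz") == expected)  # hypothetical local file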
@@ -2518,7 +2496,7 @@ version = "1.6.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "click" },
{ name = "colorama", marker = "platform_system == 'Windows'" },
{ name = "colorama", marker = "sys_platform == 'win32'" },
{ name = "ghp-import" },
{ name = "jinja2" },
{ name = "markdown" },
@@ -2699,7 +2677,7 @@ version = "2.10.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pygments" },
{ name = "pywin32", marker = "platform_system == 'Windows'" },
{ name = "pywin32", marker = "sys_platform == 'win32'" },
{ name = "tqdm" },
]
sdist = { url = "https://files.pythonhosted.org/packages/3a/93/80ac75c20ce54c785648b4ed363c88f148bf22637e10c9863db4fbe73e74/mpire-2.10.2.tar.gz", hash = "sha256:f66a321e93fadff34585a4bfa05e95bd946cf714b442f51c529038eb45773d97", size = 271270 }
@@ -2946,7 +2924,7 @@ name = "nvidia-cudnn-cu12"
version = "9.1.0.70"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
{ name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/9f/fd/713452cd72343f682b1c7b9321e23829f00b842ceaedcda96e742ea0b0b3/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", size = 664752741 },
@@ -2973,9 +2951,9 @@ name = "nvidia-cusolver-cu12"
version = "11.4.5.107"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
{ name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
{ name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/bc/1d/8de1e5c67099015c834315e333911273a8c6aaba78923dd1d1e25fc5f217/nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd", size = 124161928 },
@@ -2986,7 +2964,7 @@ name = "nvidia-cusparse-cu12"
version = "12.1.0.106"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/65/5b/cfaeebf25cd9fdec14338ccb16f6b2c4c7fa9163aefcf057d86b9cc248bb/nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c", size = 195958278 },
@@ -2997,7 +2975,6 @@ name = "nvidia-nccl-cu12"
version = "2.20.5"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c1/bb/d09dda47c881f9ff504afd6f9ca4f502ded6d8fc2f572cacc5e39da91c28/nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01", size = 176238458 },
{ url = "https://files.pythonhosted.org/packages/4b/2a/0a131f572aa09f741c30ccd45a8e56316e8be8dfc7bc19bf0ab7cfef7b19/nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56", size = 176249402 },
]
@@ -3007,7 +2984,6 @@ version = "12.6.85"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/9d/d7/c5383e47c7e9bf1c99d5bd2a8c935af2b6d705ad831a7ec5c97db4d82f4f/nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:eedc36df9e88b682efe4309aa16b5b4e78c2407eac59e8c10a6a47535164369a", size = 19744971 },
{ url = "https://files.pythonhosted.org/packages/31/db/dc71113d441f208cdfe7ae10d4983884e13f464a6252450693365e166dcf/nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf4eaa7d4b6b543ffd69d6abfb11efdeb2db48270d94dfd3a452c24150829e41", size = 19270338 },
]
[[package]]
@@ -3525,7 +3501,7 @@ name = "portalocker"
version = "2.10.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pywin32", marker = "platform_system == 'Windows'" },
{ name = "pywin32", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ed/d3/c6c64067759e87af98cc668c1cc75171347d0f1577fab7ca3749134e3cd4/portalocker-2.10.1.tar.gz", hash = "sha256:ef1bf844e878ab08aee7e40184156e1151f228f103aa5c6bd0724cc330960f8f", size = 40891 }
wheels = [
@@ -5032,19 +5008,19 @@ dependencies = [
{ name = "fsspec" },
{ name = "jinja2" },
{ name = "networkx" },
{ name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "sympy" },
{ name = "triton", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "typing-extensions" },
]
wheels = [
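Editor's note: all eleven nvidia-* dependencies above, plus triton, keep the same gate while migrating its spelling: the CUDA stack resolves only on x86_64 Linux. A stdlib sketch of the equivalent runtime condition:

    import platform
    import sys

    # Mirrors the marker "platform_machine == 'x86_64' and sys_platform == 'linux'".
    needs_cuda_stack = platform.machine() == "x86_64" and sys.platform == "linux"
    print(needs_cuda_stack)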
@@ -5091,7 +5067,7 @@ name = "tqdm"
version = "4.66.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "platform_system == 'Windows'" },
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/58/83/6ba9844a41128c62e810fddddd72473201f3eacde02046066142a2d96cc5/tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad", size = 169504 }
wheels = [
@@ -5133,7 +5109,7 @@ name = "triton"
version = "3.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "filelock", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'linux')" },
{ name = "filelock", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/45/27/14cc3101409b9b4b9241d2ba7deaa93535a217a211c86c4cc7151fb12181/triton-3.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e1efef76935b2febc365bfadf74bcb65a6f959a9872e5bddf44cc9e0adce1e1a", size = 209376304 },