Compare commits


1 Commit

Author SHA1 Message Date
Devin AI
cc08e36d32 feat: change litellm dependency from strict pin to minimum version constraint
- Change litellm==1.74.3 to litellm>=1.74.3 in pyproject.toml
- Update uv.lock with new dependency constraint
- Add comprehensive tests to verify minimum version constraint works
- Allows users to install newer litellm versions for features like Claude 4 Sonnet

Fixes #3207

Co-Authored-By: João <joao@crewai.com>
2025-07-22 23:50:14 +00:00
47 changed files with 3572 additions and 4960 deletions

View File

@@ -270,7 +270,7 @@ In this section, you'll find detailed examples that help you select, configure,
from crewai import LLM
llm = LLM(
model="gemini-1.5-pro-latest", # or vertex_ai/gemini-1.5-pro-latest
model="gemini/gemini-1.5-pro-latest",
temperature=0.7,
vertex_credentials=vertex_credentials_json
)

View File

@@ -623,7 +623,7 @@ for provider in providers_to_test:
**Model not found errors:**
```python
# Verify model availability
from crewai.rag.embeddings.configurator import EmbeddingConfigurator
from crewai.utilities.embedding_configurator import EmbeddingConfigurator
configurator = EmbeddingConfigurator()
try:
@@ -720,16 +720,7 @@ crew = Crew(
```
### Advanced Mem0 Configuration
When using the Mem0 Client, you can customize the memory configuration further with parameters such as 'includes', 'excludes', 'custom_categories', 'infer', and 'run_id' (the last applies only to short-term memory).
You can find more details in the [Mem0 documentation](https://docs.mem0.ai/).
```python
new_categories = [
{"lifestyle_management_concerns": "Tracks daily routines, habits, hobbies and interests including cooking, time management and work-life balance"},
{"seeking_structure": "Documents goals around creating routines, schedules, and organized systems in various life areas"},
{"personal_information": "Basic information about the user including name, preferences, and personality traits"}
]
crew = Crew(
agents=[...],
tasks=[...],
@@ -741,11 +732,6 @@ crew = Crew(
"org_id": "my_org_id", # Optional
"project_id": "my_project_id", # Optional
"api_key": "custom-api-key" # Optional - overrides env var
"run_id": "my_run_id", # Optional - for short-term memory
"includes": "include1", # Optional
"excludes": "exclude1", # Optional
"infer": True # Optional defaults to True
"custom_categories": new_categories # Optional - custom categories for user memory
},
"user_memory": {}
}
@@ -775,8 +761,7 @@ crew = Crew(
"provider": "openai",
"config": {"api_key": "your-api-key", "model": "text-embedding-3-small"}
}
},
"infer": True # Optional defaults to True
}
},
"user_memory": {}
}

View File

@@ -6,6 +6,10 @@ icon: google
# `SerperDevTool`
<Note>
We are still working on improving tools, so there might be unexpected behavior or changes in the future.
</Note>
## Description
This tool is designed to perform a semantic search for a specified query from a text's content across the internet. It utilizes the [serper.dev](https://serper.dev) API
@@ -13,12 +17,6 @@ to fetch and display the most relevant search results based on the query provide
## Installation
To effectively use the `SerperDevTool`, follow these steps:
1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment.
2. **API Key Acquisition**: Acquire a `serper.dev` API key by registering for a free account at `serper.dev`.
3. **Environment Configuration**: Store your obtained API key in an environment variable named `SERPER_API_KEY` to facilitate its use by the tool.
To incorporate this tool into your project, follow the installation instructions below:
```shell
@@ -36,6 +34,14 @@ from crewai_tools import SerperDevTool
tool = SerperDevTool()
```
## Steps to Get Started
To effectively use the `SerperDevTool`, follow these steps:
1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment.
2. **API Key Acquisition**: Acquire a `serper.dev` API key by registering for a free account at `serper.dev`.
3. **Environment Configuration**: Store your obtained API key in an environment variable named `SERPER_API_KEY` to facilitate its use by the tool.
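For step 3, a typical shell setup looks like the following (the key value is a placeholder):
```shell
export SERPER_API_KEY="your-serper-api-key"
```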
## Parameters
The `SerperDevTool` comes with several parameters that will be passed to the API:
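As a hedged illustration, parameters can be set when constructing the tool; the names `n_results` and `search_url` reflect common releases of `crewai-tools` and may differ in your installed version:
```python Code
from crewai_tools import SerperDevTool

tool = SerperDevTool(
    n_results=10,  # assumed parameter: number of search results to return
    search_url="https://google.serper.dev/search",  # assumed parameter: Serper endpoint to query
)
```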

View File

@@ -1,100 +0,0 @@
---
title: Serper Scrape Website
description: The `SerperScrapeWebsiteTool` is designed to scrape websites and extract clean, readable content using Serper's scraping API.
icon: globe
---
# `SerperScrapeWebsiteTool`
## Description
This tool is designed to scrape website content and extract clean, readable text from any website URL. It utilizes the [serper.dev](https://serper.dev) scraping API to fetch and process web pages, optionally including markdown formatting for better structure and readability.
## Installation
To effectively use the `SerperScrapeWebsiteTool`, follow these steps:
1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment.
2. **API Key Acquisition**: Acquire a `serper.dev` API key by registering for an account at `serper.dev`.
3. **Environment Configuration**: Store your obtained API key in an environment variable named `SERPER_API_KEY` to facilitate its use by the tool.
To incorporate this tool into your project, follow the installation instructions below:
```shell
pip install 'crewai[tools]'
```
## Example
The following example demonstrates how to initialize the tool and scrape a website:
```python Code
from crewai_tools import SerperScrapeWebsiteTool
# Initialize the tool for website scraping capabilities
tool = SerperScrapeWebsiteTool()
# Scrape a website with markdown formatting
result = tool.run(url="https://example.com", include_markdown=True)
```
## Arguments
The `SerperScrapeWebsiteTool` accepts the following arguments:
- **url**: Required. The URL of the website to scrape.
- **include_markdown**: Optional. Whether to include markdown formatting in the scraped content. Defaults to `True`.
## Example with Parameters
Here is an example demonstrating how to use the tool with different parameters:
```python Code
from crewai_tools import SerperScrapeWebsiteTool
tool = SerperScrapeWebsiteTool()
# Scrape with markdown formatting (default)
markdown_result = tool.run(
url="https://docs.crewai.com",
include_markdown=True
)
# Scrape without markdown formatting for plain text
plain_result = tool.run(
url="https://docs.crewai.com",
include_markdown=False
)
print("Markdown formatted content:")
print(markdown_result)
print("\nPlain text content:")
print(plain_result)
```
## Use Cases
The `SerperScrapeWebsiteTool` is particularly useful for:
- **Content Analysis**: Extract and analyze website content for research purposes
- **Data Collection**: Gather structured information from web pages
- **Documentation Processing**: Convert web-based documentation into readable formats
- **Competitive Analysis**: Scrape competitor websites for market research
- **Content Migration**: Extract content from existing websites for migration purposes
## Error Handling
The tool includes comprehensive error handling for:
- **Network Issues**: Handles connection timeouts and network errors gracefully
- **API Errors**: Provides detailed error messages for API-related issues
- **Invalid URLs**: Validates and reports issues with malformed URLs
- **Authentication**: Clear error messages for missing or invalid API keys
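For example, a minimal defensive-usage sketch, assuming these failures surface as ordinary Python exceptions:
```python Code
from crewai_tools import SerperScrapeWebsiteTool

tool = SerperScrapeWebsiteTool()
try:
    content = tool.run(url="https://example.com", include_markdown=True)
    print(content)
except Exception as exc:  # network, API, and validation errors propagate here
    print(f"Scraping failed: {exc}")
```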
## Security Considerations
- Always store your `SERPER_API_KEY` in environment variables, never hardcode it in your source code
- Be mindful of rate limits imposed by the Serper API
- Respect robots.txt and website terms of service when scraping content
- Consider implementing delays between requests for large-scale scraping operations
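A small sketch of the environment-variable pattern recommended above (the check itself is illustrative):
```python Code
import os

# Fail fast if the key is missing rather than hardcoding it in source
if not os.environ.get("SERPER_API_KEY"):
    raise RuntimeError("Set the SERPER_API_KEY environment variable before running.")
```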

View File

@@ -84,8 +84,8 @@ filename = "seu_modelo.pkl"
try:
SuaCrew().crew().train(
n_iterations=n_iterations,
inputs=inputs,
filename=filename
)
except Exception as e:
@@ -103,7 +103,7 @@ crewai replay [OPTIONS]
- `-t, --task_id TEXT`: Replays the crew from this task ID, including all subsequent tasks
Example:
```shell Terminal
crewai replay -t task_123456
```
@@ -149,7 +149,7 @@ crewai test [OPTIONS]
- `-m, --model TEXT`: LLM model to run the tests on the Crew (default: "gpt-4o-mini")
Example:
```shell Terminal
crewai test -n 5 -m gpt-3.5-turbo
```
@@ -203,7 +203,10 @@ def crew(self) -> Crew:
Deploy the crew or flow to [CrewAI Enterprise](https://app.crewai.com).
- **Authentication**: You must be authenticated to deploy to CrewAI Enterprise.
You can log in or create an account with:
```shell Terminal
crewai signup
```
If you already have an account, you can log in with:
```shell Terminal
crewai login
```
@@ -250,7 +253,7 @@ Você deve estar autenticado no CrewAI Enterprise para usar estes comandos de ge
- **Deploy the Crew**: Once authenticated, you can deploy your crew or flow to CrewAI Enterprise.
```shell Terminal
crewai deploy push
```
- Starts the deployment process on the CrewAI Enterprise platform.
- After a successful start, the message Deployment created successfully! is displayed, along with the Deployment Name and a unique Deployment ID (UUID).
@@ -323,4 +326,4 @@ Ao escolher um provedor, o CLI solicitará que você informe o nome da chave e a
See the following link for each provider's key name:
* [LiteLLM Providers](https://docs.litellm.ai/docs/providers)

View File

@@ -268,7 +268,7 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co
from crewai import LLM
llm = LLM(
model="gemini-1.5-pro-latest", # or vertex_ai/gemini-1.5-pro-latest
model="gemini/gemini-1.5-pro-latest",
temperature=0.7,
vertex_credentials=vertex_credentials_json
)

View File

@@ -623,7 +623,7 @@ for provider in providers_to_test:
**Model not found errors:**
```python
# Verify model availability
from crewai.rag.embeddings.configurator import EmbeddingConfigurator
from crewai.utilities.embedding_configurator import EmbeddingConfigurator
configurator = EmbeddingConfigurator()
try:

View File

@@ -1,49 +0,0 @@
from crewai import Agent, Crew, Task, Process, LLM
researcher = Agent(
role="Researcher",
goal="Research and analyze information",
backstory="You are an expert researcher with years of experience.",
llm=LLM(model="gpt-4o-mini")
)
writer = Agent(
role="Writer",
goal="Write compelling content",
backstory="You are a skilled writer who creates engaging content.",
llm=LLM(model="gpt-4o-mini")
)
research_task = Task(
description="Research the latest trends in AI",
expected_output="A comprehensive report on AI trends",
agent=researcher
)
writing_task = Task(
description="Write an article based on the research",
expected_output="A well-written article about AI trends",
agent=writer
)
crew = Crew(
agents=[researcher, writer],
tasks=[research_task, writing_task],
process=Process.sequential,
trace_execution=True
)
result = crew.kickoff(inputs={"topic": "artificial intelligence"})
if result.execution_trace:
print(f"Total execution steps: {result.execution_trace.total_steps}")
print(f"Execution duration: {result.execution_trace.end_time - result.execution_trace.start_time}")
thoughts = result.execution_trace.get_steps_by_type("agent_thought")
print(f"Agent thoughts captured: {len(thoughts)}")
tool_calls = result.execution_trace.get_steps_by_type("tool_call_started")
print(f"Tool calls made: {len(tool_calls)}")
for step in result.execution_trace.steps:
print(f"{step.timestamp}: {step.step_type} - {step.agent_role or 'System'}")

View File

@@ -11,7 +11,7 @@ dependencies = [
# Core Dependencies
"pydantic>=2.4.2",
"openai>=1.13.3",
"litellm==1.74.3",
"litellm>=1.74.3",
"instructor>=1.3.3",
# Text Processing
"pdfplumber>=0.11.4",
@@ -48,7 +48,7 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = ["crewai-tools~=0.59.0"]
tools = ["crewai-tools~=0.55.0"]
embeddings = [
"tiktoken~=0.8.0"
]
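For context, the practical effect of relaxing the pin is that any litellm release at or above 1.74.3 now resolves; a hedged check (the command is illustrative only):
```shell
pip install 'litellm>=1.74.3'  # any release >= 1.74.3 now satisfies crewai's constraint
```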

View File

@@ -5,7 +5,6 @@ import urllib.request
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.crews.execution_trace import ExecutionTrace, ExecutionStep
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
@@ -55,13 +54,11 @@ def _track_install_async():
_track_install_async()
__version__ = "0.152.0"
__version__ = "0.148.0"
__all__ = [
"Agent",
"Crew",
"CrewOutput",
"ExecutionTrace",
"ExecutionStep",
"Process",
"Task",
"LLM",

View File

@@ -3,7 +3,6 @@ from typing import Optional
import click
from crewai.cli.config import Settings
from crewai.cli.settings.main import SettingsCommand
from crewai.cli.add_crew_to_flow import add_crew_to_flow
from crewai.cli.create_crew import create_crew
from crewai.cli.create_flow import create_flow
@@ -228,7 +227,7 @@ def update():
@crewai.command()
def login():
"""Sign Up/Login to CrewAI Enterprise."""
Settings().clear_user_settings()
Settings().clear()
AuthenticationCommand().login()
@@ -370,8 +369,8 @@ def org():
pass
@org.command("list")
def org_list():
@org.command()
def list():
"""List available organizations."""
org_command = OrganizationCommand()
org_command.list()
@@ -392,34 +391,5 @@ def current():
org_command.current()
@crewai.group()
def config():
"""CLI Configuration commands."""
pass
@config.command("list")
def config_list():
"""List all CLI configuration parameters."""
config_command = SettingsCommand()
config_command.list()
@config.command("set")
@click.argument("key")
@click.argument("value")
def config_set(key: str, value: str):
"""Set a CLI configuration parameter."""
config_command = SettingsCommand()
config_command.set(key, value)
@config.command("reset")
def config_reset():
"""Reset all CLI configuration parameters to default values."""
config_command = SettingsCommand()
config_command.reset_all_settings()
if __name__ == "__main__":
crewai()

View File

@@ -26,7 +26,7 @@ class PlusAPIMixin:
"Please sign up/login to CrewAI+ before using the CLI.",
style="bold red",
)
console.print("Run 'crewai login' to sign up/login.", style="bold green")
console.print("Run 'crewai signup' to sign up/login.", style="bold green")
raise SystemExit
def _validate_response(self, response: requests.Response) -> None:

View File

@@ -4,47 +4,10 @@ from typing import Optional
from pydantic import BaseModel, Field
from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL
DEFAULT_CONFIG_PATH = Path.home() / ".config" / "crewai" / "settings.json"
# Settings that are related to the user's account
USER_SETTINGS_KEYS = [
"tool_repository_username",
"tool_repository_password",
"org_name",
"org_uuid",
]
# Settings that are related to the CLI
CLI_SETTINGS_KEYS = [
"enterprise_base_url",
]
# Default values for CLI settings
DEFAULT_CLI_SETTINGS = {
"enterprise_base_url": DEFAULT_CREWAI_ENTERPRISE_URL,
}
# Readonly settings - cannot be set by the user
READONLY_SETTINGS_KEYS = [
"org_name",
"org_uuid",
]
# Hidden settings - not displayed by the 'list' command and cannot be set by the user
HIDDEN_SETTINGS_KEYS = [
"config_path",
"tool_repository_username",
"tool_repository_password",
]
class Settings(BaseModel):
enterprise_base_url: Optional[str] = Field(
default=DEFAULT_CREWAI_ENTERPRISE_URL,
description="Base URL of the CrewAI Enterprise instance",
)
tool_repository_username: Optional[str] = Field(
None, description="Username for interacting with the Tool Repository"
)
@@ -57,7 +20,7 @@ class Settings(BaseModel):
org_uuid: Optional[str] = Field(
None, description="UUID of the currently active organization"
)
config_path: Path = Field(default=DEFAULT_CONFIG_PATH, frozen=True, exclude=True)
config_path: Path = Field(default=DEFAULT_CONFIG_PATH, exclude=True)
def __init__(self, config_path: Path = DEFAULT_CONFIG_PATH, **data):
"""Load Settings from config path"""
@@ -74,16 +37,9 @@ class Settings(BaseModel):
merged_data = {**file_data, **data}
super().__init__(config_path=config_path, **merged_data)
def clear_user_settings(self) -> None:
"""Clear all user settings"""
self._reset_user_settings()
self.dump()
def reset(self) -> None:
"""Reset all settings to default values"""
self._reset_user_settings()
self._reset_cli_settings()
self.dump()
def clear(self) -> None:
"""Clear all settings"""
self.config_path.unlink(missing_ok=True)
def dump(self) -> None:
"""Save current settings to settings.json"""
@@ -96,13 +52,3 @@ class Settings(BaseModel):
updated_data = {**existing_data, **self.model_dump(exclude_unset=True)}
with self.config_path.open("w") as f:
json.dump(updated_data, f, indent=4)
def _reset_user_settings(self) -> None:
"""Reset all user settings to default values"""
for key in USER_SETTINGS_KEYS:
setattr(self, key, None)
def _reset_cli_settings(self) -> None:
"""Reset all CLI settings to default values"""
for key in CLI_SETTINGS_KEYS:
setattr(self, key, DEFAULT_CLI_SETTINGS[key])

View File

@@ -1,5 +1,3 @@
DEFAULT_CREWAI_ENTERPRISE_URL = "https://app.crewai.com"
ENV_VARS = {
"openai": [
{
@@ -322,4 +320,5 @@ DEFAULT_LLM_MODEL = "gpt-4o-mini"
JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
LITELLM_PARAMS = ["api_key", "api_base", "api_version"]

View File

@@ -1,3 +1,4 @@
from os import getenv
from typing import List, Optional
from urllib.parse import urljoin
@@ -5,7 +6,6 @@ import requests
from crewai.cli.config import Settings
from crewai.cli.version import get_crewai_version
from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL
class PlusAPI:
@@ -29,10 +29,7 @@ class PlusAPI:
settings = Settings()
if settings.org_uuid:
self.headers["X-Crewai-Organization-Id"] = settings.org_uuid
self.base_url = (
str(settings.enterprise_base_url) or DEFAULT_CREWAI_ENTERPRISE_URL
)
self.base_url = getenv("CREWAI_BASE_URL", "https://app.crewai.com")
def _make_request(self, method: str, endpoint: str, **kwargs) -> requests.Response:
url = urljoin(self.base_url, endpoint)
@@ -111,6 +108,7 @@ class PlusAPI:
def create_crew(self, payload) -> requests.Response:
return self._make_request("POST", self.CREWS_RESOURCE, json=payload)
def get_organizations(self) -> requests.Response:
return self._make_request("GET", self.ORGANIZATIONS_RESOURCE)

View File

@@ -1,67 +0,0 @@
from rich.console import Console
from rich.table import Table
from crewai.cli.command import BaseCommand
from crewai.cli.config import Settings, READONLY_SETTINGS_KEYS, HIDDEN_SETTINGS_KEYS
from typing import Any
console = Console()
class SettingsCommand(BaseCommand):
"""A class to handle CLI configuration commands."""
def __init__(self, settings_kwargs: dict[str, Any] = {}):
super().__init__()
self.settings = Settings(**settings_kwargs)
def list(self) -> None:
"""List all CLI configuration parameters."""
table = Table(title="CrewAI CLI Configuration")
table.add_column("Setting", style="cyan", no_wrap=True)
table.add_column("Value", style="green")
table.add_column("Description", style="yellow")
# Add all settings to the table
for field_name, field_info in Settings.model_fields.items():
if field_name in HIDDEN_SETTINGS_KEYS:
# Do not display hidden settings
continue
current_value = getattr(self.settings, field_name)
description = field_info.description or "No description available"
display_value = (
str(current_value) if current_value is not None else "Not set"
)
table.add_row(field_name, display_value, description)
console.print(table)
def set(self, key: str, value: str) -> None:
"""Set a CLI configuration parameter."""
readonly_settings = READONLY_SETTINGS_KEYS + HIDDEN_SETTINGS_KEYS
if not hasattr(self.settings, key) or key in readonly_settings:
console.print(
f"Error: Unknown or readonly configuration key '{key}'",
style="bold red",
)
console.print("Available keys:", style="yellow")
for field_name in Settings.model_fields.keys():
if field_name not in readonly_settings:
console.print(f" - {field_name}", style="yellow")
raise SystemExit(1)
setattr(self.settings, key, value)
self.settings.dump()
console.print(f"Successfully set '{key}' to '{value}'", style="bold green")
def reset_all_settings(self) -> None:
"""Reset all CLI configuration parameters to default values."""
self.settings.reset()
console.print(
"Successfully reset all configuration parameters to default values. It is recommended to run [bold yellow]'crewai login'[/bold yellow] to re-authenticate.",
style="bold green",
)

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.152.0,<1.0.0"
"crewai[tools]>=0.148.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.152.0,<1.0.0",
"crewai[tools]>=0.148.0,<1.0.0",
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.152.0"
"crewai[tools]>=0.148.0"
]
[tool.crewai]

View File

@@ -81,7 +81,6 @@ from crewai.utilities.llm_utils import create_llm
from crewai.utilities.planning_handler import CrewPlanner
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
from crewai.utilities.training_handler import CrewTrainingHandler
from crewai.utilities.execution_trace_collector import ExecutionTraceCollector
warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
@@ -206,9 +205,6 @@ class Crew(FlowTrackable, BaseModel):
default_factory=list,
description="List of callbacks to be executed after crew kickoff. It may be used to adjust the output of the crew.",
)
trace_execution: bool = Field(
default=False, description="Whether to trace the execution steps of the crew"
)
max_rpm: Optional[int] = Field(
default=None,
description="Maximum number of requests per minute for the crew execution to be respected.",
@@ -625,11 +621,6 @@ class Crew(FlowTrackable, BaseModel):
self,
inputs: Optional[Dict[str, Any]] = None,
) -> CrewOutput:
trace_collector = None
if self.trace_execution:
trace_collector = ExecutionTraceCollector()
trace_collector.start_collecting()
ctx = baggage.set_baggage(
"crew_context", CrewContext(id=str(self.id), key=self.key)
)
@@ -687,10 +678,6 @@ class Crew(FlowTrackable, BaseModel):
result = after_callback(result)
self.usage_metrics = self.calculate_usage_metrics()
if trace_collector:
execution_trace = trace_collector.stop_collecting()
result.execution_trace = execution_trace
return result
except Exception as e:
@@ -1099,7 +1086,6 @@ class Crew(FlowTrackable, BaseModel):
json_dict=final_task_output.json_dict,
tasks_output=task_outputs,
token_usage=token_usage,
execution_trace=None,
)
def _process_async_tasks(

View File

@@ -6,7 +6,6 @@ from pydantic import BaseModel, Field
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.types.usage_metrics import UsageMetrics
from crewai.crews.execution_trace import ExecutionTrace
class CrewOutput(BaseModel):
@@ -23,9 +22,6 @@ class CrewOutput(BaseModel):
description="Output of each task", default=[]
)
token_usage: UsageMetrics = Field(description="Processed token summary", default={})
execution_trace: Optional[ExecutionTrace] = Field(
description="Detailed execution trace of crew steps", default=None
)
@property
def json(self) -> Optional[str]:

View File

@@ -1,34 +0,0 @@
from datetime import datetime
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
class ExecutionStep(BaseModel):
"""Represents a single step in the crew execution trace."""
timestamp: datetime = Field(description="When this step occurred")
step_type: str = Field(description="Type of step: agent_thought, tool_call, tool_result, task_start, task_complete, etc.")
agent_role: Optional[str] = Field(description="Role of the agent performing this step", default=None)
task_description: Optional[str] = Field(description="Description of the task being executed", default=None)
content: Dict[str, Any] = Field(description="Step-specific content (thought, tool args, result, etc.)", default_factory=dict)
metadata: Dict[str, Any] = Field(description="Additional metadata for this step", default_factory=dict)
class ExecutionTrace(BaseModel):
"""Complete execution trace for a crew run."""
steps: List[ExecutionStep] = Field(description="Ordered list of execution steps", default_factory=list)
total_steps: int = Field(description="Total number of steps in the trace", default=0)
start_time: Optional[datetime] = Field(description="When execution started", default=None)
end_time: Optional[datetime] = Field(description="When execution completed", default=None)
def add_step(self, step: ExecutionStep) -> None:
"""Add a step to the trace."""
self.steps.append(step)
self.total_steps = len(self.steps)
def get_steps_by_type(self, step_type: str) -> List[ExecutionStep]:
"""Get all steps of a specific type."""
return [step for step in self.steps if step.step_type == step_type]
def get_steps_by_agent(self, agent_role: str) -> List[ExecutionStep]:
"""Get all steps performed by a specific agent."""
return [step for step in self.steps if step.agent_role == agent_role]

View File

@@ -436,7 +436,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
_routers: Set[str] = set()
_router_paths: Dict[str, List[str]] = {}
initial_state: Union[Type[T], T, None] = None
name: Optional[str] = None
def __class_getitem__(cls: Type["Flow"], item: Type[T]) -> Type["Flow"]:
class _FlowGeneric(cls): # type: ignore
@@ -474,7 +473,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
self,
FlowCreatedEvent(
type="flow_created",
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
),
)
@@ -770,7 +769,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
self,
FlowStartedEvent(
type="flow_started",
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
inputs=inputs,
),
)
@@ -793,7 +792,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
self,
FlowFinishedEvent(
type="flow_finished",
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
result=final_output,
),
)
@@ -835,7 +834,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
MethodExecutionStartedEvent(
type="method_execution_started",
method_name=method_name,
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
params=dumped_params,
state=self._copy_state(),
),
@@ -857,7 +856,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
MethodExecutionFinishedEvent(
type="method_execution_finished",
method_name=method_name,
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
state=self._copy_state(),
result=result,
),
@@ -870,7 +869,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
MethodExecutionFailedEvent(
type="method_execution_failed",
method_name=method_name,
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
error=e,
),
)
@@ -1077,7 +1076,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
self,
FlowPlotEvent(
type="flow_plot",
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
),
)
plot_flow(self, filename)

View File

@@ -0,0 +1,55 @@
from abc import ABC, abstractmethod
from typing import List
import numpy as np
class BaseEmbedder(ABC):
"""
Abstract base class for text embedding models
"""
@abstractmethod
def embed_chunks(self, chunks: List[str]) -> np.ndarray:
"""
Generate embeddings for a list of text chunks
Args:
chunks: List of text chunks to embed
Returns:
Array of embeddings
"""
pass
@abstractmethod
def embed_texts(self, texts: List[str]) -> np.ndarray:
"""
Generate embeddings for a list of texts
Args:
texts: List of texts to embed
Returns:
Array of embeddings
"""
pass
@abstractmethod
def embed_text(self, text: str) -> np.ndarray:
"""
Generate embedding for a single text
Args:
text: Text to embed
Returns:
Embedding array
"""
pass
@property
@abstractmethod
def dimension(self) -> int:
"""Get the dimension of the embeddings"""
pass
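A hedged sketch of a concrete subclass, to show the contract; the hash-seeded vectors are purely illustrative and not a real embedding model:
```python
from typing import List

import numpy as np

class ToyEmbedder(BaseEmbedder):
    """Illustrative embedder mapping each text to a deterministic pseudo-random vector."""

    def embed_chunks(self, chunks: List[str]) -> np.ndarray:
        return self.embed_texts(chunks)

    def embed_texts(self, texts: List[str]) -> np.ndarray:
        return np.stack([self.embed_text(t) for t in texts])

    def embed_text(self, text: str) -> np.ndarray:
        # Seed from the text so the same input always yields the same vector
        rng = np.random.default_rng(abs(hash(text)) % (2**32))
        return rng.random(self.dimension)

    @property
    def dimension(self) -> int:
        return 8
```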

View File

@@ -13,7 +13,7 @@ from chromadb.api.types import OneOrMany
from chromadb.config import Settings
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
from crewai.rag.embeddings.configurator import EmbeddingConfigurator
from crewai.utilities import EmbeddingConfigurator
from crewai.utilities.chromadb import sanitize_collection_name
from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
from crewai.utilities.logger import Logger

View File

@@ -1,10 +1,10 @@
import os
from typing import Any, Dict, List
from collections import defaultdict
from mem0 import Memory, MemoryClient
from crewai.utilities.chromadb import sanitize_collection_name
from crewai.memory.storage.interface import Storage
from crewai.utilities.chromadb import sanitize_collection_name
MAX_AGENT_ID_LENGTH_MEM0 = 255
@@ -13,162 +13,47 @@ class Mem0Storage(Storage):
"""
Extends Storage to handle embedding and searching across entities using Mem0.
"""
def __init__(self, type, crew=None, config=None):
super().__init__()
self._validate_type(type)
self.memory_type = type
self.crew = crew
# TODO: Memory config will be removed in the future; the config will be passed as a parameter
self.config = config or getattr(crew, "memory_config", {}).get("config", {}) or {}
self._validate_user_id()
self._extract_config_values()
self._initialize_memory()
def _validate_type(self, type):
supported_types = {"user", "short_term", "long_term", "entities", "external"}
supported_types = ["user", "short_term", "long_term", "entities", "external"]
if type not in supported_types:
raise ValueError(
f"Invalid type '{type}' for Mem0Storage. Must be one of: {', '.join(supported_types)}"
f"Invalid type '{type}' for Mem0Storage. Must be one of: "
+ ", ".join(supported_types)
)
def _validate_user_id(self):
if self.memory_type == "user" and not self.config.get("user_id", ""):
self.memory_type = type
self.crew = crew
self.config = config or {}
# TODO: Memory config will be removed in the future; the config will be passed as a parameter
self.memory_config = self.config or getattr(crew, "memory_config", {}) or {}
# User ID is required for user memory type "user" since it's used as a unique identifier for the user.
user_id = self._get_user_id()
if type == "user" and not user_id:
raise ValueError("User ID is required for user memory type")
def _extract_config_values(self):
cfg = self.config
self.mem0_run_id = cfg.get("run_id")
self.includes = cfg.get("includes")
self.excludes = cfg.get("excludes")
self.custom_categories = cfg.get("custom_categories")
self.infer = cfg.get("infer", True)
# API key in memory config overrides the environment variable
config = self._get_config()
mem0_api_key = config.get("api_key") or os.getenv("MEM0_API_KEY")
mem0_org_id = config.get("org_id")
mem0_project_id = config.get("project_id")
mem0_local_config = config.get("local_mem0_config")
def _initialize_memory(self):
api_key = self.config.get("api_key") or os.getenv("MEM0_API_KEY")
org_id = self.config.get("org_id")
project_id = self.config.get("project_id")
local_config = self.config.get("local_mem0_config")
if api_key:
self.memory = (
MemoryClient(api_key=api_key, org_id=org_id, project_id=project_id)
if org_id and project_id
else MemoryClient(api_key=api_key)
)
if self.custom_categories:
self.memory.update_project(custom_categories=self.custom_categories)
# Initialize MemoryClient or Memory based on the presence of the mem0_api_key
if mem0_api_key:
if mem0_org_id and mem0_project_id:
self.memory = MemoryClient(
api_key=mem0_api_key, org_id=mem0_org_id, project_id=mem0_project_id
)
else:
self.memory = MemoryClient(api_key=mem0_api_key)
else:
self.memory = (
Memory.from_config(local_config)
if local_config and len(local_config)
else Memory()
)
def _create_filter_for_search(self):
"""
Returns:
dict: A filter dictionary containing AND conditions for querying data.
- Includes user_id and agent_id if both are present.
- Includes user_id if only user_id is present.
- Includes agent_id if only agent_id is present.
- Includes run_id if memory_type is 'short_term' and mem0_run_id is present.
"""
filter = defaultdict(list)
if self.memory_type == "short_term" and self.mem0_run_id:
filter["AND"].append({"run_id": self.mem0_run_id})
else:
user_id = self.config.get("user_id", "")
agent_id = self.config.get("agent_id", "")
if user_id and agent_id:
filter["OR"].append({"user_id": user_id})
filter["OR"].append({"agent_id": agent_id})
elif user_id:
filter["AND"].append({"user_id": user_id})
elif agent_id:
filter["AND"].append({"agent_id": agent_id})
return filter
def save(self, value: Any, metadata: Dict[str, Any]) -> None:
user_id = self.config.get("user_id", "")
assistant_message = [{"role": "assistant", "content": value}]
base_metadata = {
"short_term": "short_term",
"long_term": "long_term",
"entities": "entity",
"external": "external"
}
# Shared base params
params: dict[str, Any] = {
"metadata": {"type": base_metadata[self.memory_type], **metadata},
"infer": self.infer
}
# MemoryClient-specific overrides
if isinstance(self.memory, MemoryClient):
params["includes"] = self.includes
params["excludes"] = self.excludes
params["output_format"] = "v1.1"
params["version"] = "v2"
if self.memory_type == "short_term" and self.mem0_run_id:
params["run_id"] = self.mem0_run_id
if user_id:
params["user_id"] = user_id
if agent_id := self.config.get("agent_id", self._get_agent_name()):
params["agent_id"] = agent_id
self.memory.add(assistant_message, **params)
def search(self, query: str, limit: int = 3, score_threshold: float = 0.35) -> List[Any]:
params = {
"query": query,
"limit": limit,
"version": "v2",
"output_format": "v1.1"
}
if user_id := self.config.get("user_id", ""):
params["user_id"] = user_id
memory_type_map = {
"short_term": {"type": "short_term"},
"long_term": {"type": "long_term"},
"entities": {"type": "entity"},
"external": {"type": "external"},
}
if self.memory_type in memory_type_map:
params["metadata"] = memory_type_map[self.memory_type]
if self.memory_type == "short_term":
params["run_id"] = self.mem0_run_id
# Discard the filters for now since we create the filters
# automatically when the crew is created.
params["filters"] = self._create_filter_for_search()
params["threshold"] = score_threshold
if isinstance(self.memory, Memory):
del params["metadata"], params["version"], params["output_format"]
if params.get("run_id"):
del params["run_id"]
results = self.memory.search(**params)
return [r for r in results["results"]]
def reset(self):
if self.memory:
self.memory.reset()
if mem0_local_config and len(mem0_local_config):
self.memory = Memory.from_config(mem0_local_config)
else:
self.memory = Memory()
def _sanitize_role(self, role: str) -> str:
"""
@@ -176,6 +61,77 @@ class Mem0Storage(Storage):
"""
return role.replace("\n", "").replace(" ", "_").replace("/", "_")
def save(self, value: Any, metadata: Dict[str, Any]) -> None:
user_id = self._get_user_id()
agent_name = self._get_agent_name()
assistant_message = [{"role": "assistant", "content": value}]
params = None
if self.memory_type == "short_term":
params = {
"agent_id": agent_name,
"infer": False,
"metadata": {"type": "short_term", **metadata},
}
elif self.memory_type == "long_term":
params = {
"agent_id": agent_name,
"infer": False,
"metadata": {"type": "long_term", **metadata},
}
elif self.memory_type == "entities":
params = {
"agent_id": agent_name,
"infer": False,
"metadata": {"type": "entity", **metadata},
}
elif self.memory_type == "external":
params = {
"user_id": user_id,
"agent_id": agent_name,
"metadata": {"type": "external", **metadata},
}
if params:
if isinstance(self.memory, MemoryClient):
params["output_format"] = "v1.1"
self.memory.add(assistant_message, **params)
def search(
self,
query: str,
limit: int = 3,
score_threshold: float = 0.35,
) -> List[Any]:
params = {"query": query, "limit": limit, "output_format": "v1.1"}
if user_id := self._get_user_id():
params["user_id"] = user_id
agent_name = self._get_agent_name()
if self.memory_type == "short_term":
params["agent_id"] = agent_name
params["metadata"] = {"type": "short_term"}
elif self.memory_type == "long_term":
params["agent_id"] = agent_name
params["metadata"] = {"type": "long_term"}
elif self.memory_type == "entities":
params["agent_id"] = agent_name
params["metadata"] = {"type": "entity"}
elif self.memory_type == "external":
params["agent_id"] = agent_name
params["metadata"] = {"type": "external"}
# Discard the filters for now since we create the filters
# automatically when the crew is created.
if isinstance(self.memory, Memory):
del params["metadata"], params["output_format"]
results = self.memory.search(**params)
return [r for r in results["results"] if r["score"] >= score_threshold]
def _get_user_id(self) -> str:
return self._get_config().get("user_id", "")
def _get_agent_name(self) -> str:
if not self.crew:
return ""
@@ -183,4 +139,11 @@ class Mem0Storage(Storage):
agents = self.crew.agents
agents = [self._sanitize_role(agent.role) for agent in agents]
agents = "_".join(agents)
return sanitize_collection_name(name=agents, max_collection_length=MAX_AGENT_ID_LENGTH_MEM0)
return sanitize_collection_name(name=agents,max_collection_length=MAX_AGENT_ID_LENGTH_MEM0)
def _get_config(self) -> Dict[str, Any]:
return self.config or getattr(self, "memory_config", {}).get("config", {}) or {}
def reset(self):
if self.memory:
self.memory.reset()

View File

@@ -7,8 +7,8 @@ import uuid
from typing import Any, Dict, List, Optional
from chromadb.api import ClientAPI
from crewai.rag.storage.base_rag_storage import BaseRAGStorage
from crewai.rag.embeddings.configurator import EmbeddingConfigurator
from crewai.memory.storage.base_rag_storage import BaseRAGStorage
from crewai.utilities import EmbeddingConfigurator
from crewai.utilities.chromadb import create_persistent_client
from crewai.utilities.constants import MAX_FILE_NAME_LENGTH
from crewai.utilities.paths import db_storage_path

View File

@@ -1 +0,0 @@
"""RAG (Retrieval-Augmented Generation) infrastructure for CrewAI."""

View File

@@ -1 +0,0 @@
"""Embedding components for RAG infrastructure."""

View File

@@ -1 +0,0 @@
"""Storage components for RAG infrastructure."""

View File

@@ -10,6 +10,7 @@ from .rpm_controller import RPMController
from .exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException,
)
from .embedding_configurator import EmbeddingConfigurator
__all__ = [
"Converter",
@@ -23,4 +24,5 @@ __all__ = [
"RPMController",
"YamlParser",
"LLMContextLengthExceededException",
"EmbeddingConfigurator",
]

View File

@@ -38,14 +38,7 @@ class EmbeddingConfigurator:
f"Unsupported embedding provider: {provider}, supported providers: {list(self.embedding_functions.keys())}"
)
try:
embedding_function = self.embedding_functions[provider]
except ImportError as e:
missing_package = str(e).split()[-1]
raise ImportError(
f"{missing_package} is not installed. Please install it with: pip install {missing_package}"
)
embedding_function = self.embedding_functions[provider]
return (
embedding_function(config)
if provider == "custom"

View File

@@ -1,5 +1,6 @@
from datetime import datetime, timezone
from datetime import datetime
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field
from crewai.utilities.serialization import to_serializable
@@ -8,7 +9,7 @@ from crewai.utilities.serialization import to_serializable
class BaseEvent(BaseModel):
"""Base class for all events"""
timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
timestamp: datetime = Field(default_factory=datetime.now)
type: str
source_fingerprint: Optional[str] = None # UUID string of the source entity
source_type: Optional[str] = None # "agent", "task", "crew", "memory", "entity_memory", "short_term_memory", "long_term_memory", "external_memory"
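The difference between the two timestamp defaults, in a minimal illustration:
```python
from datetime import datetime, timezone

naive = datetime.now()               # no tzinfo; ambiguous across machines
aware = datetime.now(timezone.utc)   # explicit UTC; safe to compare and serialize
assert naive.tzinfo is None and aware.tzinfo is not None
```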

View File

@@ -1,152 +0,0 @@
from datetime import datetime
from typing import Any
from crewai.crews.execution_trace import ExecutionStep, ExecutionTrace
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.agent_events import (
AgentExecutionStartedEvent,
AgentExecutionCompletedEvent,
AgentLogsExecutionEvent,
)
from crewai.utilities.events.tool_usage_events import (
ToolUsageStartedEvent,
ToolUsageFinishedEvent,
)
from crewai.utilities.events.task_events import (
TaskStartedEvent,
TaskCompletedEvent,
)
class ExecutionTraceCollector:
"""Collects execution events and builds an execution trace."""
def __init__(self):
self.trace = ExecutionTrace()
self.is_collecting = False
def start_collecting(self) -> None:
"""Start collecting execution events."""
self.is_collecting = True
self.trace = ExecutionTrace(start_time=datetime.now())
crewai_event_bus.register_handler(TaskStartedEvent, self._handle_task_started)
crewai_event_bus.register_handler(TaskCompletedEvent, self._handle_task_completed)
crewai_event_bus.register_handler(AgentExecutionStartedEvent, self._handle_agent_started)
crewai_event_bus.register_handler(AgentExecutionCompletedEvent, self._handle_agent_completed)
crewai_event_bus.register_handler(AgentLogsExecutionEvent, self._handle_agent_logs)
crewai_event_bus.register_handler(ToolUsageStartedEvent, self._handle_tool_started)
crewai_event_bus.register_handler(ToolUsageFinishedEvent, self._handle_tool_finished)
def stop_collecting(self) -> ExecutionTrace:
"""Stop collecting and return the execution trace."""
self.is_collecting = False
self.trace.end_time = datetime.now()
return self.trace
def _handle_agent_started(self, source: Any, event: AgentExecutionStartedEvent) -> None:
if not self.is_collecting:
return
step = ExecutionStep(
timestamp=datetime.now(),
step_type="agent_execution_started",
agent_role=event.agent.role if hasattr(event.agent, 'role') else None,
task_description=getattr(event.task, 'description', None) if event.task else None,
content={
"task_prompt": event.task_prompt,
"tools": [tool.name for tool in event.tools] if event.tools else [],
}
)
self.trace.add_step(step)
def _handle_agent_completed(self, source: Any, event: AgentExecutionCompletedEvent) -> None:
if not self.is_collecting:
return
step = ExecutionStep(
timestamp=datetime.now(),
step_type="agent_execution_completed",
agent_role=event.agent.role if hasattr(event.agent, 'role') else None,
content={
"output": event.output,
}
)
self.trace.add_step(step)
def _handle_agent_logs(self, source: Any, event: AgentLogsExecutionEvent) -> None:
if not self.is_collecting:
return
step = ExecutionStep(
timestamp=datetime.now(),
step_type="agent_thought",
agent_role=event.agent_role,
content={
"formatted_answer": str(event.formatted_answer),
}
)
self.trace.add_step(step)
def _handle_tool_started(self, source: Any, event: ToolUsageStartedEvent) -> None:
if not self.is_collecting:
return
step = ExecutionStep(
timestamp=datetime.now(),
step_type="tool_call_started",
agent_role=event.agent_role,
content={
"tool_name": event.tool_name,
"tool_args": event.tool_args,
"tool_class": event.tool_class,
}
)
self.trace.add_step(step)
def _handle_tool_finished(self, source: Any, event: ToolUsageFinishedEvent) -> None:
if not self.is_collecting:
return
step = ExecutionStep(
timestamp=datetime.now(),
step_type="tool_call_completed",
agent_role=event.agent_role,
content={
"tool_name": event.tool_name,
"output": event.output,
"from_cache": event.from_cache,
"duration": (event.finished_at - event.started_at).total_seconds() if hasattr(event, 'started_at') and hasattr(event, 'finished_at') else None,
}
)
self.trace.add_step(step)
def _handle_task_started(self, source: Any, event: TaskStartedEvent) -> None:
if not self.is_collecting:
return
step = ExecutionStep(
timestamp=datetime.now(),
step_type="task_started",
task_description=getattr(event.task, 'description', None) if hasattr(event, 'task') and event.task else None,
content={
"task_id": getattr(event.task, 'id', None) if hasattr(event, 'task') and event.task else None,
"context": getattr(event, 'context', None),
}
)
self.trace.add_step(step)
def _handle_task_completed(self, source: Any, event: TaskCompletedEvent) -> None:
if not self.is_collecting:
return
step = ExecutionStep(
timestamp=datetime.now(),
step_type="task_completed",
task_description=getattr(event.task, 'description', None) if hasattr(event, 'task') and event.task else None,
content={
"task_id": getattr(event.task, 'id', None) if hasattr(event, 'task') and event.task else None,
"output": event.output.raw if hasattr(event, 'output') and event.output else None,
}
)
self.trace.add_step(step)

View File

@@ -4,12 +4,7 @@ import tempfile
import unittest
from pathlib import Path
from crewai.cli.config import (
Settings,
USER_SETTINGS_KEYS,
CLI_SETTINGS_KEYS,
DEFAULT_CLI_SETTINGS,
)
from crewai.cli.config import Settings
class TestSettings(unittest.TestCase):
@@ -57,30 +52,6 @@ class TestSettings(unittest.TestCase):
self.assertEqual(settings.tool_repository_username, "new_user")
self.assertEqual(settings.tool_repository_password, "file_pass")
def test_clear_user_settings(self):
user_settings = {key: f"value_for_{key}" for key in USER_SETTINGS_KEYS}
settings = Settings(config_path=self.config_path, **user_settings)
settings.clear_user_settings()
for key in user_settings.keys():
self.assertEqual(getattr(settings, key), None)
def test_reset_settings(self):
user_settings = {key: f"value_for_{key}" for key in USER_SETTINGS_KEYS}
cli_settings = {key: f"value_for_{key}" for key in CLI_SETTINGS_KEYS}
settings = Settings(
config_path=self.config_path, **user_settings, **cli_settings
)
settings.reset()
for key in user_settings.keys():
self.assertEqual(getattr(settings, key), None)
for key in cli_settings.keys():
self.assertEqual(getattr(settings, key), DEFAULT_CLI_SETTINGS[key])
def test_dump_new_settings(self):
settings = Settings(
config_path=self.config_path, tool_repository_username="user1"

View File

@@ -6,7 +6,7 @@ from click.testing import CliRunner
import requests
from crewai.cli.organization.main import OrganizationCommand
from crewai.cli.cli import org_list, switch, current
from crewai.cli.cli import list, switch, current
@pytest.fixture
@@ -16,44 +16,44 @@ def runner():
@pytest.fixture
def org_command():
with patch.object(OrganizationCommand, "__init__", return_value=None):
with patch.object(OrganizationCommand, '__init__', return_value=None):
command = OrganizationCommand()
yield command
@pytest.fixture
def mock_settings():
with patch("crewai.cli.organization.main.Settings") as mock_settings_class:
with patch('crewai.cli.organization.main.Settings') as mock_settings_class:
mock_settings_instance = MagicMock()
mock_settings_class.return_value = mock_settings_instance
yield mock_settings_instance
@patch("crewai.cli.cli.OrganizationCommand")
@patch('crewai.cli.cli.OrganizationCommand')
def test_org_list_command(mock_org_command_class, runner):
mock_org_instance = MagicMock()
mock_org_command_class.return_value = mock_org_instance
result = runner.invoke(org_list)
result = runner.invoke(list)
assert result.exit_code == 0
mock_org_command_class.assert_called_once()
mock_org_instance.list.assert_called_once()
@patch("crewai.cli.cli.OrganizationCommand")
@patch('crewai.cli.cli.OrganizationCommand')
def test_org_switch_command(mock_org_command_class, runner):
mock_org_instance = MagicMock()
mock_org_command_class.return_value = mock_org_instance
result = runner.invoke(switch, ["test-id"])
result = runner.invoke(switch, ['test-id'])
assert result.exit_code == 0
mock_org_command_class.assert_called_once()
mock_org_instance.switch.assert_called_once_with("test-id")
mock_org_instance.switch.assert_called_once_with('test-id')
@patch("crewai.cli.cli.OrganizationCommand")
@patch('crewai.cli.cli.OrganizationCommand')
def test_org_current_command(mock_org_command_class, runner):
mock_org_instance = MagicMock()
mock_org_command_class.return_value = mock_org_instance
@@ -67,18 +67,18 @@ def test_org_current_command(mock_org_command_class, runner):
class TestOrganizationCommand(unittest.TestCase):
def setUp(self):
with patch.object(OrganizationCommand, "__init__", return_value=None):
with patch.object(OrganizationCommand, '__init__', return_value=None):
self.org_command = OrganizationCommand()
self.org_command.plus_api_client = MagicMock()
@patch("crewai.cli.organization.main.console")
@patch("crewai.cli.organization.main.Table")
@patch('crewai.cli.organization.main.console')
@patch('crewai.cli.organization.main.Table')
def test_list_organizations_success(self, mock_table, mock_console):
mock_response = MagicMock()
mock_response.raise_for_status = MagicMock()
mock_response.json.return_value = [
{"name": "Org 1", "uuid": "org-123"},
{"name": "Org 2", "uuid": "org-456"},
{"name": "Org 2", "uuid": "org-456"}
]
self.org_command.plus_api_client = MagicMock()
self.org_command.plus_api_client.get_organizations.return_value = mock_response
@@ -89,14 +89,16 @@ class TestOrganizationCommand(unittest.TestCase):
self.org_command.plus_api_client.get_organizations.assert_called_once()
mock_table.assert_called_once_with(title="Your Organizations")
mock_table.return_value.add_column.assert_has_calls(
[call("Name", style="cyan"), call("ID", style="green")]
)
mock_table.return_value.add_row.assert_has_calls(
[call("Org 1", "org-123"), call("Org 2", "org-456")]
)
mock_table.return_value.add_column.assert_has_calls([
call("Name", style="cyan"),
call("ID", style="green")
])
mock_table.return_value.add_row.assert_has_calls([
call("Org 1", "org-123"),
call("Org 2", "org-456")
])
@patch("crewai.cli.organization.main.console")
@patch('crewai.cli.organization.main.console')
def test_list_organizations_empty(self, mock_console):
mock_response = MagicMock()
mock_response.raise_for_status = MagicMock()
@@ -108,32 +110,33 @@ class TestOrganizationCommand(unittest.TestCase):
self.org_command.plus_api_client.get_organizations.assert_called_once()
mock_console.print.assert_called_once_with(
"You don't belong to any organizations yet.", style="yellow"
"You don't belong to any organizations yet.",
style="yellow"
)
@patch("crewai.cli.organization.main.console")
@patch('crewai.cli.organization.main.console')
def test_list_organizations_api_error(self, mock_console):
self.org_command.plus_api_client = MagicMock()
self.org_command.plus_api_client.get_organizations.side_effect = (
requests.exceptions.RequestException("API Error")
)
self.org_command.plus_api_client.get_organizations.side_effect = requests.exceptions.RequestException("API Error")
with pytest.raises(SystemExit):
self.org_command.list()
self.org_command.plus_api_client.get_organizations.assert_called_once()
mock_console.print.assert_called_once_with(
"Failed to retrieve organization list: API Error", style="bold red"
"Failed to retrieve organization list: API Error",
style="bold red"
)
@patch("crewai.cli.organization.main.console")
@patch("crewai.cli.organization.main.Settings")
@patch('crewai.cli.organization.main.console')
@patch('crewai.cli.organization.main.Settings')
def test_switch_organization_success(self, mock_settings_class, mock_console):
mock_response = MagicMock()
mock_response.raise_for_status = MagicMock()
mock_response.json.return_value = [
{"name": "Org 1", "uuid": "org-123"},
{"name": "Test Org", "uuid": "test-id"},
{"name": "Test Org", "uuid": "test-id"}
]
self.org_command.plus_api_client = MagicMock()
self.org_command.plus_api_client.get_organizations.return_value = mock_response
@@ -148,16 +151,17 @@ class TestOrganizationCommand(unittest.TestCase):
assert mock_settings_instance.org_name == "Test Org"
assert mock_settings_instance.org_uuid == "test-id"
mock_console.print.assert_called_once_with(
"Successfully switched to Test Org (test-id)", style="bold green"
"Successfully switched to Test Org (test-id)",
style="bold green"
)
@patch("crewai.cli.organization.main.console")
@patch('crewai.cli.organization.main.console')
def test_switch_organization_not_found(self, mock_console):
mock_response = MagicMock()
mock_response.raise_for_status = MagicMock()
mock_response.json.return_value = [
{"name": "Org 1", "uuid": "org-123"},
{"name": "Org 2", "uuid": "org-456"},
{"name": "Org 2", "uuid": "org-456"}
]
self.org_command.plus_api_client = MagicMock()
self.org_command.plus_api_client.get_organizations.return_value = mock_response
@@ -166,11 +170,12 @@ class TestOrganizationCommand(unittest.TestCase):
self.org_command.plus_api_client.get_organizations.assert_called_once()
mock_console.print.assert_called_once_with(
"Organization with id 'non-existent-id' not found.", style="bold red"
"Organization with id 'non-existent-id' not found.",
style="bold red"
)
@patch("crewai.cli.organization.main.console")
@patch("crewai.cli.organization.main.Settings")
@patch('crewai.cli.organization.main.console')
@patch('crewai.cli.organization.main.Settings')
def test_current_organization_with_org(self, mock_settings_class, mock_console):
mock_settings_instance = MagicMock()
mock_settings_instance.org_name = "Test Org"
@@ -181,11 +186,12 @@ class TestOrganizationCommand(unittest.TestCase):
self.org_command.plus_api_client.get_organizations.assert_not_called()
mock_console.print.assert_called_once_with(
"Currently logged in to organization Test Org (test-id)", style="bold green"
"Currently logged in to organization Test Org (test-id)",
style="bold green"
)
@patch("crewai.cli.organization.main.console")
@patch("crewai.cli.organization.main.Settings")
@patch('crewai.cli.organization.main.console')
@patch('crewai.cli.organization.main.Settings')
def test_current_organization_without_org(self, mock_settings_class, mock_console):
mock_settings_instance = MagicMock()
mock_settings_instance.org_uuid = None
@@ -195,14 +201,16 @@ class TestOrganizationCommand(unittest.TestCase):
assert mock_console.print.call_count == 3
mock_console.print.assert_any_call(
"You're not currently logged in to any organization.", style="yellow"
"You're not currently logged in to any organization.",
style="yellow"
)
@patch("crewai.cli.organization.main.console")
@patch('crewai.cli.organization.main.console')
def test_list_organizations_unauthorized(self, mock_console):
mock_response = MagicMock()
mock_http_error = requests.exceptions.HTTPError(
"401 Client Error: Unauthorized", response=MagicMock(status_code=401)
"401 Client Error: Unauthorized",
response=MagicMock(status_code=401)
)
mock_response.raise_for_status.side_effect = mock_http_error
@@ -213,14 +221,15 @@ class TestOrganizationCommand(unittest.TestCase):
self.org_command.plus_api_client.get_organizations.assert_called_once()
mock_console.print.assert_called_once_with(
"You are not logged in to any organization. Use 'crewai login' to login.",
style="bold red",
style="bold red"
)
@patch("crewai.cli.organization.main.console")
@patch('crewai.cli.organization.main.console')
def test_switch_organization_unauthorized(self, mock_console):
mock_response = MagicMock()
mock_http_error = requests.exceptions.HTTPError(
"401 Client Error: Unauthorized", response=MagicMock(status_code=401)
"401 Client Error: Unauthorized",
response=MagicMock(status_code=401)
)
mock_response.raise_for_status.side_effect = mock_http_error
@@ -231,5 +240,5 @@ class TestOrganizationCommand(unittest.TestCase):
self.org_command.plus_api_client.get_organizations.assert_called_once()
mock_console.print.assert_called_once_with(
"You are not logged in to any organization. Use 'crewai login' to login.",
style="bold red",
style="bold red"
)

View File

@@ -1,8 +1,8 @@
import os
import unittest
from unittest.mock import MagicMock, patch, ANY
from crewai.cli.plus_api import PlusAPI
from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL
class TestPlusAPI(unittest.TestCase):
@@ -30,41 +30,29 @@ class TestPlusAPI(unittest.TestCase):
)
self.assertEqual(response, mock_response)
def assert_request_with_org_id(
self, mock_make_request, method: str, endpoint: str, **kwargs
):
def assert_request_with_org_id(self, mock_make_request, method: str, endpoint: str, **kwargs):
mock_make_request.assert_called_once_with(
method,
f"{DEFAULT_CREWAI_ENTERPRISE_URL}{endpoint}",
headers={
"Authorization": ANY,
"Content-Type": ANY,
"User-Agent": ANY,
"X-Crewai-Version": ANY,
"X-Crewai-Organization-Id": self.org_uuid,
},
**kwargs,
method, f"https://app.crewai.com{endpoint}", headers={'Authorization': ANY, 'Content-Type': ANY, 'User-Agent': ANY, 'X-Crewai-Version': ANY, 'X-Crewai-Organization-Id': self.org_uuid}, **kwargs
)
@patch("crewai.cli.plus_api.Settings")
@patch("requests.Session.request")
def test_login_to_tool_repository_with_org_uuid(
self, mock_make_request, mock_settings_class
):
def test_login_to_tool_repository_with_org_uuid(self, mock_make_request, mock_settings_class):
mock_settings = MagicMock()
mock_settings.org_uuid = self.org_uuid
mock_settings.enterprise_base_url = DEFAULT_CREWAI_ENTERPRISE_URL
mock_settings_class.return_value = mock_settings
# re-initialize Client
self.api = PlusAPI(self.api_key)
mock_response = MagicMock()
mock_make_request.return_value = mock_response
response = self.api.login_to_tool_repository()
self.assert_request_with_org_id(
mock_make_request, "POST", "/crewai_plus/api/v1/tools/login"
mock_make_request,
'POST',
'/crewai_plus/api/v1/tools/login'
)
self.assertEqual(response, mock_response)
@@ -78,27 +66,28 @@ class TestPlusAPI(unittest.TestCase):
"GET", "/crewai_plus/api/v1/agents/test_agent_handle"
)
self.assertEqual(response, mock_response)
@patch("crewai.cli.plus_api.Settings")
@patch("requests.Session.request")
def test_get_agent_with_org_uuid(self, mock_make_request, mock_settings_class):
mock_settings = MagicMock()
mock_settings.org_uuid = self.org_uuid
mock_settings.enterprise_base_url = DEFAULT_CREWAI_ENTERPRISE_URL
mock_settings_class.return_value = mock_settings
# re-initialize Client
self.api = PlusAPI(self.api_key)
mock_response = MagicMock()
mock_make_request.return_value = mock_response
response = self.api.get_agent("test_agent_handle")
self.assert_request_with_org_id(
mock_make_request, "GET", "/crewai_plus/api/v1/agents/test_agent_handle"
mock_make_request,
"GET",
"/crewai_plus/api/v1/agents/test_agent_handle"
)
self.assertEqual(response, mock_response)
@patch("crewai.cli.plus_api.PlusAPI._make_request")
def test_get_tool(self, mock_make_request):
mock_response = MagicMock()
@@ -109,13 +98,12 @@ class TestPlusAPI(unittest.TestCase):
"GET", "/crewai_plus/api/v1/tools/test_tool_handle"
)
self.assertEqual(response, mock_response)
@patch("crewai.cli.plus_api.Settings")
@patch("requests.Session.request")
def test_get_tool_with_org_uuid(self, mock_make_request, mock_settings_class):
mock_settings = MagicMock()
mock_settings.org_uuid = self.org_uuid
mock_settings.enterprise_base_url = DEFAULT_CREWAI_ENTERPRISE_URL
mock_settings_class.return_value = mock_settings
# re-initialize Client
self.api = PlusAPI(self.api_key)
@@ -127,7 +115,9 @@ class TestPlusAPI(unittest.TestCase):
response = self.api.get_tool("test_tool_handle")
self.assert_request_with_org_id(
mock_make_request, "GET", "/crewai_plus/api/v1/tools/test_tool_handle"
mock_make_request,
"GET",
"/crewai_plus/api/v1/tools/test_tool_handle"
)
self.assertEqual(response, mock_response)
@@ -157,13 +147,12 @@ class TestPlusAPI(unittest.TestCase):
"POST", "/crewai_plus/api/v1/tools", json=params
)
self.assertEqual(response, mock_response)
@patch("crewai.cli.plus_api.Settings")
@patch("requests.Session.request")
def test_publish_tool_with_org_uuid(self, mock_make_request, mock_settings_class):
mock_settings = MagicMock()
mock_settings.org_uuid = self.org_uuid
mock_settings.enterprise_base_url = DEFAULT_CREWAI_ENTERPRISE_URL
mock_settings_class.return_value = mock_settings
# re-initialize Client
self.api = PlusAPI(self.api_key)
@@ -171,7 +160,7 @@ class TestPlusAPI(unittest.TestCase):
# Set up mock response
mock_response = MagicMock()
mock_make_request.return_value = mock_response
handle = "test_tool_handle"
public = True
version = "1.0.0"
@@ -191,9 +180,12 @@ class TestPlusAPI(unittest.TestCase):
"description": description,
"available_exports": None,
}
self.assert_request_with_org_id(
mock_make_request, "POST", "/crewai_plus/api/v1/tools", json=expected_params
mock_make_request,
"POST",
"/crewai_plus/api/v1/tools",
json=expected_params
)
self.assertEqual(response, mock_response)
@@ -319,11 +311,8 @@ class TestPlusAPI(unittest.TestCase):
"POST", "/crewai_plus/api/v1/crews", json=payload
)
@patch("crewai.cli.plus_api.Settings")
def test_custom_base_url(self, mock_settings_class):
mock_settings = MagicMock()
mock_settings.enterprise_base_url = "https://custom-url.com/api"
mock_settings_class.return_value = mock_settings
@patch.dict(os.environ, {"CREWAI_BASE_URL": "https://custom-url.com/api"})
def test_custom_base_url(self):
custom_api = PlusAPI("test_key")
self.assertEqual(
custom_api.base_url,
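For context on the header assertions above, here is a minimal, hypothetical sketch of the request shape that `assert_request_with_org_id` verifies. It uses plain `requests` with placeholder values; none of the literal header values below come from the actual `PlusAPI` implementation.

```python
import requests

# Hypothetical sketch: the shape of the request the tests assert.
# All values are placeholders; the tests match most headers with ANY
# and only pin X-Crewai-Organization-Id to the configured org UUID.
session = requests.Session()
response = session.request(
    "POST",
    "https://app.crewai.com/crewai_plus/api/v1/tools/login",
    headers={
        "Authorization": "Bearer <api_key>",       # matched with ANY
        "Content-Type": "application/json",        # matched with ANY
        "User-Agent": "crewai-cli/<version>",      # matched with ANY
        "X-Crewai-Version": "<version>",           # matched with ANY
        "X-Crewai-Organization-Id": "<org_uuid>",  # asserted exactly
    },
)
```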

View File

@@ -1,91 +0,0 @@
import tempfile
import unittest
from pathlib import Path
from unittest.mock import patch, MagicMock, call
from crewai.cli.settings.main import SettingsCommand
from crewai.cli.config import (
Settings,
USER_SETTINGS_KEYS,
CLI_SETTINGS_KEYS,
DEFAULT_CLI_SETTINGS,
HIDDEN_SETTINGS_KEYS,
READONLY_SETTINGS_KEYS,
)
import shutil
class TestSettingsCommand(unittest.TestCase):
def setUp(self):
self.test_dir = Path(tempfile.mkdtemp())
self.config_path = self.test_dir / "settings.json"
self.settings = Settings(config_path=self.config_path)
self.settings_command = SettingsCommand(
settings_kwargs={"config_path": self.config_path}
)
def tearDown(self):
shutil.rmtree(self.test_dir)
@patch("crewai.cli.settings.main.console")
@patch("crewai.cli.settings.main.Table")
def test_list_settings(self, mock_table_class, mock_console):
mock_table_instance = MagicMock()
mock_table_class.return_value = mock_table_instance
self.settings_command.list()
# Tests that the table is created skipping hidden settings
mock_table_instance.add_row.assert_has_calls(
[
call(
field_name,
getattr(self.settings, field_name) or "Not set",
field_info.description,
)
for field_name, field_info in Settings.model_fields.items()
if field_name not in HIDDEN_SETTINGS_KEYS
]
)
# Tests that the table is printed
mock_console.print.assert_called_once_with(mock_table_instance)
def test_set_valid_keys(self):
valid_keys = Settings.model_fields.keys() - (
READONLY_SETTINGS_KEYS + HIDDEN_SETTINGS_KEYS
)
for key in valid_keys:
test_value = f"some_value_for_{key}"
self.settings_command.set(key, test_value)
self.assertEqual(getattr(self.settings_command.settings, key), test_value)
def test_set_invalid_key(self):
with self.assertRaises(SystemExit):
self.settings_command.set("invalid_key", "value")
def test_set_readonly_keys(self):
for key in READONLY_SETTINGS_KEYS:
with self.assertRaises(SystemExit):
self.settings_command.set(key, "some_readonly_key_value")
def test_set_hidden_keys(self):
for key in HIDDEN_SETTINGS_KEYS:
with self.assertRaises(SystemExit):
self.settings_command.set(key, "some_hidden_key_value")
def test_reset_all_settings(self):
for key in USER_SETTINGS_KEYS + CLI_SETTINGS_KEYS:
setattr(self.settings_command.settings, key, f"custom_value_for_{key}")
self.settings_command.settings.dump()
self.settings_command.reset_all_settings()
print(USER_SETTINGS_KEYS)
for key in USER_SETTINGS_KEYS:
self.assertEqual(getattr(self.settings_command.settings, key), None)
for key in CLI_SETTINGS_KEYS:
self.assertEqual(
getattr(self.settings_command.settings, key), DEFAULT_CLI_SETTINGS[key]
)

View File

@@ -755,15 +755,3 @@ def test_multiple_routers_from_same_trigger():
assert execution_order.index("anemia_analysis") > execution_order.index(
"anemia_router"
)
def test_flow_name():
class MyFlow(Flow):
name = "MyFlow"
@start()
def start(self):
return "Hello, world!"
flow = MyFlow()
assert flow.name == "MyFlow"

View File

@@ -55,11 +55,10 @@ def mem0_storage_with_mocked_config(mock_mem0_memory):
}
# Instantiate the class with memory_config
# Parameters like run_id, includes, and excludes don't matter in Memory OSS
crew = MockCrew(
memory_config={
"provider": "mem0",
"config": {"user_id": "test_user", "local_mem0_config": config, "run_id": "my_run_id", "includes": "include1","excludes": "exclude1", "infer" : True},
"config": {"user_id": "test_user", "local_mem0_config": config},
}
)
@@ -96,10 +95,6 @@ def mem0_storage_with_memory_client_using_config_from_crew(mock_mem0_memory_clie
"api_key": "ABCDEFGH",
"org_id": "my_org_id",
"project_id": "my_project_id",
"run_id": "my_run_id",
"includes": "include1",
"excludes": "exclude1",
"infer": True
},
}
)
@@ -155,75 +150,28 @@ def test_mem0_storage_with_explict_config(
assert (
mem0_storage_with_memory_client_using_explictly_config.config == expected_config
)
def test_mem0_storage_updates_project_with_custom_categories(mock_mem0_memory_client):
mock_mem0_memory_client.update_project = MagicMock()
new_categories = [
{"lifestyle_management_concerns": "Tracks daily routines, habits, hobbies and interests including cooking, time management and work-life balance"},
]
crew = MockCrew(
memory_config={
"provider": "mem0",
"config": {
"user_id": "test_user",
"api_key": "ABCDEFGH",
"org_id": "my_org_id",
"project_id": "my_project_id",
"custom_categories": new_categories,
},
}
)
assert (
mem0_storage_with_memory_client_using_explictly_config.memory_config
== expected_config
)
with patch.object(MemoryClient, "__new__", return_value=mock_mem0_memory_client):
_ = Mem0Storage(type="short_term", crew=crew)
mock_mem0_memory_client.update_project.assert_called_once_with(
custom_categories=new_categories
)
def test_save_method_with_memory_oss(mem0_storage_with_mocked_config):
"""Test save method for different memory types"""
mem0_storage, _, _ = mem0_storage_with_mocked_config
mem0_storage.memory.add = MagicMock()
# Test short_term memory type (already set in fixture)
test_value = "This is a test memory"
test_metadata = {"key": "value"}
mem0_storage.save(test_value, test_metadata)
mem0_storage.memory.add.assert_called_once_with(
[{"role": "assistant" , "content": test_value}],
infer=True,
[{'role': 'assistant' , 'content': test_value}],
agent_id="Test_Agent",
infer=False,
metadata={"type": "short_term", "key": "value"},
run_id="my_run_id",
user_id="test_user",
agent_id='Test_Agent'
)
def test_save_method_with_multiple_agents(mem0_storage_with_mocked_config):
mem0_storage, _, _ = mem0_storage_with_mocked_config
mem0_storage.crew.agents = [MagicMock(role="Test Agent"), MagicMock(role="Test Agent 2"), MagicMock(role="Test Agent 3")]
mem0_storage.memory.add = MagicMock()
test_value = "This is a test memory"
test_metadata = {"key": "value"}
mem0_storage.save(test_value, test_metadata)
mem0_storage.memory.add.assert_called_once_with(
[{"role": "assistant" , "content": test_value}],
infer=True,
metadata={"type": "short_term", "key": "value"},
run_id="my_run_id",
user_id="test_user",
agent_id='Test_Agent_Test_Agent_2_Test_Agent_3'
)
@@ -231,24 +179,19 @@ def test_save_method_with_memory_client(mem0_storage_with_memory_client_using_co
"""Test save method for different memory types"""
mem0_storage = mem0_storage_with_memory_client_using_config_from_crew
mem0_storage.memory.add = MagicMock()
# Test short_term memory type (already set in fixture)
test_value = "This is a test memory"
test_metadata = {"key": "value"}
mem0_storage.save(test_value, test_metadata)
mem0_storage.memory.add.assert_called_once_with(
[{'role': 'assistant' , 'content': test_value}],
infer=True,
agent_id="Test_Agent",
infer=False,
metadata={"type": "short_term", "key": "value"},
version="v2",
run_id="my_run_id",
includes="include1",
excludes="exclude1",
output_format='v1.1',
user_id='test_user',
agent_id='Test_Agent'
output_format="v1.1"
)
@@ -261,14 +204,13 @@ def test_search_method_with_memory_oss(mem0_storage_with_mocked_config):
results = mem0_storage.search("test query", limit=5, score_threshold=0.5)
mem0_storage.memory.search.assert_called_once_with(
query="test query",
limit=5,
user_id="test_user",
filters={'AND': [{'run_id': 'my_run_id'}]},
threshold=0.5
query="test query",
limit=5,
agent_id="Test_Agent",
user_id="test_user"
)
assert len(results) == 2
assert len(results) == 1
assert results[0]["content"] == "Result 1"
@@ -281,85 +223,13 @@ def test_search_method_with_memory_client(mem0_storage_with_memory_client_using_
results = mem0_storage.search("test query", limit=5, score_threshold=0.5)
mem0_storage.memory.search.assert_called_once_with(
query="test query",
limit=5,
query="test query",
limit=5,
agent_id="Test_Agent",
metadata={"type": "short_term"},
user_id="test_user",
version='v2',
run_id="my_run_id",
output_format='v1.1',
filters={'AND': [{'run_id': 'my_run_id'}]},
threshold=0.5
output_format='v1.1'
)
assert len(results) == 2
assert results[0]["content"] == "Result 1"
def test_mem0_storage_default_infer_value(mock_mem0_memory_client):
"""Test that Mem0Storage sets infer=True by default for short_term memory."""
with patch.object(MemoryClient, "__new__", return_value=mock_mem0_memory_client):
crew = MockCrew(
memory_config={
"provider": "mem0",
"config": {
"user_id": "test_user",
"api_key": "ABCDEFGH"
},
}
)
mem0_storage = Mem0Storage(type="short_term", crew=crew)
assert mem0_storage.infer is True
def test_save_memory_using_agent_entity(mock_mem0_memory_client):
config = {
"agent_id": "agent-123",
}
mock_memory = MagicMock(spec=Memory)
with patch.object(Memory, "__new__", return_value=mock_memory):
mem0_storage = Mem0Storage(type="external", config=config)
mem0_storage.save("test memory", {"key": "value"})
mem0_storage.memory.add.assert_called_once_with(
[{'role': 'assistant' , 'content': 'test memory'}],
infer=True,
metadata={"type": "external", "key": "value"},
agent_id="agent-123",
)
def test_search_method_with_agent_entity():
mem0_storage = Mem0Storage(type="external", config={"agent_id": "agent-123"})
mock_results = {"results": [{"score": 0.9, "content": "Result 1"}, {"score": 0.4, "content": "Result 2"}]}
mem0_storage.memory.search = MagicMock(return_value=mock_results)
results = mem0_storage.search("test query", limit=5, score_threshold=0.5)
mem0_storage.memory.search.assert_called_once_with(
query="test query",
limit=5,
filters={"AND": [{"agent_id": "agent-123"}]},
threshold=0.5,
)
assert len(results) == 2
assert results[0]["content"] == "Result 1"
def test_search_method_with_agent_id_and_user_id():
mem0_storage = Mem0Storage(type="external", config={"agent_id": "agent-123", "user_id": "user-123"})
mock_results = {"results": [{"score": 0.9, "content": "Result 1"}, {"score": 0.4, "content": "Result 2"}]}
mem0_storage.memory.search = MagicMock(return_value=mock_results)
results = mem0_storage.search("test query", limit=5, score_threshold=0.5)
mem0_storage.memory.search.assert_called_once_with(
query="test query",
limit=5,
user_id='user-123',
filters={"OR": [{"user_id": "user-123"}, {"agent_id": "agent-123"}]},
threshold=0.5,
)
assert len(results) == 2
assert len(results) == 1
assert results[0]["content"] == "Result 1"
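As a standalone illustration of the `score_threshold` behavior these search tests assert: the mocked results carry scores 0.9 and 0.4, so a threshold of 0.5 leaves a single surviving result. This sketch mirrors that filtering outside of `Mem0Storage.search`, which performs it internally:

```python
# Standalone sketch of the threshold filtering asserted above; the real
# filtering happens inside Mem0Storage.search.
mock_results = {
    "results": [
        {"score": 0.9, "content": "Result 1"},
        {"score": 0.4, "content": "Result 2"},
    ]
}
score_threshold = 0.5
filtered = [r for r in mock_results["results"] if r["score"] >= score_threshold]
assert len(filtered) == 1
assert filtered[0]["content"] == "Result 1"
```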

View File

@@ -1,310 +0,0 @@
import pytest
from unittest.mock import Mock, patch
from datetime import datetime
from crewai import Agent, Crew, Task, Process
from crewai.crews.execution_trace import ExecutionStep, ExecutionTrace
from crewai.utilities.execution_trace_collector import ExecutionTraceCollector
from crewai.tools.base_tool import BaseTool
class MockTool(BaseTool):
name: str = "mock_tool"
description: str = "A mock tool for testing"
def _run(self, query: str) -> str:
return f"Mock result for: {query}"
@pytest.fixture
def mock_llm():
llm = Mock()
llm.call.return_value = "Test response"
llm.supports_stop_words.return_value = True
llm.stop = []
return llm
@pytest.fixture
def test_agent(mock_llm):
return Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm=mock_llm,
tools=[MockTool()]
)
@pytest.fixture
def test_task(test_agent):
return Task(
description="Test task description",
expected_output="Test expected output",
agent=test_agent
)
def test_execution_step_creation():
"""Test creating an ExecutionStep."""
step = ExecutionStep(
timestamp=datetime.now(),
step_type="agent_thought",
agent_role="Test Agent",
task_description="Test task",
content={"thought": "I need to think about this"},
metadata={"iteration": 1}
)
assert step.step_type == "agent_thought"
assert step.agent_role == "Test Agent"
assert step.content["thought"] == "I need to think about this"
assert step.metadata["iteration"] == 1
def test_execution_trace_creation():
"""Test creating an ExecutionTrace."""
trace = ExecutionTrace()
step1 = ExecutionStep(
timestamp=datetime.now(),
step_type="task_started",
content={"task_id": "1"}
)
step2 = ExecutionStep(
timestamp=datetime.now(),
step_type="agent_thought",
agent_role="Test Agent",
content={"thought": "Starting work"}
)
trace.add_step(step1)
trace.add_step(step2)
assert trace.total_steps == 2
assert len(trace.steps) == 2
assert trace.get_steps_by_type("task_started") == [step1]
assert trace.get_steps_by_agent("Test Agent") == [step2]
def test_execution_trace_collector():
"""Test the ExecutionTraceCollector."""
collector = ExecutionTraceCollector()
collector.start_collecting()
assert collector.is_collecting is True
assert collector.trace.start_time is not None
trace = collector.stop_collecting()
assert collector.is_collecting is False
assert trace.end_time is not None
assert isinstance(trace, ExecutionTrace)
@patch('crewai.crew.crewai_event_bus')
def test_crew_with_execution_trace_enabled(mock_event_bus, test_agent, test_task, mock_llm):
"""Test crew execution with trace_execution=True."""
crew = Crew(
agents=[test_agent],
tasks=[test_task],
process=Process.sequential,
trace_execution=True
)
with patch.object(test_task, 'execute_sync') as mock_execute:
from crewai.tasks.task_output import TaskOutput
mock_output = TaskOutput(
description="Test task description",
raw="Test output",
agent="Test Agent"
)
mock_execute.return_value = mock_output
result = crew.kickoff()
assert result.execution_trace is not None
assert isinstance(result.execution_trace, ExecutionTrace)
assert result.execution_trace.start_time is not None
assert result.execution_trace.end_time is not None
@patch('crewai.crew.crewai_event_bus')
def test_crew_without_execution_trace(mock_event_bus, test_agent, test_task, mock_llm):
"""Test crew execution with trace_execution=False (default)."""
crew = Crew(
agents=[test_agent],
tasks=[test_task],
process=Process.sequential,
trace_execution=False
)
with patch.object(test_task, 'execute_sync') as mock_execute:
from crewai.tasks.task_output import TaskOutput
mock_output = TaskOutput(
description="Test task description",
raw="Test output",
agent="Test Agent"
)
mock_execute.return_value = mock_output
result = crew.kickoff()
assert result.execution_trace is None
def test_execution_trace_with_multiple_agents_and_tasks(mock_llm):
"""Test execution trace with multiple agents and tasks."""
agent1 = Agent(
role="Agent 1",
goal="Goal 1",
backstory="Backstory 1",
llm=mock_llm
)
agent2 = Agent(
role="Agent 2",
goal="Goal 2",
backstory="Backstory 2",
llm=mock_llm
)
task1 = Task(
description="Task 1",
expected_output="Output 1",
agent=agent1
)
task2 = Task(
description="Task 2",
expected_output="Output 2",
agent=agent2
)
crew = Crew(
agents=[agent1, agent2],
tasks=[task1, task2],
process=Process.sequential,
trace_execution=True
)
with patch.object(task1, 'execute_sync') as mock_execute1, \
patch.object(task2, 'execute_sync') as mock_execute2:
from crewai.tasks.task_output import TaskOutput
mock_output1 = TaskOutput(
description="Task 1",
raw="Output 1",
agent="Agent 1"
)
mock_output2 = TaskOutput(
description="Task 2",
raw="Output 2",
agent="Agent 2"
)
mock_execute1.return_value = mock_output1
mock_execute2.return_value = mock_output2
result = crew.kickoff()
assert result.execution_trace is not None
agent1_steps = result.execution_trace.get_steps_by_agent("Agent 1")
agent2_steps = result.execution_trace.get_steps_by_agent("Agent 2")
assert len(agent1_steps) >= 0
assert len(agent2_steps) >= 0
def test_execution_trace_step_types():
"""Test that different step types are properly categorized."""
trace = ExecutionTrace()
steps_data = [
("task_started", "Task 1", {}),
("agent_thought", "Agent 1", {"thought": "I need to analyze this"}),
("tool_call_started", "Agent 1", {"tool_name": "search", "args": {"query": "test"}}),
("tool_call_completed", "Agent 1", {"tool_name": "search", "output": "results"}),
("agent_execution_completed", "Agent 1", {"output": "Final answer"}),
("task_completed", "Task 1", {"output": "Task complete"}),
]
for step_type, agent_role, content in steps_data:
step = ExecutionStep(
timestamp=datetime.now(),
step_type=step_type,
agent_role=agent_role if "agent" in step_type or "tool" in step_type else None,
task_description="Task 1" if "task" in step_type else None,
content=content
)
trace.add_step(step)
assert len(trace.get_steps_by_type("task_started")) == 1
assert len(trace.get_steps_by_type("agent_thought")) == 1
assert len(trace.get_steps_by_type("tool_call_started")) == 1
assert len(trace.get_steps_by_type("tool_call_completed")) == 1
assert len(trace.get_steps_by_type("agent_execution_completed")) == 1
assert len(trace.get_steps_by_type("task_completed")) == 1
agent_steps = trace.get_steps_by_agent("Agent 1")
assert len(agent_steps) == 4
def test_execution_trace_with_async_tasks(mock_llm):
"""Test execution trace with async tasks."""
agent = Agent(
role="Async Agent",
goal="Async goal",
backstory="Async backstory",
llm=mock_llm
)
task = Task(
description="Async task",
expected_output="Async output",
agent=agent,
async_execution=True
)
crew = Crew(
agents=[agent],
tasks=[task],
process=Process.sequential,
trace_execution=True
)
with patch.object(task, 'execute_async') as mock_execute_async:
from concurrent.futures import Future
from crewai.tasks.task_output import TaskOutput
future = Future()
mock_output = TaskOutput(
description="Async task",
raw="Async output",
agent="Async Agent"
)
future.set_result(mock_output)
mock_execute_async.return_value = future
result = crew.kickoff()
assert result.execution_trace is not None
assert isinstance(result.execution_trace, ExecutionTrace)
def test_execution_trace_error_handling():
"""Test execution trace handles errors gracefully."""
collector = ExecutionTraceCollector()
collector.start_collecting()
mock_event = Mock()
mock_event.agent = Mock()
mock_event.agent.role = "Test Agent"
collector._handle_agent_started(mock_event)
trace = collector.stop_collecting()
assert isinstance(trace, ExecutionTrace)

View File

@@ -0,0 +1,116 @@
import pytest
import importlib.metadata
from packaging import version
from crewai.llm import LLM
from crewai.agent import Agent
from crewai.task import Task
from crewai.crew import Crew
def test_litellm_minimum_version_constraint():
"""Test that litellm meets the minimum version requirement."""
try:
litellm_version = importlib.metadata.version("litellm")
minimum_version = "1.74.3"
assert version.parse(litellm_version) >= version.parse(minimum_version), (
f"litellm version {litellm_version} is below minimum required version {minimum_version}"
)
except importlib.metadata.PackageNotFoundError:
pytest.fail("litellm package is not installed")
def test_llm_creation_with_relaxed_litellm_constraint():
"""Test that LLM can be created successfully with the relaxed litellm constraint."""
llm = LLM(model="gpt-4o-mini")
assert llm is not None
assert llm.model == "gpt-4o-mini"
def test_basic_llm_functionality_with_relaxed_constraint():
"""Test that basic LLM functionality works with the relaxed litellm constraint."""
llm = LLM(model="gpt-4o-mini", temperature=0.7, max_tokens=100)
assert llm.model == "gpt-4o-mini"
assert llm.temperature == 0.7
assert llm.max_tokens == 100
def test_agent_creation_with_relaxed_litellm_constraint():
"""Test that Agent can be created with LLM using relaxed litellm constraint."""
llm = LLM(model="gpt-4o-mini")
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm=llm
)
assert agent is not None
assert agent.llm == llm
assert agent.role == "Test Agent"
def test_crew_functionality_with_relaxed_litellm_constraint():
"""Test that Crew functionality works with the relaxed litellm constraint."""
llm = LLM(model="gpt-4o-mini")
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm=llm
)
task = Task(
description="Test task description",
expected_output="Test output",
agent=agent
)
crew = Crew(
agents=[agent],
tasks=[task]
)
assert crew is not None
assert len(crew.agents) == 1
assert len(crew.tasks) == 1
assert crew.agents[0] == agent
assert crew.tasks[0] == task
def test_litellm_import_functionality():
"""Test that litellm can be imported and basic functionality works."""
import litellm
from litellm.exceptions import ContextWindowExceededError, AuthenticationError
assert hasattr(litellm, 'completion')
assert ContextWindowExceededError is not None
assert AuthenticationError is not None
def test_llm_supports_function_calling():
"""Test that LLM function calling support detection works with relaxed constraint."""
llm = LLM(model="gpt-4o-mini")
supports_functions = llm.supports_function_calling()
assert isinstance(supports_functions, bool)
def test_llm_context_window_size():
"""Test that LLM context window size detection works with relaxed constraint."""
llm = LLM(model="gpt-4o-mini")
context_window = llm.get_context_window_size()
assert isinstance(context_window, int)
assert context_window > 0
def test_llm_anthropic_model_detection():
"""Test that Anthropic model detection works with relaxed constraint."""
anthropic_llm = LLM(model="anthropic/claude-3-sonnet")
openai_llm = LLM(model="gpt-4o-mini")
assert anthropic_llm._is_anthropic_model() is True
assert openai_llm._is_anthropic_model() is False
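The minimum-version check in `test_litellm_minimum_version_constraint` can also be run ad hoc; a minimal sketch that mirrors the test above:

```python
import importlib.metadata

from packaging import version

# Verify the installed litellm satisfies the relaxed ">=1.74.3" constraint.
installed = importlib.metadata.version("litellm")
assert version.parse(installed) >= version.parse("1.74.3"), (
    f"litellm {installed} is below the minimum required 1.74.3"
)
```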

View File

@@ -1,25 +0,0 @@
from unittest.mock import patch
import pytest
from crewai.rag.embeddings.configurator import EmbeddingConfigurator
def test_configure_embedder_importerror():
configurator = EmbeddingConfigurator()
embedder_config = {
'provider': 'openai',
'config': {
'model': 'text-embedding-ada-002',
}
}
with patch('chromadb.utils.embedding_functions.openai_embedding_function.OpenAIEmbeddingFunction') as mock_openai:
mock_openai.side_effect = ImportError("Module not found.")
with pytest.raises(ImportError) as exc_info:
configurator.configure_embedder(embedder_config)
assert str(exc_info.value) == "Module not found."
mock_openai.assert_called_once()

View File

@@ -64,8 +64,7 @@ def base_agent():
llm="gpt-4o-mini",
goal="Just say hi",
backstory="You are a helpful assistant that just says hi",
)
)
@pytest.fixture(scope="module")
def base_task(base_agent):
@@ -75,7 +74,6 @@ def base_task(base_agent):
agent=base_agent,
)
event_listener = EventListener()
@@ -450,27 +448,6 @@ def test_flow_emits_start_event():
assert received_events[0].type == "flow_started"
def test_flow_name_emitted_to_event_bus():
received_events = []
class MyFlowClass(Flow):
name = "PRODUCTION_FLOW"
@start()
def start(self):
return "Hello, world!"
@crewai_event_bus.on(FlowStartedEvent)
def handle_flow_start(source, event):
received_events.append(event)
flow = MyFlowClass()
flow.kickoff()
assert len(received_events) == 1
assert received_events[0].flow_name == "PRODUCTION_FLOW"
def test_flow_emits_finish_event():
received_events = []
@@ -779,7 +756,6 @@ def test_streaming_empty_response_handling():
received_chunks = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMStreamChunkEvent)
def handle_stream_chunk(source, event):
received_chunks.append(event.chunk)
@@ -817,7 +793,6 @@ def test_streaming_empty_response_handling():
# Restore the original method
llm.call = original_call
@pytest.mark.vcr(filter_headers=["authorization"])
def test_stream_llm_emits_event_with_task_and_agent_info():
completed_event = []
@@ -826,7 +801,6 @@ def test_stream_llm_emits_event_with_task_and_agent_info():
stream_event = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMCallFailedEvent)
def handle_llm_failed(source, event):
failed_event.append(event)
@@ -853,7 +827,7 @@ def test_stream_llm_emits_event_with_task_and_agent_info():
description="Just say hi",
expected_output="hi",
llm=LLM(model="gpt-4o-mini", stream=True),
agent=agent,
agent=agent
)
crew = Crew(agents=[agent], tasks=[task])
@@ -881,7 +855,6 @@ def test_stream_llm_emits_event_with_task_and_agent_info():
assert set(all_task_id) == {task.id}
assert set(all_task_name) == {task.name}
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_event_with_task_and_agent_info(base_agent, base_task):
completed_event = []
@@ -890,7 +863,6 @@ def test_llm_emits_event_with_task_and_agent_info(base_agent, base_task):
stream_event = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMCallFailedEvent)
def handle_llm_failed(source, event):
failed_event.append(event)
@@ -932,7 +904,6 @@ def test_llm_emits_event_with_task_and_agent_info(base_agent, base_task):
assert set(all_task_id) == {base_task.id}
assert set(all_task_name) == {base_task.name}
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_event_with_lite_agent():
completed_event = []
@@ -941,7 +912,6 @@ def test_llm_emits_event_with_lite_agent():
stream_event = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMCallFailedEvent)
def handle_llm_failed(source, event):
failed_event.append(event)
@@ -966,6 +936,7 @@ def test_llm_emits_event_with_lite_agent():
)
agent.kickoff(messages=[{"role": "user", "content": "say hi!"}])
assert len(completed_event) == 2
assert len(failed_event) == 0
assert len(started_event) == 2

uv.lock generated

File diff suppressed because it is too large