Compare commits

1 commit

Author: Lucas Gomide
SHA1: df4754301a
Message: docs: add docs about Memory Events
Date: 2025-07-02 12:04:17 -03:00

2076 changed files with 36020 additions and 222960 deletions

[Binary image files changed; previews and size metadata omitted]

[Next file diff]

@@ -260,7 +260,7 @@ def handle_success(self):
# Handle success case
pass
@listen("failure_path")
@listen("failure_path")
def handle_failure(self):
# Handle failure case
pass
@@ -288,7 +288,7 @@ class SelectiveFlow(Flow):
def critical_step(self):
# Only this method's state is persisted
self.state["important_data"] = "value"
@start()
def temporary_step(self):
# This method's state is not persisted
@@ -322,20 +322,20 @@ flow.plot("workflow_diagram") # Generates HTML visualization
class CyclicFlow(Flow):
max_iterations = 5
current_iteration = 0
@start("loop")
def process_iteration(self):
if self.current_iteration >= self.max_iterations:
return
# Process current iteration
self.current_iteration += 1
@router(process_iteration)
def check_continue(self):
if self.current_iteration < self.max_iterations:
return "loop" # Continue cycling
return "complete"
@listen("complete")
def finalize(self):
# Final processing
@@ -369,7 +369,7 @@ def risky_operation(self):
self.state["success"] = False
return None
@listen(risky_operation)
def handle_result(self, result):
if self.state.get("success", False):
# Handle success case
@@ -390,7 +390,7 @@ class CrewOrchestrationFlow(Flow[WorkflowState]):
result = research_crew.crew().kickoff(inputs={"topic": self.state.research_topic})
self.state.research_results = result.raw
return result
@listen(research_phase)
def analysis_phase(self, research_results):
analysis_crew = AnalysisCrew()
@@ -400,13 +400,13 @@ class CrewOrchestrationFlow(Flow[WorkflowState]):
})
self.state.analysis_results = result.raw
return result
@router(analysis_phase)
def decide_next_action(self):
if self.state.analysis_results.confidence > 0.7:
return "generate_report"
return "additional_research"
@listen("generate_report")
def final_report(self):
reporting_crew = ReportingCrew()
@@ -439,7 +439,7 @@ class CrewOrchestrationFlow(Flow[WorkflowState]):
## CrewAI Version Compatibility:
- Stay updated with CrewAI releases for new features and bug fixes
- Test crew functionality when upgrading CrewAI versions
- Use version constraints in pyproject.toml (e.g., "crewai[tools]>=0.140.0,<1.0.0")
- Use version constraints in pyproject.toml (e.g., "crewai[tools]>=0.134.0,<1.0.0")
- Monitor deprecation warnings for future compatibility
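
One lightweight complement to the pyproject constraint is a runtime guard that fails fast on an untested version. The sketch below is illustrative rather than part of CrewAI, and assumes the `packaging` library is available:

```python
from importlib.metadata import version

from packaging.version import Version

# Floor and ceiling mirror the pyproject constraint above; adjust them to
# the release range you have actually tested.
TESTED_FLOOR = Version("0.134.0")
TESTED_CEILING = Version("1.0.0")


def check_crewai_version() -> None:
    """Fail fast if the installed crewai release is outside the tested range."""
    installed = Version(version("crewai"))
    if not (TESTED_FLOOR <= installed < TESTED_CEILING):
        raise RuntimeError(
            f"crewai {installed} is outside the tested range "
            f">={TESTED_FLOOR},<{TESTED_CEILING}"
        )


check_crewai_version()
```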
## Code Examples and Implementation Patterns
@@ -464,22 +464,22 @@ class ResearchOutput(BaseModel):
@CrewBase
class ResearchCrew():
"""Advanced research crew with structured outputs and validation"""
agents: List[BaseAgent]
tasks: List[Task]
@before_kickoff
def setup_environment(self):
"""Initialize environment before crew execution"""
print("🚀 Setting up research environment...")
# Validate API keys, create directories, etc.
@after_kickoff
def cleanup_and_report(self, output):
"""Handle post-execution tasks"""
print(f"✅ Research completed. Generated {len(output.tasks_output)} task outputs")
print(f"📊 Token usage: {output.token_usage}")
@agent
def researcher(self) -> Agent:
return Agent(
@@ -490,7 +490,7 @@ class ResearchCrew():
max_iter=15,
max_execution_time=1800
)
@agent
def analyst(self) -> Agent:
return Agent(
@@ -499,7 +499,7 @@ class ResearchCrew():
verbose=True,
memory=True
)
@task
def research_task(self) -> Task:
return Task(
@@ -507,7 +507,7 @@ class ResearchCrew():
agent=self.researcher(),
output_pydantic=ResearchOutput
)
@task
def validation_task(self) -> Task:
return Task(
@@ -517,7 +517,7 @@ class ResearchCrew():
guardrail=self.validate_research_quality,
max_retries=3
)
def validate_research_quality(self, output) -> tuple[bool, str]:
"""Custom guardrail to ensure research quality"""
content = output.raw
@@ -526,7 +526,7 @@ class ResearchCrew():
if not any(keyword in content.lower() for keyword in ['conclusion', 'finding', 'result']):
return False, "Missing key analytical elements."
return True, content
@crew
def crew(self) -> Crew:
return Crew(
@@ -557,13 +557,13 @@ class RobustSearchTool(BaseTool):
name: str = "robust_search"
description: str = "Perform web search with retry logic and error handling"
args_schema: Type[BaseModel] = SearchInput
def __init__(self, api_key: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
self.api_key = api_key or os.getenv("SEARCH_API_KEY")
self.rate_limit_delay = 1.0
self.last_request_time = 0
@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10)
@@ -575,43 +575,43 @@ class RobustSearchTool(BaseTool):
time_since_last = time.time() - self.last_request_time
if time_since_last < self.rate_limit_delay:
time.sleep(self.rate_limit_delay - time_since_last)
# Input validation
if not query or len(query.strip()) == 0:
return "Error: Empty search query provided"
if len(query) > 500:
return "Error: Search query too long (max 500 characters)"
# Perform search
results = self._perform_search(query, max_results, timeout)
self.last_request_time = time.time()
return self._format_results(results)
except requests.exceptions.Timeout:
return f"Search timed out after {timeout} seconds"
except requests.exceptions.RequestException as e:
return f"Search failed due to network error: {str(e)}"
except Exception as e:
return f"Unexpected error during search: {str(e)}"
def _perform_search(self, query: str, max_results: int, timeout: int) -> List[dict]:
"""Implement actual search logic here"""
# Your search API implementation
pass
def _format_results(self, results: List[dict]) -> str:
"""Format search results for LLM consumption"""
if not results:
return "No results found for the given query."
formatted = "Search Results:\n\n"
for i, result in enumerate(results[:10], 1):
formatted += f"{i}. {result.get('title', 'No title')}\n"
formatted += f" URL: {result.get('url', 'No URL')}\n"
formatted += f" Summary: {result.get('snippet', 'No summary')}\n\n"
return formatted
```
@@ -623,20 +623,20 @@ from crewai.memory.storage.mem0_storage import Mem0Storage
class AdvancedMemoryManager:
"""Enhanced memory management for CrewAI applications"""
def __init__(self, crew, config: dict = None):
self.crew = crew
self.config = config or {}
self.setup_memory_systems()
def setup_memory_systems(self):
"""Configure multiple memory systems"""
# Short-term memory for current session
self.short_term = ShortTermMemory()
# Long-term memory for cross-session persistence
self.long_term = LongTermMemory()
# External memory with Mem0 (if configured)
if self.config.get('use_external_memory'):
self.external = ExternalMemory.create_storage(
@@ -649,8 +649,8 @@ class AdvancedMemoryManager:
}
}
)
def save_with_context(self, content: str, memory_type: str = "short_term",
metadata: dict = None, agent: str = None):
"""Save content with enhanced metadata"""
enhanced_metadata = {
@@ -659,14 +659,14 @@ class AdvancedMemoryManager:
"crew_type": self.crew.__class__.__name__,
**(metadata or {})
}
if memory_type == "short_term":
self.short_term.save(content, enhanced_metadata, agent)
elif memory_type == "long_term":
self.long_term.save(content, enhanced_metadata, agent)
elif memory_type == "external" and hasattr(self, 'external'):
self.external.save(content, enhanced_metadata, agent)
def search_across_memories(self, query: str, limit: int = 5) -> dict:
"""Search across all memory systems"""
results = {
@@ -674,23 +674,23 @@ class AdvancedMemoryManager:
"long_term": [],
"external": []
}
# Search short-term memory
results["short_term"] = self.short_term.search(query, limit=limit)
# Search long-term memory
results["long_term"] = self.long_term.search(query, limit=limit)
# Search external memory (if available)
if hasattr(self, 'external'):
results["external"] = self.external.search(query, limit=limit)
return results
def cleanup_old_memories(self, days_threshold: int = 30):
"""Clean up old memories based on age"""
cutoff_time = time.time() - (days_threshold * 24 * 60 * 60)
# Implement cleanup logic based on timestamps in metadata
# This would vary based on your specific storage implementation
pass
@@ -719,12 +719,12 @@ class TaskMetrics:
class CrewMonitor:
"""Comprehensive monitoring for CrewAI applications"""
def __init__(self, crew_name: str, log_level: str = "INFO"):
self.crew_name = crew_name
self.metrics: List[TaskMetrics] = []
self.session_start = time.time()
# Setup logging
logging.basicConfig(
level=getattr(logging, log_level),
@@ -735,7 +735,7 @@ class CrewMonitor:
]
)
self.logger = logging.getLogger(f"CrewAI.{crew_name}")
def start_task_monitoring(self, task_name: str, agent_name: str) -> dict:
"""Start monitoring a task execution"""
context = {
@@ -743,16 +743,16 @@ class CrewMonitor:
"agent_name": agent_name,
"start_time": time.time()
}
self.logger.info(f"Task started: {task_name} by {agent_name}")
return context
def end_task_monitoring(self, context: dict, success: bool = True,
tokens_used: int = 0, error: str = None):
"""End monitoring and record metrics"""
end_time = time.time()
duration = end_time - context["start_time"]
# Get memory usage (if psutil is available)
memory_usage = None
try:
@@ -761,7 +761,7 @@ class CrewMonitor:
memory_usage = process.memory_info().rss / 1024 / 1024 # MB
except ImportError:
pass
metrics = TaskMetrics(
task_name=context["task_name"],
agent_name=context["agent_name"],
@@ -773,29 +773,29 @@ class CrewMonitor:
error_message=error,
memory_usage_mb=memory_usage
)
self.metrics.append(metrics)
# Log the completion
status = "SUCCESS" if success else "FAILED"
self.logger.info(f"Task {status}: {context['task_name']} "
f"(Duration: {duration:.2f}s, Tokens: {tokens_used})")
if error:
self.logger.error(f"Task error: {error}")
def get_performance_summary(self) -> Dict[str, Any]:
"""Generate comprehensive performance summary"""
if not self.metrics:
return {"message": "No metrics recorded yet"}
successful_tasks = [m for m in self.metrics if m.success]
failed_tasks = [m for m in self.metrics if not m.success]
total_duration = sum(m.duration for m in self.metrics)
total_tokens = sum(m.tokens_used for m in self.metrics)
avg_duration = total_duration / len(self.metrics)
return {
"crew_name": self.crew_name,
"session_duration": time.time() - self.session_start,
@@ -811,7 +811,7 @@ class CrewMonitor:
"most_token_intensive": max(self.metrics, key=lambda x: x.tokens_used).task_name if self.metrics else None,
"common_errors": self._get_common_errors()
}
def _get_common_errors(self) -> Dict[str, int]:
"""Get frequency of common errors"""
error_counts = {}
@@ -819,20 +819,20 @@ class CrewMonitor:
if metric.error_message:
error_counts[metric.error_message] = error_counts.get(metric.error_message, 0) + 1
return dict(sorted(error_counts.items(), key=lambda x: x[1], reverse=True))
def export_metrics(self, filename: str = None) -> str:
"""Export metrics to JSON file"""
if not filename:
filename = f"crew_metrics_{self.crew_name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
export_data = {
"summary": self.get_performance_summary(),
"detailed_metrics": [asdict(m) for m in self.metrics]
}
with open(filename, 'w') as f:
json.dump(export_data, f, indent=2, default=str)
self.logger.info(f"Metrics exported to {filename}")
return filename
@@ -847,10 +847,10 @@ def monitored_research_task(self) -> Task:
if context:
tokens = getattr(task_output, 'token_usage', {}).get('total', 0)
monitor.end_task_monitoring(context, success=True, tokens_used=tokens)
# Start monitoring would be called before task execution
# This is a simplified example - in practice you'd integrate this into the task execution flow
return Task(
config=self.tasks_config['research_task'],
agent=self.researcher(),
@@ -872,7 +872,7 @@ class ErrorSeverity(Enum):
class CrewError(Exception):
"""Base exception for CrewAI applications"""
def __init__(self, message: str, severity: ErrorSeverity = ErrorSeverity.MEDIUM,
context: dict = None):
super().__init__(message)
self.severity = severity
@@ -893,19 +893,19 @@ class ConfigurationError(CrewError):
class ErrorHandler:
"""Centralized error handling for CrewAI applications"""
def __init__(self, crew_name: str):
self.crew_name = crew_name
self.error_log: List[CrewError] = []
self.recovery_strategies: Dict[type, Callable] = {}
def register_recovery_strategy(self, error_type: type, strategy: Callable):
"""Register a recovery strategy for specific error types"""
self.recovery_strategies[error_type] = strategy
def handle_error(self, error: Exception, context: dict = None) -> Any:
"""Handle errors with appropriate recovery strategies"""
# Convert to CrewError if needed
if not isinstance(error, CrewError):
crew_error = CrewError(
@@ -915,11 +915,11 @@ class ErrorHandler:
)
else:
crew_error = error
# Log the error
self.error_log.append(crew_error)
self._log_error(crew_error)
# Apply recovery strategy if available
error_type = type(error)
if error_type in self.recovery_strategies:
@@ -931,21 +931,21 @@ class ErrorHandler:
ErrorSeverity.HIGH,
{"original_error": str(error), "recovery_error": str(recovery_error)}
))
# If critical, re-raise
if crew_error.severity == ErrorSeverity.CRITICAL:
raise crew_error
return None
def _log_error(self, error: CrewError):
"""Log error with appropriate level based on severity"""
logger = logging.getLogger(f"CrewAI.{self.crew_name}.ErrorHandler")
error_msg = f"[{error.severity.value.upper()}] {error}"
if error.context:
error_msg += f" | Context: {error.context}"
if error.severity in [ErrorSeverity.HIGH, ErrorSeverity.CRITICAL]:
logger.error(error_msg)
logger.error(f"Stack trace: {traceback.format_exc()}")
@@ -953,16 +953,16 @@ class ErrorHandler:
logger.warning(error_msg)
else:
logger.info(error_msg)
def get_error_summary(self) -> Dict[str, Any]:
"""Get summary of errors encountered"""
if not self.error_log:
return {"total_errors": 0}
severity_counts = {}
for error in self.error_log:
severity_counts[error.severity.value] = severity_counts.get(error.severity.value, 0) + 1
return {
"total_errors": len(self.error_log),
"severity_breakdown": severity_counts,
@@ -1004,7 +1004,7 @@ def robust_task(self) -> Task:
# Use fallback response
return "Task failed, using fallback response"
return wrapper
return Task(
config=self.tasks_config['research_task'],
agent=self.researcher()
@@ -1020,60 +1020,60 @@ from pydantic import BaseSettings, Field, validator
class Environment(str, Enum):
DEVELOPMENT = "development"
TESTING = "testing"
STAGING = "staging"
PRODUCTION = "production"
class CrewAISettings(BaseSettings):
"""Comprehensive settings management for CrewAI applications"""
# Environment
environment: Environment = Field(default=Environment.DEVELOPMENT)
debug: bool = Field(default=True)
# API Keys (loaded from environment)
openai_api_key: Optional[str] = Field(default=None, env="OPENAI_API_KEY")
anthropic_api_key: Optional[str] = Field(default=None, env="ANTHROPIC_API_KEY")
serper_api_key: Optional[str] = Field(default=None, env="SERPER_API_KEY")
mem0_api_key: Optional[str] = Field(default=None, env="MEM0_API_KEY")
# CrewAI Configuration
crew_max_rpm: int = Field(default=100)
crew_max_execution_time: int = Field(default=3600) # 1 hour
default_llm_model: str = Field(default="gpt-4")
fallback_llm_model: str = Field(default="gpt-3.5-turbo")
# Memory and Storage
crewai_storage_dir: str = Field(default="./storage", env="CREWAI_STORAGE_DIR")
memory_enabled: bool = Field(default=True)
memory_cleanup_interval: int = Field(default=86400) # 24 hours in seconds
# Performance
enable_caching: bool = Field(default=True)
max_retries: int = Field(default=3)
retry_delay: float = Field(default=1.0)
# Monitoring
enable_monitoring: bool = Field(default=True)
log_level: str = Field(default="INFO")
metrics_export_interval: int = Field(default=3600) # 1 hour
# Security
input_sanitization: bool = Field(default=True)
max_input_length: int = Field(default=10000)
allowed_file_types: list = Field(default=["txt", "md", "pdf", "docx"])
@validator('environment', pre=True)
def set_debug_based_on_env(cls, v):
return v
@validator('debug')
def set_debug_from_env(cls, v, values):
env = values.get('environment')
if env == Environment.PRODUCTION:
return False
return v
@validator('openai_api_key')
def validate_openai_key(cls, v):
if not v:
@@ -1081,15 +1081,15 @@ class CrewAISettings(BaseSettings):
if not v.startswith('sk-'):
raise ValueError("Invalid OpenAI API key format")
return v
@property
def is_production(self) -> bool:
return self.environment == Environment.PRODUCTION
@property
def is_development(self) -> bool:
return self.environment == Environment.DEVELOPMENT
def get_llm_config(self) -> Dict[str, Any]:
"""Get LLM configuration based on environment"""
config = {
@@ -1098,12 +1098,12 @@ class CrewAISettings(BaseSettings):
"max_tokens": 4000 if self.is_production else 2000,
"timeout": 60
}
if self.is_development:
config["model"] = self.fallback_llm_model
return config
def get_memory_config(self) -> Dict[str, Any]:
"""Get memory configuration"""
return {
@@ -1112,7 +1112,7 @@ class CrewAISettings(BaseSettings):
"cleanup_interval": self.memory_cleanup_interval,
"provider": "mem0" if self.mem0_api_key and self.is_production else "local"
}
class Config:
env_file = ".env"
env_file_encoding = 'utf-8'
@@ -1125,25 +1125,25 @@ settings = CrewAISettings()
@CrewBase
class ConfigurableCrew():
"""Crew that uses centralized configuration"""
def __init__(self):
self.settings = settings
self.validate_configuration()
def validate_configuration(self):
"""Validate configuration before crew execution"""
required_keys = [self.settings.openai_api_key]
if not all(required_keys):
raise ConfigurationError("Missing required API keys")
if not os.path.exists(self.settings.crewai_storage_dir):
os.makedirs(self.settings.crewai_storage_dir, exist_ok=True)
@agent
def adaptive_agent(self) -> Agent:
"""Agent that adapts to configuration"""
llm_config = self.settings.get_llm_config()
return Agent(
config=self.agents_config['researcher'],
llm=llm_config["model"],
@@ -1163,7 +1163,7 @@ from crewai.tasks.task_output import TaskOutput
class CrewAITestFramework:
"""Comprehensive testing framework for CrewAI applications"""
@staticmethod
def create_mock_agent(role: str = "test_agent", tools: list = None) -> Mock:
"""Create a mock agent for testing"""
@@ -1175,9 +1175,9 @@ class CrewAITestFramework:
mock_agent.llm = "gpt-3.5-turbo"
mock_agent.verbose = False
return mock_agent
@staticmethod
def create_mock_task_output(content: str, success: bool = True,
tokens: int = 100) -> TaskOutput:
"""Create a mock task output for testing"""
return TaskOutput(
@@ -1187,13 +1187,13 @@ class CrewAITestFramework:
pydantic=None,
json_dict=None
)
@staticmethod
def create_test_crew(agents: list = None, tasks: list = None) -> Crew:
"""Create a test crew with mock components"""
test_agents = agents or [CrewAITestFramework.create_mock_agent()]
test_tasks = tasks or []
return Crew(
agents=test_agents,
tasks=test_tasks,
@@ -1203,53 +1203,53 @@ class CrewAITestFramework:
# Example test cases
class TestResearchCrew:
"""Test cases for research crew functionality"""
def setup_method(self):
"""Setup test environment"""
self.framework = CrewAITestFramework()
self.mock_serper = Mock()
@patch('crewai_tools.SerperDevTool')
def test_agent_creation(self, mock_serper_tool):
"""Test agent creation with proper configuration"""
mock_serper_tool.return_value = self.mock_serper
crew = ResearchCrew()
researcher = crew.researcher()
assert researcher.role == "Senior Research Analyst"
assert len(researcher.tools) > 0
assert researcher.verbose is True
def test_task_validation(self):
"""Test task validation logic"""
crew = ResearchCrew()
# Test valid output
valid_output = self.framework.create_mock_task_output(
"This is a comprehensive research summary with conclusions and findings."
)
is_valid, message = crew.validate_research_quality(valid_output)
assert is_valid is True
# Test invalid output (too short)
invalid_output = self.framework.create_mock_task_output("Too short")
is_valid, message = crew.validate_research_quality(invalid_output)
assert is_valid is False
assert "brief" in message.lower()
@patch('requests.get')
def test_tool_error_handling(self, mock_requests):
"""Test tool error handling and recovery"""
# Simulate network error
mock_requests.side_effect = requests.exceptions.RequestException("Network error")
tool = RobustSearchTool()
result = tool._run("test query")
assert "network error" in result.lower()
assert "failed" in result.lower()
@pytest.mark.asyncio
async def test_crew_execution_flow(self):
"""Test complete crew execution with mocked dependencies"""
@@ -1257,18 +1257,18 @@ class TestResearchCrew:
mock_execute.return_value = self.framework.create_mock_task_output(
"Research completed successfully with findings and recommendations."
)
crew = ResearchCrew()
result = crew.crew().kickoff(inputs={"topic": "AI testing"})
assert result is not None
assert "successfully" in result.raw.lower()
def test_memory_integration(self):
"""Test memory system integration"""
crew = ResearchCrew()
memory_manager = AdvancedMemoryManager(crew)
# Test saving to memory
test_content = "Important research finding about AI"
memory_manager.save_with_context(
@@ -1277,34 +1277,34 @@ class TestResearchCrew:
metadata={"importance": "high"},
agent="researcher"
)
# Test searching memory
results = memory_manager.search_across_memories("AI research")
assert "short_term" in results
def test_error_handling_workflow(self):
"""Test error handling and recovery mechanisms"""
error_handler = ErrorHandler("test_crew")
# Test error registration and handling
test_error = TaskExecutionError("Test task failed", ErrorSeverity.MEDIUM)
result = error_handler.handle_error(test_error)
assert len(error_handler.error_log) == 1
assert error_handler.error_log[0].severity == ErrorSeverity.MEDIUM
def test_configuration_validation(self):
"""Test configuration validation"""
# Test with missing API key
with patch.dict(os.environ, {}, clear=True):
with pytest.raises(ValueError):
settings = CrewAISettings()
# Test with valid configuration
with patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-key"}):
settings = CrewAISettings()
assert settings.openai_api_key == "sk-test-key"
@pytest.mark.integration
def test_end_to_end_workflow(self):
"""Integration test for complete workflow"""
@@ -1315,41 +1315,41 @@ class TestResearchCrew:
# Performance testing
class TestCrewPerformance:
"""Performance tests for CrewAI applications"""
def test_memory_usage(self):
"""Test memory usage during crew execution"""
import psutil
import gc
process = psutil.Process()
initial_memory = process.memory_info().rss
# Create and run crew multiple times
for i in range(10):
crew = ResearchCrew()
# Simulate crew execution
del crew
gc.collect()
final_memory = process.memory_info().rss
memory_increase = final_memory - initial_memory
# Assert memory increase is reasonable (less than 100MB)
assert memory_increase < 100 * 1024 * 1024
def test_concurrent_execution(self):
"""Test concurrent crew execution"""
import concurrent.futures
def run_crew(crew_id):
crew = ResearchCrew()
# Simulate execution
return f"crew_{crew_id}_completed"
with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
futures = [executor.submit(run_crew, i) for i in range(5)]
results = [future.result() for future in futures]
assert len(results) == 5
assert all("completed" in result for result in results)
@@ -1400,7 +1400,7 @@ class TestCrewPerformance:
### Development:
1. Always use .env files for sensitive configuration
2. Implement comprehensive error handling and logging
3. Use structured outputs with Pydantic for reliability
4. Test crew functionality with different input scenarios
5. Follow CrewAI patterns and conventions consistently
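
A minimal sketch of items 1 and 3 together, assuming `python-dotenv` is installed (the model and field names here are illustrative):

```python
import os

from dotenv import load_dotenv  # provided by the python-dotenv package
from pydantic import BaseModel, Field

# Item 1: load secrets from .env at startup instead of hard-coding them
load_dotenv()
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]


# Item 3: declare the expected task output as a Pydantic model so results
# are validated rather than parsed ad hoc (field names are illustrative)
class ResearchFinding(BaseModel):
    title: str = Field(description="Short headline for the finding")
    summary: str = Field(description="One-paragraph supporting summary")
    confidence: float = Field(ge=0.0, le=1.0)

# A model like this is what gets passed to a Task via output_pydantic,
# as in the ResearchOutput examples earlier in this document.
```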
@@ -1426,4 +1426,4 @@ class TestCrewPerformance:
5. Use async patterns for I/O-bound operations
6. Implement proper connection pooling and resource management
7. Profile and optimize critical paths
8. Plan for horizontal scaling when needed
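
For point 5 above, a minimal sketch that fans several independent, blocking crew runs out to worker threads with `asyncio`; the `run_crew` body is a placeholder for a real `kickoff` call:

```python
import asyncio


def run_crew(topic: str) -> str:
    """Placeholder for a blocking crew run, e.g. crew().kickoff(inputs={...})."""
    return f"report for {topic}"


async def main() -> None:
    topics = ["pricing", "competitors", "regulation"]
    # Fan the blocking runs out to worker threads so the event loop stays free
    results = await asyncio.gather(
        *(asyncio.to_thread(run_crew, topic) for topic in topics)
    )
    for topic, report in zip(topics, results):
        print(f"{topic}: {report}")


if __name__ == "__main__":
    asyncio.run(main())
```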

[Next file diff]

@@ -1,21 +0,0 @@
name: "CodeQL Config"
paths-ignore:
# Ignore template files - these are boilerplate code that shouldn't be analyzed
- "src/crewai/cli/templates/**"
# Ignore test cassettes - these are test fixtures/recordings
- "tests/cassettes/**"
# Ignore cache and build artifacts
- ".cache/**"
# Ignore documentation build artifacts
- "docs/.cache/**"
paths:
# Include all Python source code
- "src/**"
# Include tests (but exclude cassettes)
- "tests/**"
# Configure specific queries or packs if needed
# queries:
# - uses: security-and-quality

[Next file diff: .github/security.md]

@@ -1,50 +1,27 @@
## CrewAI Security Policy
## CrewAI Security Vulnerability Reporting Policy
We are committed to protecting the confidentiality, integrity, and availability of the CrewAI ecosystem. This policy explains how to report potential vulnerabilities and what you can expect from us when you do.
CrewAI prioritizes the security of our software products, services, and GitHub repositories. To promptly address vulnerabilities, follow these steps for reporting security issues:
### Scope
### Reporting Process
Do **not** report vulnerabilities via public GitHub issues.
We welcome reports for vulnerabilities that could impact:
Email all vulnerability reports directly to:
**security@crewai.com**
- CrewAI-maintained source code and repositories
- CrewAI-operated infrastructure and services
- Official CrewAI releases, packages, and distributions
### Required Information
To help us quickly validate and remediate the issue, your report must include:
Issues affecting clearly unaffiliated third-party services or user-generated content are out of scope, unless you can demonstrate a direct impact on CrewAI systems or customers.
- **Vulnerability Type:** Clearly state the vulnerability type (e.g., SQL injection, XSS, privilege escalation).
- **Affected Source Code:** Provide full file paths and direct URLs (branch, tag, or commit).
- **Reproduction Steps:** Include detailed, step-by-step instructions. Screenshots are recommended.
- **Special Configuration:** Document any special settings or configurations required to reproduce.
- **Proof-of-Concept (PoC):** Provide exploit or PoC code (if available).
- **Impact Assessment:** Clearly explain the severity and potential exploitation scenarios.
### How to Report
### Our Response
- We will acknowledge receipt of your report promptly via your provided email.
- Confirmed vulnerabilities will receive priority remediation based on severity.
- Patches will be released as swiftly as possible following verification.
- **Please do not** disclose vulnerabilities via public GitHub issues, pull requests, or social media.
- Email detailed reports to **security@crewai.com** with the subject line `Security Report`.
- If you need to share large files or sensitive artifacts, mention it in your email and we will coordinate a secure transfer method.
### What to Include
Providing comprehensive information enables us to validate the issue quickly:
- **Vulnerability overview** — a concise description and classification (e.g., RCE, privilege escalation)
- **Affected components** — repository, branch, tag, or deployed service along with relevant file paths or endpoints
- **Reproduction steps** — detailed, step-by-step instructions; include logs, screenshots, or screen recordings when helpful
- **Proof-of-concept** — exploit details or code that demonstrates the impact (if available)
- **Impact analysis** — severity assessment, potential exploitation scenarios, and any prerequisites or special configurations
### Our Commitment
- **Acknowledgement:** We aim to acknowledge your report within two business days.
- **Communication:** We will keep you informed about triage results, remediation progress, and planned release timelines.
- **Resolution:** Confirmed vulnerabilities will be prioritized based on severity and fixed as quickly as possible.
- **Recognition:** We currently do not run a bug bounty program; any rewards or recognition are issued at CrewAI's discretion.
### Coordinated Disclosure
We ask that you allow us a reasonable window to investigate and remediate confirmed issues before any public disclosure. We will coordinate publication timelines with you whenever possible.
### Safe Harbor
We will not pursue or support legal action against individuals who, in good faith:
- Follow this policy and refrain from violating any applicable laws
- Avoid privacy violations, data destruction, or service disruption
- Limit testing to systems in scope and respect rate limits and terms of service
If you are unsure whether your testing is covered, please contact us at **security@crewai.com** before proceeding.
### Reward Notice
Currently, we do not offer a bug bounty program. Rewards, if issued, are discretionary.

[Next file diff]

@@ -1,48 +0,0 @@
name: Build uv cache
on:
push:
branches:
- main
paths:
- "uv.lock"
- "pyproject.toml"
schedule:
- cron: "0 0 */5 * *" # Run every 5 days at midnight UTC to prevent cache expiration
workflow_dispatch:
permissions:
contents: read
jobs:
build-cache:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.10", "3.11", "3.12", "3.13"]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
version: "0.8.4"
python-version: ${{ matrix.python-version }}
enable-cache: false
- name: Install dependencies and populate cache
run: |
echo "Building global UV cache for Python ${{ matrix.python-version }}..."
uv sync --all-groups --all-extras --no-install-project
echo "Cache populated successfully"
- name: Save uv caches
uses: actions/cache/save@v4
with:
path: |
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}

[Next file diff]

@@ -1,103 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Advanced"
on:
push:
branches: [ "main" ]
paths-ignore:
- "lib/crewai/src/crewai/cli/templates/**"
pull_request:
branches: [ "main" ]
paths-ignore:
- "lib/crewai/src/crewai/cli/templates/**"
jobs:
analyze:
name: Analyze (${{ matrix.language }})
# Runner size impacts CodeQL analysis time. To learn more, please see:
# - https://gh.io/recommended-hardware-resources-for-running-codeql
# - https://gh.io/supported-runners-and-hardware-resources
# - https://gh.io/using-larger-runners (GitHub.com only)
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
permissions:
# required for all workflows
security-events: write
# required to fetch internal or private CodeQL packs
packages: read
# only required for workflows in private repositories
actions: read
contents: read
strategy:
fail-fast: false
matrix:
include:
- language: actions
build-mode: none
- language: python
build-mode: none
# CodeQL supports the following values for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift'
# Use `c-cpp` to analyze code written in C, C++ or both
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Add any setup steps before running the `github/codeql-action/init` action.
# This includes steps like installing compilers or runtimes (`actions/setup-node`
# or others). This is typically only required for manual builds.
# - name: Setup runtime (example)
# uses: actions/setup-example@v1
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
config-file: ./.github/codeql/codeql-config.yml
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# If the analyze step fails for one of the languages you are analyzing with
# "We were unable to automatically build your code", modify the matrix above
# to set the build mode to "manual" for that language. Then modify this step
# to build your code.
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
- if: matrix.build-mode == 'manual'
shell: bash
run: |
echo 'If you are using a "manual" build mode for one or more of the' \
'languages you are analyzing, replace this with the commands to build' \
'your code, for example:'
echo ' make bootstrap'
echo ' make release'
exit 1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

[Next file diff]

@@ -2,9 +2,6 @@ name: Lint
on: [pull_request]
permissions:
contents: read
jobs:
lint:
runs-on: ubuntu-latest
@@ -18,27 +15,8 @@ jobs:
- name: Fetch Target Branch
run: git fetch origin $TARGET_BRANCH --depth=1
- name: Restore global uv cache
id: cache-restore
uses: actions/cache/restore@v4
with:
path: |
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
restore-keys: |
uv-main-py3.11-
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
version: "0.8.4"
python-version: "3.11"
enable-cache: false
- name: Install dependencies
run: uv sync --all-groups --all-extras --no-install-project
- name: Install Ruff
run: pip install ruff
- name: Get Changed Python Files
id: changed-files
@@ -52,18 +30,7 @@ jobs:
- name: Run Ruff on Changed Files
if: ${{ steps.changed-files.outputs.files != '' }}
run: |
echo "${{ steps.changed-files.outputs.files }}" \
| tr ' ' '\n' \
| grep -v 'src/crewai/cli/templates/' \
| grep -v '/tests/' \
| xargs -I{} uv run ruff check "{}"
- name: Save uv caches
if: steps.cache-restore.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
path: |
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
echo "${{ steps.changed-files.outputs.files }}" \
| tr ' ' '\n' \
| grep -v 'src/crewai/cli/templates/' \
| xargs -I{} ruff check "{}"

[Next file diff]

@@ -1,81 +0,0 @@
name: Publish to PyPI
on:
release:
types: [ published ]
workflow_dispatch:
jobs:
build:
name: Build packages
runs-on: ubuntu-latest
permissions:
contents: read
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install uv
uses: astral-sh/setup-uv@v4
- name: Build packages
run: |
uv build --all-packages
rm dist/.gitignore
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: dist
path: dist/
publish:
name: Publish to PyPI
needs: build
runs-on: ubuntu-latest
environment:
name: pypi
url: https://pypi.org/p/crewai
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
version: "0.8.4"
python-version: "3.12"
enable-cache: false
- name: Download artifacts
uses: actions/download-artifact@v4
with:
name: dist
path: dist
- name: Publish to PyPI
env:
UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
run: |
failed=0
for package in dist/*; do
if [[ "$package" == *"crewai_devtools"* ]]; then
echo "Skipping private package: $package"
continue
fi
echo "Publishing $package"
if ! uv publish "$package"; then
echo "Failed to publish $package"
failed=1
fi
done
if [ $failed -eq 1 ]; then
echo "Some packages failed to publish"
exit 1
fi

[Next file diff: .github/workflows/security-checker.yml (new file)]

@@ -0,0 +1,23 @@
name: Security Checker
on: [pull_request]
jobs:
security-check:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11.9"
- name: Install dependencies
run: pip install bandit
- name: Run Bandit
run: bandit -c pyproject.toml -r src/ -ll

[Next file diff]

@@ -3,116 +3,32 @@ name: Run Tests
on: [pull_request]
permissions:
contents: read
contents: write
env:
OPENAI_API_KEY: fake-api-key
PYTHONUNBUFFERED: 1
BRAVE_API_KEY: fake-brave-key
SNOWFLAKE_USER: fake-snowflake-user
SNOWFLAKE_PASSWORD: fake-snowflake-password
SNOWFLAKE_ACCOUNT: fake-snowflake-account
SNOWFLAKE_WAREHOUSE: fake-snowflake-warehouse
SNOWFLAKE_DATABASE: fake-snowflake-database
SNOWFLAKE_SCHEMA: fake-snowflake-schema
EMBEDCHAIN_DB_URI: sqlite:///test.db
jobs:
tests:
name: tests (${{ matrix.python-version }})
runs-on: ubuntu-latest
timeout-minutes: 15
strategy:
fail-fast: true
matrix:
python-version: ['3.10', '3.11', '3.12', '3.13']
group: [1, 2, 3, 4, 5, 6, 7, 8]
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch all history for proper diff
- name: Restore global uv cache
id: cache-restore
uses: actions/cache/restore@v4
with:
path: |
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
restore-keys: |
uv-main-py${{ matrix.python-version }}-
- name: Install uv
uses: astral-sh/setup-uv@v6
uses: astral-sh/setup-uv@v3
with:
version: "0.8.4"
python-version: ${{ matrix.python-version }}
enable-cache: false
enable-cache: true
- name: Set up Python ${{ matrix.python-version }}
run: uv python install ${{ matrix.python-version }}
- name: Install the project
run: uv sync --all-groups --all-extras
run: uv sync --dev --all-extras
- name: Restore test durations
uses: actions/cache/restore@v4
with:
path: .test_durations_py*
key: test-durations-py${{ matrix.python-version }}
- name: Run tests (group ${{ matrix.group }} of 8)
run: |
PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
DURATION_FILE="../../.test_durations_py${PYTHON_VERSION_SAFE}"
# Temporarily always skip cached durations to fix test splitting
# When durations don't match, pytest-split runs duplicate tests instead of splitting
echo "Using even test splitting (duration cache disabled until fix merged)"
DURATIONS_ARG=""
# Original logic (disabled temporarily):
# if [ ! -f "$DURATION_FILE" ]; then
# echo "No cached durations found, tests will be split evenly"
# DURATIONS_ARG=""
# elif git diff origin/${{ github.base_ref }}...HEAD --name-only 2>/dev/null | grep -q "^tests/.*\.py$"; then
# echo "Test files have changed, skipping cached durations to avoid mismatches"
# DURATIONS_ARG=""
# else
# echo "No test changes detected, using cached test durations for optimal splitting"
# DURATIONS_ARG="--durations-path=${DURATION_FILE}"
# fi
cd lib/crewai && uv run pytest \
--block-network \
--timeout=30 \
-vv \
--splits 8 \
--group ${{ matrix.group }} \
$DURATIONS_ARG \
--durations=10 \
-n auto \
--maxfail=3
- name: Run tool tests (group ${{ matrix.group }} of 8)
run: |
cd lib/crewai-tools && uv run pytest \
--block-network \
--timeout=30 \
-vv \
--splits 8 \
--group ${{ matrix.group }} \
--durations=10 \
-n auto \
--maxfail=3
- name: Save uv caches
if: steps.cache-restore.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
path: |
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
- name: Run tests
run: uv run pytest --block-network --timeout=60 -vv

[Next file diff]

@@ -3,99 +3,24 @@ name: Run Type Checks
on: [pull_request]
permissions:
contents: read
contents: write
jobs:
type-checker-matrix:
name: type-checker (${{ matrix.python-version }})
type-checker:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version: ["3.10", "3.11", "3.12", "3.13"]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
fetch-depth: 0 # Fetch all history for proper diff
python-version: "3.11.9"
- name: Restore global uv cache
id: cache-restore
uses: actions/cache/restore@v4
with:
path: |
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
restore-keys: |
uv-main-py${{ matrix.python-version }}-
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
version: "0.8.4"
python-version: ${{ matrix.python-version }}
enable-cache: false
- name: Install dependencies
run: uv sync --all-groups --all-extras
- name: Get changed Python files
id: changed-files
- name: Install Requirements
run: |
# Get the list of changed Python files compared to the base branch
echo "Fetching changed files..."
git diff --name-only --diff-filter=ACMRT origin/${{ github.base_ref }}...HEAD -- '*.py' > changed_files.txt
pip install mypy
# Filter for files in src/ directory only (excluding tests/)
grep -E "^src/" changed_files.txt > filtered_changed_files.txt || true
# Check if there are any changed files
if [ -s filtered_changed_files.txt ]; then
echo "Changed Python files in src/:"
cat filtered_changed_files.txt
echo "has_changes=true" >> $GITHUB_OUTPUT
# Convert newlines to spaces for mypy command
echo "files=$(cat filtered_changed_files.txt | tr '\n' ' ')" >> $GITHUB_OUTPUT
else
echo "No Python files changed in src/"
echo "has_changes=false" >> $GITHUB_OUTPUT
fi
- name: Run type checks on changed files
if: steps.changed-files.outputs.has_changes == 'true'
run: |
echo "Running mypy on changed files with Python ${{ matrix.python-version }}..."
uv run mypy ${{ steps.changed-files.outputs.files }}
- name: No files to check
if: steps.changed-files.outputs.has_changes == 'false'
run: echo "No Python files in src/ were modified - skipping type checks"
- name: Save uv caches
if: steps.cache-restore.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
path: |
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
# Summary job to provide single status for branch protection
type-checker:
name: type-checker
runs-on: ubuntu-latest
needs: type-checker-matrix
if: always()
steps:
- name: Check matrix results
run: |
if [ "${{ needs.type-checker-matrix.result }}" == "success" ] || [ "${{ needs.type-checker-matrix.result }}" == "skipped" ]; then
echo "✅ All type checks passed"
else
echo "❌ Type checks failed"
exit 1
fi
- name: Run type checks
run: mypy src

[Next file diff]

@@ -1,71 +0,0 @@
name: Update Test Durations
on:
push:
branches:
- main
paths:
- 'tests/**/*.py'
workflow_dispatch:
permissions:
contents: read
jobs:
update-durations:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.10', '3.11', '3.12', '3.13']
env:
OPENAI_API_KEY: fake-api-key
PYTHONUNBUFFERED: 1
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Restore global uv cache
id: cache-restore
uses: actions/cache/restore@v4
with:
path: |
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
restore-keys: |
uv-main-py${{ matrix.python-version }}-
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
version: "0.8.4"
python-version: ${{ matrix.python-version }}
enable-cache: false
- name: Install the project
run: uv sync --all-groups --all-extras
- name: Run all tests and store durations
run: |
PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
uv run pytest --store-durations --durations-path=.test_durations_py${PYTHON_VERSION_SAFE} -n auto
continue-on-error: true
- name: Save durations to cache
if: always()
uses: actions/cache/save@v4
with:
path: .test_durations_py*
key: test-durations-py${{ matrix.python-version }}
- name: Save uv caches
if: steps.cache-restore.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
path: |
~/.cache/uv
~/.local/share/uv
.venv
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}

[Next file diff: .gitignore]

@@ -2,6 +2,7 @@
.pytest_cache
__pycache__
dist/
lib/
.env
assets/*
.idea
@@ -20,9 +21,9 @@ crew_tasks_output.json
.mypy_cache
.ruff_cache
.venv
agentops.log
test_flow.html
crewairules.mdc
plan.md
conceptual_plan.md
build_image
chromadb-*.lock
build_image

[Next file diff]

@@ -1,26 +1,7 @@
repos:
- repo: local
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.8.2
hooks:
- id: ruff
name: ruff
entry: bash -c 'source .venv/bin/activate && uv run ruff check --config pyproject.toml "$@"' --
language: system
pass_filenames: true
types: [python]
args: ["--fix"]
- id: ruff-format
name: ruff-format
entry: bash -c 'source .venv/bin/activate && uv run ruff format --config pyproject.toml "$@"' --
language: system
pass_filenames: true
types: [python]
- id: mypy
name: mypy
entry: bash -c 'source .venv/bin/activate && uv run mypy --config-file pyproject.toml "$@"' --
language: system
pass_filenames: true
types: [python]
- repo: https://github.com/astral-sh/uv-pre-commit
rev: 0.9.3
hooks:
- id: uv-lock

[Next file diff: .ruff.toml (new file)]

@@ -0,0 +1,4 @@
exclude = [
"templates",
"__init__.py",
]

[Next file diff]

@@ -62,9 +62,9 @@
With over 100,000 developers certified through our community courses at [learn.crewai.com](https://learn.crewai.com), CrewAI is rapidly becoming the
standard for enterprise-ready AI automation.
# CrewAI AMP Suite
# CrewAI Enterprise Suite
CrewAI AMP Suite is a comprehensive bundle tailored for organizations that require secure, scalable, and easy-to-manage agent-driven automation.
CrewAI Enterprise Suite is a comprehensive bundle tailored for organizations that require secure, scalable, and easy-to-manage agent-driven automation.
You can try one part of the suite, the [Crew Control Plane](https://app.crewai.com), for free
@@ -76,9 +76,9 @@ You can try one part of the suite the [Crew Control Plane for free](https://app.
- **Advanced Security**: Built-in robust security and compliance measures ensuring safe deployment and management.
- **Actionable Insights**: Real-time analytics and reporting to optimize performance and decision-making.
- **24/7 Support**: Dedicated enterprise support to ensure uninterrupted operation and quick resolution of issues.
- **On-premise and Cloud Deployment Options**: Deploy CrewAI AMP on-premise or in the cloud, depending on your security and compliance requirements.
- **On-premise and Cloud Deployment Options**: Deploy CrewAI Enterprise on-premise or in the cloud, depending on your security and compliance requirements.
CrewAI AMP is designed for enterprises seeking a powerful, reliable solution to transform complex business processes into efficient,
CrewAI Enterprise is designed for enterprises seeking a powerful, reliable solution to transform complex business processes into efficient,
intelligent automations.
## Table of contents
@@ -418,10 +418,10 @@ Choose CrewAI to easily build powerful, adaptable, and production-ready AI autom
You can test different real life examples of AI crews in the [CrewAI-examples repo](https://github.com/crewAIInc/crewAI-examples?tab=readme-ov-file):
- [Landing Page Generator](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/landing_page_generator)
- [Landing Page Generator](https://github.com/crewAIInc/crewAI-examples/tree/main/landing_page_generator)
- [Having Human input on the execution](https://docs.crewai.com/how-to/Human-Input-on-Execution)
- [Trip Planner](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/trip_planner)
- [Stock Analysis](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/stock_analysis)
- [Trip Planner](https://github.com/crewAIInc/crewAI-examples/tree/main/trip_planner)
- [Stock Analysis](https://github.com/crewAIInc/crewAI-examples/tree/main/stock_analysis)
### Quick Tutorial
@@ -429,19 +429,19 @@ You can test different real life examples of AI crews in the [CrewAI-examples re
### Write Job Descriptions
[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/job-posting) or watch a video below:
[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/job-posting) or watch a video below:
[![Jobs postings](https://img.youtube.com/vi/u98wEMz-9to/maxresdefault.jpg)](https://www.youtube.com/watch?v=u98wEMz-9to "Jobs postings")
### Trip Planner
[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/trip_planner) or watch a video below:
[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/trip_planner) or watch a video below:
[![Trip Planner](https://img.youtube.com/vi/xis7rWp-hjs/maxresdefault.jpg)](https://www.youtube.com/watch?v=xis7rWp-hjs "Trip Planner")
### Stock Analysis
[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/stock_analysis) or watch a video below:
[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/stock_analysis) or watch a video below:
[![Stock Analysis](https://img.youtube.com/vi/e0Uj4yWdaAg/maxresdefault.jpg)](https://www.youtube.com/watch?v=e0Uj4yWdaAg "Stock Analysis")
@@ -674,9 +674,9 @@ CrewAI is released under the [MIT License](https://github.com/crewAIInc/crewAI/b
### Enterprise Features
- [What additional features does CrewAI AMP offer?](#q-what-additional-features-does-crewai-amp-offer)
- [Is CrewAI AMP available for cloud and on-premise deployments?](#q-is-crewai-amp-available-for-cloud-and-on-premise-deployments)
- [Can I try CrewAI AMP for free?](#q-can-i-try-crewai-amp-for-free)
- [What additional features does CrewAI Enterprise offer?](#q-what-additional-features-does-crewai-enterprise-offer)
- [Is CrewAI Enterprise available for cloud and on-premise deployments?](#q-is-crewai-enterprise-available-for-cloud-and-on-premise-deployments)
- [Can I try CrewAI Enterprise for free?](#q-can-i-try-crewai-enterprise-for-free)
### Q: What exactly is CrewAI?
@@ -732,17 +732,17 @@ A: Check out practical examples in the [CrewAI-examples repository](https://gith
A: Contributions are warmly welcomed! Fork the repository, create your branch, implement your changes, and submit a pull request. See the Contribution section of the README for detailed guidelines.
### Q: What additional features does CrewAI AMP offer?
### Q: What additional features does CrewAI Enterprise offer?
A: CrewAI AMP provides advanced features such as a unified control plane, real-time observability, secure integrations, advanced security, actionable insights, and dedicated 24/7 enterprise support.
A: CrewAI Enterprise provides advanced features such as a unified control plane, real-time observability, secure integrations, advanced security, actionable insights, and dedicated 24/7 enterprise support.
### Q: Is CrewAI AMP available for cloud and on-premise deployments?
### Q: Is CrewAI Enterprise available for cloud and on-premise deployments?
A: Yes, CrewAI AMP supports both cloud-based and on-premise deployment options, allowing enterprises to meet their specific security and compliance requirements.
A: Yes, CrewAI Enterprise supports both cloud-based and on-premise deployment options, allowing enterprises to meet their specific security and compliance requirements.
### Q: Can I try CrewAI AMP for free?
### Q: Can I try CrewAI Enterprise for free?
A: Yes, you can explore part of the CrewAI AMP Suite by accessing the [Crew Control Plane](https://app.crewai.com) for free.
A: Yes, you can explore part of the CrewAI Enterprise Suite by accessing the [Crew Control Plane](https://app.crewai.com) for free.
### Q: Does CrewAI support fine-tuning or training custom models?
@@ -762,7 +762,7 @@ A: CrewAI is highly scalable, supporting simple automations and large-scale ente
### Q: Does CrewAI offer debugging and monitoring tools?
A: Yes, CrewAI AMP includes advanced debugging, tracing, and real-time observability features, simplifying the management and troubleshooting of your automations.
A: Yes, CrewAI Enterprise includes advanced debugging, tracing, and real-time observability features, simplifying the management and troubleshooting of your automations.
### Q: What programming languages does CrewAI support?

crewAI.excalidraw (new file, 1737 lines)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,8 +0,0 @@
---
title: "GET /inputs"
description: "Get required inputs for your crew"
openapi: "/enterprise-api.en.yaml GET /inputs"
mode: "wide"
---

View File

@@ -1,29 +1,28 @@
---
title: "Introduction"
description: "Complete reference for the CrewAI AMP REST API"
description: "Complete reference for the CrewAI Enterprise REST API"
icon: "code"
mode: "wide"
---
# CrewAI AMP API
# CrewAI Enterprise API
Welcome to the CrewAI AMP API reference. This API allows you to programmatically interact with your deployed crews, enabling integration with your applications, workflows, and services.
Welcome to the CrewAI Enterprise API reference. This API allows you to programmatically interact with your deployed crews, enabling integration with your applications, workflows, and services.
## Quick Start
<Steps>
<Step title="Get Your API Credentials">
Navigate to your crew's detail page in the CrewAI AMP dashboard and copy your Bearer Token from the Status tab.
Navigate to your crew's detail page in the CrewAI Enterprise dashboard and copy your Bearer Token from the Status tab.
</Step>
<Step title="Discover Required Inputs">
Use the `GET /inputs` endpoint to see what parameters your crew expects.
</Step>
<Step title="Start a Crew Execution">
Call `POST /kickoff` with your inputs to start the crew execution and receive a `kickoff_id`.
</Step>
<Step title="Monitor Progress">
Use `GET /status/{kickoff_id}` to check execution status and retrieve results.
</Step>
@@ -46,7 +45,7 @@ curl -H "Authorization: Bearer YOUR_CREW_TOKEN" \
| **User Bearer Token** | User-scoped access | Limited permissions, suitable for user-specific operations |
<Tip>
You can find both token types in the Status tab of your crew's detail page in the CrewAI AMP dashboard.
You can find both token types in the Status tab of your crew's detail page in the CrewAI Enterprise dashboard.
</Tip>
## Base URL
@@ -62,7 +61,7 @@ Replace `your-crew-name` with your actual crew's URL from the dashboard.
## Typical Workflow
1. **Discovery**: Call `GET /inputs` to understand what your crew needs
2. **Execution**: Submit inputs via `POST /kickoff` to start processing
2. **Execution**: Submit inputs via `POST /kickoff` to start processing
3. **Monitoring**: Poll `GET /status/{kickoff_id}` until completion
4. **Results**: Extract the final output from the completed response
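To make this loop concrete, here is a minimal Python sketch of the workflow using the `requests` library. The crew URL, Bearer token, and input name are placeholders, and the status/result field names are assumptions that may vary by deployment:

```python
import time

import requests

BASE_URL = "https://your-actual-crew-name.crewai.com"  # placeholder - your crew URL
HEADERS = {"Authorization": "Bearer YOUR_CREW_TOKEN"}  # placeholder - token from the Status tab

# 1. Discovery: see which inputs the crew expects
inputs_spec = requests.get(f"{BASE_URL}/inputs", headers=HEADERS).json()
print("Required inputs:", inputs_spec)

# 2. Execution: start the crew and capture the kickoff_id
kickoff = requests.post(
    f"{BASE_URL}/kickoff",
    headers=HEADERS,
    json={"inputs": {"topic": "AI agents"}},  # hypothetical input name
)
kickoff_id = kickoff.json()["kickoff_id"]

# 3. Monitoring: poll until the execution finishes
while True:
    status = requests.get(f"{BASE_URL}/status/{kickoff_id}", headers=HEADERS).json()
    if status.get("state") in ("SUCCESS", "FAILED"):  # field names may vary
        break
    time.sleep(5)

# 4. Results: extract the final output from the completed response
print(status.get("result"))
```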
@@ -82,12 +81,12 @@ The API uses standard HTTP status codes:
## Interactive Testing
<Info>
**Why no "Send" button?** Since each CrewAI AMP user has their own unique crew URL, we use **reference mode** instead of an interactive playground to avoid confusion. This shows you exactly what the requests should look like without non-functional send buttons.
**Why no "Send" button?** Since each CrewAI Enterprise user has their own unique crew URL, we use **reference mode** instead of an interactive playground to avoid confusion. This shows you exactly what the requests should look like without non-functional send buttons.
</Info>
Each endpoint page shows you:
- ✅ **Exact request format** with all parameters
- ✅ **Response examples** for success and error cases
- ✅ **Response examples** for success and error cases
- ✅ **Code samples** in multiple languages (cURL, Python, JavaScript, etc.)
- ✅ **Authentication examples** with proper Bearer token format
@@ -104,7 +103,7 @@ Each endpoint page shows you:
**Example workflow:**
1. **Copy this cURL example** from any endpoint page
2. **Replace `your-actual-crew-name.crewai.com`** with your real crew URL
2. **Replace `your-actual-crew-name.crewai.com`** with your real crew URL
3. **Replace the Bearer token** with your real token from the dashboard
4. **Run the request** in your terminal or API client

View File

@@ -1,8 +0,0 @@
---
title: "POST /kickoff"
description: "Start a crew execution"
openapi: "/enterprise-api.en.yaml POST /kickoff"
mode: "wide"
---

View File

@@ -1,6 +0,0 @@
---
title: "POST /resume"
description: "Resume crew execution with human feedback"
openapi: "/enterprise-api.en.yaml POST /resume"
mode: "wide"
---

View File

@@ -1,8 +0,0 @@
---
title: "GET /status/{kickoff_id}"
description: "Get execution status"
openapi: "/enterprise-api.en.yaml GET /status/{kickoff_id}"
mode: "wide"
---

File diff suppressed because it is too large

View File

@@ -2,7 +2,6 @@
title: Agents
description: Detailed guide on creating and managing agents within the CrewAI framework.
icon: robot
mode: "wide"
---
## Overview of an Agent
@@ -20,7 +19,7 @@ Think of an agent as a specialized team member with specific skills, expertise,
</Tip>
<Note type="info" title="Enterprise Enhancement: Visual Agent Builder">
CrewAI AMP includes a Visual Agent Builder that simplifies agent creation and configuration without writing code. Design your agents visually and test them in real-time.
CrewAI Enterprise includes a Visual Agent Builder that simplifies agent creation and configuration without writing code. Design your agents visually and test them in real-time.
![Visual Agent Builder Screenshot](/images/enterprise/crew-studio-interface.png)
@@ -527,103 +526,6 @@ agent = Agent(
The context window management feature works automatically in the background. You don't need to call any special functions - just set `respect_context_window` to your preferred behavior and CrewAI handles the rest!
</Note>
## Direct Agent Interaction with `kickoff()`
Agents can be used directly without going through a task or crew workflow using the `kickoff()` method. This provides a simpler way to interact with an agent when you don't need the full crew orchestration capabilities.
### How `kickoff()` Works
The `kickoff()` method allows you to send messages directly to an agent and get a response, similar to how you would interact with an LLM but with all the agent's capabilities (tools, reasoning, etc.).
```python Code
from crewai import Agent
from crewai_tools import SerperDevTool
# Create an agent
researcher = Agent(
role="AI Technology Researcher",
goal="Research the latest AI developments",
tools=[SerperDevTool()],
verbose=True
)
# Use kickoff() to interact directly with the agent
result = researcher.kickoff("What are the latest developments in language models?")
# Access the raw response
print(result.raw)
```
### Parameters and Return Values
| Parameter | Type | Description |
| :---------------- | :---------------------------------- | :------------------------------------------------------------------------ |
| `messages` | `Union[str, List[Dict[str, str]]]` | Either a string query or a list of message dictionaries with role/content |
| `response_format` | `Optional[Type[Any]]` | Optional Pydantic model for structured output |
The method returns a `LiteAgentOutput` object with the following properties:
- `raw`: String containing the raw output text
- `pydantic`: Parsed Pydantic model (if a `response_format` was provided)
- `agent_role`: Role of the agent that produced the output
- `usage_metrics`: Token usage metrics for the execution
### Structured Output
You can get structured output by providing a Pydantic model as the `response_format`:
```python Code
from pydantic import BaseModel
from typing import List
class ResearchFindings(BaseModel):
main_points: List[str]
key_technologies: List[str]
future_predictions: str
# Get structured output
result = researcher.kickoff(
"Summarize the latest developments in AI for 2025",
response_format=ResearchFindings
)
# Access structured data
print(result.pydantic.main_points)
print(result.pydantic.future_predictions)
```
### Multiple Messages
You can also provide a conversation history as a list of message dictionaries:
```python Code
messages = [
{"role": "user", "content": "I need information about large language models"},
{"role": "assistant", "content": "I'd be happy to help with that! What specifically would you like to know?"},
{"role": "user", "content": "What are the latest developments in 2025?"}
]
result = researcher.kickoff(messages)
```
### Async Support
An asynchronous version is available via `kickoff_async()` with the same parameters:
```python Code
import asyncio
async def main():
result = await researcher.kickoff_async("What are the latest developments in AI?")
print(result.raw)
asyncio.run(main())
```
<Note>
The `kickoff()` method uses a `LiteAgent` internally, which provides a simpler execution flow while preserving all of the agent's configuration (role, goal, backstory, tools, etc.).
</Note>
## Important Considerations and Best Practices
### Security and Code Execution

View File

@@ -2,11 +2,8 @@
title: CLI
description: Learn how to use the CrewAI CLI to interact with CrewAI.
icon: terminal
mode: "wide"
---
<Warning>Since release 0.140.0, CrewAI AMP has been migrating its login provider, so the CLI authentication flow was updated. Users who log in with Google, or who created their account after July 3rd, 2025, will be unable to log in with older versions of the `crewai` library.</Warning>
## Overview
The CrewAI CLI provides a set of commands to interact with CrewAI, allowing you to create, train, run, and manage crews & flows.
@@ -89,7 +86,7 @@ crewai replay [OPTIONS]
- `-t, --task_id TEXT`: Replay the crew from this task ID, including all subsequent tasks
Example:
```shell Terminal
```shell Terminal
crewai replay -t task_123456
```
@@ -135,7 +132,7 @@ crewai test [OPTIONS]
- `-m, --model TEXT`: LLM Model to run the tests on the Crew (default: "gpt-4o-mini")
Example:
```shell Terminal
```shell Terminal
crewai test -n 5 -m gpt-3.5-turbo
```
@@ -152,7 +149,7 @@ Starting from version 0.103.0, the `crewai run` command can be used to run both
</Note>
<Note>
Make sure to run these commands from the directory where your CrewAI project is set up.
Make sure to run these commands from the directory where your CrewAI project is set up.
Some commands may require additional configuration or setup within your project structure.
</Note>
@@ -186,10 +183,13 @@ def crew(self) -> Crew:
### 10. Deploy
Deploy the crew or flow to [CrewAI AMP](https://app.crewai.com).
Deploy the crew or flow to [CrewAI Enterprise](https://app.crewai.com).
- **Authentication**: You need to be authenticated to deploy to CrewAI AMP.
You can login or create an account with:
- **Authentication**: You need to be authenticated to deploy to CrewAI Enterprise.
```shell Terminal
crewai signup
```
If you already have an account, you can login with:
```shell Terminal
crewai login
```
@@ -203,7 +203,7 @@ Deploy the crew or flow to [CrewAI AMP](https://app.crewai.com).
### 11. Organization Management
Manage your CrewAI AMP organizations.
Manage your CrewAI Enterprise organizations.
```shell Terminal
crewai org [COMMAND] [OPTIONS]
@@ -227,17 +227,17 @@ crewai org switch <organization_id>
```
<Note>
You must be authenticated to CrewAI AMP to use these organization management commands.
You must be authenticated to CrewAI Enterprise to use these organization management commands.
</Note>
- **Create a deployment** (continued):
- Links the deployment to the corresponding remote GitHub repository (it usually detects this automatically).
- **Deploy the Crew**: Once you are authenticated, you can deploy your crew or flow to CrewAI AMP.
- **Deploy the Crew**: Once you are authenticated, you can deploy your crew or flow to CrewAI Enterprise.
```shell Terminal
crewai deploy push
```
- Initiates the deployment process on the CrewAI AMP platform.
- Initiates the deployment process on the CrewAI Enterprise platform.
- Upon successful initiation, it will output the `Deployment created successfully!` message along with the Deployment Name and a unique Deployment ID (UUID).
- **Deployment Status**: You can check the status of your deployment with:
@@ -262,7 +262,7 @@ You must be authenticated to CrewAI AMP to use these organization management com
```shell Terminal
crewai deploy remove
```
This deletes the deployment from the CrewAI AMP platform.
This deletes the deployment from the CrewAI Enterprise platform.
- **Help Command**: You can get help with the CLI with:
```shell Terminal
@@ -270,36 +270,20 @@ You must be authenticated to CrewAI AMP to use these organization management com
```
This shows the help message for the CrewAI Deploy CLI.
Watch this video tutorial for a step-by-step demonstration of deploying your crew to [CrewAI AMP](http://app.crewai.com) using the CLI.
Watch this video tutorial for a step-by-step demonstration of deploying your crew to [CrewAI Enterprise](http://app.crewai.com) using the CLI.
<iframe
className="w-full aspect-video rounded-xl"
width="100%"
height="400"
src="https://www.youtube.com/embed/3EqSV-CYDZA"
title="CrewAI Deployment Guide"
frameBorder="0"
frameborder="0"
style={{ borderRadius: '10px' }}
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
allowFullScreen
allowfullscreen
></iframe>
### 11. Login
Authenticate with CrewAI AMP using a secure device code flow (no email entry required).
```shell Terminal
crewai login
```
What happens:
- A verification URL and short code are displayed in your terminal
- Your browser opens to the verification URL
- Enter/confirm the code to complete authentication
Notes:
- The OAuth2 provider and domain are configured via `crewai config` (defaults use `login.crewai.com`)
- After successful login, the CLI also attempts to authenticate to the Tool Repository automatically
- If you reset your configuration, run `crewai login` again to re-authenticate
### 12. API Keys
### 11. API Keys
When running the `crewai create crew` command, the CLI will show you a list of available LLM providers to choose from, followed by model selection for your chosen provider.
@@ -326,86 +310,3 @@ When you select a provider, the CLI will prompt you to enter the Key name and th
See the following link for each provider's key name:
* [LiteLLM Providers](https://docs.litellm.ai/docs/providers)
### 13. Configuration Management
Manage CLI configuration settings for CrewAI.
```shell Terminal
crewai config [COMMAND] [OPTIONS]
```
#### Commands:
- `list`: Display all CLI configuration parameters
```shell Terminal
crewai config list
```
- `set`: Set a CLI configuration parameter
```shell Terminal
crewai config set <key> <value>
```
- `reset`: Reset all CLI configuration parameters to default values
```shell Terminal
crewai config reset
```
#### Available Configuration Parameters
- `enterprise_base_url`: Base URL of the CrewAI AMP instance
- `oauth2_provider`: OAuth2 provider used for authentication (e.g., workos, okta, auth0)
- `oauth2_audience`: OAuth2 audience value, typically used to identify the target API or resource
- `oauth2_client_id`: OAuth2 client ID issued by the provider, used during authentication requests
- `oauth2_domain`: OAuth2 provider's domain (e.g., your-org.auth0.com) used for issuing tokens
#### Examples
Display current configuration:
```shell Terminal
crewai config list
```
Example output:
| Setting | Value | Description |
| :------------------ | :----------------------- | :---------------------------------------------------------- |
| enterprise_base_url | https://app.crewai.com | Base URL of the CrewAI AMP instance |
| org_name | Not set | Name of the currently active organization |
| org_uuid | Not set | UUID of the currently active organization |
| oauth2_provider | workos | OAuth2 provider (e.g., workos, okta, auth0) |
| oauth2_audience | client_01YYY | Audience identifying the target API/resource |
| oauth2_client_id | client_01XXX | OAuth2 client ID issued by the provider |
| oauth2_domain | login.crewai.com | Provider domain (e.g., your-org.auth0.com) |
Set the enterprise base URL:
```shell Terminal
crewai config set enterprise_base_url https://my-enterprise.crewai.com
```
Set OAuth2 provider:
```shell Terminal
crewai config set oauth2_provider auth0
```
Set OAuth2 domain:
```shell Terminal
crewai config set oauth2_domain my-company.auth0.com
```
Reset all configuration to defaults:
```shell Terminal
crewai config reset
```
<Tip>
After resetting configuration, re-run `crewai login` to authenticate again.
</Tip>
<Tip>
CrewAI CLI handles authentication to the Tool Repository automatically when adding packages to your project. Just append `crewai` before any `uv` command to use it. E.g. `crewai uv add requests`. For more information, see [Tool Repository](https://docs.crewai.com/enterprise/features/tool-repository) docs.
</Tip>
<Note>
Configuration settings are stored in `~/.config/crewai/settings.json`. Some settings like organization name and UUID are read-only and managed through authentication and organization commands. Tool repository related settings are hidden and cannot be set directly by users.
</Note>

View File

@@ -2,7 +2,6 @@
title: Collaboration
description: How to enable agents to work together, delegate tasks, and communicate effectively within CrewAI teams.
icon: screen-users
mode: "wide"
---
## Overview

View File

@@ -2,7 +2,6 @@
title: Crews
description: Understanding and utilizing crews in the crewAI framework with comprehensive attributes and functionalities.
icon: people-group
mode: "wide"
---
## Overview
@@ -21,7 +20,8 @@ A crew in crewAI represents a collaborative group of agents working together to
| **Function Calling LLM** _(optional)_ | `function_calling_llm` | If passed, the crew will use this LLM to do function calling for tools for all agents in the crew. Each agent can have its own LLM, which overrides the crew's LLM for function calling. |
| **Config** _(optional)_ | `config` | Optional configuration settings for the crew, in `Json` or `Dict[str, Any]` format. |
| **Max RPM** _(optional)_ | `max_rpm` | Maximum requests per minute the crew adheres to during execution. Defaults to `None`. |
| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). | |
| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
| **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
| **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
| **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
| **Step Callback** _(optional)_ | `step_callback` | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
@@ -32,7 +32,6 @@ A crew in crewAI represents a collaborative group of agents working together to
| **Prompt File** _(optional)_ | `prompt_file` | Path to the prompt JSON file to be used for the crew. |
| **Planning** *(optional)* | `planning` | Adds planning ability to the Crew. When activated before each Crew iteration, all Crew data is sent to an AgentPlanner that will plan the tasks and this plan will be added to each task description. |
| **Planning LLM** *(optional)* | `planning_llm` | The language model used by the AgentPlanner in a planning process. |
| **Knowledge Sources** _(optional)_ | `knowledge_sources` | Knowledge sources available at the crew level, accessible to all the agents. |
<Tip>
**Crew Max RPM**: The `max_rpm` attribute sets the maximum number of requests per minute the crew can perform to avoid rate limits and will override individual agents' `max_rpm` settings if you set it.

View File

@@ -2,7 +2,6 @@
title: 'Event Listeners'
description: 'Tap into CrewAI events to build custom integrations and monitoring'
icon: spinner
mode: "wide"
---
## Overview
@@ -20,7 +19,7 @@ CrewAI uses an event bus architecture to emit events throughout the execution li
When specific actions occur in CrewAI (like a Crew starting execution, an Agent completing a task, or a tool being used), the system emits corresponding events. You can register handlers for these events to execute custom code when they occur.
<Note type="info" title="Enterprise Enhancement: Prompt Tracing">
CrewAI AMP provides a built-in Prompt Tracing feature that leverages the event system to track, store, and visualize all prompts, completions, and associated metadata. This provides powerful debugging capabilities and transparency into your agent operations.
CrewAI Enterprise provides a built-in Prompt Tracing feature that leverages the event system to track, store, and visualize all prompts, completions, and associated metadata. This provides powerful debugging capabilities and transparency into your agent operations.
![Prompt Tracing Dashboard](/images/enterprise/traces-overview.png)
@@ -45,12 +44,12 @@ To create a custom event listener, you need to:
Here's a simple example of a custom event listener class:
```python
from crewai.events import (
from crewai.utilities.events import (
CrewKickoffStartedEvent,
CrewKickoffCompletedEvent,
AgentExecutionCompletedEvent,
)
from crewai.events import BaseEventListener
from crewai.utilities.events.base_event_listener import BaseEventListener
class MyCustomListener(BaseEventListener):
def __init__(self):
@@ -147,7 +146,7 @@ my_project/
```python
# my_custom_listener.py
from crewai.events import BaseEventListener
from crewai.utilities.events.base_event_listener import BaseEventListener
# ... import events ...
class MyCustomListener(BaseEventListener):
@@ -178,7 +177,14 @@ class MyCustomCrew:
# Your crew implementation...
```
This is how third-party event listeners are registered in the CrewAI codebase.
This is exactly how CrewAI's built-in `agentops_listener` is registered. In the CrewAI codebase, you'll find:
```python
# src/crewai/utilities/events/third_party/__init__.py
from .agentops_listener import agentops_listener
```
This ensures the `agentops_listener` is loaded when the `crewai.utilities.events` package is imported.
## Available Event Types
@@ -274,13 +280,84 @@ The structure of the event object depends on the event type, but all events inhe
Additional fields vary by event type. For example, `CrewKickoffCompletedEvent` includes `crew_name` and `output` fields.
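For instance, a handler can read these fields straight off the event object. A minimal sketch, using the same bus decorator pattern shown elsewhere on this page:

```python
from crewai.utilities.events import CrewKickoffCompletedEvent, crewai_event_bus

@crewai_event_bus.on(CrewKickoffCompletedEvent)
def log_crew_completion(source, event: CrewKickoffCompletedEvent):
    # crew_name and output are specific to CrewKickoffCompletedEvent
    print(f"Crew '{event.crew_name}' finished with output: {event.output}")
```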
## Real-World Example: Integration with AgentOps
CrewAI includes an example of a third-party integration with [AgentOps](https://github.com/AgentOps-AI/agentops), a monitoring and observability platform for AI agents. Here's how it's implemented:
```python
from typing import Optional
from crewai.utilities.events import (
CrewKickoffCompletedEvent,
ToolUsageErrorEvent,
ToolUsageStartedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
from crewai.utilities.events.task_events import TaskEvaluationEvent
try:
import agentops
AGENTOPS_INSTALLED = True
except ImportError:
AGENTOPS_INSTALLED = False
class AgentOpsListener(BaseEventListener):
tool_event: Optional["agentops.ToolEvent"] = None
session: Optional["agentops.Session"] = None
def __init__(self):
super().__init__()
def setup_listeners(self, crewai_event_bus):
if not AGENTOPS_INSTALLED:
return
@crewai_event_bus.on(CrewKickoffStartedEvent)
def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
self.session = agentops.init()
for agent in source.agents:
if self.session:
self.session.create_agent(
name=agent.role,
agent_id=str(agent.id),
)
@crewai_event_bus.on(CrewKickoffCompletedEvent)
def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
if self.session:
self.session.end_session(
end_state="Success",
end_state_reason="Finished Execution",
)
@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
self.tool_event = agentops.ToolEvent(name=event.tool_name)
if self.session:
self.session.record(self.tool_event)
@crewai_event_bus.on(ToolUsageErrorEvent)
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
```
This listener initializes an AgentOps session when a Crew starts, registers agents with AgentOps, tracks tool usage, and ends the session when the Crew completes.
The AgentOps listener is registered in CrewAI's event system through the import in `src/crewai/utilities/events/third_party/__init__.py`:
```python
from .agentops_listener import agentops_listener
```
This ensures the `agentops_listener` is loaded when the `crewai.utilities.events` package is imported.
## Advanced Usage: Scoped Handlers
For temporary event handling (useful for testing or specific operations), you can use the `scoped_handlers` context manager:
```python
from crewai.events import crewai_event_bus, CrewKickoffStartedEvent
from crewai.utilities.events import crewai_event_bus, CrewKickoffStartedEvent
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffStartedEvent)

View File

@@ -2,7 +2,6 @@
title: Flows
description: Learn how to create and manage AI workflows using CrewAI Flows.
icon: arrow-progress
mode: "wide"
---
## Overview
@@ -98,13 +97,7 @@ The state's unique ID and stored data can be useful for tracking flow executions
### @start()
The `@start()` decorator marks entry points for a Flow. You can:
- Declare multiple unconditional starts: `@start()`
- Gate a start on a prior method or router label: `@start("method_or_label")`
- Provide a callable condition to control when a start should fire
All satisfied `@start()` methods will execute (often in parallel) when the Flow begins or resumes.
The `@start()` decorator is used to mark a method as the starting point of a Flow. When a Flow is started, all the methods decorated with `@start()` are executed in parallel. You can have multiple start methods in a Flow, and they will all be executed when the Flow is started.
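As a quick illustration, here is a minimal sketch of a flow with two unconditional start methods and a listener:

```python Code
from crewai.flow.flow import Flow, listen, start

class ParallelStartFlow(Flow):
    @start()
    def fetch_news(self):
        # Runs as soon as the flow kicks off
        return "latest AI news"

    @start()
    def fetch_stocks(self):
        # Also runs at kickoff, alongside fetch_news
        return "stock data"

    @listen(fetch_news)
    def summarize_news(self, news):
        # Fires once fetch_news has completed
        return f"Summary of: {news}"

flow = ParallelStartFlow()
flow.kickoff()
```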
### @listen()
@@ -875,13 +868,14 @@ By exploring these examples, you can gain insights into how to leverage CrewAI F
Also, check out our YouTube video on how to use flows in CrewAI below!
<iframe
className="w-full aspect-video rounded-xl"
width="560"
height="315"
src="https://www.youtube.com/embed/MTb5my6VOT8"
title="CrewAI Flows overview"
frameBorder="0"
title="YouTube video player"
frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
referrerPolicy="strict-origin-when-cross-origin"
allowFullScreen
referrerpolicy="strict-origin-when-cross-origin"
allowfullscreen
></iframe>
## Running Flows

View File

@@ -2,7 +2,6 @@
title: Knowledge
description: What is knowledge in CrewAI and how to use it.
icon: book
mode: "wide"
---
## Overview
@@ -25,41 +24,6 @@ For file-based Knowledge Sources, make sure to place your files in a `knowledge`
Also, use relative paths from the `knowledge` directory when creating the source.
</Tip>
### Vector store (RAG) client configuration
CrewAI exposes a provider-neutral RAG client abstraction for vector stores. The default provider is ChromaDB, and Qdrant is supported as well. You can switch providers using configuration utilities.
Supported today:
- ChromaDB (default)
- Qdrant
```python Code
from crewai.rag.config.utils import set_rag_config, get_rag_client, clear_rag_config
# ChromaDB (default)
from crewai.rag.chromadb.config import ChromaDBConfig
set_rag_config(ChromaDBConfig())
chromadb_client = get_rag_client()
# Qdrant
from crewai.rag.qdrant.config import QdrantConfig
set_rag_config(QdrantConfig())
qdrant_client = get_rag_client()
# Example operations (same API for any provider)
client = qdrant_client # or chromadb_client
client.create_collection(collection_name="docs")
client.add_documents(
collection_name="docs",
documents=[{"id": "1", "content": "CrewAI enables collaborative AI agents."}],
)
results = client.search(collection_name="docs", query="collaborative agents", limit=3)
clear_rag_config() # optional reset
```
This RAG client is separate from Knowledge's built-in storage. Use it when you need direct vector-store control or custom retrieval pipelines.
### Basic String Knowledge Example
```python Code
@@ -717,11 +681,11 @@ CrewAI emits events during the knowledge retrieval process that you can listen f
#### Example: Monitoring Knowledge Retrieval
```python
from crewai.events import (
from crewai.utilities.events import (
KnowledgeRetrievalStartedEvent,
KnowledgeRetrievalCompletedEvent,
BaseEventListener,
)
from crewai.utilities.events.base_event_listener import BaseEventListener
class KnowledgeMonitorListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):

View File

@@ -2,7 +2,6 @@
title: 'LLMs'
description: 'A comprehensive guide to configuring and using Large Language Models (LLMs) in your CrewAI projects'
icon: 'microchip-ai'
mode: "wide"
---
## Overview
@@ -271,7 +270,7 @@ In this section, you'll find detailed examples that help you select, configure,
from crewai import LLM
llm = LLM(
model="gemini-1.5-pro-latest", # or vertex_ai/gemini-1.5-pro-latest
model="gemini/gemini-1.5-pro-latest",
temperature=0.7,
vertex_credentials=vertex_credentials_json
)
@@ -734,10 +733,10 @@ CrewAI supports streaming responses from LLMs, allowing your application to rece
CrewAI emits events for each chunk received during streaming:
```python
from crewai.events import (
from crewai.utilities.events import (
LLMStreamChunkEvent
)
from crewai.events import BaseEventListener
from crewai.utilities.events.base_event_listener import BaseEventListener
class MyCustomListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):
@@ -759,8 +758,8 @@ CrewAI supports streaming responses from LLMs, allowing your application to rece
```python
from crewai import LLM, Agent, Task, Crew
from crewai.events import LLMStreamChunkEvent
from crewai.events import BaseEventListener
from crewai.utilities.events import LLMStreamChunkEvent
from crewai.utilities.events.base_event_listener import BaseEventListener
class MyCustomListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):

View File

@@ -2,15 +2,15 @@
title: Memory
description: Leveraging memory systems in the CrewAI framework to enhance agent capabilities.
icon: database
mode: "wide"
---
## Overview
The CrewAI framework provides a sophisticated memory system designed to significantly enhance AI agent capabilities. CrewAI offers **two distinct memory approaches** that serve different use cases:
The CrewAI framework provides a sophisticated memory system designed to significantly enhance AI agent capabilities. CrewAI offers **three distinct memory approaches** that serve different use cases:
1. **Basic Memory System** - Built-in short-term, long-term, and entity memory
2. **External Memory** - Standalone external memory providers
2. **User Memory** - User-specific memory with Mem0 integration (legacy approach)
3. **External Memory** - Standalone external memory providers (new approach)
## Memory System Components
@@ -19,7 +19,7 @@ The CrewAI framework provides a sophisticated memory system designed to signific
| **Short-Term Memory**| Temporarily stores recent interactions and outcomes using `RAG`, enabling agents to recall and utilize information relevant to their current context during the current execution.|
| **Long-Term Memory** | Preserves valuable insights and learnings from past executions, allowing agents to build and refine their knowledge over time. |
| **Entity Memory** | Captures and organizes information about entities (people, places, concepts) encountered during tasks, facilitating deeper understanding and relationship mapping. Uses `RAG` for storing entity information. |
| **Contextual Memory**| Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, `ExternalMemory` and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
| **Contextual Memory**| Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
## 1. Basic Memory System (Recommended)
@@ -202,7 +202,7 @@ crew = Crew(
tasks=[task],
memory=True,
embedder={
"provider": "anthropic", # Match your LLM provider
"provider": "anthropic", # Match your LLM provider
"config": {
"api_key": "your-anthropic-key",
"model": "text-embedding-3-small"
@@ -540,71 +540,16 @@ crew = Crew(
)
```
### Mem0 Provider
Short-Term Memory and Entity Memory both support tight integration with Mem0 OSS and the Mem0 Client as a provider. Here is how you can use Mem0 as a provider.
```python
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.memory.entity.entity_memory import EntityMemory
mem0_oss_embedder_config = {
"provider": "mem0",
"config": {
"user_id": "john",
"local_mem0_config": {
"vector_store": {"provider": "qdrant","config": {"host": "localhost", "port": 6333}},
"llm": {"provider": "openai","config": {"api_key": "your-api-key", "model": "gpt-4"}},
"embedder": {"provider": "openai","config": {"api_key": "your-api-key", "model": "text-embedding-3-small"}}
},
"infer": True # Optional defaults to True
},
}
mem0_client_embedder_config = {
"provider": "mem0",
"config": {
"user_id": "john",
"org_id": "my_org_id", # Optional
"project_id": "my_project_id", # Optional
"api_key": "custom-api-key" # Optional - overrides env var
"run_id": "my_run_id", # Optional - for short-term memory
"includes": "include1", # Optional
"excludes": "exclude1", # Optional
"infer": True # Optional defaults to True
"custom_categories": new_categories # Optional - custom categories for user memory
},
}
short_term_memory_mem0_oss = ShortTermMemory(embedder_config=mem0_oss_embedder_config) # Short Term Memory with Mem0 OSS
short_term_memory_mem0_client = ShortTermMemory(embedder_config=mem0_client_embedder_config) # Short Term Memory with Mem0 Client
entity_memory_mem0_oss = EntityMemory(embedder_config=mem0_oss_embedder_config) # Entity Memory with Mem0 OSS
entity_memory_mem0_client = EntityMemory(embedder_config=mem0_client_embedder_config) # Entity Memory with Mem0 Client
crew = Crew(
memory=True,
short_term_memory=short_term_memory_mem0_oss, # or short_term_memory_mem0_client
entity_memory=entity_memory_mem0_oss # or entity_memory_mem0_client
)
```
### Choosing the Right Embedding Provider
When selecting an embedding provider, consider factors like performance, privacy, cost, and integration needs.
Below is a comparison to help you decide:
| Provider | Best For | Pros | Cons |
| -------------- | ------------------------------ | --------------------------------- | ------------------------- |
| **OpenAI** | General use, high reliability | High quality, widely tested | Paid service, API key required |
| **Ollama** | Privacy-focused, cost savings | Free, runs locally, fully private | Requires local installation/setup |
| **Google AI** | Integration in Google ecosystem| Strong performance, good support | Google account required |
| **Azure OpenAI** | Enterprise & compliance needs| Enterprise-grade features, security | More complex setup process |
| **Cohere** | Multilingual content handling | Excellent language support | More niche use cases |
| **VoyageAI** | Information retrieval & search | Optimized for retrieval tasks | Relatively new provider |
| **Mem0** | Per-user personalization | Search-optimized embeddings | Paid service, API key required |
| Provider | Best For | Pros | Cons |
|:---------|:----------|:------|:------|
| **OpenAI** | General use, reliability | High quality, well-tested | Cost, requires API key |
| **Ollama** | Privacy, cost savings | Free, local, private | Requires local setup |
| **Google AI** | Google ecosystem | Good performance | Requires Google account |
| **Azure OpenAI** | Enterprise, compliance | Enterprise features | Complex setup |
| **Cohere** | Multilingual content | Great language support | Specialized use case |
| **VoyageAI** | Retrieval tasks | Optimized for search | Newer provider |
### Environment Variable Configuration
@@ -678,7 +623,7 @@ for provider in providers_to_test:
**Model not found errors:**
```python
# Verify model availability
from crewai.rag.embeddings.configurator import EmbeddingConfigurator
from crewai.utilities.embedding_configurator import EmbeddingConfigurator
configurator = EmbeddingConfigurator()
try:
@@ -739,29 +684,67 @@ print(f"OpenAI: {openai_time:.2f}s")
print(f"Ollama: {ollama_time:.2f}s")
```
### Entity Memory batching behavior
## 2. User Memory with Mem0 (Legacy)
Entity Memory supports batching when saving multiple entities at once. When you pass a list of `EntityMemoryItem`, the system:
<Warning>
**Legacy Approach**: While fully functional, this approach is considered legacy. For new projects requiring user-specific memory, consider using External Memory instead.
</Warning>
- Emits a single `MemorySaveStartedEvent` with `entity_count`
- Saves each entity internally, collecting any partial errors
- Emits `MemorySaveCompletedEvent` with aggregate metadata (saved count, errors)
- Raises a partial-save exception if some entities failed (includes counts)
User Memory integrates with [Mem0](https://mem0.ai/) to provide user-specific memory that persists across sessions and integrates with the crew's contextual memory system.
This improves performance and observability when writing many entities in one operation.
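To observe this behavior, you can attach a listener for the save events. A minimal sketch, assuming the field names implied above (`entity_count`, aggregate metadata) match your installed version:

```python
from crewai.utilities.events import (
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener

class BatchSaveMonitor(BaseEventListener):
    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(MemorySaveStartedEvent)
        def on_save_started(source, event):
            # entity_count is populated for batched entity saves
            print(f"Saving batch of {getattr(event, 'entity_count', 'unknown')} entities")

        @crewai_event_bus.on(MemorySaveCompletedEvent)
        def on_save_completed(source, event):
            # Aggregate metadata carries the saved count and any partial errors
            print(f"Batch save finished: {getattr(event, 'metadata', {})}")

batch_monitor = BatchSaveMonitor()
```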
### Prerequisites
```bash
pip install mem0ai
```
## 2. External Memory
External Memory provides a standalone memory system that operates independently from the crew's built-in memory. This is ideal for specialized memory providers or cross-application memory sharing.
### Basic External Memory with Mem0
### Mem0 Cloud Configuration
```python
import os
from crewai import Agent, Crew, Process, Task
from crewai.memory.external.external_memory import ExternalMemory
from crewai import Crew, Process
# Create external memory instance with local Mem0 Configuration
external_memory = ExternalMemory(
embedder_config={
# Set your Mem0 API key
os.environ["MEM0_API_KEY"] = "m0-your-api-key"
crew = Crew(
agents=[...],
tasks=[...],
memory=True, # Required for contextual memory integration
memory_config={
"provider": "mem0",
"config": {"user_id": "john"},
"user_memory": {} # Required - triggers user memory initialization
},
process=Process.sequential,
verbose=True
)
```
### Advanced Mem0 Configuration
```python
crew = Crew(
agents=[...],
tasks=[...],
memory=True,
memory_config={
"provider": "mem0",
"config": {
"user_id": "john",
"org_id": "my_org_id", # Optional
"project_id": "my_project_id", # Optional
"api_key": "custom-api-key" # Optional - overrides env var
},
"user_memory": {}
}
)
```
### Local Mem0 Configuration
```python
crew = Crew(
agents=[...],
tasks=[...],
memory=True,
memory_config={
"provider": "mem0",
"config": {
"user_id": "john",
@@ -778,60 +761,37 @@ external_memory = ExternalMemory(
"provider": "openai",
"config": {"api_key": "your-api-key", "model": "text-embedding-3-small"}
}
},
"infer": True # Optional defaults to True
}
},
"user_memory": {}
}
)
crew = Crew(
agents=[...],
tasks=[...],
external_memory=external_memory, # Separate from basic memory
process=Process.sequential,
verbose=True
)
```
### Advanced External Memory with Mem0 Client
When using the Mem0 Client, you can customize the memory configuration further with parameters like 'includes', 'excludes', 'custom_categories', 'infer', and 'run_id' (the last applies only to short-term memory).
You can find more details in the [Mem0 documentation](https://docs.mem0.ai/).
## 3. External Memory (New Approach)
External Memory provides a standalone memory system that operates independently from the crew's built-in memory. This is ideal for specialized memory providers or cross-application memory sharing.
### Basic External Memory with Mem0
```python
import os
from crewai import Agent, Crew, Process, Task
from crewai.memory.external.external_memory import ExternalMemory
new_categories = [
{"lifestyle_management_concerns": "Tracks daily routines, habits, hobbies and interests including cooking, time management and work-life balance"},
{"seeking_structure": "Documents goals around creating routines, schedules, and organized systems in various life areas"},
{"personal_information": "Basic information about the user including name, preferences, and personality traits"}
]
os.environ["MEM0_API_KEY"] = "your-api-key"
# Create external memory instance with Mem0 Client
# Create external memory instance
external_memory = ExternalMemory(
embedder_config={
"provider": "mem0",
"config": {
"user_id": "john",
"org_id": "my_org_id", # Optional
"project_id": "my_project_id", # Optional
"api_key": "custom-api-key" # Optional - overrides env var
"run_id": "my_run_id", # Optional - for short-term memory
"includes": "include1", # Optional
"excludes": "exclude1", # Optional
"infer": True # Optional defaults to True
"custom_categories": new_categories # Optional - custom categories for user memory
},
"config": {"user_id": "U-123"}
}
)
crew = Crew(
agents=[...],
tasks=[...],
external_memory=external_memory, # Separate from basic memory
external_memory=external_memory, # Separate from basic memory
process=Process.sequential,
verbose=True
)
@@ -870,18 +830,17 @@ crew = Crew(
)
```
## 🧠 Memory System Comparison
| **Category** | **Feature** | **Basic Memory** | **External Memory** |
|---------------------|------------------------|-----------------------------|------------------------------|
| **Ease of Use** | Setup Complexity | Simple | Moderate |
| | Integration | Built-in (contextual) | Standalone |
| **Persistence** | Storage | Local files | Custom / Mem0 |
| | Cross-session Support | ✅ | ✅ |
| **Personalization** | User-specific Memory | ❌ | ✅ |
| | Custom Providers | Limited | Any provider |
| **Use Case Fit** | Recommended For | Most general use cases | Specialized / custom needs |
## Memory System Comparison
| Feature | Basic Memory | User Memory (Legacy) | External Memory |
|---------|-------------|---------------------|----------------|
| **Setup Complexity** | Simple | Medium | Medium |
| **Integration** | Built-in contextual | Contextual + User-specific | Standalone |
| **Storage** | Local files | Mem0 Cloud/Local | Custom/Mem0 |
| **Cross-session** | ✅ | ✅ | ✅ |
| **User-specific** | ❌ | ✅ | ✅ |
| **Custom providers** | Limited | Mem0 only | Any provider |
| **Recommended for** | Most use cases | Legacy projects | Specialized needs |
## Supported Embedding Providers
@@ -1053,8 +1012,8 @@ CrewAI emits the following memory-related events:
Track memory operation timing to optimize your application:
```python
from crewai.events import (
BaseEventListener,
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events import (
MemoryQueryCompletedEvent,
MemorySaveCompletedEvent
)
@@ -1088,8 +1047,8 @@ memory_monitor = MemoryPerformanceMonitor()
Log memory operations for debugging and insights:
```python
from crewai.events import (
BaseEventListener,
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events import (
MemorySaveStartedEvent,
MemoryQueryStartedEvent,
MemoryRetrievalCompletedEvent
@@ -1129,8 +1088,8 @@ memory_logger = MemoryLogger()
Capture and respond to memory errors:
```python
from crewai.events import (
BaseEventListener,
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events import (
MemorySaveFailedEvent,
MemoryQueryFailedEvent
)
@@ -1179,8 +1138,8 @@ error_tracker = MemoryErrorTracker(notify_email="admin@example.com")
Memory events can be forwarded to analytics and monitoring platforms to track performance metrics, detect anomalies, and visualize memory usage patterns:
```python
from crewai.events import (
BaseEventListener,
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events import (
MemoryQueryCompletedEvent,
MemorySaveCompletedEvent
)

View File

@@ -2,7 +2,6 @@
title: Planning
description: Learn how to add planning to your CrewAI Crew and improve their performance.
icon: ruler-combined
mode: "wide"
---
## Overview

View File

@@ -2,7 +2,6 @@
title: Processes
description: Detailed guide on workflow management through processes in CrewAI, with updated implementation details.
icon: bars-staggered
mode: "wide"
---
## Overview

View File

@@ -2,7 +2,6 @@
title: Reasoning
description: "Learn how to enable and use agent reasoning to improve task execution."
icon: brain
mode: "wide"
---
## Overview

View File

@@ -2,7 +2,6 @@
title: Tasks
description: Detailed guide on managing and creating tasks within the CrewAI framework.
icon: list-check
mode: "wide"
---
## Overview
@@ -14,7 +13,7 @@ Tasks provide all necessary details for execution, such as a description, the ag
Tasks within CrewAI can be collaborative, requiring multiple agents to work together. This is managed through the task properties and orchestrated by the Crew's process, enhancing teamwork and efficiency.
<Note type="info" title="Enterprise Enhancement: Visual Task Builder">
CrewAI AMP includes a Visual Task Builder in Crew Studio that simplifies complex task creation and chaining. Design your task flows visually and test them in real-time without writing code.
CrewAI Enterprise includes a Visual Task Builder in Crew Studio that simplifies complex task creation and chaining. Design your task flows visually and test them in real-time without writing code.
![Task Builder Screenshot](/images/enterprise/crew-studio-interface.png)
@@ -55,17 +54,9 @@ crew = Crew(
| **Markdown** _(optional)_ | `markdown` | `Optional[bool]` | Whether the task should instruct the agent to return the final answer formatted in Markdown. Defaults to False. |
| **Config** _(optional)_ | `config` | `Optional[Dict[str, Any]]` | Task-specific configuration parameters. |
| **Output File** _(optional)_ | `output_file` | `Optional[str]` | File path for storing the task output. |
| **Create Directory** _(optional)_ | `create_directory` | `Optional[bool]` | Whether to create the directory for output_file if it doesn't exist. Defaults to True. |
| **Output JSON** _(optional)_ | `output_json` | `Optional[Type[BaseModel]]` | A Pydantic model to structure the JSON output. |
| **Output Pydantic** _(optional)_ | `output_pydantic` | `Optional[Type[BaseModel]]` | A Pydantic model for task output. |
| **Callback** _(optional)_ | `callback` | `Optional[Any]` | Function/object to be executed after task completion. |
| **Guardrail** _(optional)_ | `guardrail` | `Optional[Callable]` | Function to validate task output before proceeding to next task. |
| **Guardrail Max Retries** _(optional)_ | `guardrail_max_retries` | `Optional[int]` | Maximum number of retries when guardrail validation fails. Defaults to 3. |
<Note type="warning" title="Deprecated: max_retries">
The task attribute `max_retries` is deprecated and will be removed in v1.0.0.
Use `guardrail_max_retries` instead to control retry attempts when a guardrail fails.
</Note>
## Creating Tasks
@@ -341,11 +332,9 @@ Task guardrails provide a way to validate and transform task outputs before they
are passed to the next task. This feature helps ensure data quality and provides
feedback to agents when their output doesn't meet specific criteria.
Guardrails are implemented as Python functions that contain custom validation logic, giving you complete control over the validation process and ensuring reliable, deterministic results.
### Using Task Guardrails
### Function-Based Guardrails
To add a function-based guardrail to a task, provide a validation function through the `guardrail` parameter:
To add a guardrail to a task, provide a validation function through the `guardrail` parameter:
```python Code
from typing import Tuple, Union, Dict, Any
@@ -383,7 +372,9 @@ blog_task = Task(
- On success: it returns a tuple of `(bool, Any)`. For example: `(True, validated_result)`
- On failure: it returns a tuple of `(bool, str)`. For example: `(False, "Error message explaining the failure")`
### LLMGuardrail
The `LLMGuardrail` class offers a robust mechanism for validating task outputs.
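A minimal sketch of attaching it to a task follows; the `LLMGuardrail` import path is an assumption that may vary by version, and a fuller example appears under Task Guardrails below:

```python Code
from crewai import Task
from crewai.llm import LLM
from crewai.tasks.llm_guardrail import LLMGuardrail  # assumed path - check your version

task = Task(
    description="Generate JSON data",
    expected_output="Valid JSON object",
    guardrail=LLMGuardrail(
        description="Ensure the response is a valid JSON object",
        llm=LLM(model="gpt-4o-mini"),
    ),
)
```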
### Error Handling Best Practices
@@ -438,7 +429,7 @@ When a guardrail returns `(False, error)`:
2. The agent attempts to fix the issue
3. The process repeats until:
- The guardrail returns `(True, result)`
- Maximum retries are reached (`guardrail_max_retries`)
- Maximum retries are reached
Example with retry handling:
```python Code
@@ -459,7 +450,7 @@ task = Task(
expected_output="A valid JSON object",
agent=analyst,
guardrail=validate_json_output,
guardrail_max_retries=3 # Limit retry attempts
max_retries=3 # Limit retry attempts
)
```
@@ -807,103 +798,197 @@ While creating and executing tasks, certain validation mechanisms are in place t
These validations help in maintaining the consistency and reliability of task executions within the crewAI framework.
## Task Guardrails
Task guardrails provide a powerful way to validate, transform, or filter task outputs before they are passed to the next task. Guardrails are optional functions that execute before the next task starts, allowing you to ensure that task outputs meet specific requirements or formats.
### Basic Usage
#### Define your own validation logic
```python Code
import json
from typing import Tuple, Union

from crewai import Task
def validate_json_output(result: str) -> Tuple[bool, Union[dict, str]]:
"""Validate that the output is valid JSON."""
try:
json_data = json.loads(result)
return (True, json_data)
except json.JSONDecodeError:
return (False, "Output must be valid JSON")
task = Task(
description="Generate JSON data",
expected_output="Valid JSON object",
guardrail=validate_json_output
)
```
#### Leverage a no-code approach for validation
```python Code
from crewai import Task
task = Task(
description="Generate JSON data",
expected_output="Valid JSON object",
guardrail="Ensure the response is a valid JSON object"
)
```
#### Using YAML
```yaml
research_task:
...
guardrail: make sure each bullet contains a minimum of 100 words
...
```
```python Code
@CrewBase
class InternalCrew:
agents_config = "config/agents.yaml"
tasks_config = "config/tasks.yaml"
...
@task
def research_task(self):
return Task(config=self.tasks_config["research_task"]) # type: ignore[index]
...
```
#### Use custom models for validation
```python Code
from crewai import Task
from crewai.llm import LLM
from crewai.tasks.llm_guardrail import LLMGuardrail  # assumed import path - may vary by version
task = Task(
description="Generate JSON data",
expected_output="Valid JSON object",
guardrail=LLMGuardrail(
description="Ensure the response is a valid JSON object",
llm=LLM(model="gpt-4o-mini"),
)
)
```
### How Guardrails Work
1. **Optional Attribute**: Guardrails are an optional attribute at the task level, allowing you to add validation only where needed.
2. **Execution Timing**: The guardrail function is executed before the next task starts, ensuring valid data flow between tasks.
3. **Return Format**: Guardrails must return a tuple of `(success, data)`:
- If `success` is `True`, `data` is the validated/transformed result
- If `success` is `False`, `data` is the error message
4. **Result Routing**:
- On success (`True`), the result is automatically passed to the next task
- On failure (`False`), the error is sent back to the agent to generate a new answer
### Common Use Cases
#### Data Format Validation
```python Code
def validate_email_format(result: str) -> Tuple[bool, str]:
"""Ensure the output contains a valid email address."""
import re
email_pattern = r'^[\w\.-]+@[\w\.-]+\.\w+$'
if re.match(email_pattern, result.strip()):
return (True, result.strip())
return (False, "Output must be a valid email address")
```
#### Content Filtering
```python Code
def filter_sensitive_info(result: str) -> Tuple[bool, str]:
"""Remove or validate sensitive information."""
sensitive_patterns = ['SSN:', 'password:', 'secret:']
for pattern in sensitive_patterns:
if pattern.lower() in result.lower():
return (False, f"Output contains sensitive information ({pattern})")
return (True, result)
```
#### Data Transformation
```python Code
def normalize_phone_number(result: str) -> Tuple[bool, str]:
"""Ensure phone numbers are in a consistent format."""
import re
digits = re.sub(r'\D', '', result)
if len(digits) == 10:
formatted = f"({digits[:3]}) {digits[3:6]}-{digits[6:]}"
return (True, formatted)
return (False, "Output must be a 10-digit phone number")
```
### Advanced Features
#### Chaining Multiple Validations
```python Code
def chain_validations(*validators):
"""Chain multiple validators together."""
def combined_validator(result):
for validator in validators:
success, data = validator(result)
if not success:
return (False, data)
result = data
return (True, result)
return combined_validator
# Usage
task = Task(
description="Get user contact info",
expected_output="Email and phone",
guardrail=chain_validations(
validate_email_format,
filter_sensitive_info
)
)
```
#### Custom Retry Logic
```python Code
task = Task(
description="Generate data",
expected_output="Valid data",
guardrail=validate_data,
max_retries=5 # Override default retry limit
)
```
## Creating Directories when Saving Files
The `create_directory` parameter controls whether CrewAI should automatically create directories when saving task outputs to files. This feature is particularly useful for organizing outputs and ensuring that file paths are correctly structured, especially when working with complex project hierarchies.
### Default Behavior
By default, `create_directory=True`, which means CrewAI will automatically create any missing directories in the output file path:
You can now specify whether a task should create directories when saving its output to a file. This is particularly useful for organizing outputs and ensuring that file paths are correctly structured.
```python Code
# Default behavior - directories are created automatically
report_task = Task(
description='Generate a comprehensive market analysis report',
expected_output='A detailed market analysis with charts and insights',
agent=analyst_agent,
output_file='reports/2025/market_analysis.md', # Creates 'reports/2025/' if it doesn't exist
markdown=True
# ...
save_output_task = Task(
description='Save the summarized AI news to a file',
expected_output='File saved successfully',
agent=research_agent,
tools=[file_save_tool],
output_file='outputs/ai_news_summary.txt',
create_directory=True
)
```
### Disabling Directory Creation
If you want to prevent automatic directory creation and ensure that the directory already exists, set `create_directory=False`:
```python Code
# Strict mode - directory must already exist
strict_output_task = Task(
description='Save critical data that requires existing infrastructure',
expected_output='Data saved to pre-configured location',
agent=data_agent,
output_file='secure/vault/critical_data.json',
create_directory=False # Will raise RuntimeError if 'secure/vault/' doesn't exist
)
```
### YAML Configuration
You can also configure this behavior in your YAML task definitions:
```yaml tasks.yaml
analysis_task:
description: >
Generate quarterly financial analysis
expected_output: >
A comprehensive financial report with quarterly insights
agent: financial_analyst
output_file: reports/quarterly/q4_2024_analysis.pdf
create_directory: true # Automatically create 'reports/quarterly/' directory
audit_task:
description: >
Perform compliance audit and save to existing audit directory
expected_output: >
A compliance audit report
agent: auditor
output_file: audit/compliance_report.md
create_directory: false # Directory must already exist
```
### Use Cases
**Automatic Directory Creation (`create_directory=True`):**
- Development and prototyping environments
- Dynamic report generation with date-based folders
- Automated workflows where directory structure may vary
- Multi-tenant applications with user-specific folders
**Manual Directory Management (`create_directory=False`):**
- Production environments with strict file system controls
- Security-sensitive applications where directories must be pre-configured
- Systems with specific permission requirements
- Compliance environments where directory creation is audited
### Error Handling
When `create_directory=False` and the directory doesn't exist, CrewAI will raise a `RuntimeError`:
```python Code
try:
result = crew.kickoff()
except RuntimeError as e:
# Handle missing directory error
print(f"Directory creation failed: {e}")
# Create directory manually or use fallback location
#...
```
Check out the video below to see how to use structured outputs in CrewAI:
<iframe
className="w-full aspect-video rounded-xl"
width="560"
height="315"
src="https://www.youtube.com/embed/dNpKQk5uxHw"
title="Structured outputs in CrewAI"
frameBorder="0"
title="YouTube video player"
frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
referrerPolicy="strict-origin-when-cross-origin"
allowFullScreen
referrerpolicy="strict-origin-when-cross-origin"
allowfullscreen
></iframe>
## Conclusion

View File

@@ -2,7 +2,6 @@
title: Testing
description: Learn how to test your CrewAI Crew and evaluate their performance.
icon: vial
mode: "wide"
---
## Overview

View File

@@ -2,7 +2,6 @@
title: Tools
description: Understanding and leveraging tools within the CrewAI framework for agent collaboration and task execution.
icon: screwdriver-wrench
mode: "wide"
---
## Overview
@@ -17,7 +16,7 @@ This includes tools from the [CrewAI Toolkit](https://github.com/joaomdmoura/cre
enabling everything from simple searches to complex interactions and effective teamwork among agents.
<Note type="info" title="Enterprise Enhancement: Tools Repository">
CrewAI AMP provides a comprehensive Tools Repository with pre-built integrations for common business systems and APIs. Deploy agents with enterprise tools in minutes instead of days.
CrewAI Enterprise provides a comprehensive Tools Repository with pre-built integrations for common business systems and APIs. Deploy agents with enterprise tools in minutes instead of days.
The Enterprise Tools Repository includes:
- Pre-built connectors for popular enterprise systems
@@ -208,7 +207,7 @@ from crewai.tools import BaseTool
class AsyncCustomTool(BaseTool):
name: str = "async_custom_tool"
description: str = "An asynchronous custom tool"
async def _run(self, query: str = "") -> str:
"""Asynchronously run the tool"""
# Your async implementation here

Some files were not shown because too many files have changed in this diff