Compare commits

...

7 Commits

Author SHA1 Message Date
lorenzejay
3bb2162f64 more cassettes 2025-07-02 15:40:39 -07:00
lorenzejay
06ea9dcfe6 test: enhance cache call assertions in crew tests
* Improved the test for cache hits between agents by filtering mock calls to ensure they include the expected 'tool' and 'input' keywords (see the sketch after this entry).
* Added assertions to verify the number of cache calls and their expected arguments, enhancing the reliability of the test.
* Cleaned up whitespace and improved readability in various test cases for better maintainability.
2025-07-02 15:04:36 -07:00
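A minimal, self-contained sketch of the call-filtering assertion pattern described in the commit above. The mock name, tool name, and argument values are assumptions for illustration only; they are not the actual fixtures or cache-handler API used in the crew tests.

```python
from unittest.mock import MagicMock


def test_cache_call_filtering_sketch():
    # Illustrative sketch: names and values below are assumed, not taken from the real test suite.
    cache_mock = MagicMock()

    # Stand-in for what a crew run would record through the mocked cache.
    cache_mock(tool="multiplier", input='{"first_number": 2, "second_number": 6}', output="12")
    cache_mock("unrelated bookkeeping call")

    # Keep only the calls that carry the expected 'tool' and 'input' keywords.
    cache_calls = [
        c for c in cache_mock.call_args_list
        if "tool" in c.kwargs and "input" in c.kwargs
    ]

    # Verify both the number of cache calls and their expected arguments.
    assert len(cache_calls) == 1
    assert cache_calls[0].kwargs["tool"] == "multiplier"
```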
lorenzejay
20a517591d chore: pin agentops dependency version in pyproject.toml and uv.lock
* Updated the agentops dependency from an open range (>=0.3.0) to an exact pin (0.3.18) for consistency and to avoid potential compatibility issues.
* Adjusted related entries in uv.lock to reflect the pinned version, ensuring alignment across project files.
2025-07-02 14:58:33 -07:00
lorenzejay
dd6507bc55 chore: pin json-repair dependency version in pyproject.toml and uv.lock
* Changed the json-repair dependency from a lower-bound range (>=0.25.2) to an exact pin (==0.25.2) for consistency and to avoid potential compatibility issues.
* Adjusted related entries in uv.lock to reflect the pinned version, ensuring alignment across project files.
2025-07-02 14:44:11 -07:00
lorenzejay
165190f87e chore: update uv.lock for dependency resolution and Python version compatibility
* Incremented revision to 2.
* Updated resolution markers to include support for Python 3.13 and adjusted platform checks for better compatibility.
* Added new wheel URLs for zstandard version 0.23.0 to ensure availability across various platforms.
2025-07-02 14:34:41 -07:00
lorenzejay
1c170ec411 chore: update pyproject.toml to exclude documentation from build targets
* Added exclusions for the `docs` directory in both wheel and sdist build targets to streamline the build process and reduce unnecessary file inclusion.
2025-07-02 14:27:03 -07:00
lorenzejay
22dc772d97 fix: clean up whitespace and update dependencies
* Removed unnecessary whitespace in multiple files for consistency.
* Updated `crewai-tools` dependency version to `0.49.0` in `pyproject.toml` and related template files.
* Bumped CrewAI version to `0.140.0` in `__init__.py` for alignment with updated dependencies.
2025-07-02 14:26:21 -07:00
15 changed files with 4698 additions and 2432 deletions

View File

@@ -260,7 +260,7 @@ def handle_success(self):
# Handle success case
pass
@listen("failure_path")
@listen("failure_path")
def handle_failure(self):
# Handle failure case
pass
@@ -288,7 +288,7 @@ class SelectiveFlow(Flow):
def critical_step(self):
# Only this method's state is persisted
self.state["important_data"] = "value"
@start()
def temporary_step(self):
# This method's state is not persisted
@@ -322,20 +322,20 @@ flow.plot("workflow_diagram") # Generates HTML visualization
class CyclicFlow(Flow):
max_iterations = 5
current_iteration = 0
@start("loop")
def process_iteration(self):
if self.current_iteration >= self.max_iterations:
return
# Process current iteration
self.current_iteration += 1
@router(process_iteration)
def check_continue(self):
if self.current_iteration < self.max_iterations:
return "loop" # Continue cycling
return "complete"
@listen("complete")
def finalize(self):
# Final processing
@@ -369,7 +369,7 @@ def risky_operation(self):
self.state["success"] = False
return None
@listen(risky_operation)
@listen(risky_operation)
def handle_result(self, result):
if self.state.get("success", False):
# Handle success case
@@ -390,7 +390,7 @@ class CrewOrchestrationFlow(Flow[WorkflowState]):
result = research_crew.crew().kickoff(inputs={"topic": self.state.research_topic})
self.state.research_results = result.raw
return result
@listen(research_phase)
def analysis_phase(self, research_results):
analysis_crew = AnalysisCrew()
@@ -400,13 +400,13 @@ class CrewOrchestrationFlow(Flow[WorkflowState]):
})
self.state.analysis_results = result.raw
return result
@router(analysis_phase)
def decide_next_action(self):
if self.state.analysis_results.confidence > 0.7:
return "generate_report"
return "additional_research"
@listen("generate_report")
def final_report(self):
reporting_crew = ReportingCrew()
@@ -439,7 +439,7 @@ class CrewOrchestrationFlow(Flow[WorkflowState]):
## CrewAI Version Compatibility:
- Stay updated with CrewAI releases for new features and bug fixes
- Test crew functionality when upgrading CrewAI versions
- Use version constraints in pyproject.toml (e.g., "crewai[tools]>=0.134.0,<1.0.0")
- Use version constraints in pyproject.toml (e.g., "crewai[tools]>=0.140.0,<1.0.0")
- Monitor deprecation warnings for future compatibility
## Code Examples and Implementation Patterns
@@ -464,22 +464,22 @@ class ResearchOutput(BaseModel):
@CrewBase
class ResearchCrew():
"""Advanced research crew with structured outputs and validation"""
agents: List[BaseAgent]
tasks: List[Task]
@before_kickoff
def setup_environment(self):
"""Initialize environment before crew execution"""
print("🚀 Setting up research environment...")
# Validate API keys, create directories, etc.
@after_kickoff
def cleanup_and_report(self, output):
"""Handle post-execution tasks"""
print(f"✅ Research completed. Generated {len(output.tasks_output)} task outputs")
print(f"📊 Token usage: {output.token_usage}")
@agent
def researcher(self) -> Agent:
return Agent(
@@ -490,7 +490,7 @@ class ResearchCrew():
max_iter=15,
max_execution_time=1800
)
@agent
def analyst(self) -> Agent:
return Agent(
@@ -499,7 +499,7 @@ class ResearchCrew():
verbose=True,
memory=True
)
@task
def research_task(self) -> Task:
return Task(
@@ -507,7 +507,7 @@ class ResearchCrew():
agent=self.researcher(),
output_pydantic=ResearchOutput
)
@task
def validation_task(self) -> Task:
return Task(
@@ -517,7 +517,7 @@ class ResearchCrew():
guardrail=self.validate_research_quality,
max_retries=3
)
def validate_research_quality(self, output) -> tuple[bool, str]:
"""Custom guardrail to ensure research quality"""
content = output.raw
@@ -526,7 +526,7 @@ class ResearchCrew():
if not any(keyword in content.lower() for keyword in ['conclusion', 'finding', 'result']):
return False, "Missing key analytical elements."
return True, content
@crew
def crew(self) -> Crew:
return Crew(
@@ -557,13 +557,13 @@ class RobustSearchTool(BaseTool):
name: str = "robust_search"
description: str = "Perform web search with retry logic and error handling"
args_schema: Type[BaseModel] = SearchInput
def __init__(self, api_key: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
self.api_key = api_key or os.getenv("SEARCH_API_KEY")
self.rate_limit_delay = 1.0
self.last_request_time = 0
@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10)
@@ -575,43 +575,43 @@ class RobustSearchTool(BaseTool):
time_since_last = time.time() - self.last_request_time
if time_since_last < self.rate_limit_delay:
time.sleep(self.rate_limit_delay - time_since_last)
# Input validation
if not query or len(query.strip()) == 0:
return "Error: Empty search query provided"
if len(query) > 500:
return "Error: Search query too long (max 500 characters)"
# Perform search
results = self._perform_search(query, max_results, timeout)
self.last_request_time = time.time()
return self._format_results(results)
except requests.exceptions.Timeout:
return f"Search timed out after {timeout} seconds"
except requests.exceptions.RequestException as e:
return f"Search failed due to network error: {str(e)}"
except Exception as e:
return f"Unexpected error during search: {str(e)}"
def _perform_search(self, query: str, max_results: int, timeout: int) -> List[dict]:
"""Implement actual search logic here"""
# Your search API implementation
pass
def _format_results(self, results: List[dict]) -> str:
"""Format search results for LLM consumption"""
if not results:
return "No results found for the given query."
formatted = "Search Results:\n\n"
for i, result in enumerate(results[:10], 1):
formatted += f"{i}. {result.get('title', 'No title')}\n"
formatted += f" URL: {result.get('url', 'No URL')}\n"
formatted += f" Summary: {result.get('snippet', 'No summary')}\n\n"
return formatted
```
@@ -623,20 +623,20 @@ from crewai.memory.storage.mem0_storage import Mem0Storage
class AdvancedMemoryManager:
"""Enhanced memory management for CrewAI applications"""
def __init__(self, crew, config: dict = None):
self.crew = crew
self.config = config or {}
self.setup_memory_systems()
def setup_memory_systems(self):
"""Configure multiple memory systems"""
# Short-term memory for current session
self.short_term = ShortTermMemory()
# Long-term memory for cross-session persistence
self.long_term = LongTermMemory()
# External memory with Mem0 (if configured)
if self.config.get('use_external_memory'):
self.external = ExternalMemory.create_storage(
@@ -649,8 +649,8 @@ class AdvancedMemoryManager:
}
}
)
def save_with_context(self, content: str, memory_type: str = "short_term",
def save_with_context(self, content: str, memory_type: str = "short_term",
metadata: dict = None, agent: str = None):
"""Save content with enhanced metadata"""
enhanced_metadata = {
@@ -659,14 +659,14 @@ class AdvancedMemoryManager:
"crew_type": self.crew.__class__.__name__,
**(metadata or {})
}
if memory_type == "short_term":
self.short_term.save(content, enhanced_metadata, agent)
elif memory_type == "long_term":
self.long_term.save(content, enhanced_metadata, agent)
elif memory_type == "external" and hasattr(self, 'external'):
self.external.save(content, enhanced_metadata, agent)
def search_across_memories(self, query: str, limit: int = 5) -> dict:
"""Search across all memory systems"""
results = {
@@ -674,23 +674,23 @@ class AdvancedMemoryManager:
"long_term": [],
"external": []
}
# Search short-term memory
results["short_term"] = self.short_term.search(query, limit=limit)
# Search long-term memory
results["long_term"] = self.long_term.search(query, limit=limit)
# Search external memory (if available)
if hasattr(self, 'external'):
results["external"] = self.external.search(query, limit=limit)
return results
def cleanup_old_memories(self, days_threshold: int = 30):
"""Clean up old memories based on age"""
cutoff_time = time.time() - (days_threshold * 24 * 60 * 60)
# Implement cleanup logic based on timestamps in metadata
# This would vary based on your specific storage implementation
pass
@@ -719,12 +719,12 @@ class TaskMetrics:
class CrewMonitor:
"""Comprehensive monitoring for CrewAI applications"""
def __init__(self, crew_name: str, log_level: str = "INFO"):
self.crew_name = crew_name
self.metrics: List[TaskMetrics] = []
self.session_start = time.time()
# Setup logging
logging.basicConfig(
level=getattr(logging, log_level),
@@ -735,7 +735,7 @@ class CrewMonitor:
]
)
self.logger = logging.getLogger(f"CrewAI.{crew_name}")
def start_task_monitoring(self, task_name: str, agent_name: str) -> dict:
"""Start monitoring a task execution"""
context = {
@@ -743,16 +743,16 @@ class CrewMonitor:
"agent_name": agent_name,
"start_time": time.time()
}
self.logger.info(f"Task started: {task_name} by {agent_name}")
return context
def end_task_monitoring(self, context: dict, success: bool = True,
def end_task_monitoring(self, context: dict, success: bool = True,
tokens_used: int = 0, error: str = None):
"""End monitoring and record metrics"""
end_time = time.time()
duration = end_time - context["start_time"]
# Get memory usage (if psutil is available)
memory_usage = None
try:
@@ -761,7 +761,7 @@ class CrewMonitor:
memory_usage = process.memory_info().rss / 1024 / 1024 # MB
except ImportError:
pass
metrics = TaskMetrics(
task_name=context["task_name"],
agent_name=context["agent_name"],
@@ -773,29 +773,29 @@ class CrewMonitor:
error_message=error,
memory_usage_mb=memory_usage
)
self.metrics.append(metrics)
# Log the completion
status = "SUCCESS" if success else "FAILED"
self.logger.info(f"Task {status}: {context['task_name']} "
f"(Duration: {duration:.2f}s, Tokens: {tokens_used})")
if error:
self.logger.error(f"Task error: {error}")
def get_performance_summary(self) -> Dict[str, Any]:
"""Generate comprehensive performance summary"""
if not self.metrics:
return {"message": "No metrics recorded yet"}
successful_tasks = [m for m in self.metrics if m.success]
failed_tasks = [m for m in self.metrics if not m.success]
total_duration = sum(m.duration for m in self.metrics)
total_tokens = sum(m.tokens_used for m in self.metrics)
avg_duration = total_duration / len(self.metrics)
return {
"crew_name": self.crew_name,
"session_duration": time.time() - self.session_start,
@@ -811,7 +811,7 @@ class CrewMonitor:
"most_token_intensive": max(self.metrics, key=lambda x: x.tokens_used).task_name if self.metrics else None,
"common_errors": self._get_common_errors()
}
def _get_common_errors(self) -> Dict[str, int]:
"""Get frequency of common errors"""
error_counts = {}
@@ -819,20 +819,20 @@ class CrewMonitor:
if metric.error_message:
error_counts[metric.error_message] = error_counts.get(metric.error_message, 0) + 1
return dict(sorted(error_counts.items(), key=lambda x: x[1], reverse=True))
def export_metrics(self, filename: str = None) -> str:
"""Export metrics to JSON file"""
if not filename:
filename = f"crew_metrics_{self.crew_name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
export_data = {
"summary": self.get_performance_summary(),
"detailed_metrics": [asdict(m) for m in self.metrics]
}
with open(filename, 'w') as f:
json.dump(export_data, f, indent=2, default=str)
self.logger.info(f"Metrics exported to {filename}")
return filename
@@ -847,10 +847,10 @@ def monitored_research_task(self) -> Task:
if context:
tokens = getattr(task_output, 'token_usage', {}).get('total', 0)
monitor.end_task_monitoring(context, success=True, tokens_used=tokens)
# Start monitoring would be called before task execution
# This is a simplified example - in practice you'd integrate this into the task execution flow
return Task(
config=self.tasks_config['research_task'],
agent=self.researcher(),
@@ -872,7 +872,7 @@ class ErrorSeverity(Enum):
class CrewError(Exception):
"""Base exception for CrewAI applications"""
def __init__(self, message: str, severity: ErrorSeverity = ErrorSeverity.MEDIUM,
def __init__(self, message: str, severity: ErrorSeverity = ErrorSeverity.MEDIUM,
context: dict = None):
super().__init__(message)
self.severity = severity
@@ -893,19 +893,19 @@ class ConfigurationError(CrewError):
class ErrorHandler:
"""Centralized error handling for CrewAI applications"""
def __init__(self, crew_name: str):
self.crew_name = crew_name
self.error_log: List[CrewError] = []
self.recovery_strategies: Dict[type, Callable] = {}
def register_recovery_strategy(self, error_type: type, strategy: Callable):
"""Register a recovery strategy for specific error types"""
self.recovery_strategies[error_type] = strategy
def handle_error(self, error: Exception, context: dict = None) -> Any:
"""Handle errors with appropriate recovery strategies"""
# Convert to CrewError if needed
if not isinstance(error, CrewError):
crew_error = CrewError(
@@ -915,11 +915,11 @@ class ErrorHandler:
)
else:
crew_error = error
# Log the error
self.error_log.append(crew_error)
self._log_error(crew_error)
# Apply recovery strategy if available
error_type = type(error)
if error_type in self.recovery_strategies:
@@ -931,21 +931,21 @@ class ErrorHandler:
ErrorSeverity.HIGH,
{"original_error": str(error), "recovery_error": str(recovery_error)}
))
# If critical, re-raise
if crew_error.severity == ErrorSeverity.CRITICAL:
raise crew_error
return None
def _log_error(self, error: CrewError):
"""Log error with appropriate level based on severity"""
logger = logging.getLogger(f"CrewAI.{self.crew_name}.ErrorHandler")
error_msg = f"[{error.severity.value.upper()}] {error}"
if error.context:
error_msg += f" | Context: {error.context}"
if error.severity in [ErrorSeverity.HIGH, ErrorSeverity.CRITICAL]:
logger.error(error_msg)
logger.error(f"Stack trace: {traceback.format_exc()}")
@@ -953,16 +953,16 @@ class ErrorHandler:
logger.warning(error_msg)
else:
logger.info(error_msg)
def get_error_summary(self) -> Dict[str, Any]:
"""Get summary of errors encountered"""
if not self.error_log:
return {"total_errors": 0}
severity_counts = {}
for error in self.error_log:
severity_counts[error.severity.value] = severity_counts.get(error.severity.value, 0) + 1
return {
"total_errors": len(self.error_log),
"severity_breakdown": severity_counts,
@@ -1004,7 +1004,7 @@ def robust_task(self) -> Task:
# Use fallback response
return "Task failed, using fallback response"
return wrapper
return Task(
config=self.tasks_config['research_task'],
agent=self.researcher()
@@ -1020,60 +1020,60 @@ from pydantic import BaseSettings, Field, validator
class Environment(str, Enum):
DEVELOPMENT = "development"
TESTING = "testing"
TESTING = "testing"
STAGING = "staging"
PRODUCTION = "production"
class CrewAISettings(BaseSettings):
"""Comprehensive settings management for CrewAI applications"""
# Environment
environment: Environment = Field(default=Environment.DEVELOPMENT)
debug: bool = Field(default=True)
# API Keys (loaded from environment)
openai_api_key: Optional[str] = Field(default=None, env="OPENAI_API_KEY")
anthropic_api_key: Optional[str] = Field(default=None, env="ANTHROPIC_API_KEY")
serper_api_key: Optional[str] = Field(default=None, env="SERPER_API_KEY")
mem0_api_key: Optional[str] = Field(default=None, env="MEM0_API_KEY")
# CrewAI Configuration
crew_max_rpm: int = Field(default=100)
crew_max_execution_time: int = Field(default=3600) # 1 hour
default_llm_model: str = Field(default="gpt-4")
fallback_llm_model: str = Field(default="gpt-3.5-turbo")
# Memory and Storage
crewai_storage_dir: str = Field(default="./storage", env="CREWAI_STORAGE_DIR")
memory_enabled: bool = Field(default=True)
memory_cleanup_interval: int = Field(default=86400) # 24 hours in seconds
# Performance
enable_caching: bool = Field(default=True)
max_retries: int = Field(default=3)
retry_delay: float = Field(default=1.0)
# Monitoring
enable_monitoring: bool = Field(default=True)
log_level: str = Field(default="INFO")
metrics_export_interval: int = Field(default=3600) # 1 hour
# Security
input_sanitization: bool = Field(default=True)
max_input_length: int = Field(default=10000)
allowed_file_types: list = Field(default=["txt", "md", "pdf", "docx"])
@validator('environment', pre=True)
def set_debug_based_on_env(cls, v):
return v
@validator('debug')
def set_debug_from_env(cls, v, values):
env = values.get('environment')
if env == Environment.PRODUCTION:
return False
return v
@validator('openai_api_key')
def validate_openai_key(cls, v):
if not v:
@@ -1081,15 +1081,15 @@ class CrewAISettings(BaseSettings):
if not v.startswith('sk-'):
raise ValueError("Invalid OpenAI API key format")
return v
@property
def is_production(self) -> bool:
return self.environment == Environment.PRODUCTION
@property
def is_development(self) -> bool:
return self.environment == Environment.DEVELOPMENT
def get_llm_config(self) -> Dict[str, Any]:
"""Get LLM configuration based on environment"""
config = {
@@ -1098,12 +1098,12 @@ class CrewAISettings(BaseSettings):
"max_tokens": 4000 if self.is_production else 2000,
"timeout": 60
}
if self.is_development:
config["model"] = self.fallback_llm_model
return config
def get_memory_config(self) -> Dict[str, Any]:
"""Get memory configuration"""
return {
@@ -1112,7 +1112,7 @@ class CrewAISettings(BaseSettings):
"cleanup_interval": self.memory_cleanup_interval,
"provider": "mem0" if self.mem0_api_key and self.is_production else "local"
}
class Config:
env_file = ".env"
env_file_encoding = 'utf-8'
@@ -1125,25 +1125,25 @@ settings = CrewAISettings()
@CrewBase
class ConfigurableCrew():
"""Crew that uses centralized configuration"""
def __init__(self):
self.settings = settings
self.validate_configuration()
def validate_configuration(self):
"""Validate configuration before crew execution"""
required_keys = [self.settings.openai_api_key]
if not all(required_keys):
raise ConfigurationError("Missing required API keys")
if not os.path.exists(self.settings.crewai_storage_dir):
os.makedirs(self.settings.crewai_storage_dir, exist_ok=True)
@agent
def adaptive_agent(self) -> Agent:
"""Agent that adapts to configuration"""
llm_config = self.settings.get_llm_config()
return Agent(
config=self.agents_config['researcher'],
llm=llm_config["model"],
@@ -1163,7 +1163,7 @@ from crewai.tasks.task_output import TaskOutput
class CrewAITestFramework:
"""Comprehensive testing framework for CrewAI applications"""
@staticmethod
def create_mock_agent(role: str = "test_agent", tools: list = None) -> Mock:
"""Create a mock agent for testing"""
@@ -1175,9 +1175,9 @@ class CrewAITestFramework:
mock_agent.llm = "gpt-3.5-turbo"
mock_agent.verbose = False
return mock_agent
@staticmethod
def create_mock_task_output(content: str, success: bool = True,
def create_mock_task_output(content: str, success: bool = True,
tokens: int = 100) -> TaskOutput:
"""Create a mock task output for testing"""
return TaskOutput(
@@ -1187,13 +1187,13 @@ class CrewAITestFramework:
pydantic=None,
json_dict=None
)
@staticmethod
def create_test_crew(agents: list = None, tasks: list = None) -> Crew:
"""Create a test crew with mock components"""
test_agents = agents or [CrewAITestFramework.create_mock_agent()]
test_tasks = tasks or []
return Crew(
agents=test_agents,
tasks=test_tasks,
@@ -1203,53 +1203,53 @@ class CrewAITestFramework:
# Example test cases
class TestResearchCrew:
"""Test cases for research crew functionality"""
def setup_method(self):
"""Setup test environment"""
self.framework = CrewAITestFramework()
self.mock_serper = Mock()
@patch('crewai_tools.SerperDevTool')
def test_agent_creation(self, mock_serper_tool):
"""Test agent creation with proper configuration"""
mock_serper_tool.return_value = self.mock_serper
crew = ResearchCrew()
researcher = crew.researcher()
assert researcher.role == "Senior Research Analyst"
assert len(researcher.tools) > 0
assert researcher.verbose is True
def test_task_validation(self):
"""Test task validation logic"""
crew = ResearchCrew()
# Test valid output
valid_output = self.framework.create_mock_task_output(
"This is a comprehensive research summary with conclusions and findings."
)
is_valid, message = crew.validate_research_quality(valid_output)
assert is_valid is True
# Test invalid output (too short)
invalid_output = self.framework.create_mock_task_output("Too short")
is_valid, message = crew.validate_research_quality(invalid_output)
assert is_valid is False
assert "brief" in message.lower()
@patch('requests.get')
def test_tool_error_handling(self, mock_requests):
"""Test tool error handling and recovery"""
# Simulate network error
mock_requests.side_effect = requests.exceptions.RequestException("Network error")
tool = RobustSearchTool()
result = tool._run("test query")
assert "network error" in result.lower()
assert "failed" in result.lower()
@pytest.mark.asyncio
async def test_crew_execution_flow(self):
"""Test complete crew execution with mocked dependencies"""
@@ -1257,18 +1257,18 @@ class TestResearchCrew:
mock_execute.return_value = self.framework.create_mock_task_output(
"Research completed successfully with findings and recommendations."
)
crew = ResearchCrew()
result = crew.crew().kickoff(inputs={"topic": "AI testing"})
assert result is not None
assert "successfully" in result.raw.lower()
def test_memory_integration(self):
"""Test memory system integration"""
crew = ResearchCrew()
memory_manager = AdvancedMemoryManager(crew)
# Test saving to memory
test_content = "Important research finding about AI"
memory_manager.save_with_context(
@@ -1277,34 +1277,34 @@ class TestResearchCrew:
metadata={"importance": "high"},
agent="researcher"
)
# Test searching memory
results = memory_manager.search_across_memories("AI research")
assert "short_term" in results
def test_error_handling_workflow(self):
"""Test error handling and recovery mechanisms"""
error_handler = ErrorHandler("test_crew")
# Test error registration and handling
test_error = TaskExecutionError("Test task failed", ErrorSeverity.MEDIUM)
result = error_handler.handle_error(test_error)
assert len(error_handler.error_log) == 1
assert error_handler.error_log[0].severity == ErrorSeverity.MEDIUM
def test_configuration_validation(self):
"""Test configuration validation"""
# Test with missing API key
with patch.dict(os.environ, {}, clear=True):
with pytest.raises(ValueError):
settings = CrewAISettings()
# Test with valid configuration
with patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-key"}):
settings = CrewAISettings()
assert settings.openai_api_key == "sk-test-key"
@pytest.mark.integration
def test_end_to_end_workflow(self):
"""Integration test for complete workflow"""
@@ -1315,41 +1315,41 @@ class TestResearchCrew:
# Performance testing
class TestCrewPerformance:
"""Performance tests for CrewAI applications"""
def test_memory_usage(self):
"""Test memory usage during crew execution"""
import psutil
import gc
process = psutil.Process()
initial_memory = process.memory_info().rss
# Create and run crew multiple times
for i in range(10):
crew = ResearchCrew()
# Simulate crew execution
del crew
gc.collect()
final_memory = process.memory_info().rss
memory_increase = final_memory - initial_memory
# Assert memory increase is reasonable (less than 100MB)
assert memory_increase < 100 * 1024 * 1024
def test_concurrent_execution(self):
"""Test concurrent crew execution"""
import concurrent.futures
def run_crew(crew_id):
crew = ResearchCrew()
# Simulate execution
return f"crew_{crew_id}_completed"
with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
futures = [executor.submit(run_crew, i) for i in range(5)]
results = [future.result() for future in futures]
assert len(results) == 5
assert all("completed" in result for result in results)
@@ -1400,7 +1400,7 @@ class TestCrewPerformance:
### Development:
1. Always use .env files for sensitive configuration
2. Implement comprehensive error handling and logging
2. Implement comprehensive error handling and logging
3. Use structured outputs with Pydantic for reliability
4. Test crew functionality with different input scenarios
5. Follow CrewAI patterns and conventions consistently
@@ -1426,4 +1426,4 @@ class TestCrewPerformance:
5. Use async patterns for I/O-bound operations
6. Implement proper connection pooling and resource management
7. Profile and optimize critical paths
8. Plan for horizontal scaling when needed
8. Plan for horizontal scaling when needed

View File

@@ -33,7 +33,7 @@ dependencies = [
"click>=8.1.7",
"appdirs>=1.4.4",
"jsonref>=1.1.0",
"json-repair>=0.25.2",
"json-repair==0.25.2",
"uv>=0.4.25",
"tomli-w>=1.1.0",
"tomli>=2.0.2",
@@ -47,11 +47,11 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = ["crewai-tools~=0.48.0"]
tools = ["crewai-tools~=0.49.0"]
embeddings = [
"tiktoken~=0.8.0"
]
agentops = ["agentops>=0.3.0"]
agentops = ["agentops==0.3.18"]
pdfplumber = [
"pdfplumber>=0.11.4",
]
@@ -123,3 +123,15 @@ path = "src/crewai/__init__.py"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
exclude = [
"docs/**",
"docs/",
]
[tool.hatch.build.targets.sdist]
exclude = [
"docs/**",
"docs/",
]

View File

@@ -28,19 +28,19 @@ _telemetry_submitted = False
def _track_install():
"""Track package installation/first-use via Scarf analytics."""
global _telemetry_submitted
if _telemetry_submitted or Telemetry._is_telemetry_disabled():
return
try:
pixel_url = "https://api.scarf.sh/v2/packages/CrewAI/crewai/docs/00f2dad1-8334-4a39-934e-003b2e1146db"
req = urllib.request.Request(pixel_url)
req.add_header('User-Agent', f'CrewAI-Python/{__version__}')
with urllib.request.urlopen(req, timeout=2): # nosec B310
_telemetry_submitted = True
except Exception:
pass
@@ -54,7 +54,7 @@ def _track_install_async():
_track_install_async()
__version__ = "0.134.0"
__version__ = "0.140.0"
__all__ = [
"Agent",
"Crew",

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.134.0,<1.0.0"
"crewai[tools]>=0.140.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.134.0,<1.0.0",
"crewai[tools]>=0.140.0,<1.0.0",
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.134.0"
"crewai[tools]>=0.140.0"
]
[tool.crewai]

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1028,4 +1028,637 @@ interactions:
status:
code: 200
message: OK
- request:
body: '{"input": ["Example: Toy Cars(Math Example): An example used to illustrate
addition by counting toy cars."], "model": "text-embedding-3-small", "encoding_format":
"base64"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '172'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SayxKyyral+/spVqwudUIuSmauHncQkOQmYkVFBSAiKKJcEsgT590r8N9xqqpj
A1ExYY45xjfzP//1119/d3lTFuPf//z196sexr//x3bslo3Z3//89T//9ddff/31n7/X/+/Mss3L
261+V7/Tf2/W71u5/P3PX+x/H/m/J/3z199GidoTsqiXc6z1VOAqVIG3cv6hWV69X6PVmpZJIGeG
Ljjqn7CVmAbrIAqdxU01A72LRSRyzxx7wtqhhPiJszzR7rJmNt5ZAV4PM8KnOEjy7j4JIrCyF4+P
EpdSrqvADFFdWTjvOTefzfulAL6rIazT4aOuBzS0ULmMJr7QgoL5CpgMKm9HndgDjiPSFYIEMfZP
OAwno1nUN8dCsdNSknFE6Yk8OxrIeL0gWms30WLcLzxwGsTjhCOywwY49lAcz0ci3dCN8tTrvkC+
5pGHhNxXh+6GM6ivionlopLBaHwkAx33s4hzP0uatatXEdVt3xCj1M4OL/F+gQaN/eJYlUlEVVJp
CClH19vRvqW0FDsJkFPwwfHBvOZrEEkQfa+WQqQy7ihHglGC/Wuf4Ys1CM7KsE0L2WhF0yGVS0py
oEroeryzxKidrzq7SVKil+z02HH7tV9vPPiC9W0gotkVymd8/jDITjt7EpPb2M9BkDGiWdDJW+Is
pJzRnGPw5c2aaNv1LH4Uaig7DhLO2lBqhL1eScBdFmaCB7PIeXEXsOiY8Qou67BW5/mcfmFvqDbJ
09TuhcXxakj1x4P4h8sJrFZFGZC3mYTl9GFSLtYEHk7M5UrczzfL16vaTWgVpNcEjX2QC0Omzuh+
WjC5F3dZ5bbrgbbl8uRKh2+/lPuqQlrj73CS6cAZ7ezFwyftTJK/+TBin/2ng69Zbogf3s903a0D
C5P7fiDGAXMqvYmHCu3Fe0pUt4mb5dl/JQRiVE2cL0r5nKPQQq4KSmxl54Wu73bOEI+EGZvyWXMW
cv5WQAsvO6+6IUQXHAw8jNUr56H3lKqcF/AJTCTmNvHZ86GOfhAlyLwtBlHGQ9svmewbKD+rA1ZM
S+1ZculEeL9FDTkKbtmze8bjobFjPS85tJJKhtisEdTWdIKOKed8rE0THGK/JXinXMHqJHcXPtVz
M4nc22z47f5D1aje+BxncSRkKA8BfYk3j3OBkXOckc8QGDXEGmfd83W6PDpRwK6G02SsKT/mc4fi
9XQm4YteGi4x8hbSM+SwHH8UIDxHX0It4r4T/Mp6xL4+QYocU0nIkRFhv3Cur/ypJxPUCljTHftF
j8OgYmyBr7MmhlIi7elfSGQN55791F0HC+rNGMdHqJLwiNsDGa0DDgjX9XSIcQUNP42wW7prtIzX
pUZj+MxwnJ3nvH19MvunB1in7NOZ7eLpwiJa2ak9YM6huzWaIBeYV68wJTcSHrPji3VqvfFv/WbW
/qZwDLUJyxaL6cpZ2R5MeHcierNbnElmJRddBlIQ+0HFZhkKeYbXa14Q3YJuxJKoz6DccZjcU4tX
e8h5DLScviVYQrw6p/regzyJWWyH06eZX71vgdIXLZItVdgLdz5Q0PPpOcRPvAPd9LZCY9hm3pAV
Vr4mluyiVy1FWFmCXUMfXyzB90BqchQi2q9OkZdQTg9v4hDWiLizHWio6mFEnE6vm7Ve9jYctITB
UqsOdJLY6/CrP29ubZnysdYOqLspBpaWWQPC42sq8LRIKzHCdnTGvohCxN/3e6Luwj6axN2VhyZj
KSTop6gRQhc8gQD6/TQv1wMdd2zjofr9bDzuzfTOEtmsAZ/xikku9HrOc1a4R2f5hLDq0m+z8rZl
wcaQTWLVkU7XrmItxDbTizipj3tKvY8CJl/0cGCxhM5DcUzBpo/kLDSduhyrWvvVC/bG2VbpHB1r
KKZdgc2+zcBy1toEnZa6JGf/a0fzxTgOMHwfj7gw/TpaKj4N4TRdF2+3uz4BldYoEZIdH2PpcBnB
ylmhCJs2U4kVtiNY7tO0wmIZhymy2Lkn1G4qVE7XHivj0jf0W11ryKnakYSTLDcrhLOG/NNTxuab
6en8eo0ZtM2IJyfCmqpQHDQfPeuITgBkx0i4T6SGE2FvOObeer4eq7pAJiAM8bj30aF9YVdg+fon
nBifnbMeM2FA6+UjeztQv8B8QCiGfZsZHndUH9GsDG4Br91lmECzi3I2ctcSLHaKcMK9DWc9XUQI
4joY8TWVGUoSW4pRxIoHrLo97vly5W2U7NgYy9bAgtlJ3hBSLrniY5p+6Fjy1IfnWb7hm/UsI5Z1
HxY0dryHpaT9gtXOWg+qXHrf9EToJ+3TFGjrJ/gMnIvKCsZx/6sfrMv6GA2POe8gc8+xJ/ToRef7
JOzhYmeI6MC5OFRvEgNp0fmyfV5QJ/XDMhB764M4PTdEozJoPGwSxpy+yavrV4LbDq3v9kO0cRmi
+SIlEG79ceIPbaWu0hy7aML3h4f8pY7WJVBSpKSd+ace1z6L9+jRcTlWkuTcr8fqW4BPxnoeAiFW
eYkNBnSqtZrcinuVs6mkl8gch4mcCkmM1sOuT8CvPiNdMxoKdtKEgAF9XJjSkFO9DwxoSMwF6yA/
OXO2q1NEE3giR7+eAWXmHYSbXkz78VqAFbGchnbw+MayfH7Q9THbIvym1hf74Z2jvVXBBM5FffMA
gY9oeb3GGdLnbSHYotiZI3ctIG3TowdbpY9ojuQZ3Uu3wZ7RQad7v482BBTM0wM0bS/kO6lC4/wI
J053lXw9u76L4mZeiDTJFWCf/eMLbmqnYc04xPmcQlOESP5U2E1OaTQd1EpE013ksO6vej78Wf9L
LxN73DfNvJ40C+0m+0KM8WCp9FO8KvTJ4hrH3k7NhU/59FF4IacJ+PXSr+37uId+Ib2wN0mVQ5dA
meHmL4kDsmM+XgEzA7deEnLSIMlXfCID8KbrC5vp8aCucxDtkZIQQk66q0dUMOcQxU2kE3mX6f2a
7YIOGkRssQ5qxVnkIYnhzgtMIjfzrqfjmV1/eoCTMnacKfKqEh3tbJoIyGwqWM8uBjtkvKev6T/6
ueTFPRRGG06gwp+eCvqhhudK53BqVyWdH6slAn2pc4Ljx8lZVxekENajP61h6/QFp7z/3V+07HTO
Oae88vCxX1QiebsHXW4U+3A5uDciLY+Yst3NTOFN/sQTD5mDuiKeshDfghuRXfoB3bGuCnAOjZe3
yKcW0Is5a8DeGwaW4/ACptCfJzT35TgxHvNwqPpmniDN3nCi/vrs5938ShDU5hQHfoTAZL5sCEp0
Bn/qk9f62oMwniRsqLLvrM1L6KDspCrxwlFXl2Pd8fB65ecJNaR06Bgxys9/egxkCoeMZ3aGWjyr
WMn0Qz8GkfWEn8NgkKIO5Yg/1g8XbfWAPZsE6kwu3R7uT4qL9U6XoqV7LiEsq0Qj2gNe8w5HjQcv
k21Nk3JI1VkezhCiRz17B7dJKAld0MJNX7f1MCjnFLkGwRnusd5PnNNxxlME3EOVCfY/lkNPFxDD
JeODif3p86f6dkCYrPj3POVLZHGxqD7vd2zS4UNnhmXj3/2eGOWQOgM+PyDs8oeC/TG704X1Kh9d
u/NAwlKj/fR8P/eQDPiANY+cosUpAx6daqMmsuDmPVuxgY0YwjfE8b9DvxToqIAhnkNyexqfqNWb
twe7w10kzlFt+mGvzylyzmqLf/6TTufehxiHJ6JvfpAX9MVCQukOJC6LVz4+VjmEmWCwWI7rkX7q
r+fD2dE3/ehZh86RXIFVqIOJ89dXvshDGYuc0nE49N5BPzIsHMSb8h2w9OpzSs92wIJJYlqiLZ0T
rRdNHVBDrwAr4WT0lGCigOYMCU5M6+VQaY5b5FL3S4rWNnIuA2MIPp+1nARruDiCd3nwiFvKM7Fa
5QOGrd/8+gnB/bhS6kXPWJT6mk5sWC4REdR1ho/jwSGYvC50mXGxguzoxthIkotDX5+sgsc0Z4ja
M3a+SANhD6x6+UwjIzLOciwhD10tWYn3PcoqOxRyBk4yaYlSR01DCzj70Po+SmIt8xsswenMQseU
EiIZhxBwXsDEsObedBJbxe65LQ9AE32TCSoLjNaSry34XHGOLciXlP+m7y+YHXPxpsylzgx2gQG9
KX95fKsc1Tk/xB1arWHBMkFN8z2doQsco/pOQ+iV0XwKcQHceDZIsaRpPmeo78AOPSOihZMZsdLw
LsSxrhQSq8dvNMxn/4sU7rMjpgZPzhBbLIMiQ5Wng0tdsFzVKvnTH0XusjTLHIU+hNpFw8qWD9bm
tfuC/AwHIpnyiS4X6cRDrY7eHkkrU/35KRhH5xv2OJw6HUQWC9/wY5PS41tn2fIg+lonZapR0vWr
j2PmV6/EbASejge1q1Gav2Jsxouk8naV/tEbfKJF4iyJZbFilEAZb9+njotzD3/9jWCg3sGkf64M
3Po/NkxLV/uLurYwTL8nb594EMyJFTBoCAXRA7urGgmR+zVgcJjPxK1DOWfDk8qg/VBY2F/mJ1js
sokRXMY91t8EOOSXh2I9OW18QY14CGwWjrWUENMCb2fq6nUPeD97YT29ThERzL0P5es1IrrGXfKB
s/09Wtv+Q3TyYtXRfJwM8C69lrjI4KPldFmf6PDx99j9HquIIBaVENKvj8NJfvTUjzIePKNVwGrf
HqLJu1Ye+HZGiD3IQHX88YT9UFrEM7rCIaEnJlCraYQlZASAO13EFtzLCWPsiprKioc2hubkZVga
PwSQi74k8A0+Kw4YsOu/haDbEJfujciFDOga2zUDlXdHJkTpwxm9iDNQdzLKiXmtvDozM88DemY4
D47zMVrIua6hVgdvbNuC3VNpLp7QScqQYFlf1MU/vWOYyOURSyHe0T9++1ZLPnY9popWvSHujydN
l6dXq+wUsXugdFZO/HH5bv68nUFvyDY2sqesUqdQEtDinU30yhyiH3+Ck5/V3m3ze4vSmQm0BW3C
Se18HXI4kALsyq9KNO79yid5PRZAa8IdNrLCUNcbT7+wyqsdyerIaJZP2ddQ1V9vfJQQpWtkoQme
A+eFldqxGs6PMg2o3N7BkXyumxXspBh9322Lj/FSUSKvRx7SNjsSb+u3pOS/NXzqcYSzTS/4Oco6
8dlMdyzLp5ZSabhr8BUavUfp+RbRJbBXSG6nAAcuPVJBHT8JdM6P0BNie6cun+obbqngQy47O6CU
s6LNL3g3svGnSJiu1Yy6nkmIEV5uzmq2exeC880lxzRgwaqQxUMb78A6fXrqvOMfT+jn5ouoFus3
0xBjC4ZCeyOmL8oqN0VcCM/qV/bY8WBGgp1NLTgewpYY+91dXdzM5uFTTyJvnxUtmPvKT+Gtej0J
Tv1AXY23PKCTaofE6VuYr4l0H+DxeJ+wTG9CsyyRNcOimQpimZYOOHhwBvirvxN9LirZ/ATUg97B
6kmt+kXpcAIds1awFyaZOh8znoFBxurYNv1Hs/nNGgb7GRG5Mu1/+6lkOsQkXdKEzp+CE5HKiQ45
FZdXtKT6/gkvpVuRoJBzwFLTFNGWx0jwNJ50EDTHgp/rxcZadnPy9aLvGaA9w8vEcuk5n/f6nKEJ
oxPxOLxXF7tKn3DLE5v+js4ffqKHloDPfblz6MZD4MYTiSrEl5zylrzCk2qF2JF13LOCoqXIXSiD
tbAdIvIts1WcSLJseY/mw7FkC5RdTk/y428Ca7QKrMQbJjoj5upcIOsr7rwpx0fKvpq1TxMLmt5c
bnniDL7ZLvgiUahiEmZn3NC20VbYP3cZPhIo9ewcBSLyfFHBWNYDlV2dUwqHxuUndusvS4FkBaLg
bWG85eslP5wlgJE1kos2ImcRDMVH0qDcsbHl0VFgTBec5LH1HhtZmPV3w8C75wNP8MWHOpT8KsLa
cndY4dK4mX781CCZSOxML/rVLgYGco96xJs/oJP+jlxov78r/j0v1EkuLSji2cWXN+Ns/qsL4aa/
nuBS11kl9jr96hsby2w6lHoPCQnF2OHf+i+cHfiQqWKfWMnLBJxbWskhyHgdu8pSRDPDwgS6Gl9i
hz7naGD2VYKC9DCSjXf04+udK0A+rNibk/ZL19tezOCWVyaIzm0+3/lHhhhJjIl2Rfd8mfBuhWNk
GNhGnt8sPz+08ePpq8pBPy+RlcLamvqJscni9DgYW7Dx3UnYKTxYV38vgs1//3hzxN35xyr6X5Wb
dlVSqbx4IBo8KZaIDZTYzs/f/+FLxrKvnaWrxT96RLb+4FD/bBmgUDmXYOErUX7jt3AM3sq0CC5o
hsTIXRha02vL00ouLFhTxI1v4dOb8VQhBbsaFNHM4mLcaxG98VCCX2uasCPkOFoWrPpiz7EKlhlw
byg+By3YeNpPr/P1W2YZnG/Kkajp8dIvfXpeYfx0dSJb8LvxcUcBjJxJExc/jhHnpu4fPkTSZFTo
1n+qP7zYs4VB3fxrAok3j8T2+Gc/O1kE/6z/CdS6KsjrkQUXZM/ervJeDjHvWBP3PVN4IyOW6rgE
igT8/B0Qt4ze+UrOQYbo875gt1aDiJ6uqwYzc/SJ42dJv3DKe4byh39jfeMz3xzEFvzpi7Hx4rmr
QAqDz1nG8cbvu2PJlgCw/Ixd9TjmCz5/E9gkRY4di47NKs/2BNx4NYjB+cCZx9QRIXpUM3GE5pvP
TpW68FJQltjJy+qHw47VwGKBw8QsfunM37K3QNYeWWLK+uT80XNGTiWc2Lun2mnNjoGW27bEbHYL
pfo7f0JzGBMsVxIHJkE/WGDcNYeJt3eaunYVtECyizsSWM8rXZ+vyYZ1MgKiZPq1GfTXywNuJBie
GCb7Zv4WzxS+Qb9i6QEFSg7ytwZPjX3jGJXPnDYfVMGXauX4UljUGVQylz+ei60HiOn67OsM6Y8n
nRahCBrBDwoN7guFxSpBWjQvTrI/bH6U6Bu/GNuP5EPhRFOsMyJwRoZ/MEhNJED0XTioXWJE3n7j
uxPqkeVs840O5m0qYZW8aN71VZrCpNwNWHv1t3zmtaL7M78y7O6qrqs/71F1MnIsA4drRhw8Wdiw
eTQxqq+qhNfcUAzN5eXtTckGv/wGf/7ZUK+yM4dHr4R6rd1xfLjgP34J1QkBHvw9X00PRDBo/Jec
Nv60PKZb+ONr3iG+YvAn3zxXYhK1usgNzytlB9+T5W39+B3NYSCXPz+95SEBDGB31cDm14hXavt/
9/fjYQ6xrL12oGftTIFr95RJUFgzXbe8CDa/h4tw+kYLBHb568fYPiR9Q423Yvz0mRjjclLH8Giw
4h06OfbGva6uS3S04SSVIvnND1fjfZyglb15TxyXqZmVWYaQIO1BvOypOZPxsQpoDmTzJ+3Jmfey
uIdVUV+I3YZSL6wuWIFVVB72UWKCwXrOE4y4fUB+/JeOF1DBwM6WP/xmbEbRB8/YbyZaWEtPugrW
4Oqk7vZ81NHCuakCux4m5Fr4E9h4TCZqcUzwredODn+6rAw8ZHGHXXQ2opW3JQsVIcdufj3Jf/NP
6MZ+NvnIcynLK7c9rEQGEKzBWaVnKzJg9318SYFucrRMZyiBLX/jMkxE54+/Oy8Kh6WlO4L+3e5n
+LD3DTm5YhPRw66Z4Jyr13/zcWXZx/BSDi4JQDT3a2RxE6JcfMX+Cyx0ucqPFk278Io9x6zV5aJo
LcyNasa60Ksq9/pc/+2n1PdujsinfsyQm+GHhGHb97/8CG3eTLCX6aVK73wgodMq2eR4VGs6l/tD
ByP9PWKzMk8OPVY1i3KztoiHzm9nFVFuAP2hm9h5emrPxtquBNSoDuTa7Nb+zzz38/FXcpK4LH/7
Z0uDHUhDb1fcHyqtl30N7fY4kRMteGddItlG8YrP087NS4f3ccHA6Nycpy1vOlTvrxoU0NPHib3H
Dvd+KTO63ybBe8WfZ06XcFZQKsL9tBMiDqzqm3EP1ys747u7Hh32bPsD9AE8bZ8nKu3Lo48cds9j
GTRGP2Zg9GEpMRVRmz2Ixt88c+uXk7j5yYXarA+ipDjjYDt/+eWrri++5ObXa07L/Sf8M3+WCdc1
VCWdBoN9FGzznXu/OEVkoE93Wf/MG/nN/wIDq8yfPLNQg1/B9v9IuPGxpZoLBd4iTSKRBftm/eWz
H5/0XmLg8M1r10FW40dysp5MNKhkX0AtulymGXkDXV+9v0d//3YF/Ne//vrrf/12GLTdrXxtGwPG
chn/47+3CvyH8B9Dm71ef7YhTENWlX//8+8dCH9/+q79jP977J7le/j7n784+Gevwd9jN2av//f4
v7af+q9//R8AAAD//wMAY2Pk+OAgAAA=
headers:
CF-RAY:
- 9591a0712f5d7e30-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 02 Jul 2025 22:37:16 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=.z.gWzfdOx2KplLQsVulTLIN_dP1NkYoQBh0J7Z3aQQ-1751495836-1.0.1.1-vkNBT_fdo_LMcsRYrqaBVA1UjX_zacI35sZZ9QIEaQsqEDhubfCucS4W2LRSfg9ryDF3w3eAFJReCUvm1J8Wt3B3RkOAUpuD9k.6eTNdzP8;
path=/; expires=Wed, 02-Jul-25 23:07:16 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=SNQ8k7m5IaO2hSRVhrXBbeiK6insJNPHwPYz0zBe1RY-1751495836461-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-allow-origin:
- '*'
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-model:
- text-embedding-3-small
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '52'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
via:
- envoy-router-6795fbb876-zzrww
x-envoy-upstream-service-time:
- '56'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '9999977'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_56d2a2018c52fccc1c611da9d2e4e06e
status:
code: 200
message: OK
- request:
body: '{"input": ["Example: Using Fingers(Math Example): An interactive way to
teach addition using fingers as counting tools."], "model": "text-embedding-3-small",
"encoding_format": "base64"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '186'
content-type:
- application/json
cookie:
- __cf_bm=.z.gWzfdOx2KplLQsVulTLIN_dP1NkYoQBh0J7Z3aQQ-1751495836-1.0.1.1-vkNBT_fdo_LMcsRYrqaBVA1UjX_zacI35sZZ9QIEaQsqEDhubfCucS4W2LRSfg9ryDF3w3eAFJReCUvm1J8Wt3B3RkOAUpuD9k.6eTNdzP8;
_cfuvid=SNQ8k7m5IaO2hSRVhrXBbeiK6insJNPHwPYz0zBe1RY-1751495836461-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1R6SROySrPm/vsVJ86WviGTVHJ2CIjIVCiD2NHRASoIDghIQdWN+9878P3idvfG
BZZaVmXmM2T+57/++uvvtmhul+/f//z197Mevn//j+XZNf/mf//z1//8119//fXXf/5e/7+Vt1dx
u17rd/Vb/nuzfl9v89///MX/95P/u+ifv/4+7q12rFMtdNkoCLF6MLQjPr2iJ6Mxv4lhdz6V440L
2oZt+SpE3fqj4XQ4Dw2TQT+qqlXkwaHICpdOl3elDvaT4O0W6mhqonMNt7N3widmYTZrrpIDn14I
Nl/tjvFW+M3AfT4fOLqah0KUXHGA+Lb7EizNpsGCioTIGXcpdvcacafkIteArnZBLg/njMj2eMxU
E102eHPaqBEbvEeGsnaNiEMUpR+K88qEM6oK4im9485pWg5yn1g+PivjhdFMxzVszTQfBbGMDD4V
LQob4e0HpZCqxcgSFEAhfWzsNaxA3zQ9jfB6SFesV3Bg8yF/TGgVqdy4mpSDyztM19SNnj1xJndW
T56rxlQz4bIl4U3XItHjVzdwpU2B4xntitl/3I8Aj/YzynzUMkH00QCllSd4u2NOL3yuuq2Ij01E
gmK4ugMeeVBtGj3IfjCdhueVoIbQFTlscPLdZSjSLDUuvY7gZtq70naURZXbPYzxHdOqmDxla0K6
X1d4f0oYo8/yGECNhxU53PBYsMu9qEA+8wXeSVe/kTykWDDsXBlbe83oJa5IJ3XzGh18vEhGQe3L
y0L1fkzIYUt3xmSbm04V/E4nW5qbPR/yjxxOh/OIz/t65XbAOxM6cDQj4eP9KAZSfWqV1ttbsOLM
DPHokwzq4ZyIBDe7i9EqrW6pI+/E5PYsqfs9gNLC9BB47GaZatBKsiw4lzQeucxAxtQEwgAPs3BH
4dwHxvy8oEzFZjyTrC620dTMxkMlTikTbMvIYCziPFjf088Igjr2k8zNJtKzWcDO+xZEwkZwU+Ao
9oP1jCiiTJUrWPKDxPprZtS/8Dzyg3s7ytfvnomB8+ngvsq3IyGG3dPY83TALjt+q5d3LRjE0QgH
0VNwGZHA5bvqSCH9ym6g1NMRUZtDHHDh7UK0c2wysRE3gborlIgYp5WEWL+9p8g+cN2oRFcWtSWy
TXDSTiD4yeGIr42bCPtaT/E1bC/GLPZPCs6lL7GxT7Y9f7GyUN1bISa26MzuSOm5RdvLTsDuuPYj
4UDfOfqEmU7Cct02gjQngUrm3gvmzT53pQRzF7TEH/bCYjTmQz5MMNhvMnJfxBVEyiqqbqdzQWw1
q10qhscKGDLqQAYiGNParF7woTYjucFSly/iXau6D/cbrJppbwxKMx5hXcozTi71vaBzG2TKqjRD
nDhRZYzqWgmgczcuDp7l0RAcoRL/3JfPd9Sd8iOvoejsxCMHpyoaz4QPVb9US5zwrcOE8RoqarHa
fYLX00X9tHUEUNF82eAChrqg+6GlQLYPlyTyd11MnZxk6rNYZyQS2Bu1yhY8sNm3IC7kzKDmcNRV
PrxsyfF2gWK2dbOD9kB9Epw4L5ptq7qoPvDNL18i4cFzOkhF8CKRPxiFpJ+QhQzgPXL4rt2GiR2C
Xz3BeIkP0a+HEIRLs8Wm27uGFILGq1l/I8Sfnk0zb/tAhHy9ibD92osGXV+3Ovh8tCX67XKJunbm
HeAgqDGelNkgkc1yUG0uGHmQ9gabPWbDyu/O40poh36OXm4HuLJkstv7h4Z/6gcO4hPdE7+TrsaS
/xRiKz6SdK8ZjXD+Xi/wScmLaNvog4gKB12VOCseH46r9QKXA4eqq1KTKw+biGcylUGhvBdw1vXF
BnPfxbCaBB2XvvxkLA5qXv3Vb7x3nIhJ4pFHJxAV7KbMQKxvigxkjTOIdbe+xszpSox09Gkw7hLf
YGru2tDJeI8Dp06KaXplnPqr/6dCexVTMpUxWs+fBh9uOCikoFMq5Deci/HhJfZULtY58vtswKZ3
ttB8+w4B1IXtkYNGL8VQaK0ITtW5AfWQiGg7mIMa4OEQrMqzW4zX54oCq/cO1o15ZPOkW+YvnvAp
P62LobdDR71qx4gcTXtgkzaH8i9fyfZ23jdzWZx1lHWajDdq07JJ5fmHOjthSmJGcc/TkXTwHToL
m1V+R/N8tCd1imslUISDUVCdBSma1VkOVD7iIwpkBHTnIjsQHcll3534HSVXzwAfsk0bMX015epL
yg74HKzf/ZRqR1NVtl0QcP1qV4jPTeuAmudbrK97O6Kf81zDYa9DwBDhmtmKvAAJfqvjY81yg1/w
VoGvtyM362qxmZ1dHnjqiSRTs9ro27GpVPl0eC/73zC+FIcQttyqwZuZyw12xVqMTlw4E89/5Mv/
hxeIfcnjjZhI6Mvf2BEeiaRj414QYx6n1QjVNB1I2rmvaAreawVIKefklI+S218HJVPzW7YnJy1/
N1/AN/MPHmEhvRbEOz881VoZmHiPy97gj5oRQzf6jBzfzGTCb//JlS9x9qoiNPuVf0OC3kzB9PLU
aKr92oKy3VukNJ1VTzfJ5ajan7NLEg9LLsND4KDGTp3l+x5oer+xBRxPN/ioH8yCXd46QCoHBb5W
D7sX/DzV1KG/XonV6iPqNrMyQJ9WJinXKyeaPK5VlHL9UHHqjXHx42Nqb3QpDlnLR6/L4+EgYZ3v
sRtvymhOEtNUf/np1c+q//E5NOihS8pr3RiTxI6e+vs+9632/fB1Vjnit5qMj/096SUaWBOIq0sZ
UNsvClasbzYc8fZInOmQ9QLajx2oaLPGtslFBbUO2AMze0XYfnzcRsKv24jaercbif58Fe1NCFtV
PyUc2SRMdyUQ7w/VTY4dcTjTj6hDM09lzXEXyHJ2KsRVS1vVfb4f2Cn12uV5xaqBs0oL+52kGiRV
+0DZZxstUCOjRq/7gyrK9M5bsl+Nu559u/Si+ofiFDw9fDLo9FImSO3rhyQvIUG0OPtH9FgbHbHr
4lnQck5btKoqGzvgP40x6zwe8lNYE2s4e7149l85eGe0x87bK13aV/xLFdKqwJc60JkU2EMH6R5V
I2ztsZ/OhypTfSHXsU7Jq2eqpAZQpZcNtvXp6xLF2Obq997wRAtjxSDP+6zBwvewv5JrNm0kO4aO
BmXAxmeKhJNdpepVCyMSJzu9mNtDE0J54j3sGCw1GKfbFXrMz/04zfNg0Lm1MpimaBM0dzogGj2C
FrlefiCmRj5LPqQmtGodYeNZXfu+ofJLtenhgf1bWhdztkeAkucTEe2sC81QZjRQ4bgTgs1pc42m
+HBo1T/1odu4rui/NQu4Mrkt+XpCfSkfQtU03HlkkLls8i6Rrj6VIAqYvAmML47uo+quXuugzzpv
iZ+dB+trohLv4ZwZ1ZkVw+lY9SQ3ndidVVXnock+JvaR8y7oTq1itTn5Pt58ah/RIfjkAGqzIdYl
mxumYkkG+uiOxNsYlTF3WjOCJRhKIKz8sRmbwFBgt0MtNpqH1Qjc0/aQfDiqGJvOqhmWfIN6596J
Ufotm9y0f4Bm0GmEXWsgiStuFNm7Nif2tch76rKGR+R6egV0o1jNjAL9guRSWPB4e+sJj1AF9Oxv
8WEvCQ1DNohKernEJAxf+4LhwbJBUHgNx0jTGjEhzxo+1Qhjde7rXqoddlSPPaRYH7MWMR3FL7V7
zUds5fePOwTbhw6Hb3wn+cIn521v8dB/4YmNLli7805ba+C4uB2RlJz7Hx6rzrhNxxW/WjU/fFWU
d7bCVvCE5inN1wAN0agT7aZXxUTHdwvnujXG9b4u3c9FwYCyg13hWPnUjJzju64khuDgzfcNUaMY
fg42HNfYUps4orf+7anHb+Zjs8xTRLhSeSHirglOWLtCI061Wr1+rk9ysDW/GLeO6YCoH2viRjD1
TPT3Cmxke0PSvp8Qq06lBgt+j3yZi2w6ZD4HH0OQyd7ATsGW85afTnkn+1rR+0FYqRyyO0HHG/H9
dmknSwF8rHwgpqd/3MlK9h4yqdIE0oABPXWFUvAbcEfxHTNj2joqKOtTEhFj3noF/cYZ//s83lc2
59Jc6Af0vN087IeKW8wyqi6qJWwU4q/uW1c6GfkIVbT2sVkkdjR1VU6RxBfrgE92dSTJtyyAPOH7
5by1gg89Yq5Xk6QTL9nvmz98bSjsBv/w7bN9lhnErr8it3Xlu7xhKMoffXxiFmGsv60eSFpVLgnQ
SSj6MnUzZJeFMPILn6RzIDsQfcogGBhpI/a8XgO08GtibSht7k0ix2BDuMaxGrwQC8VDDm3XPQNe
PG5dCmk4woI/ZOHj7veM+xx98az/6psxxR1REPpeE5x+6nM0+49PCMt9k5NmZwUjV2WEpb4TfK0N
V7hSTVdXL2kgrj92zbzobbV/nndYi9SLMdehraCFDxMj+7LmU7duDcg8Gjg6zAc2G9Z5hPuXC8hm
s8+NaYvPFCWSQbGtrR+MorFIgStPNxycklUxtFF7/O0HB1nSRXM8d7WiXactudJLj2gixJwq2akc
0Kx69rR7ZhnkL/cZrF+r2fhm75eGGqGyifV+1ZHg7PEAKUl1bNE37VkioQkcUzaxdo4fjFWG7IHE
mTFO6l0eMf2ETPjpYf0ZkX4uWdHC4+XCuMQfY0w0TVXjzmvsIPFtTG1UHVHc7gxsx23YDLuqssCN
eIYLNEjRdP3Mk9pU21eAIpiaSbmKPJB7fCTn0dfdKXCLG4Quz2HHuMjFJKeIoqpIxkBJ1Jb9+DY8
iWIEa2W8IGYDxylO6UX4+jyaBo9HngMfd0/sXNZdQ3vDqaEGWQ2krW248/d0v6mLvhn75f6WfOGQ
4O6CgDbbT8GmnTugBR+Cx8Z6R9Q3FQ+W+kMMLRyiMVnLEyDp1mNrQ4/9kl8i7HeDhf3tu2ck8LWL
+tp0wsjze7+RHmnuoJ/eWfKp+PEv8J5pGnBbfo3mg3zjkWLscuIv99nrKzmDyP/uyK4ZN2zKs/4G
RqnauLyvKzS/6o5H/Ce+ksOgGxHRozKE26c5EM9N+Z424sZD3WUGshVnvRCP2tNBivRdY4yzKZqm
QcvQKq0oPgmqYQjWJIbAe+X5Fw/N3Pq7CyreabDEMxikMuQAvpqsBcSWkfu5rFY5VK7OY4df8S7b
6+MNRbxSkC1p5mhUnJlXl3wdqyUfaLKeKOzDV4nd9GIZglAFCoxn3sTZ49M3cyIdONSevZHciuFq
PE4Ne6ji8+qQAH0ezcQrQQUvKT8Qd0Nv7qANgoWM+1kjftJajC54rqqi7eFk9fAiAeYefvWcWN+1
ZEzGmjxQvjYiYjwrtZkPlOSQ2uVn5JWPzr4uCo9qq1YRcW7nxBC67JXCOruFo4wVHk25MsdAz3hL
jNfeaaaYGIPqyZ4Z3HLv2LMv/xlAG80Vxpwps4lctgPsNg+TFFiqXTYc7AAWPkH8934o6JkPLZU6
sY639U12F3+CgjWOCXbBEKJx4RPK/nOT8O7hBBHbjpOoRmc7xnHKOc2iF0ERIvUdgPulBam/G0fN
QHmNQuY2DblvDR4EOfKI++RjV6ykwIKqOI0Lnq6b1rsUOlx5heINSvR+qQcjWHFm4TzXpYL8/J8G
VXS5z0NEvXvYqrCZEL6qFlewRV+rK0U84fN636CpOK8sZdEv2HC9L5uOW4WCFecWdi9EQZPg7EOY
49cLe4y0xR/9/vMTsH9v3Vkts0l1j0Qke3oMm3kYswvKR2IQfH8fGpZQM1X6SYzxj3/O4bACyHau
g03tmkfTe5/ocM1fNTHDyexZtnccILdbE0xyOrlUcWZRVXyTjMitzIhRXr4oaZGciPPhGrbEQ4Z+
69VTSdw//obHeSPeHLZWLx5vpQdX6ylhZ8wnl9r3Bd8fbhpglluFlFAvBiuK9tgRDk30q4/K9upR
Yv/02I/fMLBLcrN736Wb702BZ6KMWHu8H9FcHnyKlvqN9cuTd2c4f1K4R7DG2s45usw5fhzkb8zr
SFfroJizu/iATwAO3hsv25iGft3Cgvd4x51JQYRPFavPAmUjNcqyF5+5mK3D+Hsi+L7WmPDT78PG
DX5+QCQQ8e0AGdmV2EXZ9A/HNUdYkcIhdiluIl5pHROtFP5E3L2GXakqqKjaPJ/9m7+kyVeBfuJj
7MluzHrn+LHRoif+6NWpklYyLHo+ENeC0dCu8O0/+thK2LcfwrUdoAsnFti8GA+DP16FHIjidfhg
PDQmLnoAalGJsD7FJRqficWB8DzfiNM3FzQC71D0YY8uGC+D7U6d3g5QnkSP6LzqMTH+iBbU2TsL
vqHSR3RMhssf/PPPqdTTZxIA7Lf7I8bEaJt5hzY1SMePjY+r9RhNB6Xgf35/MIWvTzEy9g3RpzyY
OIzVnUF95aUARpWJfTX79oP29jv49Re8cvSLQcOGroYJHxM3DCqD/vRZc8I+2RRdF3U/PaF3eCC+
sROj6Qn1DURjOJOdEleM2q85hofivMmep5ZB449oqgvfJtY0OZHkHO823Eq8+fnN7oC+6woWPzng
BwTNIB3MGJWvLybO5jm5TAtlHVpWc9iviwmx1d59waY4qiNq409P/KdrKuH1KGAnurKip7x8Q1pr
JmRbvTFil32oIGuyfRwq5bsfZ+cSo2IK3gTfplsxfOgX/uj3bZCNDfNzSQSnNQeys1ZuwZ+FvYLo
YDFsFsba/SrcboRP617JZvF72uku2NAbbYq3QRb0P/2BzgfPW/oFH2MqwOOUuLbOeOc/cDSYW6sG
3LYp2e3LirFFn6rpiz7++MXz8SHr4NdCgF0pOTfDmFMTyISTxW/QGmGtNw9IZIeMwrZjxhcZkgeS
ppwC4ZP0Eetv0gt9YaVh2/Me0eyiMPzDb9Os09B3nFYDjOgh4JOiz80Qd28FKb5FyM5NP8b4wGUN
OuobYi/9k7aSJPl3PjgKjLsx9+1H/6P3tUU/0Vf15eH1zRE29OneE9LUjnqIb3usqWbfz4/1gYdo
635wsPQTvtxT8wDl7zc2v76Lhmx3tSGPv+NPH0fvtLimyuJ3Evy+nRlTExIrh0LuRnJ1txE79aEG
rZamI1riZwZ8sxQGTjku/lnfZWPVqevrSR1Xb+HUz5ZwtFR+3q2X/tLMehmco7rgB3G1h45+eIKy
hx5gnzzlhd9xIjyd6504J352p6Pmporex5+Fj+oRswQkw2s6dqMar9ZoDA+PEQTPc8nuGngN88uo
/eNvlMt+pewFHuwe0zCSe8jQdNeNQI3SZ0Ws1awU3eXGj/A0Nhp2X5ZRDGkWj6gbMfudZzPdej9T
tIkbR7r43ZKwyy/o53d5Qr+KxiZiKZye2XkEd3Vv6F1/6qCfFUr09fn868cEsOaDEzH71CiEYLJt
uDY39Ef/fY96bYPd5PdxteDbuH5nRxgtSHHS32r0VcuQ/voDBJtsaDo48DIs/tCoFuKAmLK7e3/8
9E1FDDYv/B0WfCBWxats8UtysMOKkUjeBC59T20N7lW0MT7ipp87BTxkPb7aj582NDl1MmxlWAer
H38sD9sJ+E96Jdvp8DXmNfAWWvg5KcBICkq/Ko9ajDOyX+8NxK82bxv49Ebw1r6/CurQ0APWhLuR
2/JnxBw7kUE9iO9x1SVfYzpZ+xrCdtSJmSZqP2d7BioUU0hK834tmA0iB58yMrFdiveIt5JNoG5l
bj32541jzIl0BoXu5iAgGf9u2P0aXEBI64IEFO0N0nrXFr3h1BHr1CVIoEIXgrSfyNL/a/oXTrVK
XfhBgBRvYMt+A/Trn0RRm7uMK+lDvTzBGF8L/vQ//clZVwtvVVNyqV8/QnCoqZPjWkfF5Ia8A+db
fxq5fvWOhtpIeTDKlR2sF//u55+hBY8wfux2rvRcNZY6cIlL9J14Lfj3/qqD2oQ3bLz2XTOI7kVG
NAoKbIqagGZLyM0/fjX9+SU/f27hR3jvnenPz+gAwg9HNPy6FfNHMCmMpThi4zDPjH4YnoDf6nLA
L/g5k8aOf3yUWEdQosk3s/zXfw0YZD0i5eIdLf1AnLnCq1j06/DD44WP2S5/9s0H5DEZ8a/e0sHZ
PpTjptthN+uGgn6po8BPb3nHY1qIx63GrV3ZyZZ+xIv1F+N+g+GhAbnMrxBNi58B3NbAAZclTjQ/
le8NFvwgGzJFSDpIXw1kGPakmM81++P/bd93nfifz4bR9TsL4e/fVMB//euvv/7Xb8Lg1V5vz2Uw
4Hubv//x36MC/yH9x/DKn88/YwjjkFe3v//59wTC35++fX2+//vbPm7v4e9//hL/PWvw97f95s//
9/m/lp/6r3/9HwAAAP//AwDb9NaU4CAAAA==
headers:
CF-RAY:
- 9591a071f8497e30-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 02 Jul 2025 22:37:17 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-allow-origin:
- '*'
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-model:
- text-embedding-3-small
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '85'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
via:
- envoy-router-79c9cd8ff5-w6hpv
x-envoy-upstream-service-time:
- '443'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '9999973'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_3a042b1efd09880c8484622f6bf9a343
status:
code: 200
message: OK
- request:
body: '{"input": ["Example: Using Fingers(Math Example): An interactive way to
teach addition using fingers as counting tools."], "model": "text-embedding-3-small",
"encoding_format": "base64"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '186'
content-type:
- application/json
cookie:
- __cf_bm=.z.gWzfdOx2KplLQsVulTLIN_dP1NkYoQBh0J7Z3aQQ-1751495836-1.0.1.1-vkNBT_fdo_LMcsRYrqaBVA1UjX_zacI35sZZ9QIEaQsqEDhubfCucS4W2LRSfg9ryDF3w3eAFJReCUvm1J8Wt3B3RkOAUpuD9k.6eTNdzP8;
_cfuvid=SNQ8k7m5IaO2hSRVhrXBbeiK6insJNPHwPYz0zBe1RY-1751495836461-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1R6SROySrPm/vsVJ86WviGTVHJ2CIjIVCiD2NHRASoIDghIQdWN+9878P3idvfG
BZZaVmXmM2T+57/++uvvtmhul+/f//z197Mevn//j+XZNf/mf//z1//8119//fXXf/5e/7+Vt1dx
u17rd/Vb/nuzfl9v89///MX/95P/u+ifv/4+7q12rFMtdNkoCLF6MLQjPr2iJ6Mxv4lhdz6V440L
2oZt+SpE3fqj4XQ4Dw2TQT+qqlXkwaHICpdOl3elDvaT4O0W6mhqonMNt7N3widmYTZrrpIDn14I
Nl/tjvFW+M3AfT4fOLqah0KUXHGA+Lb7EizNpsGCioTIGXcpdvcacafkIteArnZBLg/njMj2eMxU
E102eHPaqBEbvEeGsnaNiEMUpR+K88qEM6oK4im9485pWg5yn1g+PivjhdFMxzVszTQfBbGMDD4V
LQob4e0HpZCqxcgSFEAhfWzsNaxA3zQ9jfB6SFesV3Bg8yF/TGgVqdy4mpSDyztM19SNnj1xJndW
T56rxlQz4bIl4U3XItHjVzdwpU2B4xntitl/3I8Aj/YzynzUMkH00QCllSd4u2NOL3yuuq2Ij01E
gmK4ugMeeVBtGj3IfjCdhueVoIbQFTlscPLdZSjSLDUuvY7gZtq70naURZXbPYzxHdOqmDxla0K6
X1d4f0oYo8/yGECNhxU53PBYsMu9qEA+8wXeSVe/kTykWDDsXBlbe83oJa5IJ3XzGh18vEhGQe3L
y0L1fkzIYUt3xmSbm04V/E4nW5qbPR/yjxxOh/OIz/t65XbAOxM6cDQj4eP9KAZSfWqV1ttbsOLM
DPHokwzq4ZyIBDe7i9EqrW6pI+/E5PYsqfs9gNLC9BB47GaZatBKsiw4lzQeucxAxtQEwgAPs3BH
4dwHxvy8oEzFZjyTrC620dTMxkMlTikTbMvIYCziPFjf088Igjr2k8zNJtKzWcDO+xZEwkZwU+Ao
9oP1jCiiTJUrWPKDxPprZtS/8Dzyg3s7ytfvnomB8+ngvsq3IyGG3dPY83TALjt+q5d3LRjE0QgH
0VNwGZHA5bvqSCH9ym6g1NMRUZtDHHDh7UK0c2wysRE3gborlIgYp5WEWL+9p8g+cN2oRFcWtSWy
TXDSTiD4yeGIr42bCPtaT/E1bC/GLPZPCs6lL7GxT7Y9f7GyUN1bISa26MzuSOm5RdvLTsDuuPYj
4UDfOfqEmU7Cct02gjQngUrm3gvmzT53pQRzF7TEH/bCYjTmQz5MMNhvMnJfxBVEyiqqbqdzQWw1
q10qhscKGDLqQAYiGNParF7woTYjucFSly/iXau6D/cbrJppbwxKMx5hXcozTi71vaBzG2TKqjRD
nDhRZYzqWgmgczcuDp7l0RAcoRL/3JfPd9Sd8iOvoejsxCMHpyoaz4QPVb9US5zwrcOE8RoqarHa
fYLX00X9tHUEUNF82eAChrqg+6GlQLYPlyTyd11MnZxk6rNYZyQS2Bu1yhY8sNm3IC7kzKDmcNRV
PrxsyfF2gWK2dbOD9kB9Epw4L5ptq7qoPvDNL18i4cFzOkhF8CKRPxiFpJ+QhQzgPXL4rt2GiR2C
Xz3BeIkP0a+HEIRLs8Wm27uGFILGq1l/I8Sfnk0zb/tAhHy9ibD92osGXV+3Ovh8tCX67XKJunbm
HeAgqDGelNkgkc1yUG0uGHmQ9gabPWbDyu/O40poh36OXm4HuLJkstv7h4Z/6gcO4hPdE7+TrsaS
/xRiKz6SdK8ZjXD+Xi/wScmLaNvog4gKB12VOCseH46r9QKXA4eqq1KTKw+biGcylUGhvBdw1vXF
BnPfxbCaBB2XvvxkLA5qXv3Vb7x3nIhJ4pFHJxAV7KbMQKxvigxkjTOIdbe+xszpSox09Gkw7hLf
YGru2tDJeI8Dp06KaXplnPqr/6dCexVTMpUxWs+fBh9uOCikoFMq5Deci/HhJfZULtY58vtswKZ3
ttB8+w4B1IXtkYNGL8VQaK0ITtW5AfWQiGg7mIMa4OEQrMqzW4zX54oCq/cO1o15ZPOkW+YvnvAp
P62LobdDR71qx4gcTXtgkzaH8i9fyfZ23jdzWZx1lHWajDdq07JJ5fmHOjthSmJGcc/TkXTwHToL
m1V+R/N8tCd1imslUISDUVCdBSma1VkOVD7iIwpkBHTnIjsQHcll3534HSVXzwAfsk0bMX015epL
yg74HKzf/ZRqR1NVtl0QcP1qV4jPTeuAmudbrK97O6Kf81zDYa9DwBDhmtmKvAAJfqvjY81yg1/w
VoGvtyM362qxmZ1dHnjqiSRTs9ro27GpVPl0eC/73zC+FIcQttyqwZuZyw12xVqMTlw4E89/5Mv/
hxeIfcnjjZhI6Mvf2BEeiaRj414QYx6n1QjVNB1I2rmvaAreawVIKefklI+S218HJVPzW7YnJy1/
N1/AN/MPHmEhvRbEOz881VoZmHiPy97gj5oRQzf6jBzfzGTCb//JlS9x9qoiNPuVf0OC3kzB9PLU
aKr92oKy3VukNJ1VTzfJ5ajan7NLEg9LLsND4KDGTp3l+x5oer+xBRxPN/ioH8yCXd46QCoHBb5W
D7sX/DzV1KG/XonV6iPqNrMyQJ9WJinXKyeaPK5VlHL9UHHqjXHx42Nqb3QpDlnLR6/L4+EgYZ3v
sRtvymhOEtNUf/np1c+q//E5NOihS8pr3RiTxI6e+vs+9632/fB1Vjnit5qMj/096SUaWBOIq0sZ
UNsvClasbzYc8fZInOmQ9QLajx2oaLPGtslFBbUO2AMze0XYfnzcRsKv24jaercbif58Fe1NCFtV
PyUc2SRMdyUQ7w/VTY4dcTjTj6hDM09lzXEXyHJ2KsRVS1vVfb4f2Cn12uV5xaqBs0oL+52kGiRV
+0DZZxstUCOjRq/7gyrK9M5bsl+Nu559u/Si+ofiFDw9fDLo9FImSO3rhyQvIUG0OPtH9FgbHbHr
4lnQck5btKoqGzvgP40x6zwe8lNYE2s4e7149l85eGe0x87bK13aV/xLFdKqwJc60JkU2EMH6R5V
I2ztsZ/OhypTfSHXsU7Jq2eqpAZQpZcNtvXp6xLF2Obq997wRAtjxSDP+6zBwvewv5JrNm0kO4aO
BmXAxmeKhJNdpepVCyMSJzu9mNtDE0J54j3sGCw1GKfbFXrMz/04zfNg0Lm1MpimaBM0dzogGj2C
FrlefiCmRj5LPqQmtGodYeNZXfu+ofJLtenhgf1bWhdztkeAkucTEe2sC81QZjRQ4bgTgs1pc42m
+HBo1T/1odu4rui/NQu4Mrkt+XpCfSkfQtU03HlkkLls8i6Rrj6VIAqYvAmML47uo+quXuugzzpv
iZ+dB+trohLv4ZwZ1ZkVw+lY9SQ3ndidVVXnock+JvaR8y7oTq1itTn5Pt58ah/RIfjkAGqzIdYl
mxumYkkG+uiOxNsYlTF3WjOCJRhKIKz8sRmbwFBgt0MtNpqH1Qjc0/aQfDiqGJvOqhmWfIN6596J
Ufotm9y0f4Bm0GmEXWsgiStuFNm7Nif2tch76rKGR+R6egV0o1jNjAL9guRSWPB4e+sJj1AF9Oxv
8WEvCQ1DNohKernEJAxf+4LhwbJBUHgNx0jTGjEhzxo+1Qhjde7rXqoddlSPPaRYH7MWMR3FL7V7
zUds5fePOwTbhw6Hb3wn+cIn521v8dB/4YmNLli7805ba+C4uB2RlJz7Hx6rzrhNxxW/WjU/fFWU
d7bCVvCE5inN1wAN0agT7aZXxUTHdwvnujXG9b4u3c9FwYCyg13hWPnUjJzju64khuDgzfcNUaMY
fg42HNfYUps4orf+7anHb+Zjs8xTRLhSeSHirglOWLtCI061Wr1+rk9ysDW/GLeO6YCoH2viRjD1
TPT3Cmxke0PSvp8Qq06lBgt+j3yZi2w6ZD4HH0OQyd7ATsGW85afTnkn+1rR+0FYqRyyO0HHG/H9
dmknSwF8rHwgpqd/3MlK9h4yqdIE0oABPXWFUvAbcEfxHTNj2joqKOtTEhFj3noF/cYZ//s83lc2
59Jc6Af0vN087IeKW8wyqi6qJWwU4q/uW1c6GfkIVbT2sVkkdjR1VU6RxBfrgE92dSTJtyyAPOH7
5by1gg89Yq5Xk6QTL9nvmz98bSjsBv/w7bN9lhnErr8it3Xlu7xhKMoffXxiFmGsv60eSFpVLgnQ
SSj6MnUzZJeFMPILn6RzIDsQfcogGBhpI/a8XgO08GtibSht7k0ix2BDuMaxGrwQC8VDDm3XPQNe
PG5dCmk4woI/ZOHj7veM+xx98az/6psxxR1REPpeE5x+6nM0+49PCMt9k5NmZwUjV2WEpb4TfK0N
V7hSTVdXL2kgrj92zbzobbV/nndYi9SLMdehraCFDxMj+7LmU7duDcg8Gjg6zAc2G9Z5hPuXC8hm
s8+NaYvPFCWSQbGtrR+MorFIgStPNxycklUxtFF7/O0HB1nSRXM8d7WiXactudJLj2gixJwq2akc
0Kx69rR7ZhnkL/cZrF+r2fhm75eGGqGyifV+1ZHg7PEAKUl1bNE37VkioQkcUzaxdo4fjFWG7IHE
mTFO6l0eMf2ETPjpYf0ZkX4uWdHC4+XCuMQfY0w0TVXjzmvsIPFtTG1UHVHc7gxsx23YDLuqssCN
eIYLNEjRdP3Mk9pU21eAIpiaSbmKPJB7fCTn0dfdKXCLG4Quz2HHuMjFJKeIoqpIxkBJ1Jb9+DY8
iWIEa2W8IGYDxylO6UX4+jyaBo9HngMfd0/sXNZdQ3vDqaEGWQ2krW248/d0v6mLvhn75f6WfOGQ
4O6CgDbbT8GmnTugBR+Cx8Z6R9Q3FQ+W+kMMLRyiMVnLEyDp1mNrQ4/9kl8i7HeDhf3tu2ck8LWL
+tp0wsjze7+RHmnuoJ/eWfKp+PEv8J5pGnBbfo3mg3zjkWLscuIv99nrKzmDyP/uyK4ZN2zKs/4G
RqnauLyvKzS/6o5H/Ce+ksOgGxHRozKE26c5EM9N+Z424sZD3WUGshVnvRCP2tNBivRdY4yzKZqm
QcvQKq0oPgmqYQjWJIbAe+X5Fw/N3Pq7CyreabDEMxikMuQAvpqsBcSWkfu5rFY5VK7OY4df8S7b
6+MNRbxSkC1p5mhUnJlXl3wdqyUfaLKeKOzDV4nd9GIZglAFCoxn3sTZ49M3cyIdONSevZHciuFq
PE4Ne6ji8+qQAH0ezcQrQQUvKT8Qd0Nv7qANgoWM+1kjftJajC54rqqi7eFk9fAiAeYefvWcWN+1
ZEzGmjxQvjYiYjwrtZkPlOSQ2uVn5JWPzr4uCo9qq1YRcW7nxBC67JXCOruFo4wVHk25MsdAz3hL
jNfeaaaYGIPqyZ4Z3HLv2LMv/xlAG80Vxpwps4lctgPsNg+TFFiqXTYc7AAWPkH8934o6JkPLZU6
sY639U12F3+CgjWOCXbBEKJx4RPK/nOT8O7hBBHbjpOoRmc7xnHKOc2iF0ERIvUdgPulBam/G0fN
QHmNQuY2DblvDR4EOfKI++RjV6ykwIKqOI0Lnq6b1rsUOlx5heINSvR+qQcjWHFm4TzXpYL8/J8G
VXS5z0NEvXvYqrCZEL6qFlewRV+rK0U84fN636CpOK8sZdEv2HC9L5uOW4WCFecWdi9EQZPg7EOY
49cLe4y0xR/9/vMTsH9v3Vkts0l1j0Qke3oMm3kYswvKR2IQfH8fGpZQM1X6SYzxj3/O4bACyHau
g03tmkfTe5/ocM1fNTHDyexZtnccILdbE0xyOrlUcWZRVXyTjMitzIhRXr4oaZGciPPhGrbEQ4Z+
69VTSdw//obHeSPeHLZWLx5vpQdX6ylhZ8wnl9r3Bd8fbhpglluFlFAvBiuK9tgRDk30q4/K9upR
Yv/02I/fMLBLcrN736Wb702BZ6KMWHu8H9FcHnyKlvqN9cuTd2c4f1K4R7DG2s45usw5fhzkb8zr
SFfroJizu/iATwAO3hsv25iGft3Cgvd4x51JQYRPFavPAmUjNcqyF5+5mK3D+Hsi+L7WmPDT78PG
DX5+QCQQ8e0AGdmV2EXZ9A/HNUdYkcIhdiluIl5pHROtFP5E3L2GXakqqKjaPJ/9m7+kyVeBfuJj
7MluzHrn+LHRoif+6NWpklYyLHo+ENeC0dCu8O0/+thK2LcfwrUdoAsnFti8GA+DP16FHIjidfhg
PDQmLnoAalGJsD7FJRqficWB8DzfiNM3FzQC71D0YY8uGC+D7U6d3g5QnkSP6LzqMTH+iBbU2TsL
vqHSR3RMhssf/PPPqdTTZxIA7Lf7I8bEaJt5hzY1SMePjY+r9RhNB6Xgf35/MIWvTzEy9g3RpzyY
OIzVnUF95aUARpWJfTX79oP29jv49Re8cvSLQcOGroYJHxM3DCqD/vRZc8I+2RRdF3U/PaF3eCC+
sROj6Qn1DURjOJOdEleM2q85hofivMmep5ZB449oqgvfJtY0OZHkHO823Eq8+fnN7oC+6woWPzng
BwTNIB3MGJWvLybO5jm5TAtlHVpWc9iviwmx1d59waY4qiNq409P/KdrKuH1KGAnurKip7x8Q1pr
JmRbvTFil32oIGuyfRwq5bsfZ+cSo2IK3gTfplsxfOgX/uj3bZCNDfNzSQSnNQeys1ZuwZ+FvYLo
YDFsFsba/SrcboRP617JZvF72uku2NAbbYq3QRb0P/2BzgfPW/oFH2MqwOOUuLbOeOc/cDSYW6sG
3LYp2e3LirFFn6rpiz7++MXz8SHr4NdCgF0pOTfDmFMTyISTxW/QGmGtNw9IZIeMwrZjxhcZkgeS
ppwC4ZP0Eetv0gt9YaVh2/Me0eyiMPzDb9Os09B3nFYDjOgh4JOiz80Qd28FKb5FyM5NP8b4wGUN
OuobYi/9k7aSJPl3PjgKjLsx9+1H/6P3tUU/0Vf15eH1zRE29OneE9LUjnqIb3usqWbfz4/1gYdo
635wsPQTvtxT8wDl7zc2v76Lhmx3tSGPv+NPH0fvtLimyuJ3Evy+nRlTExIrh0LuRnJ1txE79aEG
rZamI1riZwZ8sxQGTjku/lnfZWPVqevrSR1Xb+HUz5ZwtFR+3q2X/tLMehmco7rgB3G1h45+eIKy
hx5gnzzlhd9xIjyd6504J352p6Pmporex5+Fj+oRswQkw2s6dqMar9ZoDA+PEQTPc8nuGngN88uo
/eNvlMt+pewFHuwe0zCSe8jQdNeNQI3SZ0Ws1awU3eXGj/A0Nhp2X5ZRDGkWj6gbMfudZzPdej9T
tIkbR7r43ZKwyy/o53d5Qr+KxiZiKZye2XkEd3Vv6F1/6qCfFUr09fn868cEsOaDEzH71CiEYLJt
uDY39Ef/fY96bYPd5PdxteDbuH5nRxgtSHHS32r0VcuQ/voDBJtsaDo48DIs/tCoFuKAmLK7e3/8
9E1FDDYv/B0WfCBWxats8UtysMOKkUjeBC59T20N7lW0MT7ipp87BTxkPb7aj582NDl1MmxlWAer
H38sD9sJ+E96Jdvp8DXmNfAWWvg5KcBICkq/Ko9ajDOyX+8NxK82bxv49Ebw1r6/CurQ0APWhLuR
2/JnxBw7kUE9iO9x1SVfYzpZ+xrCdtSJmSZqP2d7BioUU0hK834tmA0iB58yMrFdiveIt5JNoG5l
bj32541jzIl0BoXu5iAgGf9u2P0aXEBI64IEFO0N0nrXFr3h1BHr1CVIoEIXgrSfyNL/a/oXTrVK
XfhBgBRvYMt+A/Trn0RRm7uMK+lDvTzBGF8L/vQ//clZVwtvVVNyqV8/QnCoqZPjWkfF5Ia8A+db
fxq5fvWOhtpIeTDKlR2sF//u55+hBY8wfux2rvRcNZY6cIlL9J14Lfj3/qqD2oQ3bLz2XTOI7kVG
NAoKbIqagGZLyM0/fjX9+SU/f27hR3jvnenPz+gAwg9HNPy6FfNHMCmMpThi4zDPjH4YnoDf6nLA
L/g5k8aOf3yUWEdQosk3s/zXfw0YZD0i5eIdLf1AnLnCq1j06/DD44WP2S5/9s0H5DEZ8a/e0sHZ
PpTjptthN+uGgn6po8BPb3nHY1qIx63GrV3ZyZZ+xIv1F+N+g+GhAbnMrxBNi58B3NbAAZclTjQ/
le8NFvwgGzJFSDpIXw1kGPakmM81++P/bd93nfifz4bR9TsL4e/fVMB//euvv/7Xb8Lg1V5vz2Uw
4Hubv//x36MC/yH9x/DKn88/YwjjkFe3v//59wTC35++fX2+//vbPm7v4e9//hL/PWvw97f95s//
9/m/lp/6r3/9HwAAAP//AwDb9NaU4CAAAA==
headers:
CF-RAY:
- 9591a4f4ec6715be-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 02 Jul 2025 22:40:22 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-allow-origin:
- '*'
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-model:
- text-embedding-3-small
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '199'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
via:
- envoy-router-568d6b5f7d-zm9nq
x-envoy-upstream-service-time:
- '254'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '9999973'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_b30caf2b5ce7746764e692bac19cadeb
status:
code: 200
message: OK
version: 1
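The blob above is the tail of the recorded response body: per the cassette headers, a gzip-compressed JSON payload from the text-embedding-3-small embeddings endpoint, stored base64-encoded. A minimal sketch of how the test suite below replays such a cassette, assuming pytest-recording/vcrpy defaults (the decorator and header filter are the same ones used in the diff that follows; the test name is hypothetical):

import pytest

@pytest.mark.vcr(filter_headers=["authorization"])
def test_replays_recorded_embeddings():
    # On replay, a request to the embeddings endpoint is matched against the
    # cassette and answered with the recorded body above; the authorization
    # header is filtered so no API key is ever written into the cassette.
    ...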

View File

@@ -52,6 +52,7 @@ from crewai.utilities.events.memory_events import (
MemoryRetrievalCompletedEvent,
)
@pytest.fixture
def ceo():
return Agent(
@@ -935,12 +936,27 @@ def test_cache_hitting_between_agents(researcher, writer, ceo):
read.return_value = "12"
crew.kickoff()
assert read.call_count == 2, "read was not called exactly twice"
# Check if read was called with the expected arguments
expected_calls = [
call(tool="multiplier", input={"first_number": 2, "second_number": 6}),
call(tool="multiplier", input={"first_number": 2, "second_number": 6}),
# Filter the mock calls to only include the ones with 'tool' and 'input' keywords
cache_calls = [
call
for call in read.call_args_list
if len(call.kwargs) == 2
and "tool" in call.kwargs
and "input" in call.kwargs
]
read.assert_has_calls(expected_calls, any_order=False)
# Check if we have the expected number of cache calls
assert len(cache_calls) == 2, f"Expected 2 cache calls, got {len(cache_calls)}"
# Check if both calls were made with the expected arguments
expected_call = call(
tool="multiplier", input={"first_number": 2, "second_number": 6}
)
assert cache_calls[0] == expected_call, f"First call mismatch: {cache_calls[0]}"
assert (
cache_calls[1] == expected_call
), f"Second call mismatch: {cache_calls[1]}"
@pytest.mark.vcr(filter_headers=["authorization"])
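A self-contained sketch of the call-filtering pattern the updated assertion relies on, using a plain MagicMock in place of the real cache read (the keyword shape matches the hunk above):

from unittest.mock import MagicMock, call

read = MagicMock(return_value="12")
read(tool="multiplier", input={"first_number": 2, "second_number": 6})
read("unrelated positional call")  # dropped by the filter below
read(tool="multiplier", input={"first_number": 2, "second_number": 6})

# Keep only calls made with exactly the 'tool' and 'input' keywords
cache_calls = [
    c
    for c in read.call_args_list
    if len(c.kwargs) == 2 and "tool" in c.kwargs and "input" in c.kwargs
]
assert len(cache_calls) == 2
expected = call(tool="multiplier", input={"first_number": 2, "second_number": 6})
assert cache_calls == [expected, expected]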
@@ -1797,7 +1813,7 @@ def test_hierarchical_kickoff_usage_metrics_include_manager(researcher):
agent=researcher, # *regular* agent
)
# ── 2. Stub out each agent’s _token_process.get_summary() ───────────────────
# ── 2. Stub out each agent's _token_process.get_summary() ───────────────────
researcher_metrics = UsageMetrics(
total_tokens=120, prompt_tokens=80, completion_tokens=40, successful_requests=2
)
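A hedged sketch of the stubbing approach from the hunk above; the UsageMetrics fields are the ones shown in the diff, while the import path and the researcher/crew wiring are assumptions for illustration:

from unittest.mock import patch

# Import path assumed; constructor fields taken verbatim from the hunk above.
from crewai.types.usage_metrics import UsageMetrics

researcher_metrics = UsageMetrics(
    total_tokens=120, prompt_tokens=80, completion_tokens=40, successful_requests=2
)

# Replace the agent's token summary so kickoff() aggregates known numbers
# without real LLM traffic (researcher is assumed to be an existing Agent).
with patch.object(
    researcher._token_process, "get_summary", return_value=researcher_metrics
):
    ...  # crew.kickoff(); assertions on crew.usage_metrics would go here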
@@ -1821,7 +1837,7 @@ def test_hierarchical_kickoff_usage_metrics_include_manager(researcher):
process=Process.hierarchical,
)
# We don’t care about LLM output here; patch execute_sync to avoid network
# We don't care about LLM output here; patch execute_sync to avoid network
with patch.object(
Task,
"execute_sync",
@@ -2489,17 +2505,19 @@ def test_using_contextual_memory():
memory=True,
)
with patch.object(ContextualMemory, "build_context_for_task", return_value="") as contextual_mem:
with patch.object(
ContextualMemory, "build_context_for_task", return_value=""
) as contextual_mem:
crew.kickoff()
contextual_mem.assert_called_once()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_memory_events_are_emitted():
events = defaultdict(list)
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MemorySaveStartedEvent)
def handle_memory_save_started(source, event):
events["MemorySaveStartedEvent"].append(event)
@@ -2562,6 +2580,7 @@ def test_memory_events_are_emitted():
assert len(events["MemoryRetrievalStartedEvent"]) == 1
assert len(events["MemoryRetrievalCompletedEvent"]) == 1
@pytest.mark.vcr(filter_headers=["authorization"])
def test_using_contextual_memory_with_long_term_memory():
from unittest.mock import patch
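A condensed sketch of the event-collection pattern used by test_memory_events_are_emitted above; the memory-event module matches the import block added at the top of this file, while the event-bus import path is an assumption:

from collections import defaultdict

from crewai.utilities.events.memory_events import MemorySaveStartedEvent
# Event-bus import path assumed for illustration.
from crewai.utilities.events import crewai_event_bus

events = defaultdict(list)
with crewai_event_bus.scoped_handlers():
    @crewai_event_bus.on(MemorySaveStartedEvent)
    def handle_memory_save_started(source, event):
        events["MemorySaveStartedEvent"].append(event)

    # ... register handlers for the remaining memory events, then crew.kickoff()
    # The hunk above then asserts exactly one event of each type was recorded.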
@@ -2585,7 +2604,9 @@ def test_using_contextual_memory_with_long_term_memory():
long_term_memory=LongTermMemory(),
)
with patch.object(ContextualMemory, "build_context_for_task", return_value="") as contextual_mem:
with patch.object(
ContextualMemory, "build_context_for_task", return_value=""
) as contextual_mem:
crew.kickoff()
contextual_mem.assert_called_once()
assert crew.memory is False
@@ -2686,7 +2707,9 @@ def test_using_contextual_memory_with_short_term_memory():
short_term_memory=ShortTermMemory(),
)
with patch.object(ContextualMemory, "build_context_for_task", return_value="") as contextual_mem:
with patch.object(
ContextualMemory, "build_context_for_task", return_value=""
) as contextual_mem:
crew.kickoff()
contextual_mem.assert_called_once()
assert crew.memory is False
@@ -2715,7 +2738,9 @@ def test_disabled_memory_using_contextual_memory():
memory=False,
)
with patch.object(ContextualMemory, "build_context_for_task", return_value="") as contextual_mem:
with patch.object(
ContextualMemory, "build_context_for_task", return_value=""
) as contextual_mem:
crew.kickoff()
contextual_mem.assert_not_called()

uv.lock (generated): 5400 lines changed

File diff suppressed because it is too large.