mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-26 08:38:15 +00:00
refactor: Implement CrewAI Flow for email processing

- Add EmailState model for Flow state management
- Create EmailProcessingFlow class with event-based automation
- Update tools and crews for Flow integration
- Add comprehensive Flow tests
- Implement error handling and state tracking
- Add mock implementations for testing

This implementation uses CrewAI Flow features to create an event-based email processing system that can analyze emails, research senders, and generate appropriate responses using specialized AI crews.

Co-Authored-By: Joe Moura <joao@crewai.com>
This commit is contained in:
103
tests/test_complete_workflow.py
Normal file
103
tests/test_complete_workflow.py
Normal file
@@ -0,0 +1,103 @@
|
||||
"""Test script for complete email processing workflow"""
|
||||
from email_analysis_crew import EmailAnalysisCrew
|
||||
from response_crew import ResponseCrew
|
||||
from email_tool import EmailTool
|
||||
from mock_email_data import MockEmailThread
|
||||
import json
|
||||
|
||||
def test_email_analysis(email_tool, analysis_crew, thread_id):
|
||||
"""Test comprehensive email analysis including similar threads and research"""
|
||||
print("\nAnalyzing email thread...")
|
||||
|
||||
# Get thread context
|
||||
thread = email_tool.get_email_thread(thread_id)
|
||||
print(f"\nThread subject: {thread.subject}")
|
||||
|
||||
# Find similar threads
|
||||
similar = email_tool.find_similar_threads(thread.subject)
|
||||
print(f"\nFound {len(similar)} similar threads")
|
||||
|
||||
# Get sender history
|
||||
sender = thread.messages[0].from_email
|
||||
sender_info = email_tool.get_sender_history(sender)
|
||||
print(f"\nSender: {sender_info['name']} from {sender_info['company']}")
|
||||
print(f"Previous interactions: {sender_info['interaction_frequency']}")
|
||||
|
||||
# Analyze with crew
|
||||
analysis_result = analysis_crew.analyze_email(thread_id)
|
||||
print(f"\nAnalysis Results:")
|
||||
print(f"Response needed: {analysis_result.get('response_needed', False)}")
|
||||
print(f"Priority: {analysis_result.get('priority', 'error')}")
|
||||
print(f"Decision factors:")
|
||||
context = analysis_result.get('analysis', {}).get('context', {})
|
||||
print(f"- Thread type: {context.get('thread_type', 'unknown')}")
|
||||
print(f"- Similar threads found: {analysis_result.get('similar_threads_found', 0)}")
|
||||
print(f"- Interaction frequency: {sender_info.get('interaction_frequency', 'unknown')}")
|
||||
print(f"- Urgency indicators: {context.get('urgency_indicators', False)}")
|
||||
print(f"- Conversation stage: {context.get('conversation_stage', 'unknown')}")
|
||||
|
||||
return analysis_result
|
||||
|
||||
def _draft_and_show_response(response_crew, thread_id, analysis_result):
    """Draft a response for *thread_id* with the response crew and print it.

    Shared by both workflow scenarios; returns the crew's raw result dict.
    """
    print("\nGenerating response...")
    response_result = response_crew.draft_response(thread_id, analysis_result)
    print("\nGenerated Response:")
    print(json.dumps(response_result.get('response', {}), indent=2))
    return response_result


def test_complete_workflow():
    """Test the complete email processing workflow.

    Runs two scenarios: a thread from a frequent contact that should get a
    response, and a first-time-sender thread that should not.
    Returns True on success, False when any step raised.
    """
    try:
        print("\nTesting Complete Email Processing Workflow")
        print("=========================================")

        # Initialize tools and crews
        email_tool = EmailTool()
        analysis_crew = EmailAnalysisCrew()
        response_crew = ResponseCrew()

        # Test 1: Process email requiring response (weekly interaction)
        print("\nTest 1: Processing email requiring response")
        print("------------------------------------------")
        thread_id = "thread_1"  # Meeting follow-up thread from frequent contact

        analysis_result = test_email_analysis(email_tool, analysis_crew, thread_id)

        if analysis_result.get('response_needed', False):
            response_result = _draft_and_show_response(response_crew, thread_id, analysis_result)

            # Verify response matches context
            print("\nResponse Analysis:")
            awareness = response_result['response']['review_notes']['context_awareness']
            print(f"Tone matches relationship: {awareness['relationship_acknowledged']}")
            print(f"Priority reflected: {awareness['priority_reflected']}")
            print(f"Background used: {awareness['background_used']}")
        else:
            print("\nNo response required.")

        # Test 2: Process email not requiring response (first-time sender)
        print("\nTest 2: Processing email not requiring response")
        print("----------------------------------------------")
        thread_id = "thread_3"  # First-time contact

        analysis_result = test_email_analysis(email_tool, analysis_crew, thread_id)

        if analysis_result.get('response_needed', False):
            _draft_and_show_response(response_crew, thread_id, analysis_result)

            # Verify response matches context
            print("\nResponse Analysis:")
            context = analysis_result.get('analysis', {}).get('context', {})
            print(f"Thread type: {context.get('thread_type', 'unknown')}")
            print(f"Conversation stage: {context.get('conversation_stage', 'unknown')}")
            print(f"Response priority: {analysis_result.get('priority', 'unknown')}")
        else:
            print("\nNo response required - First time sender with no urgent context")

        print("\nWorkflow test completed successfully!")
        return True

    except Exception as e:
        # The old message said "Analysis error", but this handler covers the
        # whole workflow (setup, analysis, and response drafting).
        print(f"Workflow error: {str(e)}")
        return False
|
||||
|
||||
# Allow running the end-to-end workflow check directly as a script.
if __name__ == "__main__":
    test_complete_workflow()
|
||||
64
tests/test_crewai_components.py
Normal file
64
tests/test_crewai_components.py
Normal file
@@ -0,0 +1,64 @@
|
||||
from crewai import Agent, Task, Crew, Process
|
||||
from typing import List
|
||||
import json
|
||||
from pydantic import BaseModel
|
||||
import os
|
||||
|
||||
# Define output models
|
||||
class EmailAnalysis(BaseModel):
    """Structured output the analysis task is asked to emit (via output_json)."""
    # Whether the analyzed email requires a reply.
    needs_response: bool
    # Priority label produced by the agent (free-form string — TODO confirm vocabulary, e.g. "high"/"low").
    priority: str
    # Free-text contextual summary of the email.
    context: str
|
||||
|
||||
# Sample email data for testing
SAMPLE_EMAIL = {
    "subject": "Meeting Follow-up",
    "body": "Thanks for the great discussion yesterday. Looking forward to next steps.",
    "sender": "john@example.com"
}

# Test Agent Creation
# Single researcher agent; delegation is enabled even though it is the only
# agent in the crew below.
researcher = Agent(
    role="Email Researcher",
    goal="Analyze email content and gather relevant context",
    backstory="Expert at analyzing communication patterns and gathering contextual information",
    verbose=True,
    allow_delegation=True
)

# Test Task Creation
# The sample email is serialized straight into the task description; the task
# is asked to return JSON matching the EmailAnalysis model.
analysis_task = Task(
    description=f"Analyze this email content and determine if it requires a response: {json.dumps(SAMPLE_EMAIL)}",
    agent=researcher,
    expected_output="Detailed analysis of email content and response requirement",
    output_json=EmailAnalysis
)

# Test Crew Creation with Sequential Process
# One agent, one task, executed sequentially.
crew = Crew(
    agents=[researcher],
    tasks=[analysis_task],
    process=Process.sequential,
    verbose=True
)
|
||||
|
||||
# Test execution with error handling
if __name__ == "__main__":
    try:
        # Ensure we have an API key before kicking off any LLM calls.
        if not os.getenv("OPENAI_API_KEY"):
            print("Please set OPENAI_API_KEY environment variable")
            # raise SystemExit instead of the site-module exit(): exit() is
            # only guaranteed in interactive sessions. Behavior is the same
            # (SystemExit is not caught by the except Exception below).
            raise SystemExit(1)

        result = crew.kickoff()
        print("Execution Results:", result)

        # Access structured output
        if hasattr(result, "output") and result.output:
            # model_validate_json is the pydantic v2 replacement for the
            # deprecated parse_raw (crewAI requires pydantic >= 2).
            analysis = EmailAnalysis.model_validate_json(result.output)
            print("\nStructured Analysis:")
            print(f"Needs Response: {analysis.needs_response}")
            print(f"Priority: {analysis.priority}")
            print(f"Context: {analysis.context}")
    except Exception as e:
        print(f"Error during execution: {str(e)}")
|
||||
93
tests/test_email_crew.py
Normal file
93
tests/test_email_crew.py
Normal file
@@ -0,0 +1,93 @@
|
||||
"""Test script for email analysis crew"""
|
||||
from email_analysis_crew import EmailAnalysisCrew
|
||||
from email_tool import EmailTool
|
||||
import json
|
||||
|
||||
def test_email_tool():
    """Test the email tool functionality first"""
    try:
        tool = EmailTool()

        # get_thread: fetch a known thread and show its headline fields.
        thread_info = tool._run("get_thread", thread_id="thread_1")
        print("\nThread details:")
        print(f"Subject: {thread_info['subject']}")
        print(f"Participants: {', '.join(thread_info['participants'])}")

        # find_similar: search by keyword and list matching subjects.
        matches = tool._run("find_similar", query="meeting")
        print("\nSimilar threads:")
        for match in matches['threads']:
            print(f"- {match['subject']}")

        # get_history: look up what we know about a sender.
        history = tool._run("get_history", sender_email="john@example.com")
        print("\nSender history:")
        print(f"Name: {history['name']}")
        print(f"Company: {history['company']}")

        # analyze_context: summarize the conversation so far.
        context = tool._run("analyze_context", thread_id="thread_1")
        print("\nContext analysis:")
        print(f"Thread length: {context['context_summary']['thread_length']}")
        print(f"Relationship: {context['context_summary']['sender_relationship']}")

        return True
    except Exception as e:
        print(f"Tool test error: {str(e)}")
        return False
|
||||
|
||||
def _report_analysis(result):
    """Print the crew's analysis summary for one thread.

    Shared by both thread scenarios; dumps context/research/strategy details
    only when the crew decided a response is needed.
    """
    print("\nAnalysis Results:")
    print(f"Thread ID: {result['thread_id']}")
    print(f"Response Needed: {result['response_needed']}")
    print(f"Priority: {result['priority']}")

    if result['response_needed']:
        print("\nContext Analysis:")
        print(json.dumps(result['analysis']['context'], indent=2))
        print("\nSender Research:")
        print(json.dumps(result['analysis']['research'], indent=2))
        print("\nResponse Strategy:")
        print(json.dumps(result['analysis']['strategy'], indent=2))


def test_email_analysis():
    """Test the email analysis crew functionality.

    Verifies the underlying tool first, then analyzes two threads: an
    existing meeting follow-up and a brand-new inquiry. Returns True on
    success, False when the tool check or any analysis step failed.
    """
    if not test_email_tool():
        print("Skipping crew test due to tool failure")
        return False

    try:
        # Initialize crew
        crew = EmailAnalysisCrew()
        print("\nTesting email analysis crew...")

        # Test analysis of thread_1 (meeting follow-up thread)
        print("\nAnalyzing meeting follow-up thread...")
        _report_analysis(crew.analyze_email("thread_1"))

        # Test analysis of thread_3 (new inquiry)
        print("\nAnalyzing new inquiry thread...")
        _report_analysis(crew.analyze_email("thread_3"))

        print("\nAll tests completed successfully!")
        return True

    except Exception as e:
        print(f"Error during testing: {str(e)}")
        return False
|
||||
|
||||
# Allow running the crew test directly as a script.
if __name__ == "__main__":
    test_email_analysis()
|
||||
195
tests/test_email_flow.py
Normal file
195
tests/test_email_flow.py
Normal file
@@ -0,0 +1,195 @@
|
||||
"""
|
||||
Test suite for email processing flow implementation.
|
||||
"""
|
||||
import pytest
|
||||
from datetime import datetime
|
||||
from typing import Dict, List
|
||||
from unittest.mock import MagicMock
|
||||
from crewai.tools import BaseTool
|
||||
|
||||
from email_processor.models import EmailState
|
||||
from email_processor.email_flow import EmailProcessingFlow
|
||||
from email_processor.email_analysis_crew import EmailAnalysisCrew
|
||||
from email_processor.response_crew import ResponseCrew
|
||||
|
||||
class MockGmailTool(BaseTool):
    """Stand-in for the real Gmail tool; serves deterministic canned data."""
    name: str = "Gmail Tool"
    description: str = "Tool for interacting with Gmail"

    def get_latest_emails(self, limit: int = 5) -> List[Dict]:
        """Return *limit* synthetic inbox entries."""
        inbox = []
        for idx in range(limit):
            inbox.append({
                "id": f"email_{idx}",
                "thread_id": f"thread_{idx}",
                "subject": f"Test Email {idx}",
                "sender": "test@example.com",
                "body": f"Test email body {idx}",
                "date": datetime.now().isoformat(),
            })
        return inbox

    def get_thread_history(self, thread_id: str) -> List[Dict]:
        """Return three synthetic prior messages for *thread_id*."""
        history = []
        for idx in range(3):
            history.append({
                "id": f"history_{idx}",
                "thread_id": thread_id,
                "subject": f"Previous Email {idx}",
                "sender": "test@example.com",
                "body": f"Previous email body {idx}",
                "date": datetime.now().isoformat(),
            })
        return history

    def get_sender_info(self, email: str) -> Dict:
        """Return a canned profile for the given sender address."""
        return {
            "email": email,
            "name": "Test User",
            "company": "Test Corp",
            "previous_threads": ["thread_1", "thread_2"],
            "interaction_history": {
                "total_emails": 10,
                "last_interaction": datetime.now().isoformat(),
            },
        }

    def _run(self, method: str = "get_latest_emails", **kwargs) -> Dict:
        """Required implementation of BaseTool._run: dispatch by method name."""
        if method == "get_latest_emails":
            return self.get_latest_emails(kwargs.get("limit", 5))
        if method == "get_thread_history":
            return self.get_thread_history(kwargs.get("thread_id"))
        if method == "get_sender_info":
            return self.get_sender_info(kwargs.get("email"))
        # Unknown operations fall through to None (matches original behavior).
        return None
|
||||
|
||||
@pytest.fixture
def mock_crews(monkeypatch):
    """Patch both crews with deterministic fakes for the duration of a test."""

    def fake_analyze(*args, **kwargs):
        # Always flags the email as needing a high-priority response.
        email = kwargs.get("email", {})
        return {
            "email_id": email.get("id", "unknown"),
            "thread_id": email.get("thread_id", "unknown"),
            "response_needed": True,
            "priority": "high",
            "similar_threads": ["thread_1"],
            "sender_context": {"previous_interactions": 5},
            "company_info": {"name": "Test Corp", "industry": "Technology"},
            "response_strategy": {"tone": "professional", "key_points": ["previous collaboration"]},
        }

    def fake_draft(*args, **kwargs):
        # Canned, already-reviewed response payload.
        email = kwargs.get("email", {})
        return {
            "email_id": email.get("id", "unknown"),
            "response_text": "Thank you for your email. We appreciate your continued collaboration.",
            "strategy": {"type": "professional", "focus": "relationship building"},
            "metadata": {
                "generated_at": datetime.now().isoformat(),
                "reviewed": True,
                "review_feedback": {"quality": "high", "tone": "appropriate"},
            },
        }

    monkeypatch.setattr(EmailAnalysisCrew, "analyze_email", fake_analyze)
    monkeypatch.setattr(ResponseCrew, "draft_response", fake_draft)
|
||||
|
||||
@pytest.fixture
def email_flow(monkeypatch):
    """Create email flow with mocked components"""
    mock_tool = MockGmailTool()

    # Replacement __init__: bypasses EmailProcessingFlow's own initializer
    # entirely (note it does not call super().__init__()) and wires the mock
    # Gmail tool into both crews before initializing flow state.
    def mock_init(self):
        self.gmail_tool = mock_tool
        self.analysis_crew = EmailAnalysisCrew(gmail_tool=mock_tool)
        self.response_crew = ResponseCrew(gmail_tool=mock_tool)
        self._state = EmailState()
        self._initialize_state()

    monkeypatch.setattr(EmailProcessingFlow, "__init__", mock_init)
    return EmailProcessingFlow()
|
||||
|
||||
def test_email_flow_initialization(email_flow):
|
||||
"""Test flow initialization and state setup"""
|
||||
# Verify state initialization
|
||||
assert hasattr(email_flow._state, "latest_emails")
|
||||
assert hasattr(email_flow._state, "analysis_results")
|
||||
assert hasattr(email_flow._state, "generated_responses")
|
||||
assert isinstance(email_flow._state.latest_emails, list)
|
||||
assert isinstance(email_flow._state.analysis_results, dict)
|
||||
assert isinstance(email_flow._state.generated_responses, dict)
|
||||
|
||||
def test_email_fetching(email_flow):
|
||||
"""Test email fetching with 5-email limit"""
|
||||
email_flow.kickoff()
|
||||
|
||||
# Verify email fetching
|
||||
assert len(email_flow._state.latest_emails) <= 5
|
||||
assert len(email_flow._state.latest_emails) > 0
|
||||
assert all(isinstance(email, dict) for email in email_flow._state.latest_emails)
|
||||
|
||||
def test_email_analysis(email_flow, mock_crews):
|
||||
"""Test email analysis and response decision"""
|
||||
email_flow.kickoff()
|
||||
|
||||
# Verify analysis results
|
||||
assert len(email_flow._state.analysis_results) > 0
|
||||
for email_id, analysis in email_flow._state.analysis_results.items():
|
||||
assert "response_needed" in analysis
|
||||
assert "priority" in analysis
|
||||
assert isinstance(analysis["response_needed"], bool)
|
||||
|
||||
def test_response_generation(email_flow, mock_crews):
|
||||
"""Test response generation for emails needing response"""
|
||||
email_flow.kickoff()
|
||||
|
||||
# Verify response generation
|
||||
for email_id, analysis in email_flow._state.analysis_results.items():
|
||||
if analysis["response_needed"]:
|
||||
assert email_id in email_flow._state.generated_responses
|
||||
response = email_flow._state.generated_responses[email_id]
|
||||
assert "response_text" in response
|
||||
assert "strategy" in response
|
||||
assert "metadata" in response
|
||||
|
||||
def test_complete_flow(email_flow, mock_crews):
|
||||
"""Test complete email processing flow"""
|
||||
result = email_flow.kickoff()
|
||||
|
||||
# Verify complete flow execution
|
||||
assert len(email_flow._state.latest_emails) <= 5
|
||||
assert isinstance(email_flow._state.analysis_results, dict)
|
||||
assert isinstance(email_flow._state.generated_responses, dict)
|
||||
|
||||
# Verify response generation for emails needing response
|
||||
for email_id, analysis in email_flow._state.analysis_results.items():
|
||||
if analysis["response_needed"]:
|
||||
assert email_id in email_flow._state.generated_responses
|
||||
assert email_flow._state.generated_responses[email_id]["email_id"] == email_id
|
||||
|
||||
def test_error_handling(email_flow):
|
||||
"""Test error handling in flow execution"""
|
||||
# Simulate error in email fetching by modifying _run method
|
||||
original_run = email_flow.gmail_tool._run
|
||||
|
||||
def mock_run(method: str = None, **kwargs):
|
||||
if method == "get_latest_emails":
|
||||
raise Exception("Test error")
|
||||
return original_run(method, **kwargs)
|
||||
|
||||
email_flow.gmail_tool._run = mock_run
|
||||
result = email_flow.kickoff()
|
||||
|
||||
# Verify error handling
|
||||
assert "flow_execution" in email_flow._state.errors
|
||||
assert isinstance(email_flow._state.errors["flow_execution"], list)
|
||||
assert len(email_flow._state.errors["flow_execution"]) > 0
|
||||
assert "Test error" in email_flow._state.errors["flow_execution"][0]["error"]
|
||||
|
||||
# Restore original method
|
||||
email_flow.gmail_tool._run = original_run
|
||||
48
tests/test_email_tool.py
Normal file
48
tests/test_email_tool.py
Normal file
@@ -0,0 +1,48 @@
|
||||
"""Test script for email processing tool"""
|
||||
from gmail_tool import EmailTool
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
def test_email_tool():
    """Test email processing tool functionality"""
    try:
        # Build the tool under test.
        email_tool = EmailTool()

        # Thread retrieval.
        print("\nTesting thread retrieval...")
        retrieved = email_tool.get_email_thread("thread_1")
        print(f"Retrieved thread: {retrieved.subject}")
        print(f"Participants: {', '.join(retrieved.participants)}")
        print(f"Messages: {len(retrieved.messages)}")

        # Similar-thread search.
        print("\nTesting similar thread search...")
        matches = email_tool.find_similar_threads("meeting")
        print(f"Found {len(matches)} similar threads")
        for match in matches:
            print(f"- {match.subject}")

        # Sender history lookup.
        print("\nTesting sender history...")
        sender_history = email_tool.get_sender_history("john@example.com")
        print(f"Sender: {sender_history['name']} from {sender_history['company']}")
        print(f"Last interaction: {sender_history['last_interaction']}")
        print(f"Interaction frequency: {sender_history['interaction_frequency']}")

        # Thread context analysis.
        print("\nTesting thread context analysis...")
        ctx = email_tool.analyze_thread_context("thread_1")
        print("Context Summary:")
        print(f"Thread length: {ctx['context_summary']['thread_length']} messages")
        print(f"Time span: {ctx['context_summary']['time_span']} days")
        print(f"Relationship: {ctx['context_summary']['sender_relationship']}")

        print("\nAll tests completed successfully!")
        return True

    except Exception as e:
        print(f"Error during testing: {str(e)}")
        return False
|
||||
|
||||
# Allow running the tool checks directly as a script.
if __name__ == "__main__":
    test_email_tool()
|
||||
45
tests/test_response_crew.py
Normal file
45
tests/test_response_crew.py
Normal file
@@ -0,0 +1,45 @@
|
||||
"""Test script for response crew functionality"""
|
||||
from response_crew import ResponseCrew
|
||||
from email_analysis_crew import EmailAnalysisCrew
|
||||
import json
|
||||
|
||||
def test_response_crew():
    """Test the response crew functionality"""
    try:
        # Run the analysis crew first; its verdict drives the drafting step.
        analyzer = EmailAnalysisCrew()
        analysis = analyzer.analyze_email("thread_1")

        # Nothing to draft if the analysis says no reply is needed.
        if not analysis.get("response_needed", False):
            print("No response needed for this thread")
            return True

        # Build the crew under test and draft a reply.
        drafter = ResponseCrew()
        print("\nTesting response crew...")

        outcome = drafter.draft_response("thread_1", analysis)

        print("\nResponse Results:")
        print(f"Thread ID: {outcome['thread_id']}")

        # Bail out early if the crew reported a failure.
        if outcome.get("error"):
            print(f"Error: {outcome['error']}")
            return False

        print("\nContent Strategy:")
        print(json.dumps(outcome['strategy_used'], indent=2))

        print("\nFinal Response:")
        print(json.dumps(outcome['response'], indent=2))

        print("\nAll tests completed successfully!")
        return True

    except Exception as e:
        print(f"Error during testing: {str(e)}")
        return False
|
||||
|
||||
# Allow running the response-crew check directly as a script.
if __name__ == "__main__":
    test_response_crew()
|
||||
Reference in New Issue
Block a user