Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-08 23:58:34 +00:00)

Merge branch 'main' into feat-emit-stream-tool
@@ -288,26 +288,20 @@ To add a guardrail to a task, provide a validation function through the `guardra
```python Code
-from typing import Tuple, Union, Dict, Any
+from crewai import TaskOutput

-def validate_blog_content(result: str) -> Tuple[bool, Union[Dict[str, Any], str]]:
+def validate_blog_content(result: TaskOutput) -> Tuple[bool, Any]:
    """Validate blog content meets requirements."""
    try:
        # Check word count
        word_count = len(result.split())
        if word_count > 200:
-            return (False, {
-                "error": "Blog content exceeds 200 words",
-                "code": "WORD_COUNT_ERROR",
-                "context": {"word_count": word_count}
-            })
+            return (False, "Blog content exceeds 200 words")

        # Additional validation logic here
        return (True, result.strip())
    except Exception as e:
-        return (False, {
-            "error": "Unexpected error during validation",
-            "code": "SYSTEM_ERROR"
-        })
+        return (False, "Unexpected error during validation")

blog_task = Task(
    description="Write a blog post about AI",
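# A minimal sketch of how the guardrail above is typically attached to the task;
# the `expected_output`, `agent`, and `max_retries` values are illustrative
# assumptions, not taken from this commit.
blog_task = Task(
    description="Write a blog post about AI",
    expected_output="A blog post of at most 200 words",
    agent=writer_agent,               # hypothetical agent defined elsewhere
    guardrail=validate_blog_content,  # validation function defined above
    max_retries=3,                    # re-run the task when validation fails
)
# Note: depending on the crewAI version, the guardrail receives a TaskOutput
# object rather than a plain string; its text is then available as `result.raw`.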
@@ -325,29 +319,24 @@ blog_task = Task(
   - Type hints are recommended but optional

2. **Return Values**:
-   - Success: Return `(True, validated_result)`
-   - Failure: Return `(False, error_details)`
+   - On success: it returns a tuple of `(bool, Any)`. For example: `(True, validated_result)`
+   - On failure: it returns a tuple of `(bool, str)`. For example: `(False, "Error message explaining the failure")`

### Error Handling Best Practices

1. **Structured Error Responses**:
```python Code
-def validate_with_context(result: str) -> Tuple[bool, Union[Dict[str, Any], str]]:
+from crewai import TaskOutput
+
+def validate_with_context(result: TaskOutput) -> Tuple[bool, Any]:
    try:
        # Main validation logic
        validated_data = perform_validation(result)
        return (True, validated_data)
    except ValidationError as e:
-        return (False, {
-            "error": str(e),
-            "code": "VALIDATION_ERROR",
-            "context": {"input": result}
-        })
+        return (False, f"VALIDATION_ERROR: {str(e)}")
    except Exception as e:
-        return (False, {
-            "error": "Unexpected error",
-            "code": "SYSTEM_ERROR"
-        })
+        return (False, str(e))
```
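The snippet above calls `perform_validation` and catches `ValidationError` without defining them; they stand in for whatever domain-specific checks the task needs. A minimal, purely illustrative stand-in might look like this:

```python Code
class ValidationError(Exception):
    """Raised when task output fails a domain-specific check."""


def perform_validation(result) -> str:
    """Placeholder check: require non-empty text that mentions the expected topic."""
    text = str(result).strip()
    if not text:
        raise ValidationError("Output is empty")
    if "AI" not in text:
        raise ValidationError("Output does not mention the expected topic")
    return text
```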

2. **Error Categories**:
@@ -358,28 +347,25 @@ def validate_with_context(result: str) -> Tuple[bool, Union[Dict[str, Any], str]
3. **Validation Chain**:
```python Code
-from typing import Any, Dict, List, Tuple, Union
+from crewai import TaskOutput

-def complex_validation(result: str) -> Tuple[bool, Union[str, Dict[str, Any]]]:
+def complex_validation(result: TaskOutput) -> Tuple[bool, Any]:
    """Chain multiple validation steps."""
    # Step 1: Basic validation
    if not result:
-        return (False, {"error": "Empty result", "code": "EMPTY_INPUT"})
+        return (False, "Empty result")

    # Step 2: Content validation
    try:
        validated = validate_content(result)
        if not validated:
-            return (False, {"error": "Invalid content", "code": "CONTENT_ERROR"})
+            return (False, "Invalid content")

        # Step 3: Format validation
        formatted = format_output(validated)
        return (True, formatted)
    except Exception as e:
-        return (False, {
-            "error": str(e),
-            "code": "VALIDATION_ERROR",
-            "context": {"step": "content_validation"}
-        })
+        return (False, str(e))
```
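If several tasks share the same checks, a chain like this can also be built from small single-purpose functions. The `chain_guardrails` helper below is a hypothetical composition utility, not part of the crewAI API; it simply runs each check in order and stops at the first failure:

```python Code
from typing import Any, Callable, List, Tuple

# A guardrail-style check: takes the current value, returns (ok, value_or_error).
Check = Callable[[Any], Tuple[bool, Any]]


def chain_guardrails(checks: List[Check]) -> Check:
    """Compose several checks into one guardrail function (illustrative helper)."""
    def combined(result: Any) -> Tuple[bool, Any]:
        value = result
        for check in checks:
            ok, value = check(value)
            if not ok:
                return (False, value)  # value holds the error message here
        return (True, value)
    return combined


# Usage sketch: not_empty and under_200_words are assumed to follow the same
# (bool, Any) convention as the guardrails documented above.
# guardrail = chain_guardrails([not_empty, under_200_words])
```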

### Handling Guardrail Results
@@ -394,19 +380,16 @@ When a guardrail returns `(False, error)`:
Example with retry handling:
```python Code
-from typing import Optional, Tuple, Union
+from crewai import TaskOutput, Task

-def validate_json_output(result: str) -> Tuple[bool, Union[Dict[str, Any], str]]:
+def validate_json_output(result: TaskOutput) -> Tuple[bool, Any]:
    """Validate and parse JSON output."""
    try:
        # Try to parse as JSON
        data = json.loads(result)
        return (True, data)
    except json.JSONDecodeError as e:
-        return (False, {
-            "error": "Invalid JSON format",
-            "code": "JSON_ERROR",
-            "context": {"line": e.lineno, "column": e.colno}
-        })
+        return (False, "Invalid JSON format")

task = Task(
    description="Generate a JSON report",
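# The task definition above is truncated by the hunk; a minimal sketch of the retry
# wiring follows. The `expected_output`, `agent`, and `max_retries` values are
# illustrative assumptions, and the guardrail above also relies on `import json`.
task = Task(
    description="Generate a JSON report",
    expected_output="A valid JSON document",
    agent=reporting_agent,           # hypothetical agent defined elsewhere
    guardrail=validate_json_output,  # guardrail defined above
    # When the guardrail returns (False, error), the error text is passed back to
    # the agent and the task is retried, up to max_retries attempts.
    max_retries=3,
)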
@@ -30,7 +30,7 @@ pip install 'crewai[tools]'
Here are updated examples of how to use the JSONSearchTool effectively for searching within JSON files. These examples reflect the current implementation and usage patterns identified in the codebase.

```python Code
-from crewai.json_tools import JSONSearchTool  # Updated import path
+from crewai_tools import JSONSearchTool

# General JSON content search
# This approach is suitable when the JSON path is either known beforehand or can be dynamically identified.
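# A brief usage sketch, based on the patterns shown in the crewAI Tools
# documentation; the file path below is a placeholder.
tool = JSONSearchTool()  # search across any JSON content supplied at runtime

# Search limited to a specific JSON file whose path is known beforehand.
tool_with_path = JSONSearchTool(json_path="./path/to/your/file.json")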