Merge branch 'main' into devin/1742244238-fix-issue-2395

Author: Brandon Hancock (bhancock_ai)
Date: 2025-03-20 12:12:44 -04:00 (committed by GitHub)
8 changed files with 175 additions and 16 deletions

View File

@@ -460,12 +460,12 @@ class SpaceNewsKnowledgeSource(BaseKnowledgeSource):
             data = response.json()
             articles = data.get('results', [])
-            formatted_data = self._format_articles(articles)
+            formatted_data = self.validate_content(articles)
             return {self.api_endpoint: formatted_data}
         except Exception as e:
             raise ValueError(f"Failed to fetch space news: {str(e)}")
 
-    def _format_articles(self, articles: list) -> str:
+    def validate_content(self, articles: list) -> str:
         """Format articles into readable text."""
         formatted = "Space News Articles:\n\n"
         for article in articles:
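Read on its own, the hunk shows `validate_content` taking a list of article dicts and returning a formatted string. Below is a minimal sketch of exercising the renamed method directly; the constructor keyword and the article fields are assumptions, not shown in this diff:

```python
# Hypothetical payload shaped like the Spaceflight News API's `results` list.
articles = [
    {"title": "Starship static fire", "summary": "Booster test completed."},
]

# `api_endpoint` as a keyword argument is an assumption based on the
# `self.api_endpoint` reference visible in the hunk above.
source = SpaceNewsKnowledgeSource(
    api_endpoint="https://api.spaceflightnewsapi.net/v4/articles"
)
print(source.validate_content(articles))  # "Space News Articles:\n\n..."
```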

View File

@@ -158,7 +158,11 @@ In this section, you'll find detailed examples that help you select, configure,
 <Accordion title="Anthropic">
 ```toml Code
+# Required
 ANTHROPIC_API_KEY=sk-ant-...
+
+# Optional
+ANTHROPIC_API_BASE=<custom-base-url>
 ```
 
 Example usage in your CrewAI project:
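The context line above introduces a usage example that the hunk truncates; here is a minimal sketch of what such usage typically looks like with CrewAI's `LLM` class (the specific model name is an assumption, not part of this diff):

```python
from crewai import LLM

# Assumes ANTHROPIC_API_KEY (and optionally ANTHROPIC_API_BASE) are set
# in the environment, matching the .env snippet above.
llm = LLM(
    model="anthropic/claude-3-5-sonnet-20240620",
    temperature=0.7,
)
```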
@@ -250,6 +254,40 @@ In this section, you'll find detailed examples that help you select, configure,
 model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
 )
 ```
+
+Before using Amazon Bedrock, make sure you have `boto3` installed in your environment.
+
+[Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/models-regions.html) is a managed service that provides access to multiple foundation models from top AI companies through a unified API, enabling secure and responsible AI application development.
+
+| Model | Context Window | Best For |
+|-------------------------|----------------------|-------------------------------------------------------------------|
+| Amazon Nova Pro | Up to 300k tokens | High-performance model balancing accuracy, speed, and cost-effectiveness across diverse tasks. |
+| Amazon Nova Micro | Up to 128k tokens | High-performance, cost-effective text-only model optimized for lowest-latency responses. |
+| Amazon Nova Lite | Up to 300k tokens | High-performance, affordable multimodal processing for images, video, and text with real-time capabilities. |
+| Claude 3.7 Sonnet | Up to 128k tokens | High-performance model, best for complex reasoning, coding, and AI agents. |
+| Claude 3.5 Sonnet v2 | Up to 200k tokens | State-of-the-art model specialized in software engineering, agentic capabilities, and computer interaction at optimized cost. |
+| Claude 3.5 Sonnet | Up to 200k tokens | High-performance model delivering superior intelligence and reasoning across diverse tasks with an optimal speed-cost balance. |
+| Claude 3.5 Haiku | Up to 200k tokens | Fast, compact multimodal model optimized for quick responses and seamless human-like interactions. |
+| Claude 3 Sonnet | Up to 200k tokens | Multimodal model balancing intelligence and speed for high-volume deployments. |
+| Claude 3 Haiku | Up to 200k tokens | Compact, high-speed multimodal model optimized for quick responses and natural conversational interactions. |
+| Claude 3 Opus | Up to 200k tokens | Most advanced multimodal model, excelling at complex tasks with human-like reasoning and superior contextual understanding. |
+| Claude 2.1 | Up to 200k tokens | Enhanced version with an expanded context window, improved reliability, and reduced hallucinations for long-form and RAG applications. |
+| Claude | Up to 100k tokens | Versatile model excelling in sophisticated dialogue, creative content, and precise instruction following. |
+| Claude Instant | Up to 100k tokens | Fast, cost-effective model for everyday tasks like dialogue, analysis, summarization, and document Q&A. |
+| Llama 3.1 405B Instruct | Up to 128k tokens | Advanced LLM for synthetic data generation, distillation, and inference for chatbots, coding, and domain-specific tasks. |
+| Llama 3.1 70B Instruct | Up to 128k tokens | Powers complex conversations with superior contextual understanding, reasoning, and text generation. |
+| Llama 3.1 8B Instruct | Up to 128k tokens | Advanced state-of-the-art model with language understanding, superior reasoning, and text generation. |
+| Llama 3 70B Instruct | Up to 8k tokens | Powers complex conversations with superior contextual understanding, reasoning, and text generation. |
+| Llama 3 8B Instruct | Up to 8k tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. |
+| Titan Text G1 - Lite | Up to 4k tokens | Lightweight, cost-effective model optimized for English tasks and fine-tuning, with a focus on summarization and content generation. |
+| Titan Text G1 - Express | Up to 8k tokens | Versatile model for general language tasks, chat, and RAG applications with support for English and 100+ languages. |
+| Cohere Command | Up to 4k tokens | Model specialized in following user commands and delivering practical enterprise solutions. |
+| Jurassic-2 Mid | Up to 8,191 tokens | Cost-effective model balancing quality and affordability for diverse language tasks like Q&A, summarization, and content generation. |
+| Jurassic-2 Ultra | Up to 8,191 tokens | Model for advanced text generation and comprehension, excelling in complex tasks like analysis and content creation. |
+| Jamba-Instruct | Up to 256k tokens | Model with an extended context window, optimized for cost-effective text generation, summarization, and Q&A. |
+| Mistral 7B Instruct | Up to 32k tokens | This LLM follows instructions, completes requests, and generates creative text. |
+| Mistral 8x7B Instruct | Up to 32k tokens | An MoE LLM that follows instructions, completes requests, and generates creative text. |
 </Accordion>
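Since the added text tells readers to install boto3 first, here is a hedged sketch of the corresponding setup; the model ID comes from the snippet above, while the region variable is a standard AWS/LiteLLM convention rather than something this diff specifies:

```python
# Prerequisite called out above: pip install boto3
import os

from crewai import LLM

# Assumes AWS credentials are already available (environment variables,
# shared credentials file, or an instance profile).
os.environ.setdefault("AWS_REGION_NAME", "us-east-1")

llm = LLM(model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0")
```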
 <Accordion title="Amazon SageMaker">

View File

@@ -1,4 +1,5 @@
----title: Customizing Prompts
+---
+title: Customizing Prompts
 description: Dive deeper into low-level prompt customization for CrewAI, enabling super custom and complex use cases for different models and languages.
 icon: message-pen
 ---

View File

@@ -19,6 +19,8 @@ from typing import (
     Tuple,
     Type,
     Union,
+    get_args,
+    get_origin,
 )
 
 from pydantic import (
@@ -178,15 +180,29 @@ class Task(BaseModel):
         """
         if v is not None:
             sig = inspect.signature(v)
-            if len(sig.parameters) != 1:
+            positional_args = [
+                param
+                for param in sig.parameters.values()
+                if param.default is inspect.Parameter.empty
+            ]
+            if len(positional_args) != 1:
                 raise ValueError("Guardrail function must accept exactly one parameter")
 
             # Check return annotation if present, but don't require it
             return_annotation = sig.return_annotation
             if return_annotation != inspect.Signature.empty:
+                return_annotation_args = get_args(return_annotation)
                 if not (
-                    return_annotation == Tuple[bool, Any]
-                    or str(return_annotation) == "Tuple[bool, Any]"
+                    get_origin(return_annotation) is tuple
+                    and len(return_annotation_args) == 2
+                    and return_annotation_args[0] is bool
+                    and (
+                        return_annotation_args[1] is Any
+                        or return_annotation_args[1] is str
+                        or return_annotation_args[1] is TaskOutput
+                        or return_annotation_args[1] == Union[str, TaskOutput]
+                    )
                 ):
                     raise ValueError(
                         "If return type is annotated, it must be Tuple[bool, Any]"

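To make the loosened validation concrete: the new check counts only parameters without defaults, and accepts return annotations whose origin is `tuple` with two arguments, the first `bool` and the second `Any`, `str`, `TaskOutput`, or `Union[str, TaskOutput]`. A sketch of guardrails that now pass (the task field values are placeholders):

```python
from typing import Any, Tuple

from crewai import Task
from crewai.tasks.task_output import TaskOutput

def strict_guardrail(output: TaskOutput) -> Tuple[bool, Any]:
    # One required parameter with an annotated return: valid before and after.
    return (True, output)

def lenient_guardrail(output: TaskOutput, retries: int = 3) -> tuple[bool, str]:
    # The defaulted extra parameter no longer trips the arity check, and the
    # lowercase tuple[...] form is recognized via get_origin()/get_args().
    return (True, output.raw)

task = Task(
    description="Summarize the findings",
    expected_output="A one-paragraph summary",
    guardrail=lenient_guardrail,
)
```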
View File

@@ -67,15 +67,12 @@ class CrewAIEventsBus:
             source: The object emitting the event
             event: The event instance to emit
         """
-        event_type = type(event)
-        if event_type in self._handlers:
-            for handler in self._handlers[event_type]:
-                handler(source, event)
-        self._signal.send(source, event=event)
-
-    def clear_handlers(self) -> None:
-        """Clear all registered event handlers - useful for testing"""
-        self._handlers.clear()
+        for event_type, handlers in self._handlers.items():
+            if isinstance(event, event_type):
+                for handler in handlers:
+                    handler(source, event)
+
+        self._signal.send(source, event=event)
 
     def register_handler(
         self, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
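Because emit now walks every registered type and matches with `isinstance`, a handler registered for a base event class also receives subclass events. A tiny sketch of the wildcard pattern this enables, mirroring the new tests at the bottom of this diff:

```python
from crewai.utilities.events.base_events import CrewEvent
from crewai.utilities.events.crewai_event_bus import crewai_event_bus

# One handler observing every event, since all events subclass CrewEvent.
@crewai_event_bus.on(CrewEvent)
def log_everything(source, event):
    print(f"{type(event).__name__} from {source}")
```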

View File

@@ -1,6 +1,6 @@
 from typing import Any, Dict, Optional, Union
 
-from pydantic import BaseModel
+from pydantic import BaseModel, ConfigDict
 
 from .base_events import CrewEvent
@@ -52,9 +52,11 @@ class MethodExecutionFailedEvent(FlowEvent):
     flow_name: str
     method_name: str
-    error: Any
+    error: Exception
     type: str = "method_execution_failed"
+    model_config = ConfigDict(arbitrary_types_allowed=True)
 
 
 class FlowFinishedEvent(FlowEvent):
     """Event emitted when a flow completes execution"""

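The `ConfigDict(arbitrary_types_allowed=True)` line is what lets pydantic accept `error: Exception`: pydantic v2 has no built-in schema for arbitrary classes like `Exception` and would otherwise fail at class-definition time. A quick standalone illustration (the class name is hypothetical):

```python
from pydantic import BaseModel, ConfigDict

class FailureEvent(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)
    error: Exception  # accepted only because of the config line above

event = FailureEvent(error=ValueError("boom"))
print(type(event.error).__name__)  # ValueError
```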
View File

@@ -3,6 +3,8 @@
 import hashlib
 import json
 import os
+from functools import partial
+from typing import Tuple, Union
 from unittest.mock import MagicMock, patch
 
 import pytest
@@ -215,6 +217,75 @@ def test_multiple_output_type_error():
 )
 
 
+def test_guardrail_type_error():
+    desc = "Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting."
+    expected_output = "Bullet point list of 5 interesting ideas."
+
+    # Lambda function
+    Task(
+        description=desc,
+        expected_output=expected_output,
+        guardrail=lambda x: (True, x),
+    )
+
+    # Function
+    def guardrail_fn(x: TaskOutput) -> tuple[bool, TaskOutput]:
+        return (True, x)
+
+    Task(
+        description=desc,
+        expected_output=expected_output,
+        guardrail=guardrail_fn,
+    )
+
+    class Object:
+        def guardrail_fn(self, x: TaskOutput) -> tuple[bool, TaskOutput]:
+            return (True, x)
+
+        @classmethod
+        def guardrail_class_fn(cls, x: TaskOutput) -> tuple[bool, str]:
+            return (True, x)
+
+        @staticmethod
+        def guardrail_static_fn(x: TaskOutput) -> tuple[bool, Union[str, TaskOutput]]:
+            return (True, x)
+
+    obj = Object()
+
+    # Method
+    Task(
+        description=desc,
+        expected_output=expected_output,
+        guardrail=obj.guardrail_fn,
+    )
+
+    # Class method
+    Task(
+        description=desc,
+        expected_output=expected_output,
+        guardrail=Object.guardrail_class_fn,
+    )
+
+    # Static method
+    Task(
+        description=desc,
+        expected_output=expected_output,
+        guardrail=Object.guardrail_static_fn,
+    )
+
+    def error_fn(x: TaskOutput, y: bool) -> Tuple[bool, TaskOutput]:
+        return (y, x)
+
+    # Binding y with partial leaves exactly one required parameter, so this passes
+    Task(
+        description=desc,
+        expected_output=expected_output,
+        guardrail=partial(error_fn, y=True),
+    )
+
+    # Two required parameters must be rejected by the validator
+    with pytest.raises(ValidationError):
+        Task(
+            description=desc,
+            expected_output=expected_output,
+            guardrail=error_fn,
+        )
+
+
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_output_pydantic_sequential():
     class ScoreOutput(BaseModel):

View File

@@ -0,0 +1,34 @@
+from unittest.mock import Mock
+
+from crewai.utilities.events.base_events import CrewEvent
+from crewai.utilities.events.crewai_event_bus import crewai_event_bus
+
+
+class TestEvent(CrewEvent):
+    pass
+
+
+def test_specific_event_handler():
+    mock_handler = Mock()
+
+    @crewai_event_bus.on(TestEvent)
+    def handler(source, event):
+        mock_handler(source, event)
+
+    event = TestEvent(type="test_event")
+    crewai_event_bus.emit("source_object", event)
+
+    mock_handler.assert_called_once_with("source_object", event)
+
+
+def test_wildcard_event_handler():
+    mock_handler = Mock()
+
+    @crewai_event_bus.on(CrewEvent)
+    def handler(source, event):
+        mock_handler(source, event)
+
+    event = TestEvent(type="test_event")
+    crewai_event_bus.emit("source_object", event)
+
+    mock_handler.assert_called_once_with("source_object", event)