diff --git a/docs/concepts/knowledge.mdx b/docs/concepts/knowledge.mdx
index b5827551a..13a875409 100644
--- a/docs/concepts/knowledge.mdx
+++ b/docs/concepts/knowledge.mdx
@@ -460,12 +460,12 @@ class SpaceNewsKnowledgeSource(BaseKnowledgeSource):
             data = response.json()
             articles = data.get('results', [])
 
-            formatted_data = self._format_articles(articles)
+            formatted_data = self.validate_content(articles)
             return {self.api_endpoint: formatted_data}
 
         except Exception as e:
             raise ValueError(f"Failed to fetch space news: {str(e)}")
 
-    def _format_articles(self, articles: list) -> str:
+    def validate_content(self, articles: list) -> str:
         """Format articles into readable text."""
         formatted = "Space News Articles:\n\n"
         for article in articles:
diff --git a/docs/concepts/llms.mdx b/docs/concepts/llms.mdx
index 8d815246f..f1d586bee 100644
--- a/docs/concepts/llms.mdx
+++ b/docs/concepts/llms.mdx
@@ -158,7 +158,11 @@ In this section, you'll find detailed examples that help you select, configure,
   ```toml Code
+  # Required
   ANTHROPIC_API_KEY=sk-ant-...
+
+  # Optional
+  ANTHROPIC_API_BASE=
   ```
 
   Example usage in your CrewAI project:
@@ -250,6 +254,40 @@ In this section, you'll find detailed examples that help you select, configure,
       model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
   )
   ```
+
+  Before using Amazon Bedrock, make sure you have `boto3` installed in your environment.
+
+  [Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/models-regions.html) is a managed service that provides access to multiple foundation models from top AI companies through a unified API, enabling secure and responsible AI application development.
+
+  | Model                   | Context Window     | Best For                                                           |
+  |-------------------------|--------------------|--------------------------------------------------------------------|
+  | Amazon Nova Pro         | Up to 300k tokens  | High-performance model balancing accuracy, speed, and cost-effectiveness across diverse tasks. |
+  | Amazon Nova Micro       | Up to 128k tokens  | High-performance, cost-effective text-only model optimized for lowest-latency responses. |
+  | Amazon Nova Lite        | Up to 300k tokens  | High-performance, affordable multimodal processing for images, video, and text with real-time capabilities. |
+  | Claude 3.7 Sonnet       | Up to 200k tokens  | High-performance model, best for complex reasoning, coding, and AI agents. |
+  | Claude 3.5 Sonnet v2    | Up to 200k tokens  | State-of-the-art model specialized in software engineering, agentic capabilities, and computer interaction at optimized cost. |
+  | Claude 3.5 Sonnet       | Up to 200k tokens  | High-performance model delivering superior intelligence and reasoning across diverse tasks with an optimal speed-cost balance. |
+  | Claude 3.5 Haiku        | Up to 200k tokens  | Fast, compact multimodal model optimized for quick responses and seamless human-like interactions. |
+  | Claude 3 Sonnet         | Up to 200k tokens  | Multimodal model balancing intelligence and speed for high-volume deployments. |
+  | Claude 3 Haiku          | Up to 200k tokens  | Compact, high-speed multimodal model optimized for quick responses and natural conversational interactions. |
+  | Claude 3 Opus           | Up to 200k tokens  | Most advanced multimodal model excelling at complex tasks with human-like reasoning and superior contextual understanding. |
+  | Claude 2.1              | Up to 200k tokens  | Enhanced version with an expanded context window, improved reliability, and reduced hallucinations for long-form and RAG applications. |
+  | Claude                  | Up to 100k tokens  | Versatile model excelling in sophisticated dialogue, creative content, and precise instruction following. |
+  | Claude Instant          | Up to 100k tokens  | Fast, cost-effective model for everyday tasks like dialogue, analysis, summarization, and document Q&A. |
+  | Llama 3.1 405B Instruct | Up to 128k tokens  | Advanced LLM for synthetic data generation, distillation, and inference for chatbots, coding, and domain-specific tasks. |
+  | Llama 3.1 70B Instruct  | Up to 128k tokens  | Powers complex conversations with superior contextual understanding, reasoning, and text generation. |
+  | Llama 3.1 8B Instruct   | Up to 128k tokens  | Advanced state-of-the-art model with language understanding, superior reasoning, and text generation. |
+  | Llama 3 70B Instruct    | Up to 8k tokens    | Powers complex conversations with superior contextual understanding, reasoning, and text generation. |
+  | Llama 3 8B Instruct     | Up to 8k tokens    | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. |
+  | Titan Text G1 - Lite    | Up to 4k tokens    | Lightweight, cost-effective model optimized for English tasks and fine-tuning, with a focus on summarization and content generation. |
+  | Titan Text G1 - Express | Up to 8k tokens    | Versatile model for general language tasks, chat, and RAG applications, with support for English and 100+ other languages. |
+  | Cohere Command          | Up to 4k tokens    | Model specialized in following user commands and delivering practical enterprise solutions. |
+  | Jurassic-2 Mid          | Up to 8,191 tokens | Cost-effective model balancing quality and affordability for diverse language tasks like Q&A, summarization, and content generation. |
+  | Jurassic-2 Ultra        | Up to 8,191 tokens | Model for advanced text generation and comprehension, excelling in complex tasks like analysis and content creation. |
+  | Jamba-Instruct          | Up to 256k tokens  | Model with an extended context window, optimized for cost-effective text generation, summarization, and Q&A. |
+  | Mistral 7B Instruct     | Up to 32k tokens   | This LLM follows instructions, completes requests, and generates creative text. |
+  | Mixtral 8x7B Instruct   | Up to 32k tokens   | An MoE LLM that follows instructions, completes requests, and generates creative text. |
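+
+  A minimal usage sketch (assuming AWS credentials such as `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_REGION_NAME` are already configured in your environment):
+
+  ```python Code
+  from crewai import LLM
+
+  # The model ID below is illustrative; use any Bedrock model you have access to.
+  llm = LLM(model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0")
+  ```
+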
diff --git a/docs/guides/advanced/customizing-prompts.mdx b/docs/guides/advanced/customizing-prompts.mdx
index 2622cdcca..4458184fc 100644
--- a/docs/guides/advanced/customizing-prompts.mdx
+++ b/docs/guides/advanced/customizing-prompts.mdx
@@ -1,4 +1,5 @@
----title: Customizing Prompts
+---
+title: Customizing Prompts
 description: Dive deeper into low-level prompt customization for CrewAI, enabling super custom and complex use cases for different models and languages.
 icon: message-pen
 ---
diff --git a/src/crewai/task.py b/src/crewai/task.py
index be400e99a..0c063e4f9 100644
--- a/src/crewai/task.py
+++ b/src/crewai/task.py
@@ -19,6 +19,8 @@ from typing import (
     Tuple,
     Type,
     Union,
+    get_args,
+    get_origin,
 )
 
 from pydantic import (
@@ -178,15 +180,29 @@ class Task(BaseModel):
         """
         if v is not None:
             sig = inspect.signature(v)
-            if len(sig.parameters) != 1:
+            positional_args = [
+                param
+                for param in sig.parameters.values()
+                if param.default is inspect.Parameter.empty
+            ]
+            if len(positional_args) != 1:
                 raise ValueError("Guardrail function must accept exactly one parameter")
 
             # Check return annotation if present, but don't require it
             return_annotation = sig.return_annotation
             if return_annotation != inspect.Signature.empty:
+
+                return_annotation_args = get_args(return_annotation)
                 if not (
-                    return_annotation == Tuple[bool, Any]
-                    or str(return_annotation) == "Tuple[bool, Any]"
+                    get_origin(return_annotation) is tuple
+                    and len(return_annotation_args) == 2
+                    and return_annotation_args[0] is bool
+                    and (
+                        return_annotation_args[1] is Any
+                        or return_annotation_args[1] is str
+                        or return_annotation_args[1] is TaskOutput
+                        or return_annotation_args[1] == Union[str, TaskOutput]
+                    )
                 ):
                     raise ValueError(
                         "If return type is annotated, it must be Tuple[bool, Any]"
diff --git a/src/crewai/utilities/events/crewai_event_bus.py b/src/crewai/utilities/events/crewai_event_bus.py
index c0cf50908..5df5ee689 100644
--- a/src/crewai/utilities/events/crewai_event_bus.py
+++ b/src/crewai/utilities/events/crewai_event_bus.py
@@ -67,15 +67,12 @@ class CrewAIEventsBus:
             source: The object emitting the event
             event: The event instance to emit
         """
-        event_type = type(event)
-        if event_type in self._handlers:
-            for handler in self._handlers[event_type]:
-                handler(source, event)
-        self._signal.send(source, event=event)
+        for event_type, handlers in self._handlers.items():
+            if isinstance(event, event_type):
+                for handler in handlers:
+                    handler(source, event)
 
-    def clear_handlers(self) -> None:
-        """Clear all registered event handlers - useful for testing"""
-        self._handlers.clear()
+        self._signal.send(source, event=event)
 
     def register_handler(
         self, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
diff --git a/src/crewai/utilities/events/flow_events.py b/src/crewai/utilities/events/flow_events.py
index 435d64214..8800b301b 100644
--- a/src/crewai/utilities/events/flow_events.py
+++ b/src/crewai/utilities/events/flow_events.py
@@ -1,6 +1,6 @@
 from typing import Any, Dict, Optional, Union
 
-from pydantic import BaseModel
+from pydantic import BaseModel, ConfigDict
 
 from .base_events import CrewEvent
 
@@ -52,9 +52,11 @@ class MethodExecutionFailedEvent(FlowEvent):
 
     flow_name: str
     method_name: str
-    error: Any
+    error: Exception
     type: str = "method_execution_failed"
 
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
 
 class FlowFinishedEvent(FlowEvent):
     """Event emitted when a flow completes execution"""
diff --git a/tests/task_test.py b/tests/task_test.py
index 3cd11cfc7..ac25a14f8 100644
--- a/tests/task_test.py
+++ b/tests/task_test.py
@@ -3,6 +3,8 @@
 import hashlib
 import json
 import os
+from functools import partial
+from typing import Tuple, Union
 from unittest.mock import MagicMock, patch
 
 import pytest
@@ -215,6 +217,75 @@ def test_multiple_output_type_error():
     )
 
 
+def test_guardrail_type_error():
+    desc = "Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting."
+ expected_output = "Bullet point list of 5 interesting ideas." + # Lambda function + Task( + description=desc, + expected_output=expected_output, + guardrail=lambda x: (True, x), + ) + + # Function + def guardrail_fn(x: TaskOutput) -> tuple[bool, TaskOutput]: + return (True, x) + + Task( + description=desc, + expected_output=expected_output, + guardrail=guardrail_fn, + ) + + class Object: + def guardrail_fn(self, x: TaskOutput) -> tuple[bool, TaskOutput]: + return (True, x) + + @classmethod + def guardrail_class_fn(cls, x: TaskOutput) -> tuple[bool, str]: + return (True, x) + + @staticmethod + def guardrail_static_fn(x: TaskOutput) -> tuple[bool, Union[str, TaskOutput]]: + return (True, x) + + obj = Object() + # Method + Task( + description=desc, + expected_output=expected_output, + guardrail=obj.guardrail_fn, + ) + # Class method + Task( + description=desc, + expected_output=expected_output, + guardrail=Object.guardrail_class_fn, + ) + # Static method + Task( + description=desc, + expected_output=expected_output, + guardrail=Object.guardrail_static_fn, + ) + + def error_fn(x: TaskOutput, y: bool) -> Tuple[bool, TaskOutput]: + return (y, x) + + Task( + description=desc, + expected_output=expected_output, + guardrail=partial(error_fn, y=True), + ) + + with pytest.raises(ValidationError): + Task( + description=desc, + expected_output=expected_output, + guardrail=error_fn, + ) + + @pytest.mark.vcr(filter_headers=["authorization"]) def test_output_pydantic_sequential(): class ScoreOutput(BaseModel): diff --git a/tests/utilities/events/test_crewai_event_bus.py b/tests/utilities/events/test_crewai_event_bus.py new file mode 100644 index 000000000..0dd8c8b34 --- /dev/null +++ b/tests/utilities/events/test_crewai_event_bus.py @@ -0,0 +1,34 @@ +from unittest.mock import Mock + +from crewai.utilities.events.base_events import CrewEvent +from crewai.utilities.events.crewai_event_bus import crewai_event_bus + + +class TestEvent(CrewEvent): + pass + + +def test_specific_event_handler(): + mock_handler = Mock() + + @crewai_event_bus.on(TestEvent) + def handler(source, event): + mock_handler(source, event) + + event = TestEvent(type="test_event") + crewai_event_bus.emit("source_object", event) + + mock_handler.assert_called_once_with("source_object", event) + + +def test_wildcard_event_handler(): + mock_handler = Mock() + + @crewai_event_bus.on(CrewEvent) + def handler(source, event): + mock_handler(source, event) + + event = TestEvent(type="test_event") + crewai_event_bus.emit("source_object", event) + + mock_handler.assert_called_once_with("source_object", event)