Compare commits


5 Commits

Author | SHA1 | Message | Date
Brandon Hancock (bhancock_ai) | 27396a2fe1 | Merge branch 'main' into improvement/speed-up-calculate-node-levels | 2025-02-25 15:27:10 -05:00
Lorenze Jay | 62d0479fad | Merge branch 'main' into improvement/speed-up-calculate-node-levels | 2025-02-25 09:41:00 -08:00
Brandon Hancock (bhancock_ai) | 32f2f16251 | Merge branch 'main' into improvement/speed-up-calculate-node-levels | 2025-02-24 16:43:01 -05:00
Brandon Hancock (bhancock_ai) | 771cce027c | Merge branch 'main' into improvement/speed-up-calculate-node-levels | 2025-02-24 15:24:39 -05:00
Brandon Hancock | 476396c5d9 | incorporating fix from @misrasaurabh1 with additional type fix | 2025-02-24 12:31:10 -05:00
5 changed files with 10 additions and 194 deletions

View File

@@ -216,43 +216,10 @@ MODELS = {
"watsonx/ibm/granite-3-8b-instruct",
],
"bedrock": [
"bedrock/us.amazon.nova-pro-v1:0",
"bedrock/us.amazon.nova-micro-v1:0",
"bedrock/us.amazon.nova-lite-v1:0",
"bedrock/us.anthropic.claude-3-5-sonnet-20240620-v1:0",
"bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0",
"bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0",
"bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
"bedrock/us.anthropic.claude-3-sonnet-20240229-v1:0",
"bedrock/us.anthropic.claude-3-opus-20240229-v1:0",
"bedrock/us.anthropic.claude-3-haiku-20240307-v1:0",
"bedrock/us.meta.llama3-2-11b-instruct-v1:0",
"bedrock/us.meta.llama3-2-3b-instruct-v1:0",
"bedrock/us.meta.llama3-2-90b-instruct-v1:0",
"bedrock/us.meta.llama3-2-1b-instruct-v1:0",
"bedrock/us.meta.llama3-1-8b-instruct-v1:0",
"bedrock/us.meta.llama3-1-70b-instruct-v1:0",
"bedrock/us.meta.llama3-3-70b-instruct-v1:0",
"bedrock/us.meta.llama3-1-405b-instruct-v1:0",
"bedrock/eu.anthropic.claude-3-5-sonnet-20240620-v1:0",
"bedrock/eu.anthropic.claude-3-sonnet-20240229-v1:0",
"bedrock/eu.anthropic.claude-3-haiku-20240307-v1:0",
"bedrock/eu.meta.llama3-2-3b-instruct-v1:0",
"bedrock/eu.meta.llama3-2-1b-instruct-v1:0",
"bedrock/apac.anthropic.claude-3-5-sonnet-20240620-v1:0",
"bedrock/apac.anthropic.claude-3-5-sonnet-20241022-v2:0",
"bedrock/apac.anthropic.claude-3-sonnet-20240229-v1:0",
"bedrock/apac.anthropic.claude-3-haiku-20240307-v1:0",
"bedrock/amazon.nova-pro-v1:0",
"bedrock/amazon.nova-micro-v1:0",
"bedrock/amazon.nova-lite-v1:0",
"bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
"bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
"bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
"bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0",
"bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
"bedrock/anthropic.claude-3-opus-20240229-v1:0",
"bedrock/anthropic.claude-3-haiku-20240307-v1:0",
"bedrock/anthropic.claude-3-opus-20240229-v1:0",
"bedrock/anthropic.claude-v2:1",
"bedrock/anthropic.claude-v2",
"bedrock/anthropic.claude-instant-v1",
@@ -267,6 +234,8 @@ MODELS = {
"bedrock/ai21.j2-mid-v1",
"bedrock/ai21.j2-ultra-v1",
"bedrock/ai21.jamba-instruct-v1:0",
"bedrock/meta.llama2-13b-chat-v1",
"bedrock/meta.llama2-70b-chat-v1",
"bedrock/mistral.mistral-7b-instruct-v0:2",
"bedrock/mistral.mixtral-8x7b-instruct-v0:1",
],

View File

@@ -64,7 +64,6 @@ LLM_CONTEXT_WINDOW_SIZES = {
"gpt-4-turbo": 128000,
"o1-preview": 128000,
"o1-mini": 128000,
"o3-mini": 200000, # Based on official o3-mini specifications
# gemini
"gemini-2.0-flash": 1048576,
"gemini-1.5-pro": 2097152,
@@ -486,23 +485,10 @@ class LLM:
"""
Returns the context window size, using 75% of the maximum to avoid
cutting off messages mid-thread.
Raises:
ValueError: If a model's context window size is outside valid bounds (1024-2097152)
"""
if self.context_window_size != 0:
return self.context_window_size
MIN_CONTEXT = 1024
MAX_CONTEXT = 2097152 # Current max from gemini-1.5-pro
# Validate all context window sizes
for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
if value < MIN_CONTEXT or value > MAX_CONTEXT:
raise ValueError(
f"Context window for {key} must be between {MIN_CONTEXT} and {MAX_CONTEXT}"
)
self.context_window_size = int(
DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO
)
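
The surviving logic above reduces to caching the computed value and applying the usage ratio to the default maximum. A minimal self-contained sketch of that behavior, with illustrative constant values (not the library's actual numbers) and a trailing return that the hunk truncates; the per-model LLM_CONTEXT_WINDOW_SIZES lookup is left out:

DEFAULT_CONTEXT_WINDOW_SIZE = 128000  # illustrative assumption
CONTEXT_WINDOW_USAGE_RATIO = 0.75     # "75% of the maximum" per the docstring

class LLMSketch:
    def __init__(self) -> None:
        self.context_window_size = 0  # 0 means "not yet computed"

    def get_context_window_size(self) -> int:
        if self.context_window_size != 0:
            return self.context_window_size  # cached from an earlier call
        self.context_window_size = int(
            DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO
        )
        return self.context_window_size  # assumed; truncated in the hunk above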

View File

@@ -19,8 +19,6 @@ from typing import (
Tuple,
Type,
Union,
get_args,
get_origin,
)
from pydantic import (
@@ -174,29 +172,15 @@ class Task(BaseModel):
"""
if v is not None:
sig = inspect.signature(v)
positional_args = [
param
for param in sig.parameters.values()
if param.default is inspect.Parameter.empty
]
if len(positional_args) != 1:
if len(sig.parameters) != 1:
raise ValueError("Guardrail function must accept exactly one parameter")
# Check return annotation if present, but don't require it
return_annotation = sig.return_annotation
if return_annotation != inspect.Signature.empty:
return_annotation_args = get_args(return_annotation)
if not (
get_origin(return_annotation) is tuple
and len(return_annotation_args) == 2
and return_annotation_args[0] is bool
and (
return_annotation_args[1] is Any
or return_annotation_args[1] is str
or return_annotation_args[1] is TaskOutput
or return_annotation_args[1] == Union[str, TaskOutput]
)
return_annotation == Tuple[bool, Any]
or str(return_annotation) == "Tuple[bool, Any]"
):
raise ValueError(
"If return type is annotated, it must be Tuple[bool, Any]"
@@ -451,9 +435,9 @@ class Task(BaseModel):
content = (
json_output
if json_output
else (
pydantic_output.model_dump_json() if pydantic_output else result
)
else pydantic_output.model_dump_json()
if pydantic_output
else result
)
self._save_file(content)
crewai_event_bus.emit(self, TaskCompletedEvent(output=task_output))
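
Taken together, the hunks above drop the get_args/get_origin introspection in favor of a direct comparison, and count every parameter rather than only the positional ones. A self-contained sketch of the simplified validator, assembled from the lines visible in the diff:

import inspect
from typing import Any, Callable, Optional, Tuple

def validate_guardrail(v: Optional[Callable]) -> Optional[Callable]:
    if v is not None:
        sig = inspect.signature(v)
        # Every parameter counts, defaults included: exactly one is allowed.
        if len(sig.parameters) != 1:
            raise ValueError("Guardrail function must accept exactly one parameter")
        # Check the return annotation if present, but don't require it.
        return_annotation = sig.return_annotation
        if return_annotation != inspect.Signature.empty:
            if not (
                return_annotation == Tuple[bool, Any]
                # string form presumably covers stringified annotations
                or str(return_annotation) == "Tuple[bool, Any]"
            ):
                raise ValueError(
                    "If return type is annotated, it must be Tuple[bool, Any]"
                )
    return v

One consequence, visible in the test changes below: an annotation such as tuple[bool, str] no longer compares equal to Tuple[bool, Any], and a second parameter is rejected even when it has a default.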

View File

@@ -6,7 +6,7 @@ import pytest
from pydantic import BaseModel
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM
from crewai.llm import LLM
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolExecutionErrorEvent
from crewai.utilities.token_counter_callback import TokenCalcHandler
@@ -285,23 +285,6 @@ def test_o3_mini_reasoning_effort_medium():
assert isinstance(result, str)
assert "Paris" in result

def test_context_window_validation():
"""Test that context window validation works correctly."""
# Test valid window size
llm = LLM(model="o3-mini")
assert llm.get_context_window_size() == int(200000 * CONTEXT_WINDOW_USAGE_RATIO)
# Test invalid window size
with pytest.raises(ValueError) as excinfo:
with patch.dict(
"crewai.llm.LLM_CONTEXT_WINDOW_SIZES",
{"test-model": 500}, # Below minimum
clear=True,
):
llm = LLM(model="test-model")
llm.get_context_window_size()
assert "must be between 1024 and 2097152" in str(excinfo.value)

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.fixture

View File

@@ -1283,109 +1283,3 @@ def test_interpolate_valid_types():
assert parsed["optional"] is None
assert parsed["nested"]["flag"] is True
assert parsed["nested"]["empty"] is None

def test_guardrail_with_new_style_annotations():
"""Test that guardrails with new-style type annotations work correctly."""
# Define a guardrail with new-style annotation
def guardrail(result: TaskOutput) -> tuple[bool, str]:
return (True, result.raw.upper())
agent = MagicMock()
agent.role = "test_agent"
agent.execute_task.return_value = "test result"
agent.crew = None
task = Task(description="Test task", expected_output="Output", guardrail=guardrail)
result = task.execute_sync(agent=agent)
assert isinstance(result, TaskOutput)
assert result.raw == "TEST RESULT"

def test_guardrail_with_specific_return_type():
"""Test that guardrails with specific return types work correctly."""
# Define a guardrail with specific return type
def guardrail(result: TaskOutput) -> tuple[bool, TaskOutput]:
if "error" in result.raw.lower():
return (False, "Contains error")
return (True, result)
agent = MagicMock()
agent.role = "test_agent"
agent.execute_task.return_value = "success result"
agent.crew = None
task = Task(description="Test task", expected_output="Output", guardrail=guardrail)
result = task.execute_sync(agent=agent)
assert isinstance(result, TaskOutput)
assert result.raw == "success result"

def test_guardrail_with_positional_and_default_args():
"""Test that guardrails with positional and default arguments work correctly."""
# Define a guardrail with a positional argument and a default argument
def guardrail(result: TaskOutput, optional_arg=None) -> tuple[bool, str]:
return (True, result.raw.upper())
agent = MagicMock()
agent.role = "test_agent"
agent.execute_task.return_value = "test result"
agent.crew = None
# This should now work with the updated validator
task = Task(description="Test task", expected_output="Output", guardrail=guardrail)
result = task.execute_sync(agent=agent)
assert isinstance(result, TaskOutput)
assert result.raw == "TEST RESULT"

def test_guardrail_with_multiple_positional_args():
"""Test that guardrails with multiple positional arguments are rejected."""
# Define a guardrail with multiple positional arguments
def guardrail(result: TaskOutput, another_required_arg) -> tuple[bool, str]:
return (True, result.raw.upper())
agent = MagicMock()
agent.role = "test_agent"
agent.execute_task.return_value = "test result"
agent.crew = None
# This should raise a ValueError because guardrail must accept exactly one positional parameter
with pytest.raises(ValueError) as excinfo:
Task(description="Test task", expected_output="Output", guardrail=guardrail)
assert "Guardrail function must accept exactly one parameter" in str(excinfo.value)

def test_guardrail_with_positional_and_default_args():
"""Validate that the guardrail function has the correct signature and behavior.
While type hints provide static checking, this validator ensures runtime safety by:
1. Verifying the function accepts exactly one required parameter (the TaskOutput)
(additional parameters with default values are allowed)
2. Checking return type annotations match Tuple[bool, Any] or tuple[bool, Any] if present
3. Providing clear, immediate error messages for debugging
"""
# Define a guardrail with a positional argument and a default argument
def guardrail(result: TaskOutput, optional_arg=None) -> tuple[bool, str]:
return (True, result.raw.upper())
agent = MagicMock()
agent.role = "test_agent"
agent.execute_task.return_value = "test result"
agent.crew = None
# This should now work with the updated validator
task = Task(description="Test task", expected_output="Output", guardrail=guardrail)
result = task.execute_sync(agent=agent)
assert isinstance(result, TaskOutput)
assert result.raw == "TEST RESULT"
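
The removed tests above that pass an extra argument hinge on how parameters are counted. A quick runnable sketch of the distinction the simplified validator draws:

import inspect

def one_param(result): ...
def with_default(result, optional_arg=None): ...

# Defaults count toward the total, so only the first function passes the
# exactly-one-parameter check from the validator.
assert len(inspect.signature(one_param).parameters) == 1
assert len(inspect.signature(with_default).parameters) == 2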