chore: refactor llms to base models

Greyson LaLonde
2025-11-10 14:22:09 -05:00
parent 0f1c173d02
commit 46785adf58
60 changed files with 706 additions and 612 deletions

View File

@@ -1212,7 +1212,7 @@ Learn how to get the most out of your LLM configuration:
```python
import httpx
from crewai import LLM
from crewai.llms.hooks import BaseInterceptor
from crewai.llm.hooks import BaseInterceptor
class CustomInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Custom interceptor to modify requests and responses."""

View File

@@ -9,7 +9,7 @@ from crewai.crews.crew_output import CrewOutput
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.llm_guardrail import LLMGuardrail

View File

@@ -39,7 +39,7 @@ from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
from crewai.lite_agent import LiteAgent
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.mcp import (
MCPClient,
MCPServerConfig,
@@ -626,7 +626,7 @@ class Agent(BaseAgent):
)
self.agent_executor = CrewAgentExecutor(
llm=self.llm,
llm=self.llm, # type: ignore[arg-type]
task=task, # type: ignore[arg-type]
agent=self,
crew=self.crew,
@@ -803,6 +803,7 @@ class Agent(BaseAgent):
from crewai.tools.base_tool import BaseTool
from crewai.tools.mcp_native_tool import MCPNativeTool
transport: StdioTransport | HTTPTransport | SSETransport
if isinstance(mcp_config, MCPServerStdio):
transport = StdioTransport(
command=mcp_config.command,
@@ -896,10 +897,12 @@ class Agent(BaseAgent):
server_name=server_name,
run_context=None,
)
if mcp_config.tool_filter(context, tool):
# Try new signature first
if mcp_config.tool_filter(context, tool): # type: ignore[arg-type,call-arg]
filtered_tools.append(tool)
except (TypeError, AttributeError):
if mcp_config.tool_filter(tool):
# Fallback to old signature
if mcp_config.tool_filter(tool): # type: ignore[arg-type,call-arg]
filtered_tools.append(tool)
else:
# Not callable - include tool
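The hunk above keeps both `tool_filter` call shapes working: the new `(context, tool)` signature is tried first, and `TypeError`/`AttributeError` falls back to the legacy `(tool)` signature. A minimal sketch of both shapes (tool and context types are hypothetical stand-ins):

```python
from typing import Any


def new_style_filter(context: Any, tool: Any) -> bool:
    """New signature: receives the run context plus the tool."""
    return not getattr(tool, "name", "").startswith("internal_")


def old_style_filter(tool: Any) -> bool:
    """Legacy signature: receives only the tool."""
    return not getattr(tool, "name", "").startswith("internal_")


def apply_filter(tool_filter: Any, context: Any, tool: Any) -> bool:
    try:
        return bool(tool_filter(context, tool))  # try new signature first
    except (TypeError, AttributeError):
        return bool(tool_filter(tool))  # fall back to old signature
```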
@@ -974,7 +977,9 @@ class Agent(BaseAgent):
path = parsed.path.replace("/", "_").strip("_")
return f"{domain}_{path}" if path else domain
def _get_mcp_tool_schemas(self, server_params: dict) -> dict[str, dict]:
def _get_mcp_tool_schemas(
self, server_params: dict[str, Any]
) -> dict[str, dict[str, Any]]:
"""Get tool schemas from MCP server for wrapper creation with caching."""
server_url = server_params["url"]
@@ -988,7 +993,7 @@ class Agent(BaseAgent):
self._logger.log(
"debug", f"Using cached MCP tool schemas for {server_url}"
)
return cached_data
return cast(dict[str, dict[str, Any]], cached_data)
try:
schemas = asyncio.run(self._get_mcp_tool_schemas_async(server_params))
@@ -1006,7 +1011,7 @@ class Agent(BaseAgent):
async def _get_mcp_tool_schemas_async(
self, server_params: dict[str, Any]
) -> dict[str, dict]:
) -> dict[str, dict[str, Any]]:
"""Async implementation of MCP tool schema retrieval with timeouts and retries."""
server_url = server_params["url"]
return await self._retry_mcp_discovery(
@@ -1014,7 +1019,7 @@ class Agent(BaseAgent):
)
async def _retry_mcp_discovery(
self, operation_func, server_url: str
self, operation_func: Any, server_url: str
) -> dict[str, dict[str, Any]]:
"""Retry MCP discovery operation with exponential backoff, avoiding try-except in loop."""
last_error = None
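`_retry_mcp_discovery` retries with exponential backoff while keeping the try/except out of the loop body, delegating each attempt to a helper that reports `(result, error_message, should_retry)`. A standalone sketch of that shape (names, exception types, and delays are illustrative, not the actual crewai values):

```python
import asyncio
from collections.abc import Awaitable, Callable
from typing import Any


async def retry_with_backoff(
    operation: Callable[[], Awaitable[dict[str, Any]]],
    max_attempts: int = 3,
    base_delay: float = 1.0,
) -> dict[str, Any]:
    last_error = ""
    for attempt in range(max_attempts):
        result, last_error, should_retry = await attempt_once(operation)
        if result is not None:
            return result
        if not should_retry:
            break
        await asyncio.sleep(base_delay * 2**attempt)  # 1s, 2s, 4s, ...
    raise ConnectionError(f"discovery failed: {last_error}")


async def attempt_once(
    operation: Callable[[], Awaitable[dict[str, Any]]],
) -> tuple[dict[str, Any] | None, str, bool]:
    """Single attempt returning (result, error_message, should_retry)."""
    try:
        return await operation(), "", False
    except TimeoutError as e:
        return None, str(e), True
```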
@@ -1045,7 +1050,7 @@ class Agent(BaseAgent):
@staticmethod
async def _attempt_mcp_discovery(
operation_func, server_url: str
operation_func: Any, server_url: str
) -> tuple[dict[str, dict[str, Any]] | None, str, bool]:
"""Attempt single MCP discovery operation and return (result, error_message, should_retry)."""
try:
@@ -1149,13 +1154,13 @@ class Agent(BaseAgent):
Field(..., description=field_description),
)
else:
field_definitions[field_name] = (
field_definitions[field_name] = ( # type: ignore[assignment]
field_type | None,
Field(default=None, description=field_description),
)
model_name = f"{tool_name.replace('-', '_').replace(' ', '_')}Schema"
return create_model(model_name, **field_definitions)
return create_model(model_name, **field_definitions) # type: ignore[no-any-return,call-overload]
def _json_type_to_python(self, field_schema: dict[str, Any]) -> type:
"""Convert JSON Schema type to Python type.
@@ -1175,16 +1180,16 @@ class Agent(BaseAgent):
if "const" in option:
types.append(str)
else:
types.append(self._json_type_to_python(option))
types.append(self._json_type_to_python(option)) # type: ignore[arg-type]
unique_types = list(set(types))
if len(unique_types) > 1:
result = unique_types[0]
for t in unique_types[1:]:
result = result | t
result = result | t # type: ignore[assignment]
return result
return unique_types[0]
type_mapping = {
type_mapping: dict[str, type] = {
"string": str,
"number": float,
"integer": int,
@@ -1193,10 +1198,10 @@ class Agent(BaseAgent):
"object": dict,
}
return type_mapping.get(json_type, Any)
return type_mapping.get(json_type or "", Any)
@staticmethod
def _fetch_amp_mcp_servers(mcp_name: str) -> list[dict]:
def _fetch_amp_mcp_servers(mcp_name: str) -> list[dict[str, Any]]:
"""Fetch MCP server configurations from CrewAI AMP API."""
# TODO: Implement AMP API call to "integrations/mcps" endpoint
# Should return list of server configs with URLs
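The `_json_type_to_python` changes above tighten the typing of the JSON Schema → Python mapping, including the `anyOf` union path. A stripped-down sketch of that conversion, mirroring the logic in the hunk:

```python
from typing import Any

TYPE_MAP: dict[str, type] = {
    "string": str, "number": float, "integer": int,
    "boolean": bool, "array": list, "object": dict,
}


def json_type_to_python(field_schema: dict[str, Any]) -> Any:
    if "anyOf" in field_schema:
        types = [json_type_to_python(o) for o in field_schema["anyOf"]]
        result = types[0]
        for t in types[1:]:
            result = result | t  # build a union, e.g. str | int
        return result
    # Mirrors type_mapping.get(json_type or "", Any) above
    return TYPE_MAP.get(field_schema.get("type") or "", Any)


print(json_type_to_python({"anyOf": [{"type": "string"}, {"type": "integer"}]}))
# str | int
```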

View File

@@ -137,7 +137,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
default=False,
description="Enable agent to delegate and ask questions among each other.",
)
tools: list[BaseTool] | None = Field(
tools: list[BaseTool] = Field(
default_factory=list, description="Tools at agents' disposal"
)
max_iter: int = Field(
@@ -161,7 +161,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
description="An instance of the ToolsHandler class.",
)
tools_results: list[dict[str, Any]] = Field(
default=[], description="Results of the tools used by the agent."
default_factory=list, description="Results of the tools used by the agent."
)
max_tokens: int | None = Field(
default=None, description="Maximum number of tokens for the agent's execution."
@@ -265,7 +265,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
if not mcps:
return mcps
validated_mcps = []
validated_mcps: list[str | MCPServerConfig] = []
for mcp in mcps:
if isinstance(mcp, str):
if mcp.startswith(("https://", "crewai-amp:")):

View File

@@ -47,7 +47,7 @@ if TYPE_CHECKING:
from crewai.agent import Agent
from crewai.agents.tools_handler import ToolsHandler
from crewai.crew import Crew
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool

View File

@@ -57,7 +57,7 @@ from crewai.flow.flow_trackable import FlowTrackable
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory

View File

@@ -40,7 +40,7 @@ from crewai.events.types.logging_events import AgentLogsExecutionEvent
from crewai.flow.flow_trackable import FlowTrackable
from crewai.lite_agent_output import LiteAgentOutput
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.utilities.agent_utils import (
@@ -503,7 +503,7 @@ class LiteAgent(FlowTrackable, BaseModel):
AgentFinish: The final result of the agent execution.
"""
# Execute the agent loop
formatted_answer = None
formatted_answer: AgentAction | AgentFinish | None = None
while not isinstance(formatted_answer, AgentFinish):
try:
if has_reached_max_iterations(self._iterations, self.max_iterations):
@@ -551,7 +551,8 @@ class LiteAgent(FlowTrackable, BaseModel):
show_logs=self._show_logs,
)
self._append_message(formatted_answer.text, role="assistant")
if formatted_answer is not None:
self._append_message(formatted_answer.text, role="assistant")
except OutputParserError as e: # noqa: PERF203
self._printer.print(
content="Failed to parse LLM output. Retrying...",

View File

@@ -0,0 +1,4 @@
from crewai.llm.core import LLM
__all__ = ["LLM"]
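For downstream imports, the package rename is mechanical: every `crewai.llms.*` path becomes `crewai.llm.*`, as the hunks throughout this commit show.

```python
# Before this commit
from crewai.llms.base_llm import BaseLLM
from crewai.llms.hooks import BaseInterceptor

# After this commit
from crewai.llm.base_llm import BaseLLM
from crewai.llm.hooks import BaseInterceptor
```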

View File

@@ -11,9 +11,10 @@ from datetime import datetime
import json
import logging
import re
from typing import TYPE_CHECKING, Any, Final
from typing import TYPE_CHECKING, Any, ClassVar, Final
from pydantic import BaseModel
import httpx
from pydantic import BaseModel, ConfigDict, Field, model_validator
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.llm_events import (
@@ -28,6 +29,8 @@ from crewai.events.types.tool_usage_events import (
ToolUsageFinishedEvent,
ToolUsageStartedEvent,
)
from crewai.llm.hooks.base import BaseInterceptor
from crewai.llm.internal.meta import LLMMeta
from crewai.types.usage_metrics import UsageMetrics
@@ -43,7 +46,7 @@ DEFAULT_SUPPORTS_STOP_WORDS: Final[bool] = True
_JSON_EXTRACTION_PATTERN: Final[re.Pattern[str]] = re.compile(r"\{.*}", re.DOTALL)
class BaseLLM(ABC):
class BaseLLM(BaseModel, ABC, metaclass=LLMMeta):
"""Abstract base class for LLM implementations.
This class defines the interface that all LLM implementations must follow.
@@ -62,46 +65,96 @@ class BaseLLM(ABC):
additional_params: Additional provider-specific parameters.
"""
is_litellm: bool = False
model_config: ClassVar[ConfigDict] = ConfigDict(
arbitrary_types_allowed=True, extra="allow", validate_assignment=True
)
def __init__(
self,
model: str,
temperature: float | None = None,
api_key: str | None = None,
base_url: str | None = None,
provider: str | None = None,
**kwargs: Any,
) -> None:
"""Initialize the BaseLLM with default attributes.
# Core fields
model: str = Field(..., description="The model identifier/name")
temperature: float | None = Field(
None, description="Temperature setting for response generation"
)
api_key: str | None = Field(None, description="API key for authentication")
base_url: str | None = Field(None, description="Base URL for API requests")
provider: str = Field(
default="openai", description="Provider name (openai, anthropic, etc.)"
)
stop: list[str] = Field(
default_factory=list, description="Stop sequences for generation"
)
# Internal fields
is_litellm: bool = Field(
default=False, description="Whether this instance uses LiteLLM"
)
interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = Field(
None, description="HTTP request/response interceptor"
)
_token_usage: dict[str, int] = {
"total_tokens": 0,
"prompt_tokens": 0,
"completion_tokens": 0,
"successful_requests": 0,
"cached_prompt_tokens": 0,
}
@model_validator(mode="before")
@classmethod
def _extract_stop_and_validate(cls, values: dict[str, Any]) -> dict[str, Any]:
"""Extract and normalize stop sequences before model initialization.
Args:
model: The model identifier/name.
temperature: Optional temperature setting for response generation.
stop: Optional list of stop sequences for generation.
**kwargs: Additional provider-specific parameters.
values: Input values dictionary
Returns:
Processed values dictionary
"""
if not model:
if not values.get("model"):
raise ValueError("Model name is required and cannot be empty")
self.model = model
self.temperature = temperature
self.api_key = api_key
self.base_url = base_url
# Store additional parameters for provider-specific use
self.additional_params = kwargs
self._provider = provider or "openai"
stop = kwargs.pop("stop", None)
# Handle stop sequences
stop = values.get("stop")
if stop is None:
self.stop: list[str] = []
values["stop"] = []
elif isinstance(stop, str):
self.stop = [stop]
elif isinstance(stop, list):
self.stop = stop
else:
self.stop = []
values["stop"] = [stop]
elif not isinstance(stop, list):
values["stop"] = []
# Set default provider if not specified
if "provider" not in values or values["provider"] is None:
values["provider"] = "openai"
return values
@property
def additional_params(self) -> dict[str, Any]:
"""Get additional parameters stored as extra fields.
Returns:
Dictionary of additional parameters
"""
return self.__pydantic_extra__ or {}
@additional_params.setter
def additional_params(self, value: dict[str, Any]) -> None:
"""Set additional parameters as extra fields.
Args:
value: Dictionary of additional parameters to set
"""
if not isinstance(value, dict):
raise ValueError("additional_params must be a dictionary")
if self.__pydantic_extra__ is None:
self.__pydantic_extra__ = {}
self.__pydantic_extra__.update(value)
def model_post_init(self, __context: Any) -> None:
"""Initialize token usage tracking after model initialization.
Args:
__context: Pydantic context (unused)
"""
self._token_usage = {
"total_tokens": 0,
"prompt_tokens": 0,
@@ -110,16 +163,6 @@ class BaseLLM(ABC):
"cached_prompt_tokens": 0,
}
@property
def provider(self) -> str:
"""Get the provider of the LLM."""
return self._provider
@provider.setter
def provider(self, value: str) -> None:
"""Set the provider of the LLM."""
self._provider = value
@abstractmethod
def call(
self,
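The net effect of the conversion above: model validation and stop normalization move into a `mode="before"` validator, and `extra="allow"` lets provider-specific kwargs land in `__pydantic_extra__`, surfaced through the `additional_params` property. A stripped-down sketch of that behavior (not the real class, which also carries the metaclass, interceptor field, and token tracking):

```python
from typing import Any

from pydantic import BaseModel, ConfigDict, Field, model_validator


class MiniLLM(BaseModel):
    model_config = ConfigDict(extra="allow", validate_assignment=True)

    model: str = Field(..., description="The model identifier/name")
    stop: list[str] = Field(default_factory=list)

    @model_validator(mode="before")
    @classmethod
    def _normalize(cls, values: dict[str, Any]) -> dict[str, Any]:
        if not values.get("model"):
            raise ValueError("Model name is required and cannot be empty")
        stop = values.get("stop")
        if stop is None:
            values["stop"] = []
        elif isinstance(stop, str):
            values["stop"] = [stop]
        elif not isinstance(stop, list):
            values["stop"] = []
        return values

    @property
    def additional_params(self) -> dict[str, Any]:
        return self.__pydantic_extra__ or {}


llm = MiniLLM(model="gpt-4o", stop="Observation:", custom_flag=True)
print(llm.stop)               # ['Observation:']
print(llm.additional_params)  # {'custom_flag': True}
```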

View File

@@ -20,9 +20,7 @@ from typing import (
)
from dotenv import load_dotenv
import httpx
from pydantic import BaseModel, Field
from typing_extensions import Self
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.llm_events import (
@@ -37,14 +35,7 @@ from crewai.events.types.tool_usage_events import (
ToolUsageFinishedEvent,
ToolUsageStartedEvent,
)
from crewai.llms.base_llm import BaseLLM
from crewai.llms.constants import (
ANTHROPIC_MODELS,
AZURE_MODELS,
BEDROCK_MODELS,
GEMINI_MODELS,
OPENAI_MODELS,
)
from crewai.llm.base_llm import BaseLLM
from crewai.utilities import InternalInstructor
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
@@ -61,7 +52,6 @@ if TYPE_CHECKING:
from litellm.utils import supports_response_schema
from crewai.agent.core import Agent
from crewai.llms.hooks.base import BaseInterceptor
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
from crewai.utilities.types import LLMMessage
@@ -327,249 +317,57 @@ class AccumulatedToolArgs(BaseModel):
class LLM(BaseLLM):
completion_cost: float | None = None
"""LiteLLM-based LLM implementation for CrewAI.
def __new__(cls, model: str, is_litellm: bool = False, **kwargs: Any) -> LLM:
"""Factory method that routes to native SDK or falls back to LiteLLM.
This class provides LiteLLM integration for models not covered by native providers.
The metaclass (LLMMeta) automatically routes to native providers when appropriate.
"""
Routing priority:
1. If 'provider' kwarg is present, use that provider with constants
2. If only 'model' kwarg, use constants to infer provider
3. If "/" in model name:
- Check if prefix is a native provider (openai/anthropic/azure/bedrock/gemini)
- If yes, validate model against constants
- If valid, route to native SDK; otherwise route to LiteLLM
"""
if not model or not isinstance(model, str):
raise ValueError("Model must be a non-empty string")
# LiteLLM-specific fields
completion_cost: float | None = Field(None, description="Cost of completion")
timeout: float | int | None = Field(None, description="Request timeout")
top_p: float | None = Field(None, description="Top-p sampling parameter")
n: int | None = Field(None, description="Number of completions to generate")
max_completion_tokens: int | None = Field(
None, description="Maximum completion tokens"
)
max_tokens: int | float | None = Field(None, description="Maximum total tokens")
presence_penalty: float | None = Field(None, description="Presence penalty")
frequency_penalty: float | None = Field(None, description="Frequency penalty")
logit_bias: dict[int, float] | None = Field(None, description="Logit bias")
response_format: type[BaseModel] | None = Field(
None, description="Response format model"
)
seed: int | None = Field(None, description="Random seed for reproducibility")
logprobs: int | None = Field(None, description="Log probabilities to return")
top_logprobs: int | None = Field(None, description="Top log probabilities")
api_base: str | None = Field(None, description="API base URL (alias for base_url)")
api_version: str | None = Field(None, description="API version")
callbacks: list[Any] | None = Field(None, description="Callback functions")
context_window_size: int = Field(0, description="Context window size in tokens")
reasoning_effort: Literal["none", "low", "medium", "high"] | None = Field(
None, description="Reasoning effort level"
)
is_anthropic: bool = Field(False, description="Whether model is from Anthropic")
stream: bool = Field(False, description="Whether to stream responses")
explicit_provider = kwargs.get("provider")
if explicit_provider:
provider = explicit_provider
use_native = True
model_string = model
elif "/" in model:
prefix, _, model_part = model.partition("/")
provider_mapping = {
"openai": "openai",
"anthropic": "anthropic",
"claude": "anthropic",
"azure": "azure",
"azure_openai": "azure",
"google": "gemini",
"gemini": "gemini",
"bedrock": "bedrock",
"aws": "bedrock",
}
canonical_provider = provider_mapping.get(prefix.lower())
if canonical_provider and cls._validate_model_in_constants(
model_part, canonical_provider
):
provider = canonical_provider
use_native = True
model_string = model_part
else:
provider = prefix
use_native = False
model_string = model_part
else:
provider = cls._infer_provider_from_model(model)
use_native = True
model_string = model
native_class = cls._get_native_provider(provider) if use_native else None
if native_class and not is_litellm and provider in SUPPORTED_NATIVE_PROVIDERS:
try:
# Remove 'provider' from kwargs if it exists to avoid duplicate keyword argument
kwargs_copy = {k: v for k, v in kwargs.items() if k != 'provider'}
return cast(
Self, native_class(model=model_string, provider=provider, **kwargs_copy)
)
except NotImplementedError:
raise
except Exception as e:
raise ImportError(f"Error importing native provider: {e}") from e
# FALLBACK to LiteLLM
if not LITELLM_AVAILABLE:
logger.error("LiteLLM is not available, falling back to LiteLLM")
raise ImportError("Fallback to LiteLLM is not available") from None
instance = object.__new__(cls)
super(LLM, instance).__init__(model=model, is_litellm=True, **kwargs)
instance.is_litellm = True
return instance
@classmethod
def _validate_model_in_constants(cls, model: str, provider: str) -> bool:
"""Validate if a model name exists in the provider's constants.
def model_post_init(self, __context: Any) -> None:
"""Initialize LiteLLM-specific settings after model initialization.
Args:
model: The model name to validate
provider: The provider to check against (canonical name)
Returns:
True if the model exists in the provider's constants, False otherwise
__context: Pydantic context
"""
if provider == "openai":
return model in OPENAI_MODELS
super().model_post_init(__context)
if provider == "anthropic" or provider == "claude":
return model in ANTHROPIC_MODELS
# Configure LiteLLM
if LITELLM_AVAILABLE:
litellm.drop_params = True
if provider == "gemini":
return model in GEMINI_MODELS
# Determine if this is an Anthropic model
self.is_anthropic = self._is_anthropic_model(self.model)
if provider == "bedrock":
return model in BEDROCK_MODELS
if provider == "azure":
# Azure does not publish a list of available models; TODO: handle this better
return True
return False
@classmethod
def _infer_provider_from_model(cls, model: str) -> str:
"""Infer the provider from the model name.
Args:
model: The model name without provider prefix
Returns:
The inferred provider name, defaults to "openai"
"""
if model in OPENAI_MODELS:
return "openai"
if model in ANTHROPIC_MODELS:
return "anthropic"
if model in GEMINI_MODELS:
return "gemini"
if model in BEDROCK_MODELS:
return "bedrock"
if model in AZURE_MODELS:
return "azure"
return "openai"
@classmethod
def _get_native_provider(cls, provider: str) -> type | None:
"""Get native provider class if available."""
if provider == "openai":
from crewai.llms.providers.openai.completion import OpenAICompletion
return OpenAICompletion
if provider == "anthropic" or provider == "claude":
from crewai.llms.providers.anthropic.completion import (
AnthropicCompletion,
)
return AnthropicCompletion
if provider == "azure" or provider == "azure_openai":
from crewai.llms.providers.azure.completion import AzureCompletion
return AzureCompletion
if provider == "google" or provider == "gemini":
from crewai.llms.providers.gemini.completion import GeminiCompletion
return GeminiCompletion
if provider == "bedrock":
from crewai.llms.providers.bedrock.completion import BedrockCompletion
return BedrockCompletion
return None
def __init__(
self,
model: str,
timeout: float | int | None = None,
temperature: float | None = None,
top_p: float | None = None,
n: int | None = None,
stop: str | list[str] | None = None,
max_completion_tokens: int | None = None,
max_tokens: int | float | None = None,
presence_penalty: float | None = None,
frequency_penalty: float | None = None,
logit_bias: dict[int, float] | None = None,
response_format: type[BaseModel] | None = None,
seed: int | None = None,
logprobs: int | None = None,
top_logprobs: int | None = None,
base_url: str | None = None,
api_base: str | None = None,
api_version: str | None = None,
api_key: str | None = None,
callbacks: list[Any] | None = None,
reasoning_effort: Literal["none", "low", "medium", "high"] | None = None,
stream: bool = False,
interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
**kwargs: Any,
) -> None:
"""Initialize LLM instance.
Note: This __init__ method is only called for fallback instances.
Native provider instances handle their own initialization in their respective classes.
"""
super().__init__(
model=model,
temperature=temperature,
api_key=api_key,
base_url=base_url,
timeout=timeout,
**kwargs,
)
self.model = model
self.timeout = timeout
self.temperature = temperature
self.top_p = top_p
self.n = n
self.max_completion_tokens = max_completion_tokens
self.max_tokens = max_tokens
self.presence_penalty = presence_penalty
self.frequency_penalty = frequency_penalty
self.logit_bias = logit_bias
self.response_format = response_format
self.seed = seed
self.logprobs = logprobs
self.top_logprobs = top_logprobs
self.base_url = base_url
self.api_base = api_base
self.api_version = api_version
self.api_key = api_key
self.callbacks = callbacks
self.context_window_size = 0
self.reasoning_effort = reasoning_effort
self.additional_params = kwargs
self.is_anthropic = self._is_anthropic_model(model)
self.stream = stream
self.interceptor = interceptor
litellm.drop_params = True
# Normalize self.stop to always be a list[str]
if stop is None:
self.stop: list[str] = []
elif isinstance(stop, str):
self.stop = [stop]
else:
self.stop = stop
self.set_callbacks(callbacks or [])
# Set up callbacks
self.set_callbacks(self.callbacks or [])
self.set_env_callbacks()
@staticmethod
@@ -1649,7 +1447,7 @@ class LLM(BaseLLM):
**filtered_params,
)
def __deepcopy__(self, memo: dict[int, Any] | None) -> LLM:
def __deepcopy__(self, memo: dict[int, Any] | None) -> LLM: # type: ignore[override]
"""Create a deep copy of the LLM instance."""
import copy

View File

@@ -1,6 +1,6 @@
"""Interceptor contracts for crewai"""
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llm.hooks.base import BaseInterceptor
__all__ = ["BaseInterceptor"]

View File

@@ -22,7 +22,7 @@ if TYPE_CHECKING:
from httpx import Limits, Request, Response
from httpx._types import CertTypes, ProxyTypes
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llm.hooks.base import BaseInterceptor
class HTTPTransportKwargs(TypedDict, total=False):

View File

@@ -0,0 +1,232 @@
"""Metaclass for LLM provider routing.
This metaclass enables automatic routing to native provider implementations
based on the model parameter at instantiation time.
"""
from __future__ import annotations
import logging
from typing import Any
from pydantic._internal._model_construction import ModelMetaclass
# Provider constants imported from crewai.llm.constants
SUPPORTED_NATIVE_PROVIDERS: list[str] = [
"openai",
"anthropic",
"claude",
"azure",
"azure_openai",
"google",
"gemini",
"bedrock",
"aws",
]
class LLMMeta(ModelMetaclass):
"""Metaclass for LLM that handles provider routing.
This metaclass intercepts LLM instantiation and routes to the appropriate
native provider implementation based on the model parameter.
"""
def __call__(cls, model: str, is_litellm: bool = False, **kwargs: Any) -> Any: # noqa: N805
"""Route to appropriate provider implementation at instantiation time.
Args:
model: The model identifier (e.g., "gpt-4", "claude-3-opus")
is_litellm: Force use of LiteLLM instead of native provider
**kwargs: Additional parameters for the LLM
Returns:
Instance of the appropriate provider class or LLM class
Raises:
ValueError: If model is not a valid string
"""
if not model or not isinstance(model, str):
raise ValueError("Model must be a non-empty string")
# Only perform routing if called on the base LLM class
# Subclasses (OpenAICompletion, etc.) should create normally
from crewai.llm import LLM
if cls is not LLM:
# Direct instantiation of provider class, skip routing
return super().__call__(model=model, **kwargs)
# Extract provider information
explicit_provider = kwargs.get("provider")
if explicit_provider:
provider = explicit_provider
use_native = True
model_string = model
elif "/" in model:
prefix, _, model_part = model.partition("/")
provider_mapping = {
"openai": "openai",
"anthropic": "anthropic",
"claude": "anthropic",
"azure": "azure",
"azure_openai": "azure",
"google": "gemini",
"gemini": "gemini",
"bedrock": "bedrock",
"aws": "bedrock",
}
canonical_provider = provider_mapping.get(prefix.lower())
if canonical_provider and cls._validate_model_in_constants(
model_part, canonical_provider
):
provider = canonical_provider
use_native = True
model_string = model_part
else:
provider = prefix
use_native = False
model_string = model_part
else:
provider = cls._infer_provider_from_model(model)
use_native = True
model_string = model
# Route to native provider if available
native_class = cls._get_native_provider(provider) if use_native else None
if native_class and not is_litellm and provider in SUPPORTED_NATIVE_PROVIDERS:
try:
# Remove 'provider' from kwargs to avoid duplicate keyword argument
kwargs_copy = {k: v for k, v in kwargs.items() if k != "provider"}
return native_class(
model=model_string, provider=provider, **kwargs_copy
)
except NotImplementedError:
raise
except Exception as e:
raise ImportError(f"Error importing native provider: {e}") from e
# Fallback to LiteLLM
try:
import litellm # noqa: F401
except ImportError:
logging.error("LiteLLM is not available, falling back to LiteLLM")
raise ImportError("Fallback to LiteLLM is not available") from None
# Create actual LLM instance with is_litellm=True
return super().__call__(model=model, is_litellm=True, **kwargs)
@staticmethod
def _validate_model_in_constants(model: str, provider: str) -> bool:
"""Validate if a model name exists in the provider's constants.
Args:
model: The model name to validate
provider: The provider to check against (canonical name)
Returns:
True if the model exists in the provider's constants, False otherwise
"""
from crewai.llm.constants import (
ANTHROPIC_MODELS,
BEDROCK_MODELS,
GEMINI_MODELS,
OPENAI_MODELS,
)
if provider == "openai":
return model in OPENAI_MODELS
if provider == "anthropic" or provider == "claude":
return model in ANTHROPIC_MODELS
if provider == "gemini":
return model in GEMINI_MODELS
if provider == "bedrock":
return model in BEDROCK_MODELS
if provider == "azure":
# azure does not provide a list of available models
return True
return False
@staticmethod
def _infer_provider_from_model(model: str) -> str:
"""Infer the provider from the model name.
Args:
model: The model name without provider prefix
Returns:
The inferred provider name, defaults to "openai"
"""
from crewai.llm.constants import (
ANTHROPIC_MODELS,
AZURE_MODELS,
BEDROCK_MODELS,
GEMINI_MODELS,
OPENAI_MODELS,
)
if model in OPENAI_MODELS:
return "openai"
if model in ANTHROPIC_MODELS:
return "anthropic"
if model in GEMINI_MODELS:
return "gemini"
if model in BEDROCK_MODELS:
return "bedrock"
if model in AZURE_MODELS:
return "azure"
return "openai"
@staticmethod
def _get_native_provider(provider: str) -> type | None:
"""Get native provider class if available.
Args:
provider: The provider name
Returns:
The provider class or None if not available
"""
if provider == "openai":
from crewai.llm.providers.openai.completion import OpenAICompletion
return OpenAICompletion
if provider == "anthropic" or provider == "claude":
from crewai.llm.providers.anthropic.completion import (
AnthropicCompletion,
)
return AnthropicCompletion
if provider == "azure" or provider == "azure_openai":
from crewai.llm.providers.azure.completion import AzureCompletion
return AzureCompletion
if provider == "google" or provider == "gemini":
from crewai.llm.providers.gemini.completion import GeminiCompletion
return GeminiCompletion
if provider == "bedrock":
from crewai.llm.providers.bedrock.completion import BedrockCompletion
return BedrockCompletion
return None
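The routing now lives in `LLMMeta.__call__`, so `LLM("gpt-4o")` can return a native provider subclass while direct instantiation of a provider class builds normally. A toy reduction of the mechanism (stand-in classes, not the crewai ones):

```python
from typing import Any

from pydantic import BaseModel
from pydantic._internal._model_construction import ModelMetaclass


class RouterMeta(ModelMetaclass):
    def __call__(cls, model: str, **kwargs: Any) -> Any:
        # Only route when called on the base class; subclasses build normally.
        if cls is Base and model.startswith("native/"):
            return Native(model=model.partition("/")[2], **kwargs)
        return super().__call__(model=model, **kwargs)


class Base(BaseModel, metaclass=RouterMeta):
    model: str


class Native(Base):
    pass


print(type(Base(model="native/x")).__name__)  # Native
print(type(Base(model="other")).__name__)     # Base
```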

View File

@@ -3,13 +3,15 @@ from __future__ import annotations
import json
import logging
import os
from typing import TYPE_CHECKING, Any, cast
from typing import TYPE_CHECKING, Any, ClassVar, cast
from pydantic import BaseModel
from pydantic import BaseModel, ConfigDict
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM
from crewai.llms.hooks.transport import HTTPTransport
from crewai.llm.base_llm import BaseLLM
from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO
from crewai.llm.hooks.transport import HTTPTransport
from crewai.llm.providers.utils.common import safe_tool_conversion
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
@@ -18,7 +20,7 @@ from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llm.hooks.base import BaseInterceptor
try:
from anthropic import Anthropic
@@ -38,6 +40,8 @@ class AnthropicCompletion(BaseLLM):
offering native tool use, streaming support, and proper message formatting.
"""
model_config: ClassVar[ConfigDict] = ConfigDict(ignored_types=(property,))
def __init__(
self,
model: str = "claude-3-5-sonnet-20241022",
@@ -94,29 +98,30 @@ class AnthropicCompletion(BaseLLM):
self.is_claude_3 = "claude-3" in model.lower()
self.supports_tools = self.is_claude_3 # Claude 3+ supports tool use
@property
def stop(self) -> list[str]:
"""Get stop sequences sent to the API."""
return self.stop_sequences
#
# @property
# def stop(self) -> list[str]: # type: ignore[misc]
# """Get stop sequences sent to the API."""
# return self.stop_sequences
@stop.setter
def stop(self, value: list[str] | str | None) -> None:
"""Set stop sequences.
Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
are properly sent to the Anthropic API.
Args:
value: Stop sequences as a list, single string, or None
"""
if value is None:
self.stop_sequences = []
elif isinstance(value, str):
self.stop_sequences = [value]
elif isinstance(value, list):
self.stop_sequences = value
else:
self.stop_sequences = []
# @stop.setter
# def stop(self, value: list[str] | str | None) -> None:
# """Set stop sequences.
#
# Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
# are properly sent to the Anthropic API.
#
# Args:
# value: Stop sequences as a list, single string, or None
# """
# if value is None:
# self.stop_sequences = []
# elif isinstance(value, str):
# self.stop_sequences = [value]
# elif isinstance(value, list):
# self.stop_sequences = value
# else:
# self.stop_sequences = []
def _get_client_params(self) -> dict[str, Any]:
"""Get client parameters."""
@@ -266,8 +271,6 @@ class AnthropicCompletion(BaseLLM):
continue
try:
from crewai.llms.providers.utils.common import safe_tool_conversion
name, description, parameters = safe_tool_conversion(tool, "Anthropic")
except (ImportError, KeyError, ValueError) as e:
logging.error(f"Error converting tool to Anthropic format: {e}")
@@ -636,7 +639,6 @@ class AnthropicCompletion(BaseLLM):
def get_context_window_size(self) -> int:
"""Get the context window size for the model."""
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO
# Context window sizes for Anthropic models
context_windows = {

View File

@@ -7,6 +7,8 @@ from typing import TYPE_CHECKING, Any
from pydantic import BaseModel
from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
from crewai.llm.providers.utils.common import safe_tool_conversion
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
@@ -15,7 +17,7 @@ from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llm.hooks.base import BaseInterceptor
from crewai.tools.base_tool import BaseTool
@@ -36,7 +38,7 @@ try:
)
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
except ImportError:
raise ImportError(
@@ -317,8 +319,6 @@ class AzureCompletion(BaseLLM):
) -> list[dict[str, Any]]:
"""Convert CrewAI tool format to Azure OpenAI function calling format."""
from crewai.llms.providers.utils.common import safe_tool_conversion
azure_tools = []
for tool in tools:
@@ -554,7 +554,6 @@ class AzureCompletion(BaseLLM):
def get_context_window_size(self) -> int:
"""Get the context window size for the model."""
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
min_context = 1024
max_context = 2097152
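Each provider's `get_context_window_size` clamps the per-model window to `[1024, 2097152]` and scales it by `CONTEXT_WINDOW_USAGE_RATIO`, which these hunks now import from `crewai.llm.core` instead of `crewai.llm`. A sketch of the arithmetic, with an illustrative ratio:

```python
# Illustrative ratio; the real value is defined in crewai.llm.core.
CONTEXT_WINDOW_USAGE_RATIO = 0.75
MIN_CONTEXT, MAX_CONTEXT = 1024, 2097152


def usable_context(window: int) -> int:
    clamped = max(MIN_CONTEXT, min(window, MAX_CONTEXT))
    return int(clamped * CONTEXT_WINDOW_USAGE_RATIO)


print(usable_context(128_000))  # 96000
```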

View File

@@ -3,13 +3,15 @@ from __future__ import annotations
from collections.abc import Mapping, Sequence
import logging
import os
from typing import TYPE_CHECKING, Any, TypedDict, cast
from typing import TYPE_CHECKING, Any, ClassVar, TypedDict, cast
from pydantic import BaseModel
from pydantic import BaseModel, ConfigDict
from typing_extensions import Required
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO
from crewai.llm.providers.utils.common import safe_tool_conversion
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
@@ -30,7 +32,7 @@ if TYPE_CHECKING:
ToolTypeDef,
)
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llm.hooks.base import BaseInterceptor
try:
@@ -143,6 +145,8 @@ class BedrockCompletion(BaseLLM):
- Model-specific conversation format handling (e.g., Cohere requirements)
"""
model_config: ClassVar[ConfigDict] = ConfigDict(ignored_types=(property,))
def __init__(
self,
model: str = "anthropic.claude-3-5-sonnet-20241022-v2:0",
@@ -243,29 +247,29 @@ class BedrockCompletion(BaseLLM):
# Handle inference profiles for newer models
self.model_id = model
@property
def stop(self) -> list[str]:
"""Get stop sequences sent to the API."""
return list(self.stop_sequences)
# @property
# def stop(self) -> list[str]: # type: ignore[misc]
# """Get stop sequences sent to the API."""
# return list(self.stop_sequences)
@stop.setter
def stop(self, value: Sequence[str] | str | None) -> None:
"""Set stop sequences.
Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
are properly sent to the Bedrock API.
Args:
value: Stop sequences as a Sequence, single string, or None
"""
if value is None:
self.stop_sequences = []
elif isinstance(value, str):
self.stop_sequences = [value]
elif isinstance(value, Sequence):
self.stop_sequences = list(value)
else:
self.stop_sequences = []
# @stop.setter
# def stop(self, value: Sequence[str] | str | None) -> None:
# """Set stop sequences.
#
# Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
# are properly sent to the Bedrock API.
#
# Args:
# value: Stop sequences as a Sequence, single string, or None
# """
# if value is None:
# self.stop_sequences = []
# elif isinstance(value, str):
# self.stop_sequences = [value]
# elif isinstance(value, Sequence):
# self.stop_sequences = list(value)
# else:
# self.stop_sequences = []
def call(
self,
@@ -778,7 +782,6 @@ class BedrockCompletion(BaseLLM):
tools: list[dict[str, Any]],
) -> list[ConverseToolTypeDef]:
"""Convert CrewAI tools to Converse API format following AWS specification."""
from crewai.llms.providers.utils.common import safe_tool_conversion
converse_tools: list[ConverseToolTypeDef] = []
@@ -871,7 +874,6 @@ class BedrockCompletion(BaseLLM):
def get_context_window_size(self) -> int:
"""Get the context window size for the model."""
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO
# Context window sizes for common Bedrock models
context_windows = {

View File

@@ -1,12 +1,14 @@
import logging
import os
from typing import Any, cast
from typing import Any, ClassVar, cast
from pydantic import BaseModel
from pydantic import BaseModel, ConfigDict
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llm.base_llm import BaseLLM
from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
from crewai.llm.hooks.base import BaseInterceptor
from crewai.llm.providers.utils.common import safe_tool_conversion
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
@@ -31,6 +33,8 @@ class GeminiCompletion(BaseLLM):
offering native function calling, streaming support, and proper Gemini formatting.
"""
model_config: ClassVar[ConfigDict] = ConfigDict(ignored_types=(property,))
def __init__(
self,
model: str = "gemini-2.0-flash-001",
@@ -104,29 +108,29 @@ class GeminiCompletion(BaseLLM):
self.is_gemini_1_5 = "gemini-1.5" in model.lower()
self.supports_tools = self.is_gemini_1_5 or self.is_gemini_2
@property
def stop(self) -> list[str]:
"""Get stop sequences sent to the API."""
return self.stop_sequences
# @property
# def stop(self) -> list[str]: # type: ignore[misc]
# """Get stop sequences sent to the API."""
# return self.stop_sequences
@stop.setter
def stop(self, value: list[str] | str | None) -> None:
"""Set stop sequences.
Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
are properly sent to the Gemini API.
Args:
value: Stop sequences as a list, single string, or None
"""
if value is None:
self.stop_sequences = []
elif isinstance(value, str):
self.stop_sequences = [value]
elif isinstance(value, list):
self.stop_sequences = value
else:
self.stop_sequences = []
# @stop.setter
# def stop(self, value: list[str] | str | None) -> None:
# """Set stop sequences.
#
# Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
# are properly sent to the Gemini API.
#
# Args:
# value: Stop sequences as a list, single string, or None
# """
# if value is None:
# self.stop_sequences = []
# elif isinstance(value, str):
# self.stop_sequences = [value]
# elif isinstance(value, list):
# self.stop_sequences = value
# else:
# self.stop_sequences = []
def _initialize_client(self, use_vertexai: bool = False) -> genai.Client: # type: ignore[no-any-unimported]
"""Initialize the Google Gen AI client with proper parameter handling.
@@ -335,8 +339,6 @@ class GeminiCompletion(BaseLLM):
"""Convert CrewAI tool format to Gemini function declaration format."""
gemini_tools = []
from crewai.llms.providers.utils.common import safe_tool_conversion
for tool in tools:
name, description, parameters = safe_tool_conversion(tool, "Gemini")
@@ -547,7 +549,6 @@ class GeminiCompletion(BaseLLM):
def get_context_window_size(self) -> int:
"""Get the context window size for the model."""
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
min_context = 1024
max_context = 2097152

View File

@@ -11,11 +11,13 @@ from openai import APIConnectionError, NotFoundError, OpenAI
from openai.types.chat import ChatCompletion, ChatCompletionChunk
from openai.types.chat.chat_completion import Choice
from openai.types.chat.chat_completion_chunk import ChoiceDelta
from pydantic import BaseModel
from pydantic import BaseModel, Field
from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM
from crewai.llms.hooks.transport import HTTPTransport
from crewai.llm.base_llm import BaseLLM
from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
from crewai.llm.hooks.transport import HTTPTransport
from crewai.llm.providers.utils.common import safe_tool_conversion
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
@@ -25,7 +27,6 @@ from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.agent.core import Agent
from crewai.llms.hooks.base import BaseInterceptor
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
@@ -37,61 +38,61 @@ class OpenAICompletion(BaseLLM):
offering native structured outputs, function calling, and streaming support.
"""
def __init__(
self,
model: str = "gpt-4o",
api_key: str | None = None,
base_url: str | None = None,
organization: str | None = None,
project: str | None = None,
timeout: float | None = None,
max_retries: int = 2,
default_headers: dict[str, str] | None = None,
default_query: dict[str, Any] | None = None,
client_params: dict[str, Any] | None = None,
temperature: float | None = None,
top_p: float | None = None,
frequency_penalty: float | None = None,
presence_penalty: float | None = None,
max_tokens: int | None = None,
max_completion_tokens: int | None = None,
seed: int | None = None,
stream: bool = False,
response_format: dict[str, Any] | type[BaseModel] | None = None,
logprobs: bool | None = None,
top_logprobs: int | None = None,
reasoning_effort: str | None = None,
provider: str | None = None,
interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
**kwargs: Any,
) -> None:
"""Initialize OpenAI chat completion client."""
# Client configuration fields
organization: str | None = Field(None, description="OpenAI organization ID")
project: str | None = Field(None, description="OpenAI project ID")
max_retries: int = Field(2, description="Maximum number of retries")
default_headers: dict[str, str] | None = Field(
None, description="Default headers for requests"
)
default_query: dict[str, Any] | None = Field(
None, description="Default query parameters"
)
client_params: dict[str, Any] | None = Field(
None, description="Additional client parameters"
)
timeout: float | None = Field(None, description="Request timeout")
api_base: str | None = Field(None, description="API base URL (deprecated)")
if provider is None:
provider = kwargs.pop("provider", "openai")
# Completion parameters
top_p: float | None = Field(None, description="Top-p sampling parameter")
frequency_penalty: float | None = Field(None, description="Frequency penalty")
presence_penalty: float | None = Field(None, description="Presence penalty")
max_tokens: int | None = Field(None, description="Maximum tokens")
max_completion_tokens: int | None = Field(
None, description="Maximum completion tokens"
)
seed: int | None = Field(None, description="Random seed")
stream: bool = Field(False, description="Enable streaming")
response_format: dict[str, Any] | type[BaseModel] | None = Field(
None, description="Response format"
)
logprobs: bool | None = Field(None, description="Return log probabilities")
top_logprobs: int | None = Field(
None, description="Number of top log probabilities"
)
reasoning_effort: str | None = Field(None, description="Reasoning effort level")
self.interceptor = interceptor
# Client configuration attributes
self.organization = organization
self.project = project
self.max_retries = max_retries
self.default_headers = default_headers
self.default_query = default_query
self.client_params = client_params
self.timeout = timeout
self.base_url = base_url
self.api_base = kwargs.pop("api_base", None)
# Internal state
client: OpenAI = Field(
default_factory=OpenAI, exclude=True, description="OpenAI client instance"
)
is_o1_model: bool = Field(False, description="Whether this is an O1 model")
is_gpt4_model: bool = Field(False, description="Whether this is a GPT-4 model")
super().__init__(
model=model,
temperature=temperature,
api_key=api_key or os.getenv("OPENAI_API_KEY"),
base_url=base_url,
timeout=timeout,
provider=provider,
**kwargs,
)
def model_post_init(self, __context: Any) -> None:
"""Initialize OpenAI client after model initialization.
Args:
__context: Pydantic context
"""
super().model_post_init(__context)
# Set API key from environment if not provided
if self.api_key is None:
self.api_key = os.getenv("OPENAI_API_KEY")
# Initialize client
client_config = self._get_client_params()
if self.interceptor:
transport = HTTPTransport(interceptor=self.interceptor)
@@ -100,20 +101,9 @@ class OpenAICompletion(BaseLLM):
self.client = OpenAI(**client_config)
# Completion parameters
self.top_p = top_p
self.frequency_penalty = frequency_penalty
self.presence_penalty = presence_penalty
self.max_tokens = max_tokens
self.max_completion_tokens = max_completion_tokens
self.seed = seed
self.stream = stream
self.response_format = response_format
self.logprobs = logprobs
self.top_logprobs = top_logprobs
self.reasoning_effort = reasoning_effort
self.is_o1_model = "o1" in model.lower()
self.is_gpt4_model = "gpt-4" in model.lower()
# Set model flags
self.is_o1_model = "o1" in self.model.lower()
self.is_gpt4_model = "gpt-4" in self.model.lower()
def _get_client_params(self) -> dict[str, Any]:
"""Get OpenAI client parameters."""
@@ -268,7 +258,6 @@ class OpenAICompletion(BaseLLM):
self, tools: list[dict[str, BaseTool]]
) -> list[dict[str, Any]]:
"""Convert CrewAI tool format to OpenAI function calling format."""
from crewai.llms.providers.utils.common import safe_tool_conversion
openai_tools = []
@@ -560,7 +549,6 @@ class OpenAICompletion(BaseLLM):
def get_context_window_size(self) -> int:
"""Get the context window size for the model."""
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
min_context = 1024
max_context = 2097152
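`OpenAICompletion` now declares its configuration as Pydantic fields and defers client construction to `model_post_init`, which runs after validation. The pattern in miniature (`FakeClient` is a hypothetical stand-in for the real OpenAI client):

```python
from typing import Any

from pydantic import BaseModel, ConfigDict, Field


class FakeClient:
    """Hypothetical stand-in for openai.OpenAI."""

    def __init__(self, api_key: str | None = None) -> None:
        self.api_key = api_key


class MiniCompletion(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)

    model: str = "gpt-4o"
    api_key: str | None = None
    is_o1_model: bool = False
    client: FakeClient | None = Field(default=None, exclude=True)

    def model_post_init(self, __context: Any) -> None:
        # All fields are validated by now; derive flags and build the client.
        self.is_o1_model = "o1" in self.model.lower()
        self.client = FakeClient(api_key=self.api_key)


c = MiniCompletion(model="o1-mini")
print(c.is_o1_model, type(c.client).__name__)  # True FakeClient
```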

View File

@@ -1 +0,0 @@
"""Third-party LLM implementations for crewAI."""

View File

@@ -4,7 +4,7 @@ from pydantic import BaseModel, Field
from crewai.agent import Agent
from crewai.lite_agent_output import LiteAgentOutput
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.tasks.task_output import TaskOutput

View File

@@ -16,7 +16,7 @@ from crewai.agents.parser import (
parse,
)
from crewai.cli.config import Settings
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.tools import BaseTool as CrewAITool
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool

View File

@@ -19,7 +19,7 @@ if TYPE_CHECKING:
from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
_JSON_PATTERN: Final[re.Pattern[str]] = re.compile(r"({.*})", re.DOTALL)
_I18N = get_i18n()

View File

@@ -11,7 +11,7 @@ from rich.table import Table
from crewai.agent import Agent
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.crew_events import CrewTestResultEvent
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput

View File

@@ -10,7 +10,7 @@ from crewai.utilities.logger_utils import suppress_warnings
if TYPE_CHECKING:
from crewai.agent import Agent
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.utilities.types import LLMMessage

View File

@@ -4,7 +4,7 @@ from typing import Any, Final
from crewai.cli.constants import DEFAULT_LLM_MODEL, ENV_VARS, LITELLM_PARAMS
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
logger = logging.getLogger(__name__)

View File

@@ -5,7 +5,7 @@ import logging
from pydantic import BaseModel, Field
from crewai.agent import Agent
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.task import Task

View File

@@ -15,7 +15,7 @@ if TYPE_CHECKING:
from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.task import Task

View File

@@ -14,7 +14,7 @@ from crewai.knowledge.knowledge_config import KnowledgeConfig
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.process import Process
from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage

View File

@@ -9,7 +9,7 @@ from crewai.events.types.agent_events import LiteAgentExecutionStartedEvent
from crewai.events.types.tool_usage_events import ToolUsageStartedEvent
from crewai.lite_agent import LiteAgent
from crewai.lite_agent_output import LiteAgentOutput
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from pydantic import BaseModel, Field
import pytest

View File

@@ -590,7 +590,7 @@ interactions:
"<function BaseTool.<lambda> at 0x107389260>", "result_as_answer": "False",
"max_usage_count": "None", "current_usage_count": "0"}], "max_iter": 2, "agent_executor":
"<crewai.agents.crew_agent_executor.CrewAgentExecutor object at 0x130de6540>",
"llm": "<crewai.llms.providers.openai.completion.OpenAICompletion object at
"llm": "<crewai.llm.providers.openai.completion.OpenAICompletion object at
0x130db6de0>", "crew": {"parent_flow": null, "name": "crew", "cache": true,
"tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0, ''i18n'':
{''prompt_file'': None}, ''name'': None, ''prompt_context'': '''', ''description'':
@@ -605,7 +605,7 @@ interactions:
''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
at 0x107389260>, ''result_as_answer'': False, ''max_usage_count'': None, ''current_usage_count'':
0}], ''max_iter'': 2, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x130de6540>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x130de6540>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x130db6de0>, ''crew'': Crew(id=991ac83f-9a29-411f-b0a0-0a335c7a2d0e,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -634,7 +634,7 @@ interactions:
''abc.Learn_About_Ai''>, ''description_updated'': False, ''cache_function'':
<function BaseTool.<lambda> at 0x107389260>, ''result_as_answer'': False, ''max_usage_count'':
None, ''current_usage_count'': 0}], ''max_iter'': 2, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x130de6540>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x130de6540>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x130db6de0>, ''crew'': Crew(id=991ac83f-9a29-411f-b0a0-0a335c7a2d0e,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -657,7 +657,7 @@ interactions:
{"fingerprint": {"metadata": "{}"}}, "callbacks": [], "adapted_agent": false,
"knowledge_config": null, "max_execution_time": null, "agent_ops_agent_name":
"test role", "agent_ops_agent_id": null, "step_callback": null, "use_system_prompt":
true, "function_calling_llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
true, "function_calling_llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x130db7020>", "system_template": null, "prompt_template": null, "response_template":
null, "allow_code_execution": false, "respect_context_window": true, "max_retry_limit":
2, "multimodal": false, "inject_date": false, "date_format": "%Y-%m-%d", "code_execution_mode":
@@ -1068,7 +1068,7 @@ interactions:
"<function BaseTool.<lambda> at 0x107e394e0>", "result_as_answer": "False",
"max_usage_count": "None", "current_usage_count": "0"}], "max_iter": 2, "agent_executor":
"<crewai.agents.crew_agent_executor.CrewAgentExecutor object at 0x13b37c980>",
"llm": "<crewai.llms.providers.openai.completion.OpenAICompletion object at
"llm": "<crewai.llm.providers.openai.completion.OpenAICompletion object at
0x13b7563c0>", "crew": {"parent_flow": null, "name": "crew", "cache": true,
"tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0, ''i18n'':
{''prompt_file'': None}, ''name'': None, ''prompt_context'': '''', ''description'':
@@ -1083,7 +1083,7 @@ interactions:
''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
at 0x107e394e0>, ''result_as_answer'': False, ''max_usage_count'': None, ''current_usage_count'':
0}], ''max_iter'': 2, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13b37c980>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13b37c980>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13b7563c0>, ''crew'': Crew(id=f38365e9-3206-45b6-8754-950cb03fe57e,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1112,7 +1112,7 @@ interactions:
''abc.Learn_About_Ai''>, ''description_updated'': False, ''cache_function'':
<function BaseTool.<lambda> at 0x107e394e0>, ''result_as_answer'': False, ''max_usage_count'':
None, ''current_usage_count'': 0}], ''max_iter'': 2, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13b37c980>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13b37c980>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13b7563c0>, ''crew'': Crew(id=f38365e9-3206-45b6-8754-950cb03fe57e,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1135,7 +1135,7 @@ interactions:
{"fingerprint": {"metadata": "{}"}}, "callbacks": [], "adapted_agent": false,
"knowledge_config": null, "max_execution_time": null, "agent_ops_agent_name":
"test role", "agent_ops_agent_id": null, "step_callback": null, "use_system_prompt":
true, "function_calling_llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
true, "function_calling_llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13b756690>", "system_template": null, "prompt_template": null, "response_template":
null, "allow_code_execution": false, "respect_context_window": true, "max_retry_limit":
2, "multimodal": false, "inject_date": false, "date_format": "%Y-%m-%d", "code_execution_mode":

View File

@@ -1274,7 +1274,7 @@ interactions:
"b6cf723e-04c8-40c5-a927-e2078cfbae59", "role": "test role", "goal": "test goal",
"backstory": "test backstory", "cache": true, "verbose": true, "max_rpm": null,
"allow_delegation": false, "tools": [], "max_iter": 6, "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab0abd0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab0abd0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>", "crew": {"parent_flow": null, "name": "crew", "cache":
true, "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0,
''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1285,7 +1285,7 @@ interactions:
''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1309,7 +1309,7 @@ interactions:
role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1491,7 +1491,7 @@ interactions:
"goal": "test goal", "backstory": "test backstory", "cache": true, "verbose":
true, "max_rpm": null, "allow_delegation": false, "tools": [], "max_iter": 6,
"agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor object
at 0x13ab0abd0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
at 0x13ab0abd0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>", "crew": {"parent_flow": null, "name": "crew", "cache":
true, "tasks": ["{''used_tools'': 2, ''tools_errors'': 0, ''delegations'': 0,
''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1502,7 +1502,7 @@ interactions:
''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1528,7 +1528,7 @@ interactions:
role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1660,7 +1660,7 @@ interactions:
role", "goal": "test goal", "backstory": "test backstory", "cache": true, "verbose":
true, "max_rpm": null, "allow_delegation": false, "tools": [], "max_iter": 6,
"agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor object
at 0x13ab0abd0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
at 0x13ab0abd0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>", "crew": {"parent_flow": null, "name": "crew", "cache":
true, "tasks": ["{''used_tools'': 3, ''tools_errors'': 0, ''delegations'': 0,
''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1671,7 +1671,7 @@ interactions:
''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1698,7 +1698,7 @@ interactions:
role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1839,7 +1839,7 @@ interactions:
"goal": "test goal", "backstory": "test backstory", "cache": true, "verbose":
true, "max_rpm": null, "allow_delegation": false, "tools": [], "max_iter": 6,
"agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor object
at 0x13ab0abd0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
at 0x13ab0abd0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>", "crew": {"parent_flow": null, "name": "crew", "cache":
true, "tasks": ["{''used_tools'': 4, ''tools_errors'': 0, ''delegations'': 0,
''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1850,7 +1850,7 @@ interactions:
''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1879,7 +1879,7 @@ interactions:
role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -2029,7 +2029,7 @@ interactions:
"goal": "test goal", "backstory": "test backstory", "cache": true, "verbose":
true, "max_rpm": null, "allow_delegation": false, "tools": [], "max_iter": 6,
"agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor object
at 0x13ab0abd0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
at 0x13ab0abd0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>", "crew": {"parent_flow": null, "name": "crew", "cache":
true, "tasks": ["{''used_tools'': 5, ''tools_errors'': 0, ''delegations'': 0,
''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -2040,7 +2040,7 @@ interactions:
''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -2070,7 +2070,7 @@ interactions:
role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler

View File

@@ -1082,7 +1082,7 @@ interactions:
"role": "test role", "goal": "test goal", "backstory": "test backstory", "cache":
true, "verbose": true, "max_rpm": 10, "allow_delegation": false, "tools": [],
"max_iter": 4, "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x133d41100>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x133d41100>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x133d40500>", "crew": {"parent_flow": null, "name": "crew", "cache":
true, "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0,
''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1093,7 +1093,7 @@ interactions:
''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': 10, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 4, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x133d41100>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x133d41100>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x133d40500>, ''crew'': Crew(id=4c6d502e-f6ec-446a-8f76-644563c4aa94,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1117,7 +1117,7 @@ interactions:
''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': 10, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 4, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x133d41100>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x133d41100>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x133d40500>, ''crew'': Crew(id=4c6d502e-f6ec-446a-8f76-644563c4aa94,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1910,7 +1910,7 @@ interactions:
"role": "test role", "goal": "test goal", "backstory": "test backstory", "cache":
true, "verbose": true, "max_rpm": 10, "allow_delegation": false, "tools": [],
"max_iter": 4, "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x10308d610>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x10308d610>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x129201640>", "crew": {"parent_flow": null, "name": "crew", "cache":
true, "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0,
''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1921,7 +1921,7 @@ interactions:
''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': 10, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 4, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x10308d610>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x10308d610>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x129201640>, ''crew'': Crew(id=1a07d718-fed5-49fa-bee2-de2db91c9f33,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1945,7 +1945,7 @@ interactions:
''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
True, ''verbose'': True, ''max_rpm'': 10, ''allow_delegation'': False, ''tools'':
[], ''max_iter'': 4, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x10308d610>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x10308d610>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x129201640>, ''crew'': Crew(id=1a07d718-fed5-49fa-bee2-de2db91c9f33,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler

View File

@@ -937,7 +937,7 @@ interactions:
"description_updated": "False", "cache_function": "<function BaseTool.<lambda>
at 0x10614d3a0>", "result_as_answer": "False", "max_usage_count": "None", "current_usage_count":
"0"}], "max_iter": 25, "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x10f6c3bc0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x10f6c3bc0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x10f6c27e0>", "crew": {"parent_flow": null, "name": "crew", "cache":
true, "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0,
''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -977,7 +977,7 @@ interactions:
''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
at 0x10614d3a0>, ''result_as_answer'': False, ''max_usage_count'': None, ''current_usage_count'':
0}], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x10f6c3bc0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x10f6c3bc0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x10f6c27e0>, ''crew'': Crew(id=49cbb747-f055-4636-bbca-9e8a450c05f6,
process=Process.hierarchical, number_of_agents=2, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -996,7 +996,7 @@ interactions:
''First Agent'', ''goal'': ''First goal'', ''backstory'': ''First backstory'',
''cache'': True, ''verbose'': False, ''max_rpm'': None, ''allow_delegation'':
False, ''tools'': [], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x11059ca10>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x11059ca10>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x10f6e6ae0>, ''crew'': Crew(id=49cbb747-f055-4636-bbca-9e8a450c05f6,
process=Process.hierarchical, number_of_agents=2, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1007,7 +1007,7 @@ interactions:
''role'': ''Second Agent'', ''goal'': ''Second goal'', ''backstory'': ''Second
backstory'', ''cache'': True, ''verbose'': False, ''max_rpm'': None, ''allow_delegation'':
False, ''tools'': [], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x10f6c3500>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x10f6c3500>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x10f6d2000>, ''crew'': Crew(id=49cbb747-f055-4636-bbca-9e8a450c05f6,
process=Process.hierarchical, number_of_agents=2, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1017,7 +1017,7 @@ interactions:
False, ''knowledge_config'': None}"], "process": "hierarchical", "verbose":
false, "memory": false, "short_term_memory": null, "long_term_memory": null,
"entity_memory": null, "external_memory": null, "embedder": null, "usage_metrics":
null, "manager_llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
null, "manager_llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x10f6c27e0>", "manager_agent": {"id": "UUID(''b0898472-5e3b-45bb-bd90-05bad0b5a8ce'')",
"role": "''Crew Manager''", "goal": "''Manage the team to complete the task
in the best way possible.''", "backstory": "\"You are a seasoned manager with
@@ -1053,7 +1053,7 @@ interactions:
''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
at 0x10614d3a0>, ''result_as_answer'': False, ''max_usage_count'': None, ''current_usage_count'':
0}]", "max_iter": "25", "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x10f6c3bc0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x10f6c3bc0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x10f6c27e0>", "crew": "Crew(id=49cbb747-f055-4636-bbca-9e8a450c05f6,
process=Process.hierarchical, number_of_agents=2, number_of_tasks=1)", "i18n":
"{''prompt_file'': None}", "cache_handler": "{}", "tools_handler": "<crewai.agents.tools_handler.ToolsHandler
@@ -1805,7 +1805,7 @@ interactions:
"description_updated": "False", "cache_function": "<function BaseTool.<lambda>
at 0x107e394e0>", "result_as_answer": "False", "max_usage_count": "None", "current_usage_count":
"0"}], "max_iter": 25, "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x1388bedb0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x1388bedb0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x1388bf710>", "crew": {"parent_flow": null, "name": "crew", "cache":
true, "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0,
''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1845,7 +1845,7 @@ interactions:
''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
at 0x107e394e0>, ''result_as_answer'': False, ''max_usage_count'': None, ''current_usage_count'':
0}], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x1388bedb0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x1388bedb0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x1388bf710>, ''crew'': Crew(id=4d744f3e-0589-4d1d-b1c1-6aa8b52478ac,
process=Process.hierarchical, number_of_agents=2, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1864,7 +1864,7 @@ interactions:
''First Agent'', ''goal'': ''First goal'', ''backstory'': ''First backstory'',
''cache'': True, ''verbose'': False, ''max_rpm'': None, ''allow_delegation'':
False, ''tools'': [], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x1388d5c70>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x1388d5c70>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x1388bde80>, ''crew'': Crew(id=4d744f3e-0589-4d1d-b1c1-6aa8b52478ac,
process=Process.hierarchical, number_of_agents=2, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1875,7 +1875,7 @@ interactions:
''role'': ''Second Agent'', ''goal'': ''Second goal'', ''backstory'': ''Second
backstory'', ''cache'': True, ''verbose'': False, ''max_rpm'': None, ''allow_delegation'':
False, ''tools'': [], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x1388bf7d0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x1388bf7d0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x1388bfb90>, ''crew'': Crew(id=4d744f3e-0589-4d1d-b1c1-6aa8b52478ac,
process=Process.hierarchical, number_of_agents=2, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1885,7 +1885,7 @@ interactions:
False, ''knowledge_config'': None}"], "process": "hierarchical", "verbose":
false, "memory": false, "short_term_memory": null, "long_term_memory": null,
"entity_memory": null, "external_memory": null, "embedder": null, "usage_metrics":
null, "manager_llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
null, "manager_llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x1388bf710>", "manager_agent": {"id": "UUID(''09794b42-447f-4b7a-b634-3a861f457357'')",
"role": "''Crew Manager''", "goal": "''Manage the team to complete the task
in the best way possible.''", "backstory": "\"You are a seasoned manager with
@@ -1921,7 +1921,7 @@ interactions:
''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
at 0x107e394e0>, ''result_as_answer'': False, ''max_usage_count'': None, ''current_usage_count'':
0}]", "max_iter": "25", "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x1388bedb0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x1388bedb0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x1388bf710>", "crew": "Crew(id=4d744f3e-0589-4d1d-b1c1-6aa8b52478ac,
process=Process.hierarchical, number_of_agents=2, number_of_tasks=1)", "i18n":
"{''prompt_file'': None}", "cache_handler": "{}", "tools_handler": "<crewai.agents.tools_handler.ToolsHandler

View File

@@ -126,7 +126,7 @@ interactions:
a freelancer and is now working on doing research and analysis for a new customer.\",
''cache'': True, ''verbose'': False, ''max_rpm'': None, ''allow_delegation'':
False, ''tools'': [], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x12b973fe0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x12b973fe0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x12b910290>, ''crew'': None, ''i18n'': {''prompt_file'': None}, ''cache_handler'':
{}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler object at 0x12b9934d0>,
''tools_results'': [], ''max_tokens'': None, ''knowledge'': None, ''knowledge_sources'':
@@ -149,7 +149,7 @@ interactions:
writing content for a new customer.\", ''cache'': True, ''verbose'': False,
''max_rpm'': None, ''allow_delegation'': False, ''tools'': [], ''max_iter'':
25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x12b7bbbf0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x12b7bbbf0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x12b9903b0>, ''crew'': None, ''i18n'': {''prompt_file'': None}, ''cache_handler'':
{}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler object at 0x12b631bb0>,
''tools_results'': [], ''max_tokens'': None, ''knowledge'': None, ''knowledge_sources'':
@@ -169,7 +169,7 @@ interactions:
a freelancer and is now working on doing research and analysis for a new customer.\"",
"cache": "True", "verbose": "False", "max_rpm": "None", "allow_delegation":
"False", "tools": "[]", "max_iter": "25", "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x12b973fe0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x12b973fe0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x12b910290>", "crew": "None", "i18n": "{''prompt_file'': None}",
"cache_handler": "{}", "tools_handler": "<crewai.agents.tools_handler.ToolsHandler
object at 0x12b9934d0>", "tools_results": "[]", "max_tokens": "None", "knowledge":
@@ -182,7 +182,7 @@ interactions:
You work as a freelancer and are now working on writing content for a new customer.\"",
"cache": "True", "verbose": "False", "max_rpm": "None", "allow_delegation":
"False", "tools": "[]", "max_iter": "25", "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x12b7bbbf0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x12b7bbbf0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x12b9903b0>", "crew": "None", "i18n": "{''prompt_file'': None}",
"cache_handler": "{}", "tools_handler": "<crewai.agents.tools_handler.ToolsHandler
object at 0x12b631bb0>", "tools_results": "[]", "max_tokens": "None", "knowledge":
@@ -214,7 +214,7 @@ interactions:
a freelancer and is now working on doing research and analysis for a new customer.\",
''cache'': True, ''verbose'': False, ''max_rpm'': None, ''allow_delegation'':
False, ''tools'': [], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x12b973fe0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x12b973fe0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x12b910290>, ''crew'': None, ''i18n'': {''prompt_file'': None}, ''cache_handler'':
{}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler object at 0x12b9934d0>,
''tools_results'': [], ''max_tokens'': None, ''knowledge'': None, ''knowledge_sources'':
@@ -237,7 +237,7 @@ interactions:
writing content for a new customer.\", ''cache'': True, ''verbose'': False,
''max_rpm'': None, ''allow_delegation'': False, ''tools'': [], ''max_iter'':
25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x12b7bbbf0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x12b7bbbf0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x12b9903b0>, ''crew'': None, ''i18n'': {''prompt_file'': None}, ''cache_handler'':
{}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler object at 0x12b631bb0>,
''tools_results'': [], ''max_tokens'': None, ''knowledge'': None, ''knowledge_sources'':
@@ -257,7 +257,7 @@ interactions:
a freelancer and is now working on doing research and analysis for a new customer.\"",
"cache": "True", "verbose": "False", "max_rpm": "None", "allow_delegation":
"False", "tools": "[]", "max_iter": "25", "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x12b973fe0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x12b973fe0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x12b910290>", "crew": "None", "i18n": "{''prompt_file'': None}",
"cache_handler": "{}", "tools_handler": "<crewai.agents.tools_handler.ToolsHandler
object at 0x12b9934d0>", "tools_results": "[]", "max_tokens": "None", "knowledge":
@@ -270,7 +270,7 @@ interactions:
You work as a freelancer and are now working on writing content for a new customer.\"",
"cache": "True", "verbose": "False", "max_rpm": "None", "allow_delegation":
"False", "tools": "[]", "max_iter": "25", "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x12b7bbbf0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x12b7bbbf0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x12b9903b0>", "crew": "None", "i18n": "{''prompt_file'': None}",
"cache_handler": "{}", "tools_handler": "<crewai.agents.tools_handler.ToolsHandler
object at 0x12b631bb0>", "tools_results": "[]", "max_tokens": "None", "knowledge":

View File

@@ -468,7 +468,7 @@ interactions:
"description_updated": "False", "cache_function": "<function BaseTool.<lambda>
at 0x107ff9440>", "result_as_answer": "True", "max_usage_count": "None", "current_usage_count":
"0"}], "max_iter": 25, "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab2e030>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab2e030>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab2e5d0>", "crew": {"parent_flow": null, "name": "crew", "cache":
true, "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0,
''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -484,7 +484,7 @@ interactions:
<class ''abc.MyCustomToolSchema''>, ''description_updated'': False, ''cache_function'':
<function BaseTool.<lambda> at 0x107ff9440>, ''result_as_answer'': True, ''max_usage_count'':
None, ''current_usage_count'': 0}], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab2e030>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab2e030>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab2e5d0>, ''crew'': Crew(id=f74956dd-60d0-402a-a703-2cc3d767397f,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -512,7 +512,7 @@ interactions:
''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
at 0x107ff9440>, ''result_as_answer'': True, ''max_usage_count'': None, ''current_usage_count'':
0}], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
object at 0x13ab2e030>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
object at 0x13ab2e030>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
object at 0x13ab2e5d0>, ''crew'': Crew(id=f74956dd-60d0-402a-a703-2cc3d767397f,
process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
{''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
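
All of the hunks above touch recorded VCR cassettes: the request bodies embed each agent's serialized state, including the `repr()` of its LLM, so the `crewai.llms` → `crewai.llm` package rename rewrites one line per captured object path. A minimal sketch of why the fixtures churn, assuming the provider classes keep the default `object.__repr__` and accept a `model` kwarg:

```python
from crewai.llm.providers.openai.completion import OpenAICompletion

# The default repr embeds the defining module's path, which is exactly
# the string recorded in the cassettes (the hex address varies per run).
llm = OpenAICompletion(model="gpt-4o")  # constructor kwargs assumed
print(repr(llm))
# <crewai.llm.providers.openai.completion.OpenAICompletion object at 0x...>
```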

View File

@@ -34,7 +34,7 @@ def test_anthropic_completion_is_used_when_claude_provider():
"""
llm = LLM(model="claude/claude-3-5-sonnet-20241022")
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
from crewai.llm.providers.anthropic.completion import AnthropicCompletion
assert isinstance(llm, AnthropicCompletion)
assert llm.provider == "anthropic"
assert llm.model == "claude-3-5-sonnet-20241022"
@@ -47,7 +47,7 @@ def test_anthropic_tool_use_conversation_flow():
Test that the Anthropic completion properly handles tool use conversation flow
"""
from unittest.mock import Mock, patch
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
from crewai.llm.providers.anthropic.completion import AnthropicCompletion
from anthropic.types.tool_use_block import ToolUseBlock
# Create AnthropicCompletion instance
@@ -123,7 +123,7 @@ def test_anthropic_completion_module_is_imported():
"""
Test that the completion module is properly imported when using Anthropic provider
"""
module_name = "crewai.llms.providers.anthropic.completion"
module_name = "crewai.llm.providers.anthropic.completion"
# Remove module from cache if it exists
if module_name in sys.modules:
@@ -175,7 +175,7 @@ def test_anthropic_completion_initialization_parameters():
api_key="test-key"
)
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
from crewai.llm.providers.anthropic.completion import AnthropicCompletion
assert isinstance(llm, AnthropicCompletion)
assert llm.model == "claude-3-5-sonnet-20241022"
assert llm.temperature == 0.7
@@ -195,7 +195,7 @@ def test_anthropic_specific_parameters():
timeout=60
)
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
from crewai.llm.providers.anthropic.completion import AnthropicCompletion
assert isinstance(llm, AnthropicCompletion)
assert llm.stop_sequences == ["Human:", "Assistant:"]
assert llm.stream == True
@@ -390,7 +390,7 @@ def test_anthropic_raises_error_when_model_not_supported():
"""Test that AnthropicCompletion raises ValueError when model not supported"""
# Mock the Anthropic client to raise an error
with patch('crewai.llms.providers.anthropic.completion.Anthropic') as mock_anthropic_class:
with patch('crewai.llm.providers.anthropic.completion.Anthropic') as mock_anthropic_class:
mock_client = MagicMock()
mock_anthropic_class.return_value = mock_client
@@ -427,7 +427,7 @@ def test_anthropic_client_params_setup():
client_params=custom_client_params
)
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
from crewai.llm.providers.anthropic.completion import AnthropicCompletion
assert isinstance(llm, AnthropicCompletion)
assert llm.client_params == custom_client_params
@@ -462,7 +462,7 @@ def test_anthropic_client_params_override_defaults():
)
# Verify this is actually AnthropicCompletion, not LiteLLM fallback
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
from crewai.llm.providers.anthropic.completion import AnthropicCompletion
assert isinstance(llm, AnthropicCompletion)
merged_params = llm._get_client_params()
@@ -487,7 +487,7 @@ def test_anthropic_client_params_none():
client_params=None
)
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
from crewai.llm.providers.anthropic.completion import AnthropicCompletion
assert isinstance(llm, AnthropicCompletion)
assert llm.client_params is None
@@ -515,7 +515,7 @@ def test_anthropic_client_params_empty_dict():
client_params={}
)
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
from crewai.llm.providers.anthropic.completion import AnthropicCompletion
assert isinstance(llm, AnthropicCompletion)
assert llm.client_params == {}
@@ -538,7 +538,7 @@ def test_anthropic_model_detection():
for model_name in anthropic_test_cases:
llm = LLM(model=model_name)
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
from crewai.llm.providers.anthropic.completion import AnthropicCompletion
assert isinstance(llm, AnthropicCompletion), f"Failed for model: {model_name}"
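
Condensed, the routing contract these Anthropic tests pin down after the rename (model string and assertions taken verbatim from the hunks above):

```python
from crewai.llm import LLM
from crewai.llm.providers.anthropic.completion import AnthropicCompletion

# The "claude/" prefix selects the native Anthropic provider and is
# stripped from the stored model name.
llm = LLM(model="claude/claude-3-5-sonnet-20241022")
assert isinstance(llm, AnthropicCompletion)
assert llm.provider == "anthropic"
assert llm.model == "claude-3-5-sonnet-20241022"
```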

View File

@@ -37,7 +37,7 @@ def test_azure_completion_is_used_when_azure_openai_provider():
"""
llm = LLM(model="azure_openai/gpt-4")
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
assert isinstance(llm, AzureCompletion)
assert llm.provider == "azure"
assert llm.model == "gpt-4"
@@ -47,7 +47,7 @@ def test_azure_tool_use_conversation_flow():
"""
Test that the Azure completion properly handles tool use conversation flow
"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
from azure.ai.inference.models import ChatCompletionsToolCall
# Create AzureCompletion instance
@@ -105,7 +105,7 @@ def test_azure_completion_module_is_imported():
"""
Test that the completion module is properly imported when using Azure provider
"""
module_name = "crewai.llms.providers.azure.completion"
module_name = "crewai.llm.providers.azure.completion"
# Remove module from cache if it exists
if module_name in sys.modules:
@@ -160,7 +160,7 @@ def test_azure_completion_initialization_parameters():
endpoint="https://test.openai.azure.com"
)
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
assert isinstance(llm, AzureCompletion)
assert llm.model == "gpt-4"
assert llm.temperature == 0.7
@@ -182,7 +182,7 @@ def test_azure_specific_parameters():
endpoint="https://test.openai.azure.com"
)
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
assert isinstance(llm, AzureCompletion)
assert llm.stop == ["Human:", "Assistant:"]
assert llm.stream == True
@@ -374,7 +374,7 @@ def test_azure_completion_with_tools():
def test_azure_raises_error_when_endpoint_missing():
"""Test that AzureCompletion raises ValueError when endpoint is missing"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
# Clear environment variables
with patch.dict(os.environ, {}, clear=True):
@@ -383,7 +383,7 @@ def test_azure_raises_error_when_endpoint_missing():
def test_azure_raises_error_when_api_key_missing():
"""Test that AzureCompletion raises ValueError when API key is missing"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
# Clear environment variables
with patch.dict(os.environ, {}, clear=True):
@@ -400,7 +400,7 @@ def test_azure_endpoint_configuration():
}):
llm = LLM(model="azure/gpt-4")
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
assert isinstance(llm, AzureCompletion)
assert llm.endpoint == "https://test1.openai.azure.com/openai/deployments/gpt-4"
@@ -426,7 +426,7 @@ def test_azure_api_key_configuration():
}):
llm = LLM(model="azure/gpt-4")
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
assert isinstance(llm, AzureCompletion)
assert llm.api_key == "test-azure-key"
@@ -437,7 +437,7 @@ def test_azure_model_capabilities():
"""
# Test GPT-4 model (supports function calling)
llm_gpt4 = LLM(model="azure/gpt-4")
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
assert isinstance(llm_gpt4, AzureCompletion)
assert llm_gpt4.is_openai_model == True
assert llm_gpt4.supports_function_calling() == True
@@ -466,7 +466,7 @@ def test_azure_completion_params_preparation():
max_tokens=1000
)
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
assert isinstance(llm, AzureCompletion)
messages = [{"role": "user", "content": "Hello"}]
@@ -494,7 +494,7 @@ def test_azure_model_detection():
for model_name in azure_test_cases:
llm = LLM(model=model_name)
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
assert isinstance(llm, AzureCompletion), f"Failed for model: {model_name}"
@@ -662,7 +662,7 @@ def test_azure_streaming_completion():
"""
Test that streaming completions work properly
"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
from azure.ai.inference.models import StreamingChatCompletionsUpdate
llm = LLM(model="azure/gpt-4", stream=True)
@@ -698,7 +698,7 @@ def test_azure_api_version_default():
"""
llm = LLM(model="azure/gpt-4")
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
assert isinstance(llm, AzureCompletion)
# Should use default or environment variable
assert llm.api_version is not None
@@ -721,7 +721,7 @@ def test_azure_openai_endpoint_url_construction():
"""
Test that Azure OpenAI endpoint URLs are automatically constructed correctly
"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
with patch.dict(os.environ, {
"AZURE_API_KEY": "test-key",
@@ -738,7 +738,7 @@ def test_azure_openai_endpoint_url_with_trailing_slash():
"""
Test that trailing slashes are handled correctly in endpoint URLs
"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
with patch.dict(os.environ, {
"AZURE_API_KEY": "test-key",
@@ -804,7 +804,7 @@ def test_non_azure_openai_model_parameter_included():
"""
Test that model parameter IS included for non-Azure OpenAI endpoints
"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
with patch.dict(os.environ, {
"AZURE_API_KEY": "test-key",
@@ -824,7 +824,7 @@ def test_azure_message_formatting_with_role():
"""
Test that messages are formatted with both 'role' and 'content' fields
"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
llm = LLM(model="azure/gpt-4")
@@ -886,7 +886,7 @@ def test_azure_improved_error_messages():
"""
Test that improved error messages are provided for common HTTP errors
"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
from azure.core.exceptions import HttpResponseError
llm = LLM(model="azure/gpt-4")
@@ -918,7 +918,7 @@ def test_azure_api_version_properly_passed():
"""
Test that api_version is properly passed to the client
"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
with patch.dict(os.environ, {
"AZURE_API_KEY": "test-key",
@@ -940,7 +940,7 @@ def test_azure_timeout_and_max_retries_stored():
"""
Test that timeout and max_retries parameters are stored
"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
with patch.dict(os.environ, {
"AZURE_API_KEY": "test-key",
@@ -960,7 +960,7 @@ def test_azure_complete_params_include_optional_params():
"""
Test that optional parameters are included in completion params when set
"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
with patch.dict(os.environ, {
"AZURE_API_KEY": "test-key",
@@ -992,7 +992,7 @@ def test_azure_endpoint_validation_with_azure_prefix():
"""
Test that 'azure/' prefix is properly stripped when constructing endpoint
"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
with patch.dict(os.environ, {
"AZURE_API_KEY": "test-key",
@@ -1009,7 +1009,7 @@ def test_azure_message_formatting_preserves_all_roles():
"""
Test that all message roles (system, user, assistant) are preserved correctly
"""
from crewai.llms.providers.azure.completion import AzureCompletion
from crewai.llm.providers.azure.completion import AzureCompletion
llm = LLM(model="azure/gpt-4")

View File

@@ -19,7 +19,7 @@ def mock_aws_credentials():
"AWS_DEFAULT_REGION": "us-east-1"
}):
# Mock boto3 Session to prevent actual AWS connections
with patch('crewai.llms.providers.bedrock.completion.Session') as mock_session_class:
with patch('crewai.llm.providers.bedrock.completion.Session') as mock_session_class:
# Create mock session instance
mock_session_instance = MagicMock()
mock_client = MagicMock()
@@ -67,7 +67,7 @@ def test_bedrock_completion_module_is_imported():
"""
Test that the completion module is properly imported when using Bedrock provider
"""
module_name = "crewai.llms.providers.bedrock.completion"
module_name = "crewai.llm.providers.bedrock.completion"
# Remove module from cache if it exists
if module_name in sys.modules:
@@ -124,7 +124,7 @@ def test_bedrock_completion_initialization_parameters():
region_name="us-west-2"
)
from crewai.llms.providers.bedrock.completion import BedrockCompletion
from crewai.llm.providers.bedrock.completion import BedrockCompletion
assert isinstance(llm, BedrockCompletion)
assert llm.model == "anthropic.claude-3-5-sonnet-20241022-v2:0"
assert llm.temperature == 0.7
@@ -145,7 +145,7 @@ def test_bedrock_specific_parameters():
region_name="us-east-1"
)
from crewai.llms.providers.bedrock.completion import BedrockCompletion
from crewai.llm.providers.bedrock.completion import BedrockCompletion
assert isinstance(llm, BedrockCompletion)
assert llm.stop_sequences == ["Human:", "Assistant:"]
assert llm.stream == True
@@ -369,7 +369,7 @@ def test_bedrock_aws_credentials_configuration():
}):
llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
from crewai.llms.providers.bedrock.completion import BedrockCompletion
from crewai.llm.providers.bedrock.completion import BedrockCompletion
assert isinstance(llm, BedrockCompletion)
assert llm.region_name == "us-east-1"
@@ -390,7 +390,7 @@ def test_bedrock_model_capabilities():
"""
# Test Claude model
llm_claude = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
from crewai.llms.providers.bedrock.completion import BedrockCompletion
from crewai.llm.providers.bedrock.completion import BedrockCompletion
assert isinstance(llm_claude, BedrockCompletion)
assert llm_claude.is_claude_model == True
assert llm_claude.supports_tools == True
@@ -413,7 +413,7 @@ def test_bedrock_inference_config():
max_tokens=1000
)
from crewai.llms.providers.bedrock.completion import BedrockCompletion
from crewai.llm.providers.bedrock.completion import BedrockCompletion
assert isinstance(llm, BedrockCompletion)
# Test config preparation
@@ -444,7 +444,7 @@ def test_bedrock_model_detection():
for model_name in bedrock_test_cases:
llm = LLM(model=model_name)
from crewai.llms.providers.bedrock.completion import BedrockCompletion
from crewai.llm.providers.bedrock.completion import BedrockCompletion
assert isinstance(llm, BedrockCompletion), f"Failed for model: {model_name}"
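
For Bedrock the patch target moves with the package, which accounts for most of the churn above; a sketch of the mocked-session setup under the new path:

```python
from unittest.mock import MagicMock, patch

from crewai.llm import LLM
from crewai.llm.providers.bedrock.completion import BedrockCompletion

# Patching Session at its relocated import site keeps provider detection
# from opening a real AWS connection.
with patch("crewai.llm.providers.bedrock.completion.Session") as mock_session_class:
    mock_session_class.return_value.client.return_value = MagicMock()
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    assert isinstance(llm, BedrockCompletion)
```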

View File

@@ -34,7 +34,7 @@ def test_gemini_completion_is_used_when_gemini_provider():
"""
llm = LLM(model="gemini/gemini-2.0-flash-001")
from crewai.llms.providers.gemini.completion import GeminiCompletion
from crewai.llm.providers.gemini.completion import GeminiCompletion
assert isinstance(llm, GeminiCompletion)
assert llm.provider == "gemini"
assert llm.model == "gemini-2.0-flash-001"
@@ -47,7 +47,7 @@ def test_gemini_tool_use_conversation_flow():
Test that the Gemini completion properly handles tool use conversation flow
"""
from unittest.mock import Mock, patch
from crewai.llms.providers.gemini.completion import GeminiCompletion
from crewai.llm.providers.gemini.completion import GeminiCompletion
# Create GeminiCompletion instance
completion = GeminiCompletion(model="gemini-2.0-flash-001")
@@ -102,7 +102,7 @@ def test_gemini_completion_module_is_imported():
"""
Test that the completion module is properly imported when using Google provider
"""
module_name = "crewai.llms.providers.gemini.completion"
module_name = "crewai.llm.providers.gemini.completion"
# Remove module from cache if it exists
if module_name in sys.modules:
@@ -159,7 +159,7 @@ def test_gemini_completion_initialization_parameters():
api_key="test-key"
)
from crewai.llms.providers.gemini.completion import GeminiCompletion
from crewai.llm.providers.gemini.completion import GeminiCompletion
assert isinstance(llm, GeminiCompletion)
assert llm.model == "gemini-2.0-flash-001"
assert llm.temperature == 0.7
@@ -186,7 +186,7 @@ def test_gemini_specific_parameters():
location="us-central1"
)
from crewai.llms.providers.gemini.completion import GeminiCompletion
from crewai.llm.providers.gemini.completion import GeminiCompletion
assert isinstance(llm, GeminiCompletion)
assert llm.stop_sequences == ["Human:", "Assistant:"]
assert llm.stream == True
@@ -382,7 +382,7 @@ def test_gemini_raises_error_when_model_not_supported():
"""Test that GeminiCompletion raises ValueError when model not supported"""
# Mock the Google client to raise an error
with patch('crewai.llms.providers.gemini.completion.genai') as mock_genai:
with patch('crewai.llm.providers.gemini.completion.genai') as mock_genai:
mock_client = MagicMock()
mock_genai.Client.return_value = mock_client
@@ -420,7 +420,7 @@ def test_gemini_vertex_ai_setup():
location="us-west1"
)
from crewai.llms.providers.gemini.completion import GeminiCompletion
from crewai.llm.providers.gemini.completion import GeminiCompletion
assert isinstance(llm, GeminiCompletion)
assert llm.project == "test-project"
@@ -435,7 +435,7 @@ def test_gemini_api_key_configuration():
with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-google-key"}):
llm = LLM(model="google/gemini-2.0-flash-001")
from crewai.llms.providers.gemini.completion import GeminiCompletion
from crewai.llm.providers.gemini.completion import GeminiCompletion
assert isinstance(llm, GeminiCompletion)
assert llm.api_key == "test-google-key"
@@ -453,7 +453,7 @@ def test_gemini_model_capabilities():
"""
# Test Gemini 2.0 model
llm_2_0 = LLM(model="google/gemini-2.0-flash-001")
from crewai.llms.providers.gemini.completion import GeminiCompletion
from crewai.llm.providers.gemini.completion import GeminiCompletion
assert isinstance(llm_2_0, GeminiCompletion)
assert llm_2_0.is_gemini_2 == True
assert llm_2_0.supports_tools == True
@@ -477,7 +477,7 @@ def test_gemini_generation_config():
max_output_tokens=1000
)
from crewai.llms.providers.gemini.completion import GeminiCompletion
from crewai.llm.providers.gemini.completion import GeminiCompletion
assert isinstance(llm, GeminiCompletion)
# Test config preparation
@@ -504,7 +504,7 @@ def test_gemini_model_detection():
for model_name in gemini_test_cases:
llm = LLM(model=model_name)
from crewai.llms.providers.gemini.completion import GeminiCompletion
from crewai.llm.providers.gemini.completion import GeminiCompletion
assert isinstance(llm, GeminiCompletion), f"Failed for model: {model_name}"
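
And for Gemini, where the hunks show both the "gemini/" and "google/" prefixes resolving to the same completion class, with the API key read from the environment:

```python
import os
from unittest.mock import patch

from crewai.llm import LLM
from crewai.llm.providers.gemini.completion import GeminiCompletion

# Mirrors test_gemini_api_key_configuration above: "google/..." models
# pick up GOOGLE_API_KEY from the environment.
with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-google-key"}):
    llm = LLM(model="google/gemini-2.0-flash-001")
    assert isinstance(llm, GeminiCompletion)
    assert llm.api_key == "test-google-key"
```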

View File

@@ -6,7 +6,7 @@ import httpx
import pytest
from crewai.llm import LLM
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llm.hooks.base import BaseInterceptor
@pytest.fixture(autouse=True)

View File

@@ -3,7 +3,7 @@
import httpx
import pytest
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llm.hooks.base import BaseInterceptor
class SimpleInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):

View File

@@ -4,7 +4,7 @@ import httpx
import pytest
from crewai.llm import LLM
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llm.hooks.base import BaseInterceptor
class OpenAITestInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):

View File

@@ -5,8 +5,8 @@ from unittest.mock import Mock
import httpx
import pytest
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llms.hooks.transport import AsyncHTTPTransport, HTTPTransport
from crewai.llm.hooks.base import BaseInterceptor
from crewai.llm.hooks.transport import AsyncHTTPTransport, HTTPTransport
class TrackingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):

View File

@@ -6,7 +6,7 @@ import httpx
import pytest
from crewai.llm import LLM
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llm.hooks.base import BaseInterceptor
@pytest.fixture(autouse=True)
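
The five hook-test files above change only their import roots. For orientation, a minimal interceptor skeleton under the new path; the generic parameters come straight from the hunks, but the hook method name here is a hypothetical stand-in, not the library's confirmed API:

```python
import httpx

from crewai.llm.hooks.base import BaseInterceptor


class TracingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    """Sketch: tag outgoing requests. `intercept_request` is an assumed hook name."""

    def intercept_request(self, request: httpx.Request) -> httpx.Request:
        request.headers["x-trace-id"] = "local-debug"  # illustrative only
        return request
```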

View File

@@ -6,7 +6,7 @@ import openai
import pytest
from crewai.llm import LLM
from crewai.llms.providers.openai.completion import OpenAICompletion
from crewai.llm.providers.openai.completion import OpenAICompletion
from crewai.crew import Crew
from crewai.agent import Agent
from crewai.task import Task
@@ -29,7 +29,7 @@ def test_openai_completion_is_used_when_no_provider_prefix():
"""
llm = LLM(model="gpt-4o")
from crewai.llms.providers.openai.completion import OpenAICompletion
from crewai.llm.providers.openai.completion import OpenAICompletion
assert isinstance(llm, OpenAICompletion)
assert llm.provider == "openai"
assert llm.model == "gpt-4o"
@@ -63,7 +63,7 @@ def test_openai_completion_module_is_imported():
"""
Test that the completion module is properly imported when using OpenAI provider
"""
module_name = "crewai.llms.providers.openai.completion"
module_name = "crewai.llm.providers.openai.completion"
# Remove module from cache if it exists
if module_name in sys.modules:
@@ -114,7 +114,7 @@ def test_openai_completion_initialization_parameters():
api_key="test-key"
)
from crewai.llms.providers.openai.completion import OpenAICompletion
from crewai.llm.providers.openai.completion import OpenAICompletion
assert isinstance(llm, OpenAICompletion)
assert llm.model == "gpt-4o"
assert llm.temperature == 0.7
@@ -335,7 +335,7 @@ def test_openai_completion_call_returns_usage_metrics():
def test_openai_raises_error_when_model_not_supported():
"""Test that OpenAICompletion raises ValueError when model not supported"""
with patch('crewai.llms.providers.openai.completion.OpenAI') as mock_openai_class:
with patch('crewai.llm.providers.openai.completion.OpenAI') as mock_openai_class:
mock_client = MagicMock()
mock_openai_class.return_value = mock_client
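
The OpenAI hunks pin down the default route when no provider prefix is given, plus the relocated patch target used to simulate failures:

```python
from unittest.mock import MagicMock, patch

from crewai.llm import LLM
from crewai.llm.providers.openai.completion import OpenAICompletion

# A bare model name falls through to the OpenAI provider.
llm = LLM(model="gpt-4o")
assert isinstance(llm, OpenAICompletion)
assert llm.provider == "openai"

# Error-path tests patch the client class at its new module path.
with patch("crewai.llm.providers.openai.completion.OpenAI") as mock_openai_class:
    mock_openai_class.return_value = MagicMock()
```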

View File

@@ -2,7 +2,7 @@ from typing import Any, Dict, List, Optional, Union
import pytest
from crewai import Agent, Crew, Process, Task
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.utilities.llm_utils import create_llm

View File

@@ -743,7 +743,7 @@ def test_llm_emits_call_failed_event():
error_message = "OpenAI API call failed: Simulated API failure"
with patch(
"crewai.llms.providers.openai.completion.OpenAICompletion._handle_completion"
"crewai.llm.providers.openai.completion.OpenAICompletion._handle_completion"
) as mock_handle_completion:
mock_handle_completion.side_effect = Exception("Simulated API failure")

View File

@@ -4,7 +4,7 @@ from unittest.mock import patch
from crewai.cli.constants import DEFAULT_LLM_MODEL
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.utilities.llm_utils import create_llm
import pytest

20
logs.txt Normal file
View File

@@ -0,0 +1,20 @@
lib/crewai/src/crewai/agent/core.py:901: error: Argument 1 has incompatible type "ToolFilterContext"; expected "dict[str, Any]" [arg-type]
lib/crewai/src/crewai/agent/core.py:901: note: Error code "arg-type" not covered by "type: ignore" comment
lib/crewai/src/crewai/agent/core.py:905: error: Argument 1 has incompatible type "dict[str, Any]"; expected "ToolFilterContext" [arg-type]
lib/crewai/src/crewai/agent/core.py:905: note: Error code "arg-type" not covered by "type: ignore" comment
lib/crewai/src/crewai/agent/core.py:996: error: Returning Any from function declared to return "dict[str, dict[str, Any]]" [no-any-return]
lib/crewai/src/crewai/agent/core.py:1157: error: Incompatible types in assignment (expression has type "tuple[UnionType, None]", target has type "tuple[type, Any]") [assignment]
lib/crewai/src/crewai/agent/core.py:1183: error: Argument 1 to "append" of "list" has incompatible type "type"; expected "type[str]" [arg-type]
lib/crewai/src/crewai/agent/core.py:1188: error: Incompatible types in assignment (expression has type "UnionType", variable has type "type[str]") [assignment]
lib/crewai/src/crewai/agent/core.py:1201: error: Argument 1 to "get" of "dict" has incompatible type "Any | None"; expected "str" [arg-type]
Found 7 errors in 1 file (checked 4 source files)
Success: no issues found in 4 source files
lib/crewai/src/crewai/llm/providers/gemini/completion.py:111: error: BaseModel field may only be overridden by another field [misc]
Found 1 error in 1 file (checked 4 source files)
Success: no issues found in 4 source files
lib/crewai/src/crewai/llm/providers/anthropic/completion.py:101: error: BaseModel field may only be overridden by another field [misc]
Found 1 error in 1 file (checked 4 source files)
lib/crewai/src/crewai/llm/providers/bedrock/completion.py:250: error: BaseModel field may only be overridden by another field [misc]
Found 1 error in 1 file (checked 4 source files)
uv-lock..............................................(no files to check)Skipped
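
The recurring `BaseModel field may only be overridden by another field [misc]` entries in logs.txt come from pydantic's mypy plugin, which rejects a subclass that shadows an inherited model field with a non-field such as a property. A hypothetical minimal reproduction, not the actual provider code:

```python
from pydantic import BaseModel


class ProviderBase(BaseModel):
    stream: bool = False


class Completion(ProviderBase):
    # error: BaseModel field may only be overridden by another field [misc]
    @property
    def stream(self) -> bool:  # type: ignore[override]
        return True
```

The usual fix is to keep the override a plain annotated field, or give the property a different name.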