diff --git a/docs/en/concepts/llms.mdx b/docs/en/concepts/llms.mdx index fabf27aaa..fa080bc3e 100644 --- a/docs/en/concepts/llms.mdx +++ b/docs/en/concepts/llms.mdx @@ -1212,7 +1212,7 @@ Learn how to get the most out of your LLM configuration: ```python import httpx from crewai import LLM -from crewai.llms.hooks import BaseInterceptor +from crewai.llm.hooks import BaseInterceptor class CustomInterceptor(BaseInterceptor[httpx.Request, httpx.Response]): """Custom interceptor to modify requests and responses.""" diff --git a/lib/crewai/src/crewai/__init__.py b/lib/crewai/src/crewai/__init__.py index c992f11f7..ef2bcf78d 100644 --- a/lib/crewai/src/crewai/__init__.py +++ b/lib/crewai/src/crewai/__init__.py @@ -9,7 +9,7 @@ from crewai.crews.crew_output import CrewOutput from crewai.flow.flow import Flow from crewai.knowledge.knowledge import Knowledge from crewai.llm import LLM -from crewai.llms.base_llm import BaseLLM +from crewai.llm.base_llm import BaseLLM from crewai.process import Process from crewai.task import Task from crewai.tasks.llm_guardrail import LLMGuardrail diff --git a/lib/crewai/src/crewai/agent/core.py b/lib/crewai/src/crewai/agent/core.py index 1d94c4d19..7c2b96f71 100644 --- a/lib/crewai/src/crewai/agent/core.py +++ b/lib/crewai/src/crewai/agent/core.py @@ -39,7 +39,7 @@ from crewai.knowledge.knowledge import Knowledge from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context from crewai.lite_agent import LiteAgent -from crewai.llms.base_llm import BaseLLM +from crewai.llm.base_llm import BaseLLM from crewai.mcp import ( MCPClient, MCPServerConfig, @@ -626,7 +626,7 @@ class Agent(BaseAgent): ) self.agent_executor = CrewAgentExecutor( - llm=self.llm, + llm=self.llm, # type: ignore[arg-type] task=task, # type: ignore[arg-type] agent=self, crew=self.crew, @@ -803,6 +803,7 @@ class Agent(BaseAgent): from crewai.tools.base_tool import BaseTool from crewai.tools.mcp_native_tool import MCPNativeTool + transport: StdioTransport | HTTPTransport | SSETransport if isinstance(mcp_config, MCPServerStdio): transport = StdioTransport( command=mcp_config.command, @@ -896,10 +897,12 @@ class Agent(BaseAgent): server_name=server_name, run_context=None, ) - if mcp_config.tool_filter(context, tool): + # Try new signature first + if mcp_config.tool_filter(context, tool): # type: ignore[arg-type,call-arg] filtered_tools.append(tool) except (TypeError, AttributeError): - if mcp_config.tool_filter(tool): + # Fallback to old signature + if mcp_config.tool_filter(tool): # type: ignore[arg-type,call-arg] filtered_tools.append(tool) else: # Not callable - include tool @@ -974,7 +977,9 @@ class Agent(BaseAgent): path = parsed.path.replace("/", "_").strip("_") return f"{domain}_{path}" if path else domain - def _get_mcp_tool_schemas(self, server_params: dict) -> dict[str, dict]: + def _get_mcp_tool_schemas( + self, server_params: dict[str, Any] + ) -> dict[str, dict[str, Any]]: """Get tool schemas from MCP server for wrapper creation with caching.""" server_url = server_params["url"] @@ -988,7 +993,7 @@ class Agent(BaseAgent): self._logger.log( "debug", f"Using cached MCP tool schemas for {server_url}" ) - return cached_data + return cast(dict[str, dict[str, Any]], cached_data) try: schemas = asyncio.run(self._get_mcp_tool_schemas_async(server_params)) @@ -1006,7 +1011,7 @@ class Agent(BaseAgent): async def _get_mcp_tool_schemas_async( self, server_params: dict[str, Any] - ) -> dict[str, dict]: + ) -> 
dict[str, dict[str, Any]]: """Async implementation of MCP tool schema retrieval with timeouts and retries.""" server_url = server_params["url"] return await self._retry_mcp_discovery( @@ -1014,7 +1019,7 @@ class Agent(BaseAgent): ) async def _retry_mcp_discovery( - self, operation_func, server_url: str + self, operation_func: Any, server_url: str ) -> dict[str, dict[str, Any]]: """Retry MCP discovery operation with exponential backoff, avoiding try-except in loop.""" last_error = None @@ -1045,7 +1050,7 @@ class Agent(BaseAgent): @staticmethod async def _attempt_mcp_discovery( - operation_func, server_url: str + operation_func: Any, server_url: str ) -> tuple[dict[str, dict[str, Any]] | None, str, bool]: """Attempt single MCP discovery operation and return (result, error_message, should_retry).""" try: @@ -1149,13 +1154,13 @@ class Agent(BaseAgent): Field(..., description=field_description), ) else: - field_definitions[field_name] = ( + field_definitions[field_name] = ( # type: ignore[assignment] field_type | None, Field(default=None, description=field_description), ) model_name = f"{tool_name.replace('-', '_').replace(' ', '_')}Schema" - return create_model(model_name, **field_definitions) + return create_model(model_name, **field_definitions) # type: ignore[no-any-return,call-overload] def _json_type_to_python(self, field_schema: dict[str, Any]) -> type: """Convert JSON Schema type to Python type. @@ -1175,16 +1180,16 @@ class Agent(BaseAgent): if "const" in option: types.append(str) else: - types.append(self._json_type_to_python(option)) + types.append(self._json_type_to_python(option)) # type: ignore[arg-type] unique_types = list(set(types)) if len(unique_types) > 1: result = unique_types[0] for t in unique_types[1:]: - result = result | t + result = result | t # type: ignore[assignment] return result return unique_types[0] - type_mapping = { + type_mapping: dict[str, type] = { "string": str, "number": float, "integer": int, @@ -1193,10 +1198,10 @@ class Agent(BaseAgent): "object": dict, } - return type_mapping.get(json_type, Any) + return type_mapping.get(json_type or "", Any) @staticmethod - def _fetch_amp_mcp_servers(mcp_name: str) -> list[dict]: + def _fetch_amp_mcp_servers(mcp_name: str) -> list[dict[str, Any]]: """Fetch MCP server configurations from CrewAI AMP API.""" # TODO: Implement AMP API call to "integrations/mcps" endpoint # Should return list of server configs with URLs diff --git a/lib/crewai/src/crewai/agents/agent_builder/base_agent.py b/lib/crewai/src/crewai/agents/agent_builder/base_agent.py index 932c98611..33d137220 100644 --- a/lib/crewai/src/crewai/agents/agent_builder/base_agent.py +++ b/lib/crewai/src/crewai/agents/agent_builder/base_agent.py @@ -137,7 +137,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta): default=False, description="Enable agent to delegate and ask questions among each other.", ) - tools: list[BaseTool] | None = Field( + tools: list[BaseTool] = Field( default_factory=list, description="Tools at agents' disposal" ) max_iter: int = Field( @@ -161,7 +161,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta): description="An instance of the ToolsHandler class.", ) tools_results: list[dict[str, Any]] = Field( - default=[], description="Results of the tools used by the agent." + default_factory=list, description="Results of the tools used by the agent." ) max_tokens: int | None = Field( default=None, description="Maximum number of tokens for the agent's execution." 
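Editorial note: the tool-filter change in `agent/core.py` above tries the new `(context, tool)` filter signature first and falls back to the legacy `(tool)` signature. A minimal sketch of that compatibility pattern (the function and parameter names here are illustrative stand-ins, not the actual CrewAI API):

```python
# Sketch of the try-new-signature-then-fall-back pattern used when applying
# a user-supplied MCP tool filter. `context` and `tool` are placeholders.
from typing import Any, Callable


def apply_tool_filter(tool_filter: Callable[..., bool], context: Any, tool: Any) -> bool:
    try:
        # New-style filters accept (context, tool)
        return tool_filter(context, tool)
    except (TypeError, AttributeError):
        # Legacy filters accept only (tool)
        return tool_filter(tool)
```

As in the patch itself, a new-style filter that internally raises `TypeError` would be retried with the legacy signature; that trade-off is inherited from the original code.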
@@ -265,7 +265,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta): if not mcps: return mcps - validated_mcps = [] + validated_mcps: list[str | MCPServerConfig] = [] for mcp in mcps: if isinstance(mcp, str): if mcp.startswith(("https://", "crewai-amp:")): diff --git a/lib/crewai/src/crewai/agents/crew_agent_executor.py b/lib/crewai/src/crewai/agents/crew_agent_executor.py index 5b806658c..8cf3c1e12 100644 --- a/lib/crewai/src/crewai/agents/crew_agent_executor.py +++ b/lib/crewai/src/crewai/agents/crew_agent_executor.py @@ -47,7 +47,7 @@ if TYPE_CHECKING: from crewai.agent import Agent from crewai.agents.tools_handler import ToolsHandler from crewai.crew import Crew - from crewai.llms.base_llm import BaseLLM + from crewai.llm.base_llm import BaseLLM from crewai.task import Task from crewai.tools.base_tool import BaseTool from crewai.tools.structured_tool import CrewStructuredTool diff --git a/lib/crewai/src/crewai/crew.py b/lib/crewai/src/crewai/crew.py index f5af4a426..31eb7466c 100644 --- a/lib/crewai/src/crewai/crew.py +++ b/lib/crewai/src/crewai/crew.py @@ -57,7 +57,7 @@ from crewai.flow.flow_trackable import FlowTrackable from crewai.knowledge.knowledge import Knowledge from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource from crewai.llm import LLM -from crewai.llms.base_llm import BaseLLM +from crewai.llm.base_llm import BaseLLM from crewai.memory.entity.entity_memory import EntityMemory from crewai.memory.external.external_memory import ExternalMemory from crewai.memory.long_term.long_term_memory import LongTermMemory diff --git a/lib/crewai/src/crewai/lite_agent.py b/lib/crewai/src/crewai/lite_agent.py index 4314e900e..ef877e01b 100644 --- a/lib/crewai/src/crewai/lite_agent.py +++ b/lib/crewai/src/crewai/lite_agent.py @@ -40,7 +40,7 @@ from crewai.events.types.logging_events import AgentLogsExecutionEvent from crewai.flow.flow_trackable import FlowTrackable from crewai.lite_agent_output import LiteAgentOutput from crewai.llm import LLM -from crewai.llms.base_llm import BaseLLM +from crewai.llm.base_llm import BaseLLM from crewai.tools.base_tool import BaseTool from crewai.tools.structured_tool import CrewStructuredTool from crewai.utilities.agent_utils import ( @@ -503,7 +503,7 @@ class LiteAgent(FlowTrackable, BaseModel): AgentFinish: The final result of the agent execution. """ # Execute the agent loop - formatted_answer = None + formatted_answer: AgentAction | AgentFinish | None = None while not isinstance(formatted_answer, AgentFinish): try: if has_reached_max_iterations(self._iterations, self.max_iterations): @@ -551,7 +551,8 @@ class LiteAgent(FlowTrackable, BaseModel): show_logs=self._show_logs, ) - self._append_message(formatted_answer.text, role="assistant") + if formatted_answer is not None: + self._append_message(formatted_answer.text, role="assistant") except OutputParserError as e: # noqa: PERF203 self._printer.print( content="Failed to parse LLM output. 
Retrying...", diff --git a/lib/crewai/src/crewai/llm/__init__.py b/lib/crewai/src/crewai/llm/__init__.py new file mode 100644 index 000000000..57cb57069 --- /dev/null +++ b/lib/crewai/src/crewai/llm/__init__.py @@ -0,0 +1,4 @@ +from crewai.llm.core import LLM + + +__all__ = ["LLM"] diff --git a/lib/crewai/src/crewai/llms/base_llm.py b/lib/crewai/src/crewai/llm/base_llm.py similarity index 84% rename from lib/crewai/src/crewai/llms/base_llm.py rename to lib/crewai/src/crewai/llm/base_llm.py index a7026c5c5..122276345 100644 --- a/lib/crewai/src/crewai/llms/base_llm.py +++ b/lib/crewai/src/crewai/llm/base_llm.py @@ -11,9 +11,10 @@ from datetime import datetime import json import logging import re -from typing import TYPE_CHECKING, Any, Final +from typing import TYPE_CHECKING, Any, ClassVar, Final -from pydantic import BaseModel +import httpx +from pydantic import BaseModel, ConfigDict, Field, model_validator from crewai.events.event_bus import crewai_event_bus from crewai.events.types.llm_events import ( @@ -28,6 +29,8 @@ from crewai.events.types.tool_usage_events import ( ToolUsageFinishedEvent, ToolUsageStartedEvent, ) +from crewai.llm.hooks.base import BaseInterceptor +from crewai.llm.internal.meta import LLMMeta from crewai.types.usage_metrics import UsageMetrics @@ -43,7 +46,7 @@ DEFAULT_SUPPORTS_STOP_WORDS: Final[bool] = True _JSON_EXTRACTION_PATTERN: Final[re.Pattern[str]] = re.compile(r"\{.*}", re.DOTALL) -class BaseLLM(ABC): +class BaseLLM(BaseModel, ABC, metaclass=LLMMeta): """Abstract base class for LLM implementations. This class defines the interface that all LLM implementations must follow. @@ -62,46 +65,96 @@ class BaseLLM(ABC): additional_params: Additional provider-specific parameters. """ - is_litellm: bool = False + model_config: ClassVar[ConfigDict] = ConfigDict( + arbitrary_types_allowed=True, extra="allow", validate_assignment=True + ) - def __init__( - self, - model: str, - temperature: float | None = None, - api_key: str | None = None, - base_url: str | None = None, - provider: str | None = None, - **kwargs: Any, - ) -> None: - """Initialize the BaseLLM with default attributes. + # Core fields + model: str = Field(..., description="The model identifier/name") + temperature: float | None = Field( + None, description="Temperature setting for response generation" + ) + api_key: str | None = Field(None, description="API key for authentication") + base_url: str | None = Field(None, description="Base URL for API requests") + provider: str = Field( + default="openai", description="Provider name (openai, anthropic, etc.)" + ) + stop: list[str] = Field( + default_factory=list, description="Stop sequences for generation" + ) + + # Internal fields + is_litellm: bool = Field( + default=False, description="Whether this instance uses LiteLLM" + ) + interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = Field( + None, description="HTTP request/response interceptor" + ) + _token_usage: dict[str, int] = { + "total_tokens": 0, + "prompt_tokens": 0, + "completion_tokens": 0, + "successful_requests": 0, + "cached_prompt_tokens": 0, + } + + @model_validator(mode="before") + @classmethod + def _extract_stop_and_validate(cls, values: dict[str, Any]) -> dict[str, Any]: + """Extract and normalize stop sequences before model initialization. Args: - model: The model identifier/name. - temperature: Optional temperature setting for response generation. - stop: Optional list of stop sequences for generation. - **kwargs: Additional provider-specific parameters. 
+ values: Input values dictionary + + Returns: + Processed values dictionary """ - if not model: + if not values.get("model"): raise ValueError("Model name is required and cannot be empty") - self.model = model - self.temperature = temperature - self.api_key = api_key - self.base_url = base_url - # Store additional parameters for provider-specific use - self.additional_params = kwargs - self._provider = provider or "openai" - - stop = kwargs.pop("stop", None) + # Handle stop sequences + stop = values.get("stop") if stop is None: - self.stop: list[str] = [] + values["stop"] = [] elif isinstance(stop, str): - self.stop = [stop] - elif isinstance(stop, list): - self.stop = stop - else: - self.stop = [] + values["stop"] = [stop] + elif not isinstance(stop, list): + values["stop"] = [] + # Set default provider if not specified + if "provider" not in values or values["provider"] is None: + values["provider"] = "openai" + + return values + + @property + def additional_params(self) -> dict[str, Any]: + """Get additional parameters stored as extra fields. + + Returns: + Dictionary of additional parameters + """ + return self.__pydantic_extra__ or {} + + @additional_params.setter + def additional_params(self, value: dict[str, Any]) -> None: + """Set additional parameters as extra fields. + + Args: + value: Dictionary of additional parameters to set + """ + if not isinstance(value, dict): + raise ValueError("additional_params must be a dictionary") + if self.__pydantic_extra__ is None: + self.__pydantic_extra__ = {} + self.__pydantic_extra__.update(value) + + def model_post_init(self, __context: Any) -> None: + """Initialize token usage tracking after model initialization. + + Args: + __context: Pydantic context (unused) + """ self._token_usage = { "total_tokens": 0, "prompt_tokens": 0, @@ -110,16 +163,6 @@ class BaseLLM(ABC): "cached_prompt_tokens": 0, } - @property - def provider(self) -> str: - """Get the provider of the LLM.""" - return self._provider - - @provider.setter - def provider(self, value: str) -> None: - """Set the provider of the LLM.""" - self._provider = value - @abstractmethod def call( self, diff --git a/lib/crewai/src/crewai/llms/constants.py b/lib/crewai/src/crewai/llm/constants.py similarity index 100% rename from lib/crewai/src/crewai/llms/constants.py rename to lib/crewai/src/crewai/llm/constants.py diff --git a/lib/crewai/src/crewai/llm.py b/lib/crewai/src/crewai/llm/core.py similarity index 86% rename from lib/crewai/src/crewai/llm.py rename to lib/crewai/src/crewai/llm/core.py index b0cf42091..a386e2bbc 100644 --- a/lib/crewai/src/crewai/llm.py +++ b/lib/crewai/src/crewai/llm/core.py @@ -20,9 +20,7 @@ from typing import ( ) from dotenv import load_dotenv -import httpx from pydantic import BaseModel, Field -from typing_extensions import Self from crewai.events.event_bus import crewai_event_bus from crewai.events.types.llm_events import ( @@ -37,14 +35,7 @@ from crewai.events.types.tool_usage_events import ( ToolUsageFinishedEvent, ToolUsageStartedEvent, ) -from crewai.llms.base_llm import BaseLLM -from crewai.llms.constants import ( - ANTHROPIC_MODELS, - AZURE_MODELS, - BEDROCK_MODELS, - GEMINI_MODELS, - OPENAI_MODELS, -) +from crewai.llm.base_llm import BaseLLM from crewai.utilities import InternalInstructor from crewai.utilities.exceptions.context_window_exceeding_exception import ( LLMContextLengthExceededError, @@ -61,7 +52,6 @@ if TYPE_CHECKING: from litellm.utils import supports_response_schema from crewai.agent.core import Agent - from crewai.llms.hooks.base import 
BaseInterceptor from crewai.task import Task from crewai.tools.base_tool import BaseTool from crewai.utilities.types import LLMMessage @@ -327,249 +317,57 @@ class AccumulatedToolArgs(BaseModel): class LLM(BaseLLM): - completion_cost: float | None = None + """LiteLLM-based LLM implementation for CrewAI. - def __new__(cls, model: str, is_litellm: bool = False, **kwargs: Any) -> LLM: - """Factory method that routes to native SDK or falls back to LiteLLM. + This class provides LiteLLM integration for models not covered by native providers. + The metaclass (LLMMeta) automatically routes to native providers when appropriate. + """ - Routing priority: - 1. If 'provider' kwarg is present, use that provider with constants - 2. If only 'model' kwarg, use constants to infer provider - 3. If "/" in model name: - - Check if prefix is a native provider (openai/anthropic/azure/bedrock/gemini) - - If yes, validate model against constants - - If valid, route to native SDK; otherwise route to LiteLLM - """ - if not model or not isinstance(model, str): - raise ValueError("Model must be a non-empty string") + # LiteLLM-specific fields + completion_cost: float | None = Field(None, description="Cost of completion") + timeout: float | int | None = Field(None, description="Request timeout") + top_p: float | None = Field(None, description="Top-p sampling parameter") + n: int | None = Field(None, description="Number of completions to generate") + max_completion_tokens: int | None = Field( + None, description="Maximum completion tokens" + ) + max_tokens: int | float | None = Field(None, description="Maximum total tokens") + presence_penalty: float | None = Field(None, description="Presence penalty") + frequency_penalty: float | None = Field(None, description="Frequency penalty") + logit_bias: dict[int, float] | None = Field(None, description="Logit bias") + response_format: type[BaseModel] | None = Field( + None, description="Response format model" + ) + seed: int | None = Field(None, description="Random seed for reproducibility") + logprobs: int | None = Field(None, description="Log probabilities to return") + top_logprobs: int | None = Field(None, description="Top log probabilities") + api_base: str | None = Field(None, description="API base URL (alias for base_url)") + api_version: str | None = Field(None, description="API version") + callbacks: list[Any] | None = Field(None, description="Callback functions") + context_window_size: int = Field(0, description="Context window size in tokens") + reasoning_effort: Literal["none", "low", "medium", "high"] | None = Field( + None, description="Reasoning effort level" + ) + is_anthropic: bool = Field(False, description="Whether model is from Anthropic") + stream: bool = Field(False, description="Whether to stream responses") - explicit_provider = kwargs.get("provider") - - if explicit_provider: - provider = explicit_provider - use_native = True - model_string = model - elif "/" in model: - prefix, _, model_part = model.partition("/") - - provider_mapping = { - "openai": "openai", - "anthropic": "anthropic", - "claude": "anthropic", - "azure": "azure", - "azure_openai": "azure", - "google": "gemini", - "gemini": "gemini", - "bedrock": "bedrock", - "aws": "bedrock", - } - - canonical_provider = provider_mapping.get(prefix.lower()) - - if canonical_provider and cls._validate_model_in_constants( - model_part, canonical_provider - ): - provider = canonical_provider - use_native = True - model_string = model_part - else: - provider = prefix - use_native = False - model_string 
= model_part - else: - provider = cls._infer_provider_from_model(model) - use_native = True - model_string = model - - native_class = cls._get_native_provider(provider) if use_native else None - if native_class and not is_litellm and provider in SUPPORTED_NATIVE_PROVIDERS: - try: - # Remove 'provider' from kwargs if it exists to avoid duplicate keyword argument - kwargs_copy = {k: v for k, v in kwargs.items() if k != 'provider'} - return cast( - Self, native_class(model=model_string, provider=provider, **kwargs_copy) - ) - except NotImplementedError: - raise - except Exception as e: - raise ImportError(f"Error importing native provider: {e}") from e - - # FALLBACK to LiteLLM - if not LITELLM_AVAILABLE: - logger.error("LiteLLM is not available, falling back to LiteLLM") - raise ImportError("Fallback to LiteLLM is not available") from None - - instance = object.__new__(cls) - super(LLM, instance).__init__(model=model, is_litellm=True, **kwargs) - instance.is_litellm = True - return instance - - @classmethod - def _validate_model_in_constants(cls, model: str, provider: str) -> bool: - """Validate if a model name exists in the provider's constants. + def model_post_init(self, __context: Any) -> None: + """Initialize LiteLLM-specific settings after model initialization. Args: - model: The model name to validate - provider: The provider to check against (canonical name) - - Returns: - True if the model exists in the provider's constants, False otherwise + __context: Pydantic context """ - if provider == "openai": - return model in OPENAI_MODELS + super().model_post_init(__context) - if provider == "anthropic" or provider == "claude": - return model in ANTHROPIC_MODELS + # Configure LiteLLM + if LITELLM_AVAILABLE: + litellm.drop_params = True - if provider == "gemini": - return model in GEMINI_MODELS + # Determine if this is an Anthropic model + self.is_anthropic = self._is_anthropic_model(self.model) - if provider == "bedrock": - return model in BEDROCK_MODELS - - if provider == "azure": - # azure does not provide a list of available models, determine a better way to handle this - return True - - return False - - @classmethod - def _infer_provider_from_model(cls, model: str) -> str: - """Infer the provider from the model name. 
- - Args: - model: The model name without provider prefix - - Returns: - The inferred provider name, defaults to "openai" - """ - - if model in OPENAI_MODELS: - return "openai" - - if model in ANTHROPIC_MODELS: - return "anthropic" - - if model in GEMINI_MODELS: - return "gemini" - - if model in BEDROCK_MODELS: - return "bedrock" - - if model in AZURE_MODELS: - return "azure" - - return "openai" - - @classmethod - def _get_native_provider(cls, provider: str) -> type | None: - """Get native provider class if available.""" - if provider == "openai": - from crewai.llms.providers.openai.completion import OpenAICompletion - - return OpenAICompletion - - if provider == "anthropic" or provider == "claude": - from crewai.llms.providers.anthropic.completion import ( - AnthropicCompletion, - ) - - return AnthropicCompletion - - if provider == "azure" or provider == "azure_openai": - from crewai.llms.providers.azure.completion import AzureCompletion - - return AzureCompletion - - if provider == "google" or provider == "gemini": - from crewai.llms.providers.gemini.completion import GeminiCompletion - - return GeminiCompletion - - if provider == "bedrock": - from crewai.llms.providers.bedrock.completion import BedrockCompletion - - return BedrockCompletion - - return None - - def __init__( - self, - model: str, - timeout: float | int | None = None, - temperature: float | None = None, - top_p: float | None = None, - n: int | None = None, - stop: str | list[str] | None = None, - max_completion_tokens: int | None = None, - max_tokens: int | float | None = None, - presence_penalty: float | None = None, - frequency_penalty: float | None = None, - logit_bias: dict[int, float] | None = None, - response_format: type[BaseModel] | None = None, - seed: int | None = None, - logprobs: int | None = None, - top_logprobs: int | None = None, - base_url: str | None = None, - api_base: str | None = None, - api_version: str | None = None, - api_key: str | None = None, - callbacks: list[Any] | None = None, - reasoning_effort: Literal["none", "low", "medium", "high"] | None = None, - stream: bool = False, - interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None, - **kwargs: Any, - ) -> None: - """Initialize LLM instance. - - Note: This __init__ method is only called for fallback instances. - Native provider instances handle their own initialization in their respective classes. 
- """ - super().__init__( - model=model, - temperature=temperature, - api_key=api_key, - base_url=base_url, - timeout=timeout, - **kwargs, - ) - self.model = model - self.timeout = timeout - self.temperature = temperature - self.top_p = top_p - self.n = n - self.max_completion_tokens = max_completion_tokens - self.max_tokens = max_tokens - self.presence_penalty = presence_penalty - self.frequency_penalty = frequency_penalty - self.logit_bias = logit_bias - self.response_format = response_format - self.seed = seed - self.logprobs = logprobs - self.top_logprobs = top_logprobs - self.base_url = base_url - self.api_base = api_base - self.api_version = api_version - self.api_key = api_key - self.callbacks = callbacks - self.context_window_size = 0 - self.reasoning_effort = reasoning_effort - self.additional_params = kwargs - self.is_anthropic = self._is_anthropic_model(model) - self.stream = stream - self.interceptor = interceptor - - litellm.drop_params = True - - # Normalize self.stop to always be a list[str] - if stop is None: - self.stop: list[str] = [] - elif isinstance(stop, str): - self.stop = [stop] - else: - self.stop = stop - - self.set_callbacks(callbacks or []) + # Set up callbacks + self.set_callbacks(self.callbacks or []) self.set_env_callbacks() @staticmethod @@ -1649,7 +1447,7 @@ class LLM(BaseLLM): **filtered_params, ) - def __deepcopy__(self, memo: dict[int, Any] | None) -> LLM: + def __deepcopy__(self, memo: dict[int, Any] | None) -> LLM: # type: ignore[override] """Create a deep copy of the LLM instance.""" import copy diff --git a/lib/crewai/src/crewai/llms/hooks/__init__.py b/lib/crewai/src/crewai/llm/hooks/__init__.py similarity index 58% rename from lib/crewai/src/crewai/llms/hooks/__init__.py rename to lib/crewai/src/crewai/llm/hooks/__init__.py index 2bbad217d..5c949294e 100644 --- a/lib/crewai/src/crewai/llms/hooks/__init__.py +++ b/lib/crewai/src/crewai/llm/hooks/__init__.py @@ -1,6 +1,6 @@ """Interceptor contracts for crewai""" -from crewai.llms.hooks.base import BaseInterceptor +from crewai.llm.hooks.base import BaseInterceptor __all__ = ["BaseInterceptor"] diff --git a/lib/crewai/src/crewai/llms/hooks/base.py b/lib/crewai/src/crewai/llm/hooks/base.py similarity index 100% rename from lib/crewai/src/crewai/llms/hooks/base.py rename to lib/crewai/src/crewai/llm/hooks/base.py diff --git a/lib/crewai/src/crewai/llms/hooks/transport.py b/lib/crewai/src/crewai/llm/hooks/transport.py similarity index 98% rename from lib/crewai/src/crewai/llms/hooks/transport.py rename to lib/crewai/src/crewai/llm/hooks/transport.py index 27a0972ab..db99a0d59 100644 --- a/lib/crewai/src/crewai/llms/hooks/transport.py +++ b/lib/crewai/src/crewai/llm/hooks/transport.py @@ -22,7 +22,7 @@ if TYPE_CHECKING: from httpx import Limits, Request, Response from httpx._types import CertTypes, ProxyTypes - from crewai.llms.hooks.base import BaseInterceptor + from crewai.llm.hooks.base import BaseInterceptor class HTTPTransportKwargs(TypedDict, total=False): diff --git a/lib/crewai/src/crewai/llms/providers/__init__.py b/lib/crewai/src/crewai/llm/internal/__init__.py similarity index 100% rename from lib/crewai/src/crewai/llms/providers/__init__.py rename to lib/crewai/src/crewai/llm/internal/__init__.py diff --git a/lib/crewai/src/crewai/llm/internal/meta.py b/lib/crewai/src/crewai/llm/internal/meta.py new file mode 100644 index 000000000..4bf83b655 --- /dev/null +++ b/lib/crewai/src/crewai/llm/internal/meta.py @@ -0,0 +1,232 @@ +"""Metaclass for LLM provider routing. 
+
+This metaclass enables automatic routing to native provider implementations
+based on the model parameter at instantiation time.
+"""
+
+from __future__ import annotations
+
+import logging
+from typing import Any
+
+from pydantic._internal._model_construction import ModelMetaclass
+
+
+# Native provider identifiers eligible for routing; the per-provider model
+# lists live in crewai.llm.constants
+SUPPORTED_NATIVE_PROVIDERS: list[str] = [
+    "openai",
+    "anthropic",
+    "claude",
+    "azure",
+    "azure_openai",
+    "google",
+    "gemini",
+    "bedrock",
+    "aws",
+]
+
+
+class LLMMeta(ModelMetaclass):
+    """Metaclass for LLM that handles provider routing.
+
+    This metaclass intercepts LLM instantiation and routes to the appropriate
+    native provider implementation based on the model parameter.
+    """
+
+    def __call__(cls, model: str, is_litellm: bool = False, **kwargs: Any) -> Any:  # noqa: N805
+        """Route to the appropriate provider implementation at instantiation time.
+
+        Args:
+            model: The model identifier (e.g., "gpt-4", "claude-3-opus")
+            is_litellm: Force use of LiteLLM instead of a native provider
+            **kwargs: Additional parameters for the LLM
+
+        Returns:
+            Instance of the appropriate provider class or LLM class
+
+        Raises:
+            ValueError: If model is not a valid string
+        """
+        if not model or not isinstance(model, str):
+            raise ValueError("Model must be a non-empty string")
+
+        # Only perform routing if called on the base LLM class.
+        # Subclasses (OpenAICompletion, etc.) should create normally.
+        from crewai.llm import LLM
+
+        if cls is not LLM:
+            # Direct instantiation of a provider class, skip routing
+            return super().__call__(model=model, **kwargs)
+
+        # Extract provider information
+        explicit_provider = kwargs.get("provider")
+
+        if explicit_provider:
+            provider = explicit_provider
+            use_native = True
+            model_string = model
+        elif "/" in model:
+            prefix, _, model_part = model.partition("/")
+
+            provider_mapping = {
+                "openai": "openai",
+                "anthropic": "anthropic",
+                "claude": "anthropic",
+                "azure": "azure",
+                "azure_openai": "azure",
+                "google": "gemini",
+                "gemini": "gemini",
+                "bedrock": "bedrock",
+                "aws": "bedrock",
+            }
+
+            canonical_provider = provider_mapping.get(prefix.lower())
+
+            if canonical_provider and cls._validate_model_in_constants(
+                model_part, canonical_provider
+            ):
+                provider = canonical_provider
+                use_native = True
+                model_string = model_part
+            else:
+                provider = prefix
+                use_native = False
+                model_string = model_part
+        else:
+            provider = cls._infer_provider_from_model(model)
+            use_native = True
+            model_string = model
+
+        # Route to native provider if available
+        native_class = cls._get_native_provider(provider) if use_native else None
+        if native_class and not is_litellm and provider in SUPPORTED_NATIVE_PROVIDERS:
+            try:
+                # Remove 'provider' from kwargs to avoid a duplicate keyword argument
+                kwargs_copy = {k: v for k, v in kwargs.items() if k != "provider"}
+                return native_class(
+                    model=model_string, provider=provider, **kwargs_copy
+                )
+            except NotImplementedError:
+                raise
+            except Exception as e:
+                raise ImportError(f"Error importing native provider: {e}") from e
+
+        # Fall back to LiteLLM
+        try:
+            import litellm  # noqa: F401
+        except ImportError:
+            logging.error("No native provider matched and LiteLLM is not installed")
+            raise ImportError("LiteLLM fallback is not available; install litellm") from None
+
+        # Create actual LLM instance with is_litellm=True
+        return super().__call__(model=model, is_litellm=True, **kwargs)
+
+    @staticmethod
+    def _validate_model_in_constants(model: str, provider: str) -> bool:
+        """Validate if a model name exists in
the provider's constants. + + Args: + model: The model name to validate + provider: The provider to check against (canonical name) + + Returns: + True if the model exists in the provider's constants, False otherwise + """ + from crewai.llm.constants import ( + ANTHROPIC_MODELS, + BEDROCK_MODELS, + GEMINI_MODELS, + OPENAI_MODELS, + ) + + if provider == "openai": + return model in OPENAI_MODELS + + if provider == "anthropic" or provider == "claude": + return model in ANTHROPIC_MODELS + + if provider == "gemini": + return model in GEMINI_MODELS + + if provider == "bedrock": + return model in BEDROCK_MODELS + + if provider == "azure": + # azure does not provide a list of available models + return True + + return False + + @staticmethod + def _infer_provider_from_model(model: str) -> str: + """Infer the provider from the model name. + + Args: + model: The model name without provider prefix + + Returns: + The inferred provider name, defaults to "openai" + """ + from crewai.llm.constants import ( + ANTHROPIC_MODELS, + AZURE_MODELS, + BEDROCK_MODELS, + GEMINI_MODELS, + OPENAI_MODELS, + ) + + if model in OPENAI_MODELS: + return "openai" + + if model in ANTHROPIC_MODELS: + return "anthropic" + + if model in GEMINI_MODELS: + return "gemini" + + if model in BEDROCK_MODELS: + return "bedrock" + + if model in AZURE_MODELS: + return "azure" + + return "openai" + + @staticmethod + def _get_native_provider(provider: str) -> type | None: + """Get native provider class if available. + + Args: + provider: The provider name + + Returns: + The provider class or None if not available + """ + if provider == "openai": + from crewai.llm.providers.openai.completion import OpenAICompletion + + return OpenAICompletion + + if provider == "anthropic" or provider == "claude": + from crewai.llm.providers.anthropic.completion import ( + AnthropicCompletion, + ) + + return AnthropicCompletion + + if provider == "azure" or provider == "azure_openai": + from crewai.llm.providers.azure.completion import AzureCompletion + + return AzureCompletion + + if provider == "google" or provider == "gemini": + from crewai.llm.providers.gemini.completion import GeminiCompletion + + return GeminiCompletion + + if provider == "bedrock": + from crewai.llm.providers.bedrock.completion import BedrockCompletion + + return BedrockCompletion + + return None diff --git a/lib/crewai/src/crewai/llms/providers/anthropic/__init__.py b/lib/crewai/src/crewai/llm/providers/__init__.py similarity index 100% rename from lib/crewai/src/crewai/llms/providers/anthropic/__init__.py rename to lib/crewai/src/crewai/llm/providers/__init__.py diff --git a/lib/crewai/src/crewai/llms/providers/azure/__init__.py b/lib/crewai/src/crewai/llm/providers/anthropic/__init__.py similarity index 100% rename from lib/crewai/src/crewai/llms/providers/azure/__init__.py rename to lib/crewai/src/crewai/llm/providers/anthropic/__init__.py diff --git a/lib/crewai/src/crewai/llms/providers/anthropic/completion.py b/lib/crewai/src/crewai/llm/providers/anthropic/completion.py similarity index 94% rename from lib/crewai/src/crewai/llms/providers/anthropic/completion.py rename to lib/crewai/src/crewai/llm/providers/anthropic/completion.py index ea161fc63..8ebba1673 100644 --- a/lib/crewai/src/crewai/llms/providers/anthropic/completion.py +++ b/lib/crewai/src/crewai/llm/providers/anthropic/completion.py @@ -3,13 +3,15 @@ from __future__ import annotations import json import logging import os -from typing import TYPE_CHECKING, Any, cast +from typing import TYPE_CHECKING, Any, ClassVar, 
cast -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict from crewai.events.types.llm_events import LLMCallType -from crewai.llms.base_llm import BaseLLM -from crewai.llms.hooks.transport import HTTPTransport +from crewai.llm.base_llm import BaseLLM +from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO +from crewai.llm.hooks.transport import HTTPTransport +from crewai.llm.providers.utils.common import safe_tool_conversion from crewai.utilities.agent_utils import is_context_length_exceeded from crewai.utilities.exceptions.context_window_exceeding_exception import ( LLMContextLengthExceededError, @@ -18,7 +20,7 @@ from crewai.utilities.types import LLMMessage if TYPE_CHECKING: - from crewai.llms.hooks.base import BaseInterceptor + from crewai.llm.hooks.base import BaseInterceptor try: from anthropic import Anthropic @@ -38,6 +40,8 @@ class AnthropicCompletion(BaseLLM): offering native tool use, streaming support, and proper message formatting. """ + model_config: ClassVar[ConfigDict] = ConfigDict(ignored_types=(property,)) + def __init__( self, model: str = "claude-3-5-sonnet-20241022", @@ -94,29 +98,30 @@ class AnthropicCompletion(BaseLLM): self.is_claude_3 = "claude-3" in model.lower() self.supports_tools = self.is_claude_3 # Claude 3+ supports tool use - @property - def stop(self) -> list[str]: - """Get stop sequences sent to the API.""" - return self.stop_sequences + # + # @property + # def stop(self) -> list[str]: # type: ignore[misc] + # """Get stop sequences sent to the API.""" + # return self.stop_sequences - @stop.setter - def stop(self, value: list[str] | str | None) -> None: - """Set stop sequences. - - Synchronizes stop_sequences to ensure values set by CrewAgentExecutor - are properly sent to the Anthropic API. - - Args: - value: Stop sequences as a list, single string, or None - """ - if value is None: - self.stop_sequences = [] - elif isinstance(value, str): - self.stop_sequences = [value] - elif isinstance(value, list): - self.stop_sequences = value - else: - self.stop_sequences = [] + # @stop.setter + # def stop(self, value: list[str] | str | None) -> None: + # """Set stop sequences. + # + # Synchronizes stop_sequences to ensure values set by CrewAgentExecutor + # are properly sent to the Anthropic API. 
+ # + # Args: + # value: Stop sequences as a list, single string, or None + # """ + # if value is None: + # self.stop_sequences = [] + # elif isinstance(value, str): + # self.stop_sequences = [value] + # elif isinstance(value, list): + # self.stop_sequences = value + # else: + # self.stop_sequences = [] def _get_client_params(self) -> dict[str, Any]: """Get client parameters.""" @@ -266,8 +271,6 @@ class AnthropicCompletion(BaseLLM): continue try: - from crewai.llms.providers.utils.common import safe_tool_conversion - name, description, parameters = safe_tool_conversion(tool, "Anthropic") except (ImportError, KeyError, ValueError) as e: logging.error(f"Error converting tool to Anthropic format: {e}") @@ -636,7 +639,6 @@ class AnthropicCompletion(BaseLLM): def get_context_window_size(self) -> int: """Get the context window size for the model.""" - from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO # Context window sizes for Anthropic models context_windows = { diff --git a/lib/crewai/src/crewai/llms/providers/bedrock/__init__.py b/lib/crewai/src/crewai/llm/providers/azure/__init__.py similarity index 100% rename from lib/crewai/src/crewai/llms/providers/bedrock/__init__.py rename to lib/crewai/src/crewai/llm/providers/azure/__init__.py diff --git a/lib/crewai/src/crewai/llms/providers/azure/completion.py b/lib/crewai/src/crewai/llm/providers/azure/completion.py similarity index 98% rename from lib/crewai/src/crewai/llms/providers/azure/completion.py rename to lib/crewai/src/crewai/llm/providers/azure/completion.py index 17306d8a2..a389c1825 100644 --- a/lib/crewai/src/crewai/llms/providers/azure/completion.py +++ b/lib/crewai/src/crewai/llm/providers/azure/completion.py @@ -7,6 +7,8 @@ from typing import TYPE_CHECKING, Any from pydantic import BaseModel +from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES +from crewai.llm.providers.utils.common import safe_tool_conversion from crewai.utilities.agent_utils import is_context_length_exceeded from crewai.utilities.exceptions.context_window_exceeding_exception import ( LLMContextLengthExceededError, @@ -15,7 +17,7 @@ from crewai.utilities.types import LLMMessage if TYPE_CHECKING: - from crewai.llms.hooks.base import BaseInterceptor + from crewai.llm.hooks.base import BaseInterceptor from crewai.tools.base_tool import BaseTool @@ -36,7 +38,7 @@ try: ) from crewai.events.types.llm_events import LLMCallType - from crewai.llms.base_llm import BaseLLM + from crewai.llm.base_llm import BaseLLM except ImportError: raise ImportError( @@ -317,8 +319,6 @@ class AzureCompletion(BaseLLM): ) -> list[dict[str, Any]]: """Convert CrewAI tool format to Azure OpenAI function calling format.""" - from crewai.llms.providers.utils.common import safe_tool_conversion - azure_tools = [] for tool in tools: @@ -554,7 +554,6 @@ class AzureCompletion(BaseLLM): def get_context_window_size(self) -> int: """Get the context window size for the model.""" - from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES min_context = 1024 max_context = 2097152 diff --git a/lib/crewai/src/crewai/llms/providers/gemini/__init__.py b/lib/crewai/src/crewai/llm/providers/bedrock/__init__.py similarity index 100% rename from lib/crewai/src/crewai/llms/providers/gemini/__init__.py rename to lib/crewai/src/crewai/llm/providers/bedrock/__init__.py diff --git a/lib/crewai/src/crewai/llms/providers/bedrock/completion.py b/lib/crewai/src/crewai/llm/providers/bedrock/completion.py similarity index 96% rename from 
lib/crewai/src/crewai/llms/providers/bedrock/completion.py rename to lib/crewai/src/crewai/llm/providers/bedrock/completion.py index 20eabf763..f67414c63 100644 --- a/lib/crewai/src/crewai/llms/providers/bedrock/completion.py +++ b/lib/crewai/src/crewai/llm/providers/bedrock/completion.py @@ -3,13 +3,15 @@ from __future__ import annotations from collections.abc import Mapping, Sequence import logging import os -from typing import TYPE_CHECKING, Any, TypedDict, cast +from typing import TYPE_CHECKING, Any, ClassVar, TypedDict, cast -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict from typing_extensions import Required from crewai.events.types.llm_events import LLMCallType -from crewai.llms.base_llm import BaseLLM +from crewai.llm.base_llm import BaseLLM +from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO +from crewai.llm.providers.utils.common import safe_tool_conversion from crewai.utilities.agent_utils import is_context_length_exceeded from crewai.utilities.exceptions.context_window_exceeding_exception import ( LLMContextLengthExceededError, @@ -30,7 +32,7 @@ if TYPE_CHECKING: ToolTypeDef, ) - from crewai.llms.hooks.base import BaseInterceptor + from crewai.llm.hooks.base import BaseInterceptor try: @@ -143,6 +145,8 @@ class BedrockCompletion(BaseLLM): - Model-specific conversation format handling (e.g., Cohere requirements) """ + model_config: ClassVar[ConfigDict] = ConfigDict(ignored_types=(property,)) + def __init__( self, model: str = "anthropic.claude-3-5-sonnet-20241022-v2:0", @@ -243,29 +247,29 @@ class BedrockCompletion(BaseLLM): # Handle inference profiles for newer models self.model_id = model - @property - def stop(self) -> list[str]: - """Get stop sequences sent to the API.""" - return list(self.stop_sequences) + # @property + # def stop(self) -> list[str]: # type: ignore[misc] + # """Get stop sequences sent to the API.""" + # return list(self.stop_sequences) - @stop.setter - def stop(self, value: Sequence[str] | str | None) -> None: - """Set stop sequences. - - Synchronizes stop_sequences to ensure values set by CrewAgentExecutor - are properly sent to the Bedrock API. - - Args: - value: Stop sequences as a Sequence, single string, or None - """ - if value is None: - self.stop_sequences = [] - elif isinstance(value, str): - self.stop_sequences = [value] - elif isinstance(value, Sequence): - self.stop_sequences = list(value) - else: - self.stop_sequences = [] + # @stop.setter + # def stop(self, value: Sequence[str] | str | None) -> None: + # """Set stop sequences. + # + # Synchronizes stop_sequences to ensure values set by CrewAgentExecutor + # are properly sent to the Bedrock API. 
+ # + # Args: + # value: Stop sequences as a Sequence, single string, or None + # """ + # if value is None: + # self.stop_sequences = [] + # elif isinstance(value, str): + # self.stop_sequences = [value] + # elif isinstance(value, Sequence): + # self.stop_sequences = list(value) + # else: + # self.stop_sequences = [] def call( self, @@ -778,7 +782,6 @@ class BedrockCompletion(BaseLLM): tools: list[dict[str, Any]], ) -> list[ConverseToolTypeDef]: """Convert CrewAI tools to Converse API format following AWS specification.""" - from crewai.llms.providers.utils.common import safe_tool_conversion converse_tools: list[ConverseToolTypeDef] = [] @@ -871,7 +874,6 @@ class BedrockCompletion(BaseLLM): def get_context_window_size(self) -> int: """Get the context window size for the model.""" - from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO # Context window sizes for common Bedrock models context_windows = { diff --git a/lib/crewai/src/crewai/llms/providers/openai/__init__.py b/lib/crewai/src/crewai/llm/providers/gemini/__init__.py similarity index 100% rename from lib/crewai/src/crewai/llms/providers/openai/__init__.py rename to lib/crewai/src/crewai/llm/providers/gemini/__init__.py diff --git a/lib/crewai/src/crewai/llms/providers/gemini/completion.py b/lib/crewai/src/crewai/llm/providers/gemini/completion.py similarity index 94% rename from lib/crewai/src/crewai/llms/providers/gemini/completion.py rename to lib/crewai/src/crewai/llm/providers/gemini/completion.py index 8668a8f58..263309910 100644 --- a/lib/crewai/src/crewai/llms/providers/gemini/completion.py +++ b/lib/crewai/src/crewai/llm/providers/gemini/completion.py @@ -1,12 +1,14 @@ import logging import os -from typing import Any, cast +from typing import Any, ClassVar, cast -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict from crewai.events.types.llm_events import LLMCallType -from crewai.llms.base_llm import BaseLLM -from crewai.llms.hooks.base import BaseInterceptor +from crewai.llm.base_llm import BaseLLM +from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES +from crewai.llm.hooks.base import BaseInterceptor +from crewai.llm.providers.utils.common import safe_tool_conversion from crewai.utilities.agent_utils import is_context_length_exceeded from crewai.utilities.exceptions.context_window_exceeding_exception import ( LLMContextLengthExceededError, @@ -31,6 +33,8 @@ class GeminiCompletion(BaseLLM): offering native function calling, streaming support, and proper Gemini formatting. """ + model_config: ClassVar[ConfigDict] = ConfigDict(ignored_types=(property,)) + def __init__( self, model: str = "gemini-2.0-flash-001", @@ -104,29 +108,29 @@ class GeminiCompletion(BaseLLM): self.is_gemini_1_5 = "gemini-1.5" in model.lower() self.supports_tools = self.is_gemini_1_5 or self.is_gemini_2 - @property - def stop(self) -> list[str]: - """Get stop sequences sent to the API.""" - return self.stop_sequences + # @property + # def stop(self) -> list[str]: # type: ignore[misc] + # """Get stop sequences sent to the API.""" + # return self.stop_sequences - @stop.setter - def stop(self, value: list[str] | str | None) -> None: - """Set stop sequences. - - Synchronizes stop_sequences to ensure values set by CrewAgentExecutor - are properly sent to the Gemini API. 
- - Args: - value: Stop sequences as a list, single string, or None - """ - if value is None: - self.stop_sequences = [] - elif isinstance(value, str): - self.stop_sequences = [value] - elif isinstance(value, list): - self.stop_sequences = value - else: - self.stop_sequences = [] + # @stop.setter + # def stop(self, value: list[str] | str | None) -> None: + # """Set stop sequences. + # + # Synchronizes stop_sequences to ensure values set by CrewAgentExecutor + # are properly sent to the Gemini API. + # + # Args: + # value: Stop sequences as a list, single string, or None + # """ + # if value is None: + # self.stop_sequences = [] + # elif isinstance(value, str): + # self.stop_sequences = [value] + # elif isinstance(value, list): + # self.stop_sequences = value + # else: + # self.stop_sequences = [] def _initialize_client(self, use_vertexai: bool = False) -> genai.Client: # type: ignore[no-any-unimported] """Initialize the Google Gen AI client with proper parameter handling. @@ -335,8 +339,6 @@ class GeminiCompletion(BaseLLM): """Convert CrewAI tool format to Gemini function declaration format.""" gemini_tools = [] - from crewai.llms.providers.utils.common import safe_tool_conversion - for tool in tools: name, description, parameters = safe_tool_conversion(tool, "Gemini") @@ -547,7 +549,6 @@ class GeminiCompletion(BaseLLM): def get_context_window_size(self) -> int: """Get the context window size for the model.""" - from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES min_context = 1024 max_context = 2097152 diff --git a/lib/crewai/src/crewai/llms/providers/utils/__init__.py b/lib/crewai/src/crewai/llm/providers/openai/__init__.py similarity index 100% rename from lib/crewai/src/crewai/llms/providers/utils/__init__.py rename to lib/crewai/src/crewai/llm/providers/openai/__init__.py diff --git a/lib/crewai/src/crewai/llms/providers/openai/completion.py b/lib/crewai/src/crewai/llm/providers/openai/completion.py similarity index 87% rename from lib/crewai/src/crewai/llms/providers/openai/completion.py rename to lib/crewai/src/crewai/llm/providers/openai/completion.py index fdf7b03c7..f9f65c8b1 100644 --- a/lib/crewai/src/crewai/llms/providers/openai/completion.py +++ b/lib/crewai/src/crewai/llm/providers/openai/completion.py @@ -11,11 +11,13 @@ from openai import APIConnectionError, NotFoundError, OpenAI from openai.types.chat import ChatCompletion, ChatCompletionChunk from openai.types.chat.chat_completion import Choice from openai.types.chat.chat_completion_chunk import ChoiceDelta -from pydantic import BaseModel +from pydantic import BaseModel, Field from crewai.events.types.llm_events import LLMCallType -from crewai.llms.base_llm import BaseLLM -from crewai.llms.hooks.transport import HTTPTransport +from crewai.llm.base_llm import BaseLLM +from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES +from crewai.llm.hooks.transport import HTTPTransport +from crewai.llm.providers.utils.common import safe_tool_conversion from crewai.utilities.agent_utils import is_context_length_exceeded from crewai.utilities.exceptions.context_window_exceeding_exception import ( LLMContextLengthExceededError, @@ -25,7 +27,6 @@ from crewai.utilities.types import LLMMessage if TYPE_CHECKING: from crewai.agent.core import Agent - from crewai.llms.hooks.base import BaseInterceptor from crewai.task import Task from crewai.tools.base_tool import BaseTool @@ -37,61 +38,61 @@ class OpenAICompletion(BaseLLM): offering native structured outputs, function calling, and streaming 
support. """ - def __init__( - self, - model: str = "gpt-4o", - api_key: str | None = None, - base_url: str | None = None, - organization: str | None = None, - project: str | None = None, - timeout: float | None = None, - max_retries: int = 2, - default_headers: dict[str, str] | None = None, - default_query: dict[str, Any] | None = None, - client_params: dict[str, Any] | None = None, - temperature: float | None = None, - top_p: float | None = None, - frequency_penalty: float | None = None, - presence_penalty: float | None = None, - max_tokens: int | None = None, - max_completion_tokens: int | None = None, - seed: int | None = None, - stream: bool = False, - response_format: dict[str, Any] | type[BaseModel] | None = None, - logprobs: bool | None = None, - top_logprobs: int | None = None, - reasoning_effort: str | None = None, - provider: str | None = None, - interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None, - **kwargs: Any, - ) -> None: - """Initialize OpenAI chat completion client.""" + # Client configuration fields + organization: str | None = Field(None, description="OpenAI organization ID") + project: str | None = Field(None, description="OpenAI project ID") + max_retries: int = Field(2, description="Maximum number of retries") + default_headers: dict[str, str] | None = Field( + None, description="Default headers for requests" + ) + default_query: dict[str, Any] | None = Field( + None, description="Default query parameters" + ) + client_params: dict[str, Any] | None = Field( + None, description="Additional client parameters" + ) + timeout: float | None = Field(None, description="Request timeout") + api_base: str | None = Field(None, description="API base URL (deprecated)") - if provider is None: - provider = kwargs.pop("provider", "openai") + # Completion parameters + top_p: float | None = Field(None, description="Top-p sampling parameter") + frequency_penalty: float | None = Field(None, description="Frequency penalty") + presence_penalty: float | None = Field(None, description="Presence penalty") + max_tokens: int | None = Field(None, description="Maximum tokens") + max_completion_tokens: int | None = Field( + None, description="Maximum completion tokens" + ) + seed: int | None = Field(None, description="Random seed") + stream: bool = Field(False, description="Enable streaming") + response_format: dict[str, Any] | type[BaseModel] | None = Field( + None, description="Response format" + ) + logprobs: bool | None = Field(None, description="Return log probabilities") + top_logprobs: int | None = Field( + None, description="Number of top log probabilities" + ) + reasoning_effort: str | None = Field(None, description="Reasoning effort level") - self.interceptor = interceptor - # Client configuration attributes - self.organization = organization - self.project = project - self.max_retries = max_retries - self.default_headers = default_headers - self.default_query = default_query - self.client_params = client_params - self.timeout = timeout - self.base_url = base_url - self.api_base = kwargs.pop("api_base", None) + # Internal state + client: OpenAI = Field( + default_factory=OpenAI, exclude=True, description="OpenAI client instance" + ) + is_o1_model: bool = Field(False, description="Whether this is an O1 model") + is_gpt4_model: bool = Field(False, description="Whether this is a GPT-4 model") - super().__init__( - model=model, - temperature=temperature, - api_key=api_key or os.getenv("OPENAI_API_KEY"), - base_url=base_url, - timeout=timeout, - provider=provider, - 
**kwargs, - ) + def model_post_init(self, __context: Any) -> None: + """Initialize OpenAI client after model initialization. + Args: + __context: Pydantic context + """ + super().model_post_init(__context) + + # Set API key from environment if not provided + if self.api_key is None: + self.api_key = os.getenv("OPENAI_API_KEY") + + # Initialize client client_config = self._get_client_params() if self.interceptor: transport = HTTPTransport(interceptor=self.interceptor) @@ -100,20 +101,9 @@ class OpenAICompletion(BaseLLM): self.client = OpenAI(**client_config) - # Completion parameters - self.top_p = top_p - self.frequency_penalty = frequency_penalty - self.presence_penalty = presence_penalty - self.max_tokens = max_tokens - self.max_completion_tokens = max_completion_tokens - self.seed = seed - self.stream = stream - self.response_format = response_format - self.logprobs = logprobs - self.top_logprobs = top_logprobs - self.reasoning_effort = reasoning_effort - self.is_o1_model = "o1" in model.lower() - self.is_gpt4_model = "gpt-4" in model.lower() + # Set model flags + self.is_o1_model = "o1" in self.model.lower() + self.is_gpt4_model = "gpt-4" in self.model.lower() def _get_client_params(self) -> dict[str, Any]: """Get OpenAI client parameters.""" @@ -268,7 +258,6 @@ class OpenAICompletion(BaseLLM): self, tools: list[dict[str, BaseTool]] ) -> list[dict[str, Any]]: """Convert CrewAI tool format to OpenAI function calling format.""" - from crewai.llms.providers.utils.common import safe_tool_conversion openai_tools = [] @@ -560,7 +549,6 @@ class OpenAICompletion(BaseLLM): def get_context_window_size(self) -> int: """Get the context window size for the model.""" - from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES min_context = 1024 max_context = 2097152 diff --git a/lib/crewai/src/crewai/llm/providers/utils/__init__.py b/lib/crewai/src/crewai/llm/providers/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lib/crewai/src/crewai/llms/providers/utils/common.py b/lib/crewai/src/crewai/llm/providers/utils/common.py similarity index 100% rename from lib/crewai/src/crewai/llms/providers/utils/common.py rename to lib/crewai/src/crewai/llm/providers/utils/common.py diff --git a/lib/crewai/src/crewai/llms/third_party/__init__.py b/lib/crewai/src/crewai/llms/third_party/__init__.py deleted file mode 100644 index 947a62fa4..000000000 --- a/lib/crewai/src/crewai/llms/third_party/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Third-party LLM implementations for crewAI.""" diff --git a/lib/crewai/src/crewai/tasks/llm_guardrail.py b/lib/crewai/src/crewai/tasks/llm_guardrail.py index 803b2d749..b540feebf 100644 --- a/lib/crewai/src/crewai/tasks/llm_guardrail.py +++ b/lib/crewai/src/crewai/tasks/llm_guardrail.py @@ -4,7 +4,7 @@ from pydantic import BaseModel, Field from crewai.agent import Agent from crewai.lite_agent_output import LiteAgentOutput -from crewai.llms.base_llm import BaseLLM +from crewai.llm.base_llm import BaseLLM from crewai.tasks.task_output import TaskOutput diff --git a/lib/crewai/src/crewai/utilities/agent_utils.py b/lib/crewai/src/crewai/utilities/agent_utils.py index a6403c315..6fc8bd863 100644 --- a/lib/crewai/src/crewai/utilities/agent_utils.py +++ b/lib/crewai/src/crewai/utilities/agent_utils.py @@ -16,7 +16,7 @@ from crewai.agents.parser import ( parse, ) from crewai.cli.config import Settings -from crewai.llms.base_llm import BaseLLM +from crewai.llm.base_llm import BaseLLM from crewai.tools import BaseTool as CrewAITool from 
diff --git a/lib/crewai/src/crewai/utilities/converter.py b/lib/crewai/src/crewai/utilities/converter.py
index 0a42a467e..ce827ce82 100644
--- a/lib/crewai/src/crewai/utilities/converter.py
+++ b/lib/crewai/src/crewai/utilities/converter.py
@@ -19,7 +19,7 @@ if TYPE_CHECKING:
     from crewai.agent import Agent
     from crewai.agents.agent_builder.base_agent import BaseAgent
     from crewai.llm import LLM
-    from crewai.llms.base_llm import BaseLLM
+    from crewai.llm.base_llm import BaseLLM

 _JSON_PATTERN: Final[re.Pattern[str]] = re.compile(r"({.*})", re.DOTALL)
 _I18N = get_i18n()
diff --git a/lib/crewai/src/crewai/utilities/evaluators/crew_evaluator_handler.py b/lib/crewai/src/crewai/utilities/evaluators/crew_evaluator_handler.py
index 9c9cac0c6..5420cb4d4 100644
--- a/lib/crewai/src/crewai/utilities/evaluators/crew_evaluator_handler.py
+++ b/lib/crewai/src/crewai/utilities/evaluators/crew_evaluator_handler.py
@@ -11,7 +11,7 @@ from rich.table import Table

 from crewai.agent import Agent
 from crewai.events.event_bus import crewai_event_bus
 from crewai.events.types.crew_events import CrewTestResultEvent
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
diff --git a/lib/crewai/src/crewai/utilities/internal_instructor.py b/lib/crewai/src/crewai/utilities/internal_instructor.py
index 06a95d234..72d0bf75b 100644
--- a/lib/crewai/src/crewai/utilities/internal_instructor.py
+++ b/lib/crewai/src/crewai/utilities/internal_instructor.py
@@ -10,7 +10,7 @@ from crewai.utilities.logger_utils import suppress_warnings

 if TYPE_CHECKING:
     from crewai.agent import Agent
     from crewai.llm import LLM
-    from crewai.llms.base_llm import BaseLLM
+    from crewai.llm.base_llm import BaseLLM
     from crewai.utilities.types import LLMMessage
diff --git a/lib/crewai/src/crewai/utilities/llm_utils.py b/lib/crewai/src/crewai/utilities/llm_utils.py
index 129f064d5..506425534 100644
--- a/lib/crewai/src/crewai/utilities/llm_utils.py
+++ b/lib/crewai/src/crewai/utilities/llm_utils.py
@@ -4,7 +4,7 @@ from typing import Any, Final

 from crewai.cli.constants import DEFAULT_LLM_MODEL, ENV_VARS, LITELLM_PARAMS
 from crewai.llm import LLM
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM

 logger = logging.getLogger(__name__)
diff --git a/lib/crewai/src/crewai/utilities/planning_handler.py b/lib/crewai/src/crewai/utilities/planning_handler.py
index c76153020..338bf11ee 100644
--- a/lib/crewai/src/crewai/utilities/planning_handler.py
+++ b/lib/crewai/src/crewai/utilities/planning_handler.py
@@ -5,7 +5,7 @@ import logging

 from pydantic import BaseModel, Field

 from crewai.agent import Agent
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM
 from crewai.task import Task
diff --git a/lib/crewai/src/crewai/utilities/tool_utils.py b/lib/crewai/src/crewai/utilities/tool_utils.py
index eb433c02c..186db1adf 100644
--- a/lib/crewai/src/crewai/utilities/tool_utils.py
+++ b/lib/crewai/src/crewai/utilities/tool_utils.py
@@ -15,7 +15,7 @@ if TYPE_CHECKING:
     from crewai.agent import Agent
     from crewai.agents.agent_builder.base_agent import BaseAgent
     from crewai.llm import LLM
-    from crewai.llms.base_llm import BaseLLM
+    from crewai.llm.base_llm import BaseLLM
     from crewai.task import Task
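Several of the touched imports sit under `TYPE_CHECKING`, so in those modules the rename only affects type checkers, never the runtime import graph. A small sketch of that pattern, with an illustrative `describe` helper (it assumes `BaseLLM` exposes `model`, as the completion code above does):

```python
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Resolved by mypy/pyright only; never imported at runtime, which is
    # why circular-import risk is unchanged by the llms -> llm rename.
    from crewai.llm.base_llm import BaseLLM


def describe(llm: BaseLLM) -> str:
    # With `from __future__ import annotations` the hint above stays a
    # string at runtime, so no real import of BaseLLM is required.
    return f"model={llm.model}"
```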
diff --git a/lib/crewai/tests/agents/test_agent.py b/lib/crewai/tests/agents/test_agent.py
index 4fd1f3b5b..94e716b97 100644
--- a/lib/crewai/tests/agents/test_agent.py
+++ b/lib/crewai/tests/agents/test_agent.py
@@ -14,7 +14,7 @@ from crewai.knowledge.knowledge_config import KnowledgeConfig
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
 from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
 from crewai.llm import LLM
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM
 from crewai.process import Process
 from crewai.tools.tool_calling import InstructorToolCalling
 from crewai.tools.tool_usage import ToolUsage
diff --git a/lib/crewai/tests/agents/test_lite_agent.py b/lib/crewai/tests/agents/test_lite_agent.py
index 0c6b00c23..1215c7804 100644
--- a/lib/crewai/tests/agents/test_lite_agent.py
+++ b/lib/crewai/tests/agents/test_lite_agent.py
@@ -9,7 +9,7 @@ from crewai.events.types.agent_events import LiteAgentExecutionStartedEvent
 from crewai.events.types.tool_usage_events import ToolUsageStartedEvent
 from crewai.lite_agent import LiteAgent
 from crewai.lite_agent_output import LiteAgentOutput
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM
 from pydantic import BaseModel, Field
 import pytest
diff --git a/lib/crewai/tests/cassettes/test_agent_function_calling_llm.yaml b/lib/crewai/tests/cassettes/test_agent_function_calling_llm.yaml
index 0136b60c6..eeb43b9c2 100644
[hunks omitted: recorded request bodies whose serialized agent/crew reprs now reference crewai.llm.base_llm instead of crewai.llms.base_llm]
diff --git a/lib/crewai/tests/cassettes/test_agent_remembers_output_format_after_using_tools_too_many_times.yaml b/lib/crewai/tests/cassettes/test_agent_remembers_output_format_after_using_tools_too_many_times.yaml
index a0c8a3e40..e82b2e5d4 100644
[hunks omitted: the same mechanical repr-path updates, repeated across every recorded interaction in this and the neighboring cassettes]
diff --git a/lib/crewai/tests/cassettes/test_tool_result_as_answer_is_the_final_answer_for_the_agent.yaml b/lib/crewai/tests/cassettes/test_tool_result_as_answer_is_the_final_answer_for_the_agent.yaml
index d3784b9e7..a0941cf37 100644
[hunks omitted: same mechanical repr-path updates]
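As for why fixture churn follows a package rename at all: these VCR cassettes record request bodies containing default object reprs, and a default repr embeds the defining module's path. A toy demonstration with a hypothetical stand-in class (standard Python behavior, not crewai-specific):

```python
class BaseLLM:
    """Toy stand-in; any class without a custom __repr__ shows its module."""


print(repr(BaseLLM()))
# e.g. <__main__.BaseLLM object at 0x104b2e900>
# Inside the package the same default repr reads
# <crewai.llm.base_llm.BaseLLM object at 0x...> after the rename, which is
# exactly the string the recorded request bodies capture, so every cassette
# that serialized an agent graph has to be regenerated.
```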