feat: add support for llm message interceptor hooks

Author: Greyson LaLonde
Date: 2025-11-05 11:38:44 -05:00
Committed by: GitHub
Parent: 54710a8711
Commit: 61ad1fb112
22 changed files with 2421 additions and 35 deletions


@@ -1200,6 +1200,52 @@ Learn how to get the most out of your LLM configuration:
)
```
</Accordion>
<Accordion title="Transport Interceptors">
CrewAI provides message interceptors for several providers, allowing you to hook into request/response cycles at the transport layer.
**Supported Providers:**
- ✅ OpenAI
- ✅ Anthropic
**Basic Usage:**
```python
import httpx

from crewai import LLM
from crewai.llms.hooks import BaseInterceptor


class CustomInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    """Custom interceptor to modify requests and responses."""

    def on_outbound(self, request: httpx.Request) -> httpx.Request:
        """Print request before sending to the LLM provider."""
        print(request)
        return request

    def on_inbound(self, response: httpx.Response) -> httpx.Response:
        """Process response after receiving from the LLM provider."""
        # Note: at the transport layer the body has not been read yet, so
        # stick to metadata such as the status code and headers here.
        print(f"Status: {response.status_code}")
        return response


# Use the interceptor with an LLM
llm = LLM(
    model="openai/gpt-4o",
    interceptor=CustomInterceptor(),
)
```
**Important Notes:**
- Both methods must return an object of the same type they received.
- Modifying the received objects can lead to unexpected behavior or application crashes.
- Not all providers support interceptors; check the supported providers list above.
<Info>
Interceptors operate at the transport layer. This is particularly useful for:
- Message transformation and filtering
- Debugging API interactions
</Info>
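For example, a minimal debugging interceptor might log each call while redacting credentials. The sketch below is illustrative only (the class name and masking rule are ours, not part of CrewAI), and it touches only request/response metadata, since the body may not have been read yet at the transport layer:
```python
import httpx

from crewai import LLM
from crewai.llms.hooks import BaseInterceptor


class RedactingLogInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    """Log requests and responses while masking the Authorization header."""

    def on_outbound(self, request: httpx.Request) -> httpx.Request:
        auth = request.headers.get("Authorization", "")
        masked = auth[:12] + "..." if auth else "<none>"
        print(f"-> {request.method} {request.url} (Authorization: {masked})")
        return request

    def on_inbound(self, response: httpx.Response) -> httpx.Response:
        # Only metadata is inspected; the body may not be available yet.
        print(f"<- {response.status_code} {response.headers.get('content-type')}")
        return response


llm = LLM(model="openai/gpt-4o", interceptor=RedactingLogInterceptor())
```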
</Accordion>
</AccordionGroup>
## Common Issues and Solutions


@@ -20,6 +20,7 @@ from typing import (
 )
 from dotenv import load_dotenv
+import httpx
 from pydantic import BaseModel, Field
 from typing_extensions import Self
@@ -53,6 +54,7 @@ if TYPE_CHECKING:
     from litellm.utils import supports_response_schema
     from crewai.agent.core import Agent
+    from crewai.llms.hooks.base import BaseInterceptor
     from crewai.task import Task
     from crewai.tools.base_tool import BaseTool
     from crewai.utilities.types import LLMMessage
@@ -334,6 +336,8 @@ class LLM(BaseLLM):
             return cast(
                 Self, native_class(model=model_string, provider=provider, **kwargs)
             )
+        except NotImplementedError:
+            raise
         except Exception as e:
             raise ImportError(f"Error importing native provider: {e}") from e
@@ -403,6 +407,7 @@ class LLM(BaseLLM):
         callbacks: list[Any] | None = None,
         reasoning_effort: Literal["none", "low", "medium", "high"] | None = None,
         stream: bool = False,
+        interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
         **kwargs: Any,
     ) -> None:
         """Initialize LLM instance.
@@ -442,6 +447,7 @@ class LLM(BaseLLM):
         self.additional_params = kwargs
         self.is_anthropic = self._is_anthropic_model(model)
         self.stream = stream
+        self.interceptor = interceptor
         litellm.drop_params = True


@@ -0,0 +1,6 @@
"""Interceptor contracts for crewai"""
from crewai.llms.hooks.base import BaseInterceptor
__all__ = ["BaseInterceptor"]


@@ -0,0 +1,82 @@
"""Base classes for LLM transport interceptors.
This module provides abstract base classes for intercepting and modifying
outbound and inbound messages at the transport level.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class BaseInterceptor(ABC, Generic[T, U]):
"""Abstract base class for intercepting transport-level messages.
Provides hooks to intercept and modify outbound and inbound messages
at the transport layer.
Type parameters:
T: Outbound message type (e.g., httpx.Request)
U: Inbound message type (e.g., httpx.Response)
Example:
>>> class CustomInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
... def on_outbound(self, message: httpx.Request) -> httpx.Request:
... message.headers["X-Custom-Header"] = "value"
... return message
...
... def on_inbound(self, message: httpx.Response) -> httpx.Response:
... print(f"Status: {message.status_code}")
... return message
"""
@abstractmethod
def on_outbound(self, message: T) -> T:
"""Intercept outbound message before sending.
Args:
message: Outbound message object.
Returns:
Modified message object.
"""
...
@abstractmethod
def on_inbound(self, message: U) -> U:
"""Intercept inbound message after receiving.
Args:
message: Inbound message object.
Returns:
Modified message object.
"""
...
async def aon_outbound(self, message: T) -> T:
"""Async version of on_outbound.
Args:
message: Outbound message object.
Returns:
Modified message object.
"""
raise NotImplementedError
async def aon_inbound(self, message: U) -> U:
"""Async version of on_inbound.
Args:
message: Inbound message object.
Returns:
Modified message object.
"""
raise NotImplementedError
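One detail worth flagging in this contract: the async hooks (`aon_outbound`/`aon_inbound`) raise `NotImplementedError` by default, so any interceptor that will run behind an async transport has to override them. A minimal sketch (our own illustration, not part of the commit) that delegates the async hooks to the sync implementations:

```python
import httpx

from crewai.llms.hooks.base import BaseInterceptor


class TracingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    """Tag every outbound request; usable from sync and async transports."""

    def on_outbound(self, message: httpx.Request) -> httpx.Request:
        message.headers["X-Trace-Id"] = "trace-123"  # header name is ours
        return message

    def on_inbound(self, message: httpx.Response) -> httpx.Response:
        return message

    async def aon_outbound(self, message: httpx.Request) -> httpx.Request:
        # Delegate to the sync hook; replace with real async work if needed.
        return self.on_outbound(message)

    async def aon_inbound(self, message: httpx.Response) -> httpx.Response:
        return self.on_inbound(message)
```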


@@ -0,0 +1,87 @@
"""HTTP transport implementations for LLM request/response interception.
This module provides internal transport classes that integrate with BaseInterceptor
to enable request/response modification at the transport level.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import httpx
if TYPE_CHECKING:
from crewai.llms.hooks.base import BaseInterceptor
class HTTPTransport(httpx.HTTPTransport):
"""HTTP transport that uses an interceptor for request/response modification.
This transport is used internally when a user provides a BaseInterceptor.
Users should not instantiate this class directly - instead, pass an interceptor
to the LLM client and this transport will be created automatically.
"""
def __init__(
self,
interceptor: BaseInterceptor[httpx.Request, httpx.Response],
**kwargs: Any,
) -> None:
"""Initialize transport with interceptor.
Args:
interceptor: HTTP interceptor for modifying raw request/response objects.
**kwargs: Additional arguments passed to httpx.HTTPTransport.
"""
super().__init__(**kwargs)
self.interceptor = interceptor
def handle_request(self, request: httpx.Request) -> httpx.Response:
"""Handle request with interception.
Args:
request: The HTTP request to handle.
Returns:
The HTTP response.
"""
request = self.interceptor.on_outbound(request)
response = super().handle_request(request)
return self.interceptor.on_inbound(response)
class AsyncHTTPransport(httpx.AsyncHTTPTransport):
"""Async HTTP transport that uses an interceptor for request/response modification.
This transport is used internally when a user provides a BaseInterceptor.
Users should not instantiate this class directly - instead, pass an interceptor
to the LLM client and this transport will be created automatically.
"""
def __init__(
self,
interceptor: BaseInterceptor[httpx.Request, httpx.Response],
**kwargs: Any,
) -> None:
"""Initialize async transport with interceptor.
Args:
interceptor: HTTP interceptor for modifying raw request/response objects.
**kwargs: Additional arguments passed to httpx.AsyncHTTPTransport.
"""
super().__init__(**kwargs)
self.interceptor = interceptor
async def handle_async_request(self, request: httpx.Request) -> httpx.Response:
"""Handle async request with interception.
Args:
request: The HTTP request to handle.
Returns:
The HTTP response.
"""
request = await self.interceptor.aon_outbound(request)
response = await super().handle_async_request(request)
return await self.interceptor.aon_inbound(response)
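To make the data flow concrete: when an interceptor is supplied, the providers build one of these transports and hand it to an `httpx` client, roughly as in the sketch below (using the hypothetical `TracingInterceptor` from the previous sketch; this wiring is internal, not a public API):

```python
import httpx

from crewai.llms.hooks.transport import HTTPTransport

# Any BaseInterceptor implementation works here, e.g. the TracingInterceptor
# sketched above after the base-class module.
interceptor = TracingInterceptor()
client = httpx.Client(transport=HTTPTransport(interceptor=interceptor))

# A provider SDK constructed with http_client=client (for example
# OpenAI(http_client=client)) then sends every request through
# handle_request(), which calls on_outbound() before the send and
# on_inbound() on the raw response.
```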


@@ -1,15 +1,15 @@
 from __future__ import annotations

 import json
 import logging
 import os
-from typing import Any, cast
+from typing import TYPE_CHECKING, Any, cast

 from pydantic import BaseModel

 from crewai.events.types.llm_events import LLMCallType
 from crewai.llms.base_llm import BaseLLM
+from crewai.llms.hooks.transport import HTTPTransport
 from crewai.utilities.agent_utils import is_context_length_exceeded
 from crewai.utilities.exceptions.context_window_exceeding_exception import (
@@ -17,10 +17,14 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
 from crewai.utilities.types import LLMMessage

+if TYPE_CHECKING:
+    from crewai.llms.hooks.base import BaseInterceptor
+
 try:
     from anthropic import Anthropic
     from anthropic.types import Message
     from anthropic.types.tool_use_block import ToolUseBlock
+    import httpx
 except ImportError:
     raise ImportError(
         'Anthropic native provider not available, to install: uv add "crewai[anthropic]"'
@@ -47,7 +51,8 @@ class AnthropicCompletion(BaseLLM):
         stop_sequences: list[str] | None = None,
         stream: bool = False,
         client_params: dict[str, Any] | None = None,
-        **kwargs,
+        interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
+        **kwargs: Any,
     ):
         """Initialize Anthropic chat completion client.
@@ -63,6 +68,7 @@ class AnthropicCompletion(BaseLLM):
             stop_sequences: Stop sequences (Anthropic uses stop_sequences, not stop)
             stream: Enable streaming responses
             client_params: Additional parameters for the Anthropic client
+            interceptor: HTTP interceptor for modifying requests/responses at transport level.
             **kwargs: Additional parameters
         """
         super().__init__(
@@ -70,6 +76,7 @@ class AnthropicCompletion(BaseLLM):
         )

         # Client params
+        self.interceptor = interceptor
         self.client_params = client_params
         self.base_url = base_url
         self.timeout = timeout
@@ -102,6 +109,11 @@ class AnthropicCompletion(BaseLLM):
"max_retries": self.max_retries, "max_retries": self.max_retries,
} }
if self.interceptor:
transport = HTTPTransport(interceptor=self.interceptor)
http_client = httpx.Client(transport=transport)
client_params["http_client"] = http_client # type: ignore[assignment]
if self.client_params: if self.client_params:
client_params.update(self.client_params) client_params.update(self.client_params)
@@ -110,7 +122,7 @@ class AnthropicCompletion(BaseLLM):
     def call(
         self,
         messages: str | list[LLMMessage],
-        tools: list[dict] | None = None,
+        tools: list[dict[str, Any]] | None = None,
         callbacks: list[Any] | None = None,
         available_functions: dict[str, Any] | None = None,
         from_task: Any | None = None,
@@ -133,7 +145,7 @@ class AnthropicCompletion(BaseLLM):
         try:
             # Emit call started event
             self._emit_call_started_event(
-                messages=messages,  # type: ignore[arg-type]
+                messages=messages,
                 tools=tools,
                 callbacks=callbacks,
                 available_functions=available_functions,
@@ -143,7 +155,7 @@ class AnthropicCompletion(BaseLLM):
             # Format messages for Anthropic
             formatted_messages, system_message = self._format_messages_for_anthropic(
-                messages  # type: ignore[arg-type]
+                messages
             )

             # Prepare completion parameters
@@ -181,7 +193,7 @@ class AnthropicCompletion(BaseLLM):
         self,
         messages: list[LLMMessage],
         system_message: str | None = None,
-        tools: list[dict] | None = None,
+        tools: list[dict[str, Any]] | None = None,
     ) -> dict[str, Any]:
         """Prepare parameters for Anthropic messages API.
@@ -218,7 +230,9 @@ class AnthropicCompletion(BaseLLM):
         return params

-    def _convert_tools_for_interference(self, tools: list[dict]) -> list[dict]:
+    def _convert_tools_for_interference(
+        self, tools: list[dict[str, Any]]
+    ) -> list[dict[str, Any]]:
         """Convert CrewAI tool format to Anthropic tool use format."""
         anthropic_tools = []


@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 import logging
 import os
-from typing import Any, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any

 from pydantic import BaseModel
@@ -13,23 +13,25 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
 )
 from crewai.utilities.types import LLMMessage

 if TYPE_CHECKING:
+    from crewai.llms.hooks.base import BaseInterceptor
     from crewai.tools.base_tool import BaseTool

 try:
-    from azure.ai.inference import (  # type: ignore[import-not-found]
+    from azure.ai.inference import (
         ChatCompletionsClient,
     )
-    from azure.ai.inference.models import (  # type: ignore[import-not-found]
+    from azure.ai.inference.models import (
         ChatCompletions,
         ChatCompletionsToolCall,
         StreamingChatCompletionsUpdate,
     )
-    from azure.core.credentials import (  # type: ignore[import-not-found]
+    from azure.core.credentials import (
         AzureKeyCredential,
     )
-    from azure.core.exceptions import (  # type: ignore[import-not-found]
+    from azure.core.exceptions import (
         HttpResponseError,
     )
@@ -64,7 +66,8 @@ class AzureCompletion(BaseLLM):
         max_tokens: int | None = None,
         stop: list[str] | None = None,
         stream: bool = False,
-        **kwargs,
+        interceptor: BaseInterceptor[Any, Any] | None = None,
+        **kwargs: Any,
     ):
         """Initialize Azure AI Inference chat completion client.
@@ -82,8 +85,15 @@ class AzureCompletion(BaseLLM):
             max_tokens: Maximum tokens in response
             stop: Stop sequences
             stream: Enable streaming responses
+            interceptor: HTTP interceptor (not yet supported for Azure).
             **kwargs: Additional parameters
         """
+        if interceptor is not None:
+            raise NotImplementedError(
+                "HTTP interceptors are not yet supported for Azure AI Inference provider. "
+                "Interceptors are currently supported for OpenAI and Anthropic providers only."
+            )
+
         super().__init__(
             model=model, temperature=temperature, stop=stop or [], **kwargs
         )
@@ -121,7 +131,7 @@ class AzureCompletion(BaseLLM):
         if self.api_version:
             client_kwargs["api_version"] = self.api_version

-        self.client = ChatCompletionsClient(**client_kwargs)
+        self.client = ChatCompletionsClient(**client_kwargs)  # type: ignore[arg-type]

         self.top_p = top_p
         self.frequency_penalty = frequency_penalty
@@ -249,7 +259,7 @@ class AzureCompletion(BaseLLM):
     def _prepare_completion_params(
         self,
         messages: list[LLMMessage],
-        tools: list[dict] | None = None,
+        tools: list[dict[str, Any]] | None = None,
         response_model: type[BaseModel] | None = None,
     ) -> dict[str, Any]:
         """Prepare parameters for Azure AI Inference chat completion.
@@ -302,7 +312,9 @@ class AzureCompletion(BaseLLM):
         return params

-    def _convert_tools_for_interference(self, tools: list[dict]) -> list[dict]:
+    def _convert_tools_for_interference(
+        self, tools: list[dict[str, Any]]
+    ) -> list[dict[str, Any]]:
         """Convert CrewAI tool format to Azure OpenAI function calling format."""
         from crewai.llms.providers.utils.common import safe_tool_conversion


@@ -30,6 +30,8 @@ if TYPE_CHECKING:
         ToolTypeDef,
     )

+    from crewai.llms.hooks.base import BaseInterceptor

 try:
     from boto3.session import Session
@@ -157,8 +159,9 @@ class BedrockCompletion(BaseLLM):
         guardrail_config: dict[str, Any] | None = None,
         additional_model_request_fields: dict[str, Any] | None = None,
         additional_model_response_field_paths: list[str] | None = None,
-        **kwargs,
-    ):
+        interceptor: BaseInterceptor[Any, Any] | None = None,
+        **kwargs: Any,
+    ) -> None:
         """Initialize AWS Bedrock completion client.

         Args:
@@ -176,8 +179,15 @@ class BedrockCompletion(BaseLLM):
             guardrail_config: Guardrail configuration for content filtering
             additional_model_request_fields: Model-specific request parameters
             additional_model_response_field_paths: Custom response field paths
+            interceptor: HTTP interceptor (not yet supported for Bedrock).
             **kwargs: Additional parameters
         """
+        if interceptor is not None:
+            raise NotImplementedError(
+                "HTTP interceptors are not yet supported for AWS Bedrock provider. "
+                "Interceptors are currently supported for OpenAI and Anthropic providers only."
+            )
+
         # Extract provider from kwargs to avoid duplicate argument
         kwargs.pop("provider", None)
@@ -247,7 +257,7 @@ class BedrockCompletion(BaseLLM):
         try:
             # Emit call started event
             self._emit_call_started_event(
-                messages=messages,  # type: ignore[arg-type]
+                messages=messages,
                 tools=tools,
                 callbacks=callbacks,
                 available_functions=available_functions,
@@ -740,7 +750,9 @@ class BedrockCompletion(BaseLLM):
         return converse_messages, system_message

     @staticmethod
-    def _format_tools_for_converse(tools: list[dict]) -> list[ConverseToolTypeDef]:
+    def _format_tools_for_converse(
+        tools: list[dict[str, Any]],
+    ) -> list[ConverseToolTypeDef]:
         """Convert CrewAI tools to Converse API format following AWS specification."""
         from crewai.llms.providers.utils.common import safe_tool_conversion


@@ -1,4 +1,3 @@
-import json
 import logging
 import os
 from typing import Any, cast
@@ -7,6 +6,7 @@ from pydantic import BaseModel
 from crewai.events.types.llm_events import LLMCallType
 from crewai.llms.base_llm import BaseLLM
+from crewai.llms.hooks.base import BaseInterceptor
 from crewai.utilities.agent_utils import is_context_length_exceeded
 from crewai.utilities.exceptions.context_window_exceeding_exception import (
     LLMContextLengthExceededError,
@@ -45,7 +45,8 @@ class GeminiCompletion(BaseLLM):
         stream: bool = False,
         safety_settings: dict[str, Any] | None = None,
         client_params: dict[str, Any] | None = None,
-        **kwargs,
+        interceptor: BaseInterceptor[Any, Any] | None = None,
+        **kwargs: Any,
     ):
         """Initialize Google Gemini chat completion client.
@@ -63,8 +64,15 @@ class GeminiCompletion(BaseLLM):
             safety_settings: Safety filter settings
             client_params: Additional parameters to pass to the Google Gen AI Client constructor.
                 Supports parameters like http_options, credentials, debug_config, etc.
+            interceptor: HTTP interceptor (not yet supported for Gemini).
             **kwargs: Additional parameters
         """
+        if interceptor is not None:
+            raise NotImplementedError(
+                "HTTP interceptors are not yet supported for Google Gemini provider. "
+                "Interceptors are currently supported for OpenAI and Anthropic providers only."
+            )
+
         super().__init__(
             model=model, temperature=temperature, stop=stop_sequences or [], **kwargs
         )
@@ -96,7 +104,7 @@ class GeminiCompletion(BaseLLM):
         self.is_gemini_1_5 = "gemini-1.5" in model.lower()
         self.supports_tools = self.is_gemini_1_5 or self.is_gemini_2

-    def _initialize_client(self, use_vertexai: bool = False) -> genai.Client:
+    def _initialize_client(self, use_vertexai: bool = False) -> genai.Client:  # type: ignore[no-any-unimported]
         """Initialize the Google Gen AI client with proper parameter handling.

         Args:
@@ -171,7 +179,7 @@ class GeminiCompletion(BaseLLM):
     def call(
         self,
         messages: str | list[LLMMessage],
-        tools: list[dict] | None = None,
+        tools: list[dict[str, Any]] | None = None,
         callbacks: list[Any] | None = None,
         available_functions: dict[str, Any] | None = None,
         from_task: Any | None = None,
@@ -193,7 +201,7 @@ class GeminiCompletion(BaseLLM):
""" """
try: try:
self._emit_call_started_event( self._emit_call_started_event(
messages=messages, # type: ignore[arg-type] messages=messages,
tools=tools, tools=tools,
callbacks=callbacks, callbacks=callbacks,
available_functions=available_functions, available_functions=available_functions,
@@ -203,7 +211,7 @@ class GeminiCompletion(BaseLLM):
             self.tools = tools
             formatted_content, system_instruction = self._format_messages_for_gemini(
-                messages  # type: ignore[arg-type]
+                messages
             )

             config = self._prepare_generation_config(
@@ -245,10 +253,10 @@ class GeminiCompletion(BaseLLM):
             )
             raise

-    def _prepare_generation_config(
+    def _prepare_generation_config(  # type: ignore[no-any-unimported]
         self,
         system_instruction: str | None = None,
-        tools: list[dict] | None = None,
+        tools: list[dict[str, Any]] | None = None,
         response_model: type[BaseModel] | None = None,
     ) -> types.GenerateContentConfig:
         """Prepare generation config for Google Gemini API.
@@ -297,7 +305,9 @@ class GeminiCompletion(BaseLLM):
         return types.GenerateContentConfig(**config_params)

-    def _convert_tools_for_interference(self, tools: list[dict]) -> list[types.Tool]:
+    def _convert_tools_for_interference(  # type: ignore[no-any-unimported]
+        self, tools: list[dict[str, Any]]
+    ) -> list[types.Tool]:
         """Convert CrewAI tool format to Gemini function declaration format."""
         gemini_tools = []
@@ -320,7 +330,7 @@ class GeminiCompletion(BaseLLM):
         return gemini_tools

-    def _format_messages_for_gemini(
+    def _format_messages_for_gemini(  # type: ignore[no-any-unimported]
         self, messages: str | list[LLMMessage]
     ) -> tuple[list[types.Content], str | None]:
         """Format messages for Gemini API.
@@ -364,7 +374,7 @@ class GeminiCompletion(BaseLLM):
         return contents, system_instruction

-    def _handle_completion(
+    def _handle_completion(  # type: ignore[no-any-unimported]
         self,
         contents: list[types.Content],
         system_instruction: str | None,
@@ -431,7 +441,7 @@ class GeminiCompletion(BaseLLM):
         return content

-    def _handle_streaming_completion(
+    def _handle_streaming_completion(  # type: ignore[no-any-unimported]
         self,
         contents: list[types.Content],
         config: types.GenerateContentConfig,
@@ -560,8 +570,9 @@ class GeminiCompletion(BaseLLM):
         }
         return {"total_tokens": 0}

-    def _convert_contents_to_dict(
-        self, contents: list[types.Content]
+    def _convert_contents_to_dict(  # type: ignore[no-any-unimported]
+        self,
+        contents: list[types.Content],
     ) -> list[dict[str, str]]:
         """Convert contents to dict format."""
         return [


@@ -6,6 +6,7 @@ import logging
 import os
 from typing import TYPE_CHECKING, Any

+import httpx
 from openai import APIConnectionError, NotFoundError, OpenAI
 from openai.types.chat import ChatCompletion, ChatCompletionChunk
 from openai.types.chat.chat_completion import Choice
@@ -14,6 +15,7 @@ from pydantic import BaseModel
 from crewai.events.types.llm_events import LLMCallType
 from crewai.llms.base_llm import BaseLLM
+from crewai.llms.hooks.transport import HTTPTransport
 from crewai.utilities.agent_utils import is_context_length_exceeded
 from crewai.utilities.exceptions.context_window_exceeding_exception import (
     LLMContextLengthExceededError,
@@ -23,6 +25,7 @@ from crewai.utilities.types import LLMMessage
 if TYPE_CHECKING:
     from crewai.agent.core import Agent
+    from crewai.llms.hooks.base import BaseInterceptor
     from crewai.task import Task
     from crewai.tools.base_tool import BaseTool
@@ -59,6 +62,7 @@ class OpenAICompletion(BaseLLM):
         top_logprobs: int | None = None,
         reasoning_effort: str | None = None,
         provider: str | None = None,
+        interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
         **kwargs: Any,
     ) -> None:
         """Initialize OpenAI chat completion client."""
@@ -66,6 +70,7 @@ class OpenAICompletion(BaseLLM):
         if provider is None:
             provider = kwargs.pop("provider", "openai")

+        self.interceptor = interceptor

         # Client configuration attributes
         self.organization = organization
         self.project = project
@@ -88,6 +93,11 @@ class OpenAICompletion(BaseLLM):
         )

         client_config = self._get_client_params()

+        if self.interceptor:
+            transport = HTTPTransport(interceptor=self.interceptor)
+            http_client = httpx.Client(transport=transport)
+            client_config["http_client"] = http_client
+
         self.client = OpenAI(**client_config)

         # Completion parameters


@@ -0,0 +1,106 @@
interactions:
- request:
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"Reply with just
the word: SUCCESS"}],"model":"claude-3-5-haiku-20241022","stream":false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-length:
- '145'
content-type:
- application/json
host:
- api.anthropic.com
user-agent:
- Anthropic/Python 0.71.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 0.71.0
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
x-stainless-timeout:
- NOT_GIVEN
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//dJDLasMwEEX/5a5lsF1nUe1KCARCu6gpFEoRgzTEamzJ1qNNCf734tDQ
F10N3HNmBu4JgzfcQ0L3lA0XV8Wq6MgeclGXdVOVdQ0BayAxxL0qq9rcXm/vb2g37ehtc+xeHu+m
4xYC6X3kxeIYac8QCL5fAorRxkQuQUB7l9glyKfTxU98XMh5SLQP6/WmbTE/C8TkRxWYoneQYGdU
ysHhE0SeMjvNkC73vUA+f5UnWDfmpJI/sIuQVSOgSXesdGBK1jv1UygvPDCZ/9hld7nPY8cDB+rV
avjrf9Gq+01nAZ/T96gRiBxerWaVLAdILE0ZCgbz/AEAAP//AwA4VVIcmwEAAA==
headers:
CF-RAY:
- 9997ac4cbfb443fa-EWR
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Nov 2025 22:50:55 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
anthropic-organization-id:
- 87faf353-e074-4658-b885-bfac7aa5a7b5
anthropic-ratelimit-input-tokens-limit:
- '400000'
anthropic-ratelimit-input-tokens-remaining:
- '400000'
anthropic-ratelimit-input-tokens-reset:
- '2025-11-04T22:50:55Z'
anthropic-ratelimit-output-tokens-limit:
- '80000'
anthropic-ratelimit-output-tokens-remaining:
- '80000'
anthropic-ratelimit-output-tokens-reset:
- '2025-11-04T22:50:55Z'
anthropic-ratelimit-requests-limit:
- '4000'
anthropic-ratelimit-requests-remaining:
- '3999'
anthropic-ratelimit-requests-reset:
- '2025-11-04T22:50:54Z'
anthropic-ratelimit-tokens-limit:
- '480000'
anthropic-ratelimit-tokens-remaining:
- '480000'
anthropic-ratelimit-tokens-reset:
- '2025-11-04T22:50:55Z'
cf-cache-status:
- DYNAMIC
request-id:
- req_011CUofgQ1jTrjQ2sveXU1cC
retry-after:
- '5'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '461'
status:
code: 200
message: OK
version: 1


@@ -0,0 +1,108 @@
interactions:
- request:
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"Say ''Hello World''
and nothing else"}],"model":"claude-3-5-haiku-20241022","stream":false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-length:
- '146'
content-type:
- application/json
host:
- api.anthropic.com
user-agent:
- Anthropic/Python 0.71.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 0.71.0
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
x-stainless-timeout:
- NOT_GIVEN
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//dJBNS8QwEIb/irznFNqu3UPOHvYoeGhFJIRk2IZNk5pMRCn979LF4hee
Bt7nmRl4F0zRkoeE8bpYqg5VV43aXUrV1u1tU7ctBJyFxJTPqm7u3OP9sTFD7uJwLkN/6seHQwcB
fp9psyhnfSYIpOi3QOfsMuvAEDAxMAWGfFp2n+ltI9chcSLv400fk7dYnwUyx1kl0jkGSFCwiksK
+ASZXgoFQ5CheC9Qrp/lAhfmworjhUKGbI4CRpuRlEmk2cWgfgr1zhNp+x/bd7f7NI80UdJeddNf
/4s242+6CsTC36NOIFN6dYYUO0qQ2NqyOlms6wcAAAD//wMArYPuQZ8BAAA=
headers:
CF-RAY:
- 9997ac4268d972a4-EWR
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Nov 2025 22:50:53 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Response-Tracked:
- 'true'
X-Robots-Tag:
- none
anthropic-organization-id:
- 87faf353-e074-4658-b885-bfac7aa5a7b5
anthropic-ratelimit-input-tokens-limit:
- '400000'
anthropic-ratelimit-input-tokens-remaining:
- '400000'
anthropic-ratelimit-input-tokens-reset:
- '2025-11-04T22:50:53Z'
anthropic-ratelimit-output-tokens-limit:
- '80000'
anthropic-ratelimit-output-tokens-remaining:
- '80000'
anthropic-ratelimit-output-tokens-reset:
- '2025-11-04T22:50:53Z'
anthropic-ratelimit-requests-limit:
- '4000'
anthropic-ratelimit-requests-remaining:
- '3999'
anthropic-ratelimit-requests-reset:
- '2025-11-04T22:50:53Z'
anthropic-ratelimit-tokens-limit:
- '480000'
anthropic-ratelimit-tokens-remaining:
- '480000'
anthropic-ratelimit-tokens-reset:
- '2025-11-04T22:50:53Z'
cf-cache-status:
- DYNAMIC
request-id:
- req_011CUofgGvqpCtoqCmKcwXdK
retry-after:
- '7'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '441'
status:
code: 200
message: OK
version: 1


@@ -0,0 +1,107 @@
interactions:
- request:
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"Count from 1 to
3"}],"model":"claude-3-5-haiku-20241022","stream":false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-length:
- '129'
content-type:
- application/json
host:
- api.anthropic.com
user-agent:
- Anthropic/Python 0.71.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 0.71.0
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
x-stainless-timeout:
- NOT_GIVEN
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: !!binary |
H4sIAAAAAAAAA3SQy2rDMBBFf8XcTTcy2E5DQbuuUvrIJt3VRQh7EovYI1calbTB/14cGvqiq4F7
zgzDPWLwLfXQaHqbWsoX+TLvrNunvCqqy7KoKii4FhpD3JmiXD2s7++u+HG1kcP77cGuN9tyuIaC
vI00WxSj3REUgu/nwMboolgWKDSehVign45nX+gwk9PQuKFAFzFrfGJxvMu2wQ9ZmYnPFrrmmsua
q5oXmJ4VovjRBLLRMzSIWyMpMD5BpJdE3BA0p75XSKev9BGOxyRG/J44QpdLhcY2HZkmkBXn2fwU
ijMPZNv/2Hl3vk9jRwMF25vl8Nf/omX3m04KPsn3qCoUIoVX15ARRwEac5WtDS2m6QMAAP//AwC8
QSj4vAEAAA==
headers:
CF-RAY:
- 9997ac45ea33de93-EWR
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Nov 2025 22:50:54 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
anthropic-organization-id:
- 87faf353-e074-4658-b885-bfac7aa5a7b5
anthropic-ratelimit-input-tokens-limit:
- '400000'
anthropic-ratelimit-input-tokens-remaining:
- '400000'
anthropic-ratelimit-input-tokens-reset:
- '2025-11-04T22:50:54Z'
anthropic-ratelimit-output-tokens-limit:
- '80000'
anthropic-ratelimit-output-tokens-remaining:
- '80000'
anthropic-ratelimit-output-tokens-reset:
- '2025-11-04T22:50:54Z'
anthropic-ratelimit-requests-limit:
- '4000'
anthropic-ratelimit-requests-remaining:
- '3999'
anthropic-ratelimit-requests-reset:
- '2025-11-04T22:50:53Z'
anthropic-ratelimit-tokens-limit:
- '480000'
anthropic-ratelimit-tokens-remaining:
- '480000'
anthropic-ratelimit-tokens-reset:
- '2025-11-04T22:50:54Z'
cf-cache-status:
- DYNAMIC
request-id:
- req_011CUofgKLSz1aDmL9qbduuR
retry-after:
- '6'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-envoy-upstream-service-time:
- '801'
status:
code: 200
message: OK
version: 1


@@ -0,0 +1,116 @@
interactions:
- request:
body: '{"messages":[{"role":"user","content":"Reply with just the word: SUCCESS"}],"model":"gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '98'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jJJNT+MwEIbv+RXWnJtVW/oBvbEVPexyq2AFCEXGnqReHI9lTxAr1P++
clKa8CVxyWGeecfvO5mXTAgwGlYC1E6yqr3N1ze/Z8+Xm+X1+cU1P23qXydSnz5ubvXP8o+FUVLQ
w19U/Kr6oaj2FtmQ67AKKBnT1MlyMZ2eLU6WkxbUpNEmWeU5n1FeG2fy6Xg6y8fLfHJ6UO/IKIyw
EneZEEK8tN/k02l8hpUYj14rNcYoK4TVsUkICGRTBWSMJrJ0DKMeKnKMrrW+vVqvL7bbIQ1YNlEm
h66xdgCkc8QyJWx93R/I/ujEUuUDPcR3UiiNM3FXBJSRXHo1Mnlo6T4T4r5N3LwJAT5Q7blgesT2
ucmsGwf9ngfwwJhY2kF5PvpkWKGRpbFxsDBQUu1Q98p+u7LRhgYgG0T+6OWz2V1s46rvjO+BUugZ
deEDaqPe5u3bAqYj/KrtuOLWMEQMT0ZhwQZD+g0aS9nY7jQg/ouMdVEaV2HwwXT3UfpivhjLcoHz
+Rlk++w/AAAA//8DAGpm+y8tAwAA
headers:
CF-RAY:
- 9997a55e8e85d954-EWR
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Nov 2025 22:46:11 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=Ljd6Yw.qJgdFyASoXMTCHgeOXz.kPJVf9verbOyhWzg-1762296371-1.0.1.1-HutBZMolyfao56ckVJOnqKZgW8SSm0S_xA1DF2HIE4eYlqsLEi3OtkeTKNc536CxqhcmuTINB23o_A6nID5TAGpXCeNYBEgLJKiggQamQ9w;
path=/; expires=Tue, 04-Nov-25 23:16:11 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=Tz6VwwwbLcFpqp9Poc_3sUeqc33hmGkTq8YCekrTAns-1762296371669-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '194'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '222'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999987'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999987'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_5f6372db5713441eb3ba1cc481aeb0fe
status:
code: 200
message: OK
version: 1


@@ -0,0 +1,119 @@
interactions:
- request:
body: '{"messages":[{"role":"user","content":"Say ''Hello World'' and nothing
else"}],"model":"gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '99'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jJJNT+MwEIbv+RXWnJtVGvoBvXLhS0Jc2EUIRcaepN51PJY9QaxQ/zty
UpoUWGkvPviZd/y+43nLhACjYSNAbSWr1tv8/OF68Xr/wtKfXMaLhb6xv+71Dd1dGV3cwiwp6Pk3
Kv5Q/VDUeotsyA1YBZSMqet8vSrLs9XJet6DljTaJGs85wvKW+NMXhblIi/W+fx0r96SURhhIx4z
IYR468/k02l8hY0oZh83LcYoG4TNoUgICGTTDcgYTWTpGGYjVOQYXW/9Aq0l8ZOC1dOKgHUXZXLp
OmsnQDpHLFPK3tvTnuwObiw1PtBz/CSF2jgTt1VAGcmllyOTh57uMiGe+tTdURDwgVrPFdMf7J+b
L4d2MM56hOWeMbG0E8169k2zSiNLY+NkaKCk2qIeleOEZacNTUA2ifzVy3e9h9jGNf/TfgRKoWfU
lQ+ojTrOO5YFTIv4r7LDiHvDEDG8GIUVGwzpGzTWsrPDekD8GxnbqjauweCDGXak9tVyVch6hcvl
GWS77B0AAP//AwC41MWDMQMAAA==
headers:
CF-RAY:
- 9997a563da0042a3-EWR
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Nov 2025 22:46:12 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=71X9mE9Hmg7j990_h7K01BESKxBp2D4QYv9j1PmSm6I-1762296372-1.0.1.1-V7pmEV0YDa.OeJ8Pht15YJt2XRqusPvH52QlHRhBCRAoGIkSmqMCG.rYS44HRNCR3Kf2D4UeRaNaUMgws1tL74cvebKOa_aGVjBw_O2okGc;
path=/; expires=Tue, 04-Nov-25 23:16:12 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=.9w.Y6a8QsaD_7IAK4u3JaHCreibv0u6ujLC7HVF2nY-1762296372265-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
X-Response-Tracked:
- 'true'
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '332'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '349'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999987'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999990'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_21f5fd685fdf43e7b06e4ccf5f796b96
status:
code: 200
message: OK
version: 1


@@ -0,0 +1,116 @@
interactions:
- request:
body: '{"messages":[{"role":"user","content":"Count from 1 to 3"}],"model":"gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '82'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jJJBb9swDIXv/hUCz3YRu4nT5FpgC9DrLttQGIpM29pkUZDobEGR/z7I
TmO364BdfODHR71H8yURAnQNewGqk6x6Z7LHr0/rc85lR8fdIXw7DWd3ePr869Pj6fClgzQq6PgD
Fb+q7hT1ziBrshNWHiVjnJpvy6LYlffbYgQ91WiirHWcrSnrtdVZsSrW2Wqb5Q9XdUdaYYC9+J4I
IcTL+I0+bY2/YS9W6WulxxBki7C/NQkBnkysgAxBB5aWIZ2hIstoR+t5KopU3N8tscdmCDJatIMx
CyCtJZYx4mjs+UouNyuGWufpGN5JodFWh67yKAPZ+GxgcjDSSyLE8xh5eJMCnKfeccX0E8fn8vU0
DuZFz/DhyphYmrlcFOkHw6oaWWoTFhsDJVWH9ayc1yuHWtMCJIvIf3v5aPYUW9v2f8bPQCl0jHXl
PNZavc07t3mMV/ivttuKR8MQ0J+0woo1+vgbamzkYKbbgHAOjH3VaNuid15PB9K4alOuZFPiZrOD
5JL8AQAA//8DAIyvq4AuAwAA
headers:
CF-RAY:
- 9997a567bce9c35e-EWR
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 04 Nov 2025 22:46:13 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=WbtKMfrbJkHmW8iHwTlAt1O0TT9hmE7i6Jc4CuzPFkk-1762296373-1.0.1.1-H4_jBpfR_9YQFFm2iDhVCcmwtOAfFhVkN6HaUsD3H8frMqxJjj7oiLathDv89L6e412o.pMtaQVL5e5XfVEv0diMAwtUsWsbzbTwF3rgkug;
path=/; expires=Tue, 04-Nov-25 23:16:13 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=rxHDwk1CRF6MkO5Jc7ikrkXBxkrhhmf.yJD6Z94mvUI-1762296373153-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '552'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '601'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999992'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999992'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_f67a12044f894859b6b867e583e42e24
status:
code: 200
message: OK
version: 1


@@ -0,0 +1 @@
"""Tests for LLM interceptor hooks functionality."""


@@ -0,0 +1,311 @@
"""Tests for Anthropic provider with interceptor integration."""
import os
import httpx
import pytest
from crewai.llm import LLM
from crewai.llms.hooks.base import BaseInterceptor
@pytest.fixture(autouse=True)
def setup_anthropic_api_key(monkeypatch):
"""Set dummy Anthropic API key for tests that don't make real API calls."""
if "ANTHROPIC_API_KEY" not in os.environ:
monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-test-key-dummy")
class AnthropicTestInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Test interceptor for Anthropic provider."""
def __init__(self) -> None:
"""Initialize tracking and modification state."""
self.outbound_calls: list[httpx.Request] = []
self.inbound_calls: list[httpx.Response] = []
self.custom_header_value = "anthropic-test-value"
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Track and modify outbound Anthropic requests.
Args:
message: The outbound request.
Returns:
Modified request with custom headers.
"""
self.outbound_calls.append(message)
message.headers["X-Anthropic-Interceptor"] = self.custom_header_value
message.headers["X-Request-ID"] = "test-request-456"
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Track inbound Anthropic responses.
Args:
message: The inbound response.
Returns:
The response with tracking header.
"""
self.inbound_calls.append(message)
message.headers["X-Response-Tracked"] = "true"
return message
class TestAnthropicInterceptorIntegration:
"""Test suite for Anthropic provider with interceptor."""
def test_anthropic_llm_accepts_interceptor(self) -> None:
"""Test that Anthropic LLM accepts interceptor parameter."""
interceptor = AnthropicTestInterceptor()
llm = LLM(model="anthropic/claude-3-5-sonnet-20241022", interceptor=interceptor)
assert llm.interceptor is interceptor
@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
def test_anthropic_call_with_interceptor_tracks_requests(self) -> None:
"""Test that interceptor tracks Anthropic API requests."""
interceptor = AnthropicTestInterceptor()
llm = LLM(model="anthropic/claude-3-5-haiku-20241022", interceptor=interceptor)
# Make a simple completion call
result = llm.call(
messages=[{"role": "user", "content": "Say 'Hello World' and nothing else"}]
)
# Verify custom headers were added
for request in interceptor.outbound_calls:
assert "X-Anthropic-Interceptor" in request.headers
assert request.headers["X-Anthropic-Interceptor"] == "anthropic-test-value"
assert "X-Request-ID" in request.headers
assert request.headers["X-Request-ID"] == "test-request-456"
# Verify response was tracked
for response in interceptor.inbound_calls:
assert "X-Response-Tracked" in response.headers
assert response.headers["X-Response-Tracked"] == "true"
# Verify result is valid
assert result is not None
assert isinstance(result, str)
assert len(result) > 0
def test_anthropic_without_interceptor_works(self) -> None:
"""Test that Anthropic LLM works without interceptor."""
llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")
assert llm.interceptor is None
def test_multiple_anthropic_llms_different_interceptors(self) -> None:
"""Test that multiple Anthropic LLMs can have different interceptors."""
interceptor1 = AnthropicTestInterceptor()
interceptor1.custom_header_value = "claude-opus-value"
interceptor2 = AnthropicTestInterceptor()
interceptor2.custom_header_value = "claude-sonnet-value"
llm1 = LLM(model="anthropic/claude-3-opus-20240229", interceptor=interceptor1)
llm2 = LLM(model="anthropic/claude-3-5-sonnet-20241022", interceptor=interceptor2)
assert llm1.interceptor is interceptor1
assert llm2.interceptor is interceptor2
assert llm1.interceptor.custom_header_value == "claude-opus-value"
assert llm2.interceptor.custom_header_value == "claude-sonnet-value"
class AnthropicLoggingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Interceptor that logs Anthropic request/response details."""
def __init__(self) -> None:
"""Initialize logging lists."""
self.request_urls: list[str] = []
self.request_methods: list[str] = []
self.response_status_codes: list[int] = []
self.anthropic_version_headers: list[str] = []
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Log outbound request details.
Args:
message: The outbound request.
Returns:
The request unchanged.
"""
self.request_urls.append(str(message.url))
self.request_methods.append(message.method)
if "anthropic-version" in message.headers:
self.anthropic_version_headers.append(message.headers["anthropic-version"])
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Log inbound response details.
Args:
message: The inbound response.
Returns:
The response unchanged.
"""
self.response_status_codes.append(message.status_code)
return message
class TestAnthropicLoggingInterceptor:
"""Test suite for logging interceptor with Anthropic."""
def test_logging_interceptor_instantiation(self) -> None:
"""Test that logging interceptor can be created with Anthropic LLM."""
interceptor = AnthropicLoggingInterceptor()
llm = LLM(model="anthropic/claude-3-5-sonnet-20241022", interceptor=interceptor)
assert llm.interceptor is interceptor
assert isinstance(llm.interceptor, AnthropicLoggingInterceptor)
@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
def test_logging_interceptor_tracks_details(self) -> None:
"""Test that logging interceptor tracks request/response details."""
interceptor = AnthropicLoggingInterceptor()
llm = LLM(model="anthropic/claude-3-5-haiku-20241022", interceptor=interceptor)
# Make a completion call
result = llm.call(messages=[{"role": "user", "content": "Count from 1 to 3"}])
# Verify URL points to Anthropic API
for url in interceptor.request_urls:
assert "anthropic" in url.lower() or "api" in url.lower()
# Verify methods are POST (messages endpoint uses POST)
for method in interceptor.request_methods:
assert method == "POST"
# Verify successful status codes
for status_code in interceptor.response_status_codes:
assert 200 <= status_code < 300
# Verify result is valid
assert result is not None
class AnthropicHeaderInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Interceptor that adds Anthropic-specific headers."""
def __init__(self, workspace_id: str, user_id: str) -> None:
"""Initialize with Anthropic-specific metadata.
Args:
workspace_id: The workspace ID to inject.
user_id: The user ID to inject.
"""
self.workspace_id = workspace_id
self.user_id = user_id
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Add custom metadata headers to request.
Args:
message: The outbound request.
Returns:
Request with metadata headers.
"""
message.headers["X-Workspace-ID"] = self.workspace_id
message.headers["X-User-ID"] = self.user_id
message.headers["X-Custom-Client"] = "crewai-interceptor"
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Pass through inbound response.
Args:
message: The inbound response.
Returns:
The response unchanged.
"""
return message
class TestAnthropicHeaderInterceptor:
"""Test suite for header interceptor with Anthropic."""
def test_header_interceptor_with_anthropic(self) -> None:
"""Test that header interceptor can be used with Anthropic LLM."""
interceptor = AnthropicHeaderInterceptor(
workspace_id="ws-789", user_id="user-012"
)
llm = LLM(model="anthropic/claude-3-5-sonnet-20241022", interceptor=interceptor)
assert llm.interceptor is interceptor
assert llm.interceptor.workspace_id == "ws-789"
assert llm.interceptor.user_id == "user-012"
def test_header_interceptor_adds_headers(self) -> None:
"""Test that header interceptor adds custom headers to requests."""
interceptor = AnthropicHeaderInterceptor(workspace_id="ws-123", user_id="u-456")
request = httpx.Request("POST", "https://api.anthropic.com/v1/messages")
modified_request = interceptor.on_outbound(request)
assert "X-Workspace-ID" in modified_request.headers
assert modified_request.headers["X-Workspace-ID"] == "ws-123"
assert "X-User-ID" in modified_request.headers
assert modified_request.headers["X-User-ID"] == "u-456"
assert "X-Custom-Client" in modified_request.headers
assert modified_request.headers["X-Custom-Client"] == "crewai-interceptor"
@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])
def test_header_interceptor_with_real_call(self) -> None:
"""Test that header interceptor works with real Anthropic API call."""
interceptor = AnthropicHeaderInterceptor(workspace_id="ws-999", user_id="u-888")
llm = LLM(model="anthropic/claude-3-5-haiku-20241022", interceptor=interceptor)
# Make a simple call
result = llm.call(
messages=[{"role": "user", "content": "Reply with just the word: SUCCESS"}]
)
# Verify the call succeeded
assert result is not None
assert len(result) > 0
# Verify the interceptor was configured
assert llm.interceptor is interceptor
class TestMixedProviderInterceptors:
"""Test suite for using interceptors with different providers."""
def test_openai_and_anthropic_different_interceptors(self) -> None:
"""Test that OpenAI and Anthropic LLMs can have different interceptors."""
openai_interceptor = AnthropicTestInterceptor()
openai_interceptor.custom_header_value = "openai-specific"
anthropic_interceptor = AnthropicTestInterceptor()
anthropic_interceptor.custom_header_value = "anthropic-specific"
openai_llm = LLM(model="gpt-4", interceptor=openai_interceptor)
anthropic_llm = LLM(
model="anthropic/claude-3-5-sonnet-20241022", interceptor=anthropic_interceptor
)
assert openai_llm.interceptor is openai_interceptor
assert anthropic_llm.interceptor is anthropic_interceptor
assert openai_llm.interceptor.custom_header_value == "openai-specific"
assert anthropic_llm.interceptor.custom_header_value == "anthropic-specific"
def test_same_interceptor_different_providers(self) -> None:
"""Test that same interceptor instance can be used with multiple providers."""
shared_interceptor = AnthropicTestInterceptor()
openai_llm = LLM(model="gpt-4", interceptor=shared_interceptor)
anthropic_llm = LLM(
model="anthropic/claude-3-5-sonnet-20241022", interceptor=shared_interceptor
)
assert openai_llm.interceptor is shared_interceptor
assert anthropic_llm.interceptor is shared_interceptor
assert openai_llm.interceptor is anthropic_llm.interceptor


@@ -0,0 +1,287 @@
"""Tests for base interceptor functionality."""
import httpx
import pytest
from crewai.llms.hooks.base import BaseInterceptor
class SimpleInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Simple test interceptor implementation."""
def __init__(self) -> None:
"""Initialize tracking lists."""
self.outbound_calls: list[httpx.Request] = []
self.inbound_calls: list[httpx.Response] = []
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Track outbound calls.
Args:
message: The outbound request.
Returns:
The request unchanged.
"""
self.outbound_calls.append(message)
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Track inbound calls.
Args:
message: The inbound response.
Returns:
The response unchanged.
"""
self.inbound_calls.append(message)
return message
class ModifyingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Interceptor that modifies requests and responses."""
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Add custom header to outbound request.
Args:
message: The outbound request.
Returns:
Modified request with custom header.
"""
message.headers["X-Custom-Header"] = "test-value"
message.headers["X-Intercepted"] = "true"
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Add custom header to inbound response.
Args:
message: The inbound response.
Returns:
Modified response with custom header.
"""
message.headers["X-Response-Intercepted"] = "true"
return message
class AsyncInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Interceptor with async support."""
def __init__(self) -> None:
"""Initialize tracking lists."""
self.async_outbound_calls: list[httpx.Request] = []
self.async_inbound_calls: list[httpx.Response] = []
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Handle sync outbound.
Args:
message: The outbound request.
Returns:
The request unchanged.
"""
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Handle sync inbound.
Args:
message: The inbound response.
Returns:
The response unchanged.
"""
return message
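    # The aon_* overrides below opt this interceptor into async transports;
    # the BaseInterceptor defaults raise NotImplementedError (exercised in
    # test_default_async_not_implemented).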
async def aon_outbound(self, message: httpx.Request) -> httpx.Request:
"""Handle async outbound.
Args:
message: The outbound request.
Returns:
Modified request with async header.
"""
self.async_outbound_calls.append(message)
message.headers["X-Async-Outbound"] = "true"
return message
async def aon_inbound(self, message: httpx.Response) -> httpx.Response:
"""Handle async inbound.
Args:
message: The inbound response.
Returns:
Modified response with async header.
"""
self.async_inbound_calls.append(message)
message.headers["X-Async-Inbound"] = "true"
return message
class TestBaseInterceptor:
"""Test suite for BaseInterceptor class."""
def test_interceptor_instantiation(self) -> None:
"""Test that interceptor can be instantiated."""
interceptor = SimpleInterceptor()
assert interceptor is not None
assert isinstance(interceptor, BaseInterceptor)
def test_on_outbound_called(self) -> None:
"""Test that on_outbound is called and tracks requests."""
interceptor = SimpleInterceptor()
request = httpx.Request("GET", "https://api.example.com/test")
result = interceptor.on_outbound(request)
assert len(interceptor.outbound_calls) == 1
assert interceptor.outbound_calls[0] is request
assert result is request
def test_on_inbound_called(self) -> None:
"""Test that on_inbound is called and tracks responses."""
interceptor = SimpleInterceptor()
response = httpx.Response(200, json={"status": "ok"})
result = interceptor.on_inbound(response)
assert len(interceptor.inbound_calls) == 1
assert interceptor.inbound_calls[0] is response
assert result is response
def test_multiple_outbound_calls(self) -> None:
"""Test that interceptor tracks multiple outbound calls."""
interceptor = SimpleInterceptor()
requests = [
httpx.Request("GET", "https://api.example.com/1"),
httpx.Request("POST", "https://api.example.com/2"),
httpx.Request("PUT", "https://api.example.com/3"),
]
for req in requests:
interceptor.on_outbound(req)
assert len(interceptor.outbound_calls) == 3
assert interceptor.outbound_calls == requests
def test_multiple_inbound_calls(self) -> None:
"""Test that interceptor tracks multiple inbound calls."""
interceptor = SimpleInterceptor()
responses = [
httpx.Response(200, json={"id": 1}),
httpx.Response(201, json={"id": 2}),
httpx.Response(404, json={"error": "not found"}),
]
for resp in responses:
interceptor.on_inbound(resp)
assert len(interceptor.inbound_calls) == 3
assert interceptor.inbound_calls == responses
class TestModifyingInterceptor:
"""Test suite for interceptor that modifies messages."""
def test_outbound_header_modification(self) -> None:
"""Test that interceptor can add headers to outbound requests."""
interceptor = ModifyingInterceptor()
request = httpx.Request("GET", "https://api.example.com/test")
result = interceptor.on_outbound(request)
assert result is request
assert "X-Custom-Header" in result.headers
assert result.headers["X-Custom-Header"] == "test-value"
assert "X-Intercepted" in result.headers
assert result.headers["X-Intercepted"] == "true"
def test_inbound_header_modification(self) -> None:
"""Test that interceptor can add headers to inbound responses."""
interceptor = ModifyingInterceptor()
response = httpx.Response(200, json={"status": "ok"})
result = interceptor.on_inbound(response)
assert result is response
assert "X-Response-Intercepted" in result.headers
assert result.headers["X-Response-Intercepted"] == "true"
def test_preserves_existing_headers(self) -> None:
"""Test that interceptor preserves existing headers."""
interceptor = ModifyingInterceptor()
request = httpx.Request(
"GET",
"https://api.example.com/test",
headers={"Authorization": "Bearer token123", "Content-Type": "application/json"},
)
result = interceptor.on_outbound(request)
assert result.headers["Authorization"] == "Bearer token123"
assert result.headers["Content-Type"] == "application/json"
assert result.headers["X-Custom-Header"] == "test-value"
class TestAsyncInterceptor:
"""Test suite for async interceptor functionality."""
def test_sync_methods_work(self) -> None:
"""Test that sync methods still work on async interceptor."""
interceptor = AsyncInterceptor()
request = httpx.Request("GET", "https://api.example.com/test")
response = httpx.Response(200)
req_result = interceptor.on_outbound(request)
resp_result = interceptor.on_inbound(response)
assert req_result is request
assert resp_result is response
@pytest.mark.asyncio
async def test_async_outbound(self) -> None:
"""Test async outbound hook."""
interceptor = AsyncInterceptor()
request = httpx.Request("GET", "https://api.example.com/test")
result = await interceptor.aon_outbound(request)
assert result is request
assert len(interceptor.async_outbound_calls) == 1
assert interceptor.async_outbound_calls[0] is request
assert "X-Async-Outbound" in result.headers
assert result.headers["X-Async-Outbound"] == "true"
@pytest.mark.asyncio
async def test_async_inbound(self) -> None:
"""Test async inbound hook."""
interceptor = AsyncInterceptor()
response = httpx.Response(200, json={"status": "ok"})
result = await interceptor.aon_inbound(response)
assert result is response
assert len(interceptor.async_inbound_calls) == 1
assert interceptor.async_inbound_calls[0] is response
assert "X-Async-Inbound" in result.headers
assert result.headers["X-Async-Inbound"] == "true"
@pytest.mark.asyncio
async def test_default_async_not_implemented(self) -> None:
"""Test that default async methods raise NotImplementedError."""
interceptor = SimpleInterceptor()
request = httpx.Request("GET", "https://api.example.com/test")
response = httpx.Response(200)
with pytest.raises(NotImplementedError):
await interceptor.aon_outbound(request)
with pytest.raises(NotImplementedError):
await interceptor.aon_inbound(response)
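# Contract documented above: sync hooks must always be implemented; the async
# aon_* hooks are opt-in and must be overridden before an interceptor is used
# with an async transport.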

View File

@@ -0,0 +1,262 @@
"""Tests for OpenAI provider with interceptor integration."""
import httpx
import pytest
from crewai.llm import LLM
from crewai.llms.hooks.base import BaseInterceptor
class OpenAITestInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Test interceptor for OpenAI provider."""
def __init__(self) -> None:
"""Initialize tracking and modification state."""
self.outbound_calls: list[httpx.Request] = []
self.inbound_calls: list[httpx.Response] = []
self.custom_header_value = "openai-test-value"
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Track and modify outbound OpenAI requests.
Args:
message: The outbound request.
Returns:
Modified request with custom headers.
"""
self.outbound_calls.append(message)
message.headers["X-OpenAI-Interceptor"] = self.custom_header_value
message.headers["X-Request-ID"] = "test-request-123"
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Track inbound OpenAI responses.
Args:
message: The inbound response.
Returns:
The response with tracking header.
"""
self.inbound_calls.append(message)
message.headers["X-Response-Tracked"] = "true"
return message
class TestOpenAIInterceptorIntegration:
"""Test suite for OpenAI provider with interceptor."""
def test_openai_llm_accepts_interceptor(self) -> None:
"""Test that OpenAI LLM accepts interceptor parameter."""
interceptor = OpenAITestInterceptor()
llm = LLM(model="gpt-4", interceptor=interceptor)
assert llm.interceptor is interceptor
@pytest.mark.vcr(filter_headers=["authorization"])
def test_openai_call_with_interceptor_tracks_requests(self) -> None:
"""Test that interceptor tracks OpenAI API requests."""
interceptor = OpenAITestInterceptor()
llm = LLM(model="gpt-4o-mini", interceptor=interceptor)
# Make a simple completion call
result = llm.call(
messages=[{"role": "user", "content": "Say 'Hello World' and nothing else"}]
)
# Verify custom headers were added
for request in interceptor.outbound_calls:
assert "X-OpenAI-Interceptor" in request.headers
assert request.headers["X-OpenAI-Interceptor"] == "openai-test-value"
assert "X-Request-ID" in request.headers
assert request.headers["X-Request-ID"] == "test-request-123"
# Verify response was tracked
for response in interceptor.inbound_calls:
assert "X-Response-Tracked" in response.headers
assert response.headers["X-Response-Tracked"] == "true"
# Verify result is valid
assert result is not None
assert isinstance(result, str)
assert len(result) > 0
def test_openai_without_interceptor_works(self) -> None:
"""Test that OpenAI LLM works without interceptor."""
llm = LLM(model="gpt-4")
assert llm.interceptor is None
def test_multiple_openai_llms_different_interceptors(self) -> None:
"""Test that multiple OpenAI LLMs can have different interceptors."""
interceptor1 = OpenAITestInterceptor()
interceptor1.custom_header_value = "llm1-value"
interceptor2 = OpenAITestInterceptor()
interceptor2.custom_header_value = "llm2-value"
llm1 = LLM(model="gpt-4", interceptor=interceptor1)
llm2 = LLM(model="gpt-3.5-turbo", interceptor=interceptor2)
assert llm1.interceptor is interceptor1
assert llm2.interceptor is interceptor2
assert llm1.interceptor.custom_header_value == "llm1-value"
assert llm2.interceptor.custom_header_value == "llm2-value"
class LoggingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Interceptor that logs request/response details for testing."""
def __init__(self) -> None:
"""Initialize logging lists."""
self.request_urls: list[str] = []
self.request_methods: list[str] = []
self.response_status_codes: list[int] = []
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Log outbound request details.
Args:
message: The outbound request.
Returns:
The request unchanged.
"""
self.request_urls.append(str(message.url))
self.request_methods.append(message.method)
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Log inbound response details.
Args:
message: The inbound response.
Returns:
The response unchanged.
"""
self.response_status_codes.append(message.status_code)
return message
class TestOpenAILoggingInterceptor:
"""Test suite for logging interceptor with OpenAI."""
def test_logging_interceptor_instantiation(self) -> None:
"""Test that logging interceptor can be created with OpenAI LLM."""
interceptor = LoggingInterceptor()
llm = LLM(model="gpt-4", interceptor=interceptor)
assert llm.interceptor is interceptor
assert isinstance(llm.interceptor, LoggingInterceptor)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_logging_interceptor_tracks_details(self) -> None:
"""Test that logging interceptor tracks request/response details."""
interceptor = LoggingInterceptor()
llm = LLM(model="gpt-4o-mini", interceptor=interceptor)
# Make a completion call
result = llm.call(
messages=[{"role": "user", "content": "Count from 1 to 3"}]
)
        # Verify each URL looks like an OpenAI API endpoint (loose check so a custom base URL still passes)
for url in interceptor.request_urls:
assert "openai" in url.lower() or "api" in url.lower()
# Verify methods are POST (chat completions use POST)
for method in interceptor.request_methods:
assert method == "POST"
# Verify successful status codes
for status_code in interceptor.response_status_codes:
assert 200 <= status_code < 300
# Verify result is valid
assert result is not None
class AuthInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Interceptor that adds authentication headers."""
def __init__(self, api_key: str, org_id: str) -> None:
"""Initialize with auth credentials.
Args:
api_key: The API key to inject.
org_id: The organization ID to inject.
"""
self.api_key = api_key
self.org_id = org_id
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Add authentication headers to request.
Args:
message: The outbound request.
Returns:
Request with auth headers.
"""
message.headers["X-Custom-API-Key"] = self.api_key
message.headers["X-Organization-ID"] = self.org_id
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Pass through inbound response.
Args:
message: The inbound response.
Returns:
The response unchanged.
"""
return message
class TestOpenAIAuthInterceptor:
"""Test suite for authentication interceptor with OpenAI."""
def test_auth_interceptor_with_openai(self) -> None:
"""Test that auth interceptor can be used with OpenAI LLM."""
interceptor = AuthInterceptor(api_key="custom-key-123", org_id="org-456")
llm = LLM(model="gpt-4", interceptor=interceptor)
assert llm.interceptor is interceptor
assert llm.interceptor.api_key == "custom-key-123"
assert llm.interceptor.org_id == "org-456"
def test_auth_interceptor_adds_headers(self) -> None:
"""Test that auth interceptor adds custom headers to requests."""
interceptor = AuthInterceptor(api_key="test-key", org_id="test-org")
request = httpx.Request("POST", "https://api.openai.com/v1/chat/completions")
modified_request = interceptor.on_outbound(request)
assert "X-Custom-API-Key" in modified_request.headers
assert modified_request.headers["X-Custom-API-Key"] == "test-key"
assert "X-Organization-ID" in modified_request.headers
assert modified_request.headers["X-Organization-ID"] == "test-org"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_auth_interceptor_with_real_call(self) -> None:
"""Test that auth interceptor works with real OpenAI API call."""
interceptor = AuthInterceptor(api_key="custom-123", org_id="org-789")
llm = LLM(model="gpt-4o-mini", interceptor=interceptor)
# Make a simple call
result = llm.call(
messages=[{"role": "user", "content": "Reply with just the word: SUCCESS"}]
)
# Verify the call succeeded
assert result is not None
assert len(result) > 0
# Verify headers were added to outbound requests
# (We can't directly inspect the request sent to OpenAI in this test,
# but we verify the interceptor was configured and the call succeeded)
assert llm.interceptor is interceptor
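        # To assert on the exact headers that were sent, the interceptor could
        # record them itself (as OpenAITestInterceptor does above), or the VCR
        # cassette could be inspected after the run.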

View File

@@ -0,0 +1,248 @@
"""Tests for transport layer with interceptor integration."""
from unittest.mock import Mock
import httpx
import pytest
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llms.hooks.transport import AsyncHTTPransport, HTTPTransport
class TrackingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Test interceptor that tracks all calls."""
def __init__(self) -> None:
"""Initialize tracking lists."""
self.outbound_calls: list[httpx.Request] = []
self.inbound_calls: list[httpx.Response] = []
self.async_outbound_calls: list[httpx.Request] = []
self.async_inbound_calls: list[httpx.Response] = []
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Track outbound calls and add header.
Args:
message: The outbound request.
Returns:
Modified request with tracking header.
"""
self.outbound_calls.append(message)
message.headers["X-Intercepted-Sync"] = "true"
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Track inbound calls.
Args:
message: The inbound response.
Returns:
The response with tracking header.
"""
self.inbound_calls.append(message)
message.headers["X-Response-Intercepted-Sync"] = "true"
return message
async def aon_outbound(self, message: httpx.Request) -> httpx.Request:
"""Track async outbound calls and add header.
Args:
message: The outbound request.
Returns:
Modified request with tracking header.
"""
self.async_outbound_calls.append(message)
message.headers["X-Intercepted-Async"] = "true"
return message
async def aon_inbound(self, message: httpx.Response) -> httpx.Response:
"""Track async inbound calls.
Args:
message: The inbound response.
Returns:
The response with tracking header.
"""
self.async_inbound_calls.append(message)
message.headers["X-Response-Intercepted-Async"] = "true"
return message
class TestHTTPTransport:
"""Test suite for sync HTTPTransport with interceptor."""
def test_transport_instantiation(self) -> None:
"""Test that transport can be instantiated with interceptor."""
interceptor = TrackingInterceptor()
transport = HTTPTransport(interceptor=interceptor)
assert transport.interceptor is interceptor
def test_transport_requires_interceptor(self) -> None:
"""Test that transport requires interceptor parameter."""
# HTTPTransport requires an interceptor parameter
with pytest.raises(TypeError):
HTTPTransport()
def test_interceptor_called_on_request(self) -> None:
"""Test that interceptor hooks are called during request handling."""
interceptor = TrackingInterceptor()
transport = HTTPTransport(interceptor=interceptor)
# Create a mock parent transport that returns a response
mock_response = httpx.Response(200, json={"success": True})
mock_parent_handle = Mock(return_value=mock_response)
# Monkey-patch the parent's handle_request
original_handle = httpx.HTTPTransport.handle_request
httpx.HTTPTransport.handle_request = mock_parent_handle
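        # (pytest's monkeypatch fixture would undo this patch automatically;
        # the manual try/finally below provides the same cleanup.)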
try:
request = httpx.Request("GET", "https://api.example.com/test")
response = transport.handle_request(request)
# Verify interceptor was called
assert len(interceptor.outbound_calls) == 1
assert len(interceptor.inbound_calls) == 1
assert interceptor.outbound_calls[0] is request
assert interceptor.inbound_calls[0] is response
# Verify headers were added
assert "X-Intercepted-Sync" in request.headers
assert request.headers["X-Intercepted-Sync"] == "true"
assert "X-Response-Intercepted-Sync" in response.headers
assert response.headers["X-Response-Intercepted-Sync"] == "true"
finally:
# Restore original method
httpx.HTTPTransport.handle_request = original_handle
class TestAsyncHTTPTransport:
"""Test suite for async AsyncHTTPransport with interceptor."""
def test_async_transport_instantiation(self) -> None:
"""Test that async transport can be instantiated with interceptor."""
interceptor = TrackingInterceptor()
transport = AsyncHTTPransport(interceptor=interceptor)
assert transport.interceptor is interceptor
def test_async_transport_requires_interceptor(self) -> None:
"""Test that async transport requires interceptor parameter."""
# AsyncHTTPransport requires an interceptor parameter
with pytest.raises(TypeError):
AsyncHTTPransport()
@pytest.mark.asyncio
async def test_async_interceptor_called_on_request(self) -> None:
"""Test that async interceptor hooks are called during request handling."""
interceptor = TrackingInterceptor()
transport = AsyncHTTPransport(interceptor=interceptor)
# Create a mock parent transport that returns a response
mock_response = httpx.Response(200, json={"success": True})
async def mock_handle(*args, **kwargs):
return mock_response
mock_parent_handle = Mock(side_effect=mock_handle)
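        # Calling this mock invokes side_effect and returns the coroutine it
        # produces, which handle_async_request then awaits; AsyncMock with
        # return_value=mock_response would behave the same way.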
# Monkey-patch the parent's handle_async_request
original_handle = httpx.AsyncHTTPTransport.handle_async_request
httpx.AsyncHTTPTransport.handle_async_request = mock_parent_handle
try:
request = httpx.Request("GET", "https://api.example.com/test")
response = await transport.handle_async_request(request)
# Verify async interceptor was called
assert len(interceptor.async_outbound_calls) == 1
assert len(interceptor.async_inbound_calls) == 1
assert interceptor.async_outbound_calls[0] is request
assert interceptor.async_inbound_calls[0] is response
# Verify sync interceptor was NOT called
assert len(interceptor.outbound_calls) == 0
assert len(interceptor.inbound_calls) == 0
# Verify async headers were added
assert "X-Intercepted-Async" in request.headers
assert request.headers["X-Intercepted-Async"] == "true"
assert "X-Response-Intercepted-Async" in response.headers
assert response.headers["X-Response-Intercepted-Async"] == "true"
finally:
# Restore original method
httpx.AsyncHTTPTransport.handle_async_request = original_handle
class TestTransportIntegration:
"""Test suite for transport integration scenarios."""
def test_multiple_requests_same_interceptor(self) -> None:
"""Test that multiple requests through same interceptor are tracked."""
interceptor = TrackingInterceptor()
transport = HTTPTransport(interceptor=interceptor)
mock_response = httpx.Response(200)
mock_parent_handle = Mock(return_value=mock_response)
original_handle = httpx.HTTPTransport.handle_request
httpx.HTTPTransport.handle_request = mock_parent_handle
try:
# Make multiple requests
requests = [
httpx.Request("GET", "https://api.example.com/1"),
httpx.Request("POST", "https://api.example.com/2"),
httpx.Request("PUT", "https://api.example.com/3"),
]
for req in requests:
transport.handle_request(req)
# Verify all requests were intercepted
assert len(interceptor.outbound_calls) == 3
assert len(interceptor.inbound_calls) == 3
assert interceptor.outbound_calls == requests
finally:
httpx.HTTPTransport.handle_request = original_handle
@pytest.mark.asyncio
async def test_multiple_async_requests_same_interceptor(self) -> None:
"""Test that multiple async requests through same interceptor are tracked."""
interceptor = TrackingInterceptor()
transport = AsyncHTTPransport(interceptor=interceptor)
mock_response = httpx.Response(200)
async def mock_handle(*args, **kwargs):
return mock_response
mock_parent_handle = Mock(side_effect=mock_handle)
original_handle = httpx.AsyncHTTPTransport.handle_async_request
httpx.AsyncHTTPTransport.handle_async_request = mock_parent_handle
try:
# Make multiple async requests
requests = [
httpx.Request("GET", "https://api.example.com/1"),
httpx.Request("POST", "https://api.example.com/2"),
httpx.Request("DELETE", "https://api.example.com/3"),
]
for req in requests:
await transport.handle_async_request(req)
# Verify all requests were intercepted
assert len(interceptor.async_outbound_calls) == 3
assert len(interceptor.async_inbound_calls) == 3
assert interceptor.async_outbound_calls == requests
finally:
httpx.AsyncHTTPTransport.handle_async_request = original_handle
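# Minimal usage sketch (assuming the same public API exercised above): handing
# the transport to an httpx client routes every request/response pair through
# the interceptor hooks.
#
#     client = httpx.Client(transport=HTTPTransport(interceptor=TrackingInterceptor()))
#     client.get("https://api.example.com/test")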

View File

@@ -0,0 +1,319 @@
"""Tests for interceptor behavior with unsupported providers."""
import os
import httpx
import pytest
from crewai.llm import LLM
from crewai.llms.hooks.base import BaseInterceptor
@pytest.fixture(autouse=True)
def setup_provider_api_keys(monkeypatch):
"""Set dummy API keys for providers that require them."""
if "OPENAI_API_KEY" not in os.environ:
monkeypatch.setenv("OPENAI_API_KEY", "sk-test-key-dummy")
if "ANTHROPIC_API_KEY" not in os.environ:
monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-test-key-dummy")
if "GOOGLE_API_KEY" not in os.environ:
monkeypatch.setenv("GOOGLE_API_KEY", "test-google-key-dummy")
class DummyInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Simple dummy interceptor for testing."""
    def on_outbound(self, message: httpx.Request) -> httpx.Request:
        """Tag the outbound request.
        Args:
            message: The outbound request.
        Returns:
            The request with an X-Dummy marker header added.
        """
        message.headers["X-Dummy"] = "true"
        return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Pass through inbound response.
Args:
message: The inbound response.
Returns:
The response unchanged.
"""
return message
class TestAzureProviderInterceptor:
"""Test suite for Azure provider with interceptor (unsupported)."""
    def test_azure_llm_rejects_interceptor_parameter(self) -> None:
"""Test that Azure LLM raises NotImplementedError with interceptor."""
interceptor = DummyInterceptor()
# Azure provider should raise NotImplementedError
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="azure/gpt-4",
interceptor=interceptor,
api_key="test-key",
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
)
assert "interceptor" in str(exc_info.value).lower()
def test_azure_raises_not_implemented_on_initialization(self) -> None:
"""Test that Azure raises NotImplementedError when interceptor is used."""
interceptor = DummyInterceptor()
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="azure/gpt-4",
interceptor=interceptor,
api_key="test-key",
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
)
error_msg = str(exc_info.value).lower()
assert "interceptor" in error_msg
assert "azure" in error_msg
def test_azure_without_interceptor_works(self) -> None:
"""Test that Azure LLM works without interceptor."""
llm = LLM(
model="azure/gpt-4",
api_key="test-key",
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
)
        # Azure provider either lacks the interceptor attribute or leaves it unset
assert not hasattr(llm, 'interceptor') or llm.interceptor is None
class TestBedrockProviderInterceptor:
"""Test suite for Bedrock provider with interceptor (unsupported)."""
    def test_bedrock_llm_rejects_interceptor_parameter(self) -> None:
"""Test that Bedrock LLM raises NotImplementedError with interceptor."""
interceptor = DummyInterceptor()
# Bedrock provider should raise NotImplementedError
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
interceptor=interceptor,
aws_access_key_id="test-access-key",
aws_secret_access_key="test-secret-key",
aws_region_name="us-east-1",
)
error_msg = str(exc_info.value).lower()
assert "interceptor" in error_msg
assert "bedrock" in error_msg
def test_bedrock_raises_not_implemented_on_initialization(self) -> None:
"""Test that Bedrock raises NotImplementedError when interceptor is used."""
interceptor = DummyInterceptor()
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
interceptor=interceptor,
aws_access_key_id="test-access-key",
aws_secret_access_key="test-secret-key",
aws_region_name="us-east-1",
)
error_msg = str(exc_info.value).lower()
assert "interceptor" in error_msg
assert "bedrock" in error_msg
def test_bedrock_without_interceptor_works(self) -> None:
"""Test that Bedrock LLM works without interceptor."""
llm = LLM(
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
aws_access_key_id="test-access-key",
aws_secret_access_key="test-secret-key",
aws_region_name="us-east-1",
)
        # Bedrock provider either lacks the interceptor attribute or leaves it unset
assert not hasattr(llm, 'interceptor') or llm.interceptor is None
class TestGeminiProviderInterceptor:
"""Test suite for Gemini provider with interceptor (unsupported)."""
    def test_gemini_llm_rejects_interceptor_parameter(self) -> None:
"""Test that Gemini LLM raises NotImplementedError with interceptor."""
interceptor = DummyInterceptor()
# Gemini provider should raise NotImplementedError
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="gemini/gemini-pro",
interceptor=interceptor,
api_key="test-gemini-key",
)
error_msg = str(exc_info.value).lower()
assert "interceptor" in error_msg
assert "gemini" in error_msg
def test_gemini_raises_not_implemented_on_initialization(self) -> None:
"""Test that Gemini raises NotImplementedError when interceptor is used."""
interceptor = DummyInterceptor()
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="gemini/gemini-pro",
interceptor=interceptor,
api_key="test-gemini-key",
)
error_msg = str(exc_info.value).lower()
assert "interceptor" in error_msg
assert "gemini" in error_msg
def test_gemini_without_interceptor_works(self) -> None:
"""Test that Gemini LLM works without interceptor."""
llm = LLM(
model="gemini/gemini-pro",
api_key="test-gemini-key",
)
        # Gemini provider either lacks the interceptor attribute or leaves it unset
assert not hasattr(llm, 'interceptor') or llm.interceptor is None
class TestUnsupportedProviderMessages:
"""Test suite for error messages from unsupported providers."""
def test_azure_error_message_is_clear(self) -> None:
"""Test that Azure error message clearly states lack of support."""
interceptor = DummyInterceptor()
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="azure/gpt-4",
interceptor=interceptor,
api_key="test-key",
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
)
error_message = str(exc_info.value).lower()
assert "azure" in error_message
assert "interceptor" in error_message
def test_bedrock_error_message_is_clear(self) -> None:
"""Test that Bedrock error message clearly states lack of support."""
interceptor = DummyInterceptor()
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
interceptor=interceptor,
aws_access_key_id="test-access-key",
aws_secret_access_key="test-secret-key",
aws_region_name="us-east-1",
)
error_message = str(exc_info.value).lower()
assert "bedrock" in error_message
assert "interceptor" in error_message
def test_gemini_error_message_is_clear(self) -> None:
"""Test that Gemini error message clearly states lack of support."""
interceptor = DummyInterceptor()
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="gemini/gemini-pro",
interceptor=interceptor,
api_key="test-gemini-key",
)
error_message = str(exc_info.value).lower()
assert "gemini" in error_message
assert "interceptor" in error_message
class TestProviderSupportMatrix:
"""Test suite to document which providers support interceptors."""
def test_supported_providers_accept_interceptor(self) -> None:
"""Test that supported providers accept and use interceptors."""
interceptor = DummyInterceptor()
# OpenAI - SUPPORTED
openai_llm = LLM(model="gpt-4", interceptor=interceptor)
assert openai_llm.interceptor is interceptor
# Anthropic - SUPPORTED
anthropic_llm = LLM(model="anthropic/claude-3-opus-20240229", interceptor=interceptor)
assert anthropic_llm.interceptor is interceptor
def test_unsupported_providers_raise_error(self) -> None:
"""Test that unsupported providers raise NotImplementedError."""
interceptor = DummyInterceptor()
# Azure - NOT SUPPORTED
with pytest.raises(NotImplementedError):
LLM(
model="azure/gpt-4",
interceptor=interceptor,
api_key="test",
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
)
# Bedrock - NOT SUPPORTED
with pytest.raises(NotImplementedError):
LLM(
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
interceptor=interceptor,
aws_access_key_id="test",
aws_secret_access_key="test",
aws_region_name="us-east-1",
)
# Gemini - NOT SUPPORTED
with pytest.raises(NotImplementedError):
LLM(
model="gemini/gemini-pro",
interceptor=interceptor,
api_key="test",
)
def test_all_providers_work_without_interceptor(self) -> None:
"""Test that all providers work normally without interceptor."""
# OpenAI
openai_llm = LLM(model="gpt-4")
assert openai_llm.interceptor is None
# Anthropic
anthropic_llm = LLM(model="anthropic/claude-3-opus-20240229")
assert anthropic_llm.interceptor is None
        # Azure - interceptor attribute absent or unset
azure_llm = LLM(
model="azure/gpt-4",
api_key="test",
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
)
assert not hasattr(azure_llm, 'interceptor') or azure_llm.interceptor is None
        # Bedrock - interceptor attribute absent or unset
bedrock_llm = LLM(
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
aws_access_key_id="test",
aws_secret_access_key="test",
aws_region_name="us-east-1",
)
assert not hasattr(bedrock_llm, 'interceptor') or bedrock_llm.interceptor is None
        # Gemini - interceptor attribute absent or unset
gemini_llm = LLM(model="gemini/gemini-pro", api_key="test")
assert not hasattr(gemini_llm, 'interceptor') or gemini_llm.interceptor is None
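        # Net support matrix exercised by this suite:
        #   OpenAI    -> interceptor honored
        #   Anthropic -> interceptor honored
        #   Azure     -> NotImplementedError at construction
        #   Bedrock   -> NotImplementedError at construction
        #   Gemini    -> NotImplementedError at construction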