Check the right property

This commit is contained in:
Brandon Hancock
2025-02-18 15:31:12 -05:00
parent 7dc47adb5c
commit 4f06880015

View File

@@ -181,14 +181,14 @@ class LLM:
def _is_anthropic_model(self, model: str) -> bool: def _is_anthropic_model(self, model: str) -> bool:
"""Determine if the model is from Anthropic provider. """Determine if the model is from Anthropic provider.
Args: Args:
model: The model identifier string. model: The model identifier string.
Returns: Returns:
bool: True if the model is from Anthropic, False otherwise. bool: True if the model is from Anthropic, False otherwise.
""" """
ANTHROPIC_PREFIXES = ('anthropic/', 'claude-', 'claude/') ANTHROPIC_PREFIXES = ("anthropic/", "claude-", "claude/")
return any(prefix in model.lower() for prefix in ANTHROPIC_PREFIXES) return any(prefix in model.lower() for prefix in ANTHROPIC_PREFIXES)
def call( def call(
@@ -199,7 +199,7 @@ class LLM:
available_functions: Optional[Dict[str, Any]] = None, available_functions: Optional[Dict[str, Any]] = None,
) -> Union[str, Any]: ) -> Union[str, Any]:
"""High-level LLM call method. """High-level LLM call method.
Args: Args:
messages: Input messages for the LLM. messages: Input messages for the LLM.
Can be a string or list of message dictionaries. Can be a string or list of message dictionaries.
@@ -211,22 +211,22 @@ class LLM:
during and after the LLM call. during and after the LLM call.
available_functions: Optional dict mapping function names to callables available_functions: Optional dict mapping function names to callables
that can be invoked by the LLM. that can be invoked by the LLM.
Returns: Returns:
Union[str, Any]: Either a text response from the LLM (str) or Union[str, Any]: Either a text response from the LLM (str) or
the result of a tool function call (Any). the result of a tool function call (Any).
Raises: Raises:
TypeError: If messages format is invalid TypeError: If messages format is invalid
ValueError: If response format is not supported ValueError: If response format is not supported
LLMContextLengthExceededException: If input exceeds model's context limit LLMContextLengthExceededException: If input exceeds model's context limit
Examples: Examples:
# Example 1: Simple string input # Example 1: Simple string input
>>> response = llm.call("Return the name of a random city.") >>> response = llm.call("Return the name of a random city.")
>>> print(response) >>> print(response)
"Paris" "Paris"
# Example 2: Message list with system and user messages # Example 2: Message list with system and user messages
>>> messages = [ >>> messages = [
... {"role": "system", "content": "You are a geography expert"}, ... {"role": "system", "content": "You are a geography expert"},
@@ -348,36 +348,40 @@ class LLM:
logging.error(f"LiteLLM call failed: {str(e)}") logging.error(f"LiteLLM call failed: {str(e)}")
raise raise
def _format_messages_for_provider(self, messages: List[Dict[str, str]]) -> List[Dict[str, str]]: def _format_messages_for_provider(
self, messages: List[Dict[str, str]]
) -> List[Dict[str, str]]:
"""Format messages according to provider requirements. """Format messages according to provider requirements.
Args: Args:
messages: List of message dictionaries with 'role' and 'content' keys. messages: List of message dictionaries with 'role' and 'content' keys.
Can be empty or None. Can be empty or None.
Returns: Returns:
List of formatted messages according to provider requirements. List of formatted messages according to provider requirements.
For Anthropic models, ensures first message has 'user' role. For Anthropic models, ensures first message has 'user' role.
Raises: Raises:
TypeError: If messages is None or contains invalid message format. TypeError: If messages is None or contains invalid message format.
""" """
if messages is None: if messages is None:
raise TypeError("Messages cannot be None") raise TypeError("Messages cannot be None")
# Validate message format first # Validate message format first
for msg in messages: for msg in messages:
if not isinstance(msg, dict) or "role" not in msg or "content" not in msg: if not isinstance(msg, dict) or "role" not in msg or "content" not in msg:
raise TypeError("Invalid message format. Each message must be a dict with 'role' and 'content' keys") raise TypeError(
"Invalid message format. Each message must be a dict with 'role' and 'content' keys"
)
if not self.is_anthropic: if not self.is_anthropic:
return messages return messages
# Anthropic requires messages to start with 'user' role # Anthropic requires messages to start with 'user' role
if not messages or messages[0]["role"] == "system": if not messages or messages[0]["role"] == "system":
# If first message is system or empty, add a placeholder user message # If first message is system or empty, add a placeholder user message
return [{"role": "user", "content": "."}, *messages] return [{"role": "user", "content": "."}, *messages]
return messages return messages
def _get_custom_llm_provider(self) -> str: def _get_custom_llm_provider(self) -> str:
@@ -413,7 +417,7 @@ class LLM:
def supports_function_calling(self) -> bool:
    """Report whether the configured model supports tool/function calling.

    Returns:
        bool: True when "tools" is among the model's supported OpenAI
        params; False when it is not, or when the lookup fails for any
        reason (the error is logged rather than propagated).
    """
    try:
        # Membership test stays inside the try: the params lookup (or a
        # None result) may raise, and any failure maps to False.
        return "tools" in get_supported_openai_params(model=self.model)
    except Exception as e:
        logging.error(f"Failed to get supported params: {str(e)}")
        return False