Mirror of https://github.com/crewAIInc/crewAI.git
Synced 2026-01-08 23:58:34 +00:00
Compare commits: devin/1737...devin/1745 (1 commit)
Commit 819bd0b3b2
@@ -196,9 +196,11 @@ class Agent(BaseAgent):
         else:
             # For any other type, attempt to extract relevant attributes
             llm_params = {
-                "model": getattr(self.llm, "model_name", None)
-                or getattr(self.llm, "deployment_name", None)
-                or str(self.llm),
+                "model": self._normalize_model_name(
+                    getattr(self.llm, "model_name", None)
+                    or getattr(self.llm, "deployment_name", None)
+                    or str(self.llm)
+                ),
                 "temperature": getattr(self.llm, "temperature", None),
                 "max_tokens": getattr(self.llm, "max_tokens", None),
                 "logprobs": getattr(self.llm, "logprobs", None),
@@ -534,5 +536,14 @@ class Agent(BaseAgent):
     def __tools_names(tools) -> str:
         return ", ".join([t.name for t in tools])

+    def _normalize_model_name(self, model_name):
+        """
+        Normalize the model name by removing any 'models/' prefix.
+        This fixes the issue with ChatGoogleGenerativeAI and potentially other LLM providers.
+        """
+        if model_name and isinstance(model_name, str) and model_name.startswith("models/"):
+            return model_name[7:]  # Remove "models/" prefix
+        return model_name
+
     def __repr__(self):
         return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
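Taken together, these two hunks route whatever model identifier is pulled off the wrapped LLM object (model_name, then deployment_name, then str(self.llm)) through _normalize_model_name before it reaches litellm. A minimal standalone sketch of that logic, written outside the Agent class and with a SimpleNamespace standing in for an LLM wrapper (the attribute values are illustrative assumptions, not crewAI's API):

    # Standalone sketch of the normalization added above; names and values
    # here are illustrative, not crewAI's API.
    from types import SimpleNamespace

    def normalize_model_name(model_name):
        # Strip a leading "models/" prefix, as reported by e.g. ChatGoogleGenerativeAI.
        if model_name and isinstance(model_name, str) and model_name.startswith("models/"):
            return model_name[len("models/"):]
        return model_name

    # A provider object that exposes a prefixed model name.
    llm = SimpleNamespace(model_name="models/gemini/gemini-1.5-flash", temperature=0.7)

    model = normalize_model_name(
        getattr(llm, "model_name", None)
        or getattr(llm, "deployment_name", None)
        or str(llm)
    )
    print(model)  # -> "gemini/gemini-1.5-flash"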
@@ -3,7 +3,6 @@ import re
 from dataclasses import dataclass
 from typing import Any, Dict, List, Union

-from litellm import AuthenticationError as LiteLLMAuthenticationError
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
 from crewai.agents.parser import (
@@ -198,19 +197,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             return self._invoke_loop(formatted_answer)

         except Exception as e:
-            if isinstance(e, LiteLLMAuthenticationError):
-                self._logger.log(
-                    level="error",
-                    message="Authentication error with litellm occurred. Please check your API key and configuration.",
-                    color="red",
-                )
-                self._logger.log(
-                    level="error",
-                    message=f"Error details: {str(e)}",
-                    color="red",
-                )
-                raise e
-            elif LLMContextLengthExceededException(str(e))._is_context_limit_error(
+            if LLMContextLengthExceededException(str(e))._is_context_limit_error(
                 str(e)
             ):
                 self._handle_context_length()
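With the dedicated LiteLLMAuthenticationError branch removed, the executor's except block special-cases only context-window overflows; any other exception, including provider authentication failures, now propagates unchanged to the caller. A rough sketch of the resulting control flow, using hypothetical names (ContextLimitError, run_loop) rather than the crewAI internals, which match on the error message via LLMContextLengthExceededException instead of an exception type:

    # Hypothetical control-flow sketch; ContextLimitError and run_loop are
    # stand-ins, not crewAI names.
    class ContextLimitError(Exception):
        """Raised when the prompt no longer fits the model's context window."""

    def invoke_once(run_loop, handle_context_length):
        try:
            return run_loop()
        except Exception as exc:
            if isinstance(exc, ContextLimitError):
                # Shrink the conversation (summarize/truncate) and let the caller retry.
                handle_context_length()
                return None
            # Everything else (auth errors, rate limits, ...) is re-raised untouched.
            raise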
@@ -1308,115 +1308,6 @@ def test_llm_call_with_error():
         llm.call(messages)


-@pytest.mark.vcr(filter_headers=["authorization"])
-def test_litellm_auth_error_handling():
-    """Test that LiteLLM authentication errors are handled correctly and not retried."""
-    from litellm import AuthenticationError as LiteLLMAuthenticationError
-
-    # Create an agent with a mocked LLM and max_retry_limit=0
-    agent = Agent(
-        role="test role",
-        goal="test goal",
-        backstory="test backstory",
-        llm=LLM(model="gpt-4"),
-        max_retry_limit=0,  # Disable retries for authentication errors
-        max_iter=1,  # Limit to one iteration to prevent multiple calls
-    )
-
-    # Create a task
-    task = Task(
-        description="Test task",
-        expected_output="Test output",
-        agent=agent,
-    )
-
-    # Mock the LLM call to raise LiteLLMAuthenticationError
-    with (
-        patch.object(LLM, "call") as mock_llm_call,
-        pytest.raises(LiteLLMAuthenticationError, match="Invalid API key"),
-    ):
-        mock_llm_call.side_effect = LiteLLMAuthenticationError(
-            message="Invalid API key",
-            llm_provider="openai",
-            model="gpt-4"
-        )
-        agent.execute_task(task)
-
-    # Verify the call was only made once (no retries)
-    mock_llm_call.assert_called_once()
-
-
-@pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_agent_executor_litellm_auth_error():
-    """Test that CrewAgentExecutor properly identifies and handles LiteLLM authentication errors."""
-    from litellm import AuthenticationError as LiteLLMAuthenticationError
-    from crewai.utilities import Logger
-    from crewai.agents.tools_handler import ToolsHandler
-
-    # Create an agent and executor with max_retry_limit=0
-    agent = Agent(
-        role="test role",
-        goal="test goal",
-        backstory="test backstory",
-        llm=LLM(model="gpt-4"),
-        max_retry_limit=0,  # Disable retries for authentication errors
-    )
-    task = Task(
-        description="Test task",
-        expected_output="Test output",
-        agent=agent,
-    )
-
-    # Create executor with all required parameters
-    executor = CrewAgentExecutor(
-        agent=agent,
-        task=task,
-        llm=agent.llm,
-        crew=None,
-        prompt={
-            "system": "You are a test agent",
-            "user": "Execute the task: {input}"
-        },
-        max_iter=5,
-        tools=[],
-        tools_names="",
-        stop_words=[],
-        tools_description="",
-        tools_handler=ToolsHandler(),
-    )
-
-    # Mock the LLM call to raise LiteLLMAuthenticationError
-    with (
-        patch.object(LLM, "call") as mock_llm_call,
-        patch.object(Logger, "log") as mock_logger,
-        pytest.raises(LiteLLMAuthenticationError, match="Invalid API key"),
-    ):
-        mock_llm_call.side_effect = LiteLLMAuthenticationError(
-            message="Invalid API key",
-            llm_provider="openai",
-            model="gpt-4"
-        )
-        executor.invoke({
-            "input": "test input",
-            "tool_names": "",  # Required template variable
-            "tools": "",  # Required template variable
-        })
-
-    # Verify error handling
-    mock_logger.assert_any_call(
-        level="error",
-        message="Authentication error with litellm occurred. Please check your API key and configuration.",
-        color="red",
-    )
-    mock_logger.assert_any_call(
-        level="error",
-        message="Error details: litellm.AuthenticationError: Invalid API key",
-        color="red",
-    )
-    # Verify the call was only made once (no retries)
-    mock_llm_call.assert_called_once()
-
-
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_handle_context_length_exceeds_limit():
     agent = Agent(
tests/test_agent_model_name.py (new file, 39 lines)

@@ -0,0 +1,39 @@
+import pytest
+from unittest.mock import MagicMock, patch
+from crewai import Agent
+from crewai.llm import LLM
+
+
+def test_normalize_model_name_method():
+    """Test that the _normalize_model_name method correctly handles model names with 'models/' prefix"""
+    agent = Agent(
+        role="Test Agent",
+        goal="Test goal",
+        backstory="Test backstory",
+        llm="gpt-4"
+    )
+
+    model_with_prefix = "models/gemini/gemini-1.5-flash"
+    normalized_name = agent._normalize_model_name(model_with_prefix)
+    assert normalized_name == "gemini/gemini-1.5-flash"
+
+    regular_model = "gpt-4"
+    assert agent._normalize_model_name(regular_model) == "gpt-4"
+
+    assert agent._normalize_model_name(None) is None
+
+    assert agent._normalize_model_name(123) == 123
+
+
+def test_agent_with_regular_model_name():
+    """Test that the Agent class doesn't modify normal model names"""
+    with patch('crewai.agent.LLM') as mock_llm:
+        agent = Agent(
+            role="Test Agent",
+            goal="Test goal",
+            backstory="Test backstory",
+            llm="gpt-4"
+        )
+
+        args, kwargs = mock_llm.call_args
+        assert kwargs["model"] == "gpt-4"
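One detail worth noting about the new helper as shown in the agent.py hunk: only a leading "models/" prefix is stripped (startswith plus a fixed seven-character slice), so the same substring appearing later in a name is left alone. A small hedged check in the same style as the tests above, assuming _normalize_model_name behaves exactly as in that hunk:

    # Assumes _normalize_model_name behaves as shown in the agent.py hunk above.
    from crewai import Agent

    agent = Agent(
        role="Test Agent",
        goal="Test goal",
        backstory="Test backstory",
        llm="gpt-4",
    )

    # Only one leading "models/" is removed; later occurrences are untouched.
    assert agent._normalize_model_name("models/models/gemini-1.5-flash") == "models/gemini-1.5-flash"
    # A "models/" segment that is not at the start is left as-is.
    assert agent._normalize_model_name("azure/models/gpt-4o") == "azure/models/gpt-4o"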