Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-17 04:48:30 +00:00
Compare commits: fix/consol ... bugfix/dro (7 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 78164b4f73 | |
| | a367a96ab9 | |
| | 63ce0c91f9 | |
| | e125b136b9 | |
| | 63fcc74faf | |
| | 0cba344976 | |
| | 319da2129a | |
```diff
@@ -1,15 +1,12 @@
-import os
 import shutil
 import subprocess
 from typing import Any, Dict, List, Literal, Optional, Union
 
-from litellm import AuthenticationError as LiteLLMAuthenticationError
 from pydantic import Field, InstanceOf, PrivateAttr, model_validator
 
 from crewai.agents import CacheHandler
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.crew_agent_executor import CrewAgentExecutor
-from crewai.cli.constants import ENV_VARS, LITELLM_PARAMS
 from crewai.knowledge.knowledge import Knowledge
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
 from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
```
```diff
@@ -262,8 +259,8 @@ class Agent(BaseAgent):
                 }
             )["output"]
         except Exception as e:
-            if isinstance(e, LiteLLMAuthenticationError):
-                # Do not retry on authentication errors
+            if e.__class__.__module__.startswith("litellm"):
+                # Do not retry on litellm errors
                 raise e
             self._times_executed += 1
             if self._times_executed > self.max_retry_limit:
```
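The two hunks above replace the `isinstance(e, LiteLLMAuthenticationError)` check with a module-name test, so any exception class defined inside the litellm package skips the retry path. A minimal, self-contained sketch of that check; the exception class below is a hypothetical local stand-in (not a real litellm import) so the snippet runs without litellm installed:

```python
# Sketch of the module-prefix check introduced above.
# FakeLiteLLMError is a hypothetical stand-in; its __module__ is overridden
# to mimic a class defined inside the litellm package.
class FakeLiteLLMError(Exception):
    pass


FakeLiteLLMError.__module__ = "litellm.exceptions"


def should_retry(exc: Exception) -> bool:
    """Retry only when the error did not originate from a litellm module."""
    return not exc.__class__.__module__.startswith("litellm")


print(should_retry(FakeLiteLLMError("Invalid API key")))   # False: litellm error, re-raise immediately
print(should_retry(ValueError("transient parsing issue"))) # True: eligible for the normal retry path
```

Unlike the old isinstance check, this also catches litellm error types that were never imported here, such as the AnthropicError exercised by the new test further down.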
```diff
@@ -3,8 +3,6 @@ import re
 from dataclasses import dataclass
 from typing import Any, Callable, Dict, List, Optional, Union
 
-from litellm.exceptions import AuthenticationError as LiteLLMAuthenticationError
-
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
 from crewai.agents.parser import (
```
```diff
@@ -103,6 +101,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         try:
             formatted_answer = self._invoke_loop()
         except Exception as e:
+            if e.__class__.__module__.startswith("litellm"):
+                # Do not retry on litellm errors
+                raise e
+            else:
+                self._handle_unknown_error(e)
             raise e
 
         if self.ask_for_human_input:
```
```diff
@@ -146,6 +149,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 formatted_answer = self._handle_output_parser_exception(e)
 
             except Exception as e:
+                if e.__class__.__module__.startswith("litellm"):
+                    # Do not retry on litellm errors
+                    raise e
                 if self._is_context_length_exceeded(e):
                     self._handle_context_length()
                     continue
```
```diff
@@ -1,3 +1,11 @@
+import warnings
+
+warnings.filterwarnings(
+    "ignore",
+    message="Valid config keys have changed in V2*",
+    category=UserWarning,
+)
+
 import os
 from importlib.metadata import version as get_version
 from typing import Optional, Tuple
```
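The added filter runs before the other imports so the Pydantic "Valid config keys have changed in V2" UserWarning is silenced as early as possible; `filterwarnings` treats `message` as a regular expression matched against the start of the warning text. A small standalone sketch of the same mechanism, with the warnings emitted manually for illustration:

```python
import warnings

# Same filter as in the diff: ignore UserWarnings whose message starts with
# "Valid config keys have changed in V2".
warnings.filterwarnings(
    "ignore",
    message="Valid config keys have changed in V2*",
    category=UserWarning,
)

warnings.warn("Valid config keys have changed in V2: 'fields'", UserWarning)  # silenced by the filter
warnings.warn("Some unrelated warning", UserWarning)  # still printed to stderr
```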
```diff
@@ -11,6 +11,7 @@ def run_crew() -> None:
     """
     Run the crew by running a command in the UV environment.
     """
+    click.echo("Running crew hello...")
     command = ["uv", "run", "run_crew"]
     crewai_version = get_crewai_version()
     min_required_version = "0.71.0"
```
```diff
@@ -57,9 +57,6 @@ except ImportError:
     agentops = None
 
 
-warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
-
-
 class Crew(BaseModel):
     """
     Represents a group of agents, defining how they should collaborate and the tasks they should perform.
```
```diff
@@ -1623,7 +1623,7 @@ def test_litellm_auth_error_handling():
         agent=agent,
     )
 
-    # Mock the LLM call to raise LiteLLMAuthenticationError
+    # Mock the LLM call to raise AuthenticationError
     with (
         patch.object(LLM, "call") as mock_llm_call,
         pytest.raises(LiteLLMAuthenticationError, match="Invalid API key"),
```
```diff
@@ -1639,7 +1639,7 @@ def test_litellm_auth_error_handling():
 
 def test_crew_agent_executor_litellm_auth_error():
     """Test that CrewAgentExecutor handles LiteLLM authentication errors by raising them."""
-    from litellm import AuthenticationError as LiteLLMAuthenticationError
+    from litellm.exceptions import AuthenticationError
 
     from crewai.agents.tools_handler import ToolsHandler
     from crewai.utilities import Printer
```
```diff
@@ -1672,13 +1672,13 @@ def test_crew_agent_executor_litellm_auth_error():
         tools_handler=ToolsHandler(),
     )
 
-    # Mock the LLM call to raise LiteLLMAuthenticationError
+    # Mock the LLM call to raise AuthenticationError
     with (
         patch.object(LLM, "call") as mock_llm_call,
         patch.object(Printer, "print") as mock_printer,
-        pytest.raises(LiteLLMAuthenticationError, match="Invalid API key"),
+        pytest.raises(AuthenticationError) as exc_info,
     ):
-        mock_llm_call.side_effect = LiteLLMAuthenticationError(
+        mock_llm_call.side_effect = AuthenticationError(
             message="Invalid API key", llm_provider="openai", model="gpt-4"
         )
         executor.invoke(
```
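Switching from `pytest.raises(..., match="Invalid API key")` to `pytest.raises(AuthenticationError) as exc_info` captures an `ExceptionInfo` object, which the assertions added in the next hunk inspect through `.type` and `.value`. A minimal illustration of that pattern using a hypothetical local exception class instead of litellm's:

```python
import pytest


class DummyAuthError(Exception):
    """Hypothetical stand-in for an auth error carrying extra attributes."""

    def __init__(self, message: str, llm_provider: str, model: str):
        super().__init__(message)
        self.message = message
        self.llm_provider = llm_provider
        self.model = model


def test_exc_info_exposes_the_raised_exception():
    with pytest.raises(DummyAuthError) as exc_info:
        raise DummyAuthError("Invalid API key", llm_provider="openai", model="gpt-4")

    # .type is the exception class, .value the raised instance.
    assert exc_info.type is DummyAuthError
    assert "invalid api key" in exc_info.value.message.lower()
    assert exc_info.value.llm_provider == "openai"
    assert exc_info.value.model == "gpt-4"
```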
```diff
@@ -1689,14 +1689,53 @@ def test_crew_agent_executor_litellm_auth_error():
             }
         )
 
-    # Verify error handling
+    # Verify error handling messages
+    error_message = f"Error during LLM call: {str(mock_llm_call.side_effect)}"
     mock_printer.assert_any_call(
-        content="An unknown error occurred. Please check the details below.",
-        color="red",
-    )
-    mock_printer.assert_any_call(
-        content="Error details: litellm.AuthenticationError: Invalid API key",
+        content=error_message,
         color="red",
     )
+
     # Verify the call was only made once (no retries)
     mock_llm_call.assert_called_once()
+
+    # Assert that the exception was raised and has the expected attributes
+    assert exc_info.type is AuthenticationError
+    assert "Invalid API key".lower() in exc_info.value.message.lower()
+    assert exc_info.value.llm_provider == "openai"
+    assert exc_info.value.model == "gpt-4"
+
+
+def test_litellm_anthropic_error_handling():
+    """Test that AnthropicError from LiteLLM is handled correctly and not retried."""
+    from litellm.llms.anthropic.common_utils import AnthropicError
+
+    # Create an agent with a mocked LLM that uses an Anthropic model
+    agent = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+        llm=LLM(model="claude-3.5-sonnet-20240620"),
+        max_retry_limit=0,
+    )
+
+    # Create a task
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=agent,
+    )
+
+    # Mock the LLM call to raise AnthropicError
+    with (
+        patch.object(LLM, "call") as mock_llm_call,
+        pytest.raises(AnthropicError, match="Test Anthropic error"),
+    ):
+        mock_llm_call.side_effect = AnthropicError(
+            status_code=500,
+            message="Test Anthropic error",
+        )
+        agent.execute_task(task)
+
+    # Verify the LLM call was only made once (no retries)
+    mock_llm_call.assert_called_once()
```
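The new Anthropic test relies on `AnthropicError` being defined inside the litellm package, so the module-prefix check above re-raises it without retrying. A quick way to confirm that assumption locally (requires litellm installed; the import path is taken from the diff and may move between litellm versions):

```python
from litellm.llms.anthropic.common_utils import AnthropicError

# The error-handling change keys off the defining module of the exception class.
print(AnthropicError.__module__)                        # expected to start with "litellm"
print(AnthropicError.__module__.startswith("litellm"))  # True -> treated as a litellm error, never retried
```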