Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-06 22:58:30 +00:00)

Compare commits: gl/fix/age...devin/1753 (1 commit, 90afeae467)
@@ -23,7 +23,7 @@ dependencies = [
     # Data Handling
     "chromadb>=0.5.23",
     "tokenizers>=0.20.3",
-    "onnxruntime==1.22.0",
+    "onnxruntime>=1.19.0,<=1.22.0",
    "openpyxl>=3.1.5",
     "pyvis>=0.3.2",
     # Authentication and Security
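The hunk above relaxes the exact `onnxruntime==1.22.0` pin to a range; per the new `tests/test_macos_compatibility.py` added below, the intent is that any wheel in the 1.19.0-1.22.0 window can resolve, notably on macOS. As a hedged illustration (not part of the diff), the same constraint can be checked at runtime with the `packaging` library:

```python
# Hedged sketch: validate an installed onnxruntime version against the
# relaxed ">=1.19.0,<=1.22.0" constraint introduced above.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

ONNXRUNTIME_SPEC = SpecifierSet(">=1.19.0,<=1.22.0")

def onnxruntime_version_ok(installed: str) -> bool:
    """Return True if the installed version satisfies the relaxed range."""
    return Version(installed) in ONNXRUNTIME_SPEC

assert onnxruntime_version_ok("1.19.0")
assert onnxruntime_version_ok("1.22.0")
assert not onnxruntime_version_ok("1.18.1")
```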
@@ -1,18 +1,7 @@
 import shutil
 import subprocess
 import time
-from typing import (
-    Any,
-    Callable,
-    Dict,
-    List,
-    Literal,
-    Optional,
-    Sequence,
-    Tuple,
-    Type,
-    Union,
-)
+from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Type, Union
 
 from pydantic import Field, InstanceOf, PrivateAttr, model_validator
 
@@ -87,12 +76,6 @@ class Agent(BaseAgent):
     """
 
     _times_executed: int = PrivateAttr(default=0)
-    agent_executor: Optional[CrewAgentExecutor] = Field(
-        default=None,
-        init=False,  # Not included in __init__ as it's created dynamically in create_agent_executor()
-        exclude=True,  # Excluded from serialization to avoid circular references
-        description="The agent executor instance for running tasks. Created dynamically when needed.",
-    )
     max_execution_time: Optional[int] = Field(
         default=None,
         description="Maximum execution time for an agent to execute a task",
@@ -179,7 +162,7 @@ class Agent(BaseAgent):
     )
     guardrail: Optional[Union[Callable[[Any], Tuple[bool, Any]], str]] = Field(
         default=None,
-        description="Function or string description of a guardrail to validate agent output",
+        description="Function or string description of a guardrail to validate agent output"
     )
     guardrail_max_retries: int = Field(
         default=3, description="Maximum number of retries when guardrail fails"
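Per the annotation in the hunk above, `guardrail` accepts either a plain-string description or a callable returning a `Tuple[bool, Any]`. A hedged sketch of the callable form follows; the word-count rule is a made-up example, and the exact payload expected on failure is an assumption rather than something taken from the diff:

```python
from typing import Any, Tuple

def short_output_guardrail(output: Any) -> Tuple[bool, Any]:
    # Illustrative rule only: accept outputs of 200 words or fewer.
    # The (bool, Any) return shape follows the Callable[[Any], Tuple[bool, Any]]
    # annotation on the guardrail field above; the feedback string on failure
    # is an assumption, not part of the change.
    text = str(output)
    if len(text.split()) <= 200:
        return True, output
    return False, "Output exceeds 200 words; please shorten it."
```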
@@ -357,6 +340,7 @@ class Agent(BaseAgent):
             self.knowledge_config.model_dump() if self.knowledge_config else {}
         )
 
+
         if self.knowledge or (self.crew and self.crew.knowledge):
             crewai_event_bus.emit(
                 self,
@@ -547,11 +531,6 @@ class Agent(BaseAgent):
         Returns:
             The output of the agent.
         """
-        if not self.agent_executor:
-            raise ValueError(
-                "Agent executor not initialized. Call create_agent_executor() first."
-            )
-
         return self.agent_executor.invoke(
             {
                 "input": task_prompt,
@@ -96,7 +96,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             )
         )
 
-    def invoke(self, inputs: Dict[str, Union[str, bool, None]]) -> Dict[str, Any]:
+    def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
         if "system" in self.prompt:
             system_prompt = self._format_prompt(self.prompt.get("system", ""), inputs)
             user_prompt = self._format_prompt(self.prompt.get("user", ""), inputs)
@@ -120,7 +120,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 raise
             except Exception as e:
                 handle_unknown_error(self._printer, e)
-                raise
+                if e.__class__.__module__.startswith("litellm"):
+                    # Do not retry on litellm errors
+                    raise e
+                else:
+                    raise e
 
         if self.ask_for_human_input:
             formatted_answer = self._handle_human_feedback(formatted_answer)
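In the added branch above, LiteLLM failures are recognized by the exception class's module name rather than by importing litellm exception types at the except site; both branches re-raise, so the check mainly documents that litellm errors are not retried. A standalone sketch of the module-name test (the sample exception class is hypothetical):

```python
# Hedged sketch of the e.__class__.__module__ check used above.
class FakeLiteLLMError(Exception):
    """Stand-in for an exception defined inside the litellm package."""

# Simulate the module a real litellm exception would report.
FakeLiteLLMError.__module__ = "litellm.exceptions"

err = FakeLiteLLMError("bad API key")
assert err.__class__.__module__.startswith("litellm")  # treated as non-retryable
```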
@@ -155,7 +159,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 messages=self.messages,
                 callbacks=self.callbacks,
                 printer=self._printer,
-                from_task=self.task,
+                from_task=self.task
             )
             formatted_answer = process_llm_response(answer, self.use_stop_words)
 
@@ -371,13 +375,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             training_data[agent_id] = agent_training_data
             training_handler.save(training_data)
 
-    def _format_prompt(
-        self, prompt: str, inputs: Dict[str, Union[str, bool, None]]
-    ) -> str:
-        # Cast to str to satisfy type checker - these are always strings when called
-        prompt = prompt.replace("{input}", str(inputs["input"]))
-        prompt = prompt.replace("{tool_names}", str(inputs["tool_names"]))
-        prompt = prompt.replace("{tools}", str(inputs["tools"]))
+    def _format_prompt(self, prompt: str, inputs: Dict[str, str]) -> str:
+        prompt = prompt.replace("{input}", inputs["input"])
+        prompt = prompt.replace("{tool_names}", inputs["tool_names"])
+        prompt = prompt.replace("{tools}", inputs["tools"])
         return prompt
 
     def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
@@ -59,7 +59,6 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
 
 load_dotenv()
 
-litellm.suppress_debug_info = True
 
 class FilteredStream(io.TextIOBase):
     _lock = None
@@ -77,7 +76,9 @@ class FilteredStream(io.TextIOBase):
 
         # Skip common noisy LiteLLM banners and any other lines that contain "litellm"
         if (
-            "litellm.info:" in lower_s
+            "give feedback / get help" in lower_s
+            or "litellm.info:" in lower_s
+            or "litellm" in lower_s
             or "Consider using a smaller input or implementing a text splitting strategy" in lower_s
         ):
             return 0
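FilteredStream wraps a text stream and suppresses noisy LiteLLM banner lines by returning 0 from write(). A minimal self-contained sketch of the same idea (simplified: no locking, and it wraps sys.stdout directly rather than whatever crewAI actually patches):

```python
import io
import sys

class NoisyLineFilter(io.TextIOBase):
    """Hedged sketch: swallow lines that look like LiteLLM banners, pass the rest through."""

    def __init__(self, target):
        self._target = target

    def write(self, s: str) -> int:
        lower_s = s.lower()
        if (
            "give feedback / get help" in lower_s
            or "litellm.info:" in lower_s
            or "litellm" in lower_s
        ):
            return 0  # pretend nothing was written
        return self._target.write(s)

# Example usage (assumption, not taken from the diff):
# sys.stdout = NoisyLineFilter(sys.stdout)
```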
@@ -1004,6 +1005,7 @@ class LLM(BaseLLM):
                 self,
                 event=LLMCallFailedEvent(error=str(e), from_task=from_task, from_agent=from_agent),
             )
+            logging.error(f"LiteLLM call failed: {str(e)}")
             raise
 
     def _handle_emit_call_events(self, response: Any, call_type: LLMCallType, from_task: Optional[Any] = None, from_agent: Optional[Any] = None, messages: str | list[dict[str, Any]] | None = None):
@@ -157,6 +157,10 @@ def get_llm_response(
             from_agent=from_agent,
         )
     except Exception as e:
+        printer.print(
+            content=f"Error during LLM call: {e}",
+            color="red",
+        )
         raise e
     if not answer:
         printer.print(
@@ -228,17 +232,12 @@ def handle_unknown_error(printer: Any, exception: Exception) -> None:
         printer: Printer instance for output
         exception: The exception that occurred
     """
-    error_message = str(exception)
-
-    if "litellm" in error_message:
-        return
-
     printer.print(
         content="An unknown error occurred. Please check the details below.",
         color="red",
     )
     printer.print(
-        content=f"Error details: {error_message}",
+        content=f"Error details: {exception}",
         color="red",
     )
 
@@ -2010,6 +2010,7 @@ def test_crew_agent_executor_litellm_auth_error():
     from litellm.exceptions import AuthenticationError
 
     from crewai.agents.tools_handler import ToolsHandler
+    from crewai.utilities import Printer
 
     # Create an agent and executor
     agent = Agent(
@@ -2042,6 +2043,7 @@ def test_crew_agent_executor_litellm_auth_error():
         # Mock the LLM call to raise AuthenticationError
         with (
             patch.object(LLM, "call") as mock_llm_call,
+            patch.object(Printer, "print") as mock_printer,
             pytest.raises(AuthenticationError) as exc_info,
         ):
             mock_llm_call.side_effect = AuthenticationError(
@@ -2055,6 +2057,13 @@ def test_crew_agent_executor_litellm_auth_error():
             }
         )
 
+    # Verify error handling messages
+    error_message = f"Error during LLM call: {str(mock_llm_call.side_effect)}"
+    mock_printer.assert_any_call(
+        content=error_message,
+        color="red",
+    )
+
     # Verify the call was only made once (no retries)
     mock_llm_call.assert_called_once()
 
tests/test_macos_compatibility.py (new file, 138 lines)
@@ -0,0 +1,138 @@
+import pytest
+import platform
+
+
+class TestMacOSCompatibility:
+    """Test macOS compatibility, especially onnxruntime dependency resolution."""
+
+    def test_chromadb_import_success(self):
+        """Test that ChromaDB can be imported successfully."""
+        try:
+            import chromadb
+            assert chromadb is not None
+            assert hasattr(chromadb, '__version__')
+        except ImportError as e:
+            pytest.fail(f"ChromaDB import failed: {e}")
+
+    def test_onnxruntime_import_success(self):
+        """Test that onnxruntime can be imported successfully."""
+        try:
+            import onnxruntime
+            assert onnxruntime is not None
+            assert hasattr(onnxruntime, '__version__')
+        except ImportError as e:
+            pytest.fail(f"onnxruntime import failed: {e}")
+
+    def test_onnxruntime_version_compatibility(self):
+        """Test that onnxruntime version is within expected range."""
+        try:
+            import onnxruntime
+            version = onnxruntime.__version__
+
+            major, minor, patch = map(int, version.split('.'))
+            version_tuple = (major, minor, patch)
+
+            min_version = (1, 19, 0)
+            max_version = (1, 22, 0)
+
+            assert version_tuple >= min_version, f"onnxruntime version {version} is below minimum {'.'.join(map(str, min_version))}"
+            assert version_tuple <= max_version, f"onnxruntime version {version} is above maximum {'.'.join(map(str, max_version))}"
+
+        except ImportError:
+            pytest.skip("onnxruntime not available for version check")
+
+    def test_chromadb_persistent_client_creation(self):
+        """Test that ChromaDB PersistentClient can be created successfully."""
+        try:
+            from crewai.utilities.chromadb import create_persistent_client
+            import tempfile
+
+            with tempfile.TemporaryDirectory() as temp_dir:
+                client = create_persistent_client(path=temp_dir)
+                assert client is not None
+
+        except ImportError as e:
+            pytest.fail(f"ChromaDB utilities import failed: {e}")
+        except Exception as e:
+            pytest.fail(f"ChromaDB client creation failed: {e}")
+
+    def test_rag_storage_initialization(self):
+        """Test that RAGStorage can be initialized successfully."""
+        try:
+            from crewai.memory.storage.rag_storage import RAGStorage
+            import tempfile
+
+            with tempfile.TemporaryDirectory() as temp_dir:
+                storage = RAGStorage(
+                    type="test_memory",
+                    allow_reset=True,
+                    embedder_config=None,
+                    crew=None,
+                    path=temp_dir
+                )
+                assert storage is not None
+                assert hasattr(storage, 'app')
+                assert hasattr(storage, 'collection')
+
+        except ImportError as e:
+            pytest.fail(f"RAGStorage import failed: {e}")
+        except Exception as e:
+            pytest.fail(f"RAGStorage initialization failed: {e}")
+
+    @pytest.mark.skipif(platform.system() != "Darwin", reason="macOS-specific test")
+    def test_macos_onnxruntime_availability(self):
+        """Test that onnxruntime is available on macOS with proper version."""
+        try:
+            import onnxruntime
+            version = onnxruntime.__version__
+
+            major, minor, patch = map(int, version.split('.'))
+
+            if (major, minor) == (1, 19):
+                assert patch >= 0, f"onnxruntime 1.19.x version should be >= 1.19.0, got {version}"
+            elif (major, minor) == (1, 20):
+                pass
+            elif (major, minor) == (1, 21):
+                pass
+            elif (major, minor) == (1, 22):
+                assert patch <= 0, f"onnxruntime 1.22.x version should be <= 1.22.0, got {version}"
+            else:
+                pytest.fail(f"onnxruntime version {version} is outside expected range 1.19.0-1.22.0")
+
+        except ImportError:
+            pytest.fail("onnxruntime should be available on macOS with the new version range")
+
+    def test_chromadb_collection_operations(self):
+        """Test basic ChromaDB collection operations work with current onnxruntime."""
+        try:
+            from crewai.utilities.chromadb import create_persistent_client, sanitize_collection_name
+            import tempfile
+            import uuid
+
+            with tempfile.TemporaryDirectory() as temp_dir:
+                client = create_persistent_client(path=temp_dir)
+
+                collection_name = sanitize_collection_name("test_collection")
+                collection = client.get_or_create_collection(name=collection_name)
+
+                test_doc = "This is a test document for ChromaDB compatibility."
+                test_id = str(uuid.uuid4())
+
+                collection.add(
+                    documents=[test_doc],
+                    ids=[test_id],
+                    metadatas=[{"test": True}]
+                )
+
+                results = collection.query(
+                    query_texts=["test document"],
+                    n_results=1
+                )
+
+                assert len(results["ids"][0]) > 0
+                assert results["documents"][0][0] == test_doc
+
+        except ImportError as e:
+            pytest.fail(f"ChromaDB operations import failed: {e}")
+        except Exception as e:
+            pytest.fail(f"ChromaDB operations failed: {e}")
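The new module is plain pytest; assuming a normal repository checkout, it could be run on its own with pytest's Python API (this invocation is illustrative, not part of the change):

```python
# Hedged sketch: run only the new macOS compatibility tests.
import pytest

raise SystemExit(pytest.main(["-q", "tests/test_macos_compatibility.py"]))
```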