Compare commits

..

6 Commits

Author SHA1 Message Date
Devin AI
50508297c9 feat: centralize default memory path logic & add path validation tests
Co-Authored-By: Joe Moura <joao@crewai.com>
2024-12-28 01:36:03 +00:00
Devin AI
58b2ba4d90 refactor: update database connections to use storage_path
Co-Authored-By: Joe Moura <joao@crewai.com>
2024-12-28 01:12:30 +00:00
Arnaud Gelas
4274cde583 Improve handling of optional configurations in memory and storage
- Initialize contextual_memory in src/crewai/agent.py and src/crewai/crew.py
- Make UserMemory optional and add checks in src/crewai/memory/contextual/contextual_memory.py
- Add crew checks in src/crewai/memory/entity/entity_memory.py and
  src/crewai/memory/short_term/short_term_memory.py
- Allow optional storage_path in src/crewai/memory/storage/base_rag_storage.py
- Update storage classes to accept optional db_path in:
  src/crewai/memory/storage/kickoff_task_outputs_storage.py,
  src/crewai/memory/storage/ltm_sqlite_storage.py, and
  src/crewai/memory/storage/mem0_storage.py
- Modify src/crewai/memory/storage/rag_storage.py to use storage_path
- Enhance src/crewai/utilities/embedding_configurator.py to handle missing providers
2024-12-28 01:12:30 +00:00
Arnaud Gelas
12245d66a7 Run uv run ruff format
2024-12-28 01:12:30 +00:00
devin-ai-integration[bot]
2433819c4f fix: handle optional storage with null checks (#1808)
Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Co-authored-by: João Moura <joaomdmoura@gmail.com>
2024-12-27 21:30:39 -03:00
Erick Amorim
97fc44c930 fix: Change storage initialization to None for KnowledgeStorage (#1804)
* fix: Change storage initialization to None for KnowledgeStorage

* refactor: Change storage field to optional and improve error handling when saving documents

---------

Co-authored-by: João Moura <joaomdmoura@gmail.com>
2024-12-27 21:18:25 -03:00
27 changed files with 448 additions and 319 deletions

View File

@@ -294,14 +294,7 @@ class Agent(BaseAgent):
)
if self.crew and self.crew.memory:
contextual_memory = ContextualMemory(
self.crew.memory_config,
self.crew._short_term_memory,
self.crew._long_term_memory,
self.crew._entity_memory,
self.crew._user_memory,
)
memory = contextual_memory.build_context_for_task(task, context)
memory = self.crew.contextual_memory.build_context_for_task(task, context)
if memory.strip() != "":
task_prompt += self.i18n.slice("memory").format(memory=memory)
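
This hunk removes the per-call ContextualMemory construction from Agent.execute_task; the crew now owns a single contextual_memory instance, built once in its model validator (see the crew.py hunk below). A minimal sketch of the resulting call path, assuming crewAI is installed; the Agent/Task field values are illustrative:

from crewai import Agent, Crew, Task

researcher = Agent(role="Researcher", goal="Gather facts", backstory="Diligent analyst")
task = Task(description="Summarize findings", expected_output="A short summary", agent=researcher)
crew = Crew(agents=[researcher], tasks=[task], memory=True)

# Built once by Crew's validator and reused for every task prompt.
context = crew.contextual_memory.build_context_for_task(task, context="")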

View File

@@ -358,9 +358,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
if self.crew is not None and hasattr(self.crew, "_train_iteration"):
train_iteration = self.crew._train_iteration
if agent_id in training_data and isinstance(train_iteration, int):
training_data[agent_id][train_iteration][
"improved_output"
] = result.output
training_data[agent_id][train_iteration]["improved_output"] = (
result.output
)
training_handler.save(training_data)
else:
self._printer.print(

View File

@@ -153,8 +153,12 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
login_response_json = login_response.json()
settings = Settings()
settings.tool_repository_username = login_response_json["credential"]["username"]
settings.tool_repository_password = login_response_json["credential"]["password"]
settings.tool_repository_username = login_response_json["credential"][
"username"
]
settings.tool_repository_password = login_response_json["credential"][
"password"
]
settings.dump()
console.print(
@@ -179,7 +183,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
capture_output=False,
env=self._build_env_with_credentials(repository_handle),
text=True,
check=True
check=True,
)
if add_package_result.stderr:
@@ -204,7 +208,11 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
settings = Settings()
env = os.environ.copy()
env[f"UV_INDEX_{repository_handle}_USERNAME"] = str(settings.tool_repository_username or "")
env[f"UV_INDEX_{repository_handle}_PASSWORD"] = str(settings.tool_repository_password or "")
env[f"UV_INDEX_{repository_handle}_USERNAME"] = str(
settings.tool_repository_username or ""
)
env[f"UV_INDEX_{repository_handle}_PASSWORD"] = str(
settings.tool_repository_password or ""
)
return env

View File

@@ -6,7 +6,6 @@ from concurrent.futures import Future
from hashlib import md5
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from langchain_core.language_models.base import BaseLanguageModel
from pydantic import (
UUID4,
BaseModel,
@@ -26,6 +25,7 @@ from crewai.crews.crew_output import CrewOutput
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.llm import LLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
@@ -279,6 +279,13 @@ class Crew(BaseModel):
)
else:
self._user_memory = None
self.contextual_memory = ContextualMemory(
memory_config=self.memory_config,
stm=self._short_term_memory,
ltm=self._long_term_memory,
em=self._entity_memory,
um=self._user_memory,
)
return self
@model_validator(mode="after")
@@ -1076,39 +1083,19 @@ class Crew(BaseModel):
def test(
self,
n_iterations: int,
llm: Optional[Union[str, InstanceOf[LLM], Any]] = None,
openai_model_name: Optional[str] = None, # Kept for backward compatibility
openai_model_name: Optional[str] = None,
inputs: Optional[Dict[str, Any]] = None,
) -> None:
"""Test and evaluate the Crew with the given inputs for n iterations.
Args:
n_iterations (int): Number of test iterations to run
llm (Optional[Union[str, LLM, BaseLanguageModel]]): Language model to use for testing
openai_model_name (Optional[str]): Legacy parameter for OpenAI models (deprecated)
inputs (Optional[Dict[str, Any]]): Test inputs for the crew
Raises:
ValueError: If n_iterations is less than 1 or if llm type is unsupported
Returns:
None
"""
if n_iterations < 1:
raise ValueError("n_iterations must be greater than 0")
if llm is not None and not isinstance(llm, (str, LLM, BaseLanguageModel)):
raise ValueError(f"Unsupported LLM type: {type(llm)}")
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
test_crew = self.copy()
test_llm = llm if llm is not None else openai_model_name
self._test_execution_span = test_crew._telemetry.test_execution_span(
test_crew,
n_iterations,
inputs,
test_llm, # type: ignore[arg-type]
openai_model_name, # type: ignore[arg-type]
) # type: ignore[arg-type]
evaluator = CrewEvaluator(test_crew, test_llm) # type: ignore[arg-type]
evaluator = CrewEvaluator(test_crew, openai_model_name) # type: ignore[arg-type]
for i in range(1, n_iterations + 1):
evaluator.set_iteration(i)
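
The test() signature above drops the experimental llm parameter and keeps openai_model_name as the only model knob. Continuing the crew from the earlier sketch (the inputs keys are illustrative):

crew.test(n_iterations=2, openai_model_name="gpt-4o-mini", inputs={"topic": "storage paths"})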

View File

@@ -14,13 +14,13 @@ class Knowledge(BaseModel):
Knowledge is a collection of sources and setup for the vector store to save and query relevant context.
Args:
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
storage: Optional[KnowledgeStorage] = Field(default=None)
embedder_config: Optional[Dict[str, Any]] = None
"""
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
model_config = ConfigDict(arbitrary_types_allowed=True)
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
storage: Optional[KnowledgeStorage] = Field(default=None)
embedder_config: Optional[Dict[str, Any]] = None
collection_name: Optional[str] = None
@@ -49,8 +49,13 @@ class Knowledge(BaseModel):
"""
Query across all knowledge sources to find the most relevant information.
Returns the top_k most relevant chunks.
Raises:
ValueError: If storage is not initialized.
"""
if self.storage is None:
raise ValueError("Storage is not initialized.")
results = self.storage.search(
query,
limit,
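
With storage now Optional and defaulting to None, querying a Knowledge instance that never received a backend raises a ValueError instead of failing deeper in the stack. A sketch of the guard, using pydantic's model_construct to bypass whatever default wiring the constructor may do (field values are illustrative):

from crewai.knowledge.knowledge import Knowledge

kb = Knowledge.model_construct(sources=[], storage=None, collection_name="demo")
try:
    kb.query("what changed?")  # exact query signature beyond the query string is assumed
except ValueError as err:
    print(err)  # Storage is not initialized.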

View File

@@ -22,7 +22,7 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
default_factory=list, description="The path to the file"
)
content: Dict[Path, str] = Field(init=False, default_factory=dict)
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
storage: Optional[KnowledgeStorage] = Field(default=None)
safe_file_paths: List[Path] = Field(default_factory=list)
@field_validator("file_path", "file_paths", mode="before")
@@ -62,7 +62,10 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
def _save_documents(self):
"""Save the documents to the storage."""
self.storage.save(self.chunks)
if self.storage:
self.storage.save(self.chunks)
else:
raise ValueError("No storage found to save documents.")
def convert_to_path(self, path: Union[Path, str]) -> Path:
"""Convert a path to a Path object."""

View File

@@ -16,7 +16,7 @@ class BaseKnowledgeSource(BaseModel, ABC):
chunk_embeddings: List[np.ndarray] = Field(default_factory=list)
model_config = ConfigDict(arbitrary_types_allowed=True)
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
storage: Optional[KnowledgeStorage] = Field(default=None)
metadata: Dict[str, Any] = Field(default_factory=dict) # Currently unused
collection_name: Optional[str] = Field(default=None)
@@ -46,4 +46,7 @@ class BaseKnowledgeSource(BaseModel, ABC):
Save the documents to the storage.
This method should be called after the chunks and embeddings are generated.
"""
self.storage.save(self.chunks)
if self.storage:
self.storage.save(self.chunks)
else:
raise ValueError("No storage found to save documents.")
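
The same guard lands in every knowledge source, so code that builds sources directly must attach a storage backend itself. A sketch under the assumption that StringKnowledgeSource and KnowledgeStorage keep their usual import paths:

from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage

source = StringKnowledgeSource(content="CrewAI persists memories on disk.")
source.storage = KnowledgeStorage()  # omit this and _save_documents() raises ValueError
source.add()  # chunks the content, embeds it, and saves via the attached storage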

View File

@@ -1,4 +1,5 @@
from typing import Any, Dict, Optional
from crewai.task import Task
from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory, UserMemory
@@ -10,7 +11,7 @@ class ContextualMemory:
stm: ShortTermMemory,
ltm: LongTermMemory,
em: EntityMemory,
um: UserMemory,
um: Optional[UserMemory],
):
if memory_config is not None:
self.memory_provider = memory_config.get("provider")
@@ -21,7 +22,7 @@ class ContextualMemory:
self.em = em
self.um = um
def build_context_for_task(self, task, context) -> str:
def build_context_for_task(self, task: Task, context: str) -> str:
"""
Automatically builds a minimal, highly relevant set of contextual information
for a given task.
@@ -39,7 +40,7 @@ class ContextualMemory:
context.append(self._fetch_user_context(query))
return "\n".join(filter(None, context))
def _fetch_stm_context(self, query) -> str:
def _fetch_stm_context(self, query: str) -> str:
"""
Fetches recent relevant insights from STM related to the task's description and expected_output,
formatted as bullet points.
@@ -53,7 +54,7 @@ class ContextualMemory:
)
return f"Recent Insights:\n{formatted_results}" if stm_results else ""
def _fetch_ltm_context(self, task) -> Optional[str]:
def _fetch_ltm_context(self, task: str) -> Optional[str]:
"""
Fetches historical data or insights from LTM that are relevant to the task's description and expected_output,
formatted as bullet points.
@@ -72,7 +73,7 @@ class ContextualMemory:
return f"Historical Data:\n{formatted_results}" if ltm_results else ""
def _fetch_entity_context(self, query) -> str:
def _fetch_entity_context(self, query: str) -> str:
"""
Fetches relevant entity information from Entity Memory related to the task's description and expected_output,
formatted as bullet points.
@@ -94,6 +95,8 @@ class ContextualMemory:
Returns:
str: Formatted user memories as bullet points, or an empty string if none found.
"""
if not self.um:
return ""
user_memories = self.um.search(query)
if not user_memories:
return ""

View File

@@ -11,7 +11,7 @@ class EntityMemory(Memory):
"""
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if hasattr(crew, "memory_config") and crew.memory_config is not None:
if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
self.memory_provider = crew.memory_config.get("provider")
else:
self.memory_provider = None

View File

@@ -15,8 +15,17 @@ class LongTermMemory(Memory):
"""
def __init__(self, storage=None, path=None):
"""Initialize long term memory.
Args:
storage: Optional custom storage instance
path: Optional custom path for storage location
Note:
If both storage and path are provided, storage takes precedence
"""
if not storage:
storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
storage = LTMSQLiteStorage(storage_path=path) if path else LTMSQLiteStorage()
super().__init__(storage)
def save(self, item: LongTermMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
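
The docstring above codifies the precedence rule. A sketch of the three construction modes (the path is illustrative):

from pathlib import Path

from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage

ltm_default = LongTermMemory()                              # uses get_default_storage_path('ltm')
ltm_pathed = LongTermMemory(path=Path("/tmp/crew/ltm.db"))  # wraps LTMSQLiteStorage(storage_path=path)
ltm_custom = LongTermMemory(storage=LTMSQLiteStorage())     # storage wins; path would be ignored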

View File

@@ -15,7 +15,7 @@ class ShortTermMemory(Memory):
"""
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if hasattr(crew, "memory_config") and crew.memory_config is not None:
if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
self.memory_provider = crew.memory_config.get("provider")
else:
self.memory_provider = None

View File

@@ -1,5 +1,11 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from pathlib import Path
import os
from typing import Any, Dict, List, Optional, TypeVar
from abc import ABC, abstractmethod
from pathlib import Path
from crewai.utilities.paths import get_default_storage_path
class BaseRAGStorage(ABC):
@@ -12,17 +18,46 @@ class BaseRAGStorage(ABC):
def __init__(
self,
type: str,
storage_path: Optional[Path] = None,
allow_reset: bool = True,
embedder_config: Optional[Any] = None,
crew: Any = None,
):
) -> None:
"""Initialize the BaseRAGStorage.
Args:
type: Type of storage being used
storage_path: Optional custom path for storage location
allow_reset: Whether storage can be reset
embedder_config: Optional configuration for the embedder
crew: Optional crew instance this storage belongs to
Raises:
PermissionError: If storage path is not writable
OSError: If storage path cannot be created
"""
self.type = type
self.storage_path = storage_path if storage_path else get_default_storage_path('rag')
# Validate storage path
try:
self.storage_path.parent.mkdir(parents=True, exist_ok=True)
if not os.access(self.storage_path.parent, os.W_OK):
raise PermissionError(f"No write permission for storage path: {self.storage_path}")
except OSError as e:
raise OSError(f"Failed to initialize storage path: {str(e)}")
self.allow_reset = allow_reset
self.embedder_config = embedder_config
self.crew = crew
self.agents = self._initialize_agents()
def _initialize_agents(self) -> str:
"""Initialize agent identifiers for storage.
Returns:
str: Underscore-joined string of sanitized agent role names
"""
if self.crew:
return "_".join(
[self._sanitize_role(agent.role) for agent in self.crew.agents]
@@ -31,12 +66,27 @@ class BaseRAGStorage(ABC):
@abstractmethod
def _sanitize_role(self, role: str) -> str:
"""Sanitizes agent roles to ensure valid directory names."""
"""Sanitizes agent roles to ensure valid directory names.
Args:
role: The agent role name to sanitize
Returns:
str: Sanitized role name safe for use in paths
"""
pass
@abstractmethod
def save(self, value: Any, metadata: Dict[str, Any]) -> None:
"""Save a value with metadata to the storage."""
"""Save a value with metadata to the storage.
Args:
value: The value to store
metadata: Additional metadata to store with the value
Raises:
OSError: If there is an error writing to storage
"""
pass
@abstractmethod
@@ -46,25 +96,55 @@ class BaseRAGStorage(ABC):
limit: int = 3,
filter: Optional[dict] = None,
score_threshold: float = 0.35,
) -> List[Any]:
"""Search for entries in the storage."""
) -> List[Dict[str, Any]]:
"""Search for entries in the storage.
Args:
query: The search query string
limit: Maximum number of results to return
filter: Optional filter criteria
score_threshold: Minimum similarity score threshold
Returns:
List[Dict[str, Any]]: List of matching entries with their metadata
"""
pass
@abstractmethod
def reset(self) -> None:
"""Reset the storage."""
"""Reset the storage.
Raises:
OSError: If there is an error clearing storage
PermissionError: If reset is not allowed
"""
pass
@abstractmethod
def _generate_embedding(
self, text: str, metadata: Optional[Dict[str, Any]] = None
) -> Any:
"""Generate an embedding for the given text and metadata."""
) -> List[float]:
"""Generate an embedding for the given text and metadata.
Args:
text: Text to generate embedding for
metadata: Optional metadata to include in embedding
Returns:
List[float]: Vector embedding of the text
Raises:
ValueError: If text is empty or invalid
"""
pass
@abstractmethod
def _initialize_app(self):
"""Initialize the vector db."""
def _initialize_app(self) -> None:
"""Initialize the vector db.
Raises:
OSError: If vector db initialization fails
"""
pass
def setup_config(self, config: Dict[str, Any]):
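
The constructor now owns path validation for every RAG-backed store: the parent directory is created eagerly and write access is checked up front. A sketch of that contract, reusing the MockRAGStorage subclass defined in the test diff near the end of this compare:

import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    target = Path(tmp) / "nested" / "rag"
    store = MockRAGStorage("short_term", storage_path=target)
    assert target.parent.exists()  # parent directory created by __init__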

View File

@@ -1,11 +1,13 @@
import json
import os
import sqlite3
from pathlib import Path
from typing import Any, Dict, List, Optional
from crewai.task import Task
from crewai.utilities import Printer
from crewai.utilities.crew_json_encoder import CrewJSONEncoder
from crewai.utilities.paths import db_storage_path
from crewai.utilities.paths import get_default_storage_path
class KickoffTaskOutputsSQLiteStorage:
@@ -13,10 +15,26 @@ class KickoffTaskOutputsSQLiteStorage:
An updated SQLite storage class for kickoff task outputs storage.
"""
def __init__(
self, db_path: str = f"{db_storage_path()}/latest_kickoff_task_outputs.db"
) -> None:
self.db_path = db_path
def __init__(self, storage_path: Optional[Path] = None) -> None:
"""Initialize kickoff task outputs storage.
Args:
storage_path: Optional custom path for storage location
Raises:
PermissionError: If storage path is not writable
OSError: If storage path cannot be created
"""
self.storage_path = storage_path if storage_path else get_default_storage_path('kickoff')
# Validate storage path
try:
self.storage_path.parent.mkdir(parents=True, exist_ok=True)
if not os.access(self.storage_path.parent, os.W_OK):
raise PermissionError(f"No write permission for storage path: {self.storage_path}")
except OSError as e:
raise OSError(f"Failed to initialize storage path: {str(e)}")
self._printer: Printer = Printer()
self._initialize_db()
@@ -25,7 +43,7 @@ class KickoffTaskOutputsSQLiteStorage:
Initializes the SQLite database and creates LTM table
"""
try:
with sqlite3.connect(self.db_path) as conn:
with sqlite3.connect(str(self.storage_path)) as conn:
cursor = conn.cursor()
cursor.execute(
"""
@@ -55,9 +73,21 @@ class KickoffTaskOutputsSQLiteStorage:
task_index: int,
was_replayed: bool = False,
inputs: Dict[str, Any] = {},
):
) -> None:
"""Add a task output to storage.
Args:
task: The task whose output is being stored
output: The output data from the task
task_index: Index of this task in the sequence
was_replayed: Whether this was from a replay
inputs: Optional input data that led to this output
Raises:
sqlite3.Error: If there is an error saving to database
"""
try:
with sqlite3.connect(self.db_path) as conn:
with sqlite3.connect(str(self.storage_path)) as conn:
cursor = conn.cursor()
cursor.execute(
"""
@@ -90,7 +120,7 @@ class KickoffTaskOutputsSQLiteStorage:
Updates an existing row in the latest_kickoff_task_outputs table based on task_index.
"""
try:
with sqlite3.connect(self.db_path) as conn:
with sqlite3.connect(str(self.storage_path)) as conn:
cursor = conn.cursor()
fields = []
@@ -119,7 +149,7 @@ class KickoffTaskOutputsSQLiteStorage:
def load(self) -> Optional[List[Dict[str, Any]]]:
try:
with sqlite3.connect(self.db_path) as conn:
with sqlite3.connect(str(self.storage_path)) as conn:
cursor = conn.cursor()
cursor.execute("""
SELECT *
@@ -155,7 +185,7 @@ class KickoffTaskOutputsSQLiteStorage:
Deletes all rows from the latest_kickoff_task_outputs table.
"""
try:
with sqlite3.connect(self.db_path) as conn:
with sqlite3.connect(str(self.storage_path)) as conn:
cursor = conn.cursor()
cursor.execute("DELETE FROM latest_kickoff_task_outputs")
conn.commit()

View File

@@ -1,9 +1,11 @@
import json
import os
import sqlite3
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from crewai.utilities import Printer
from crewai.utilities.paths import db_storage_path
from crewai.utilities.paths import get_default_storage_path
class LTMSQLiteStorage:
@@ -11,10 +13,26 @@ class LTMSQLiteStorage:
An updated SQLite storage class for LTM data storage.
"""
def __init__(
self, db_path: str = f"{db_storage_path()}/long_term_memory_storage.db"
) -> None:
self.db_path = db_path
def __init__(self, storage_path: Optional[Path] = None) -> None:
"""Initialize LTM SQLite storage.
Args:
storage_path: Optional custom path for storage location
Raises:
PermissionError: If storage path is not writable
OSError: If storage path cannot be created
"""
self.storage_path = storage_path if storage_path else get_default_storage_path('ltm')
# Validate storage path
try:
self.storage_path.parent.mkdir(parents=True, exist_ok=True)
if not os.access(self.storage_path.parent, os.W_OK):
raise PermissionError(f"No write permission for storage path: {self.storage_path}")
except OSError as e:
raise OSError(f"Failed to initialize storage path: {str(e)}")
self._printer: Printer = Printer()
self._initialize_db()
@@ -23,7 +41,7 @@ class LTMSQLiteStorage:
Initializes the SQLite database and creates LTM table
"""
try:
with sqlite3.connect(self.db_path) as conn:
with sqlite3.connect(str(self.storage_path)) as conn:
cursor = conn.cursor()
cursor.execute(
"""
@@ -51,9 +69,20 @@ class LTMSQLiteStorage:
datetime: str,
score: Union[int, float],
) -> None:
"""Save a memory entry to long-term memory.
Args:
task_description: Description of the task this memory relates to
metadata: Additional data to store with the memory
datetime: Timestamp for when this memory was created
score: Relevance score for this memory (higher is more relevant)
Raises:
sqlite3.Error: If there is an error saving to the database
"""
"""Saves data to the LTM table with error handling."""
try:
with sqlite3.connect(self.db_path) as conn:
with sqlite3.connect(str(self.storage_path)) as conn:
cursor = conn.cursor()
cursor.execute(
"""
@@ -74,7 +103,7 @@ class LTMSQLiteStorage:
) -> Optional[List[Dict[str, Any]]]:
"""Queries the LTM table by task description with error handling."""
try:
with sqlite3.connect(self.db_path) as conn:
with sqlite3.connect(str(self.storage_path)) as conn:
cursor = conn.cursor()
cursor.execute(
f"""
@@ -109,7 +138,7 @@ class LTMSQLiteStorage:
) -> None:
"""Resets the LTM table with error handling."""
try:
with sqlite3.connect(self.db_path) as conn:
with sqlite3.connect(str(self.storage_path)) as conn:
cursor = conn.cursor()
cursor.execute("DELETE FROM long_term_memories")
conn.commit()

View File

@@ -19,7 +19,7 @@ class Mem0Storage(Storage):
self.memory_type = type
self.crew = crew
self.memory_config = crew.memory_config
self.memory_config = crew.memory_config if crew else None
# User ID is required for user memory type "user" since it's used as a unique identifier for the user.
user_id = self._get_user_id()
@@ -27,9 +27,10 @@ class Mem0Storage(Storage):
raise ValueError("User ID is required for user memory type")
# API key in memory config overrides the environment variable
mem0_api_key = self.memory_config.get("config", {}).get("api_key") or os.getenv(
"MEM0_API_KEY"
)
if self.memory_config and self.memory_config.get("config"):
mem0_api_key = self.memory_config.get("config").get("api_key")
else:
mem0_api_key = os.getenv("MEM0_API_KEY")
self.memory = MemoryClient(api_key=mem0_api_key)
def _sanitize_role(self, role: str) -> str:
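
One subtlety in the null-safe branch above: a crew whose memory_config contains a "config" dict without an "api_key" entry no longer falls back to the MEM0_API_KEY environment variable, whereas the old or-chain did. A condensed sketch of the new resolution order (values are placeholders):

import os

memory_config = {"provider": "mem0", "config": {"api_key": "m0-placeholder"}}
if memory_config and memory_config.get("config"):
    mem0_api_key = memory_config["config"].get("api_key")  # env var not consulted on this branch
else:
    mem0_api_key = os.getenv("MEM0_API_KEY")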

View File

@@ -11,7 +11,6 @@ from chromadb.api import ClientAPI
from crewai.memory.storage.base_rag_storage import BaseRAGStorage
from crewai.utilities import EmbeddingConfigurator
from crewai.utilities.constants import MAX_FILE_NAME_LENGTH
from crewai.utilities.paths import db_storage_path
@contextlib.contextmanager
@@ -40,9 +39,15 @@ class RAGStorage(BaseRAGStorage):
app: ClientAPI | None = None
def __init__(
self, type, allow_reset=True, embedder_config=None, crew=None, path=None
self,
type,
storage_path=None,
allow_reset=True,
embedder_config=None,
crew=None,
path=None,
):
super().__init__(type, allow_reset, embedder_config, crew)
super().__init__(type, storage_path, allow_reset, embedder_config, crew)
agents = crew.agents if crew else []
agents = [self._sanitize_role(agent.role) for agent in agents]
agents = "_".join(agents)
@@ -90,7 +95,7 @@ class RAGStorage(BaseRAGStorage):
"""
Ensures file name does not exceed max allowed by OS
"""
base_path = f"{db_storage_path()}/{type}"
base_path = f"{self.storage_path}/{type}"
if len(file_name) > MAX_FILE_NAME_LENGTH:
logging.warning(
@@ -152,7 +157,7 @@ class RAGStorage(BaseRAGStorage):
try:
if self.app:
self.app.reset()
shutil.rmtree(f"{db_storage_path()}/{self.type}")
shutil.rmtree(f"{self.storage_path}/{self.type}")
self.app = None
self.collection = None
except Exception as e:

View File

@@ -66,7 +66,6 @@ def cache_handler(func):
def crew(func) -> Callable[..., Crew]:
@wraps(func)
def wrapper(self, *args, **kwargs) -> Crew:
instantiated_tasks = []

View File

@@ -216,5 +216,5 @@ def CrewBase(cls: T) -> T:
# Include base class (qual)name in the wrapper class (qual)name.
WrappedClass.__name__ = CrewBase.__name__ + "(" + cls.__name__ + ")"
WrappedClass.__qualname__ = CrewBase.__qualname__ + "(" + cls.__name__ + ")"
return cast(T, WrappedClass)

View File

@@ -373,7 +373,9 @@ class Task(BaseModel):
content = (
json_output
if json_output
else pydantic_output.model_dump_json() if pydantic_output else result
else pydantic_output.model_dump_json()
if pydantic_output
else result
)
self._save_file(content)

View File

@@ -27,7 +27,7 @@ class EmbeddingConfigurator:
if embedder_config is None:
return self._create_default_embedding_function()
provider = embedder_config.get("provider")
provider = embedder_config.get("provider", "")
config = embedder_config.get("config", {})
model_name = config.get("model")
@@ -38,12 +38,13 @@ class EmbeddingConfigurator:
except Exception as e:
raise ValueError(f"Invalid custom embedding function: {str(e)}")
if provider not in self.embedding_functions:
embedding_function = self.embedding_functions.get(provider, None)
if not embedding_function:
raise Exception(
f"Unsupported embedding provider: {provider}, supported providers: {list(self.embedding_functions.keys())}"
)
return self.embedding_functions[provider](config, model_name)
return embedding_function(config, model_name)
@staticmethod
def _create_default_embedding_function():
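
Defaulting provider to "" and switching the lookup to .get() means both a missing and an unknown provider now surface the descriptive error instead of a KeyError. A sketch, assuming configure_embedder remains the public entry point:

from crewai.utilities import EmbeddingConfigurator

try:
    EmbeddingConfigurator().configure_embedder({"provider": "not-a-provider"})
except Exception as err:
    print(err)  # Unsupported embedding provider: not-a-provider, supported providers: [...]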

View File

@@ -1,19 +1,14 @@
import os
from collections import defaultdict
from typing import Any, Dict, List, Optional, Union
from langchain_core.language_models.base import BaseLanguageModel
from pydantic import BaseModel, Field, InstanceOf
from pydantic import BaseModel, Field
from rich.box import HEAVY_EDGE
from rich.console import Console
from rich.table import Table
from crewai.agent import Agent
from crewai.llm import LLM
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry import Telemetry
from crewai.utilities.logger import Logger
class TaskEvaluationPydanticOutput(BaseModel):
@@ -27,62 +22,22 @@ class CrewEvaluator:
A class to evaluate the performance of the agents in the crew based on the tasks they have performed.
Attributes:
crew (Crew): The crew of agents to evaluate
llm (Union[str, LLM, BaseLanguageModel]): Language model to use for evaluation
tasks_scores (defaultdict): Dictionary to store the scores of the agents for each task
iteration (int): Current iteration of the evaluation
run_execution_times (defaultdict): Dictionary to store execution times for each run
crew (Crew): The crew of agents to evaluate.
openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
iteration (int): The current iteration of the evaluation.
"""
tasks_scores: defaultdict = defaultdict(list)
run_execution_times: defaultdict = defaultdict(list)
iteration: int = 0
def __init__(self, crew, llm: Union[str, InstanceOf[LLM], BaseLanguageModel]):
"""Initialize the CrewEvaluator.
Args:
crew (Crew): The crew to evaluate
llm (Union[str, LLM, BaseLanguageModel]): Language model to use for evaluation
Raises:
ValueError: If llm is of an unsupported type
"""
if not isinstance(llm, (str, LLM, BaseLanguageModel, type(None))):
raise ValueError(f"Unsupported LLM type: {type(llm)}")
def __init__(self, crew, openai_model_name: str):
self.crew = crew
self.llm = llm
self.openai_model_name = openai_model_name
self._telemetry = Telemetry()
self._logger = Logger()
self._setup_llm()
self._setup_for_evaluating()
def _setup_llm(self):
"""Set up the LLM following the Agent class pattern.
This method initializes the language model based on the provided llm parameter:
- If string: creates new LLM instance with model name
- If LLM instance: uses as-is
- If None: uses default model from environment or "gpt-4"
- Otherwise: attempts to extract model name from object attributes
"""
if isinstance(self.llm, str):
self.llm = LLM(model=self.llm)
elif isinstance(self.llm, LLM):
pass
elif self.llm is None:
model_name = os.environ.get("OPENAI_MODEL_NAME") or "gpt-4"
self.llm = LLM(model=model_name)
else:
llm_params = {
"model": getattr(self.llm, "model_name", None)
or getattr(self.llm, "deployment_name", None)
or str(self.llm),
}
self.llm = LLM(**llm_params)
def _setup_for_evaluating(self) -> None:
"""Sets up the crew for evaluating."""
for task in self.crew.tasks:
@@ -96,7 +51,7 @@ class CrewEvaluator:
),
backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
verbose=False,
llm=self.llm,
llm=self.openai_model_name,
)
def _evaluation_task(
@@ -202,48 +157,35 @@ class CrewEvaluator:
console.print(table)
def evaluate(self, task_output: TaskOutput):
"""Evaluates the performance of the agents in the crew based on the tasks they have performed.
Args:
task_output (TaskOutput): The output from the task to evaluate
"""Evaluates the performance of the agents in the crew based on the tasks they have performed."""
current_task = None
for task in self.crew.tasks:
if task.description == task_output.description:
current_task = task
break
Raises:
ValueError: If task to evaluate or task output is missing, or if evaluation result is invalid
"""
try:
current_task = None
for task in self.crew.tasks:
if task.description == task_output.description:
current_task = task
break
if not current_task or not task_output:
raise ValueError(
"Task to evaluate and task output are required for evaluation"
)
self._logger.log("info", f"Starting evaluation for task: {task_output.description}")
evaluator_agent = self._evaluator_agent()
evaluation_task = self._evaluation_task(
evaluator_agent, current_task, task_output.raw
if not current_task or not task_output:
raise ValueError(
"Task to evaluate and task output are required for evaluation"
)
evaluation_result = evaluation_task.execute_sync()
evaluator_agent = self._evaluator_agent()
evaluation_task = self._evaluation_task(
evaluator_agent, current_task, task_output.raw
)
if isinstance(evaluation_result.pydantic, TaskEvaluationPydanticOutput):
self._test_result_span = self._telemetry.individual_test_result_span(
self.crew,
evaluation_result.pydantic.quality,
current_task._execution_time,
self.llm,
)
self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
self.run_execution_times[self.iteration].append(
current_task._execution_time
)
self._logger.log("info", f"Evaluation completed with score: {evaluation_result.pydantic.quality}")
else:
raise ValueError("Evaluation result is not in the expected format")
except Exception as e:
self._logger.log("error", f"Evaluation failed: {str(e)}")
raise
evaluation_result = evaluation_task.execute_sync()
if isinstance(evaluation_result.pydantic, TaskEvaluationPydanticOutput):
self._test_result_span = self._telemetry.individual_test_result_span(
self.crew,
evaluation_result.pydantic.quality,
current_task._execution_time,
self.openai_model_name,
)
self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
self.run_execution_times[self.iteration].append(
current_task._execution_time
)
else:
raise ValueError("Evaluation result is not in the expected format")
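
With the llm plumbing reverted, the evaluator is constructed from a plain model-name string again, matching the updated test near the end of this compare (the import path is assumed):

from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator

evaluator = CrewEvaluator(crew, openai_model_name="gpt-4o-mini")  # crew as in the earlier sketch
evaluator.set_iteration(1)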

View File

@@ -22,3 +22,26 @@ def get_project_directory_name():
cwd = Path.cwd()
project_directory_name = cwd.name
return project_directory_name
def get_default_storage_path(storage_type: str) -> Path:
"""Returns the default storage path for a given storage type.
Args:
storage_type: Type of storage ('ltm', 'kickoff', 'rag')
Returns:
Path: Default storage path for the specified type
Raises:
ValueError: If storage_type is not recognized
"""
base_path = db_storage_path()
if storage_type == 'ltm':
return base_path / 'latest_long_term_memories.db'
elif storage_type == 'kickoff':
return base_path / 'latest_kickoff_task_outputs.db'
elif storage_type == 'rag':
return base_path
else:
raise ValueError(f"Unknown storage type: {storage_type}")
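
For reference, the helper's full mapping, with <base> standing for db_storage_path():

from crewai.utilities.paths import get_default_storage_path

get_default_storage_path('ltm')      # <base>/latest_long_term_memories.db
get_default_storage_path('kickoff')  # <base>/latest_kickoff_task_outputs.db
get_default_storage_path('rag')      # <base> (the storage directory itself)
# get_default_storage_path('tools')  # would raise ValueError: Unknown storage type: tools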

View File

@@ -28,9 +28,10 @@ def test_create_success(mock_subprocess):
with in_temp_dir():
tool_command = ToolCommand()
with patch.object(tool_command, "login") as mock_login, patch(
"sys.stdout", new=StringIO()
) as fake_out:
with (
patch.object(tool_command, "login") as mock_login,
patch("sys.stdout", new=StringIO()) as fake_out,
):
tool_command.create("test-tool")
output = fake_out.getvalue()
@@ -82,7 +83,7 @@ def test_install_success(mock_get, mock_subprocess_run):
capture_output=False,
text=True,
check=True,
env=unittest.mock.ANY
env=unittest.mock.ANY,
)
assert "Successfully installed sample-tool" in output

View File

@@ -24,36 +24,6 @@ from crewai.types.usage_metrics import UsageMetrics
from crewai.utilities import Logger
from crewai.utilities.rpm_controller import RPMController
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
from crewai.llm import LLM
class MockLLM(LLM):
"""Mock LLM for testing."""
def __init__(self):
super().__init__(model="gpt-4") # Use a known model name
def chat_completion(self, messages, tools=None, tool_choice=None, **kwargs):
# Mock a proper response that matches the expected format
if tools and any('output' in tool.get('function', {}).get('name', '') for tool in tools):
return {
"choices": [{
"message": {
"content": None,
"role": "assistant",
"function_call": {
"name": "output",
"arguments": '{"quality": 8.5}'
}
}
}]
}
return {
"choices": [{
"message": {
"content": "Mock LLM Response",
"role": "assistant"
}
}]
}
ceo = Agent(
role="CEO",
@@ -77,60 +47,6 @@ writer = Agent(
)
def test_crew_test_with_custom_llm():
"""Test that Crew.test() works with a custom LLM implementation."""
task = Task(
description="Test task",
expected_output="Test output",
agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[task])
# Test with custom LLM
custom_llm = MockLLM()
crew.test(n_iterations=1, llm=custom_llm)
# No assertion needed as we just verify it runs without errors
def test_crew_test_backward_compatibility():
"""Test that Crew.test() maintains backward compatibility with openai_model_name."""
task = Task(
description="Test task",
expected_output="Test output",
agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[task])
# Test with openai_model_name
crew.test(n_iterations=1, openai_model_name="gpt-4")
# No assertion needed as we just verify it runs without errors
def test_crew_test_with_invalid_llm():
"""Test that Crew.test() properly handles invalid LLM inputs."""
task = Task(
description="Test task",
expected_output="Test output",
agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[task])
# Test with invalid LLM type
with pytest.raises(ValueError, match="Unsupported LLM type"):
crew.test(n_iterations=1, llm=123) # type: ignore
def test_crew_test_with_invalid_iterations():
"""Test that Crew.test() validates n_iterations."""
task = Task(
description="Test task",
expected_output="Test output",
agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[task])
# Test with invalid n_iterations
with pytest.raises(ValueError, match="n_iterations must be greater than 0"):
crew.test(n_iterations=0, llm=MockLLM())
def test_crew_config_conditional_requirement():
with pytest.raises(ValueError):
Crew(process=Process.sequential)
@@ -1207,7 +1123,7 @@ def test_kickoff_for_each_empty_input():
assert results == []
@pytest.mark.vcr(filter_headeruvs=["authorization"])
@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_invalid_input():
"""Tests if kickoff_for_each raises TypeError for invalid input types."""
@@ -3209,4 +3125,4 @@ def test_multimodal_agent_live_image_analysis():
# Verify we got a meaningful response
assert isinstance(result.raw, str)
assert len(result.raw) > 100 # Expecting a detailed analysis
assert "error" not in result.raw.lower() # No error messages in response
assert "error" not in result.raw.lower() # No error messages in response

View File

@@ -0,0 +1,83 @@
import os
import tempfile
from pathlib import Path
import pytest
from unittest.mock import patch
from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage
from crewai.memory.storage.kickoff_task_outputs_storage import KickoffTaskOutputsSQLiteStorage
from crewai.memory.storage.base_rag_storage import BaseRAGStorage
from crewai.utilities.paths import get_default_storage_path
class MockRAGStorage(BaseRAGStorage):
"""Mock implementation of BaseRAGStorage for testing."""
def _sanitize_role(self, role: str) -> str:
return role.lower()
def save(self, value, metadata):
pass
def search(self, query, limit=3, filter=None, score_threshold=0.35):
return []
def reset(self):
pass
def _generate_embedding(self, text, metadata=None):
return []
def _initialize_app(self):
pass
def test_default_storage_paths():
"""Test that default storage paths are created correctly."""
ltm_path = get_default_storage_path('ltm')
kickoff_path = get_default_storage_path('kickoff')
rag_path = get_default_storage_path('rag')
assert str(ltm_path).endswith('latest_long_term_memories.db')
assert str(kickoff_path).endswith('latest_kickoff_task_outputs.db')
assert isinstance(rag_path, Path)
def test_custom_storage_paths():
"""Test that custom storage paths are respected."""
with tempfile.TemporaryDirectory() as temp_dir:
custom_path = Path(temp_dir) / 'custom.db'
ltm = LTMSQLiteStorage(storage_path=custom_path)
assert ltm.storage_path == custom_path
kickoff = KickoffTaskOutputsSQLiteStorage(storage_path=custom_path)
assert kickoff.storage_path == custom_path
rag = MockRAGStorage('test', storage_path=custom_path)
assert rag.storage_path == custom_path
def test_directory_creation():
"""Test that storage directories are created automatically."""
with tempfile.TemporaryDirectory() as temp_dir:
test_dir = Path(temp_dir) / 'test_storage'
storage_path = test_dir / 'test.db'
assert not test_dir.exists()
LTMSQLiteStorage(storage_path=storage_path)
assert test_dir.exists()
def test_permission_error():
"""Test that permission errors are handled correctly."""
with tempfile.TemporaryDirectory() as temp_dir:
test_dir = Path(temp_dir) / 'readonly'
test_dir.mkdir()
os.chmod(test_dir, 0o444) # Read-only
storage_path = test_dir / 'test.db'
with pytest.raises((PermissionError, OSError)) as exc_info:
LTMSQLiteStorage(storage_path=storage_path)
# Verify that the error message mentions permission
assert "permission" in str(exc_info.value).lower()
def test_invalid_path():
"""Test that invalid paths raise appropriate errors."""
with pytest.raises(OSError):
# Try to create storage in a non-existent root directory
LTMSQLiteStorage(storage_path=Path('/nonexistent/dir/test.db'))

View File

@@ -23,7 +23,7 @@ class TestCrewEvaluator:
)
crew = Crew(agents=[agent], tasks=[task])
return CrewEvaluator(crew, llm="openai/gpt-4o-mini")
return CrewEvaluator(crew, openai_model_name="gpt-4o-mini")
def test_setup_for_evaluating(self, crew_planner):
crew_planner._setup_for_evaluating()
@@ -45,7 +45,7 @@ class TestCrewEvaluator:
== "Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed"
)
assert agent.verbose is False
assert agent.llm.model == "openai/gpt-4o-mini"
assert agent.llm.model == "gpt-4o-mini"
def test_evaluation_task(self, crew_planner):
evaluator_agent = Agent(

uv.lock (generated, 68 changes)

View File

@@ -1,10 +1,18 @@
version = 1
requires-python = ">=3.10, <3.13"
resolution-markers = [
"python_full_version < '3.11'",
"python_full_version == '3.11.*'",
"python_full_version >= '3.12' and python_full_version < '3.12.4'",
"python_full_version >= '3.12.4'",
"python_full_version < '3.11' and sys_platform == 'darwin'",
"python_full_version < '3.11' and platform_machine == 'aarch64' and sys_platform == 'linux'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.11.*' and sys_platform == 'darwin'",
"python_full_version == '3.11.*' and platform_machine == 'aarch64' and sys_platform == 'linux'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and sys_platform == 'darwin'",
"python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and sys_platform == 'linux'",
"(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.12.4' and sys_platform == 'darwin'",
"python_full_version >= '3.12.4' and platform_machine == 'aarch64' and sys_platform == 'linux'",
"(python_full_version >= '3.12.4' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12.4' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
[[package]]
@@ -300,7 +308,7 @@ name = "build"
version = "1.2.2.post1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "os_name == 'nt'" },
{ name = "colorama", marker = "(os_name == 'nt' and platform_machine != 'aarch64' and sys_platform == 'linux') or (os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "importlib-metadata", marker = "python_full_version < '3.10.2'" },
{ name = "packaging" },
{ name = "pyproject-hooks" },
@@ -535,7 +543,7 @@ name = "click"
version = "8.1.7"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "platform_system == 'Windows'" },
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de", size = 336121 }
wheels = [
@@ -642,7 +650,6 @@ tools = [
[package.dev-dependencies]
dev = [
{ name = "cairosvg" },
{ name = "crewai-tools" },
{ name = "mkdocs" },
{ name = "mkdocs-material" },
{ name = "mkdocs-material-extensions" },
@@ -696,7 +703,6 @@ requires-dist = [
[package.metadata.requires-dev]
dev = [
{ name = "cairosvg", specifier = ">=2.7.1" },
{ name = "crewai-tools", specifier = ">=0.17.0" },
{ name = "mkdocs", specifier = ">=1.4.3" },
{ name = "mkdocs-material", specifier = ">=9.5.7" },
{ name = "mkdocs-material-extensions", specifier = ">=1.3.1" },
@@ -2462,7 +2468,7 @@ version = "1.6.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "click" },
{ name = "colorama", marker = "platform_system == 'Windows'" },
{ name = "colorama", marker = "sys_platform == 'win32'" },
{ name = "ghp-import" },
{ name = "jinja2" },
{ name = "markdown" },
@@ -2643,7 +2649,7 @@ version = "2.10.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pygments" },
{ name = "pywin32", marker = "platform_system == 'Windows'" },
{ name = "pywin32", marker = "sys_platform == 'win32'" },
{ name = "tqdm" },
]
sdist = { url = "https://files.pythonhosted.org/packages/3a/93/80ac75c20ce54c785648b4ed363c88f148bf22637e10c9863db4fbe73e74/mpire-2.10.2.tar.gz", hash = "sha256:f66a321e93fadff34585a4bfa05e95bd946cf714b442f51c529038eb45773d97", size = 271270 }
@@ -2890,7 +2896,7 @@ name = "nvidia-cudnn-cu12"
version = "9.1.0.70"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
{ name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/9f/fd/713452cd72343f682b1c7b9321e23829f00b842ceaedcda96e742ea0b0b3/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", size = 664752741 },
@@ -2917,9 +2923,9 @@ name = "nvidia-cusolver-cu12"
version = "11.4.5.107"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
{ name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
{ name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/bc/1d/8de1e5c67099015c834315e333911273a8c6aaba78923dd1d1e25fc5f217/nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd", size = 124161928 },
@@ -2930,7 +2936,7 @@ name = "nvidia-cusparse-cu12"
version = "12.1.0.106"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/65/5b/cfaeebf25cd9fdec14338ccb16f6b2c4c7fa9163aefcf057d86b9cc248bb/nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c", size = 195958278 },
@@ -3480,7 +3486,7 @@ name = "portalocker"
version = "2.10.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pywin32", marker = "platform_system == 'Windows'" },
{ name = "pywin32", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ed/d3/c6c64067759e87af98cc668c1cc75171347d0f1577fab7ca3749134e3cd4/portalocker-2.10.1.tar.gz", hash = "sha256:ef1bf844e878ab08aee7e40184156e1151f228f103aa5c6bd0724cc330960f8f", size = 40891 }
wheels = [
@@ -5022,19 +5028,19 @@ dependencies = [
{ name = "fsspec" },
{ name = "jinja2" },
{ name = "networkx" },
{ name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "sympy" },
{ name = "triton", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" },
{ name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "typing-extensions" },
]
wheels = [
@@ -5081,7 +5087,7 @@ name = "tqdm"
version = "4.66.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "platform_system == 'Windows'" },
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/58/83/6ba9844a41128c62e810fddddd72473201f3eacde02046066142a2d96cc5/tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad", size = 169504 }
wheels = [
@@ -5124,7 +5130,7 @@ version = "0.27.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "attrs" },
{ name = "cffi", marker = "implementation_name != 'pypy' and os_name == 'nt'" },
{ name = "cffi", marker = "(implementation_name != 'pypy' and os_name == 'nt' and platform_machine != 'aarch64' and sys_platform == 'linux') or (implementation_name != 'pypy' and os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "exceptiongroup", marker = "python_full_version < '3.11'" },
{ name = "idna" },
{ name = "outcome" },
@@ -5155,7 +5161,7 @@ name = "triton"
version = "3.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "filelock", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
{ name = "filelock", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/45/27/14cc3101409b9b4b9241d2ba7deaa93535a217a211c86c4cc7151fb12181/triton-3.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e1efef76935b2febc365bfadf74bcb65a6f959a9872e5bddf44cc9e0adce1e1a", size = 209376304 },