Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-03 13:18:29 +00:00

Compare commits: 4 commits, fix-langua...devin/1744
| Author | SHA1 | Date |
|---|---|---|
| | 448c3e8b67 | |
| | 8c9b8fff84 | |
| | afa71f9f5e | |
| | ebd0803c0c | |
.gitignore (3 changes)

@@ -25,5 +25,4 @@ agentops.log
test_flow.html
crewairules.mdc
plan.md
conceptual_plan.md
build_image
conceptual_plan.md
@@ -23,7 +23,8 @@ The `Crew` class has been enriched with several attributes to support advanced f
| **Process Flow** (`process`) | Defines execution logic (e.g., sequential, hierarchical) for task distribution. |
| **Verbose Logging** (`verbose`) | Provides detailed logging for monitoring and debugging. Accepts integer and boolean values to control verbosity level. |
| **Rate Limiting** (`max_rpm`) | Limits requests per minute to optimize resource usage. Setting guidelines depend on task complexity and load. |
| **Internationalization / Customization** (`prompt_file`) | Supports prompt customization for global usability. [Example of file](https://github.com/joaomdmoura/crewAI/blob/main/src/crewai/translations/en.json) |
| **Internationalization / Customization** (`language`, `prompt_file`) | Supports prompt customization for global usability. [Example of file](https://github.com/joaomdmoura/crewAI/blob/main/src/crewai/translations/en.json) |
| **Execution and Output Handling** (`full_output`) | Controls output granularity, distinguishing between full and final outputs. |
| **Callback and Telemetry** (`step_callback`, `task_callback`) | Enables step-wise and task-level execution monitoring and telemetry for performance analytics. |
| **Crew Sharing** (`share_crew`) | Allows sharing crew data with CrewAI for model improvement. Privacy implications and benefits should be considered. |
| **Usage Metrics** (`usage_metrics`) | Logs all LLM usage metrics during task execution for performance insights. |
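Not part of the diff: a minimal sketch of how several of the attributes in the table above might be combined on one `Crew`, assuming a placeholder agent and task. The attribute names come from the documentation rows; the concrete values, and the `language="en"` setting introduced by this change, are illustrative only, and the single-argument `step_callback` signature is an assumption.

```python
from crewai import Agent, Crew, Process, Task

# Hypothetical agent/task purely for illustration.
researcher = Agent(
    role="Researcher",
    goal="Collect facts about a topic",
    backstory="A meticulous analyst.",
)
research = Task(
    description="Summarize the latest findings on the topic.",
    expected_output="A short summary.",
    agent=researcher,
)

def log_step(step):
    # step_callback receives each intermediate agent step (signature assumed).
    print("agent step:", step)

crew = Crew(
    agents=[researcher],
    tasks=[research],
    process=Process.sequential,  # process flow
    verbose=True,                # verbose logging
    max_rpm=10,                  # rate limiting
    language="en",               # internationalization (added by this change)
    full_output=True,            # return all task outputs, not just the final one
    step_callback=log_step,      # step-wise monitoring
)

result = crew.kickoff()
print(crew.usage_metrics)        # LLM usage metrics collected during execution
```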
@@ -48,4 +49,4 @@ Consider a crew with a researcher agent tasked with data gathering and a writer

## Conclusion

The integration of advanced attributes and functionalities into the CrewAI framework significantly enriches the agent collaboration ecosystem. These enhancements not only simplify interactions but also offer unprecedented flexibility and control, paving the way for sophisticated AI-driven solutions capable of tackling complex tasks through intelligent collaboration and delegation.
@@ -20,10 +20,13 @@ A crew in crewAI represents a collaborative group of agents working together to
| **Function Calling LLM** _(optional)_ | `function_calling_llm` | If passed, the crew will use this LLM to do function calling for tools for all agents in the crew. Each agent can have its own LLM, which overrides the crew's LLM for function calling. |
| **Config** _(optional)_ | `config` | Optional configuration settings for the crew, in `Json` or `Dict[str, Any]` format. |
| **Max RPM** _(optional)_ | `max_rpm` | Maximum requests per minute the crew adheres to during execution. Defaults to `None`. |
| **Language** _(optional)_ | `language` | Language used for the crew, defaults to English. |
| **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
| **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
| **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
| **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
| **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all tasks outputs or just the final output. Defaults to `False`. |
| **Step Callback** _(optional)_ | `step_callback` | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
| **Task Callback** _(optional)_ | `task_callback` | A function that is called after the completion of each task. Useful for monitoring or additional operations post-task execution. |
| **Share Crew** _(optional)_ | `share_crew` | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
@@ -18,8 +18,7 @@ reason, and learn from past interactions.
| **Long-Term Memory** | Preserves valuable insights and learnings from past executions, allowing agents to build and refine their knowledge over time. |
| **Entity Memory** | Captures and organizes information about entities (people, places, concepts) encountered during tasks, facilitating deeper understanding and relationship mapping. Uses `RAG` for storing entity information. |
| **Contextual Memory** | Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
| **External Memory** | Enables integration with external memory systems and providers (like Mem0), allowing for specialized memory storage and retrieval across different applications. Supports custom storage implementations for flexible memory management. |
| **User Memory** | ⚠️ **DEPRECATED**: This component is deprecated and will be removed in a future version. Please use [External Memory](#using-external-memory) instead. |
| **User Memory** | Stores user-specific information and preferences, enhancing personalization and user experience. |
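Not part of the diff: a minimal sketch of turning on the built-in memory systems described in the table above, assuming the documented default OpenAI embedder; the agent and task are placeholders.

```python
from crewai import Agent, Crew, Process, Task

# Illustrative agent/task; any existing crew definition works the same way.
analyst = Agent(role="Analyst", goal="Answer questions", backstory="Keeps notes.")
answer = Task(
    description="Answer the user's question.",
    expected_output="An answer.",
    agent=analyst,
)

crew = Crew(
    agents=[analyst],
    tasks=[answer],
    process=Process.sequential,
    memory=True,                      # enables short-term, long-term, and entity memory
    embedder={"provider": "openai"},  # the documented default, shown here explicitly
)
```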
## How Memory Systems Empower Agents
@@ -275,102 +274,6 @@ crew = Crew(
)
```

### Using External Memory

External Memory is a powerful feature that allows you to integrate external memory systems with your CrewAI applications. This is particularly useful when you want to use specialized memory providers or maintain memory across different applications.

#### Basic Usage with Mem0

The most common way to use External Memory is with Mem0 as the provider:

```python
from crewai import Agent, Crew, Process, Task
from crewai.memory.external.external_memory import ExternalMemory

agent = Agent(
    role="You are a helpful assistant",
    goal="Plan a vacation for the user",
    backstory="You are a helpful assistant that can plan a vacation for the user",
    verbose=True,
)
task = Task(
    description="Give things related to the user's vacation",
    expected_output="A plan for the vacation",
    agent=agent,
)

crew = Crew(
    agents=[agent],
    tasks=[task],
    verbose=True,
    process=Process.sequential,
    memory=True,
    external_memory=ExternalMemory(
        embedder_config={"provider": "mem0", "config": {"user_id": "U-123"}}  # you can provide an entire Mem0 configuration
    ),
)

crew.kickoff(
    inputs={"question": "which destination is better for a beach vacation?"}
)
```

#### Using External Memory with Custom Storage

You can also create custom storage implementations for External Memory. Here's an example of how to create a custom storage:

```python
from crewai import Agent, Crew, Process, Task
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.storage.interface import Storage


class CustomStorage(Storage):
    def __init__(self):
        self.memories = []

    def save(self, value, metadata=None, agent=None):
        self.memories.append({"value": value, "metadata": metadata, "agent": agent})

    def search(self, query, limit=10, score_threshold=0.5):
        # Implement your search logic here
        return []

    def reset(self):
        self.memories = []


# Create external memory with custom storage
external_memory = ExternalMemory(
    storage=CustomStorage(),
    embedder_config={"provider": "mem0", "config": {"user_id": "U-123"}},
)

agent = Agent(
    role="You are a helpful assistant",
    goal="Plan a vacation for the user",
    backstory="You are a helpful assistant that can plan a vacation for the user",
    verbose=True,
)
task = Task(
    description="Give things related to the user's vacation",
    expected_output="A plan for the vacation",
    agent=agent,
)

crew = Crew(
    agents=[agent],
    tasks=[task],
    verbose=True,
    process=Process.sequential,
    memory=True,
    external_memory=external_memory,
)

crew.kickoff(
    inputs={"question": "which destination is better for a beach vacation?"}
)
```

## Additional Embedding Providers
@@ -22,16 +22,7 @@ usage of tools, API calls, responses, any data processed by the agents, or secre
When the `share_crew` feature is enabled, detailed data including task descriptions, agents' backstories or goals, and other specific attributes are collected
to provide deeper insights. This expanded data collection may include personal information if users have incorporated it into their crews or tasks.
Users should carefully consider the content of their crews and tasks before enabling `share_crew`.
Users can disable telemetry by setting the environment variable `CREWAI_DISABLE_TELEMETRY` to `true` or by setting `OTEL_SDK_DISABLED` to `true` (note that the latter disables all OpenTelemetry instrumentation globally).

### Examples:
```python
import os

# Disable CrewAI telemetry only
os.environ['CREWAI_DISABLE_TELEMETRY'] = 'true'

# Disable all OpenTelemetry (including CrewAI)
os.environ['OTEL_SDK_DISABLED'] = 'true'
```
Users can disable telemetry by setting the environment variable `OTEL_SDK_DISABLED` to `true`.

### Data Explanation:
| Defaulted | Data | Reason and Specifics |

@@ -64,4 +55,4 @@ This enables a deeper insight into usage patterns.
<Warning>
If you enable `share_crew`, the collected data may include personal information if it has been incorporated into crew configurations, task descriptions, or outputs.
Users should carefully review their data and ensure compliance with GDPR and other applicable privacy regulations before enabling this feature.
</Warning>
@@ -45,7 +45,7 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"

[project.optional-dependencies]
tools = ["crewai-tools~=0.40.1"]
tools = ["crewai-tools~=0.38.0"]
embeddings = [
    "tiktoken~=0.7.0"
]
@@ -2,14 +2,12 @@ import warnings

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput

warnings.filterwarnings(
    "ignore",
@@ -21,12 +19,10 @@ __version__ = "0.108.0"
__all__ = [
    "Agent",
    "Crew",
    "CrewOutput",
    "Process",
    "Task",
    "LLM",
    "BaseLLM",
    "Flow",
    "Knowledge",
    "TaskOutput",
]
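Not part of the diff: the removed public-API import test further down in this change exercises exactly the exports listed above; a minimal equivalent check looks like this.

```python
# CrewOutput and TaskOutput are part of __all__ above, so they can be
# imported straight from the package root.
from crewai import CrewOutput, TaskOutput

assert CrewOutput is not None
assert TaskOutput is not None
```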
@@ -207,7 +207,6 @@ class Agent(BaseAgent):
                self.crew._long_term_memory,
                self.crew._entity_memory,
                self.crew._user_memory,
                self.crew._external_memory,
            )
            memory = contextual_memory.build_context_for_task(task, context)
            if memory.strip() != "":
@@ -47,27 +47,6 @@ class CrewAgentExecutorMixin:
            print(f"Failed to add to short term memory: {e}")
            pass

    def _create_external_memory(self, output) -> None:
        """Create and save an external memory item if conditions are met."""
        if (
            self.crew
            and self.agent
            and self.task
            and hasattr(self.crew, "_external_memory")
            and self.crew._external_memory
        ):
            try:
                self.crew._external_memory.save(
                    value=output.text,
                    metadata={
                        "description": self.task.description,
                    },
                    agent=self.agent.role,
                )
            except Exception as e:
                print(f"Failed to add to external memory: {e}")
                pass

    def _create_long_term_memory(self, output) -> None:
        """Create and save long-term and entity memory items based on evaluation."""
        if (
@@ -129,7 +129,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

        self._create_short_term_memory(formatted_answer)
        self._create_long_term_memory(formatted_answer)
        self._create_external_memory(formatted_answer)
        return {"output": formatted_answer.output}

    def _invoke_loop(self) -> AgentFinish:
@@ -3,10 +3,6 @@ import subprocess
import click


# Be mindful about changing this.
# On some environments we don't use this command but instead run uv sync directly,
# so if you expect this to support more things you will need to replicate it there.
# Ask @joaomdmoura if you are unsure.
def install_crew(proxy_options: list[str]) -> None:
    """
    Install the crew by running the UV command to lock and install.
@@ -28,7 +28,6 @@ from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.llm import LLM, BaseLLM
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.memory.user.user_memory import UserMemory
@@ -106,7 +105,6 @@ class Crew(BaseModel):
    _long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
    _entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr()
    _user_memory: Optional[InstanceOf[UserMemory]] = PrivateAttr()
    _external_memory: Optional[InstanceOf[ExternalMemory]] = PrivateAttr()
    _train: Optional[bool] = PrivateAttr(default=False)
    _train_iteration: Optional[int] = PrivateAttr()
    _inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)
@@ -147,10 +145,6 @@ class Crew(BaseModel):
        default=None,
        description="An instance of the UserMemory to be used by the Crew to store/fetch memories of a specific user.",
    )
    external_memory: Optional[InstanceOf[ExternalMemory]] = Field(
        default=None,
        description="An Instance of the ExternalMemory to be used by the Crew",
    )
    embedder: Optional[dict] = Field(
        default=None,
        description="Configuration for the embedder to be used for the crew.",
@@ -197,6 +191,14 @@ class Crew(BaseModel):
        default=None,
        description="Path to the prompt json file to be used for the crew.",
    )
    language: Optional[str] = Field(
        default="en",
        description="Language used for the crew, defaults to English.",
    )
    language_file: Optional[str] = Field(
        default=None,
        description="Path to the language file to be used for the crew.",
    )
    output_log_file: Optional[Union[bool, str]] = Field(
        default=None,
        description="Path to the log file to be saved",
@@ -295,12 +297,6 @@ class Crew(BaseModel):
            if self.entity_memory
            else EntityMemory(crew=self, embedder_config=self.embedder)
        )
        self._external_memory = (
            # External memory doesn't support a default value since it was designed to be managed entirely externally
            self.external_memory.set_crew(self)
            if self.external_memory
            else None
        )
        if (
            self.memory_config
            and "user_memory" in self.memory_config
@@ -621,7 +617,7 @@ class Crew(BaseModel):
        self._interpolate_inputs(inputs)
        self._set_tasks_callbacks()

        i18n = I18N(prompt_file=self.prompt_file)
        i18n = self._create_i18n()

        for agent in self.agents:
            agent.i18n = i18n
@@ -762,8 +758,17 @@ class Crew(BaseModel):
            self._create_manager_agent()
        return self._execute_tasks(self.tasks)

    def _create_i18n(self) -> I18N:
        """
        Create an I18N instance with the crew's configuration.

        Returns:
            I18N: An internationalization instance configured with the crew's settings.
        """
        return I18N(prompt_file=self.prompt_file, language=self.language)

    def _create_manager_agent(self):
        i18n = I18N(prompt_file=self.prompt_file)
        i18n = self._create_i18n()
        if self.manager_agent is not None:
            self.manager_agent.allow_delegation = True
            manager = self.manager_agent
@@ -1179,7 +1184,6 @@ class Crew(BaseModel):
            "_short_term_memory",
            "_long_term_memory",
            "_entity_memory",
            "_external_memory",
            "_telemetry",
            "agents",
            "tasks",
@@ -1326,15 +1330,7 @@ class Crew(BaseModel):
            RuntimeError: If memory reset operation fails.
        """
        VALID_TYPES = frozenset(
            [
                "long",
                "short",
                "entity",
                "knowledge",
                "kickoff_outputs",
                "all",
                "external",
            ]
            ["long", "short", "entity", "knowledge", "kickoff_outputs", "all"]
        )

        if command_type not in VALID_TYPES:
@@ -1360,7 +1356,6 @@ class Crew(BaseModel):
        memory_systems = [
            ("short term", getattr(self, "_short_term_memory", None)),
            ("entity", getattr(self, "_entity_memory", None)),
            ("external", getattr(self, "_external_memory", None)),
            ("long term", getattr(self, "_long_term_memory", None)),
            ("task output", getattr(self, "_task_output_handler", None)),
            ("knowledge", getattr(self, "knowledge", None)),
@@ -1388,7 +1383,6 @@ class Crew(BaseModel):
            "entity": (self._entity_memory, "entity"),
            "knowledge": (self.knowledge, "knowledge"),
            "kickoff_outputs": (self._task_output_handler, "task output"),
            "external": (self._external_memory, "external"),
        }

        memory_system, name = reset_functions[memory_type]
@@ -2,12 +2,5 @@ from .entity.entity_memory import EntityMemory
from .long_term.long_term_memory import LongTermMemory
from .short_term.short_term_memory import ShortTermMemory
from .user.user_memory import UserMemory
from .external.external_memory import ExternalMemory

__all__ = [
    "UserMemory",
    "EntityMemory",
    "LongTermMemory",
    "ShortTermMemory",
    "ExternalMemory",
]
__all__ = ["UserMemory", "EntityMemory", "LongTermMemory", "ShortTermMemory"]
@@ -1,12 +1,6 @@
from typing import Any, Dict, Optional

from crewai.memory import (
    EntityMemory,
    ExternalMemory,
    LongTermMemory,
    ShortTermMemory,
    UserMemory,
)
from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory, UserMemory


class ContextualMemory:
@@ -17,7 +11,6 @@ class ContextualMemory:
        ltm: LongTermMemory,
        em: EntityMemory,
        um: UserMemory,
        exm: ExternalMemory,
    ):
        if memory_config is not None:
            self.memory_provider = memory_config.get("provider")
@@ -27,7 +20,6 @@ class ContextualMemory:
        self.ltm = ltm
        self.em = em
        self.um = um
        self.exm = exm

    def build_context_for_task(self, task, context) -> str:
        """
@@ -43,7 +35,6 @@ class ContextualMemory:
        context.append(self._fetch_ltm_context(task.description))
        context.append(self._fetch_stm_context(query))
        context.append(self._fetch_entity_context(query))
        context.append(self._fetch_external_context(query))
        if self.memory_provider == "mem0":
            context.append(self._fetch_user_context(query))
        return "\n".join(filter(None, context))
@@ -115,24 +106,3 @@ class ContextualMemory:
            f"- {result['memory']}" for result in user_memories
        )
        return f"User memories/preferences:\n{formatted_memories}"

    def _fetch_external_context(self, query: str) -> str:
        """
        Fetches and formats relevant information from External Memory.
        Args:
            query (str): The search query to find relevant information.
        Returns:
            str: Formatted information as bullet points, or an empty string if none found.
        """
        if self.exm is None:
            return ""

        external_memories = self.exm.search(query)

        if not external_memories:
            return ""

        formatted_memories = "\n".join(
            f"- {result['memory']}" for result in external_memories
        )
        return f"External memories:\n{formatted_memories}"
src/crewai/memory/external/__init__.py (0 changes)

src/crewai/memory/external/external_memory.py (61 changes)
@@ -1,61 +0,0 @@
from typing import TYPE_CHECKING, Any, Dict, Optional, Self

from crewai.memory.external.external_memory_item import ExternalMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.interface import Storage

if TYPE_CHECKING:
    from crewai.memory.storage.mem0_storage import Mem0Storage


class ExternalMemory(Memory):
    def __init__(self, storage: Optional[Storage] = None, **data: Any):
        super().__init__(storage=storage, **data)

    @staticmethod
    def _configure_mem0(crew: Any, config: Dict[str, Any]) -> "Mem0Storage":
        from crewai.memory.storage.mem0_storage import Mem0Storage

        return Mem0Storage(type="external", crew=crew, config=config)

    @staticmethod
    def external_supported_storages() -> Dict[str, Any]:
        return {
            "mem0": ExternalMemory._configure_mem0,
        }

    @staticmethod
    def create_storage(crew: Any, embedder_config: Optional[Dict[str, Any]]) -> Storage:
        if not embedder_config:
            raise ValueError("embedder_config is required")

        if "provider" not in embedder_config:
            raise ValueError("embedder_config must include a 'provider' key")

        provider = embedder_config["provider"]
        supported_storages = ExternalMemory.external_supported_storages()
        if provider not in supported_storages:
            raise ValueError(f"Provider {provider} not supported")

        return supported_storages[provider](crew, embedder_config.get("config", {}))

    def save(
        self,
        value: Any,
        metadata: Optional[Dict[str, Any]] = None,
        agent: Optional[str] = None,
    ) -> None:
        """Saves a value into the external storage."""
        item = ExternalMemoryItem(value=value, metadata=metadata, agent=agent)
        super().save(value=item.value, metadata=item.metadata, agent=item.agent)

    def reset(self) -> None:
        self.storage.reset()

    def set_crew(self, crew: Any) -> Self:
        super().set_crew(crew)

        if not self.storage:
            self.storage = self.create_storage(crew, self.embedder_config)

        return self
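Not part of the diff: a small sketch of exercising the `ExternalMemory` class shown above on its own, reusing the Mem0 `embedder_config` shape from the docs section of this change; the `user_id` is illustrative.

```python
from crewai.memory.external.external_memory import ExternalMemory

# Build the memory with an embedder_config; with no explicit storage,
# set_crew() later resolves the storage via create_storage() for the "mem0" provider.
external_memory = ExternalMemory(
    embedder_config={"provider": "mem0", "config": {"user_id": "U-123"}}
)

# An unsupported provider raises, mirroring create_storage()'s validation.
try:
    ExternalMemory.create_storage(None, {"provider": "not-a-provider"})
except ValueError as exc:
    print(exc)  # "Provider not-a-provider not supported"
```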
@@ -1,13 +0,0 @@
from typing import Any, Dict, Optional


class ExternalMemoryItem:
    def __init__(
        self,
        value: Any,
        metadata: Optional[Dict[str, Any]] = None,
        agent: Optional[str] = None,
    ):
        self.value = value
        self.metadata = metadata
        self.agent = agent
@@ -1,4 +1,4 @@
from typing import Any, Dict, List, Optional, Self
from typing import Any, Dict, List, Optional

from pydantic import BaseModel

@@ -9,7 +9,6 @@ class Memory(BaseModel):
    """

    embedder_config: Optional[Dict[str, Any]] = None
    crew: Optional[Any] = None

    storage: Any

@@ -37,7 +36,3 @@ class Memory(BaseModel):
        return self.storage.search(
            query=query, limit=limit, score_threshold=score_threshold
        )

    def set_crew(self, crew: Any) -> Self:
        self.crew = crew
        return self
@@ -11,20 +11,15 @@ class Mem0Storage(Storage):
    Extends Storage to handle embedding and searching across entities using Mem0.
    """

    def __init__(self, type, crew=None, config=None):
    def __init__(self, type, crew=None):
        super().__init__()
        supported_types = ["user", "short_term", "long_term", "entities", "external"]
        if type not in supported_types:
            raise ValueError(
                f"Invalid type '{type}' for Mem0Storage. Must be one of: "
                + ", ".join(supported_types)
            )

        if type not in ["user", "short_term", "long_term", "entities"]:
            raise ValueError("Invalid type for Mem0Storage. Must be 'user' or 'agent'.")

        self.memory_type = type
        self.crew = crew
        self.config = config or {}
        # TODO: Memory config will be removed in the future; the config will be passed as a parameter
        self.memory_config = self.config or getattr(crew, "memory_config", {}) or {}
        self.memory_config = crew.memory_config

        # User ID is required for user memory type "user" since it's used as a unique identifier for the user.
        user_id = self._get_user_id()
@@ -32,7 +27,7 @@ class Mem0Storage(Storage):
            raise ValueError("User ID is required for user memory type")

        # API key in memory config overrides the environment variable
        config = self._get_config()
        config = self.memory_config.get("config", {})
        mem0_api_key = config.get("api_key") or os.getenv("MEM0_API_KEY")
        mem0_org_id = config.get("org_id")
        mem0_project_id = config.get("project_id")
@@ -61,34 +56,26 @@ class Mem0Storage(Storage):
    def save(self, value: Any, metadata: Dict[str, Any]) -> None:
        user_id = self._get_user_id()
        agent_name = self._get_agent_name()
        params = None
        if self.memory_type == "short_term":
            params = {
                "agent_id": agent_name,
                "infer": False,
                "metadata": {"type": "short_term", **metadata},
            }
        if self.memory_type == "user":
            self.memory.add(value, user_id=user_id, metadata={**metadata})
        elif self.memory_type == "short_term":
            agent_name = self._get_agent_name()
            self.memory.add(
                value, agent_id=agent_name, metadata={"type": "short_term", **metadata}
            )
        elif self.memory_type == "long_term":
            params = {
                "agent_id": agent_name,
                "infer": False,
                "metadata": {"type": "long_term", **metadata},
            }
            agent_name = self._get_agent_name()
            self.memory.add(
                value,
                agent_id=agent_name,
                infer=False,
                metadata={"type": "long_term", **metadata},
            )
        elif self.memory_type == "entities":
            params = {
                "agent_id": agent_name,
                "infer": False,
                "metadata": {"type": "entity", **metadata},
            }
        elif self.memory_type == "external":
            params = {
                "user_id": user_id,
                "agent_id": agent_name,
                "metadata": {"type": "external", **metadata},
            }

        if params:
            self.memory.add(value, **params | {"output_format": "v1.1"})
            entity_name = self._get_agent_name()
            self.memory.add(
                value, user_id=entity_name, metadata={"type": "entity", **metadata}
            )

    def search(
        self,
@@ -97,43 +84,41 @@ class Mem0Storage(Storage):
        score_threshold: float = 0.35,
    ) -> List[Any]:
        params = {"query": query, "limit": limit}
        if user_id := self._get_user_id():
        if self.memory_type == "user":
            user_id = self._get_user_id()
            params["user_id"] = user_id

        agent_name = self._get_agent_name()
        if self.memory_type == "short_term":
        elif self.memory_type == "short_term":
            agent_name = self._get_agent_name()
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "short_term"}
        elif self.memory_type == "long_term":
            agent_name = self._get_agent_name()
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "long_term"}
        elif self.memory_type == "entities":
            agent_name = self._get_agent_name()
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "entity"}
        elif self.memory_type == "external":
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "external"}

        # Discard the filters for now since we create the filters
        # automatically when the crew is created.
        results = self.memory.search(**params)
        return [r for r in results if r["score"] >= score_threshold]

    def _get_user_id(self) -> str:
        return self._get_config().get("user_id", "")
    def _get_user_id(self):
        if self.memory_type == "user":
            if hasattr(self, "memory_config") and self.memory_config is not None:
                return self.memory_config.get("config", {}).get("user_id")
            else:
                return None
        return None

    def _get_agent_name(self) -> str:
        if not self.crew:
            return ""

        agents = self.crew.agents
    def _get_agent_name(self):
        agents = self.crew.agents if self.crew else []
        agents = [self._sanitize_role(agent.role) for agent in agents]
        agents = "_".join(agents)
        return agents

    def _get_config(self) -> Dict[str, Any]:
        return self.config or getattr(self, "memory_config", {}).get("config", {}) or {}

    def reset(self):
        if self.memory:
            self.memory.reset()
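Not part of the diff: a toy illustration (not CrewAI API) of the precedence implemented by `_get_config()` above, where an explicit `config` argument wins over the crew-level `memory_config`; the dictionaries are made up.

```python
# Mirror of the precedence expression in Mem0Storage._get_config():
# an explicit config beats crew.memory_config["config"], which beats {}.
def resolve_config(explicit_config, crew_memory_config):
    return explicit_config or (crew_memory_config or {}).get("config", {}) or {}

crew_level = {"provider": "mem0", "config": {"user_id": "test_user", "api_key": "crew-key"}}

print(resolve_config({"api_key": "new-api-key"}, crew_level))  # explicit config wins
print(resolve_config(None, crew_level))                        # falls back to crew config
print(resolve_config(None, None))                              # nothing configured -> {}
```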
@@ -1,4 +1,3 @@
import warnings
from typing import Any, Dict, Optional

from crewai.memory.memory import Memory
@@ -13,12 +12,6 @@ class UserMemory(Memory):
    """

    def __init__(self, crew=None):
        warnings.warn(
            "UserMemory is deprecated and will be removed in a future version. "
            "Please use ExternalMemory instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        try:
            from crewai.memory.storage.mem0_storage import Mem0Storage
        except ImportError:
@@ -55,4 +48,6 @@ class UserMemory(Memory):
        try:
            self.storage.reset()
        except Exception as e:
            raise Exception(f"An error occurred while resetting the user memory: {e}")
            raise Exception(
                f"An error occurred while resetting the user memory: {e}"
            )
@@ -137,11 +137,13 @@ def CrewBase(cls: T) -> T:
                all_functions, "is_cache_handler"
            )
            callbacks = self._filter_functions(all_functions, "is_callback")
            agents = self._filter_functions(all_functions, "is_agent")

            for agent_name, agent_info in self.agents_config.items():
                self._map_agent_variables(
                    agent_name,
                    agent_info,
                    agents,
                    llms,
                    tool_functions,
                    cache_handler_functions,
@@ -152,6 +154,7 @@ def CrewBase(cls: T) -> T:
            self,
            agent_name: str,
            agent_info: Dict[str, Any],
            agents: Dict[str, Callable],
            llms: Dict[str, Callable],
            tool_functions: Dict[str, Callable],
            cache_handler_functions: Dict[str, Callable],
@@ -169,10 +172,9 @@ def CrewBase(cls: T) -> T:
            ]

            if function_calling_llm := agent_info.get("function_calling_llm"):
                try:
                    self.agents_config[agent_name]["function_calling_llm"] = llms[function_calling_llm]()
                except KeyError:
                    self.agents_config[agent_name]["function_calling_llm"] = function_calling_llm
                self.agents_config[agent_name]["function_calling_llm"] = agents[
                    function_calling_llm
                ]()

            if step_callback := agent_info.get("step_callback"):
                self.agents_config[agent_name]["step_callback"] = callbacks[
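Not part of the diff: a toy stand-in (not CrewAI API) for the lookup added to `_map_agent_variables` above: if the `function_calling_llm` value from `agents.yaml` names an `@llm`-decorated method it is called, otherwise the raw string is kept and later treated as a model name.

```python
# Toy version of the try/except KeyError mapping in the diff above.
def resolve_function_calling_llm(name, llm_factories):
    try:
        return llm_factories[name]()
    except KeyError:
        return name

factories = {"local_llm": lambda: "LLM(model='openai/model_name')"}
print(resolve_function_calling_llm("local_llm", factories))   # factory result
print(resolve_function_calling_llm("online_llm", factories))  # falls back to the raw string
```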
@@ -45,10 +45,10 @@ class Telemetry:
    """

    def __init__(self):
        self.ready: bool = False
        self.trace_set: bool = False
        self.ready = False
        self.trace_set = False

        if self._is_telemetry_disabled():
        if os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true":
            return

        try:
@@ -75,13 +75,6 @@ class Telemetry:
        ):
            raise  # Re-raise the exception to not interfere with system signals
        self.ready = False

    def _is_telemetry_disabled(self) -> bool:
        """Check if telemetry should be disabled based on environment variables."""
        return (
            os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true" or
            os.getenv("CREWAI_DISABLE_TELEMETRY", "false").lower() == "true"
        )

    def set_tracer(self):
        if self.ready and not self.trace_set:
@@ -1,11 +1,15 @@
import json
import os
from typing import Dict, Optional, Union
from functools import lru_cache
from pathlib import Path
from typing import Dict, Literal, Optional, Union

from pydantic import BaseModel, Field, PrivateAttr, model_validator

"""Internationalization support for CrewAI prompts and messages."""

SUPPORTED_LANGUAGES = Literal["en", "fr", "es", "pt"]

class I18N(BaseModel):
    """Handles loading and retrieving internationalized prompts."""
    _prompts: Dict[str, Dict[str, str]] = PrivateAttr()
@@ -13,22 +17,57 @@ class I18N(BaseModel):
        default=None,
        description="Path to the prompt_file file to load",
    )
    language: Optional[str] = Field(
        default="en",
        description="Language to use for translations. Defaults to English.",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_language(cls, data):
        """
        Validate the language parameter.

        If the language is not supported, it will fall back to English.
        """
        if isinstance(data, dict) and "language" in data:
            lang = data["language"]
            if lang and lang not in ["en", "fr", "es", "pt"]:
                print(f"Warning: Language '{lang}' not supported. Falling back to English.")
                data["language"] = "en"
        return data

    @model_validator(mode="after")
    def load_prompts(self) -> "I18N":
        """Load prompts from a JSON file."""
        """
        Load prompts from a JSON file.

        If prompt_file is provided, loads from that file.
        Otherwise, attempts to load from the language-specific translation file.
        Falls back to English if the specified language file doesn't exist.

        Raises:
            Exception: If the prompt file is not found or contains invalid JSON.

        Returns:
            I18N: The instance with loaded prompts.
        """
        try:
            if self.prompt_file:
                with open(self.prompt_file, "r", encoding="utf-8") as f:
                    self._prompts = json.load(f)
            else:
                dir_path = os.path.dirname(os.path.realpath(__file__))
                prompts_path = os.path.join(dir_path, "../translations/en.json")

                with open(prompts_path, "r", encoding="utf-8") as f:
                base_path = Path(__file__).parent / "../translations"
                lang = self.language or "en"
                lang_file = base_path / f"{lang}.json"

                if not lang_file.exists():
                    lang_file = base_path / "en.json"

                with open(lang_file.resolve(), "r", encoding="utf-8") as f:
                    self._prompts = json.load(f)
        except FileNotFoundError:
            raise Exception(f"Prompt file '{self.prompt_file}' not found.")
            raise Exception(f"Prompt file '{self.prompt_file or lang_file}' not found.")
        except json.JSONDecodeError:
            raise Exception("Error decoding JSON from the prompts file.")

@@ -38,15 +77,31 @@ class I18N(BaseModel):
        return self

    def slice(self, slice: str) -> str:
        """Get a slice prompt by key."""
        return self.retrieve("slices", slice)

    def errors(self, error: str) -> str:
        """Get an error message by key."""
        return self.retrieve("errors", error)

    def tools(self, tool: str) -> Union[str, Dict[str, str]]:
        """Get a tool prompt by key."""
        return self.retrieve("tools", tool)

    def retrieve(self, kind, key) -> str:
    def retrieve(self, kind: str, key: str) -> str:
        """
        Retrieve a prompt by section and key.

        Args:
            kind: The section in the prompts file (e.g., "slices", "errors")
            key: The specific key within the section

        Returns:
            The prompt text

        Raises:
            Exception: If the prompt is not found
        """
        try:
            return self._prompts[kind][key]
        except Exception as _:
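Not part of the diff: a minimal sketch of the language handling added to `I18N` above, assuming the bundled `en` translation file and the fallback behaviour of `validate_language`/`load_prompts`; the printed values are illustrative.

```python
from crewai.utilities.i18n import I18N

# A supported language loads translations/<lang>.json, falling back to en.json
# inside load_prompts() if that file is missing.
fr = I18N(language="fr")
print(fr.slice("role_playing"))

# An unsupported language prints a warning in validate_language() and
# falls back to English, so prompts still resolve.
fallback = I18N(language="nonexistent")
print(fallback.language)                                 # "en"
print(isinstance(fallback.slice("role_playing"), str))   # True
```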
@@ -8,7 +8,6 @@ researcher:
    developments in {topic}. Known for your ability to find the most relevant
    information and present it in a clear and concise manner.
  verbose: true
  function_calling_llm: "local_llm"

reporting_analyst:
  role: >
@@ -19,5 +18,4 @@ reporting_analyst:
    You're a meticulous analyst with a keen eye for detail. You're known for
    your ability to turn complex data into clear and concise reports, making
    it easy for others to understand and act on the information you provide.
  verbose: true
  function_calling_llm: "online_llm"
  verbose: true
tests/crew_language_test.py (new file, 41 lines)
@@ -0,0 +1,41 @@
from unittest.mock import patch

import pytest

from crewai import Agent, Crew, Process, Task
from crewai.utilities.i18n import I18N


def test_crew_with_language():
    i18n = I18N(language="en")

    agent = Agent(
        role="Test Agent",
        goal="Test Goal",
        backstory="Test Backstory",
        verbose=True
    )

    task = Task(
        description="Test Task",
        expected_output="Test Output",
        agent=agent
    )

    with patch('crewai.crew.I18N') as mock_i18n:
        mock_i18n.return_value = i18n

        crew = Crew(
            agents=[agent],
            tasks=[task],
            process=Process.sequential,
            verbose=True,
            language="fr"  # Use French as an example
        )

        with patch.object(crew, '_run_sequential_process'):
            with patch.object(crew, '_set_tasks_callbacks'):
                with patch('crewai.agent.Agent.create_agent_executor'):
                    crew.kickoff()

                    mock_i18n.assert_called_with(prompt_file=None, language="fr")
@@ -1,15 +0,0 @@
"""Test that all public API classes are properly importable."""


def test_task_output_import():
    """Test that TaskOutput can be imported from crewai."""
    from crewai import TaskOutput

    assert TaskOutput is not None


def test_crew_output_import():
    """Test that CrewOutput can be imported from crewai."""
    from crewai import CrewOutput

    assert CrewOutput is not None
File diff suppressed because one or more lines are too long (4 files).
tests/memory/external/test_external_memory.py (180 changes)
@@ -1,180 +0,0 @@
from unittest.mock import MagicMock, patch

import pytest
from mem0.memory.main import Memory

from crewai.agent import Agent
from crewai.crew import Crew, Process
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.external.external_memory_item import ExternalMemoryItem
from crewai.memory.storage.interface import Storage
from crewai.task import Task


@pytest.fixture
def mock_mem0_memory():
    mock_memory = MagicMock(spec=Memory)
    return mock_memory


@pytest.fixture
def patch_configure_mem0(mock_mem0_memory):
    with patch(
        "crewai.memory.external.external_memory.ExternalMemory._configure_mem0",
        return_value=mock_mem0_memory,
    ) as mocked:
        yield mocked


@pytest.fixture
def external_memory_with_mocked_config(patch_configure_mem0):
    embedder_config = {"provider": "mem0"}
    external_memory = ExternalMemory(embedder_config=embedder_config)
    return external_memory


@pytest.fixture
def crew_with_external_memory(external_memory_with_mocked_config, patch_configure_mem0):
    agent = Agent(
        role="Researcher",
        goal="Search relevant data and provide results",
        backstory="You are a researcher at a leading tech think tank.",
        tools=[],
        verbose=True,
    )

    task = Task(
        description="Perform a search on specific topics.",
        expected_output="A list of relevant URLs based on the search query.",
        agent=agent,
    )

    crew = Crew(
        agents=[agent],
        tasks=[task],
        verbose=True,
        process=Process.sequential,
        memory=True,
        external_memory=external_memory_with_mocked_config,
    )

    return crew


def test_external_memory_initialization(external_memory_with_mocked_config):
    assert external_memory_with_mocked_config is not None
    assert isinstance(external_memory_with_mocked_config, ExternalMemory)


def test_external_memory_save(external_memory_with_mocked_config):
    memory_item = ExternalMemoryItem(
        value="test value", metadata={"task": "test_task"}, agent="test_agent"
    )

    with patch.object(ExternalMemory, "save") as mock_save:
        external_memory_with_mocked_config.save(
            value=memory_item.value,
            metadata=memory_item.metadata,
            agent=memory_item.agent,
        )

        mock_save.assert_called_once_with(
            value=memory_item.value,
            metadata=memory_item.metadata,
            agent=memory_item.agent,
        )


def test_external_memory_reset(external_memory_with_mocked_config):
    with patch(
        "crewai.memory.external.external_memory.ExternalMemory.reset"
    ) as mock_reset:
        external_memory_with_mocked_config.reset()
        mock_reset.assert_called_once()


def test_external_memory_supported_storages():
    supported_storages = ExternalMemory.external_supported_storages()
    assert "mem0" in supported_storages
    assert callable(supported_storages["mem0"])


def test_external_memory_create_storage_invalid_provider():
    embedder_config = {"provider": "invalid_provider", "config": {}}

    with pytest.raises(ValueError, match="Provider invalid_provider not supported"):
        ExternalMemory.create_storage(None, embedder_config)


def test_external_memory_create_storage_missing_provider():
    embedder_config = {"config": {}}

    with pytest.raises(
        ValueError, match="embedder_config must include a 'provider' key"
    ):
        ExternalMemory.create_storage(None, embedder_config)


def test_external_memory_create_storage_missing_config():
    with pytest.raises(ValueError, match="embedder_config is required"):
        ExternalMemory.create_storage(None, None)


def test_crew_with_external_memory_initialization(crew_with_external_memory):
    assert crew_with_external_memory._external_memory is not None
    assert isinstance(crew_with_external_memory._external_memory, ExternalMemory)
    assert crew_with_external_memory._external_memory.crew == crew_with_external_memory


@pytest.mark.parametrize("mem_type", ["external", "all"])
def test_crew_external_memory_reset(mem_type, crew_with_external_memory):
    with patch(
        "crewai.memory.external.external_memory.ExternalMemory.reset"
    ) as mock_reset:
        crew_with_external_memory.reset_memories(mem_type)
        mock_reset.assert_called_once()


@pytest.mark.parametrize("mem_method", ["search", "save"])
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_external_memory_save(mem_method, crew_with_external_memory):
    with patch(
        f"crewai.memory.external.external_memory.ExternalMemory.{mem_method}"
    ) as mock_method:
        crew_with_external_memory.kickoff()
        assert mock_method.call_count > 0


def test_external_memory_custom_storage(crew_with_external_memory):
    class CustomStorage(Storage):
        def __init__(self):
            self.memories = []

        def save(self, value, metadata=None, agent=None):
            self.memories.append({"value": value, "metadata": metadata, "agent": agent})

        def search(self, query, limit=10, score_threshold=0.5):
            return self.memories

        def reset(self):
            self.memories = []

    custom_storage = CustomStorage()
    external_memory = ExternalMemory(storage=custom_storage)

    # by ensuring the crew is set, we can test that the storage is used
    external_memory.set_crew(crew_with_external_memory)

    test_value = "test value"
    test_metadata = {"source": "test"}
    test_agent = "test_agent"
    external_memory.save(value=test_value, metadata=test_metadata, agent=test_agent)

    results = external_memory.search("test")
    assert len(results) == 1
    assert results[0]["value"] == test_value
    assert results[0]["metadata"] == test_metadata | {"agent": test_agent}

    external_memory.reset()
    results = external_memory.search("test")
    assert len(results) == 0
@@ -2,16 +2,7 @@ import pytest

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.llm import LLM
from crewai.project import (
    CrewBase,
    after_kickoff,
    agent,
    before_kickoff,
    crew,
    llm,
    task,
)
from crewai.project import CrewBase, after_kickoff, agent, before_kickoff, crew, task
from crewai.task import Task


@@ -40,13 +31,6 @@ class InternalCrew:
    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    @llm
    def local_llm(self):
        return LLM(
            model='openai/model_name',
            api_key="None",
            base_url="http://xxx.xxx.xxx.xxx:8000/v1")

    @agent
    def researcher(self):
        return Agent(config=self.agents_config["researcher"])
@@ -121,20 +105,6 @@ def test_task_name():
    ), "Custom task name is not being set as expected"


def test_agent_function_calling_llm():
    crew = InternalCrew()
    llm = crew.local_llm()
    obj_llm_agent = crew.researcher()
    assert (
        obj_llm_agent.function_calling_llm is llm
    ), "agent's function_calling_llm is incorrect"

    str_llm_agent = crew.reporting_analyst()
    assert (
        str_llm_agent.function_calling_llm.model == "online_llm"
    ), "agent's function_calling_llm is incorrect"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_before_kickoff_modification():
    crew = InternalCrew()
@@ -29,32 +29,41 @@ def mem0_storage_with_mocked_config(mock_mem0_memory):
    """Fixture to create a Mem0Storage instance with mocked dependencies"""

    # Patch the Memory class to return our mock
    with patch("mem0.memory.main.Memory.from_config", return_value=mock_mem0_memory):
    with patch('mem0.memory.main.Memory.from_config', return_value=mock_mem0_memory):
        config = {
            "vector_store": {
                "provider": "mock_vector_store",
                "config": {"host": "localhost", "port": 6333},
                "config": {
                    "host": "localhost",
                    "port": 6333
                }
            },
            "llm": {
                "provider": "mock_llm",
                "config": {"api_key": "mock-api-key", "model": "mock-model"},
                "config": {
                    "api_key": "mock-api-key",
                    "model": "mock-model"
                }
            },
            "embedder": {
                "provider": "mock_embedder",
                "config": {"api_key": "mock-api-key", "model": "mock-model"},
                "config": {
                    "api_key": "mock-api-key",
                    "model": "mock-model"
                }
            },
            "graph_store": {
                "provider": "mock_graph_store",
                "config": {
                    "url": "mock-url",
                    "username": "mock-user",
                    "password": "mock-password",
                },
                    "password": "mock-password"
                }
            },
            "history_db_path": "/mock/path",
            "version": "test-version",
            "custom_fact_extraction_prompt": "mock prompt 1",
            "custom_update_memory_prompt": "mock prompt 2",
            "custom_update_memory_prompt": "mock prompt 2"
        }

        # Instantiate the class with memory_config
@@ -83,73 +92,23 @@ def mock_mem0_memory_client():


@pytest.fixture
def mem0_storage_with_memory_client_using_config_from_crew(mock_mem0_memory_client):
def mem0_storage_with_memory_client(mock_mem0_memory_client):
    """Fixture to create a Mem0Storage instance with mocked dependencies"""

    # We need to patch the MemoryClient before it's instantiated
    with patch.object(MemoryClient, "__new__", return_value=mock_mem0_memory_client):
        crew = MockCrew(
            memory_config={
                "provider": "mem0",
                "config": {
                    "user_id": "test_user",
                    "api_key": "ABCDEFGH",
                    "org_id": "my_org_id",
                    "project_id": "my_project_id",
                },
            }
        )
    with patch.object(MemoryClient, '__new__', return_value=mock_mem0_memory_client):
        crew = MockCrew(
            memory_config={
                "provider": "mem0",
                "config": {"user_id": "test_user", "api_key": "ABCDEFGH", "org_id": "my_org_id", "project_id": "my_project_id"},
            }
        )

        mem0_storage = Mem0Storage(type="short_term", crew=crew)
        return mem0_storage
    mem0_storage = Mem0Storage(type="short_term", crew=crew)
    return mem0_storage


@pytest.fixture
def mem0_storage_with_memory_client_using_explictly_config(mock_mem0_memory_client):
    """Fixture to create a Mem0Storage instance with mocked dependencies"""

    # We need to patch the MemoryClient before it's instantiated
    with patch.object(MemoryClient, "__new__", return_value=mock_mem0_memory_client):
        crew = MockCrew(
            memory_config={
                "provider": "mem0",
                "config": {
                    "user_id": "test_user",
                    "api_key": "ABCDEFGH",
                    "org_id": "my_org_id",
                    "project_id": "my_project_id",
                },
            }
        )

        new_config = {"provider": "mem0", "config": {"api_key": "new-api-key"}}

        mem0_storage = Mem0Storage(type="short_term", crew=crew, config=new_config)
        return mem0_storage


def test_mem0_storage_with_memory_client_initialization(
    mem0_storage_with_memory_client_using_config_from_crew, mock_mem0_memory_client
):
def test_mem0_storage_with_memory_client_initialization(mem0_storage_with_memory_client, mock_mem0_memory_client):
    """Test Mem0Storage initialization with MemoryClient"""
    assert (
        mem0_storage_with_memory_client_using_config_from_crew.memory_type
        == "short_term"
    )
    assert (
        mem0_storage_with_memory_client_using_config_from_crew.memory
        is mock_mem0_memory_client
    )


def test_mem0_storage_with_explict_config(
    mem0_storage_with_memory_client_using_explictly_config,
):
    expected_config = {"provider": "mem0", "config": {"api_key": "new-api-key"}}
    assert (
        mem0_storage_with_memory_client_using_explictly_config.config == expected_config
    )
    assert (
        mem0_storage_with_memory_client_using_explictly_config.memory_config
        == expected_config
    )
    assert mem0_storage_with_memory_client.memory_type == "short_term"
    assert mem0_storage_with_memory_client.memory is mock_mem0_memory_client
@@ -1,30 +0,0 @@
import os
from unittest.mock import patch

import pytest

from crewai.telemetry import Telemetry


@pytest.mark.parametrize("env_var,value,expected_ready", [
    ("OTEL_SDK_DISABLED", "true", False),
    ("OTEL_SDK_DISABLED", "TRUE", False),
    ("CREWAI_DISABLE_TELEMETRY", "true", False),
    ("CREWAI_DISABLE_TELEMETRY", "TRUE", False),
    ("OTEL_SDK_DISABLED", "false", True),
    ("CREWAI_DISABLE_TELEMETRY", "false", True),
])
def test_telemetry_environment_variables(env_var, value, expected_ready):
    """Test telemetry state with different environment variable configurations."""
    with patch.dict(os.environ, {env_var: value}):
        with patch("crewai.telemetry.telemetry.TracerProvider"):
            telemetry = Telemetry()
            assert telemetry.ready is expected_ready


def test_telemetry_enabled_by_default():
    """Test that telemetry is enabled by default."""
    with patch.dict(os.environ, {}, clear=True):
        with patch("crewai.telemetry.telemetry.TracerProvider"):
            telemetry = Telemetry()
            assert telemetry.ready is True
@@ -42,3 +42,13 @@ def test_prompt_file():
    i18n.load_prompts()
    assert isinstance(i18n.retrieve("slices", "role_playing"), str)
    assert i18n.retrieve("slices", "role_playing") == "Lorem ipsum dolor sit amet"


def test_language_parameter():
    i18n = I18N(language="en")
    i18n.load_prompts()
    assert isinstance(i18n.slice("role_playing"), str)

    i18n = I18N(language="nonexistent")
    i18n.load_prompts()
    assert isinstance(i18n.slice("role_playing"), str)
uv.lock (generated, 8 changes)
@@ -695,7 +695,7 @@ requires-dist = [
    { name = "blinker", specifier = ">=1.9.0" },
    { name = "chromadb", specifier = ">=0.5.23" },
    { name = "click", specifier = ">=8.1.7" },
    { name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.40.1" },
    { name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.38.0" },
    { name = "docling", marker = "extra == 'docling'", specifier = ">=2.12.0" },
    { name = "fastembed", marker = "extra == 'fastembed'", specifier = ">=0.4.1" },
    { name = "instructor", specifier = ">=1.3.3" },
@@ -745,7 +745,7 @@ dev = [

[[package]]
name = "crewai-tools"
version = "0.40.1"
version = "0.38.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "chromadb" },
@@ -760,9 +760,9 @@ dependencies = [
    { name = "pytube" },
    { name = "requests" },
]
sdist = { url = "https://files.pythonhosted.org/packages/16/ff/0c16c9943ec1501b12fc72aca7815f191ffe94d5f1fe4e9c353ee8c4ad1d/crewai_tools-0.40.1.tar.gz", hash = "sha256:6af5040b2277df8fd592238a17bf584f95dcc9ef7766236534999c8a9e9d0b52", size = 744094 }
sdist = { url = "https://files.pythonhosted.org/packages/85/3f/d3b5697b4c6756cec65316c9ea9ccd9054f7b73670d1580befd3632ba031/crewai_tools-0.38.1.tar.gz", hash = "sha256:6abe75b3b339d53a9cf4e2d80124d863ff62a82b36753c30bec64318881876b2", size = 737620 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/35/05/619c00bae2dda038f0d218dd5197120c938e9c9ccef1b9e50cfb037486f6/crewai_tools-0.40.1-py3-none-any.whl", hash = "sha256:8f459f74dee64364bfdbc524c815c4afcfb9ed532b51e6b8b4f616398d46cf1e", size = 573286 },
    { url = "https://files.pythonhosted.org/packages/2b/2b/a6c9007647ffbb6a3c204b3ef26806030d6b041e3e012d4cec43c21335d6/crewai_tools-0.38.1-py3-none-any.whl", hash = "sha256:d9d3a88060f1f30c8f4ea044f6dd564a50d0a22b8a018a6fcec202b36246b9d8", size = 561414 },
]

[[package]]