Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 08:08:32 +00:00
General Clean UP (#2042)
* clean up. fix type safety. address memory config docs
* improve manager
* Include fix for o1 models not supporting system messages
* more broad with o1
* address fix: Typo in expected_output string #2045
* drop prints
* drop prints
* wip
* wip
* fix failing memory tests
* Fix memory provider issue
* clean up short term memory
* revert ltm
* drop
Committed by: Devin AI
Parent: df55278476
Commit: ef1ef8fd80
@@ -185,7 +185,12 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder=OpenAIEmbeddingFunction(api_key=os.getenv("OPENAI_API_KEY"), model="text-embedding-3-small"),
+    embedder={
+        "provider": "openai",
+        "config": {
+            "model": 'text-embedding-3-small'
+        }
+    }
 )
 ```
@@ -242,13 +247,15 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder=OpenAIEmbeddingFunction(
-        api_key="YOUR_API_KEY",
-        api_base="YOUR_API_BASE_PATH",
-        api_type="azure",
-        api_version="YOUR_API_VERSION",
-        model="text-embedding-3-small"
-    )
+    embedder={
+        "provider": "openai",
+        "config": {
+            "api_key": "YOUR_API_KEY",
+            "api_base": "YOUR_API_BASE_PATH",
+            "api_version": "YOUR_API_VERSION",
+            "model_name": 'text-embedding-3-small'
+        }
+    }
 )
 ```
@@ -264,12 +271,15 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder=GoogleVertexEmbeddingFunction(
-        project_id="YOUR_PROJECT_ID",
-        region="YOUR_REGION",
-        api_key="YOUR_API_KEY",
-        model="textembedding-gecko"
-    )
+    embedder={
+        "provider": "vertexai",
+        "config": {
+            "project_id": "YOUR_PROJECT_ID",
+            "region": "YOUR_REGION",
+            "api_key": "YOUR_API_KEY",
+            "model_name": "textembedding-gecko"
+        }
+    }
 )
 ```
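The three documentation hunks above all make the same move: instead of constructing an embedding-function object (OpenAIEmbeddingFunction, GoogleVertexEmbeddingFunction) yourself, you hand Crew a plain provider/config dictionary. A minimal sketch of the new style, assuming a typical single-agent setup (the researcher agent and task below are illustrative placeholders, not part of the commit):

```python
import os

from crewai import Agent, Crew, Process, Task

researcher = Agent(
    role="Researcher",
    goal="Summarize a topic clearly",
    backstory="A careful, concise analyst.",
)
summary_task = Task(
    description="Summarize the history of word embeddings.",
    expected_output="A one-paragraph summary.",
    agent=researcher,
)

my_crew = Crew(
    agents=[researcher],
    tasks=[summary_task],
    process=Process.sequential,
    memory=True,
    verbose=True,
    # New style: select the provider by name and pass its settings as a dict,
    # rather than instantiating an embedding-function class directly.
    embedder={
        "provider": "openai",
        "config": {"model": "text-embedding-3-small"},
    },
)

# Requires OPENAI_API_KEY in the environment before kickoff.
assert os.getenv("OPENAI_API_KEY"), "set OPENAI_API_KEY first"
result = my_crew.kickoff()
```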
@@ -1,7 +1,7 @@
 import re
 import shutil
 import subprocess
-from typing import Any, Dict, List, Literal, Optional, Union
+from typing import Any, Dict, List, Literal, Optional, Sequence, Union
 
 from pydantic import Field, InstanceOf, PrivateAttr, model_validator
 
@@ -55,7 +55,6 @@ class Agent(BaseAgent):
         llm: The language model that will run the agent.
         function_calling_llm: The language model that will handle the tool calling for this agent, it overrides the crew function_calling_llm.
         max_iter: Maximum number of iterations for an agent to execute a task.
-        memory: Whether the agent should have memory or not.
         max_rpm: Maximum number of requests per minute for the agent execution to be respected.
         verbose: Whether the agent execution should be in verbose mode.
         allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
@@ -72,9 +71,6 @@ class Agent(BaseAgent):
     )
     agent_ops_agent_name: str = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
     agent_ops_agent_id: str = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
-    cache_handler: InstanceOf[CacheHandler] = Field(
-        default=None, description="An instance of the CacheHandler class."
-    )
     step_callback: Optional[Any] = Field(
         default=None,
         description="Callback to be executed after each step of the agent execution.",
@@ -108,10 +104,6 @@ class Agent(BaseAgent):
         default=True,
         description="Keep messages under the context window size by summarizing content.",
     )
-    max_iter: int = Field(
-        default=20,
-        description="Maximum number of iterations for an agent to execute a task before giving it's best answer",
-    )
     max_retry_limit: int = Field(
         default=2,
         description="Maximum number of retries for an agent to execute a task when an error occurs.",
@@ -197,13 +189,15 @@ class Agent(BaseAgent):
         if task.output_json:
             # schema = json.dumps(task.output_json, indent=2)
             schema = generate_model_description(task.output_json)
+            task_prompt += "\n" + self.i18n.slice(
+                "formatted_task_instructions"
+            ).format(output_format=schema)
 
         elif task.output_pydantic:
             schema = generate_model_description(task.output_pydantic)
-            task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
-                output_format=schema
-            )
+            task_prompt += "\n" + self.i18n.slice(
+                "formatted_task_instructions"
+            ).format(output_format=schema)
 
         if context:
             task_prompt = self.i18n.slice("task_with_context").format(
@@ -331,14 +325,14 @@ class Agent(BaseAgent):
         tools = agent_tools.tools()
         return tools
 
-    def get_multimodal_tools(self) -> List[Tool]:
+    def get_multimodal_tools(self) -> Sequence[BaseTool]:
         from crewai.tools.agent_tools.add_image_tool import AddImageTool
 
         return [AddImageTool()]
 
     def get_code_execution_tools(self):
         try:
-            from crewai_tools import CodeInterpreterTool
+            from crewai_tools import CodeInterpreterTool  # type: ignore
 
             # Set the unsafe_mode based on the code_execution_mode attribute
             unsafe_mode = self.code_execution_mode == "unsafe"
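One easy-to-miss change above is the return type of get_multimodal_tools moving from List[Tool] to Sequence[BaseTool]. This is standard typing practice rather than anything crewAI-specific: List is invariant in its element type, so a list holding only a BaseTool subclass does not satisfy List[BaseTool], while the read-only, covariant Sequence does. A self-contained sketch with toy stand-ins for the tool classes:

```python
from typing import List, Sequence


class BaseTool:
    name: str = "base"


class AddImageTool(BaseTool):
    name: str = "add_image"


def get_multimodal_tools() -> Sequence[BaseTool]:
    # Fine: a list of AddImageTool is accepted where a covariant
    # Sequence[BaseTool] is expected.
    return [AddImageTool()]


def get_multimodal_tools_strict() -> List[BaseTool]:
    tools: List[AddImageTool] = [AddImageTool()]
    # mypy rejects this return: List is invariant, so List[AddImageTool]
    # is not a List[BaseTool] even though AddImageTool subclasses BaseTool.
    return tools  # type: ignore[return-value]


print(get_multimodal_tools()[0].name)  # add_image
```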
@@ -24,6 +24,7 @@ from crewai.tools import BaseTool
 from crewai.tools.base_tool import Tool
 from crewai.utilities import I18N, Logger, RPMController
 from crewai.utilities.config import process_config
+from crewai.utilities.converter import Converter
 
 T = TypeVar("T", bound="BaseAgent")
 
@@ -42,7 +43,7 @@ class BaseAgent(ABC, BaseModel):
         max_rpm (Optional[int]): Maximum number of requests per minute for the agent execution.
         allow_delegation (bool): Allow delegation of tasks to agents.
         tools (Optional[List[Any]]): Tools at the agent's disposal.
-        max_iter (Optional[int]): Maximum iterations for an agent to execute a task.
+        max_iter (int): Maximum iterations for an agent to execute a task.
         agent_executor (InstanceOf): An instance of the CrewAgentExecutor class.
         llm (Any): Language model that will run the agent.
         crew (Any): Crew to which the agent belongs.
@@ -114,7 +115,7 @@ class BaseAgent(ABC, BaseModel):
     tools: Optional[List[Any]] = Field(
         default_factory=list, description="Tools at agents' disposal"
     )
-    max_iter: Optional[int] = Field(
+    max_iter: int = Field(
         default=25, description="Maximum iterations for an agent to execute a task"
     )
     agent_executor: InstanceOf = Field(
@@ -125,11 +126,12 @@ class BaseAgent(ABC, BaseModel):
     )
     crew: Any = Field(default=None, description="Crew to which the agent belongs.")
     i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
-    cache_handler: InstanceOf[CacheHandler] = Field(
+    cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
         default=None, description="An instance of the CacheHandler class."
     )
     tools_handler: InstanceOf[ToolsHandler] = Field(
-        default=None, description="An instance of the ToolsHandler class."
+        default_factory=ToolsHandler,
+        description="An instance of the ToolsHandler class.",
     )
     max_tokens: Optional[int] = Field(
         default=None, description="Maximum number of tokens for the agent's execution."
@@ -254,7 +256,7 @@ class BaseAgent(ABC, BaseModel):
     @abstractmethod
     def get_output_converter(
         self, llm: Any, text: str, model: type[BaseModel] | None, instructions: str
-    ):
+    ) -> Converter:
         """Get the converter class for the agent to create json/pydantic outputs."""
         pass
 
@@ -600,7 +600,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
         ```
         """
         try:
-            if not hasattr(self, '_state'):
+            if not hasattr(self, "_state"):
                 return ""
 
             if isinstance(self._state, dict):
@@ -706,26 +706,31 @@ class Flow(Generic[T], metaclass=FlowMeta):
             inputs: Optional dictionary containing input values and potentially a state ID to restore
         """
         # Handle state restoration if ID is provided in inputs
-        if inputs and 'id' in inputs and self._persistence is not None:
-            restore_uuid = inputs['id']
+        if inputs and "id" in inputs and self._persistence is not None:
+            restore_uuid = inputs["id"]
             stored_state = self._persistence.load_state(restore_uuid)
 
             # Override the id in the state if it exists in inputs
-            if 'id' in inputs:
+            if "id" in inputs:
                 if isinstance(self._state, dict):
-                    self._state['id'] = inputs['id']
+                    self._state["id"] = inputs["id"]
                 elif isinstance(self._state, BaseModel):
-                    setattr(self._state, 'id', inputs['id'])
+                    setattr(self._state, "id", inputs["id"])
 
             if stored_state:
-                self._log_flow_event(f"Loading flow state from memory for UUID: {restore_uuid}", color="yellow")
+                self._log_flow_event(
+                    f"Loading flow state from memory for UUID: {restore_uuid}",
+                    color="yellow",
+                )
                 # Restore the state
                 self._restore_state(stored_state)
             else:
-                self._log_flow_event(f"No flow state found for UUID: {restore_uuid}", color="red")
+                self._log_flow_event(
+                    f"No flow state found for UUID: {restore_uuid}", color="red"
+                )
 
             # Apply any additional inputs after restoration
-            filtered_inputs = {k: v for k, v in inputs.items() if k != 'id'}
+            filtered_inputs = {k: v for k, v in inputs.items() if k != "id"}
             if filtered_inputs:
                 self._initialize_state(filtered_inputs)
 
@@ -737,9 +742,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
                 flow_name=self.__class__.__name__,
             ),
         )
-        self._log_flow_event(f"Flow started with ID: {self.flow_id}", color="bold_magenta")
+        self._log_flow_event(
+            f"Flow started with ID: {self.flow_id}", color="bold_magenta"
+        )
 
-        if inputs is not None and 'id' not in inputs:
+        if inputs is not None and "id" not in inputs:
             self._initialize_state(inputs)
 
         return asyncio.run(self.kickoff_async())
@@ -984,7 +991,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
 
             traceback.print_exc()
 
-    def _log_flow_event(self, message: str, color: str = "yellow", level: str = "info") -> None:
+    def _log_flow_event(
+        self, message: str, color: str = "yellow", level: str = "info"
+    ) -> None:
         """Centralized logging method for flow events.
 
         This method provides a consistent interface for logging flow-related events,
@@ -221,6 +221,13 @@ class LLM:
         if isinstance(messages, str):
             messages = [{"role": "user", "content": messages}]
 
+        # For O1 models, system messages are not supported.
+        # Convert any system messages into assistant messages.
+        if "o1" in self.model.lower():
+            for message in messages:
+                if message.get("role") == "system":
+                    message["role"] = "assistant"
+
         with suppress_warnings():
             if callbacks and len(callbacks) > 0:
                 self.set_callbacks(callbacks)
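The new block rewrites system messages in place before the request is sent. A standalone sketch of the same logic (the function name and sample messages are illustrative, not crewAI API):

```python
from typing import Dict, List


def adapt_messages_for_o1(model: str, messages: List[Dict[str, str]]) -> List[Dict[str, str]]:
    """Downgrade system messages to assistant messages for o1-family models,
    which reject the system role."""
    if "o1" in model.lower():
        for message in messages:
            if message.get("role") == "system":
                message["role"] = "assistant"
    return messages


msgs = [{"role": "system", "content": "Be terse."}, {"role": "user", "content": "Hi"}]
print(adapt_messages_for_o1("o1-preview", msgs))
# [{'role': 'assistant', 'content': 'Be terse.'}, {'role': 'user', 'content': 'Hi'}]
```

Note the trade-off in the substring check: `"o1" in self.model.lower()` also matches any model name that happens to contain "o1", which appears to be the deliberate "more broad with o1" choice called out in the commit message.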
@@ -1,3 +1,7 @@
+from typing import Any, Optional
+
+from pydantic import PrivateAttr
+
 from crewai.memory.entity.entity_memory_item import EntityMemoryItem
 from crewai.memory.memory import Memory
 from crewai.memory.storage.rag_storage import RAGStorage
@@ -10,13 +14,15 @@ class EntityMemory(Memory):
     Inherits from the Memory class.
     """
 
-    def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
-        if hasattr(crew, "memory_config") and crew.memory_config is not None:
-            self.memory_provider = crew.memory_config.get("provider")
-        else:
-            self.memory_provider = None
+    _memory_provider: Optional[str] = PrivateAttr()
 
-        if self.memory_provider == "mem0":
+    def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
+        if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
+            memory_provider = crew.memory_config.get("provider")
+        else:
+            memory_provider = None
+
+        if memory_provider == "mem0":
             try:
                 from crewai.memory.storage.mem0_storage import Mem0Storage
             except ImportError:
@@ -36,11 +42,13 @@ class EntityMemory(Memory):
                     path=path,
                 )
             )
-        super().__init__(storage)
+        super().__init__(storage=storage)
+        self._memory_provider = memory_provider
 
     def save(self, item: EntityMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
         """Saves an entity item into the SQLite storage."""
-        if self.memory_provider == "mem0":
+        if self._memory_provider == "mem0":
             data = f"""
             Remember details about the following entity:
             Name: {item.name}
@@ -17,7 +17,7 @@ class LongTermMemory(Memory):
     def __init__(self, storage=None, path=None):
         if not storage:
             storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
-        super().__init__(storage)
+        super().__init__(storage=storage)
 
     def save(self, item: LongTermMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
         metadata = item.metadata
@@ -1,15 +1,19 @@
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Union
 
+from pydantic import BaseModel
+
 from crewai.memory.storage.rag_storage import RAGStorage
 
 
-class Memory:
+class Memory(BaseModel):
     """
     Base class for memory, now supporting agent tags and generic metadata.
     """
 
-    def __init__(self, storage: RAGStorage):
-        self.storage = storage
+    storage: Any
+
+    def __init__(self, storage: Any, **data: Any):
+        super().__init__(storage=storage, **data)
 
     def save(
         self,
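Memory becoming a pydantic BaseModel is what forces the `super().__init__(storage)` to `super().__init__(storage=storage)` changes in the memory subclasses: pydantic's `BaseModel.__init__` accepts keyword arguments only. A minimal sketch of the constraint, with FakeStorage as a stand-in for the real RAG storage:

```python
from typing import Any

from pydantic import BaseModel


class Memory(BaseModel):
    storage: Any

    def __init__(self, storage: Any, **data: Any):
        # Keyword form is required: BaseModel.__init__ takes **data only,
        # so a positional super().__init__(storage) would raise TypeError.
        super().__init__(storage=storage, **data)


class FakeStorage:
    def save(self, value: str) -> None:
        print(f"saved: {value}")


memory = Memory(storage=FakeStorage())
memory.storage.save("hello")  # prints: saved: hello
```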
@@ -1,5 +1,7 @@
 from typing import Any, Dict, Optional
 
+from pydantic import PrivateAttr
+
 from crewai.memory.memory import Memory
 from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
 from crewai.memory.storage.rag_storage import RAGStorage
@@ -14,13 +16,15 @@ class ShortTermMemory(Memory):
     MemoryItem instances.
     """
 
-    def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
-        if hasattr(crew, "memory_config") and crew.memory_config is not None:
-            self.memory_provider = crew.memory_config.get("provider")
-        else:
-            self.memory_provider = None
+    _memory_provider: Optional[str] = PrivateAttr()
 
-        if self.memory_provider == "mem0":
+    def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
+        if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
+            memory_provider = crew.memory_config.get("provider")
+        else:
+            memory_provider = None
+
+        if memory_provider == "mem0":
             try:
                 from crewai.memory.storage.mem0_storage import Mem0Storage
             except ImportError:
@@ -39,7 +43,8 @@ class ShortTermMemory(Memory):
                     path=path,
                 )
             )
-        super().__init__(storage)
+        super().__init__(storage=storage)
+        self._memory_provider = memory_provider
 
     def save(
         self,
@@ -48,7 +53,7 @@ class ShortTermMemory(Memory):
         agent: Optional[str] = None,
     ) -> None:
         item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
-        if self.memory_provider == "mem0":
+        if self._memory_provider == "mem0":
             item.data = f"Remember the following insights from Agent run: {item.data}"
 
         super().save(value=item.data, metadata=item.metadata, agent=item.agent)
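The same pattern appears in both EntityMemory and ShortTermMemory: the provider moves from a plain `self.memory_provider` attribute to a declared PrivateAttr. With Memory now a pydantic model, assigning an undeclared attribute would fail validation, and a private attribute keeps the provider out of the model's field schema. A minimal sketch (toy class, not the crewAI one):

```python
from typing import Any, Optional

from pydantic import BaseModel, PrivateAttr


class MemorySketch(BaseModel):
    storage: Any
    _memory_provider: Optional[str] = PrivateAttr(default=None)

    def __init__(self, storage: Any, provider: Optional[str] = None):
        super().__init__(storage=storage)
        # Private attrs may be assigned normally after pydantic's __init__.
        self._memory_provider = provider


m = MemorySketch(storage=object(), provider="mem0")
print(m._memory_provider)                               # mem0
print("_memory_provider" in MemorySketch.model_fields)  # False: not a field
```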
@@ -7,11 +7,11 @@ from crewai.utilities import I18N
 
 i18n = I18N()
 
 
 class AddImageToolSchema(BaseModel):
     image_url: str = Field(..., description="The URL or path of the image to add")
     action: Optional[str] = Field(
-        default=None,
-        description="Optional context or question about the image"
+        default=None, description="Optional context or question about the image"
     )
 
@@ -36,10 +36,7 @@ class AddImageTool(BaseTool):
                 "image_url": {
                     "url": image_url,
                 },
-            }
+            },
         ]
 
-        return {
-            "role": "user",
-            "content": content
-        }
+        return {"role": "user", "content": content}
@@ -15,7 +15,7 @@
   "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\n```\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n```",
   "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nHere is the expected format I must follow:\n\n```\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n This Thought/Action/Action Input/Result process can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
   "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
-  "expected_output": "\nThis is the expect criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
+  "expected_output": "\nThis is the expected criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
   "human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
   "getting_input": "This is the agent's final answer: {final_answer}\n\n",
   "summarizer_system_message": "You are a helpful assistant that summarizes text.",
@@ -1183,7 +1183,7 @@ def test_agent_max_retry_limit():
         [
             mock.call(
                 {
-                    "input": "Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.",
+                    "input": "Say the word: Hi\n\nThis is the expected criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.",
                     "tool_names": "",
                     "tools": "",
                     "ask_for_human_input": True,
@@ -1191,7 +1191,7 @@ def test_agent_max_retry_limit():
             ),
             mock.call(
                 {
-                    "input": "Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.",
+                    "input": "Say the word: Hi\n\nThis is the expected criteria for your final answer: The word: Hi\nyou MUST return the actual complete content as the final answer, not a summary.",
                     "tool_names": "",
                     "tools": "",
                     "ask_for_human_input": True,