Compare commits

...

6 Commits

Author SHA1 Message Date
lorenzejay
77dcf265b6 added docs 2026-04-22 16:13:40 -07:00
Lorenze Jay
d1b35d8897 Merge branch 'main' into lorenze/imp/memory-prompt-influence 2026-04-22 15:10:35 -07:00
lorenzejay
15bf60fa29 Merge branch 'main' of github.com:crewAIInc/crewAI into lorenze/imp/memory-prompt-influence 2026-04-16 15:49:48 -07:00
lorenzejay
88cbf6bd1a refactor: update MemoryPromptConfig to allow custom prompt strings
* Removed the static method for online people research and replaced it with a constructor for MemoryPromptConfig that accepts custom strings for save, extract, and query systems.
* Updated the corresponding test to validate the new configuration approach, ensuring flexibility in memory prompt handling.
2026-04-07 17:53:36 -07:00
Lorenze Jay
eeeb90c3a8 Merge branch 'main' into lorenze/imp/memory-prompt-influence 2026-04-07 17:50:11 -07:00
lorenzejay
1c9e8d21c0 feat: introduce MemoryPromptConfig for customizable memory prompts
* Added MemoryPromptConfig class to allow users to override default memory prompts for various operations (save, query, extract, consolidation).
* Updated relevant functions and classes to utilize the new configuration, enabling more flexible and context-specific memory handling.
* Enhanced tests to validate the functionality of the new prompt configuration and its integration within the memory processing flows.
2026-04-07 17:49:06 -07:00
14 changed files with 340 additions and 24 deletions

View File

@@ -157,6 +157,43 @@ class ResearchFlow(Flow):
انظر [وثائق التدفقات](/concepts/flows) لمزيد من المعلومات حول الذاكرة في التدفقات.
## تخصيص مطالبات الذاكرة (`MemoryPromptConfig`)
يمكنك استبدال تعليمات نموذج اللغة في كل خطوة من تحليل الذاكرة (نفس فكرة ضبط مطالبات التخطيط). مرّر كائن `MemoryPromptConfig` كوسيط `memory_prompt` إلى `Memory`. عيّن الحقول التي تحتاجها فقط؛ تبقى الخطوات الأخرى على القيم الافتراضية المضمّنة في `translations/en.json` تحت المفتاح `memory` (أسماء الحقول تطابق مفاتيح JSON).
```python
from crewai import Memory, MemoryPromptConfig
memory = Memory(
llm="gpt-4o-mini",
memory_prompt=MemoryPromptConfig(
save_system="...", # اختياري
query_user="...", # اختياري
),
)
```
يمكنك أيضًا تمرير `memory_prompt` إلى دوال مساعدة في `crewai.memory.analyze` (مثل `extract_memories_from_content`) عند استدعائها مباشرة.
### تأثير كل زوج من المطالبات
| الحقول | متى يعمل | ماذا يؤثر |
| --- | --- | --- |
| `save_system` / `save_user` | عند الحفظ (`analyze_for_save`) | `suggested_scope` و`categories` و`importance` و`extracted_metadata` المستنتجة قبل التخزين والتضمين. |
| `query_system` / `query_user` | عند تحليل استعلام الاسترجاع (`analyze_query`) | `keywords` و`suggested_scopes` و`complexity` و`recall_queries` و`time_filter`، ما يوجّه البحث المتجهي وعمق الاسترجاع. |
| `extract_memories_system` / `extract_memories_user` | `extract_memories_from_content` / `Memory.extract_memories` | كيفية تقسيم النص الخام إلى جمل ذاكرة منفصلة (لا يزال التخزين عبر `remember()`). |
| `consolidation_system` / `consolidation_user` | عندما يكون المحتوى الجديد قريبًا دلاليًا من سجلات موجودة (`analyze_for_consolidation`) | الإبقاء على الصفوف أو تحديثها أو حذفها، وما إذا كان يُدرج المحتوى الجديد كذاكرة مستقلة. |
### العناصر النائبة (placeholders)
سلاسل **النظام (system)** تُرسل كما هي. سلاسل **المستخدم (user)** تُملأ بـ `str.format` في بايثون. يجب أن تتضمن قوالب المستخدم المخصصة نفس أسماء العناصر النائبة الافتراضية وإلا يفشل التنسيق.
| حقل المستخدم | عناصر نائبة مطلوبة |
| --- | --- |
| `save_user` | `{content}`، `{existing_scopes}`، `{existing_categories}` |
| `query_user` | `{query}`، `{available_scopes}`، `{scope_desc}` |
| `extract_memories_user` | `{content}` |
| `consolidation_user` | `{new_content}`، `{records_summary}` |
## النطاقات الهرمية

View File

@@ -157,6 +157,43 @@ class ResearchFlow(Flow):
See the [Flows documentation](/concepts/flows) for more on memory in Flows.
## Customizing memory prompts (`MemoryPromptConfig`)
Override the LLM instructions used at each memory analysis step (same idea as tuning planning prompts). Pass a `MemoryPromptConfig` as `memory_prompt` on `Memory`. Only set the fields you need; every other step keeps the bundled defaults from the library's `translations/en.json` under the `memory` key (field names match those JSON keys).
```python
from crewai import Memory, MemoryPromptConfig
memory = Memory(
llm="gpt-4o-mini",
memory_prompt=MemoryPromptConfig(
save_system="...", # optional
query_user="...", # optional
),
)
```
You can also pass `memory_prompt` into helpers in `crewai.memory.analyze` (for example `extract_memories_from_content`) when you call them directly.
### What each prompt pair affects
| Fields | When it runs | What it influences |
| --- | --- | --- |
| `save_system` / `save_user` | Saving (`analyze_for_save`) | Inferred `suggested_scope`, `categories`, `importance`, and `extracted_metadata` before storage and embedding. |
| `query_system` / `query_user` | Recall query analysis (`analyze_query`) | `keywords`, `suggested_scopes`, `complexity`, `recall_queries`, and `time_filter`, which steer vector search and how deep recall goes. |
| `extract_memories_system` / `extract_memories_user` | `extract_memories_from_content` / `Memory.extract_memories` | How raw text is split into discrete memory strings (persistence is still via `remember()`). |
| `consolidation_system` / `consolidation_user` | When new content is similar to existing records (`analyze_for_consolidation`) | Whether to keep, update, or delete existing rows and whether to insert the new content as its own memory. |
### Placeholders
**System** strings are sent as-is. **User** strings are filled with Python's `str.format`. Custom user templates must include the same placeholder names as the defaults or formatting will raise.
| User field | Required placeholders |
| --- | --- |
| `save_user` | `{content}`, `{existing_scopes}`, `{existing_categories}` |
| `query_user` | `{query}`, `{available_scopes}`, `{scope_desc}` |
| `extract_memories_user` | `{content}` |
| `consolidation_user` | `{new_content}`, `{records_summary}` |
## Hierarchical Scopes

View File

@@ -157,6 +157,43 @@ class ResearchFlow(Flow):
Flow에서의 메모리에 대한 자세한 내용은 [Flows 문서](/concepts/flows)를 참조하세요.
## 메모리 프롬프트 사용자 지정 (`MemoryPromptConfig`)
메모리 분석 단계마다 사용되는 LLM 지시문을 덮어쓸 수 있습니다(플래닝 프롬프트를 조정하는 것과 같은 개념). `Memory`의 `memory_prompt`에 `MemoryPromptConfig`를 넘깁니다. 필요한 필드만 설정하면 되고, 나머지 단계는 라이브러리 번들 기본값(`translations/en.json`의 `memory` 키; 필드 이름이 해당 JSON 키와 일치)을 그대로 씁니다.
```python
from crewai import Memory, MemoryPromptConfig
memory = Memory(
llm="gpt-4o-mini",
memory_prompt=MemoryPromptConfig(
save_system="...", # 선택
query_user="...", # 선택
),
)
```
`crewai.memory.analyze`의 헬퍼(예: `extract_memories_from_content`)를 직접 호출할 때도 `memory_prompt`를 넘길 수 있습니다.
### 프롬프트 쌍별 역할
| 필드 | 실행 시점 | 영향 |
| --- | --- | --- |
| `save_system` / `save_user` | 저장 시 (`analyze_for_save`) | 저장·임베딩 전에 추론되는 `suggested_scope`, `categories`, `importance`, `extracted_metadata`. |
| `query_system` / `query_user` | 리콜 시 쿼리 분석 (`analyze_query`) | `keywords`, `suggested_scopes`, `complexity`, `recall_queries`, `time_filter` — 벡터 검색과 리콜 탐색 깊이에 영향. |
| `extract_memories_system` / `extract_memories_user` | `extract_memories_from_content` / `Memory.extract_memories` | 긴 텍스트를 개별 메모리 문자열로 나누는 방식(저장은 여전히 `remember()`). |
| `consolidation_system` / `consolidation_user` | 신규 콘텐츠가 기존 레코드와 유사할 때 (`analyze_for_consolidation`) | 기존 행 유지·갱신·삭제 및 신규 콘텐츠를 별도 메모리로 넣을지 여부. |
### 플레이스홀더
**system** 문자열은 그대로 전송됩니다. **user** 문자열은 Python `str.format`으로 채워집니다. 사용자 정의 user 템플릿에는 기본값과 동일한 플레이스홀더 이름이 포함되어야 하며, 그렇지 않으면 포맷 단계에서 오류가 납니다.
| User 필드 | 필수 플레이스홀더 |
| --- | --- |
| `save_user` | `{content}`, `{existing_scopes}`, `{existing_categories}` |
| `query_user` | `{query}`, `{available_scopes}`, `{scope_desc}` |
| `extract_memories_user` | `{content}` |
| `consolidation_user` | `{new_content}`, `{records_summary}` |
## 계층적 범위(Scopes)

View File

@@ -157,6 +157,43 @@ class ResearchFlow(Flow):
Veja a [documentação de Flows](/concepts/flows) para mais informações sobre memória em Flows.
## Personalizando prompts de memória (`MemoryPromptConfig`)
Substitua as instruções do LLM usadas em cada etapa de análise de memória (mesma ideia que ajustar prompts de planejamento). Passe um `MemoryPromptConfig` como `memory_prompt` em `Memory`. Defina apenas os campos necessários; nas demais etapas permanecem os padrões embutidos do `translations/en.json` da biblioteca, na chave `memory` (os nomes dos campos correspondem às chaves JSON).
```python
from crewai import Memory, MemoryPromptConfig
memory = Memory(
llm="gpt-4o-mini",
memory_prompt=MemoryPromptConfig(
save_system="...", # opcional
query_user="...", # opcional
),
)
```
Você também pode passar `memory_prompt` para funções auxiliares em `crewai.memory.analyze` (por exemplo `extract_memories_from_content`) quando chamá-las diretamente.
### O que cada par de prompts afeta
| Campos | Quando roda | O que influencia |
| --- | --- | --- |
| `save_system` / `save_user` | Ao salvar (`analyze_for_save`) | `suggested_scope`, `categories`, `importance` e `extracted_metadata` inferidos antes do armazenamento e do embedding. |
| `query_system` / `query_user` | Análise da consulta no recall (`analyze_query`) | `keywords`, `suggested_scopes`, `complexity`, `recall_queries` e `time_filter`, que orientam a busca vetorial e a profundidade do recall. |
| `extract_memories_system` / `extract_memories_user` | `extract_memories_from_content` / `Memory.extract_memories` | Como o texto bruto é dividido em memórias atômicas (a persistência continua sendo via `remember()`). |
| `consolidation_system` / `consolidation_user` | Quando o novo conteúdo é semelhante a registros existentes (`analyze_for_consolidation`) | Manter, atualizar ou excluir linhas existentes e se o novo conteúdo entra como memória própria. |
### Placeholders
Strings de **system** são enviadas como estão. Strings de **user** são preenchidas com `str.format` do Python. Templates de user personalizados devem incluir os mesmos nomes de placeholder dos padrões; caso contrário, a formatação falha.
| Campo user | Placeholders obrigatórios |
| --- | --- |
| `save_user` | `{content}`, `{existing_scopes}`, `{existing_categories}` |
| `query_user` | `{query}`, `{available_scopes}`, `{scope_desc}` |
| `extract_memories_user` | `{content}` |
| `consolidation_user` | `{new_content}`, `{records_summary}` |
## Escopos Hierárquicos

View File

@@ -52,6 +52,7 @@ __version__ = "1.14.3a3"
_LAZY_IMPORTS: dict[str, tuple[str, str]] = {
"Memory": ("crewai.memory.unified_memory", "Memory"),
"MemoryPromptConfig": ("crewai.memory.types", "MemoryPromptConfig"),
}
@@ -196,6 +197,7 @@ __all__ = [
"Knowledge",
"LLMGuardrail",
"Memory",
"MemoryPromptConfig",
"PlanningConfig",
"Process",
"RuntimeState",

View File

@@ -8,7 +8,7 @@ from typing import Any
from pydantic import BaseModel, ConfigDict, Field
from crewai.memory.types import MemoryRecord, ScopeInfo
from crewai.memory.types import MemoryPromptConfig, MemoryRecord, ScopeInfo
from crewai.utilities.i18n import I18N_DEFAULT
@@ -140,19 +140,23 @@ class ConsolidationPlan(BaseModel):
)
def _get_prompt(key: str) -> str:
"""Retrieve a memory prompt from the i18n translations.
Args:
key: The prompt key under the "memory" section.
Returns:
The prompt string.
"""
def _memory_prompt_line(
    memory_prompt: MemoryPromptConfig | None,
    key: str,
) -> str:
    """Resolve the prompt text for one memory step.

    A non-empty override string on ``memory_prompt`` wins; otherwise the
    bundled i18n translation registered under ``key`` is used.
    """
    override = None if memory_prompt is None else getattr(memory_prompt, key, None)
    if isinstance(override, str) and override.strip():
        return override
    # No usable override — fall back to the default translation.
    return I18N_DEFAULT.memory(key)
def extract_memories_from_content(content: str, llm: Any) -> list[str]:
def extract_memories_from_content(
content: str,
llm: Any,
memory_prompt: MemoryPromptConfig | None = None,
) -> list[str]:
"""Use the LLM to extract discrete memory statements from raw content.
This is a pure helper: it does NOT store anything. Callers should call
@@ -164,15 +168,21 @@ def extract_memories_from_content(content: str, llm: Any) -> list[str]:
Args:
content: Raw text (e.g. task description + result dump).
llm: The LLM instance to use.
memory_prompt: Optional per-step prompt strings (see ``MemoryPromptConfig``).
Returns:
List of short, self-contained memory statements (or [content] on failure).
"""
if not (content or "").strip():
return []
user = _get_prompt("extract_memories_user").format(content=content)
user = _memory_prompt_line(memory_prompt, "extract_memories_user").format(
content=content
)
messages = [
{"role": "system", "content": _get_prompt("extract_memories_system")},
{
"role": "system",
"content": _memory_prompt_line(memory_prompt, "extract_memories_system"),
},
{"role": "user", "content": user},
]
try:
@@ -202,6 +212,7 @@ def analyze_query(
available_scopes: list[str],
scope_info: ScopeInfo | None,
llm: Any,
memory_prompt: MemoryPromptConfig | None = None,
) -> QueryAnalysis:
"""Use the LLM to analyze a recall query.
@@ -212,6 +223,7 @@ def analyze_query(
available_scopes: Scope paths that exist in the store.
scope_info: Optional info about the current scope.
llm: The LLM instance to use.
memory_prompt: Optional per-step prompt strings.
Returns:
QueryAnalysis with keywords, suggested_scopes, complexity, recall_queries, time_filter.
@@ -219,13 +231,16 @@ def analyze_query(
scope_desc = ""
if scope_info:
scope_desc = f"Current scope has {scope_info.record_count} records, categories: {scope_info.categories}"
user = _get_prompt("query_user").format(
user = _memory_prompt_line(memory_prompt, "query_user").format(
query=query,
available_scopes=available_scopes or ["/"],
scope_desc=scope_desc,
)
messages = [
{"role": "system", "content": _get_prompt("query_system")},
{
"role": "system",
"content": _memory_prompt_line(memory_prompt, "query_system"),
},
{"role": "user", "content": user},
]
try:
@@ -269,6 +284,7 @@ def analyze_for_save(
existing_scopes: list[str],
existing_categories: list[str],
llm: Any,
memory_prompt: MemoryPromptConfig | None = None,
) -> MemoryAnalysis:
"""Infer scope, categories, importance, and metadata for a single memory.
@@ -280,17 +296,21 @@ def analyze_for_save(
existing_scopes: Current scope paths in the memory store.
existing_categories: Current categories in use.
llm: The LLM instance to use.
memory_prompt: Optional per-step prompt strings.
Returns:
MemoryAnalysis with suggested_scope, categories, importance, extracted_metadata.
"""
user = _get_prompt("save_user").format(
user = _memory_prompt_line(memory_prompt, "save_user").format(
content=content,
existing_scopes=existing_scopes or ["/"],
existing_categories=existing_categories or [],
)
messages = [
{"role": "system", "content": _get_prompt("save_system")},
{
"role": "system",
"content": _memory_prompt_line(memory_prompt, "save_system"),
},
{"role": "user", "content": user},
]
try:
@@ -322,6 +342,7 @@ def analyze_for_consolidation(
new_content: str,
existing_records: list[MemoryRecord],
llm: Any,
memory_prompt: MemoryPromptConfig | None = None,
) -> ConsolidationPlan:
"""Decide insert/update/delete for a single memory against similar existing records.
@@ -332,6 +353,7 @@ def analyze_for_consolidation(
new_content: The new content to store.
existing_records: Existing records that are semantically similar.
llm: The LLM instance to use.
memory_prompt: Optional per-step prompt strings.
Returns:
ConsolidationPlan with actions per record and whether to insert the new content.
@@ -345,12 +367,15 @@ def analyze_for_consolidation(
f"- id={r.id} | scope={r.scope} | importance={r.importance:.2f} | created={created}\n"
f" content: {r.content[:200]}{'...' if len(r.content) > 200 else ''}"
)
user = _get_prompt("consolidation_user").format(
user = _memory_prompt_line(memory_prompt, "consolidation_user").format(
new_content=new_content,
records_summary="\n\n".join(records_lines),
)
messages = [
{"role": "system", "content": _get_prompt("consolidation_system")},
{
"role": "system",
"content": _memory_prompt_line(memory_prompt, "consolidation_system"),
},
{"role": "user", "content": user},
]
try:

View File

@@ -314,6 +314,7 @@ class EncodingFlow(Flow[EncodingState]):
item.content,
list(item.similar_records),
self._llm,
self._config.memory_prompt,
)
elif not fields_provided and not has_similar:
# Group C: field resolution only
@@ -324,6 +325,7 @@ class EncodingFlow(Flow[EncodingState]):
existing_scopes,
existing_categories,
self._llm,
self._config.memory_prompt,
)
else:
# Group D: both in parallel
@@ -334,6 +336,7 @@ class EncodingFlow(Flow[EncodingState]):
existing_scopes,
existing_categories,
self._llm,
self._config.memory_prompt,
)
consol_futures[i] = pool.submit(
contextvars.copy_context().run,
@@ -341,6 +344,7 @@ class EncodingFlow(Flow[EncodingState]):
item.content,
list(item.similar_records),
self._llm,
self._config.memory_prompt,
)
# Collect field-resolution results

View File

@@ -227,6 +227,7 @@ class RecallFlow(Flow[RecallState]):
available,
scope_info,
self._llm,
self._config.memory_prompt,
)
self.state.query_analysis = analysis

View File

@@ -6,7 +6,7 @@ from datetime import datetime
from typing import Any
from uuid import uuid4
from pydantic import BaseModel, Field
from pydantic import BaseModel, ConfigDict, Field
# When searching the vector store, we ask for more results than the caller
@@ -132,6 +132,28 @@ class ScopeInfo(BaseModel):
)
class MemoryPromptConfig(BaseModel):
    """Configuration for memory LLM prompts (like ``PlanningConfig`` for planning).

    Field names match translation keys under ``memory`` in ``translations/en.json``.
    When set, the string replaces the bundled prompt for that step; omitted keys
    keep the default i18n text. Templates must include the same ``str.format``
    placeholders as the defaults (e.g. ``save_user`` uses ``{content}``,
    ``{existing_scopes}``, ``{existing_categories}``).
    """

    # Reject unknown field names so a typo (e.g. "save_sytem") fails at
    # construction instead of being silently ignored.
    model_config = ConfigDict(extra="forbid")

    # System/user prompt pair for save-time analysis (analyze_for_save).
    save_system: str | None = None
    save_user: str | None = None
    # System/user prompt pair for recall query analysis (analyze_query).
    query_system: str | None = None
    query_user: str | None = None
    # System/user prompt pair for extract_memories_from_content.
    extract_memories_system: str | None = None
    extract_memories_user: str | None = None
    # System/user prompt pair for analyze_for_consolidation.
    consolidation_system: str | None = None
    consolidation_user: str | None = None
class MemoryConfig(BaseModel):
"""Internal configuration for memory scoring, consolidation, and recall behavior.
@@ -141,6 +163,11 @@ class MemoryConfig(BaseModel):
compute_composite_score.
"""
memory_prompt: MemoryPromptConfig | None = Field(
default=None,
description="Per-step prompt strings overriding bundled memory prompts.",
)
# -- Composite score weights --
# The recall composite score is:
# semantic_weight * similarity + recency_weight * decay + importance_weight * importance

View File

@@ -9,7 +9,13 @@ import threading
import time
from typing import TYPE_CHECKING, Annotated, Any, Literal
from pydantic import BaseModel, ConfigDict, Field, PlainValidator, PrivateAttr
from pydantic import (
BaseModel,
ConfigDict,
Field,
PlainValidator,
PrivateAttr,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
@@ -26,6 +32,7 @@ from crewai.memory.storage.backend import StorageBackend
from crewai.memory.types import (
MemoryConfig,
MemoryMatch,
MemoryPromptConfig,
MemoryRecord,
ScopeInfo,
compute_composite_score,
@@ -59,6 +66,10 @@ class Memory(BaseModel):
Works without agent/crew. Uses LLM to infer scope, categories, importance on save.
Uses RecallFlow for adaptive-depth recall. Supports scope/slice views and
pluggable storage (LanceDB default).
Override LLM prompts per step via ``memory_prompt`` (same idea as
``PlanningConfig.system_prompt`` / ``plan_prompt``): set only the strings you
need; the rest stay on bundled translations.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -135,6 +146,13 @@ class Memory(BaseModel):
"will store memories at '/crew/research/<inferred_scope>'."
),
)
memory_prompt: MemoryPromptConfig | None = Field(
default=None,
description=(
"Optional prompt strings for save, query, extract, and consolidation steps. "
"See MemoryPromptConfig; unset fields use translations/en.json defaults."
),
)
_config: MemoryConfig = PrivateAttr()
_llm_instance: BaseLLM | None = PrivateAttr(default=None)
@@ -181,6 +199,7 @@ class Memory(BaseModel):
def model_post_init(self, __context: Any) -> None:
"""Initialize runtime state from field values."""
self._config = MemoryConfig(
memory_prompt=self.memory_prompt,
recency_weight=self.recency_weight,
semantic_weight=self.semantic_weight,
importance_weight=self.importance_weight,
@@ -638,7 +657,9 @@ class Memory(BaseModel):
Returns:
List of short, self-contained memory statements.
"""
return extract_memories_from_content(content, self._llm)
return extract_memories_from_content(
content, self._llm, self._config.memory_prompt
)
def recall(
self,

View File

@@ -51,6 +51,7 @@ from crewai.telemetry.utils import (
add_crew_and_task_attributes,
add_crew_attributes,
close_span,
crew_memory_span_attribute_value,
)
from crewai.utilities.i18n import I18N_DEFAULT
from crewai.utilities.logger_utils import suppress_warnings
@@ -281,7 +282,11 @@ class Telemetry:
self._add_attribute(span, "python_version", platform.python_version())
add_crew_attributes(span, crew, self._add_attribute)
self._add_attribute(span, "crew_process", crew.process)
self._add_attribute(span, "crew_memory", crew.memory)
self._add_attribute(
span,
"crew_memory",
crew_memory_span_attribute_value(crew.memory),
)
self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
self._add_attribute(span, "crew_number_of_agents", len(crew.agents))

View File

@@ -16,6 +16,19 @@ if TYPE_CHECKING:
from crewai.task import Task
def crew_memory_span_attribute_value(memory: Any) -> bool | str:
"""Serialize ``Crew.memory`` for OpenTelemetry span attributes.
OTLP only allows bool, str, bytes, int, float, and homogeneous sequences
of those types — not arbitrary objects like :class:`~crewai.memory.unified_memory.Memory`.
"""
if memory is None or memory is False:
return False
if memory is True:
return True
return type(memory).__name__
def add_agent_fingerprint_to_span(
span: Span, agent: Any, add_attribute_fn: Callable[[Span, str, Any], None]
) -> None:

View File

@@ -649,6 +649,58 @@ def test_remember_survives_llm_failure(
assert mem._storage.count() == 1
# --- Per-Memory prompt config (MemoryPromptConfig) ---
def test_memory_prompt_config_custom_strings() -> None:
    """Library stays domain-agnostic; apps pass their own MemoryPromptConfig."""
    from crewai.memory.types import MemoryPromptConfig

    config = MemoryPromptConfig(
        save_system="Prefer categories: search_query, exa_search, result_domain.",
        extract_memories_system="Record Exa queries and canonical URLs first.",
        query_system="Distill recall_queries toward domains and past queries.",
    )

    # Each override string must round-trip through the model unchanged.
    assert "search_query" in (config.save_system or "")
    assert "Exa" in (config.extract_memories_system or "")
    assert "recall_queries" in (config.query_system or "")
def test_memory_prompt_overrides_save_system_used_in_analyze(tmp_path: Path) -> None:
    """A ``save_system`` override must become the system message sent to the LLM."""
    from crewai.memory.analyze import analyze_for_save
    from crewai.memory.types import MemoryPromptConfig
    from crewai.memory.unified_memory import Memory

    override = "CUSTOM_SAVE_SYSTEM_OVERRIDE"

    fake_llm = MagicMock()
    fake_llm.supports_function_calling.return_value = False
    fake_llm.call.return_value = (
        '{"suggested_scope": "/", "categories": [], "importance": 0.5, '
        '"extracted_metadata": {"entities": [], "dates": [], "topics": []}}'
    )

    memory = Memory(
        storage=str(tmp_path / "ov_db"),
        embedder=MagicMock(),
        llm=fake_llm,
        memory_prompt=MemoryPromptConfig(save_system=override),
    )

    # The override must survive into the internal MemoryConfig.
    prompt_config = memory._config.memory_prompt
    assert prompt_config is not None
    assert prompt_config.save_system == override

    analyze_for_save(
        "hello",
        existing_scopes=["/"],
        existing_categories=[],
        llm=fake_llm,
        memory_prompt=prompt_config,
    )

    # The first message handed to the LLM is the custom system prompt verbatim.
    sent_messages = fake_llm.call.call_args[0][0]
    assert sent_messages[0]["role"] == "system"
    assert sent_messages[0]["content"] == override
# --- Agent.kickoff() memory integration ---

View File

@@ -3,8 +3,9 @@ import threading
from unittest.mock import patch
import pytest
from crewai import Agent, Crew, Task
from crewai import Agent, Crew, Memory, Task
from crewai.telemetry import Telemetry
from crewai.telemetry.utils import crew_memory_span_attribute_value
from opentelemetry import trace
@@ -159,3 +160,20 @@ def test_no_signal_handler_traceback_in_non_main_thread():
mock_holder["logger"].debug.assert_any_call(
"Skipping signal handler registration: not running in main thread"
)
@pytest.mark.parametrize(
    ("memory_value", "expected_attr"),
    [
        (False, False),
        (None, False),
        (True, True),
    ],
)
def test_crew_memory_span_attribute_value_primitives(memory_value, expected_attr):
    """Primitive memory flags map to booleans; ``None`` counts as disabled."""
    assert crew_memory_span_attribute_value(memory_value) is expected_attr
def test_crew_memory_span_attribute_value_memory_instance():
    """Custom Memory instances must become a primitive string for OTLP."""
    attribute_value = crew_memory_span_attribute_value(Memory())
    assert attribute_value == "Memory"