mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-02-12 00:48:15 +00:00

Compare commits: 2 commits (joaomdmour...codex/fix-)

Commits:
- afa4d01c22
- 78ad77e619
.cursorrules: 1429 lines (normal file)

File diff suppressed because it is too large.
.github/codeql/codeql-config.yml (vendored): 5 changes

@@ -14,18 +14,13 @@ paths-ignore:

```yaml
  - "lib/crewai/src/crewai/experimental/a2a/**"

paths:
  # Include GitHub Actions workflows/composite actions for CodeQL actions analysis
  - ".github/workflows/**"
  - ".github/actions/**"
  # Include all Python source code from workspace packages
  - "lib/crewai/src/**"
  - "lib/crewai-tools/src/**"
  - "lib/crewai-files/src/**"
  - "lib/devtools/src/**"
  # Include tests (but exclude cassettes via paths-ignore)
  - "lib/crewai/tests/**"
  - "lib/crewai-tools/tests/**"
  - "lib/crewai-files/tests/**"
  - "lib/devtools/tests/**"

# Configure specific queries or packs if needed
```
.github/workflows/codeql.yml (vendored): 4 changes

```diff
@@ -69,7 +69,7 @@ jobs:
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v4
+        uses: github/codeql-action/init@v3
         with:
           languages: ${{ matrix.language }}
           build-mode: ${{ matrix.build-mode }}
@@ -98,6 +98,6 @@ jobs:
           exit 1

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v4
+        uses: github/codeql-action/analyze@v3
         with:
           category: "/language:${{matrix.language}}"
```
.gitignore (vendored): 3 changes

```diff
@@ -27,6 +27,3 @@ conceptual_plan.md
 build_image
 chromadb-*.lock
 .claude
-.crewai/memory
-blogs/*
-secrets/*
```
```diff
@@ -11,11 +11,7 @@ from typing import Any
 from dotenv import load_dotenv
 import pytest
 from vcr.request import Request  # type: ignore[import-untyped]

-try:
-    import vcr.stubs.httpx_stubs as httpx_stubs  # type: ignore[import-untyped]
-except ModuleNotFoundError:
-    import vcr.stubs.httpcore_stubs as httpx_stubs  # type: ignore[import-untyped]
+import vcr.stubs.httpx_stubs as httpx_stubs  # type: ignore[import-untyped]


 env_test_path = Path(__file__).parent / ".env.test"
```
```diff
@@ -111,13 +111,6 @@
         "en/guides/flows/mastering-flow-state"
       ]
     },
-    {
-      "group": "Coding Tools",
-      "icon": "terminal",
-      "pages": [
-        "en/guides/coding-tools/agents-md"
-      ]
-    },
     {
       "group": "Advanced",
       "icon": "gear",
@@ -1578,4 +1571,4 @@
     "reddit": "https://www.reddit.com/r/crewAIInc/"
   }
 }
 }
 }
 }
```

@@ -975,79 +975,6 @@ result = streaming.result

Learn more about streaming in the [Streaming Flow Execution](/en/learn/streaming-flow-execution) guide.

## Memory in Flows

Every Flow automatically has access to CrewAI's unified [Memory](/concepts/memory) system. You can store, recall, and extract memories directly inside any flow method using three built-in convenience methods.

### Built-in Methods

| Method | Description |
| :--- | :--- |
| `self.remember(content, **kwargs)` | Store content in memory. Accepts optional `scope`, `categories`, `metadata`, `importance`. |
| `self.recall(query, **kwargs)` | Retrieve relevant memories. Accepts optional `scope`, `categories`, `limit`, `depth`. |
| `self.extract_memories(content)` | Break raw text into discrete, self-contained memory statements. |

A default `Memory()` instance is created automatically when the Flow initializes. You can also pass a custom one:

```python
from crewai.flow.flow import Flow
from crewai import Memory

custom_memory = Memory(
    recency_weight=0.5,
    recency_half_life_days=7,
    embedder={"provider": "ollama", "config": {"model_name": "mxbai-embed-large"}},
)

flow = MyFlow(memory=custom_memory)
```

### Example: Research and Analyze Flow

```python
from crewai.flow.flow import Flow, listen, start


class ResearchAnalysisFlow(Flow):
    @start()
    def gather_data(self):
        # Simulate research findings
        findings = (
            "PostgreSQL handles 10k concurrent connections with connection pooling. "
            "MySQL caps at around 5k. MongoDB scales horizontally but adds complexity."
        )

        # Extract atomic facts and remember each one
        memories = self.extract_memories(findings)
        for mem in memories:
            self.remember(mem, scope="/research/databases")

        return findings

    @listen(gather_data)
    def analyze(self, raw_findings):
        # Recall relevant past research (from this run or previous runs)
        past = self.recall("database performance and scaling", limit=10, depth="shallow")

        context_lines = [f"- {m.record.content}" for m in past]
        context = "\n".join(context_lines) if context_lines else "No prior context."

        return {
            "new_findings": raw_findings,
            "prior_context": context,
            "total_memories": len(past),
        }


flow = ResearchAnalysisFlow()
result = flow.kickoff()
print(result)
```

Because memory persists across runs (backed by LanceDB on disk), the `analyze` step will also recall findings from previous executions, enabling flows that learn and accumulate knowledge over time.

See the [Memory documentation](/concepts/memory) for details on scopes, slices, composite scoring, embedder configuration, and more.

### Using the CLI

Starting from version 0.103.0, you can run flows using the `crewai run` command:

File diff suppressed because it is too large.

@@ -224,60 +224,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `groupFields` (string, optional): Fields to include (e.g., 'name,memberCount,clientData'). Default: name,memberCount

</Accordion>

<Accordion title="google_contacts/get_contact_group">
**Description:** Get a specific contact group by resource name.

**Parameters:**
- `resourceName` (string, required): The resource name of the contact group (e.g., 'contactGroups/myContactGroup')
- `maxMembers` (integer, optional): Maximum number of members to include. Minimum: 0, Maximum: 20000
- `groupFields` (string, optional): Fields to include (e.g., 'name,memberCount,clientData'). Default: name,memberCount

</Accordion>

<Accordion title="google_contacts/create_contact_group">
**Description:** Create a new contact group (label).

**Parameters:**
- `name` (string, required): The name of the contact group
- `clientData` (array, optional): Client-specific data
  ```json
  [
    {
      "key": "data_key",
      "value": "data_value"
    }
  ]
  ```

</Accordion>

<Accordion title="google_contacts/update_contact_group">
**Description:** Update a contact group's information.

**Parameters:**
- `resourceName` (string, required): The resource name of the contact group (e.g., 'contactGroups/myContactGroup')
- `name` (string, required): The name of the contact group
- `clientData` (array, optional): Client-specific data
  ```json
  [
    {
      "key": "data_key",
      "value": "data_value"
    }
  ]
  ```

</Accordion>

<Accordion title="google_contacts/delete_contact_group">
**Description:** Delete a contact group.

**Parameters:**
- `resourceName` (string, required): The resource name of the contact group to delete (e.g., 'contactGroups/myContactGroup')
- `deleteContacts` (boolean, optional): Whether to delete the contacts in the group as well. Default: false

</Accordion>
</AccordionGroup>
## Usage Examples
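
### Basic Contact Group Management

A minimal sketch of wiring these actions into an agent. It assumes the `CrewaiEnterpriseTools` helper and token setup used elsewhere in these integration docs; treat the exact action names passed to `actions_list` as assumptions.

```python
from crewai import Agent, Crew, Task
from crewai_tools import CrewaiEnterpriseTools  # assumed helper from the integration docs

# Scope the agent to the contact-group actions documented above (names are assumptions).
enterprise_tools = CrewaiEnterpriseTools(
    enterprise_token="your_enterprise_token",
    actions_list=["google_contacts/list_contact_groups", "google_contacts/create_contact_group"],
)

contacts_agent = Agent(
    role="Contacts Manager",
    goal="Keep contact groups organized",
    backstory="Maintains the team's Google Contacts labels.",
    tools=enterprise_tools,
)

task = Task(
    description="Create a 'Q3 Prospects' contact group if it does not already exist.",
    agent=contacts_agent,
    expected_output="Confirmation that the group exists.",
)

Crew(agents=[contacts_agent], tasks=[task]).kickoff()
```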

@@ -132,297 +132,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `endIndex` (integer, required): The end index of the range.

</Accordion>

<Accordion title="google_docs/create_document_with_content">
**Description:** Create a new Google Document with content in one action.

**Parameters:**
- `title` (string, required): The title for the new document. Appears at the top of the document and in Google Drive.
- `content` (string, optional): The text content to insert into the document. Use `\n` for new paragraphs.

</Accordion>

<Accordion title="google_docs/append_text">
**Description:** Append text to the end of a Google Document. Automatically inserts at the document end without needing to specify an index.

**Parameters:**
- `documentId` (string, required): The document ID from the create_document response or URL.
- `text` (string, required): Text to append at the end of the document. Use `\n` for new paragraphs.

</Accordion>

<Accordion title="google_docs/set_text_bold">
**Description:** Make text bold or remove bold formatting in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of text to format.
- `endIndex` (integer, required): End position of text to format (exclusive).
- `bold` (boolean, required): Set `true` to make bold, `false` to remove bold.

</Accordion>

<Accordion title="google_docs/set_text_italic">
**Description:** Make text italic or remove italic formatting in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of text to format.
- `endIndex` (integer, required): End position of text to format (exclusive).
- `italic` (boolean, required): Set `true` to make italic, `false` to remove italic.

</Accordion>

<Accordion title="google_docs/set_text_underline">
**Description:** Add or remove underline formatting from text in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of text to format.
- `endIndex` (integer, required): End position of text to format (exclusive).
- `underline` (boolean, required): Set `true` to underline, `false` to remove underline.

</Accordion>

<Accordion title="google_docs/set_text_strikethrough">
**Description:** Add or remove strikethrough formatting from text in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of text to format.
- `endIndex` (integer, required): End position of text to format (exclusive).
- `strikethrough` (boolean, required): Set `true` to add strikethrough, `false` to remove.

</Accordion>

<Accordion title="google_docs/set_font_size">
**Description:** Change the font size of text in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of text to format.
- `endIndex` (integer, required): End position of text to format (exclusive).
- `fontSize` (number, required): Font size in points. Common sizes: 10, 11, 12, 14, 16, 18, 24, 36.

</Accordion>

<Accordion title="google_docs/set_text_color">
**Description:** Change the color of text using RGB values (0-1 scale) in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of text to format.
- `endIndex` (integer, required): End position of text to format (exclusive).
- `red` (number, required): Red component (0-1). Example: `1` for full red.
- `green` (number, required): Green component (0-1). Example: `0.5` for half green.
- `blue` (number, required): Blue component (0-1). Example: `0` for no blue.

</Accordion>
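
Since these color components use a 0-1 scale rather than the more common 0-255, a tiny converter can help. This is an illustrative sketch, not part of the integration itself:

```python
def rgb255_to_unit(r: int, g: int, b: int) -> dict[str, float]:
    """Convert 0-255 RGB values to the 0-1 scale expected by set_text_color."""
    return {"red": r / 255, "green": g / 255, "blue": b / 255}

# Example: a medium orange, ready to merge into the action's parameters.
print(rgb255_to_unit(255, 128, 0))  # {'red': 1.0, 'green': 0.501..., 'blue': 0.0}
```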

<Accordion title="google_docs/create_hyperlink">
**Description:** Turn existing text into a clickable hyperlink in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of text to make into a link.
- `endIndex` (integer, required): End position of text to make into a link (exclusive).
- `url` (string, required): The URL the link should point to. Example: `"https://example.com"`.

</Accordion>

<Accordion title="google_docs/apply_heading_style">
**Description:** Apply a heading or paragraph style to a text range in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of paragraph(s) to style.
- `endIndex` (integer, required): End position of paragraph(s) to style.
- `style` (string, required): The style to apply. Enum: `NORMAL_TEXT`, `TITLE`, `SUBTITLE`, `HEADING_1`, `HEADING_2`, `HEADING_3`, `HEADING_4`, `HEADING_5`, `HEADING_6`.

</Accordion>

<Accordion title="google_docs/set_paragraph_alignment">
**Description:** Set text alignment for paragraphs in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of paragraph(s) to align.
- `endIndex` (integer, required): End position of paragraph(s) to align.
- `alignment` (string, required): Text alignment. Enum: `START` (left), `CENTER`, `END` (right), `JUSTIFIED`.

</Accordion>

<Accordion title="google_docs/set_line_spacing">
**Description:** Set line spacing for paragraphs in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of paragraph(s).
- `endIndex` (integer, required): End position of paragraph(s).
- `lineSpacing` (number, required): Line spacing as a percentage. `100` = single, `115` = 1.15x, `150` = 1.5x, `200` = double.

</Accordion>

<Accordion title="google_docs/create_paragraph_bullets">
**Description:** Convert paragraphs to a bulleted or numbered list in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of paragraphs to convert to a list.
- `endIndex` (integer, required): End position of paragraphs to convert to a list.
- `bulletPreset` (string, required): Bullet/numbering style. Enum: `BULLET_DISC_CIRCLE_SQUARE`, `BULLET_DIAMONDX_ARROW3D_SQUARE`, `BULLET_CHECKBOX`, `BULLET_ARROW_DIAMOND_DISC`, `BULLET_STAR_CIRCLE_SQUARE`, `NUMBERED_DECIMAL_ALPHA_ROMAN`, `NUMBERED_DECIMAL_ALPHA_ROMAN_PARENS`, `NUMBERED_DECIMAL_NESTED`, `NUMBERED_UPPERALPHA_ALPHA_ROMAN`, `NUMBERED_UPPERROMAN_UPPERALPHA_DECIMAL`.

</Accordion>

<Accordion title="google_docs/delete_paragraph_bullets">
**Description:** Remove bullets or numbering from paragraphs in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of list paragraphs.
- `endIndex` (integer, required): End position of list paragraphs.

</Accordion>

<Accordion title="google_docs/insert_table_with_content">
**Description:** Insert a table with content into a Google Document in one action. Provide content as a 2D array.

**Parameters:**
- `documentId` (string, required): The document ID.
- `rows` (integer, required): Number of rows in the table.
- `columns` (integer, required): Number of columns in the table.
- `index` (integer, optional): Position to insert the table. If not provided, the table is inserted at the end of the document.
- `content` (array, required): Table content as a 2D array. Each inner array is a row. Example: `[["Year", "Revenue"], ["2023", "$43B"], ["2024", "$45B"]]`.

</Accordion>

<Accordion title="google_docs/insert_table_row">
**Description:** Insert a new row above or below a reference cell in an existing table.

**Parameters:**
- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table. Get from get_document.
- `rowIndex` (integer, required): Row index (0-based) of the reference cell.
- `columnIndex` (integer, optional): Column index (0-based) of the reference cell. Default is `0`.
- `insertBelow` (boolean, optional): If `true`, insert below the reference row. If `false`, insert above. Default is `true`.

</Accordion>

<Accordion title="google_docs/insert_table_column">
**Description:** Insert a new column left or right of a reference cell in an existing table.

**Parameters:**
- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, optional): Row index (0-based) of the reference cell. Default is `0`.
- `columnIndex` (integer, required): Column index (0-based) of the reference cell.
- `insertRight` (boolean, optional): If `true`, insert to the right. If `false`, insert to the left. Default is `true`.

</Accordion>

<Accordion title="google_docs/delete_table_row">
**Description:** Delete a row from an existing table in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, required): Row index (0-based) to delete.
- `columnIndex` (integer, optional): Column index (0-based) of any cell in the row. Default is `0`.

</Accordion>

<Accordion title="google_docs/delete_table_column">
**Description:** Delete a column from an existing table in a Google Document.

**Parameters:**
- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, optional): Row index (0-based) of any cell in the column. Default is `0`.
- `columnIndex` (integer, required): Column index (0-based) to delete.

</Accordion>

<Accordion title="google_docs/merge_table_cells">
**Description:** Merge a range of table cells into a single cell. Content from all cells is preserved.

**Parameters:**
- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, required): Starting row index (0-based) for the merge.
- `columnIndex` (integer, required): Starting column index (0-based) for the merge.
- `rowSpan` (integer, required): Number of rows to merge.
- `columnSpan` (integer, required): Number of columns to merge.

</Accordion>

<Accordion title="google_docs/unmerge_table_cells">
**Description:** Unmerge previously merged table cells back into individual cells.

**Parameters:**
- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, required): Row index (0-based) of the merged cell.
- `columnIndex` (integer, required): Column index (0-based) of the merged cell.
- `rowSpan` (integer, required): Number of rows the merged cell spans.
- `columnSpan` (integer, required): Number of columns the merged cell spans.

</Accordion>

<Accordion title="google_docs/insert_inline_image">
**Description:** Insert an image from a public URL into a Google Document. The image must be publicly accessible, under 50MB, and in PNG/JPEG/GIF format.

**Parameters:**
- `documentId` (string, required): The document ID.
- `uri` (string, required): Public URL of the image. Must be accessible without authentication.
- `index` (integer, optional): Position to insert the image. If not provided, the image is inserted at the end of the document. Default is `1`.

</Accordion>

<Accordion title="google_docs/insert_section_break">
**Description:** Insert a section break to create document sections with different formatting.

**Parameters:**
- `documentId` (string, required): The document ID.
- `index` (integer, required): Position to insert the section break.
- `sectionType` (string, required): The type of section break. Enum: `CONTINUOUS` (stays on the same page), `NEXT_PAGE` (starts a new page).

</Accordion>

<Accordion title="google_docs/create_header">
**Description:** Create a header for the document. Returns a headerId which can be used with insert_text to add header content.

**Parameters:**
- `documentId` (string, required): The document ID.
- `type` (string, optional): Header type. Enum: `DEFAULT`. Default is `DEFAULT`.

</Accordion>

<Accordion title="google_docs/create_footer">
**Description:** Create a footer for the document. Returns a footerId which can be used with insert_text to add footer content.

**Parameters:**
- `documentId` (string, required): The document ID.
- `type` (string, optional): Footer type. Enum: `DEFAULT`. Default is `DEFAULT`.

</Accordion>

<Accordion title="google_docs/delete_header">
**Description:** Delete a header from the document. Use get_document to find the headerId.

**Parameters:**
- `documentId` (string, required): The document ID.
- `headerId` (string, required): The header ID to delete. Get from the get_document response.

</Accordion>

<Accordion title="google_docs/delete_footer">
**Description:** Delete a footer from the document. Use get_document to find the footerId.

**Parameters:**
- `documentId` (string, required): The document ID.
- `footerId` (string, required): The footer ID to delete. Get from the get_document response.

</Accordion>
</AccordionGroup>
## Usage Examples
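
### Computing Text Ranges

The formatting actions above all take `startIndex`/`endIndex` pairs with an exclusive end. As a hedged, illustrative sketch (not from the original docs), here is one way to derive those indices from a known snippet of document text:

```python
def find_range(document_text: str, snippet: str) -> tuple[int, int]:
    """Return (startIndex, endIndex) for a snippet, with endIndex exclusive.

    Assumes document_text is flattened body text in the same index space the
    API uses; real Google Docs indices also count structural elements, so
    treat this as an approximation for simple documents.
    """
    start = document_text.find(snippet)
    if start == -1:
        raise ValueError(f"snippet not found: {snippet!r}")
    return start, start + len(snippet)

start, end = find_range("Quarterly Report\nRevenue grew 12%.", "Revenue grew 12%.")
# Feed start/end into set_text_bold, create_hyperlink, etc.
```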

@@ -62,22 +62,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="google_slides/get_presentation_metadata">
**Description:** Get lightweight metadata about a presentation (title, slide count, slide IDs). Use this first before fetching full content.

**Parameters:**
- `presentationId` (string, required): The ID of the presentation to retrieve.

</Accordion>

<Accordion title="google_slides/get_presentation_text">
**Description:** Extract all text content from a presentation. Returns slide IDs and text from shapes and tables only (no formatting).

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.

</Accordion>

<Accordion title="google_slides/get_presentation">
**Description:** Retrieves a presentation by ID.

@@ -112,15 +96,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="google_slides/get_slide_text">
**Description:** Extract text content from a single slide. Returns only text from shapes and tables (no formatting or styling).

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.
- `pageObjectId` (string, required): The ID of the slide/page to get text from.

</Accordion>

<Accordion title="google_slides/get_page">
**Description:** Retrieves a specific page by its ID.

@@ -139,120 +114,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="google_slides/create_slide">
**Description:** Add an additional blank slide to a presentation. New presentations already have one blank slide, so check get_presentation_metadata first. For slides with title/body areas, use create_slide_with_layout instead.

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.
- `insertionIndex` (integer, optional): Where to insert the slide (0-based). If omitted, the slide is added at the end.

</Accordion>

<Accordion title="google_slides/create_slide_with_layout">
**Description:** Create a slide with a predefined layout containing placeholder areas for title, body, etc. This is better than create_slide for structured content. After creating, use get_page to find placeholder IDs, then insert text into them.

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.
- `layout` (string, required): Layout type. One of: `BLANK`, `TITLE`, `TITLE_AND_BODY`, `TITLE_AND_TWO_COLUMNS`, `TITLE_ONLY`, `SECTION_HEADER`, `ONE_COLUMN_TEXT`, `MAIN_POINT`, `BIG_NUMBER`. TITLE_AND_BODY is best for a title plus description, TITLE for title-only slides, and SECTION_HEADER for section dividers.
- `insertionIndex` (integer, optional): Where to insert (0-based). Omit to add at the end.

</Accordion>

<Accordion title="google_slides/create_text_box">
**Description:** Create a text box on a slide with content. Use this for titles, descriptions, and paragraphs, not tables. Optionally specify position (x, y) and size (width, height) in EMU units (914400 EMU = 1 inch).

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the text box to.
- `text` (string, required): The text content for the text box.
- `x` (integer, optional): X position in EMU (914400 = 1 inch). Default: 914400 (1 inch from the left).
- `y` (integer, optional): Y position in EMU (914400 = 1 inch). Default: 914400 (1 inch from the top).
- `width` (integer, optional): Width in EMU. Default: 7315200 (~8 inches).
- `height` (integer, optional): Height in EMU. Default: 914400 (~1 inch).

</Accordion>
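
Positions and sizes here are in EMU (English Metric Units), where 914400 EMU equals one inch, so layout math is easier to do in inches. A small, illustrative converter (not part of the integration):

```python
EMU_PER_INCH = 914400  # per the parameter docs above

def inches_to_emu(value: float) -> int:
    """Convert inches to whole EMU for the x/y/width/height parameters."""
    return round(value * EMU_PER_INCH)

# A text box 1 inch from the left, 2 inches down, 8 inches wide, 1 inch tall.
box = {
    "x": inches_to_emu(1),       # 914400
    "y": inches_to_emu(2),       # 1828800
    "width": inches_to_emu(8),   # 7315200
    "height": inches_to_emu(1),  # 914400
}
```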

<Accordion title="google_slides/delete_slide">
**Description:** Remove a slide from the presentation. Use get_presentation first to find the slide ID.

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The object ID of the slide to delete. Get from get_presentation.

</Accordion>

<Accordion title="google_slides/duplicate_slide">
**Description:** Create a copy of an existing slide. The duplicate is inserted immediately after the original.

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The object ID of the slide to duplicate. Get from get_presentation.

</Accordion>

<Accordion title="google_slides/move_slides">
**Description:** Reorder slides by moving them to a new position. Slide IDs must be in their current presentation order (no duplicates).

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.
- `slideIds` (array of strings, required): Array of slide IDs to move. Must be in current presentation order.
- `insertionIndex` (integer, required): Target position (0-based). 0 = beginning, slide count = end.

</Accordion>

<Accordion title="google_slides/insert_youtube_video">
**Description:** Embed a YouTube video on a slide. The video ID is the value after "v=" in YouTube URLs (e.g., for youtube.com/watch?v=abc123, use "abc123").

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the video to. Get from get_presentation.
- `videoId` (string, required): The YouTube video ID (the value after v= in the URL).

</Accordion>
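
Extracting that ID by hand is error-prone across the different YouTube URL shapes. A hedged sketch using only the standard library (the watch and youtu.be forms are assumptions about the URLs you will encounter):

```python
from urllib.parse import urlparse, parse_qs

def youtube_video_id(url: str) -> str:
    """Pull the video ID from common YouTube URL forms for insert_youtube_video."""
    parsed = urlparse(url)
    if parsed.hostname == "youtu.be":  # short links: youtu.be/<id>
        return parsed.path.lstrip("/")
    query = parse_qs(parsed.query)
    if "v" in query:  # watch links: youtube.com/watch?v=<id>
        return query["v"][0]
    raise ValueError(f"could not find a video ID in {url!r}")

assert youtube_video_id("https://www.youtube.com/watch?v=abc123") == "abc123"
assert youtube_video_id("https://youtu.be/abc123") == "abc123"
```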

<Accordion title="google_slides/insert_drive_video">
**Description:** Embed a video from Google Drive on a slide. The file ID can be found in the Drive file URL.

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the video to. Get from get_presentation.
- `fileId` (string, required): The Google Drive file ID of the video.

</Accordion>

<Accordion title="google_slides/set_slide_background_image">
**Description:** Set a background image for a slide. The image URL must be publicly accessible.

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to set the background for. Get from get_presentation.
- `imageUrl` (string, required): Publicly accessible URL of the image to use as the background.

</Accordion>

<Accordion title="google_slides/create_table">
**Description:** Create an empty table on a slide. To create a table with content, use create_table_with_content instead.

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the table to. Get from get_presentation.
- `rows` (integer, required): Number of rows in the table.
- `columns` (integer, required): Number of columns in the table.

</Accordion>

<Accordion title="google_slides/create_table_with_content">
**Description:** Create a table with content in one action. Provide content as a 2D array where each inner array is a row. Example: `[["Header1", "Header2"], ["Row1Col1", "Row1Col2"]]`.

**Parameters:**
- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the table to. Get from get_presentation.
- `rows` (integer, required): Number of rows in the table.
- `columns` (integer, required): Number of columns in the table.
- `content` (array, required): Table content as a 2D array. Each inner array is a row. Example: `[["Year", "Revenue"], ["2023", "$10M"]]`.

</Accordion>

<Accordion title="google_slides/import_data_from_sheet">
**Description:** Imports data from a Google Sheet into a presentation.

@@ -169,16 +169,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="microsoft_excel/get_table_data">
**Description:** Get data from a specific table in an Excel worksheet.

**Parameters:**
- `file_id` (string, required): The ID of the Excel file
- `worksheet_name` (string, required): Name of the worksheet
- `table_name` (string, required): Name of the table

</Accordion>

<Accordion title="microsoft_excel/create_chart">
**Description:** Create a chart in an Excel worksheet.

@@ -211,15 +201,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="microsoft_excel/get_used_range_metadata">
**Description:** Get the used range metadata (dimensions only, no data) of an Excel worksheet.

**Parameters:**
- `file_id` (string, required): The ID of the Excel file
- `worksheet_name` (string, required): Name of the worksheet

</Accordion>

<Accordion title="microsoft_excel/list_charts">
**Description:** Get all charts in an Excel worksheet.

@@ -151,49 +151,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `item_id` (string, required): The ID of the file.

</Accordion>

<Accordion title="microsoft_onedrive/list_files_by_path">
**Description:** List files and folders in a specific OneDrive path.

**Parameters:**
- `folder_path` (string, required): The folder path (e.g., 'Documents/Reports').
- `top` (integer, optional): Number of items to retrieve (max 1000). Default is `50`.
- `orderby` (string, optional): Order by field (e.g., "name asc", "lastModifiedDateTime desc"). Default is "name asc".

</Accordion>

<Accordion title="microsoft_onedrive/get_recent_files">
**Description:** Get recently accessed files from OneDrive.

**Parameters:**
- `top` (integer, optional): Number of items to retrieve (max 200). Default is `25`.

</Accordion>

<Accordion title="microsoft_onedrive/get_shared_with_me">
**Description:** Get files and folders shared with the user.

**Parameters:**
- `top` (integer, optional): Number of items to retrieve (max 200). Default is `50`.
- `orderby` (string, optional): Order by field. Default is "name asc".

</Accordion>

<Accordion title="microsoft_onedrive/get_file_by_path">
**Description:** Get information about a specific file or folder by path.

**Parameters:**
- `file_path` (string, required): The file or folder path (e.g., 'Documents/report.docx').

</Accordion>

<Accordion title="microsoft_onedrive/download_file_by_path">
**Description:** Download a file from OneDrive by its path.

**Parameters:**
- `file_path` (string, required): The file path (e.g., 'Documents/report.docx').

</Accordion>
</AccordionGroup>
## Usage Examples
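
### Basic OneDrive File Retrieval

A minimal sketch of the path-based actions above in an agent, assuming the `CrewaiEnterpriseTools` helper and token setup used elsewhere in these docs; the action names in `actions_list` are assumptions.

```python
from crewai import Agent, Crew, Task
from crewai_tools import CrewaiEnterpriseTools  # assumed helper from the integration docs

# Limit the agent to the listing and download actions documented above.
enterprise_tools = CrewaiEnterpriseTools(
    enterprise_token="your_enterprise_token",
    actions_list=[
        "microsoft_onedrive/list_files_by_path",
        "microsoft_onedrive/download_file_by_path",
    ],
)

drive_agent = Agent(
    role="File Librarian",
    goal="Locate and fetch documents from OneDrive",
    backstory="Knows where the team keeps its reports.",
    tools=enterprise_tools,
)

task = Task(
    description="List everything under 'Documents/Reports' and download the most recent report.",
    agent=drive_agent,
    expected_output="The name and contents of the downloaded report.",
)

Crew(agents=[drive_agent], tasks=[task]).kickoff()
```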

@@ -133,74 +133,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `companyName` (string, optional): Contact's company name.

</Accordion>

<Accordion title="microsoft_outlook/get_message">
**Description:** Get a specific email message by ID.

**Parameters:**
- `message_id` (string, required): The unique identifier of the message. Obtain from the get_messages action.
- `select` (string, optional): Comma-separated list of properties to return. Example: "id,subject,body,from,receivedDateTime". Default is "id,subject,body,from,toRecipients,receivedDateTime".

</Accordion>

<Accordion title="microsoft_outlook/reply_to_email">
**Description:** Reply to an email message.

**Parameters:**
- `message_id` (string, required): The unique identifier of the message to reply to. Obtain from the get_messages action.
- `comment` (string, required): The reply message content. Can be plain text or HTML. The original message will be quoted below this content.

</Accordion>

<Accordion title="microsoft_outlook/forward_email">
**Description:** Forward an email message.

**Parameters:**
- `message_id` (string, required): The unique identifier of the message to forward. Obtain from the get_messages action.
- `to_recipients` (array, required): Array of recipient email addresses to forward to. Example: ["john@example.com", "jane@example.com"].
- `comment` (string, optional): Optional message to include above the forwarded content. Can be plain text or HTML.

</Accordion>

<Accordion title="microsoft_outlook/mark_message_read">
**Description:** Mark a message as read or unread.

**Parameters:**
- `message_id` (string, required): The unique identifier of the message. Obtain from the get_messages action.
- `is_read` (boolean, required): Set to true to mark as read, false to mark as unread.

</Accordion>

<Accordion title="microsoft_outlook/delete_message">
**Description:** Delete an email message.

**Parameters:**
- `message_id` (string, required): The unique identifier of the message to delete. Obtain from the get_messages action.

</Accordion>

<Accordion title="microsoft_outlook/update_event">
**Description:** Update an existing calendar event.

**Parameters:**
- `event_id` (string, required): The unique identifier of the event. Obtain from the get_calendar_events action.
- `subject` (string, optional): New subject/title for the event.
- `start_time` (string, optional): New start time in ISO 8601 format (e.g., "2024-01-20T10:00:00"). When using this field, you must also provide start_timezone.
- `start_timezone` (string, optional): Timezone for the start time. Required when updating start_time. Examples: "Pacific Standard Time", "Eastern Standard Time", "UTC".
- `end_time` (string, optional): New end time in ISO 8601 format. When using this field, you must also provide end_timezone.
- `end_timezone` (string, optional): Timezone for the end time. Required when updating end_time. Examples: "Pacific Standard Time", "Eastern Standard Time", "UTC".
- `location` (string, optional): New location for the event.
- `body` (string, optional): New body/description for the event. Supports HTML formatting.

</Accordion>

<Accordion title="microsoft_outlook/delete_event">
**Description:** Delete a calendar event.

**Parameters:**
- `event_id` (string, required): The unique identifier of the event to delete. Obtain from the get_calendar_events action.

</Accordion>
</AccordionGroup>
## Usage Examples
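
### Validating Event Updates

One easy mistake with update_event is sending start_time or end_time without its matching timezone field. A small, illustrative guard (a hypothetical helper, not part of the integration):

```python
def validate_event_update(params: dict) -> dict:
    """Ensure time fields are paired with their required timezone fields."""
    pairs = [("start_time", "start_timezone"), ("end_time", "end_timezone")]
    for time_field, tz_field in pairs:
        if time_field in params and tz_field not in params:
            raise ValueError(f"{time_field} requires {tz_field} (e.g., 'UTC')")
    return params

update = validate_event_update({
    "event_id": "AAMkAD...",  # from get_calendar_events
    "start_time": "2024-01-20T10:00:00",
    "start_timezone": "Pacific Standard Time",
})
```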

@@ -78,17 +78,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="microsoft_sharepoint/get_drives">
**Description:** List all document libraries (drives) in a SharePoint site. Use this to discover available libraries before using file operations.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `top` (integer, optional): Maximum number of drives to return per page (1-999). Default is 100
- `skip_token` (string, optional): Pagination token from a previous response to fetch the next page of results
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'id,name,webUrl,driveType')

</Accordion>

<Accordion title="microsoft_sharepoint/get_site_lists">
**Description:** Get all lists in a SharePoint site.

@@ -170,317 +159,20 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="microsoft_sharepoint/list_files">
**Description:** Retrieve files and folders from a SharePoint document library. By default lists the root folder, but you can navigate into subfolders by providing a folder_id.
<Accordion title="microsoft_sharepoint/get_drive_items">
**Description:** Get files and folders from a SharePoint document library.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `folder_id` (string, optional): The ID of the folder to list contents from. Use 'root' for the root folder, or provide a folder ID from a previous list_files call. Default is 'root'
- `top` (integer, optional): Maximum number of items to return per page (1-1000). Default is 50
- `skip_token` (string, optional): Pagination token from a previous response to fetch the next page of results
- `orderby` (string, optional): Sort order for results (e.g., 'name asc', 'size desc', 'lastModifiedDateTime desc'). Default is 'name asc'
- `filter` (string, optional): OData filter to narrow results (e.g., 'file ne null' for files only, 'folder ne null' for folders only)
- `select` (string, optional): Comma-separated list of fields to return (e.g., 'id,name,size,folder,file,webUrl,lastModifiedDateTime')
- `site_id` (string, required): The ID of the SharePoint site

</Accordion>

<Accordion title="microsoft_sharepoint/delete_file">
**Description:** Delete a file or folder from a SharePoint document library. For folders, all contents are deleted recursively. Items are moved to the site recycle bin.
<Accordion title="microsoft_sharepoint/delete_drive_item">
**Description:** Delete a file or folder from a SharePoint document library.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the file or folder to delete. Obtain from list_files

</Accordion>

<Accordion title="microsoft_sharepoint/list_files_by_path">
**Description:** List files and folders in a SharePoint document library folder by its path. More efficient than multiple list_files calls for deep navigation.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `folder_path` (string, required): The full path to the folder without leading/trailing slashes (e.g., 'Documents', 'Reports/2024/Q1')
- `top` (integer, optional): Maximum number of items to return per page (1-1000). Default is 50
- `skip_token` (string, optional): Pagination token from a previous response to fetch the next page of results
- `orderby` (string, optional): Sort order for results (e.g., 'name asc', 'size desc'). Default is 'name asc'
- `select` (string, optional): Comma-separated list of fields to return (e.g., 'id,name,size,folder,file,webUrl,lastModifiedDateTime')

</Accordion>

<Accordion title="microsoft_sharepoint/download_file">
**Description:** Download raw file content from a SharePoint document library. Use only for plain text files (.txt, .csv, .json). For Excel files, use the Excel-specific actions. For Word files, use get_word_document_content.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the file to download. Obtain from list_files or list_files_by_path

</Accordion>

<Accordion title="microsoft_sharepoint/get_file_info">
**Description:** Retrieve detailed metadata for a specific file or folder in a SharePoint document library, including name, size, created/modified dates, and author information.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the file or folder. Obtain from list_files or list_files_by_path
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'id,name,size,createdDateTime,lastModifiedDateTime,webUrl,createdBy,lastModifiedBy')

</Accordion>

<Accordion title="microsoft_sharepoint/create_folder">
**Description:** Create a new folder in a SharePoint document library. By default creates the folder in the root; use parent_id to create subfolders.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `folder_name` (string, required): Name for the new folder. Cannot contain: \ / : * ? " < > |
- `parent_id` (string, optional): The ID of the parent folder. Use 'root' for the document library root, or provide a folder ID from list_files. Default is 'root'

</Accordion>

<Accordion title="microsoft_sharepoint/search_files">
**Description:** Search for files and folders in a SharePoint document library by keywords. Searches file names, folder names, and file contents for Office documents. Do not use wildcards or special characters.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `query` (string, required): Search keywords (e.g., 'report', 'budget 2024'). Wildcards like *.txt are not supported
- `top` (integer, optional): Maximum number of results to return per page (1-1000). Default is 50
- `skip_token` (string, optional): Pagination token from a previous response to fetch the next page of results
- `select` (string, optional): Comma-separated list of fields to return (e.g., 'id,name,size,folder,file,webUrl,lastModifiedDateTime')

</Accordion>

<Accordion title="microsoft_sharepoint/copy_file">
**Description:** Copy a file or folder to a new location within SharePoint. The original item remains unchanged. The copy operation is asynchronous for large files.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the file or folder to copy. Obtain from list_files or search_files
- `destination_folder_id` (string, required): The ID of the destination folder. Use 'root' for the root folder, or a folder ID from list_files
- `new_name` (string, optional): New name for the copy. If not provided, the original name is used

</Accordion>

<Accordion title="microsoft_sharepoint/move_file">
**Description:** Move a file or folder to a new location within SharePoint. The item is removed from its original location. For folders, all contents are moved as well.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the file or folder to move. Obtain from list_files or search_files
- `destination_folder_id` (string, required): The ID of the destination folder. Use 'root' for the root folder, or a folder ID from list_files
- `new_name` (string, optional): New name for the moved item. If not provided, the original name is kept

</Accordion>

<Accordion title="microsoft_sharepoint/get_excel_worksheets">
**Description:** List all worksheets (tabs) in an Excel workbook stored in a SharePoint document library. Use the returned worksheet name with other Excel actions.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'id,name,position,visibility')
- `filter` (string, optional): OData filter expression (e.g., "visibility eq 'Visible'" to exclude hidden sheets)
- `top` (integer, optional): Maximum number of worksheets to return. Minimum: 1, Maximum: 999
- `orderby` (string, optional): Sort order (e.g., 'position asc' to return sheets in tab order)

</Accordion>

<Accordion title="microsoft_sharepoint/create_excel_worksheet">
**Description:** Create a new worksheet (tab) in an Excel workbook stored in a SharePoint document library. The new sheet is added at the end of the tab list.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
- `name` (string, required): Name for the new worksheet. Maximum 31 characters. Cannot contain: \ / * ? : [ ]. Must be unique within the workbook

</Accordion>

<Accordion title="microsoft_sharepoint/get_excel_range_data">
**Description:** Retrieve cell values from a specific range in an Excel worksheet stored in SharePoint. For reading all data without knowing dimensions, use get_excel_used_range instead.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
- `worksheet_name` (string, required): Name of the worksheet (tab) to read from. Obtain from get_excel_worksheets. Case-sensitive
- `range` (string, required): Cell range in A1 notation (e.g., 'A1:C10', 'A:C', '1:5', 'A1')
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'address,values,formulas,numberFormat,text')

</Accordion>

<Accordion title="microsoft_sharepoint/update_excel_range_data">
**Description:** Write values to a specific range in an Excel worksheet stored in SharePoint. Overwrites existing cell contents. The values array dimensions must match the range dimensions exactly.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
- `worksheet_name` (string, required): Name of the worksheet (tab) to update. Obtain from get_excel_worksheets. Case-sensitive
- `range` (string, required): Cell range in A1 notation where values will be written (e.g., 'A1:C3' for a 3x3 block)
- `values` (array, required): 2D array of values (rows containing cells). Example for A1:B2: [["Header1", "Header2"], ["Value1", "Value2"]]. Use null to clear a cell

</Accordion>
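
Since the `values` array must match the `range` dimensions exactly, it can be safer to derive the range string from the data itself. A hedged, illustrative sketch (the helper names are hypothetical):

```python
def column_letters(index: int) -> str:
    """Convert a 0-based column index to letters (0 -> 'A', 27 -> 'AB')."""
    letters = ""
    index += 1
    while index > 0:
        index, rem = divmod(index - 1, 26)
        letters = chr(ord("A") + rem) + letters
    return letters

def excel_range(start_row: int, start_col: int, values: list[list]) -> str:
    """A1-notation range covering `values`, anchored at a 0-based start cell."""
    end_row = start_row + len(values) - 1
    end_col = start_col + len(values[0]) - 1
    return (f"{column_letters(start_col)}{start_row + 1}:"
            f"{column_letters(end_col)}{end_row + 1}")

values = [["Header1", "Header2"], ["Value1", "Value2"]]
assert excel_range(0, 0, values) == "A1:B2"  # matches the docs' example above
```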

<Accordion title="microsoft_sharepoint/get_excel_used_range_metadata">
**Description:** Return only the metadata (address and dimensions) of the used range in a worksheet, without the actual cell values. Ideal for large files to understand spreadsheet size before reading data in chunks.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
- `worksheet_name` (string, required): Name of the worksheet (tab) to read. Obtain from get_excel_worksheets. Case-sensitive

</Accordion>

<Accordion title="microsoft_sharepoint/get_excel_used_range">
**Description:** Retrieve all cells containing data in a worksheet stored in SharePoint. Do not use for files larger than 2MB. For large files, use get_excel_used_range_metadata first, then get_excel_range_data to read in smaller chunks.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
- `worksheet_name` (string, required): Name of the worksheet (tab) to read. Obtain from get_excel_worksheets. Case-sensitive
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'address,values,formulas,numberFormat,text,rowCount,columnCount')

</Accordion>
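
For workbooks too large to read in one call, the description above suggests fetching the used-range metadata first and then reading ranges in chunks. A rough sketch of the chunking arithmetic (reusing the hypothetical `column_letters` helper from the earlier sketch; it assumes the used range starts at A1):

```python
def row_chunks(total_rows: int, total_cols: int, rows_per_chunk: int = 500):
    """Yield A1 ranges covering a used range of total_rows x total_cols."""
    last_col = column_letters(total_cols - 1)  # from the previous sketch
    for start in range(0, total_rows, rows_per_chunk):
        end = min(start + rows_per_chunk, total_rows) - 1
        yield f"A{start + 1}:{last_col}{end + 1}"

# e.g., a 1,200-row, 5-column sheet reads in three get_excel_range_data calls:
assert list(row_chunks(1200, 5)) == ["A1:E500", "A501:E1000", "A1001:E1200"]
```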

<Accordion title="microsoft_sharepoint/get_excel_cell">
**Description:** Retrieve the value of a single cell by row and column index from an Excel file in SharePoint. Indices are 0-based (row 0 = Excel row 1, column 0 = column A).

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
- `worksheet_name` (string, required): Name of the worksheet (tab). Obtain from get_excel_worksheets. Case-sensitive
- `row` (integer, required): 0-based row index (row 0 = Excel row 1). Valid range: 0-1048575
- `column` (integer, required): 0-based column index (column 0 = A, column 1 = B). Valid range: 0-16383
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'address,values,formulas,numberFormat,text')

</Accordion>

<Accordion title="microsoft_sharepoint/add_excel_table">
**Description:** Convert a cell range into a formatted Excel table with filtering, sorting, and structured data capabilities. Tables enable add_excel_table_row for appending data.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
- `worksheet_name` (string, required): Name of the worksheet containing the data range. Obtain from get_excel_worksheets
- `range` (string, required): Cell range to convert into a table, including headers and data (e.g., 'A1:D10' where A1:D1 contains column headers)
- `has_headers` (boolean, optional): Set to true if the first row contains column headers. Default is true

</Accordion>

<Accordion title="microsoft_sharepoint/get_excel_tables">
**Description:** List all tables in a specific Excel worksheet stored in SharePoint. Returns table properties including id, name, showHeaders, and showTotals.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
- `worksheet_name` (string, required): Name of the worksheet to get tables from. Obtain from get_excel_worksheets

</Accordion>

<Accordion title="microsoft_sharepoint/add_excel_table_row">
**Description:** Append a new row to the end of an Excel table in a SharePoint file. The values array must have the same number of elements as the table has columns.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
- `worksheet_name` (string, required): Name of the worksheet containing the table. Obtain from get_excel_worksheets
- `table_name` (string, required): Name of the table to add the row to (e.g., 'Table1'). Obtain from get_excel_tables. Case-sensitive
- `values` (array, required): Array of cell values for the new row, one per column in table order (e.g., ["John Doe", "john@example.com", 25])

</Accordion>

<Accordion title="microsoft_sharepoint/get_excel_table_data">
**Description:** Get all rows from an Excel table in a SharePoint file as a data range. Easier than get_excel_range_data when working with structured tables since you don't need to know the exact range.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
- `worksheet_name` (string, required): Name of the worksheet containing the table. Obtain from get_excel_worksheets
- `table_name` (string, required): Name of the table to get data from (e.g., 'Table1'). Obtain from get_excel_tables. Case-sensitive
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'address,values,formulas,numberFormat,text')

</Accordion>

<Accordion title="microsoft_sharepoint/create_excel_chart">
**Description:** Create a chart visualization in an Excel worksheet stored in SharePoint from a data range. The chart is embedded in the worksheet.

**Parameters:**
- `site_id` (string, required): The full SharePoint site identifier from get_sites
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint
|
||||
- `worksheet_name` (string, required): Name of the worksheet where the chart will be created. Obtain from get_excel_worksheets
|
||||
- `chart_type` (string, required): Chart type (e.g., 'ColumnClustered', 'ColumnStacked', 'Line', 'LineMarkers', 'Pie', 'Bar', 'BarClustered', 'Area', 'Scatter', 'Doughnut')
|
||||
- `source_data` (string, required): Data range for the chart in A1 notation, including headers (e.g., 'A1:B10')
|
||||
- `series_by` (string, optional): How data series are organized: 'Auto', 'Columns', or 'Rows'. Default is 'Auto'
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/list_excel_charts">
|
||||
**Description:** List all charts embedded in an Excel worksheet stored in SharePoint. Returns chart properties including id, name, chartType, height, width, and position.
|
||||
|
||||
**Parameters:**
|
||||
- `site_id` (string, required): The full SharePoint site identifier from get_sites
|
||||
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
|
||||
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
|
||||
- `worksheet_name` (string, required): Name of the worksheet to list charts from. Obtain from get_excel_worksheets
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/delete_excel_worksheet">
|
||||
**Description:** Permanently remove a worksheet (tab) and all its contents from an Excel workbook stored in SharePoint. Cannot be undone. A workbook must have at least one worksheet.
|
||||
|
||||
**Parameters:**
|
||||
- `site_id` (string, required): The full SharePoint site identifier from get_sites
|
||||
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
|
||||
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
|
||||
- `worksheet_name` (string, required): Name of the worksheet to delete. Case-sensitive. All data, tables, and charts on this sheet will be permanently removed
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/delete_excel_table">
|
||||
**Description:** Remove a table from an Excel worksheet in SharePoint. This deletes the table structure (filtering, formatting, table features) but preserves the underlying cell data.
|
||||
|
||||
**Parameters:**
|
||||
- `site_id` (string, required): The full SharePoint site identifier from get_sites
|
||||
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
|
||||
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
|
||||
- `worksheet_name` (string, required): Name of the worksheet containing the table. Obtain from get_excel_worksheets
|
||||
- `table_name` (string, required): Name of the table to delete (e.g., 'Table1'). Obtain from get_excel_tables. The data in the cells will remain after table deletion
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/list_excel_names">
|
||||
**Description:** Retrieve all named ranges defined in an Excel workbook stored in SharePoint. Named ranges are user-defined labels for cell ranges (e.g., 'SalesData' for A1:D100).
|
||||
|
||||
**Parameters:**
|
||||
- `site_id` (string, required): The full SharePoint site identifier from get_sites
|
||||
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
|
||||
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/get_word_document_content">
|
||||
**Description:** Download and extract text content from a Word document (.docx) stored in a SharePoint document library. This is the recommended way to read Word documents from SharePoint.
|
||||
|
||||
**Parameters:**
|
||||
- `site_id` (string, required): The full SharePoint site identifier from get_sites
|
||||
- `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs
|
||||
- `item_id` (string, required): The unique identifier of the Word document (.docx) in SharePoint. Obtain from list_files or search_files
|
||||
- `site_id` (string, required): The ID of the SharePoint site
|
||||
- `item_id` (string, required): The ID of the file or folder to delete
|
||||
|
||||
</Accordion>
|
||||
</AccordionGroup>
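
A minimal crewAI sketch of the discover-then-read flow these actions imply (the agent/task pattern mirrors the usage examples elsewhere in these docs; the site and file names are hypothetical):

```python
from crewai import Agent, Task, Crew

# Agent wired to the SharePoint discovery and Excel read actions listed above
sheet_reader = Agent(
    role="SharePoint Analyst",
    goal="Locate an Excel workbook in SharePoint and summarize its data",
    backstory="An AI assistant that navigates SharePoint document libraries.",
    apps=[
        'microsoft_sharepoint/get_sites',
        'microsoft_sharepoint/get_drives',
        'microsoft_sharepoint/search_files',
        'microsoft_sharepoint/get_excel_worksheets',
        'microsoft_sharepoint/get_excel_used_range',
    ]
)

read_task = Task(
    description="Find the workbook named 'budget 2024', list its worksheets, and summarize the data on the first worksheet.",
    agent=sheet_reader,
    expected_output="A short summary of the worksheet's contents."
)

crew = Crew(agents=[sheet_reader], tasks=[read_task])
crew.kickoff()
```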
@@ -108,86 +108,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `join_web_url` (string, required): The join web URL of the meeting to search for.

</Accordion>

<Accordion title="microsoft_teams/search_online_meetings_by_meeting_id">

**Description:** Search online meetings by external Meeting ID.

**Parameters:**

- `join_meeting_id` (string, required): The meeting ID (numeric code) that attendees use to join. This is the joinMeetingId shown in meeting invitations, not the Graph API meeting id.

</Accordion>

<Accordion title="microsoft_teams/get_meeting">

**Description:** Get details of a specific online meeting.

**Parameters:**

- `meeting_id` (string, required): The Graph API meeting ID (a long alphanumeric string). Obtain from create_meeting or search_online_meetings actions. Different from the numeric joinMeetingId.

</Accordion>

<Accordion title="microsoft_teams/get_team_members">

**Description:** Get members of a specific team.

**Parameters:**

- `team_id` (string, required): The unique identifier of the team. Obtain from get_teams action.
- `top` (integer, optional): Maximum number of members to retrieve per page (1-999). Default is `100`.
- `skip_token` (string, optional): Pagination token from a previous response. When the response includes @odata.nextLink, extract the $skiptoken parameter value and pass it here to get the next page of results.

</Accordion>

<Accordion title="microsoft_teams/create_channel">

**Description:** Create a new channel in a team.

**Parameters:**

- `team_id` (string, required): The unique identifier of the team. Obtain from get_teams action.
- `display_name` (string, required): Name of the channel as displayed in Teams. Must be unique within the team. Max 50 characters.
- `description` (string, optional): Optional description explaining the channel's purpose. Visible in channel details. Max 1024 characters.
- `membership_type` (string, optional): Channel visibility. Enum: `standard`, `private`. "standard" = visible to all team members, "private" = visible only to specifically added members. Default is `standard`.

</Accordion>

<Accordion title="microsoft_teams/get_message_replies">

**Description:** Get replies to a specific message in a channel.

**Parameters:**

- `team_id` (string, required): The unique identifier of the team. Obtain from get_teams action.
- `channel_id` (string, required): The unique identifier of the channel. Obtain from get_channels action.
- `message_id` (string, required): The unique identifier of the parent message. Obtain from get_messages action.
- `top` (integer, optional): Maximum number of replies to retrieve per page (1-50). Default is `50`.
- `skip_token` (string, optional): Pagination token from a previous response. When the response includes @odata.nextLink, extract the $skiptoken parameter value and pass it here to get the next page of results (see the sketch after this group).

</Accordion>

<Accordion title="microsoft_teams/reply_to_message">

**Description:** Reply to a message in a Teams channel.

**Parameters:**

- `team_id` (string, required): The unique identifier of the team. Obtain from get_teams action.
- `channel_id` (string, required): The unique identifier of the channel. Obtain from get_channels action.
- `message_id` (string, required): The unique identifier of the message to reply to. Obtain from get_messages action.
- `message` (string, required): The reply content. For HTML, include formatting tags. For text, plain text only.
- `content_type` (string, optional): Content format. Enum: `html`, `text`. "text" for plain text, "html" for rich text with formatting. Default is `text`.

</Accordion>

<Accordion title="microsoft_teams/update_meeting">

**Description:** Update an existing online meeting.

**Parameters:**

- `meeting_id` (string, required): The unique identifier of the meeting. Obtain from create_meeting or search_online_meetings actions.
- `subject` (string, optional): New meeting title.
- `startDateTime` (string, optional): New start time in ISO 8601 format with timezone. Example: "2024-01-20T10:00:00-08:00".
- `endDateTime` (string, optional): New end time in ISO 8601 format with timezone.

</Accordion>

<Accordion title="microsoft_teams/delete_meeting">

**Description:** Delete an online meeting.

**Parameters:**

- `meeting_id` (string, required): The unique identifier of the meeting to delete. Obtain from create_meeting or search_online_meetings actions.

</Accordion>
</AccordionGroup>
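
Several of the actions above page their results with `skip_token`. A minimal sketch of pulling the `$skiptoken` value out of an `@odata.nextLink` URL, using only the standard library (the example link is hypothetical):

```python
from urllib.parse import urlparse, parse_qs

def extract_skip_token(next_link: str) -> str | None:
    """Return the $skiptoken query value from an @odata.nextLink URL, if present."""
    query = parse_qs(urlparse(next_link).query)
    tokens = query.get("$skiptoken")
    return tokens[0] if tokens else None

# Hypothetical nextLink from a paged response:
link = "https://graph.microsoft.com/v1.0/teams/abc/members?$skiptoken=X%27123%27"
print(extract_skip_token(link))  # X'123'
```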
## Usage Examples
@@ -98,26 +98,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `file_id` (string, required): The ID of the document to delete.

</Accordion>

<Accordion title="microsoft_word/copy_document">

**Description:** Copy a document to a new location in OneDrive.

**Parameters:**

- `file_id` (string, required): The ID of the document to copy
- `name` (string, optional): New name for the copied document
- `parent_id` (string, optional): The ID of the destination folder (defaults to root)

</Accordion>

<Accordion title="microsoft_word/move_document">

**Description:** Move a document to a new location in OneDrive.

**Parameters:**

- `file_id` (string, required): The ID of the document to move
- `parent_id` (string, required): The ID of the destination folder
- `name` (string, optional): New name for the moved document

</Accordion>
</AccordionGroup>

## Usage Examples
@@ -1,61 +0,0 @@
---
title: Coding Tools
description: Use AGENTS.md to guide coding agents and IDEs across your CrewAI projects.
icon: terminal
mode: "wide"
---

## Why AGENTS.md

`AGENTS.md` is a lightweight, repo-local instruction file that gives coding agents consistent, project-specific guidance. Keep it in the project root and treat it as the source of truth for how you want assistants to work: conventions, commands, architecture notes, and guardrails.

## Create a Project with the CLI

Use the CrewAI CLI to scaffold a project; an `AGENTS.md` file is automatically added at the project root.

```bash
# Crew
crewai create crew my_crew

# Flow
crewai create flow my_flow

# Tool repository
crewai tool create my_tool
```

## Tool Setup: Point Assistants to AGENTS.md

### Codex

Codex can be guided by `AGENTS.md` files placed in your repository. Use them to supply persistent project context such as conventions, commands, and workflow expectations.

### Claude Code

Claude Code stores project memory in `CLAUDE.md`. You can bootstrap it with `/init` and edit it using `/memory`. Claude Code also supports imports inside `CLAUDE.md`, so you can add a single line like `@AGENTS.md` to pull in the shared instructions without duplicating them.

You can simply use:

```bash
mv AGENTS.md CLAUDE.md
```

### Gemini CLI and Google Antigravity

Gemini CLI and Antigravity load a project context file (default: `GEMINI.md`) from the repo root and parent directories. You can configure it to read `AGENTS.md` instead (or in addition) by setting `context.fileName` in your Gemini CLI settings. For example, set it to `AGENTS.md` only, or include both `AGENTS.md` and `GEMINI.md` if you want to keep each tool’s format.
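
For example, a minimal settings sketch (assuming the default Gemini CLI settings location of `.gemini/settings.json`; only the `context.fileName` key comes from the guidance above):

```json
{
  "context": {
    "fileName": ["AGENTS.md", "GEMINI.md"]
  }
}
```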

Alternatively, you can simply use:

```bash
mv AGENTS.md GEMINI.md
```

### Cursor

Cursor supports `AGENTS.md` as a project instruction file. Place it at the project root to provide guidance for Cursor’s coding assistant.

### Windsurf

Claude Code provides an official integration with Windsurf. If you use Claude Code inside Windsurf, follow the Claude Code guidance above and import `AGENTS.md` from `CLAUDE.md`.

If you are using Windsurf’s native assistant, configure its project rules or instructions feature (if available) to read from `AGENTS.md` or paste the contents directly.
File diff suppressed because it is too large
Load Diff
@@ -200,25 +200,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `clientData` (array, optional): Client-specific data. Each item is an object with a `key` (string) and a `value` (string).

</Accordion>

<Accordion title="google_contacts/update_contact_group">

**Description:** Update a contact group's information.

**Parameters:**

- `resourceName` (string, required): The resource name of the contact group (e.g., 'contactGroups/myContactGroup').
- `name` (string, required): The name of the contact group.
- `clientData` (array, optional): Client-specific data. Each item is an object with a `key` (string) and a `value` (string).

</Accordion>

<Accordion title="google_contacts/delete_contact_group">

**Description:** Delete a contact group.

**Parameters:**

- `resourceName` (string, required): The resource name of the contact group to delete (e.g., 'contactGroups/myContactGroup').
- `deleteContacts` (boolean, optional): Whether to also delete the contacts in the group. Default: false

</Accordion>
</AccordionGroup>

## Usage Examples
@@ -131,297 +131,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `endIndex` (integer, required): The end index of the range.

</Accordion>

<Accordion title="google_docs/create_document_with_content">

**Description:** Create a new Google Doc with content in a single step.

**Parameters:**

- `title` (string, required): Title of the new document. Shown at the top of the document and in Google Drive.
- `content` (string, optional): Text content to insert into the document. Use `\n` for new paragraphs.

</Accordion>

<Accordion title="google_docs/append_text">

**Description:** Append text to the end of a Google Doc. The text is automatically inserted at the end of the document, with no index required.

**Parameters:**

- `documentId` (string, required): The document ID from the create_document response or the URL.
- `text` (string, required): Text to append to the end of the document. Use `\n` for new paragraphs.

</Accordion>

<Accordion title="google_docs/set_text_bold">

**Description:** Make text bold or remove bold formatting in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to format.
- `endIndex` (integer, required): End position of the text to format (exclusive).
- `bold` (boolean, required): Set to `true` to make the text bold, `false` to remove bold.

</Accordion>

<Accordion title="google_docs/set_text_italic">

**Description:** Make text italic or remove italic formatting in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to format.
- `endIndex` (integer, required): End position of the text to format (exclusive).
- `italic` (boolean, required): Set to `true` to make the text italic, `false` to remove italics.

</Accordion>

<Accordion title="google_docs/set_text_underline">

**Description:** Add or remove underline formatting on text in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to format.
- `endIndex` (integer, required): End position of the text to format (exclusive).
- `underline` (boolean, required): Set to `true` to add an underline, `false` to remove it.

</Accordion>

<Accordion title="google_docs/set_text_strikethrough">

**Description:** Add or remove strikethrough formatting on text in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to format.
- `endIndex` (integer, required): End position of the text to format (exclusive).
- `strikethrough` (boolean, required): Set to `true` to add strikethrough, `false` to remove it.

</Accordion>

<Accordion title="google_docs/set_font_size">

**Description:** Change the font size of text in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to format.
- `endIndex` (integer, required): End position of the text to format (exclusive).
- `fontSize` (number, required): Font size in points. Common sizes: 10, 11, 12, 14, 16, 18, 24, 36.

</Accordion>

<Accordion title="google_docs/set_text_color">

**Description:** Change the text color in a Google Doc using RGB values (0-1 scale).

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to format.
- `endIndex` (integer, required): End position of the text to format (exclusive).
- `red` (number, required): Red component (0-1). Example: `1` is full red.
- `green` (number, required): Green component (0-1). Example: `0.5` is half green.
- `blue` (number, required): Blue component (0-1). Example: `0` is no blue.

</Accordion>

<Accordion title="google_docs/create_hyperlink">

**Description:** Turn existing text in a Google Doc into a clickable hyperlink.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to link.
- `endIndex` (integer, required): End position of the text to link (exclusive).
- `url` (string, required): The URL the link points to. Example: `"https://example.com"`.

</Accordion>

<Accordion title="google_docs/apply_heading_style">

**Description:** Apply a heading or paragraph style to a text range in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the paragraph to style.
- `endIndex` (integer, required): End position of the paragraph to style.
- `style` (string, required): The style to apply. Options: `NORMAL_TEXT`, `TITLE`, `SUBTITLE`, `HEADING_1`, `HEADING_2`, `HEADING_3`, `HEADING_4`, `HEADING_5`, `HEADING_6`.

</Accordion>

<Accordion title="google_docs/set_paragraph_alignment">

**Description:** Set the text alignment of a paragraph in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the paragraph to align.
- `endIndex` (integer, required): End position of the paragraph to align.
- `alignment` (string, required): Text alignment. Options: `START` (left), `CENTER`, `END` (right), `JUSTIFIED`.

</Accordion>

<Accordion title="google_docs/set_line_spacing">

**Description:** Set the line spacing of a paragraph in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the paragraph.
- `endIndex` (integer, required): End position of the paragraph.
- `lineSpacing` (number, required): Line spacing as a percentage. `100` = single, `115` = 1.15x, `150` = 1.5x, `200` = double.

</Accordion>

<Accordion title="google_docs/create_paragraph_bullets">

**Description:** Convert paragraphs into a bulleted or numbered list in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the paragraphs to convert.
- `endIndex` (integer, required): End position of the paragraphs to convert.
- `bulletPreset` (string, required): Bullet/numbering style. Options: `BULLET_DISC_CIRCLE_SQUARE`, `BULLET_DIAMONDX_ARROW3D_SQUARE`, `BULLET_CHECKBOX`, `BULLET_ARROW_DIAMOND_DISC`, `BULLET_STAR_CIRCLE_SQUARE`, `NUMBERED_DECIMAL_ALPHA_ROMAN`, `NUMBERED_DECIMAL_ALPHA_ROMAN_PARENS`, `NUMBERED_DECIMAL_NESTED`, `NUMBERED_UPPERALPHA_ALPHA_ROMAN`, `NUMBERED_UPPERROMAN_UPPERALPHA_DECIMAL`.

</Accordion>

<Accordion title="google_docs/delete_paragraph_bullets">

**Description:** Remove bullets or numbering from paragraphs in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the list paragraphs.
- `endIndex` (integer, required): End position of the list paragraphs.

</Accordion>

<Accordion title="google_docs/insert_table_with_content">

**Description:** Insert a table with content into a Google Doc in a single step. Provide the content as a 2D array.

**Parameters:**

- `documentId` (string, required): The document ID.
- `rows` (integer, required): Number of rows in the table.
- `columns` (integer, required): Number of columns in the table.
- `index` (integer, optional): Position to insert the table. If omitted, it is inserted at the end of the document.
- `content` (array, required): Table content as a 2D array. Each inner array is a row. Example: `[["Year", "Revenue"], ["2023", "$43B"], ["2024", "$45B"]]`.

</Accordion>

<Accordion title="google_docs/insert_table_row">

**Description:** Insert a new row above or below a reference cell in an existing table.

**Parameters:**

- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table. Obtain from get_document.
- `rowIndex` (integer, required): Row index of the reference cell (0-based).
- `columnIndex` (integer, optional): Column index of the reference cell (0-based). Default: `0`.
- `insertBelow` (boolean, optional): `true` to insert below the reference row, `false` to insert above. Default: `true`.

</Accordion>

<Accordion title="google_docs/insert_table_column">

**Description:** Insert a new column to the left or right of a reference cell in an existing table.

**Parameters:**

- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, optional): Row index of the reference cell (0-based). Default: `0`.
- `columnIndex` (integer, required): Column index of the reference cell (0-based).
- `insertRight` (boolean, optional): `true` to insert to the right, `false` to insert to the left. Default: `true`.

</Accordion>

<Accordion title="google_docs/delete_table_row">

**Description:** Delete a row from an existing table in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, required): Index of the row to delete (0-based).
- `columnIndex` (integer, optional): Column index of any cell in the row (0-based). Default: `0`.

</Accordion>

<Accordion title="google_docs/delete_table_column">

**Description:** Delete a column from an existing table in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, optional): Row index of any cell in the column (0-based). Default: `0`.
- `columnIndex` (integer, required): Index of the column to delete (0-based).

</Accordion>

<Accordion title="google_docs/merge_table_cells">

**Description:** Merge a range of table cells into a single cell. The content of all cells is preserved.

**Parameters:**

- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, required): Starting row index of the merge (0-based).
- `columnIndex` (integer, required): Starting column index of the merge (0-based).
- `rowSpan` (integer, required): Number of rows to merge.
- `columnSpan` (integer, required): Number of columns to merge.

</Accordion>

<Accordion title="google_docs/unmerge_table_cells">

**Description:** Split previously merged table cells back into individual cells.

**Parameters:**

- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, required): Row index of the merged cell (0-based).
- `columnIndex` (integer, required): Column index of the merged cell (0-based).
- `rowSpan` (integer, required): Number of rows the merged cell spans.
- `columnSpan` (integer, required): Number of columns the merged cell spans.

</Accordion>

<Accordion title="google_docs/insert_inline_image">

**Description:** Insert an image into a Google Doc from a public URL. The image must be publicly accessible, under 50MB, and in PNG/JPEG/GIF format.

**Parameters:**

- `documentId` (string, required): The document ID.
- `uri` (string, required): Public URL of the image. Must be accessible without authentication.
- `index` (integer, optional): Position to insert the image. If omitted, it is inserted at the end of the document. Default: `1`.

</Accordion>

<Accordion title="google_docs/insert_section_break">

**Description:** Insert a section break to create document sections with different formatting.

**Parameters:**

- `documentId` (string, required): The document ID.
- `index` (integer, required): Position to insert the section break.
- `sectionType` (string, required): Type of section break. Options: `CONTINUOUS` (stays on the same page), `NEXT_PAGE` (starts a new page).

</Accordion>

<Accordion title="google_docs/create_header">

**Description:** Create a header for the document. Returns a headerId you can use with insert_text to add header content.

**Parameters:**

- `documentId` (string, required): The document ID.
- `type` (string, optional): Header type. Options: `DEFAULT`. Default: `DEFAULT`.

</Accordion>

<Accordion title="google_docs/create_footer">

**Description:** Create a footer for the document. Returns a footerId you can use with insert_text to add footer content.

**Parameters:**

- `documentId` (string, required): The document ID.
- `type` (string, optional): Footer type. Options: `DEFAULT`. Default: `DEFAULT`.

</Accordion>

<Accordion title="google_docs/delete_header">

**Description:** Delete a header from the document. Use get_document to find the headerId.

**Parameters:**

- `documentId` (string, required): The document ID.
- `headerId` (string, required): ID of the header to delete. Obtain from the get_document response.

</Accordion>

<Accordion title="google_docs/delete_footer">

**Description:** Delete a footer from the document. Use get_document to find the footerId.

**Parameters:**

- `documentId` (string, required): The document ID.
- `footerId` (string, required): ID of the footer to delete. Obtain from the get_document response.

</Accordion>
</AccordionGroup>

## Usage Examples
@@ -61,22 +61,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="google_slides/get_presentation_metadata">

**Description:** Get lightweight metadata about a presentation (title, slide count, slide IDs). Use this first, before fetching the full content.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation to retrieve.

</Accordion>

<Accordion title="google_slides/get_presentation_text">

**Description:** Extract all text content from a presentation. Returns only slide IDs and the text from shapes and tables (no formatting).

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.

</Accordion>

<Accordion title="google_slides/get_presentation">

**Description:** Retrieve a presentation by ID.
@@ -96,15 +80,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="google_slides/get_slide_text">

**Description:** Extract the text content from a single slide. Returns only the text from shapes and tables (no formatting or styling).

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `pageObjectId` (string, required): The ID of the slide/page to get text from.

</Accordion>

<Accordion title="google_slides/get_page">

**Description:** Retrieve a specific page by ID.
@@ -123,120 +98,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="google_slides/create_slide">

**Description:** Add an additional blank slide to a presentation. A new presentation already contains one blank slide, so check get_presentation_metadata first. For slides with title/body areas, use create_slide_with_layout.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `insertionIndex` (integer, optional): Position to insert the slide (0-based). If omitted, it is added at the end.

</Accordion>

<Accordion title="google_slides/create_slide_with_layout">

**Description:** Create a slide from a predefined layout with placeholder areas such as title and body. Better than create_slide for structured content. After creating it, use get_page to find the placeholder IDs, then insert text into them.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `layout` (string, required): Layout type. Options: `BLANK`, `TITLE`, `TITLE_AND_BODY`, `TITLE_AND_TWO_COLUMNS`, `TITLE_ONLY`, `SECTION_HEADER`, `ONE_COLUMN_TEXT`, `MAIN_POINT`, `BIG_NUMBER`. TITLE_AND_BODY suits a title plus description, TITLE suits a title only, and SECTION_HEADER suits section dividers.
- `insertionIndex` (integer, optional): Position to insert (0-based). If omitted, it is added at the end.

</Accordion>

<Accordion title="google_slides/create_text_box">

**Description:** Create a text box with content on a slide. Use it for titles, descriptions, and paragraphs; do not use it for tables. Optionally specify position (x, y) and size (width, height) in EMU (914400 EMU = 1 inch; see the helper sketch below).

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the text box to.
- `text` (string, required): The text content of the text box.
- `x` (integer, optional): X position in EMU (914400 = 1 inch). Default: 914400 (1 inch from the left).
- `y` (integer, optional): Y position in EMU (914400 = 1 inch). Default: 914400 (1 inch from the top).
- `width` (integer, optional): Width in EMU. Default: 7315200 (about 8 inches).
- `height` (integer, optional): Height in EMU. Default: 914400 (about 1 inch).

</Accordion>
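
EMU arithmetic is easy to get wrong, so here is a minimal helper built only on the 914400-EMU-per-inch ratio stated above:

```python
EMU_PER_INCH = 914400

def inches_to_emu(inches: float) -> int:
    """Convert inches to the English Metric Units used for positions and sizes."""
    return round(inches * EMU_PER_INCH)

# A text box 8 inches wide and 1 inch tall, 1 inch from the top-left corner:
x = inches_to_emu(1)       # 914400
y = inches_to_emu(1)       # 914400
width = inches_to_emu(8)   # 7315200
height = inches_to_emu(1)  # 914400
```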

<Accordion title="google_slides/delete_slide">

**Description:** Remove a slide from a presentation. Use get_presentation first to find the slide ID.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The object ID of the slide to delete. Obtain from get_presentation.

</Accordion>

<Accordion title="google_slides/duplicate_slide">

**Description:** Create a copy of an existing slide. The copy is inserted immediately after the original.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The object ID of the slide to duplicate. Obtain from get_presentation.

</Accordion>

<Accordion title="google_slides/move_slides">

**Description:** Reorder slides by moving them to a new position. Slide IDs must be in the current presentation order (no duplicates).

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideIds` (array of strings, required): Array of slide IDs to move. Must be in the current presentation order.
- `insertionIndex` (integer, required): Target position (0-based). 0 = front, slide count = end.

</Accordion>

<Accordion title="google_slides/insert_youtube_video">

**Description:** Insert a YouTube video onto a slide. The video ID is the value after "v=" in the YouTube URL (e.g., for youtube.com/watch?v=abc123, use "abc123").

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the video to. Obtain from get_presentation.
- `videoId` (string, required): The YouTube video ID (the value after v= in the URL).

</Accordion>

<Accordion title="google_slides/insert_drive_video">

**Description:** Insert a video from Google Drive onto a slide. The file ID can be found in the Drive file URL.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the video to. Obtain from get_presentation.
- `fileId` (string, required): The Google Drive file ID of the video.

</Accordion>

<Accordion title="google_slides/set_slide_background_image">

**Description:** Set the background image of a slide. The image URL must be publicly accessible.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to set the background on. Obtain from get_presentation.
- `imageUrl` (string, required): Publicly accessible URL of the image to use as the background.

</Accordion>

<Accordion title="google_slides/create_table">

**Description:** Create an empty table on a slide. To create a table with content, use create_table_with_content.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the table to. Obtain from get_presentation.
- `rows` (integer, required): Number of rows in the table.
- `columns` (integer, required): Number of columns in the table.

</Accordion>

<Accordion title="google_slides/create_table_with_content">

**Description:** Create a table with content in a single operation. Provide the content as a 2D array where each inner array represents a row. Example: [["Header1", "Header2"], ["Row1Col1", "Row1Col2"]].

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the table to. Obtain from get_presentation.
- `rows` (integer, required): Number of rows in the table.
- `columns` (integer, required): Number of columns in the table.
- `content` (array, required): Table content as a 2D array. Each inner array is a row. Example: [["Year", "Revenue"], ["2023", "$10M"]].

</Accordion>

<Accordion title="google_slides/import_data_from_sheet">

**Description:** Import data from a Google Sheet into a presentation.
@@ -148,16 +148,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="microsoft_excel/get_table_data">

**Description:** Get data from a specific table in an Excel worksheet.

**Parameters:**

- `file_id` (string, required): The ID of the Excel file.
- `worksheet_name` (string, required): The name of the worksheet.
- `table_name` (string, required): The name of the table.

</Accordion>

<Accordion title="microsoft_excel/create_chart">

**Description:** Create a chart in an Excel worksheet.
@@ -190,15 +180,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="microsoft_excel/get_used_range_metadata">

**Description:** Get metadata about the used range of an Excel worksheet (size only, no data).

**Parameters:**

- `file_id` (string, required): The ID of the Excel file.
- `worksheet_name` (string, required): The name of the worksheet.

</Accordion>

<Accordion title="microsoft_excel/list_charts">

**Description:** Get all charts in an Excel worksheet.
@@ -150,49 +150,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `item_id` (string, required): The ID of the file.

</Accordion>

<Accordion title="microsoft_onedrive/list_files_by_path">

**Description:** List the files and folders at a specific OneDrive path.

**Parameters:**

- `folder_path` (string, required): The folder path (e.g., 'Documents/Reports').
- `top` (integer, optional): Number of items to retrieve (max 1000). Default: 50.
- `orderby` (string, optional): Sort by field (e.g., "name asc", "lastModifiedDateTime desc"). Default: "name asc".

</Accordion>

<Accordion title="microsoft_onedrive/get_recent_files">

**Description:** Get recently accessed files from OneDrive.

**Parameters:**

- `top` (integer, optional): Number of items to retrieve (max 200). Default: 25.

</Accordion>

<Accordion title="microsoft_onedrive/get_shared_with_me">

**Description:** Get files and folders shared with the user.

**Parameters:**

- `top` (integer, optional): Number of items to retrieve (max 200). Default: 50.
- `orderby` (string, optional): Sort by field. Default: "name asc".

</Accordion>

<Accordion title="microsoft_onedrive/get_file_by_path">

**Description:** Get information about a specific file or folder by path.

**Parameters:**

- `file_path` (string, required): The file or folder path (e.g., 'Documents/report.docx').

</Accordion>

<Accordion title="microsoft_onedrive/download_file_by_path">

**Description:** Download a file from OneDrive by path.

**Parameters:**

- `file_path` (string, required): The file path (e.g., 'Documents/report.docx').

</Accordion>
</AccordionGroup>

## Usage Examples
@@ -226,62 +183,6 @@ crew = Crew(

crew.kickoff()
```

### Uploading and Managing Files

```python
from crewai import Agent, Task, Crew

# Create an agent specialized in file operations
file_operator = Agent(
    role="File Operator",
    goal="Upload, download, and manage files accurately",
    backstory="An AI assistant skilled in file handling and content management.",
    apps=['microsoft_onedrive/upload_file', 'microsoft_onedrive/download_file', 'microsoft_onedrive/get_file_info']
)

# Task to upload and manage a file
file_management_task = Task(
    description="Upload a text file named 'report.txt' with the content 'This is a sample report for the project.', then retrieve information about the uploaded file.",
    agent=file_operator,
    expected_output="File uploaded successfully and file information retrieved."
)

crew = Crew(
    agents=[file_operator],
    tasks=[file_management_task]
)

crew.kickoff()
```

### Organizing and Sharing Files

```python
from crewai import Agent, Task, Crew

# Create an agent for organizing and sharing files
file_organizer = Agent(
    role="File Organizer",
    goal="Organize files and create share links for collaboration",
    backstory="An AI assistant that excels at organizing files and managing sharing permissions.",
    apps=['microsoft_onedrive/search_files', 'microsoft_onedrive/move_item', 'microsoft_onedrive/share_item', 'microsoft_onedrive/create_folder']
)

# Task to organize and share files
organize_share_task = Task(
    description="Search for files with 'presentation' in the name, create a folder called 'Presentations', move the files you find into it, and create a read-only share link for the folder.",
    agent=file_organizer,
    expected_output="Files organized into the 'Presentations' folder and a share link created."
)

crew = Crew(
    agents=[file_organizer],
    tasks=[organize_share_task]
)

crew.kickoff()
```

## Troubleshooting

### Common Issues
@@ -295,30 +196,6 @@ crew.kickoff()

- Make sure `file_name` and `content` are provided when uploading a file.
- For binary files, the content must be Base64-encoded, as in the sketch below.
- Verify that you have write permissions for OneDrive.
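
For example, a binary payload can be prepared with Python's standard `base64` module (a minimal sketch; the `file_name` and `content` field names follow the parameter descriptions above, and the file path is hypothetical):

```python
import base64

# Read a binary file and encode it as Base64 text for the upload content field
with open("chart.png", "rb") as f:
    content = base64.b64encode(f.read()).decode("ascii")

upload_args = {"file_name": "chart.png", "content": content}
```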

**File/Folder ID Issues**

- Double-check that the item ID is correct when accessing a specific file or folder.
- Item IDs are returned by other actions such as `list_files` or `search_files`.
- Verify that the item you are referencing exists and is accessible.

**Search and Filter Operations**

- Use appropriate search terms with the `search_files` action.
- For the `filter` parameter, use valid OData syntax.

**File Operations (Copy/Move)**

- For `move_item`, make sure both `item_id` and `parent_id` are provided.
- For `copy_item`, only `item_id` is required; `parent_id` defaults to the root if not specified.
- Verify that the destination folder exists and is accessible.

**Creating Share Links**

- Confirm the item exists before creating a share link.
- Choose the appropriate `type` and `scope` for your sharing requirements.
- The `anonymous` scope allows access without signing in; `organization` requires an organizational account.

### Getting Help
@@ -132,74 +132,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `companyName` (string, optional): The contact's company name.

</Accordion>

<Accordion title="microsoft_outlook/get_message">

**Description:** Get a specific email message by ID.

**Parameters:**

- `message_id` (string, required): The unique identifier of the message. Obtain from the get_messages action.
- `select` (string, optional): Comma-separated list of properties to return. Example: "id,subject,body,from,receivedDateTime". Default: "id,subject,body,from,toRecipients,receivedDateTime".

</Accordion>

<Accordion title="microsoft_outlook/reply_to_email">

**Description:** Reply to an email message.

**Parameters:**

- `message_id` (string, required): The unique identifier of the message to reply to. Obtain from the get_messages action.
- `comment` (string, required): The reply message content. Can be plain text or HTML. The original message is quoted below this content.

</Accordion>

<Accordion title="microsoft_outlook/forward_email">

**Description:** Forward an email message.

**Parameters:**

- `message_id` (string, required): The unique identifier of the message to forward. Obtain from the get_messages action.
- `to_recipients` (array, required): Array of recipient email addresses to forward to. Example: ["john@example.com", "jane@example.com"].
- `comment` (string, optional): Optional message to include above the forwarded content. Can be plain text or HTML.

</Accordion>

<Accordion title="microsoft_outlook/mark_message_read">

**Description:** Mark a message as read or unread.

**Parameters:**

- `message_id` (string, required): The unique identifier of the message. Obtain from the get_messages action.
- `is_read` (boolean, required): Set to true to mark as read, false to mark as unread.

</Accordion>

<Accordion title="microsoft_outlook/delete_message">

**Description:** Delete an email message.

**Parameters:**

- `message_id` (string, required): The unique identifier of the message to delete. Obtain from the get_messages action.

</Accordion>

<Accordion title="microsoft_outlook/update_event">

**Description:** Update an existing calendar event.

**Parameters:**

- `event_id` (string, required): The unique identifier of the event. Obtain from the get_calendar_events action.
- `subject` (string, optional): New subject/title for the event.
- `start_time` (string, optional): New start time in ISO 8601 format (e.g., "2024-01-20T10:00:00"). Required: also provide start_timezone when using this field (see the example below).
- `start_timezone` (string, optional): Timezone of the start time. Required when updating start_time. Examples: "Pacific Standard Time", "Eastern Standard Time", "UTC".
- `end_time` (string, optional): New end time in ISO 8601 format. Required: also provide end_timezone when using this field.
- `end_timezone` (string, optional): Timezone of the end time. Required when updating end_time. Examples: "Pacific Standard Time", "Eastern Standard Time", "UTC".
- `location` (string, optional): New location for the event.
- `body` (string, optional): New body/description for the event. Supports HTML formatting.

</Accordion>
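
As a concrete illustration, paired time/timezone arguments for update_event might look like this (a hypothetical argument sketch; the field names follow the parameter list above):

```python
update_event_args = {
    "event_id": "<event id from get_calendar_events>",
    "start_time": "2024-01-20T10:00:00",
    "start_timezone": "Pacific Standard Time",  # required whenever start_time is set
    "end_time": "2024-01-20T11:00:00",
    "end_timezone": "Pacific Standard Time",    # required whenever end_time is set
}
```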

<Accordion title="microsoft_outlook/delete_event">

**Description:** Delete a calendar event.

**Parameters:**

- `event_id` (string, required): The unique identifier of the event to delete. Obtain from the get_calendar_events action.

</Accordion>
</AccordionGroup>

## Usage Examples
@@ -233,62 +165,6 @@ crew = Crew(

crew.kickoff()
```

### Managing and Searching Email

```python
from crewai import Agent, Task, Crew

# Create an agent specialized in email management
email_manager = Agent(
    role="Email Manager",
    goal="Search, retrieve, and organize email messages",
    backstory="An AI assistant skilled at organizing and managing email.",
    apps=['microsoft_outlook/get_messages']
)

# Task to search and retrieve emails
search_emails_task = Task(
    description="Retrieve the 20 most recent unread emails and provide a summary of the most important ones.",
    agent=email_manager,
    expected_output="A summary of the key unread emails with the important details."
)

crew = Crew(
    agents=[email_manager],
    tasks=[search_emails_task]
)

crew.kickoff()
```

### Managing Calendar and Contacts

```python
from crewai import Agent, Task, Crew

# Create an agent for calendar and contact management
scheduler = Agent(
    role="Calendar and Contact Manager",
    goal="Manage calendar events and maintain contact information",
    backstory="An AI assistant responsible for scheduling and contact organization.",
    apps=['microsoft_outlook/create_calendar_event', 'microsoft_outlook/get_calendar_events', 'microsoft_outlook/create_contact']
)

# Task to create a meeting and add a contact
schedule_task = Task(
    description="Create a calendar event titled 'Team Meeting' for tomorrow at 2 PM in 'Conference Room A', and add a new contact for 'John Smith' with the email 'john.smith@example.com' and the job title 'Project Manager'.",
    agent=scheduler,
    expected_output="Calendar event created and new contact added."
)

crew = Crew(
    agents=[scheduler],
    tasks=[schedule_task]
)

crew.kickoff()
```

## Troubleshooting

### Common Issues
@@ -297,29 +173,11 @@ crew.kickoff()

- Make sure your Microsoft account has the permissions required for email, calendar, and contacts access.
- Required scopes: `Mail.Read`, `Mail.Send`, `Calendars.Read`, `Calendars.ReadWrite`, `Contacts.Read`, `Contacts.ReadWrite`.
- Verify that the OAuth connection includes all required scopes.

**Email Sending Issues**

- Make sure `to_recipients`, `subject`, and `body` are provided to `send_email`.
- Verify that the email addresses are correctly formatted.
- Confirm the account has the `Mail.Send` permission.

**Calendar Event Creation**

- Make sure `subject`, `start_datetime`, and `end_datetime` are provided.
- Use proper ISO 8601 format for date/time fields (e.g., '2024-01-20T10:00:00').
- Check the timezone settings if events appear at the wrong time.

**Contact Management**

- For `create_contact`, make sure the required `displayName` is provided.
- When providing `emailAddresses`, use the correct object format with `address` and `name` properties.

**Search and Filter Issues**

- Use valid OData syntax for the `filter` parameter.
- For date filters, use ISO 8601 format (e.g., "receivedDateTime ge '2024-01-01T00:00:00Z'").

### Getting Help
@@ -77,17 +77,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

</Accordion>

<Accordion title="microsoft_sharepoint/get_drives">

**Description:** List all document libraries (drives) in a SharePoint site. Use this action to find the available libraries before using file operations.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier from get_sites.
- `top` (integer, optional): Maximum number of drives to return per page (1-999). Default: 100
- `skip_token` (string, optional): Pagination token from a previous response to get the next page of results.
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'id,name,webUrl,driveType').

</Accordion>

<Accordion title="microsoft_sharepoint/get_site_lists">

**Description:** Get all lists in a SharePoint site.
@@ -156,317 +145,20 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/list_files">
|
||||
**설명:** SharePoint 문서 라이브러리에서 파일과 폴더를 가져옵니다. 기본적으로 루트 폴더를 나열하지만 folder_id를 제공하여 하위 폴더로 이동할 수 있습니다.
|
||||
<Accordion title="microsoft_sharepoint/get_drive_items">
|
||||
**설명:** SharePoint 문서 라이브러리에서 파일과 폴더를 가져옵니다.
|
||||
|
||||
**매개변수:**
|
||||
- `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자.
|
||||
- `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요.
|
||||
- `folder_id` (string, 선택사항): 내용을 나열할 폴더의 ID. 루트 폴더의 경우 'root'를 사용하거나 이전 list_files 호출에서 가져온 폴더 ID를 제공하세요. 기본값: 'root'
|
||||
- `top` (integer, 선택사항): 페이지당 반환할 최대 항목 수 (1-1000). 기본값: 50
|
||||
- `skip_token` (string, 선택사항): 다음 결과 페이지를 가져오기 위한 이전 응답의 페이지네이션 토큰.
|
||||
- `orderby` (string, 선택사항): 결과 정렬 순서 (예: 'name asc', 'size desc', 'lastModifiedDateTime desc'). 기본값: 'name asc'
|
||||
- `filter` (string, 선택사항): 결과를 좁히기 위한 OData 필터 (예: 'file ne null'은 파일만, 'folder ne null'은 폴더만).
|
||||
- `select` (string, 선택사항): 반환할 필드의 쉼표로 구분된 목록 (예: 'id,name,size,folder,file,webUrl,lastModifiedDateTime').
|
||||
- `site_id` (string, 필수): SharePoint 사이트의 ID.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/delete_file">
|
||||
**설명:** SharePoint 문서 라이브러리에서 파일 또는 폴더를 삭제합니다. 폴더의 경우 모든 내용이 재귀적으로 삭제됩니다. 항목은 사이트 휴지통으로 이동됩니다.
|
||||
<Accordion title="microsoft_sharepoint/delete_drive_item">
|
||||
**설명:** SharePoint 문서 라이브러리에서 파일 또는 폴더를 삭제합니다.
|
||||
|
||||
**매개변수:**
|
||||
- `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자.
|
||||
- `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요.
|
||||
- `item_id` (string, 필수): 삭제할 파일 또는 폴더의 고유 식별자. list_files에서 가져오세요.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/list_files_by_path">
|
||||
**설명:** 경로로 SharePoint 문서 라이브러리 폴더의 파일과 폴더를 나열합니다. 깊은 탐색을 위해 여러 list_files 호출보다 더 효율적입니다.
|
||||
|
||||
**매개변수:**
|
||||
- `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자.
|
||||
- `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요.
|
||||
- `folder_path` (string, 필수): 앞뒤 슬래시 없이 폴더의 전체 경로 (예: 'Documents', 'Reports/2024/Q1').
|
||||
- `top` (integer, 선택사항): 페이지당 반환할 최대 항목 수 (1-1000). 기본값: 50
|
||||
- `skip_token` (string, 선택사항): 다음 결과 페이지를 가져오기 위한 이전 응답의 페이지네이션 토큰.
|
||||
- `orderby` (string, 선택사항): 결과 정렬 순서 (예: 'name asc', 'size desc'). 기본값: 'name asc'
|
||||
- `select` (string, 선택사항): 반환할 필드의 쉼표로 구분된 목록 (예: 'id,name,size,folder,file,webUrl,lastModifiedDateTime').
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/download_file">
|
||||
**설명:** SharePoint 문서 라이브러리에서 원시 파일 내용을 다운로드합니다. 일반 텍스트 파일(.txt, .csv, .json)에만 사용하세요. Excel 파일의 경우 Excel 전용 작업을 사용하세요. Word 파일의 경우 get_word_document_content를 사용하세요.
|
||||
|
||||
**매개변수:**
|
||||
- `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자.
|
||||
- `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요.
|
||||
- `item_id` (string, 필수): 다운로드할 파일의 고유 식별자. list_files 또는 list_files_by_path에서 가져오세요.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/get_file_info">
|
||||
**설명:** SharePoint 문서 라이브러리의 특정 파일 또는 폴더에 대한 자세한 메타데이터를 가져옵니다. 이름, 크기, 생성/수정 날짜 및 작성자 정보가 포함됩니다.
|
||||
|
||||
**매개변수:**
|
||||
- `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자.
|
||||
- `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요.
|
||||
- `item_id` (string, 필수): 파일 또는 폴더의 고유 식별자. list_files 또는 list_files_by_path에서 가져오세요.
|
||||
- `select` (string, 선택사항): 반환할 속성의 쉼표로 구분된 목록 (예: 'id,name,size,createdDateTime,lastModifiedDateTime,webUrl,createdBy,lastModifiedBy').
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/create_folder">
|
||||
**설명:** SharePoint 문서 라이브러리에 새 폴더를 만듭니다. 기본적으로 루트에 폴더를 만들며 하위 폴더를 만들려면 parent_id를 사용하세요.
|
||||
|
||||
**매개변수:**
|
||||
- `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자.
|
||||
- `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요.
|
||||
- `folder_name` (string, 필수): 새 폴더의 이름. 사용할 수 없는 문자: \ / : * ? " < > |
|
||||
- `parent_id` (string, 선택사항): 상위 폴더의 ID. 문서 라이브러리 루트의 경우 'root'를 사용하거나 list_files에서 가져온 폴더 ID를 제공하세요. 기본값: 'root'
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/search_files">
|
||||
**설명:** 키워드로 SharePoint 문서 라이브러리에서 파일과 폴더를 검색합니다. 파일 이름, 폴더 이름 및 Office 문서의 파일 내용을 검색합니다. 와일드카드나 특수 문자를 사용하지 마세요.
|
||||
|
||||
**매개변수:**
|
||||
- `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자.
|
||||
- `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요.
|
||||
- `query` (string, 필수): 검색 키워드 (예: 'report', 'budget 2024'). *.txt와 같은 와일드카드는 지원되지 않습니다.
|
||||
- `top` (integer, 선택사항): 페이지당 반환할 최대 결과 수 (1-1000). 기본값: 50
|
||||
- `skip_token` (string, 선택사항): 다음 결과 페이지를 가져오기 위한 이전 응답의 페이지네이션 토큰.
|
||||
- `select` (string, 선택사항): 반환할 필드의 쉼표로 구분된 목록 (예: 'id,name,size,folder,file,webUrl,lastModifiedDateTime').
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/copy_file">
|
||||
**설명:** SharePoint 내에서 파일 또는 폴더를 새 위치로 복사합니다. 원본 항목은 변경되지 않습니다. 대용량 파일의 경우 복사 작업은 비동기적입니다.
|
||||
|
||||
**매개변수:**
|
||||
- `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자.
|
||||
- `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요.
|
||||
- `item_id` (string, required): The unique identifier of the file or folder to copy. Obtain from list_files or search_files.
- `destination_folder_id` (string, required): The ID of the destination folder. Use 'root' for the root folder, or a folder ID obtained from list_files.
- `new_name` (string, optional): A new name for the copy. If not provided, the original name is used.

</Accordion>

<Accordion title="microsoft_sharepoint/move_file">

**Description:** Moves a file or folder to a new location within SharePoint. The item is removed from its original location. For folders, all contents are moved as well.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the file or folder to move. Obtain from list_files or search_files.
- `destination_folder_id` (string, required): The ID of the destination folder. Use 'root' for the root folder, or a folder ID obtained from list_files.
- `new_name` (string, optional): A new name for the moved item. If not provided, the original name is kept.

</Accordion>

<Accordion title="microsoft_sharepoint/get_excel_worksheets">

**Description:** Lists all worksheets (tabs) in an Excel workbook stored in a SharePoint document library. Use the returned worksheet names with the other Excel operations.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'id,name,position,visibility').
- `filter` (string, optional): OData filter expression (e.g., "visibility eq 'Visible'" to exclude hidden sheets).
- `top` (integer, optional): Maximum number of worksheets to return. Minimum: 1, maximum: 999
- `orderby` (string, optional): Sort order (e.g., 'position asc' to return tabs in tab order).

</Accordion>

<Accordion title="microsoft_sharepoint/create_excel_worksheet">

**Description:** Creates a new worksheet (tab) in an Excel workbook stored in a SharePoint document library. The new sheet is added at the end of the tab list.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `name` (string, required): Name for the new worksheet. Maximum 31 characters. Disallowed characters: \ / * ? : [ ]. Must be unique within the workbook.

</Accordion>

<Accordion title="microsoft_sharepoint/get_excel_range_data">

**Description:** Gets cell values from a specific range of an Excel worksheet stored in SharePoint. To read all data without knowing the sheet's size, use get_excel_used_range instead.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet (tab) to read. Obtain from get_excel_worksheets. Case-sensitive.
- `range` (string, required): Cell range in A1 notation (e.g., 'A1:C10', 'A:C', '1:5', 'A1').
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'address,values,formulas,numberFormat,text').

</Accordion>

<Accordion title="microsoft_sharepoint/update_excel_range_data">

**Description:** Writes values to a specific range of an Excel worksheet stored in SharePoint. Overwrites existing cell contents. The dimensions of the values array must exactly match the range size.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet (tab) to update. Obtain from get_excel_worksheets. Case-sensitive.
- `range` (string, required): Cell range in A1 notation to write values to (e.g., 'A1:C3' is a 3x3 block).
- `values` (array, required): 2D array of values (rows containing cells). Example for A1:B2: [["Header1", "Header2"], ["Value1", "Value2"]]. Use null to clear a cell.

</Accordion>
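
As a rough sketch of how the range and values must line up in practice (the site, drive, and item IDs below are placeholders you would first resolve via get_sites, get_drives, and list_files):

```python
from crewai import Agent, Task, Crew

# Hypothetical IDs for illustration only; resolve real ones first
# via get_sites, get_drives, and list_files/search_files.
excel_writer = Agent(
    role="SharePoint Excel Writer",
    goal="Keep spreadsheet data stored in SharePoint up to date",
    backstory="An AI assistant that maintains shared workbooks.",
    apps=['microsoft_sharepoint/update_excel_range_data']
)

# The values array is 2x2, matching the A1:B2 range exactly.
update_task = Task(
    description=(
        "In the workbook with item ID 'ITEM_ID' (drive 'DRIVE_ID', site 'SITE_ID'), "
        "write the range A1:B2 on worksheet 'Sheet1' with the values "
        '[["Quarter", "Revenue"], ["Q1", 45000]].'
    ),
    agent=excel_writer,
    expected_output="The A1:B2 range is updated with the provided 2x2 values."
)

crew = Crew(agents=[excel_writer], tasks=[update_task])
crew.kickoff()
```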

<Accordion title="microsoft_sharepoint/get_excel_used_range_metadata">

**Description:** Returns only the metadata (address and dimensions) of a worksheet's used range, without the actual cell values. Ideal for gauging a spreadsheet's size before reading data from a large file in chunks.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet (tab) to read. Obtain from get_excel_worksheets. Case-sensitive.

</Accordion>

<Accordion title="microsoft_sharepoint/get_excel_used_range">

**Description:** Gets every cell containing data from a worksheet stored in SharePoint. Do not use for files larger than 2MB. For large files, call get_excel_used_range_metadata first, then read in smaller chunks with get_excel_range_data.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet (tab) to read. Obtain from get_excel_worksheets. Case-sensitive.
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'address,values,formulas,numberFormat,text,rowCount,columnCount').

</Accordion>
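
A minimal sketch of the chunked-read pattern for large workbooks: call get_excel_used_range_metadata once, then page through the rows with get_excel_range_data. The chunk size and used range below are assumptions for illustration; the `...` stands in for however tool calls are issued in your setup.

```python
def chunk_ranges(first_row: int, last_row: int, first_col: str, last_col: str,
                 rows_per_chunk: int = 500):
    """Yield A1-notation ranges covering rows first_row..last_row."""
    start = first_row
    while start <= last_row:
        end = min(start + rows_per_chunk - 1, last_row)
        yield f"{first_col}{start}:{last_col}{end}"
        start = end + 1

# Suppose get_excel_used_range_metadata reported a used range of A1:F20000.
for rng in chunk_ranges(1, 20000, "A", "F"):
    ...  # call microsoft_sharepoint/get_excel_range_data with range=rng
```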

<Accordion title="microsoft_sharepoint/get_excel_cell">

**Description:** Gets the value of a single cell, addressed by row and column index, from an Excel file in SharePoint. Indexes are 0-based (row 0 = Excel row 1, column 0 = column A).

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet (tab). Obtain from get_excel_worksheets. Case-sensitive.
- `row` (integer, required): 0-based row index (row 0 = Excel row 1). Valid range: 0-1048575
- `column` (integer, required): 0-based column index (column 0 = A, column 1 = B). Valid range: 0-16383
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'address,values,formulas,numberFormat,text').

</Accordion>

<Accordion title="microsoft_sharepoint/add_excel_table">

**Description:** Converts a cell range into a formatted Excel table with filtering, sorting, and structured-data features. Once the table is created, you can add data with add_excel_table_row.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet containing the data range. Obtain from get_excel_worksheets.
- `range` (string, required): The cell range to convert to a table, including headers and data (e.g., in 'A1:D10', A1:D1 are the column headers).
- `has_headers` (boolean, optional): Set to true if the first row contains column headers. Default: true

</Accordion>

<Accordion title="microsoft_sharepoint/get_excel_tables">

**Description:** Lists all tables on a specific Excel worksheet stored in SharePoint. Returns table properties including id, name, showHeaders, and showTotals.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet to get tables from. Obtain from get_excel_worksheets.

</Accordion>

<Accordion title="microsoft_sharepoint/add_excel_table_row">

**Description:** Adds a new row to the end of an Excel table in a SharePoint file. The values array must have exactly as many elements as the table has columns.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet containing the table. Obtain from get_excel_worksheets.
- `table_name` (string, required): The name of the table to add the row to (e.g., 'Table1'). Obtain from get_excel_tables. Case-sensitive.
- `values` (array, required): Array of cell values for the new row, one per column in table order (e.g., ["John Doe", "john@example.com", 25]).

</Accordion>

<Accordion title="microsoft_sharepoint/get_excel_table_data">

**Description:** Gets all rows from an Excel table in a SharePoint file as a data range. Easier than get_excel_range_data when working with structured tables, since you do not need to know the exact range.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet containing the table. Obtain from get_excel_worksheets.
- `table_name` (string, required): The name of the table to get data from (e.g., 'Table1'). Obtain from get_excel_tables. Case-sensitive.
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'address,values,formulas,numberFormat,text').

</Accordion>
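
The three table actions above compose naturally into a discover-append-verify loop. A sketch under the same placeholder-ID assumptions as earlier (the worksheet and table names are hypothetical):

```python
from crewai import Agent, Task, Crew

table_manager = Agent(
    role="SharePoint Table Manager",
    goal="Maintain structured tables in shared Excel workbooks",
    backstory="An AI assistant that appends records to tracked tables.",
    apps=[
        'microsoft_sharepoint/get_excel_tables',
        'microsoft_sharepoint/add_excel_table_row',
        'microsoft_sharepoint/get_excel_table_data',
    ]
)

append_task = Task(
    description=(
        "In workbook 'ITEM_ID' (drive 'DRIVE_ID', site 'SITE_ID'), list the tables "
        "on worksheet 'Contacts', append the row "
        '["John Doe", "john@example.com", 25] to Table1, '
        "then read the table back to confirm the new row is present."
    ),
    agent=table_manager,
    expected_output="The new row is appended and visible in the returned table data."
)

crew = Crew(agents=[table_manager], tasks=[append_task])
crew.kickoff()
```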

<Accordion title="microsoft_sharepoint/create_excel_chart">

**Description:** Creates a chart visualization from a data range in an Excel worksheet stored in SharePoint. The chart is embedded in the worksheet.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet to create the chart on. Obtain from get_excel_worksheets.
- `chart_type` (string, required): The chart type (e.g., 'ColumnClustered', 'ColumnStacked', 'Line', 'LineMarkers', 'Pie', 'Bar', 'BarClustered', 'Area', 'Scatter', 'Doughnut').
- `source_data` (string, required): The chart's data range in A1 notation, including headers (e.g., 'A1:B10').
- `series_by` (string, optional): How the data series are organized: 'Auto', 'Columns', or 'Rows'. Default: 'Auto'

</Accordion>

<Accordion title="microsoft_sharepoint/list_excel_charts">

**Description:** Lists all charts embedded in an Excel worksheet stored in SharePoint. Returns chart properties including id, name, chartType, height, width, and position.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet to list charts from. Obtain from get_excel_worksheets.

</Accordion>

<Accordion title="microsoft_sharepoint/delete_excel_worksheet">

**Description:** Permanently removes a worksheet (tab) and all of its contents from an Excel workbook stored in SharePoint. This cannot be undone. The workbook must retain at least one worksheet.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet to delete. Case-sensitive. All data, tables, and charts on this sheet are permanently removed.

</Accordion>

<Accordion title="microsoft_sharepoint/delete_excel_table">

**Description:** Removes a table from an Excel worksheet in SharePoint. The table structure (filtering, formatting, table features) is deleted, but the underlying cell data is preserved.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.
- `worksheet_name` (string, required): The name of the worksheet containing the table. Obtain from get_excel_worksheets.
- `table_name` (string, required): The name of the table to delete (e.g., 'Table1'). Obtain from get_excel_tables. The cell data remains after the table is deleted.

</Accordion>

<Accordion title="microsoft_sharepoint/list_excel_names">

**Description:** Gets all named ranges defined in an Excel workbook stored in SharePoint. Named ranges are user-defined labels for cell ranges (e.g., 'SalesData' pointing to A1:D100).

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files.

</Accordion>

<Accordion title="microsoft_sharepoint/get_word_document_content">

**Description:** Downloads and extracts the text content from a Word document (.docx) stored in a SharePoint document library. This is the recommended way to read Word documents from SharePoint.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the Word document (.docx) in SharePoint. Obtain from list_files or search_files.

</Accordion>

</AccordionGroup>

@@ -107,86 +107,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `join_web_url` (string, required): The web join URL of the meeting to search for.

</Accordion>

<Accordion title="microsoft_teams/search_online_meetings_by_meeting_id">

**Description:** Searches for an online meeting by its external meeting ID.

**Parameters:**

- `join_meeting_id` (string, required): The meeting ID (numeric code) attendees use when joining. This is the joinMeetingId shown in meeting invitations, not the Graph API meeting id.

</Accordion>

<Accordion title="microsoft_teams/get_meeting">

**Description:** Gets the details of a specific online meeting.

**Parameters:**

- `meeting_id` (string, required): The Graph API meeting ID (a long alphanumeric string). Obtain from the create_meeting or search_online_meetings actions. This differs from the numeric joinMeetingId.

</Accordion>

<Accordion title="microsoft_teams/get_team_members">

**Description:** Gets the members of a specific team.

**Parameters:**

- `team_id` (string, required): The unique identifier of the team. Obtain from the get_teams action.
- `top` (integer, optional): Number of members to retrieve per page (1-999). Default: 100.
- `skip_token` (string, optional): Pagination token from a previous response. If the response contains @odata.nextLink, extract the $skiptoken parameter value and pass it here to fetch the next page of results.

</Accordion>
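
A small sketch of the skip_token handoff the parameter above describes, assuming a Graph-style response whose @odata.nextLink carries a $skiptoken query parameter:

```python
from urllib.parse import parse_qs, urlparse

def extract_skip_token(response: dict) -> str | None:
    """Pull the $skiptoken value out of @odata.nextLink, if present."""
    next_link = response.get("@odata.nextLink")
    if not next_link:
        return None
    params = parse_qs(urlparse(next_link).query)
    return params.get("$skiptoken", [None])[0]

# Example response shape (illustrative only):
page = {
    "value": [],
    "@odata.nextLink": "https://graph.microsoft.com/v1.0/teams/T/members?$skiptoken=abc123",
}
print(extract_skip_token(page))  # abc123 -> pass as skip_token for the next page
```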

<Accordion title="microsoft_teams/create_channel">

**Description:** Creates a new channel in a team.

**Parameters:**

- `team_id` (string, required): The unique identifier of the team. Obtain from the get_teams action.
- `display_name` (string, required): The channel name shown in Teams. Must be unique within the team. Maximum 50 characters.
- `description` (string, optional): Optional description of the channel's purpose. Shown in the channel details. Maximum 1024 characters.
- `membership_type` (string, optional): Channel visibility. Options: standard, private. "standard" = visible to all team members, "private" = visible only to explicitly added members. Default: standard.

</Accordion>

<Accordion title="microsoft_teams/get_message_replies">

**Description:** Gets the replies to a specific message in a channel.

**Parameters:**

- `team_id` (string, required): The unique identifier of the team. Obtain from the get_teams action.
- `channel_id` (string, required): The unique identifier of the channel. Obtain from the get_channels action.
- `message_id` (string, required): The unique identifier of the parent message. Obtain from the get_messages action.
- `top` (integer, optional): Number of replies to retrieve per page (1-50). Default: 50.
- `skip_token` (string, optional): Pagination token from a previous response. If the response contains @odata.nextLink, extract the $skiptoken parameter value and pass it here to fetch the next page of results.

</Accordion>

<Accordion title="microsoft_teams/reply_to_message">

**Description:** Replies to a message in a Teams channel.

**Parameters:**

- `team_id` (string, required): The unique identifier of the team. Obtain from the get_teams action.
- `channel_id` (string, required): The unique identifier of the channel. Obtain from the get_channels action.
- `message_id` (string, required): The unique identifier of the message to reply to. Obtain from the get_messages action.
- `message` (string, required): The reply content. Include formatting tags for HTML; plain text only for text.
- `content_type` (string, optional): Content format. Options: html, text. "text" for plain text, "html" for formatted rich text. Default: text.

</Accordion>

<Accordion title="microsoft_teams/update_meeting">

**Description:** Updates an existing online meeting.

**Parameters:**

- `meeting_id` (string, required): The unique identifier of the meeting. Obtain from the create_meeting or search_online_meetings actions.
- `subject` (string, optional): The new meeting title.
- `startDateTime` (string, optional): The new start time in ISO 8601 format with time zone. Example: "2024-01-20T10:00:00-08:00".
- `endDateTime` (string, optional): The new end time in ISO 8601 format with time zone.

</Accordion>

<Accordion title="microsoft_teams/delete_meeting">

**Description:** Deletes an online meeting.

**Parameters:**

- `meeting_id` (string, required): The unique identifier of the meeting to delete. Obtain from the create_meeting or search_online_meetings actions.

</Accordion>

</AccordionGroup>


## Usage Examples

@@ -220,62 +140,6 @@ crew = Crew(
crew.kickoff()
```

### Messaging and Communication

```python
from crewai import Agent, Task, Crew

# Create an agent specialized in messaging
messenger = Agent(
    role="Teams Messenger",
    goal="Send and retrieve messages in Teams channels",
    backstory="An AI assistant skilled in team communication and message management.",
    apps=['microsoft_teams/send_message', 'microsoft_teams/get_messages']
)

# Task to send a message and then retrieve recent messages
messaging_task = Task(
    description="Send the message 'Hello team! This is an automated update from our AI assistant.' to the General channel of team 'your_team_id', then retrieve the 10 most recent messages from that channel.",
    agent=messenger,
    expected_output="Message sent successfully and recent messages retrieved."
)

crew = Crew(
    agents=[messenger],
    tasks=[messaging_task]
)

crew.kickoff()
```

### Meeting Management

```python
from crewai import Agent, Task, Crew

# Create an agent for meeting management
meeting_scheduler = Agent(
    role="Meeting Scheduler",
    goal="Create and manage Teams meetings",
    backstory="An AI assistant responsible for scheduling and organizing meetings.",
    apps=['microsoft_teams/create_meeting', 'microsoft_teams/search_online_meetings_by_join_url']
)

# Task to create a meeting
schedule_meeting_task = Task(
    description="Create a one-hour Teams meeting titled 'Weekly Team Sync' for tomorrow at 10 AM (use proper ISO 8601 format with time zone).",
    agent=meeting_scheduler,
    expected_output="Teams meeting created successfully with the meeting details."
)

crew = Crew(
    agents=[meeting_scheduler],
    tasks=[schedule_meeting_task]
)

crew.kickoff()
```

## Troubleshooting

### Common Issues

@@ -284,35 +148,11 @@ crew.kickoff()

- Ensure your Microsoft account has the permissions required for Teams access.
- Required scopes: `Team.ReadBasic.All`, `Channel.ReadBasic.All`, `ChannelMessage.Send`, `ChannelMessage.Read.All`, `OnlineMeetings.ReadWrite`, `OnlineMeetings.Read`.
- Verify that your OAuth connection includes all required scopes.

**Team and Channel Access**

- Verify that you are a member of the team you are trying to access.
- Double-check that the team and channel IDs are correct.
- Team and channel IDs can be obtained with the `get_teams` and `get_channels` actions.

**Message Sending Issues**

- Ensure `team_id`, `channel_id`, and `message` are all provided to `send_message`.
- Verify that you have permission to post messages in the specified channel.
- Choose the `content_type` (text or html) appropriate for your message format.

**Meeting Creation**

- Ensure `subject`, `startDateTime`, and `endDateTime` are provided.
- Use proper ISO 8601 format with a time zone for the date/time fields (e.g., '2024-01-20T10:00:00-08:00').
- Verify that the meeting time is in the future.

**Message Retrieval Limits**

- The `get_messages` action can retrieve at most 50 messages per request.
- Messages are returned in reverse chronological order (newest first).

**Meeting Search**

- For `search_online_meetings_by_join_url`, make sure the join URL is exact and correctly formatted.
- The URL must be the complete Teams meeting join URL.

### Getting Help

@@ -97,26 +97,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `file_id` (string, required): The ID of the document to delete.

</Accordion>

<Accordion title="microsoft_word/copy_document">

**Description:** Copies a document to a new location in OneDrive.

**Parameters:**

- `file_id` (string, required): The ID of the document to copy.
- `name` (string, optional): A new name for the copied document.
- `parent_id` (string, optional): The ID of the destination folder (default: root).

</Accordion>

<Accordion title="microsoft_word/move_document">

**Description:** Moves a document to a new location in OneDrive.

**Parameters:**

- `file_id` (string, required): The ID of the document to move.
- `parent_id` (string, required): The ID of the destination folder.
- `name` (string, optional): A new name for the moved document.

</Accordion>

</AccordionGroup>

## Usage Examples

File diff suppressed because it is too large
Load Diff
@@ -200,25 +200,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token

- `clientData` (array, optional): Client-specific data. Each item is an object with `key` (string) and `value` (string).

</Accordion>

<Accordion title="google_contacts/update_contact_group">

**Description:** Updates a contact group's information.

**Parameters:**

- `resourceName` (string, required): The resource name of the contact group (e.g., 'contactGroups/myContactGroup').
- `name` (string, required): The name of the contact group.
- `clientData` (array, optional): Client-specific data. Each item is an object with `key` (string) and `value` (string).

</Accordion>

<Accordion title="google_contacts/delete_contact_group">

**Description:** Deletes a contact group.

**Parameters:**

- `resourceName` (string, required): The resource name of the contact group to delete (e.g., 'contactGroups/myContactGroup').
- `deleteContacts` (boolean, optional): Whether the group's contacts should also be deleted. Default: false

</Accordion>

</AccordionGroup>

## Usage Examples

@@ -131,297 +131,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token

- `endIndex` (integer, required): The end index of the range.

</Accordion>

<Accordion title="google_docs/create_document_with_content">

**Description:** Creates a new Google Doc with content in a single action.

**Parameters:**

- `title` (string, required): The title for the new document. Appears at the top of the document and in Google Drive.
- `content` (string, optional): The text content to insert into the document. Use `\n` for new paragraphs.

</Accordion>

<Accordion title="google_docs/append_text">

**Description:** Appends text to the end of a Google Doc. Inserts at the end of the document automatically, with no need to specify an index.

**Parameters:**

- `documentId` (string, required): The document ID, obtained from the create_document response or from the URL.
- `text` (string, required): Text to append to the end of the document. Use `\n` for new paragraphs.

</Accordion>

<Accordion title="google_docs/set_text_bold">

**Description:** Applies or removes bold formatting on text in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to format.
- `endIndex` (integer, required): End position of the text to format (exclusive).
- `bold` (boolean, required): Set `true` to apply bold, `false` to remove it.

</Accordion>

<Accordion title="google_docs/set_text_italic">

**Description:** Applies or removes italic formatting on text in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to format.
- `endIndex` (integer, required): End position of the text to format (exclusive).
- `italic` (boolean, required): Set `true` to apply italics, `false` to remove them.

</Accordion>

<Accordion title="google_docs/set_text_underline">

**Description:** Adds or removes underline formatting on text in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to format.
- `endIndex` (integer, required): End position of the text to format (exclusive).
- `underline` (boolean, required): Set `true` to underline, `false` to remove the underline.

</Accordion>

<Accordion title="google_docs/set_text_strikethrough">

**Description:** Adds or removes strikethrough formatting on text in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to format.
- `endIndex` (integer, required): End position of the text to format (exclusive).
- `strikethrough` (boolean, required): Set `true` to add strikethrough, `false` to remove it.

</Accordion>

<Accordion title="google_docs/set_font_size">

**Description:** Changes the font size of text in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to format.
- `endIndex` (integer, required): End position of the text to format (exclusive).
- `fontSize` (number, required): Font size in points. Common sizes: 10, 11, 12, 14, 16, 18, 24, 36.

</Accordion>

<Accordion title="google_docs/set_text_color">

**Description:** Changes the text color using RGB values (0-1 scale) in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to format.
- `endIndex` (integer, required): End position of the text to format (exclusive).
- `red` (number, required): Red component (0-1). Example: `1` for full red.
- `green` (number, required): Green component (0-1). Example: `0.5` for half green.
- `blue` (number, required): Blue component (0-1). Example: `0` for no blue.

</Accordion>
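
Since this action takes RGB on a 0-1 scale rather than the familiar 0-255, a quick sketch of the conversion (the hex color below is just an example):

```python
# Convert an 8-bit hex color to the 0-1 scale expected by set_text_color.
hex_color = "E91E63"  # any 6-digit hex value works
red, green, blue = (int(hex_color[i:i + 2], 16) / 255 for i in (0, 2, 4))
print(round(red, 3), round(green, 3), round(blue, 3))  # 0.914 0.118 0.388
```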

<Accordion title="google_docs/create_hyperlink">

**Description:** Turns existing text into a clickable hyperlink in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the text to turn into a link.
- `endIndex` (integer, required): End position of the text to turn into a link (exclusive).
- `url` (string, required): The URL the link should point to. Example: `"https://example.com"`.

</Accordion>

<Accordion title="google_docs/apply_heading_style">

**Description:** Applies a heading or paragraph style to a range of text in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the paragraph(s) to style.
- `endIndex` (integer, required): End position of the paragraph(s) to style.
- `style` (string, required): The style to apply. Options: `NORMAL_TEXT`, `TITLE`, `SUBTITLE`, `HEADING_1`, `HEADING_2`, `HEADING_3`, `HEADING_4`, `HEADING_5`, `HEADING_6`.

</Accordion>

<Accordion title="google_docs/set_paragraph_alignment">

**Description:** Sets the text alignment for paragraphs in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the paragraph(s) to align.
- `endIndex` (integer, required): End position of the paragraph(s) to align.
- `alignment` (string, required): Text alignment. Options: `START` (left), `CENTER`, `END` (right), `JUSTIFIED`.

</Accordion>

<Accordion title="google_docs/set_line_spacing">

**Description:** Sets the line spacing for paragraphs in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the paragraph(s).
- `endIndex` (integer, required): End position of the paragraph(s).
- `lineSpacing` (number, required): Line spacing as a percentage. `100` = single, `115` = 1.15x, `150` = 1.5x, `200` = double.

</Accordion>

<Accordion title="google_docs/create_paragraph_bullets">

**Description:** Converts paragraphs into a bulleted or numbered list in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the paragraphs to convert to a list.
- `endIndex` (integer, required): End position of the paragraphs to convert to a list.
- `bulletPreset` (string, required): Bullet/numbering style. Options: `BULLET_DISC_CIRCLE_SQUARE`, `BULLET_DIAMONDX_ARROW3D_SQUARE`, `BULLET_CHECKBOX`, `BULLET_ARROW_DIAMOND_DISC`, `BULLET_STAR_CIRCLE_SQUARE`, `NUMBERED_DECIMAL_ALPHA_ROMAN`, `NUMBERED_DECIMAL_ALPHA_ROMAN_PARENS`, `NUMBERED_DECIMAL_NESTED`, `NUMBERED_UPPERALPHA_ALPHA_ROMAN`, `NUMBERED_UPPERROMAN_UPPERALPHA_DECIMAL`.

</Accordion>

<Accordion title="google_docs/delete_paragraph_bullets">

**Description:** Removes bullets or numbering from paragraphs in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `startIndex` (integer, required): Start position of the list paragraphs.
- `endIndex` (integer, required): End position of the list paragraphs.

</Accordion>

<Accordion title="google_docs/insert_table_with_content">

**Description:** Inserts a table with content into a Google Doc in a single action. Provide the content as a 2D array.

**Parameters:**

- `documentId` (string, required): The document ID.
- `rows` (integer, required): Number of rows in the table.
- `columns` (integer, required): Number of columns in the table.
- `index` (integer, optional): Position at which to insert the table. If not provided, the table is inserted at the end of the document.
- `content` (array, required): Table content as a 2D array. Each inner array is one row. Example: `[["Year", "Revenue"], ["2023", "$43B"], ["2024", "$45B"]]`.

</Accordion>
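
A short sketch of how the document-creation and table actions might be combined in a crew (the document title and table data are illustrative):

```python
from crewai import Agent, Task, Crew

doc_builder = Agent(
    role="Google Docs Report Builder",
    goal="Draft formatted reports in Google Docs",
    backstory="An AI assistant that assembles structured documents.",
    apps=[
        'google_docs/create_document_with_content',
        'google_docs/insert_table_with_content',
    ]
)

report_task = Task(
    description=(
        "Create a document titled 'Quarterly Summary' with a short intro paragraph, "
        "then insert a table from this 2D array: "
        '[["Year", "Revenue"], ["2023", "$43B"], ["2024", "$45B"]].'
    ),
    agent=doc_builder,
    expected_output="A new document containing the intro text and a 3x2 table."
)

crew = Crew(agents=[doc_builder], tasks=[report_task])
crew.kickoff()
```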

<Accordion title="google_docs/insert_table_row">

**Description:** Inserts a new row above or below a reference cell in an existing table.

**Parameters:**

- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table. Obtain from get_document.
- `rowIndex` (integer, required): 0-based row index of the reference cell.
- `columnIndex` (integer, optional): 0-based column index of the reference cell. Default: `0`.
- `insertBelow` (boolean, optional): If `true`, inserts below the reference row; if `false`, inserts above it. Default: `true`.

</Accordion>

<Accordion title="google_docs/insert_table_column">

**Description:** Inserts a new column to the left or right of a reference cell in an existing table.

**Parameters:**

- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, optional): 0-based row index of the reference cell. Default: `0`.
- `columnIndex` (integer, required): 0-based column index of the reference cell.
- `insertRight` (boolean, optional): If `true`, inserts to the right; if `false`, inserts to the left. Default: `true`.

</Accordion>

<Accordion title="google_docs/delete_table_row">

**Description:** Deletes a row from an existing table in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, required): 0-based index of the row to delete.
- `columnIndex` (integer, optional): 0-based column index of any cell in the row. Default: `0`.

</Accordion>

<Accordion title="google_docs/delete_table_column">

**Description:** Deletes a column from an existing table in a Google Doc.

**Parameters:**

- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, optional): 0-based row index of any cell in the column. Default: `0`.
- `columnIndex` (integer, required): 0-based index of the column to delete.

</Accordion>

<Accordion title="google_docs/merge_table_cells">

**Description:** Merges a range of table cells into a single cell. The content of all cells is preserved.

**Parameters:**

- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, required): 0-based starting row index for the merge.
- `columnIndex` (integer, required): 0-based starting column index for the merge.
- `rowSpan` (integer, required): Number of rows to merge.
- `columnSpan` (integer, required): Number of columns to merge.

</Accordion>

<Accordion title="google_docs/unmerge_table_cells">

**Description:** Unmerges previously merged table cells, returning them to individual cells.

**Parameters:**

- `documentId` (string, required): The document ID.
- `tableStartIndex` (integer, required): The start index of the table.
- `rowIndex` (integer, required): 0-based row index of the merged cell.
- `columnIndex` (integer, required): 0-based column index of the merged cell.
- `rowSpan` (integer, required): Number of rows the merged cell spans.
- `columnSpan` (integer, required): Number of columns the merged cell spans.

</Accordion>

<Accordion title="google_docs/insert_inline_image">

**Description:** Inserts an image from a public URL into a Google Doc. The image must be publicly accessible, under 50MB, and in PNG/JPEG/GIF format.

**Parameters:**

- `documentId` (string, required): The document ID.
- `uri` (string, required): Public URL of the image. Must be accessible without authentication.
- `index` (integer, optional): Position at which to insert the image. If not provided, the image is inserted at the end of the document. Default: `1`.

</Accordion>

<Accordion title="google_docs/insert_section_break">

**Description:** Inserts a section break to create document sections with different formatting.

**Parameters:**

- `documentId` (string, required): The document ID.
- `index` (integer, required): Position at which to insert the section break.
- `sectionType` (string, required): The section break type. Options: `CONTINUOUS` (stays on the same page), `NEXT_PAGE` (starts a new page).

</Accordion>

<Accordion title="google_docs/create_header">

**Description:** Creates a header for the document. Returns a headerId that can be used with insert_text to add content to the header.

**Parameters:**

- `documentId` (string, required): The document ID.
- `type` (string, optional): Header type. Options: `DEFAULT`. Default: `DEFAULT`.

</Accordion>

<Accordion title="google_docs/create_footer">

**Description:** Creates a footer for the document. Returns a footerId that can be used with insert_text to add content to the footer.

**Parameters:**

- `documentId` (string, required): The document ID.
- `type` (string, optional): Footer type. Options: `DEFAULT`. Default: `DEFAULT`.

</Accordion>

<Accordion title="google_docs/delete_header">

**Description:** Deletes a header from the document. Use get_document to find the headerId.

**Parameters:**

- `documentId` (string, required): The document ID.
- `headerId` (string, required): The ID of the header to delete. Obtain from the get_document response.

</Accordion>

<Accordion title="google_docs/delete_footer">

**Description:** Deletes a footer from the document. Use get_document to find the footerId.

**Parameters:**

- `documentId` (string, required): The document ID.
- `footerId` (string, required): The ID of the footer to delete. Obtain from the get_document response.

</Accordion>

</AccordionGroup>

## Usage Examples

@@ -61,22 +61,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token

</Accordion>

<Accordion title="google_slides/get_presentation_metadata">

**Description:** Gets lightweight metadata for a presentation (title, slide count, slide IDs). Use this first, before retrieving the full content.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation to retrieve.

</Accordion>

<Accordion title="google_slides/get_presentation_text">

**Description:** Extracts all text content from a presentation. Returns slide IDs and text from shapes and tables only (no formatting).

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.

</Accordion>

<Accordion title="google_slides/get_presentation">

**Description:** Retrieves a presentation by ID.

@@ -96,15 +80,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token

</Accordion>

<Accordion title="google_slides/get_slide_text">

**Description:** Extracts the text content of a single slide. Returns only text from shapes and tables (no formatting or styling).

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `pageObjectId` (string, required): The ID of the slide/page to get text from.

</Accordion>

<Accordion title="google_slides/get_page">

**Description:** Retrieves a specific page by its ID.

@@ -123,120 +98,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token

</Accordion>

<Accordion title="google_slides/create_slide">

**Description:** Adds an additional blank slide to a presentation. New presentations already include one blank slide, so check get_presentation_metadata first. For slides with title/body areas, use create_slide_with_layout.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `insertionIndex` (integer, optional): Where to insert the slide (0-based). If omitted, the slide is appended at the end.

</Accordion>

<Accordion title="google_slides/create_slide_with_layout">

**Description:** Creates a slide with a predefined layout containing placeholder areas for a title, body, and so on. Better than create_slide for structured content. After creating the slide, use get_page to find the placeholder IDs, then insert text into them.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `layout` (string, required): Layout type. One of: `BLANK`, `TITLE`, `TITLE_AND_BODY`, `TITLE_AND_TWO_COLUMNS`, `TITLE_ONLY`, `SECTION_HEADER`, `ONE_COLUMN_TEXT`, `MAIN_POINT`, `BIG_NUMBER`. TITLE_AND_BODY is best for title+description. TITLE for title-only slides. SECTION_HEADER for section dividers.
- `insertionIndex` (integer, optional): Where to insert the slide (0-based). If omitted, the slide is appended at the end.

</Accordion>
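
As a rough sketch of the layout workflow this describes (create the slide, look up its placeholders with get_page, then fill them), with an assumed presentation ID:

```python
from crewai import Agent, Task, Crew

slide_builder = Agent(
    role="Slide Deck Builder",
    goal="Assemble structured slides in Google Slides",
    backstory="An AI assistant that builds presentation decks.",
    apps=[
        'google_slides/create_slide_with_layout',
        'google_slides/get_page',
    ]
)

layout_task = Task(
    description=(
        "In presentation 'PRESENTATION_ID', create a slide with the TITLE_AND_BODY "
        "layout, then call get_page on the new slide to report its placeholder IDs "
        "so text can be inserted into the title and body areas."
    ),
    agent=slide_builder,
    expected_output="A new TITLE_AND_BODY slide and the IDs of its placeholders."
)

crew = Crew(agents=[slide_builder], tasks=[layout_task])
crew.kickoff()
```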

<Accordion title="google_slides/create_text_box">

**Description:** Creates a text box on a slide with content. Use for titles, descriptions, and paragraphs (not for tables). Optionally specify the position (x, y) and size (width, height) in EMU units (914400 EMU = 1 inch).

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the text box to.
- `text` (string, required): The text content of the text box.
- `x` (integer, optional): X position in EMU (914400 = 1 inch). Default: 914400 (1 inch from the left).
- `y` (integer, optional): Y position in EMU (914400 = 1 inch). Default: 914400 (1 inch from the top).
- `width` (integer, optional): Width in EMU. Default: 7315200 (~8 inches).
- `height` (integer, optional): Height in EMU. Default: 914400 (~1 inch).

</Accordion>
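
The EMU arithmetic is easy to get wrong, so here is a tiny sketch of the conversion (the box geometry is just an example):

```python
# EMU (English Metric Unit) arithmetic for positioning text boxes.
EMU_PER_INCH = 914_400
EMU_PER_CM = 360_000  # 914400 / 2.54

def inches(n: float) -> int:
    """Convert inches to the EMU values create_text_box expects."""
    return round(n * EMU_PER_INCH)

# A text box 2 inches from the left, 1 inch from the top, 8 x 1 inches:
x, y = inches(2), inches(1)           # 1828800, 914400
width, height = inches(8), inches(1)  # 7315200, 914400
```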

<Accordion title="google_slides/delete_slide">

**Description:** Removes a slide from a presentation. Use get_presentation first to find the slide ID.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The object ID of the slide to delete. Obtain from get_presentation.

</Accordion>

<Accordion title="google_slides/duplicate_slide">

**Description:** Creates a copy of an existing slide. The duplicate is inserted immediately after the original.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The object ID of the slide to duplicate. Obtain from get_presentation.

</Accordion>

<Accordion title="google_slides/move_slides">

**Description:** Reorders slides by moving them to a new position. The slide IDs must be listed in the presentation's current order (no duplicates).

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideIds` (array of strings, required): Array of slide IDs to move. Must be in the presentation's current order.
- `insertionIndex` (integer, required): Target position (0-based). 0 = beginning, number of slides = end.

</Accordion>

<Accordion title="google_slides/insert_youtube_video">

**Description:** Embeds a YouTube video in a slide. The video ID is the value after "v=" in YouTube URLs (e.g., for youtube.com/watch?v=abc123, use "abc123").

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the video to. Obtain from get_presentation.
- `videoId` (string, required): The YouTube video ID (the value after v= in the URL).

</Accordion>

<Accordion title="google_slides/insert_drive_video">

**Description:** Embeds a Google Drive video in a slide. The file ID can be found in the file's Drive URL.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the video to. Obtain from get_presentation.
- `fileId` (string, required): The Google Drive file ID of the video.

</Accordion>

<Accordion title="google_slides/set_slide_background_image">

**Description:** Sets a background image for a slide. The image URL must be publicly accessible.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to set the background on. Obtain from get_presentation.
- `imageUrl` (string, required): Publicly accessible URL of the image to use as the background.

</Accordion>

<Accordion title="google_slides/create_table">

**Description:** Creates an empty table on a slide. To create a table with content, use create_table_with_content.

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the table to. Obtain from get_presentation.
- `rows` (integer, required): Number of rows in the table.
- `columns` (integer, required): Number of columns in the table.

</Accordion>

<Accordion title="google_slides/create_table_with_content">

**Description:** Creates a table with content in a single action. Provide the content as a 2D matrix where each inner array is a row. Example: [["Header1", "Header2"], ["Row1Col1", "Row1Col2"]].

**Parameters:**

- `presentationId` (string, required): The ID of the presentation.
- `slideId` (string, required): The ID of the slide to add the table to. Obtain from get_presentation.
- `rows` (integer, required): Number of rows in the table.
- `columns` (integer, required): Number of columns in the table.
- `content` (array, required): Table content as a 2D matrix. Each inner array is one row. Example: [["Year", "Revenue"], ["2023", "$10M"]].

</Accordion>

<Accordion title="google_slides/import_data_from_sheet">

**Description:** Imports data from a Google Sheet into a presentation.

@@ -148,16 +148,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token

</Accordion>

<Accordion title="microsoft_excel/get_table_data">

**Description:** Gets data from a specific table in an Excel worksheet.

**Parameters:**

- `file_id` (string, required): The ID of the Excel file.
- `worksheet_name` (string, required): Name of the worksheet.
- `table_name` (string, required): Name of the table.

</Accordion>

<Accordion title="microsoft_excel/create_chart">

**Description:** Creates a chart in an Excel worksheet.

@@ -190,15 +180,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token

</Accordion>

<Accordion title="microsoft_excel/get_used_range_metadata">

**Description:** Gets the used range metadata (dimensions only, no data) of an Excel worksheet.

**Parameters:**

- `file_id` (string, required): The ID of the Excel file.
- `worksheet_name` (string, required): Name of the worksheet.

</Accordion>

<Accordion title="microsoft_excel/list_charts">

**Description:** Gets all charts in an Excel worksheet.

@@ -150,49 +150,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token

- `item_id` (string, required): The ID of the file.

</Accordion>

<Accordion title="microsoft_onedrive/list_files_by_path">

**Description:** Lists the files and folders at a specific OneDrive path.

**Parameters:**

- `folder_path` (string, required): The folder path (e.g., 'Documents/Reports').
- `top` (integer, optional): Number of items to retrieve (max 1000). Default: 50.
- `orderby` (string, optional): Sort by field (e.g., "name asc", "lastModifiedDateTime desc"). Default: "name asc".

</Accordion>

<Accordion title="microsoft_onedrive/get_recent_files">

**Description:** Gets recently accessed files in OneDrive.

**Parameters:**

- `top` (integer, optional): Number of items to retrieve (max 200). Default: 25.

</Accordion>

<Accordion title="microsoft_onedrive/get_shared_with_me">

**Description:** Gets the files and folders shared with the user.

**Parameters:**

- `top` (integer, optional): Number of items to retrieve (max 200). Default: 50.
- `orderby` (string, optional): Sort by field. Default: "name asc".

</Accordion>

<Accordion title="microsoft_onedrive/get_file_by_path">

**Description:** Gets information about a specific file or folder by its path.

**Parameters:**

- `file_path` (string, required): The path of the file or folder (e.g., 'Documents/report.docx').

</Accordion>

<Accordion title="microsoft_onedrive/download_file_by_path">

**Description:** Downloads a file from OneDrive by its path.

**Parameters:**

- `file_path` (string, required): The path of the file (e.g., 'Documents/report.docx').

</Accordion>

</AccordionGroup>

## Usage Examples

@@ -132,74 +132,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token

- `companyName` (string, optional): The contact's company name.

</Accordion>

<Accordion title="microsoft_outlook/get_message">

**Description:** Gets a specific email message by ID.

**Parameters:**

- `message_id` (string, required): The unique identifier of the message. Obtain from the get_messages action.
- `select` (string, optional): Comma-separated list of properties to return. Example: "id,subject,body,from,receivedDateTime". Default: "id,subject,body,from,toRecipients,receivedDateTime".

</Accordion>

<Accordion title="microsoft_outlook/reply_to_email">

**Description:** Replies to an email message.

**Parameters:**

- `message_id` (string, required): The unique identifier of the message to reply to. Obtain from the get_messages action.
- `comment` (string, required): The content of the reply message. Can be plain text or HTML. The original message is quoted below this content.

</Accordion>

<Accordion title="microsoft_outlook/forward_email">

**Description:** Forwards an email message.

**Parameters:**

- `message_id` (string, required): The unique identifier of the message to forward. Obtain from the get_messages action.
- `to_recipients` (array, required): Array of recipient email addresses. Example: ["john@example.com", "jane@example.com"].
- `comment` (string, optional): Optional message to include above the forwarded content. Can be plain text or HTML.

</Accordion>

<Accordion title="microsoft_outlook/mark_message_read">

**Description:** Marks a message as read or unread.

**Parameters:**

- `message_id` (string, required): The unique identifier of the message. Obtain from the get_messages action.
- `is_read` (boolean, required): Set to true to mark as read, false to mark as unread.

</Accordion>

<Accordion title="microsoft_outlook/delete_message">

**Description:** Deletes an email message.

**Parameters:**

- `message_id` (string, required): The unique identifier of the message to delete. Obtain from the get_messages action.

</Accordion>

<Accordion title="microsoft_outlook/update_event">

**Description:** Updates an existing calendar event.

**Parameters:**

- `event_id` (string, required): The unique identifier of the event. Obtain from the get_calendar_events action.
- `subject` (string, optional): New subject/title for the event.
- `start_time` (string, optional): New start time in ISO 8601 format (e.g., "2024-01-20T10:00:00"). REQUIRED: you must also provide start_timezone when using this field.
- `start_timezone` (string, optional): Time zone of the start time. REQUIRED when updating start_time. Examples: "Pacific Standard Time", "Eastern Standard Time", "UTC".
- `end_time` (string, optional): New end time in ISO 8601 format. REQUIRED: you must also provide end_timezone when using this field.
- `end_timezone` (string, optional): Time zone of the end time. REQUIRED when updating end_time. Examples: "Pacific Standard Time", "Eastern Standard Time", "UTC".
- `location` (string, optional): New location for the event.
- `body` (string, optional): New body/description for the event. Supports HTML formatting.

</Accordion>
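
A minimal sketch of a valid argument set for this action, showing the pairing rule above: start_time travels with start_timezone, and end_time with end_timezone (the event ID and times are placeholders):

```python
# Hypothetical payload for microsoft_outlook/update_event.
update_event_args = {
    "event_id": "EVENT_ID",  # from get_calendar_events
    "subject": "Quarterly Planning (rescheduled)",
    "start_time": "2024-01-20T10:00:00",
    "start_timezone": "Pacific Standard Time",  # required with start_time
    "end_time": "2024-01-20T11:00:00",
    "end_timezone": "Pacific Standard Time",    # required with end_time
}
```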

<Accordion title="microsoft_outlook/delete_event">

**Description:** Deletes a calendar event.

**Parameters:**

- `event_id` (string, required): The unique identifier of the event to delete. Obtain from the get_calendar_events action.

</Accordion>

</AccordionGroup>

## Usage Examples

@@ -77,17 +77,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token

</Accordion>

<Accordion title="microsoft_sharepoint/get_drives">

**Description:** Lists all document libraries (drives) in a SharePoint site. Use this to discover the available libraries before using the file operations.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `top` (integer, optional): Maximum number of drives to return per page (1-999). Default: 100
- `skip_token` (string, optional): Pagination token from a previous response to fetch the next page of results.
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'id,name,webUrl,driveType').

</Accordion>

<Accordion title="microsoft_sharepoint/get_site_lists">

**Description:** Gets all lists in a SharePoint site.

@@ -156,317 +145,20 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token

</Accordion>

<Accordion title="microsoft_sharepoint/list_files">

**Description:** Retrieves files and folders from a SharePoint document library. Lists the root folder by default, but you can navigate into subfolders by providing a folder_id.

<Accordion title="microsoft_sharepoint/get_drive_items">

**Description:** Gets files and folders from a SharePoint document library.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `folder_id` (string, optional): The ID of the folder whose contents to list. Use 'root' for the root folder, or provide a folder ID from a previous list_files call. Default: 'root'
- `top` (integer, optional): Maximum number of items to return per page (1-1000). Default: 50
- `skip_token` (string, optional): Pagination token from a previous response to fetch the next page of results.
- `orderby` (string, optional): Sort order for results (e.g., 'name asc', 'size desc', 'lastModifiedDateTime desc'). Default: 'name asc'
- `filter` (string, optional): OData filter to narrow results (e.g., 'file ne null' for files only, 'folder ne null' for folders only).
- `select` (string, optional): Comma-separated list of fields to return (e.g., 'id,name,size,folder,file,webUrl,lastModifiedDateTime').
- `site_id` (string, required): The ID of the SharePoint site.

</Accordion>

<Accordion title="microsoft_sharepoint/delete_file">

**Description:** Deletes a file or folder from a SharePoint document library. For folders, all contents are deleted recursively. Items are moved to the site's recycle bin.

<Accordion title="microsoft_sharepoint/delete_drive_item">

**Description:** Deletes a file or folder from the SharePoint document library.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the file or folder to delete. Obtain from list_files.

</Accordion>

<Accordion title="microsoft_sharepoint/list_files_by_path">

**Description:** Lists files and folders in a SharePoint document library folder by path. More efficient than multiple list_files calls for navigating deep hierarchies.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `folder_path` (string, required): The full path to the folder, without leading or trailing slashes (e.g., 'Documents', 'Reports/2024/Q1').
- `top` (integer, optional): Maximum number of items to return per page (1-1000). Default: 50
- `skip_token` (string, optional): Pagination token from a previous response to fetch the next page of results.
- `orderby` (string, optional): Sort order for results (e.g., 'name asc', 'size desc'). Default: 'name asc'
- `select` (string, optional): Comma-separated list of fields to return (e.g., 'id,name,size,folder,file,webUrl,lastModifiedDateTime').

</Accordion>

<Accordion title="microsoft_sharepoint/download_file">

**Description:** Downloads the raw content of a file from a SharePoint document library. Use only for plain text files (.txt, .csv, .json). For Excel files, use the Excel-specific actions. For Word files, use get_word_document_content.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the file to download. Obtain from list_files or list_files_by_path.

</Accordion>

<Accordion title="microsoft_sharepoint/get_file_info">

**Description:** Retrieves detailed metadata for a specific file or folder in a SharePoint document library, including its name, size, created/modified dates, and author information.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `item_id` (string, required): The unique identifier of the file or folder. Obtain from list_files or list_files_by_path.
- `select` (string, optional): Comma-separated list of properties to return (e.g., 'id,name,size,createdDateTime,lastModifiedDateTime,webUrl,createdBy,lastModifiedBy').

</Accordion>

<Accordion title="microsoft_sharepoint/create_folder">

**Description:** Creates a new folder in a SharePoint document library. Creates the folder at the root by default; use parent_id to create subfolders.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `folder_name` (string, required): Name for the new folder. Cannot contain: \ / : * ? " < > |
- `parent_id` (string, optional): The ID of the parent folder. Use 'root' for the document library root, or provide a folder ID from list_files. Default: 'root'

</Accordion>

<Accordion title="microsoft_sharepoint/search_files">

**Description:** Searches files and folders in a SharePoint document library by keyword. Searches file names, folder names, and, for Office documents, file content. Do not use wildcards or special characters.

**Parameters:**

- `site_id` (string, required): The full SharePoint site identifier obtained from get_sites.
- `drive_id` (string, required): The ID of the document library. Call get_drives first to obtain valid drive IDs.
- `query` (string, required): Search keywords (e.g., 'report', 'budget 2024'). Wildcards such as *.txt are not supported.
- `top` (integer, optional): Maximum number of results to return per page (1-1000). Default: 50
- `skip_token` (string, optional): Pagination token from a previous response to fetch the next page of results.
- `select` (string, optional): Comma-separated list of fields to return (e.g., 'id,name,size,folder,file,webUrl,lastModifiedDateTime').

</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/copy_file">
|
||||
**Descrição:** Copiar um arquivo ou pasta para um novo local dentro do SharePoint. O item original permanece inalterado. A operação de cópia é assíncrona para arquivos grandes.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo ou pasta a copiar. Obtenha de list_files ou search_files.
|
||||
- `destination_folder_id` (string, obrigatório): O ID da pasta de destino. Use 'root' para a pasta raiz, ou um ID de pasta de list_files.
|
||||
- `new_name` (string, opcional): Novo nome para a cópia. Se não fornecido, o nome original é usado.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/move_file">
|
||||
**Descrição:** Mover um arquivo ou pasta para um novo local dentro do SharePoint. O item é removido de sua localização original. Para pastas, todo o conteúdo é movido também.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo ou pasta a mover. Obtenha de list_files ou search_files.
|
||||
- `destination_folder_id` (string, obrigatório): O ID da pasta de destino. Use 'root' para a pasta raiz, ou um ID de pasta de list_files.
|
||||
- `new_name` (string, opcional): Novo nome para o item movido. Se não fornecido, o nome original é mantido.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/get_excel_worksheets">
|
||||
**Descrição:** Listar todas as planilhas (abas) em uma pasta de trabalho Excel armazenada em uma biblioteca de documentos do SharePoint. Use o nome da planilha retornado com outras ações de Excel.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `select` (string, opcional): Lista de propriedades separadas por vírgula para retornar (ex: 'id,name,position,visibility').
|
||||
- `filter` (string, opcional): Expressão de filtro OData (ex: "visibility eq 'Visible'" para excluir planilhas ocultas).
|
||||
- `top` (integer, opcional): Número máximo de planilhas a retornar. Mínimo: 1, Máximo: 999
|
||||
- `orderby` (string, opcional): Ordem de classificação (ex: 'position asc' para retornar planilhas na ordem das abas).
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/create_excel_worksheet">
|
||||
**Descrição:** Criar uma nova planilha (aba) em uma pasta de trabalho Excel armazenada em uma biblioteca de documentos do SharePoint. A nova planilha é adicionada no final da lista de abas.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `name` (string, obrigatório): Nome para a nova planilha. Máximo de 31 caracteres. Não pode conter: \ / * ? : [ ]. Deve ser único na pasta de trabalho.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/get_excel_range_data">
|
||||
**Descrição:** Recuperar valores de células de um intervalo específico em uma planilha Excel armazenada no SharePoint. Para ler todos os dados sem saber as dimensões, use get_excel_used_range em vez disso.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha (aba) para leitura. Obtenha de get_excel_worksheets. Sensível a maiúsculas e minúsculas.
|
||||
- `range` (string, obrigatório): Intervalo de células em notação A1 (ex: 'A1:C10', 'A:C', '1:5', 'A1').
|
||||
- `select` (string, opcional): Lista de propriedades separadas por vírgula para retornar (ex: 'address,values,formulas,numberFormat,text').
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/update_excel_range_data">
|
||||
**Descrição:** Escrever valores em um intervalo específico em uma planilha Excel armazenada no SharePoint. Sobrescreve o conteúdo existente das células. As dimensões do array de valores devem corresponder exatamente às dimensões do intervalo.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha (aba) a atualizar. Obtenha de get_excel_worksheets. Sensível a maiúsculas e minúsculas.
|
||||
- `range` (string, obrigatório): Intervalo de células em notação A1 onde os valores serão escritos (ex: 'A1:C3' para um bloco 3x3).
|
||||
- `values` (array, obrigatório): Array 2D de valores (linhas contendo células). Exemplo para A1:B2: [["Cabeçalho1", "Cabeçalho2"], ["Valor1", "Valor2"]]. Use null para limpar uma célula.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/get_excel_used_range_metadata">
|
||||
**Descrição:** Retornar apenas os metadados (endereço e dimensões) do intervalo utilizado em uma planilha, sem os valores reais das células. Ideal para arquivos grandes para entender o tamanho da planilha antes de ler dados em blocos.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha (aba) para leitura. Obtenha de get_excel_worksheets. Sensível a maiúsculas e minúsculas.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/get_excel_used_range">
|
||||
**Descrição:** Recuperar todas as células contendo dados em uma planilha armazenada no SharePoint. Não use para arquivos maiores que 2MB. Para arquivos grandes, use get_excel_used_range_metadata primeiro, depois get_excel_range_data para ler em blocos menores.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha (aba) para leitura. Obtenha de get_excel_worksheets. Sensível a maiúsculas e minúsculas.
|
||||
- `select` (string, opcional): Lista de propriedades separadas por vírgula para retornar (ex: 'address,values,formulas,numberFormat,text,rowCount,columnCount').
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/get_excel_cell">
|
||||
**Descrição:** Recuperar o valor de uma única célula por índice de linha e coluna de um arquivo Excel no SharePoint. Os índices são baseados em 0 (linha 0 = linha 1 do Excel, coluna 0 = coluna A).
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha (aba). Obtenha de get_excel_worksheets. Sensível a maiúsculas e minúsculas.
|
||||
- `row` (integer, obrigatório): Índice de linha baseado em 0 (linha 0 = linha 1 do Excel). Intervalo válido: 0-1048575
|
||||
- `column` (integer, obrigatório): Índice de coluna baseado em 0 (coluna 0 = A, coluna 1 = B). Intervalo válido: 0-16383
|
||||
- `select` (string, opcional): Lista de propriedades separadas por vírgula para retornar (ex: 'address,values,formulas,numberFormat,text').
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/add_excel_table">
|
||||
**Descrição:** Converter um intervalo de células em uma tabela Excel formatada com recursos de filtragem, classificação e dados estruturados. Tabelas habilitam add_excel_table_row para adicionar dados.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha contendo o intervalo de dados. Obtenha de get_excel_worksheets.
|
||||
- `range` (string, obrigatório): Intervalo de células para converter em tabela, incluindo cabeçalhos e dados (ex: 'A1:D10' onde A1:D1 contém cabeçalhos de coluna).
|
||||
- `has_headers` (boolean, opcional): Defina como true se a primeira linha contém cabeçalhos de coluna. Padrão: true
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/get_excel_tables">
|
||||
**Descrição:** Listar todas as tabelas em uma planilha Excel específica armazenada no SharePoint. Retorna propriedades da tabela incluindo id, name, showHeaders e showTotals.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha para obter tabelas. Obtenha de get_excel_worksheets.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/add_excel_table_row">
|
||||
**Descrição:** Adicionar uma nova linha ao final de uma tabela Excel em um arquivo do SharePoint. O array de valores deve ter o mesmo número de elementos que o número de colunas da tabela.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha contendo a tabela. Obtenha de get_excel_worksheets.
|
||||
- `table_name` (string, obrigatório): Nome da tabela para adicionar a linha (ex: 'Table1'). Obtenha de get_excel_tables. Sensível a maiúsculas e minúsculas.
|
||||
- `values` (array, obrigatório): Array de valores de células para a nova linha, um por coluna na ordem da tabela (ex: ["João Silva", "joao@exemplo.com", 25]).
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/get_excel_table_data">
|
||||
**Descrição:** Obter todas as linhas de uma tabela Excel em um arquivo do SharePoint como um intervalo de dados. Mais fácil do que get_excel_range_data ao trabalhar com tabelas estruturadas, pois não é necessário saber o intervalo exato.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha contendo a tabela. Obtenha de get_excel_worksheets.
|
||||
- `table_name` (string, obrigatório): Nome da tabela para obter dados (ex: 'Table1'). Obtenha de get_excel_tables. Sensível a maiúsculas e minúsculas.
|
||||
- `select` (string, opcional): Lista de propriedades separadas por vírgula para retornar (ex: 'address,values,formulas,numberFormat,text').
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/create_excel_chart">
|
||||
**Descrição:** Criar uma visualização de gráfico em uma planilha Excel armazenada no SharePoint a partir de um intervalo de dados. O gráfico é incorporado na planilha.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha onde o gráfico será criado. Obtenha de get_excel_worksheets.
|
||||
- `chart_type` (string, obrigatório): Tipo de gráfico (ex: 'ColumnClustered', 'ColumnStacked', 'Line', 'LineMarkers', 'Pie', 'Bar', 'BarClustered', 'Area', 'Scatter', 'Doughnut').
|
||||
- `source_data` (string, obrigatório): Intervalo de dados para o gráfico em notação A1, incluindo cabeçalhos (ex: 'A1:B10').
|
||||
- `series_by` (string, opcional): Como as séries de dados são organizadas: 'Auto', 'Columns' ou 'Rows'. Padrão: 'Auto'
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/list_excel_charts">
|
||||
**Descrição:** Listar todos os gráficos incorporados em uma planilha Excel armazenada no SharePoint. Retorna propriedades do gráfico incluindo id, name, chartType, height, width e position.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha para listar gráficos. Obtenha de get_excel_worksheets.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/delete_excel_worksheet">
|
||||
**Descrição:** Remover permanentemente uma planilha (aba) e todo seu conteúdo de uma pasta de trabalho Excel armazenada no SharePoint. Não pode ser desfeito. Uma pasta de trabalho deve ter pelo menos uma planilha.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha a excluir. Sensível a maiúsculas e minúsculas. Todos os dados, tabelas e gráficos nesta planilha serão permanentemente removidos.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/delete_excel_table">
|
||||
**Descrição:** Remover uma tabela de uma planilha Excel no SharePoint. Isto exclui a estrutura da tabela (filtragem, formatação, recursos de tabela) mas preserva os dados subjacentes das células.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `worksheet_name` (string, obrigatório): Nome da planilha contendo a tabela. Obtenha de get_excel_worksheets.
|
||||
- `table_name` (string, obrigatório): Nome da tabela a excluir (ex: 'Table1'). Obtenha de get_excel_tables. Os dados nas células permanecerão após a exclusão da tabela.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/list_excel_names">
|
||||
**Descrição:** Recuperar todos os intervalos nomeados definidos em uma pasta de trabalho Excel armazenada no SharePoint. Intervalos nomeados são rótulos definidos pelo usuário para intervalos de células (ex: 'DadosVendas' para A1:D100).
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files.
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="microsoft_sharepoint/get_word_document_content">
|
||||
**Descrição:** Baixar e extrair conteúdo de texto de um documento Word (.docx) armazenado em uma biblioteca de documentos do SharePoint. Esta é a maneira recomendada de ler documentos Word do SharePoint.
|
||||
|
||||
**Parâmetros:**
|
||||
- `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites.
|
||||
- `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos.
|
||||
- `item_id` (string, obrigatório): O identificador único do documento Word (.docx) no SharePoint. Obtenha de list_files ou search_files.
|
||||
- `site_id` (string, obrigatório): O ID do site do SharePoint.
|
||||
- `item_id` (string, obrigatório): O ID do arquivo ou pasta a excluir.
|
||||
|
||||
</Accordion>
|
||||
</AccordionGroup>
|
||||
|
||||
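The hunk cuts off before the original usage section, so here is a minimal sketch of how these SharePoint actions are typically attached to an agent. It assumes the platform integration token is configured and that `CrewaiEnterpriseTools` resolves the action names documented above; the role, goal, and task strings are illustrative.

```python
from crewai import Agent, Crew, Task
from crewai_tools import CrewaiEnterpriseTools

# Pull only the SharePoint actions this workflow needs (names from the reference above).
sharepoint_tools = CrewaiEnterpriseTools(
    actions_list=[
        "microsoft_sharepoint/search_files",
        "microsoft_sharepoint/get_word_document_content",
    ]
)

analyst = Agent(
    role="SharePoint Analyst",
    goal="Find and summarize contract documents",
    backstory="Knows how to navigate SharePoint document libraries.",
    tools=sharepoint_tools,
)

task = Task(
    description="Search the Contracts library for 2024 agreements and summarize each one.",
    expected_output="A bullet-point summary per document.",
    agent=analyst,
)

Crew(agents=[analyst], tasks=[task]).kickoff()
```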
@@ -107,86 +107,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `join_web_url` (string, required): The web join URL of the meeting to search for.

</Accordion>

<Accordion title="microsoft_teams/search_online_meetings_by_meeting_id">

**Description:** Search online meetings by external meeting ID.

**Parameters:**

- `join_meeting_id` (string, required): The meeting ID (numeric code) participants use to join. This is the joinMeetingId shown in meeting invitations, not the Graph API meeting id.

</Accordion>

<Accordion title="microsoft_teams/get_meeting">

**Description:** Get details of a specific online meeting.

**Parameters:**

- `meeting_id` (string, required): The meeting ID in the Graph API (a long alphanumeric string). Obtain it from the create_meeting or search_online_meetings actions. Different from the numeric joinMeetingId.

</Accordion>

<Accordion title="microsoft_teams/get_team_members">

**Description:** Get the members of a specific team.

**Parameters:**

- `team_id` (string, required): The unique identifier of the team. Obtain it from the get_teams action.
- `top` (integer, optional): Maximum number of members to retrieve per page (1-999). Default: 100.
- `skip_token` (string, optional): Pagination token from a previous response. When the response includes @odata.nextLink, extract the $skiptoken parameter value and pass it here to get the next page of results.

</Accordion>

<Accordion title="microsoft_teams/create_channel">

**Description:** Create a new channel in a team.

**Parameters:**

- `team_id` (string, required): The unique identifier of the team. Obtain it from the get_teams action.
- `display_name` (string, required): Channel name displayed in Teams. Must be unique within the team. Max 50 characters.
- `description` (string, optional): Optional description explaining the channel's purpose. Visible in the channel details. Max 1024 characters.
- `membership_type` (string, optional): Channel visibility. Options: standard, private. "standard" = visible to all team members, "private" = visible only to specifically added members. Default: standard.

</Accordion>

<Accordion title="microsoft_teams/get_message_replies">

**Description:** Get the replies to a specific message in a channel.

**Parameters:**

- `team_id` (string, required): The unique identifier of the team. Obtain it from the get_teams action.
- `channel_id` (string, required): The unique identifier of the channel. Obtain it from the get_channels action.
- `message_id` (string, required): The unique identifier of the parent message. Obtain it from the get_messages action.
- `top` (integer, optional): Maximum number of replies to retrieve per page (1-50). Default: 50.
- `skip_token` (string, optional): Pagination token from a previous response. When the response includes @odata.nextLink, extract the $skiptoken parameter value and pass it here to get the next page of results.

</Accordion>

<Accordion title="microsoft_teams/reply_to_message">

**Description:** Reply to a message in a Teams channel.

**Parameters:**

- `team_id` (string, required): The unique identifier of the team. Obtain it from the get_teams action.
- `channel_id` (string, required): The unique identifier of the channel. Obtain it from the get_channels action.
- `message_id` (string, required): The unique identifier of the message to reply to. Obtain it from the get_messages action.
- `message` (string, required): The reply content. For HTML, include formatting tags. For text, use plain text only.
- `content_type` (string, optional): Content format. Options: html, text. "text" for plain text, "html" for rich text with formatting. Default: text.

</Accordion>

<Accordion title="microsoft_teams/update_meeting">

**Description:** Update an existing online meeting.

**Parameters:**

- `meeting_id` (string, required): The unique identifier of the meeting. Obtain it from the create_meeting or search_online_meetings actions.
- `subject` (string, optional): New meeting title.
- `startDateTime` (string, optional): New start time in ISO 8601 format with timezone. Example: "2024-01-20T10:00:00-08:00".
- `endDateTime` (string, optional): New end time in ISO 8601 format with timezone.

</Accordion>

<Accordion title="microsoft_teams/delete_meeting">

**Description:** Delete an online meeting.

**Parameters:**

- `meeting_id` (string, required): The unique identifier of the meeting to delete. Obtain it from the create_meeting or search_online_meetings actions.

</Accordion>

</AccordionGroup>

## Usage Examples
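The original examples under this heading are cut off by the hunk boundary; below is a hedged sketch of what one might look like, reusing the same `CrewaiEnterpriseTools` pattern. The action names come from the reference above; everything else is illustrative.

```python
from crewai import Agent, Crew, Task
from crewai_tools import CrewaiEnterpriseTools

teams_tools = CrewaiEnterpriseTools(
    actions_list=[
        "microsoft_teams/create_channel",
        "microsoft_teams/reply_to_message",
    ]
)

coordinator = Agent(
    role="Teams Coordinator",
    goal="Keep project channels organized and conversations answered",
    backstory="Manages day-to-day communication in Microsoft Teams.",
    tools=teams_tools,
)

task = Task(
    description="Create a 'launch-updates' channel and reply to any unanswered questions in it.",
    expected_output="Confirmation of the created channel and the replies posted.",
    agent=coordinator,
)

Crew(agents=[coordinator], tasks=[task]).kickoff()
```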
@@ -97,26 +97,6 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token

- `file_id` (string, required): The ID of the document to delete.

</Accordion>

<Accordion title="microsoft_word/copy_document">

**Description:** Copy a document to a new location in OneDrive.

**Parameters:**

- `file_id` (string, required): The ID of the document to copy.
- `name` (string, optional): New name for the copied document.
- `parent_id` (string, optional): The ID of the destination folder (default: root).

</Accordion>

<Accordion title="microsoft_word/move_document">

**Description:** Move a document to a new location in OneDrive.

**Parameters:**

- `file_id` (string, required): The ID of the document to move.
- `parent_id` (string, required): The ID of the destination folder.
- `name` (string, optional): New name for the moved document.

</Accordion>

</AccordionGroup>

## Usage Examples
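As with the Teams section, the examples here are truncated by the hunk; a short hedged sketch of a Word-document workflow using the two actions above (all names beyond the action strings are illustrative):

```python
from crewai import Agent, Crew, Task
from crewai_tools import CrewaiEnterpriseTools

word_tools = CrewaiEnterpriseTools(
    actions_list=[
        "microsoft_word/copy_document",
        "microsoft_word/move_document",
    ]
)

archivist = Agent(
    role="Document Archivist",
    goal="Keep OneDrive documents filed in the right folders",
    backstory="Maintains the team's document structure.",
    tools=word_tools,
)

task = Task(
    description="Copy the Q1 report into the Archive folder, then move the draft out of Inbox.",
    expected_output="The IDs of the copied and moved documents.",
    agent=archivist,
)

Crew(agents=[archivist], tasks=[task]).kickoff()
```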
@@ -33,11 +33,8 @@ def test_brave_tool_search(mock_get, brave_tool):
    mock_get.return_value.json.return_value = mock_response

    result = brave_tool.run(query="test")
    data = json.loads(result)
    assert isinstance(data, list)
    assert len(data) >= 1
    assert data[0]["title"] == "Test Title"
    assert data[0]["url"] == "http://test.com"
    assert "Test Title" in result
    assert "http://test.com" in result


@patch("requests.get")
@@ -26,8 +26,6 @@ dependencies = [
    # Authentication and Security
    "python-dotenv~=1.1.1",
    "pyjwt>=2.9.0,<3",
    # TUI
    "textual>=7.5.0",
    # Configuration and Utils
    "click~=8.1.7",
    "appdirs~=1.4.4",
@@ -41,7 +39,6 @@ dependencies = [
    "mcp~=1.26.0",
    "uv~=0.9.13",
    "aiosqlite~=0.21.0",
    "lancedb>=0.4.0",
]

[project.urls]
@@ -10,7 +10,6 @@ from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.memory.unified_memory import Memory
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.llm_guardrail import LLMGuardrail
@@ -81,7 +80,6 @@ __all__ = [
    "Flow",
    "Knowledge",
    "LLMGuardrail",
    "Memory",
    "Process",
    "Task",
    "TaskOutput",
@@ -71,6 +71,7 @@ from crewai.mcp import (
from crewai.mcp.transports.http import HTTPTransport
from crewai.mcp.transports.sse import SSETransport
from crewai.mcp.transports.stdio import StdioTransport
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.rag.embeddings.types import EmbedderConfig
from crewai.security.fingerprint import Fingerprint
from crewai.tools.agent_tools.agent_tools import AgentTools
@@ -310,12 +311,19 @@ class Agent(BaseAgent):
            raise ValueError(f"Invalid Knowledge Configuration: {e!s}") from e

    def _is_any_available_memory(self) -> bool:
        """Check if unified memory is available (agent or crew)."""
        if getattr(self, "memory", None):
            return True
        if self.crew and getattr(self.crew, "_memory", None):
            return True
        return False
        """Check if any memory is available."""
        if not self.crew:
            return False

        memory_attributes = [
            "memory",
            "_short_term_memory",
            "_long_term_memory",
            "_entity_memory",
            "_external_memory",
        ]

        return any(getattr(self.crew, attr) for attr in memory_attributes)

    def _supports_native_tool_calling(self, tools: list[BaseTool]) -> bool:
        """Check if the LLM supports native function calling with the given tools.
@@ -379,16 +387,15 @@ class Agent(BaseAgent):
        memory = ""

        try:
            unified_memory = getattr(self, "memory", None) or (
                getattr(self.crew, "_memory", None) if self.crew else None
            contextual_memory = ContextualMemory(
                self.crew._short_term_memory,
                self.crew._long_term_memory,
                self.crew._entity_memory,
                self.crew._external_memory,
                agent=self,
                task=task,
            )
            if unified_memory is not None:
                query = task.description
                matches = unified_memory.recall(query, limit=10)
                if matches:
                    memory = "Relevant memories:\n" + "\n".join(
                        f"- {m.record.content}" for m in matches
                    )
            memory = contextual_memory.build_context_for_task(task, context or "")
            if memory.strip() != "":
                task_prompt += self.i18n.slice("memory").format(memory=memory)

@@ -617,16 +624,17 @@ class Agent(BaseAgent):
        memory = ""

        try:
            unified_memory = getattr(self, "memory", None) or (
                getattr(self.crew, "_memory", None) if self.crew else None
            contextual_memory = ContextualMemory(
                self.crew._short_term_memory,
                self.crew._long_term_memory,
                self.crew._entity_memory,
                self.crew._external_memory,
                agent=self,
                task=task,
            )
            memory = await contextual_memory.abuild_context_for_task(
                task, context or ""
            )
            if unified_memory is not None:
                query = task.description
                matches = unified_memory.recall(query, limit=10)
                if matches:
                    memory = "Relevant memories:\n" + "\n".join(
                        f"- {m.record.content}" for m in matches
                    )
            if memory.strip() != "":
                task_prompt += self.i18n.slice("memory").format(memory=memory)

@@ -1704,18 +1712,6 @@ class Agent(BaseAgent):

        # Prepare tools
        raw_tools: list[BaseTool] = self.tools or []

        # Inject memory tools for standalone kickoff (crew path handles its own)
        agent_memory = getattr(self, "memory", None)
        if agent_memory is not None:
            from crewai.tools.memory_tools import create_memory_tools

            existing_names = {sanitize_tool_name(t.name) for t in raw_tools}
            raw_tools.extend(
                mt for mt in create_memory_tools(agent_memory)
                if sanitize_tool_name(mt.name) not in existing_names
            )

        parsed_tools = parse_tools(raw_tools)

        # Build agent_info for backward-compatible event emission
@@ -199,10 +199,6 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
        default=None,
        description="List of MCP server references. Supports 'https://server.com/path' for external servers and 'crewai-amp:mcp-name' for AMP marketplace. Use '#tool_name' suffix for specific tools.",
    )
    memory: Any = Field(
        default=None,
        description="Memory instance for this agent (Memory, MemoryScope, or MemorySlice). If not set, falls back to crew memory.",
    )

    @model_validator(mode="before")
    @classmethod
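Taken together, the two hunks above mean a standalone agent can carry its own memory and get remember/recall tools injected on kickoff. A hedged sketch of the resulting usage, assuming `Memory()` works with its default storage and that `Agent.kickoff` accepts a plain prompt (neither is shown in this diff):

```python
from crewai import Agent
from crewai.memory.unified_memory import Memory

agent = Agent(
    role="Researcher",
    goal="Track findings across sessions",
    backstory="Remembers what it has already learned.",
    memory=Memory(),  # per-agent memory; falls back to crew._memory when unset
)

# On a standalone kickoff, memory tools are appended to the agent's tool list,
# skipping any whose sanitized name collides with an existing tool.
result = agent.kickoff("What did we conclude about Q1 sales last run?")
print(result)
```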
@@ -1,8 +1,13 @@
from __future__ import annotations

import time
from typing import TYPE_CHECKING

from crewai.agents.parser import AgentFinish
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
from crewai.utilities.converter import ConverterError
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.printer import Printer
from crewai.utilities.string_utils import sanitize_tool_name

@@ -25,29 +30,110 @@ class CrewAgentExecutorMixin:
    _i18n: I18N
    _printer: Printer = Printer()

    def _save_to_memory(self, output: AgentFinish) -> None:
        """Save task result to unified memory (memory or crew._memory)."""
        memory = getattr(self.agent, "memory", None) or (
            getattr(self.crew, "_memory", None) if self.crew else None
        )
        if memory is None or not self.task:
            return
    def _create_short_term_memory(self, output: AgentFinish) -> None:
        """Create and save a short-term memory item if conditions are met."""
        if (
            f"Action: {sanitize_tool_name('Delegate work to coworker')}"
            in output.text
            self.crew
            and self.agent
            and self.task
            and f"Action: {sanitize_tool_name('Delegate work to coworker')}"
            not in output.text
        ):
            return
        try:
            raw = (
                f"Task: {self.task.description}\n"
                f"Agent: {self.agent.role}\n"
                f"Expected result: {self.task.expected_output}\n"
                f"Result: {output.text}"
            )
            extracted = memory.extract_memories(raw)
            for mem in extracted:
                memory.remember(mem)
        except Exception as e:
            self.agent._logger.log(
                "error", f"Failed to save to memory: {e}"
            )
            try:
                if (
                    hasattr(self.crew, "_short_term_memory")
                    and self.crew._short_term_memory
                ):
                    self.crew._short_term_memory.save(
                        value=output.text,
                        metadata={
                            "observation": self.task.description,
                        },
                    )
            except Exception as e:
                self.agent._logger.log(
                    "error", f"Failed to add to short term memory: {e}"
                )

    def _create_external_memory(self, output: AgentFinish) -> None:
        """Create and save an external memory item if conditions are met."""
        if (
            self.crew
            and self.agent
            and self.task
            and hasattr(self.crew, "_external_memory")
            and self.crew._external_memory
        ):
            try:
                self.crew._external_memory.save(
                    value=output.text,
                    metadata={
                        "description": self.task.description,
                        "messages": self.messages,
                    },
                )
            except Exception as e:
                self.agent._logger.log(
                    "error", f"Failed to add to external memory: {e}"
                )

    def _create_long_term_memory(self, output: AgentFinish) -> None:
        """Create and save long-term and entity memory items based on evaluation."""
        if (
            self.crew
            and self.crew._long_term_memory
            and self.crew._entity_memory
            and self.task
            and self.agent
        ):
            try:
                ltm_agent = TaskEvaluator(self.agent)
                evaluation = ltm_agent.evaluate(self.task, output.text)

                if isinstance(evaluation, ConverterError):
                    return

                long_term_memory = LongTermMemoryItem(
                    task=self.task.description,
                    agent=self.agent.role,
                    quality=evaluation.quality,
                    datetime=str(time.time()),
                    expected_output=self.task.expected_output,
                    metadata={
                        "suggestions": evaluation.suggestions,
                        "quality": evaluation.quality,
                    },
                )
                self.crew._long_term_memory.save(long_term_memory)

                entity_memories = [
                    EntityMemoryItem(
                        name=entity.name,
                        type=entity.type,
                        description=entity.description,
                        relationships="\n".join(
                            [f"- {r}" for r in entity.relationships]
                        ),
                    )
                    for entity in evaluation.entities
                ]
                if entity_memories:
                    self.crew._entity_memory.save(entity_memories)
            except AttributeError as e:
                self.agent._logger.log(
                    "error", f"Missing attributes for long term memory: {e}"
                )
            except Exception as e:
                self.agent._logger.log(
                    "error", f"Failed to add to long term memory: {e}"
                )
        elif (
            self.crew
            and self.crew._long_term_memory
            and self.crew._entity_memory is None
        ):
            if self.agent and self.agent.verbose:
                self._printer.print(
                    content="Long term memory is enabled, but entity memory is not enabled. Please configure entity memory or set memory=True to automatically enable it.",
                    color="bold_yellow",
                )
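For reference, `_save_to_memory` above leans on a small unified-memory surface. Here is a hedged sketch of that flow in isolation, using only calls that appear elsewhere in this diff (`extract_memories`, `remember`, `recall`, `LanceDBStorage`); exact signatures may differ.

```python
from crewai.memory.storage.lancedb_storage import LanceDBStorage
from crewai.memory.unified_memory import Memory

memory = Memory(storage=LanceDBStorage())  # per the CLI help, LanceDB lives under ./.crewai/memory

raw = (
    "Task: Summarize Q1 sales\n"
    "Agent: Analyst\n"
    "Expected result: A short report\n"
    "Result: Sales grew 12% quarter over quarter."
)

# What the executor does after a task finishes: distill the transcript
# into discrete memories, then persist each one.
for mem in memory.extract_memories(raw):
    memory.remember(mem)

# Later, retrieval works off a task description, best matches first.
for match in memory.recall("How did Q1 sales go?", limit=10):
    print(f"{match.score:.2f} {match.record.content}")
```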
@@ -234,7 +234,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        if self.ask_for_human_input:
            formatted_answer = self._handle_human_feedback(formatted_answer)

        self._save_to_memory(formatted_answer)
        self._create_short_term_memory(formatted_answer)
        self._create_long_term_memory(formatted_answer)
        self._create_external_memory(formatted_answer)
        return {"output": formatted_answer.output}

    def _inject_multimodal_files(self, inputs: dict[str, Any] | None = None) -> None:
@@ -1009,7 +1011,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        if self.ask_for_human_input:
            formatted_answer = await self._ahandle_human_feedback(formatted_answer)

        self._save_to_memory(formatted_answer)
        self._create_short_term_memory(formatted_answer)
        self._create_long_term_memory(formatted_answer)
        self._create_external_memory(formatted_answer)
        return {"output": formatted_answer.output}

    async def _ainvoke_loop(self) -> AgentFinish:
@@ -1,7 +1,6 @@
from importlib.metadata import version as get_version
import os
import subprocess
from typing import Any

import click

@@ -180,19 +179,9 @@ def log_tasks_outputs() -> None:


@crewai.command()
@click.option("-m", "--memory", is_flag=True, help="Reset MEMORY")
@click.option(
    "-l", "--long", is_flag=True, hidden=True,
    help="[Deprecated: use --memory] Reset memory",
)
@click.option(
    "-s", "--short", is_flag=True, hidden=True,
    help="[Deprecated: use --memory] Reset memory",
)
@click.option(
    "-e", "--entities", is_flag=True, hidden=True,
    help="[Deprecated: use --memory] Reset memory",
)
@click.option("-l", "--long", is_flag=True, help="Reset LONG TERM memory")
@click.option("-s", "--short", is_flag=True, help="Reset SHORT TERM memory")
@click.option("-e", "--entities", is_flag=True, help="Reset ENTITIES memory")
@click.option("-kn", "--knowledge", is_flag=True, help="Reset KNOWLEDGE storage")
@click.option(
    "-akn", "--agent-knowledge", is_flag=True, help="Reset AGENT KNOWLEDGE storage"
@@ -202,7 +191,6 @@ def log_tasks_outputs() -> None:
)
@click.option("-a", "--all", is_flag=True, help="Reset ALL memories")
def reset_memories(
    memory: bool,
    long: bool,
    short: bool,
    entities: bool,
@@ -212,22 +200,13 @@ def reset_memories(
    all: bool,
) -> None:
    """
    Reset the crew memories (memory, knowledge, agent_knowledge, kickoff_outputs). This will delete all the data saved.
    Reset the crew memories (long, short, entity, latest_crew_kickoff_ouputs, knowledge, agent_knowledge). This will delete all the data saved.
    """
    try:
        # Treat legacy flags as --memory with a deprecation warning
        if long or short or entities:
            legacy_used = [
                f for f, v in [("--long", long), ("--short", short), ("--entities", entities)] if v
            ]
            click.echo(
                f"Warning: {', '.join(legacy_used)} {'is' if len(legacy_used) == 1 else 'are'} "
                "deprecated. Use --memory (-m) instead. All memory is now unified."
            )
            memory = True

        memory_types = [
            memory,
            long,
            short,
            entities,
            knowledge,
            agent_knowledge,
            kickoff_outputs,
@@ -239,73 +218,12 @@ def reset_memories(
            )
            return
        reset_memories_command(
            memory, knowledge, agent_knowledge, kickoff_outputs, all
            long, short, entities, knowledge, agent_knowledge, kickoff_outputs, all
        )
    except Exception as e:
        click.echo(f"An error occurred while resetting memories: {e}", err=True)


@crewai.command()
@click.option(
    "--storage-path",
    type=str,
    default=None,
    help="Path to LanceDB memory directory. If omitted, uses ./.crewai/memory.",
)
@click.option(
    "--embedder-provider",
    type=str,
    default=None,
    help="Embedder provider for recall queries (e.g. openai, google-vertex, cohere, ollama).",
)
@click.option(
    "--embedder-model",
    type=str,
    default=None,
    help="Embedder model name (e.g. text-embedding-3-small, gemini-embedding-001).",
)
@click.option(
    "--embedder-config",
    type=str,
    default=None,
    help='Full embedder config as JSON (e.g. \'{"provider": "cohere", "config": {"model_name": "embed-v4.0"}}\').',
)
def memory(
    storage_path: str | None,
    embedder_provider: str | None,
    embedder_model: str | None,
    embedder_config: str | None,
) -> None:
    """Open the Memory TUI to browse scopes and recall memories."""
    try:
        from crewai.cli.memory_tui import MemoryTUI
    except ImportError as exc:
        click.echo(
            "Textual is required for the memory TUI but could not be imported. "
            "Try reinstalling crewai or: pip install textual"
        )
        raise SystemExit(1) from exc

    # Build embedder spec from CLI flags.
    embedder_spec: dict[str, Any] | None = None
    if embedder_config:
        import json as _json

        try:
            embedder_spec = _json.loads(embedder_config)
        except _json.JSONDecodeError as exc:
            click.echo(f"Invalid --embedder-config JSON: {exc}")
            raise SystemExit(1) from exc
    elif embedder_provider:
        cfg: dict[str, str] = {}
        if embedder_model:
            cfg["model_name"] = embedder_model
        embedder_spec = {"provider": embedder_provider, "config": cfg}

    app = MemoryTUI(storage_path=storage_path, embedder_config=embedder_spec)
    app.run()


@crewai.command()
@click.option(
    "-n",
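For reviewers, the net CLI surface after this hunk, assuming the flags land exactly as declared above (invocations are illustrative):

```bash
# Unified reset; the legacy -l/-s/-e flags still work but print a deprecation warning
crewai reset-memories --memory

# Browse stored memories in the TUI, matching the embedder used at write time
crewai memory --embedder-provider openai --embedder-model text-embedding-3-small
```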
@@ -143,12 +143,6 @@ def create_folder_structure(
    (folder_path / "src" / folder_name).mkdir(parents=True)
    (folder_path / "src" / folder_name / "tools").mkdir(parents=True)
    (folder_path / "src" / folder_name / "config").mkdir(parents=True)

    # Copy AGENTS.md to project root (top-level projects only)
    package_dir = Path(__file__).parent
    agents_md_src = package_dir / "templates" / "AGENTS.md"
    if agents_md_src.exists():
        shutil.copy2(agents_md_src, folder_path / "AGENTS.md")

    return folder_path, folder_name, class_name


@@ -1,4 +1,3 @@
import shutil
from pathlib import Path

import click
@@ -35,11 +34,6 @@ def create_flow(name):
    package_dir = Path(__file__).parent
    templates_dir = package_dir / "templates" / "flow"

    # Copy AGENTS.md to project root
    agents_md_src = package_dir / "templates" / "AGENTS.md"
    if agents_md_src.exists():
        shutil.copy2(agents_md_src, project_root / "AGENTS.md")

    # List of template files to copy
    root_template_files = [".gitignore", "pyproject.toml", "README.md"]
    src_template_files = ["__init__.py", "main.py"]
@@ -1,273 +0,0 @@
"""Textual TUI for browsing and recalling unified memory."""

from __future__ import annotations

from typing import Any, ClassVar

from textual.app import App, ComposeResult
from textual.containers import Horizontal
from textual.widgets import Footer, Header, Input, Static, Tree


# -- CrewAI brand palette --
_PRIMARY = "#eb6658"  # coral
_SECONDARY = "#1F7982"  # teal
_TERTIARY = "#ffffff"  # white


def _format_scope_info(info: Any) -> str:
    """Format ScopeInfo with Rich markup."""
    return (
        f"[bold {_PRIMARY}]{info.path}[/]\n\n"
        f"[dim]Records:[/] [bold]{info.record_count}[/]\n"
        f"[dim]Categories:[/] {', '.join(info.categories) or 'none'}\n"
        f"[dim]Oldest:[/] {info.oldest_record or '-'}\n"
        f"[dim]Newest:[/] {info.newest_record or '-'}\n"
        f"[dim]Children:[/] {', '.join(info.child_scopes) or 'none'}"
    )


class MemoryTUI(App[None]):
    """TUI to browse memory scopes and run recall queries."""

    TITLE = "CrewAI Memory"
    SUB_TITLE = "Browse scopes and recall memories"

    PAGE_SIZE: ClassVar[int] = 20

    BINDINGS: ClassVar[list[tuple[str, str, str]]] = [
        ("n", "next_page", "Next page"),
        ("p", "prev_page", "Prev page"),
    ]

    CSS = f"""
    Header {{
        background: {_PRIMARY};
        color: {_TERTIARY};
    }}
    Footer {{
        background: {_SECONDARY};
        color: {_TERTIARY};
    }}
    Footer > .footer-key--key {{
        background: {_PRIMARY};
        color: {_TERTIARY};
    }}
    Horizontal {{
        height: 1fr;
    }}
    #scope-tree {{
        width: 30%;
        padding: 1 2;
        background: {_SECONDARY} 8%;
        border-right: solid {_SECONDARY};
    }}
    #scope-tree:focus > .tree--cursor {{
        background: {_SECONDARY};
        color: {_TERTIARY};
    }}
    #scope-tree > .tree--guides {{
        color: {_SECONDARY} 50%;
    }}
    #scope-tree > .tree--guides-hover {{
        color: {_PRIMARY};
    }}
    #scope-tree > .tree--guides-selected {{
        color: {_SECONDARY};
    }}
    #info-panel {{
        width: 70%;
        padding: 1 2;
        overflow-y: auto;
    }}
    #info-panel LoadingIndicator {{
        color: {_PRIMARY};
    }}
    #recall-input {{
        margin: 0 1 1 1;
        border: tall {_SECONDARY};
    }}
    #recall-input:focus {{
        border: tall {_PRIMARY};
    }}
    """

    def __init__(
        self,
        storage_path: str | None = None,
        embedder_config: dict[str, Any] | None = None,
    ) -> None:
        super().__init__()
        self._memory: Any = None
        self._init_error: str | None = None
        self._selected_scope: str = "/"
        self._entries: list[Any] = []
        self._page: int = 0
        self._last_scope_info: Any = None
        self._custom_embedder = embedder_config is not None
        try:
            from crewai.memory.storage.lancedb_storage import LanceDBStorage
            from crewai.memory.unified_memory import Memory

            storage = LanceDBStorage(path=storage_path) if storage_path else LanceDBStorage()
            embedder = None
            if embedder_config is not None:
                from crewai.rag.embeddings.factory import build_embedder

                embedder = build_embedder(embedder_config)
            self._memory = Memory(storage=storage, embedder=embedder) if embedder else Memory(storage=storage)
        except Exception as e:
            self._init_error = str(e)

    def compose(self) -> ComposeResult:
        yield Header(show_clock=False)
        with Horizontal():
            yield self._build_scope_tree()
            initial = (
                self._init_error
                if self._init_error
                else "Select a scope or type a recall query."
            )
            yield Static(initial, id="info-panel")
        yield Input(
            placeholder="Type a query and press Enter to recall...",
            id="recall-input",
        )
        yield Footer()

    def _build_scope_tree(self) -> Tree[str]:
        tree: Tree[str] = Tree("/", id="scope-tree")
        if self._memory is None:
            tree.root.data = "/"
            tree.root.label = "/ (0 records)"
            return tree
        info = self._memory.info("/")
        tree.root.label = f"/ ({info.record_count} records)"
        tree.root.data = "/"
        self._add_children(tree.root, "/", depth=0, max_depth=3)
        tree.root.expand()
        return tree

    def _add_children(
        self,
        parent_node: Tree.Node[str],
        path: str,
        depth: int,
        max_depth: int,
    ) -> None:
        if depth >= max_depth or self._memory is None:
            return
        info = self._memory.info(path)
        for child in info.child_scopes:
            child_info = self._memory.info(child)
            label = f"{child} ({child_info.record_count})"
            node = parent_node.add(label, data=child)
            self._add_children(node, child, depth + 1, max_depth)

    def on_tree_node_selected(self, event: Tree.NodeSelected[str]) -> None:
        path = event.node.data if event.node.data is not None else "/"
        self._selected_scope = path
        panel = self.query_one("#info-panel", Static)
        if self._memory is None:
            panel.update(self._init_error or "No memory loaded.")
            return
        info = self._memory.info(path)
        self._last_scope_info = info
        self._entries = self._memory.list_records(scope=path, limit=200)
        self._page = 0
        self._render_panel()

    def _render_panel(self) -> None:
        """Refresh info panel with scope info and current page of entries."""
        panel = self.query_one("#info-panel", Static)
        if self._last_scope_info is None:
            return
        lines: list[str] = [_format_scope_info(self._last_scope_info)]
        total = len(self._entries)
        if total == 0:
            lines.append("\n[dim]No entries in this scope.[/]")
        else:
            total_pages = (total + self.PAGE_SIZE - 1) // self.PAGE_SIZE
            page_num = self._page + 1
            lines.append(
                f"\n[bold {_PRIMARY}]{'─' * 44}[/]\n"
                f"[bold]Entries[/] [dim](page {page_num} of {total_pages})[/]\n"
            )
            start = self._page * self.PAGE_SIZE
            end = min(start + self.PAGE_SIZE, total)
            for record in self._entries[start:end]:
                date_str = record.created_at.strftime("%Y-%m-%d")
                preview = (record.content[:100] + "…") if len(record.content) > 100 else record.content
                lines.append(
                    f"[{_SECONDARY}]{date_str}[/] "
                    f"[bold]{record.importance:.1f}[/] "
                    f"[dim]{preview}[/]"
                )
            if total_pages > 1:
                lines.append("\n[dim]\\[n] next \\[p] prev[/]")
        panel.update("\n".join(lines))

    def action_next_page(self) -> None:
        """Go to next page of entries."""
        total_pages = (len(self._entries) + self.PAGE_SIZE - 1) // self.PAGE_SIZE
        if total_pages <= 0:
            return
        self._page = min(self._page + 1, total_pages - 1)
        self._render_panel()

    def action_prev_page(self) -> None:
        """Go to previous page of entries."""
        if self._page <= 0:
            return
        self._page -= 1
        self._render_panel()

    def on_input_submitted(self, event: Input.Submitted) -> None:
        query = event.value.strip()
        if not query:
            return
        if self._memory is None:
            panel = self.query_one("#info-panel", Static)
            panel.update(self._init_error or "No memory loaded. Cannot recall.")
            return
        self.run_worker(self._do_recall(query), exclusive=True)

    async def _do_recall(self, query: str) -> None:
        panel = self.query_one("#info-panel", Static)
        panel.loading = True
        try:
            scope = (
                self._selected_scope
                if self._selected_scope != "/"
                else None
            )
            matches = self._memory.recall(
                query,
                scope=scope,
                limit=10,
                depth="deep",
            )
            if not matches:
                panel.update("[dim]No memories found.[/]")
                return
            lines: list[str] = []
            if not self._custom_embedder:
                lines.append(
                    "[dim italic]Note: Using default OpenAI embedder. "
                    "If memories were created with a different embedder, "
                    "pass --embedder-provider to match.[/]\n"
                )
            for m in matches:
                content = m.record.content
                score_color = _PRIMARY if m.score >= 0.5 else "dim"
                lines.append(f"[{score_color}]\\[{m.score:.2f}][/] {content[:120]}")
                lines.append(
                    f"  [dim]scope={m.record.scope} "
                    f"importance={m.record.importance:.1f}[/]"
                )
                lines.append("")
            panel.update("\n".join(lines))
        except Exception as e:
            panel.update(f"[bold red]Error:[/] {e}")
        finally:
            panel.loading = False
@@ -6,23 +6,31 @@ from crewai.cli.utils import get_crews


def reset_memories_command(
    memory: bool,
    knowledge: bool,
    agent_knowledge: bool,
    kickoff_outputs: bool,
    all: bool,
    long,
    short,
    entity,
    knowledge,
    agent_knowledge,
    kickoff_outputs,
    all,
) -> None:
    """Reset the crew memories.
    """
    Reset the crew memories.

    Args:
        memory: Whether to reset the unified memory.
        knowledge: Whether to reset the knowledge.
        agent_knowledge: Whether to reset the agents' knowledge.
        kickoff_outputs: Whether to reset the latest kickoff task outputs.
        all: Whether to reset all memories.
        long (bool): Whether to reset the long-term memory.
        short (bool): Whether to reset the short-term memory.
        entity (bool): Whether to reset the entity memory.
        kickoff_outputs (bool): Whether to reset the latest kickoff task outputs.
        all (bool): Whether to reset all memories.
        knowledge (bool): Whether to reset the knowledge.
        agent_knowledge (bool): Whether to reset the agents' knowledge.
    """

    try:
        if not any([memory, kickoff_outputs, knowledge, agent_knowledge, all]):
        if not any(
            [long, short, entity, kickoff_outputs, knowledge, agent_knowledge, all]
        ):
            click.echo(
                "No memory type specified. Please specify at least one type to reset."
            )
@@ -38,10 +46,20 @@ def reset_memories_command(
                f"[Crew ({crew.name if crew.name else crew.id})] Reset memories command has been completed."
            )
            continue
        if memory:
            crew.reset_memories(command_type="memory")
        if long:
            crew.reset_memories(command_type="long")
            click.echo(
                f"[Crew ({crew.name if crew.name else crew.id})] Memory has been reset."
                f"[Crew ({crew.name if crew.name else crew.id})] Long term memory has been reset."
            )
        if short:
            crew.reset_memories(command_type="short")
            click.echo(
                f"[Crew ({crew.name if crew.name else crew.id})] Short term memory has been reset."
            )
        if entity:
            crew.reset_memories(command_type="entity")
            click.echo(
                f"[Crew ({crew.name if crew.name else crew.id})] Entity memory has been reset."
            )
        if kickoff_outputs:
            crew.reset_memories(command_type="kickoff_outputs")
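For context, a minimal sketch of driving the restored per-type reset path programmatically (the helper name is hypothetical; reset_memories and its command types come from the hunk above):

def reset_selected(crew, long=False, short=False, entity=False, kickoff_outputs=False):
    # Mirrors the CLI branches above: one reset_memories call per enabled flag.
    for command_type, enabled in [
        ("long", long),
        ("short", short),
        ("entity", entity),
        ("kickoff_outputs", kickoff_outputs),
    ]:
        if enabled:
            crew.reset_memories(command_type=command_type)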
File diff suppressed because it is too large
Load Diff
@@ -2,7 +2,6 @@ import base64
from json import JSONDecodeError
import os
from pathlib import Path
import shutil
import subprocess
import tempfile
from typing import Any
@@ -56,11 +55,6 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
        tree_find_and_replace(project_root, "{{folder_name}}", folder_name)
        tree_find_and_replace(project_root, "{{class_name}}", class_name)

        # Copy AGENTS.md to project root
        agents_md_src = Path(__file__).parent.parent / "templates" / "AGENTS.md"
        if agents_md_src.exists():
            shutil.copy2(agents_md_src, project_root / "AGENTS.md")

        old_directory = os.getcwd()
        os.chdir(project_root)
        try:
@@ -83,6 +83,10 @@ from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.process import Process
from crewai.rag.embeddings.types import EmbedderConfig
from crewai.rag.types import SearchResult
@@ -170,7 +174,10 @@ class Crew(FlowTrackable, BaseModel):
    _logger: Logger = PrivateAttr()
    _file_handler: FileHandler = PrivateAttr()
    _cache_handler: InstanceOf[CacheHandler] = PrivateAttr(default_factory=CacheHandler)
    _memory: Any = PrivateAttr(default=None)  # Unified Memory | MemoryScope
    _short_term_memory: InstanceOf[ShortTermMemory] | None = PrivateAttr()
    _long_term_memory: InstanceOf[LongTermMemory] | None = PrivateAttr()
    _entity_memory: InstanceOf[EntityMemory] | None = PrivateAttr()
    _external_memory: InstanceOf[ExternalMemory] | None = PrivateAttr()
    _train: bool | None = PrivateAttr(default=False)
    _train_iteration: int | None = PrivateAttr()
    _inputs: dict[str, Any] | None = PrivateAttr(default=None)
@@ -180,7 +187,6 @@ class Crew(FlowTrackable, BaseModel):
    _task_output_handler: TaskOutputStorageHandler = PrivateAttr(
        default_factory=TaskOutputStorageHandler
    )
    _kickoff_event_id: str | None = PrivateAttr(default=None)

    name: str | None = Field(default="crew")
    cache: bool = Field(default=True)
@@ -188,12 +194,25 @@ class Crew(FlowTrackable, BaseModel):
    agents: list[BaseAgent] = Field(default_factory=list)
    process: Process = Field(default=Process.sequential)
    verbose: bool = Field(default=False)
    memory: bool | Any = Field(
    memory: bool = Field(
        default=False,
        description=(
            "Enable crew memory. Pass True for default Memory(), "
            "or a Memory/MemoryScope/MemorySlice instance for custom configuration."
        ),
        description="If crew should use memory to store memories of it's execution",
    )
    short_term_memory: InstanceOf[ShortTermMemory] | None = Field(
        default=None,
        description="An Instance of the ShortTermMemory to be used by the Crew",
    )
    long_term_memory: InstanceOf[LongTermMemory] | None = Field(
        default=None,
        description="An Instance of the LongTermMemory to be used by the Crew",
    )
    entity_memory: InstanceOf[EntityMemory] | None = Field(
        default=None,
        description="An Instance of the EntityMemory to be used by the Crew",
    )
    external_memory: InstanceOf[ExternalMemory] | None = Field(
        default=None,
        description="An Instance of the ExternalMemory to be used by the Crew",
    )
    embedder: EmbedderConfig | None = Field(
        default=None,
@@ -352,23 +371,31 @@ class Crew(FlowTrackable, BaseModel):

        return self

    def _initialize_default_memories(self) -> None:
        self._long_term_memory = self._long_term_memory or LongTermMemory()
        self._short_term_memory = self._short_term_memory or ShortTermMemory(
            crew=self,
            embedder_config=self.embedder,
        )
        self._entity_memory = self.entity_memory or EntityMemory(
            crew=self, embedder_config=self.embedder
        )

    @model_validator(mode="after")
    def create_crew_memory(self) -> Crew:
        """Initialize unified memory, respecting crew embedder config."""
        if self.memory is True:
            from crewai.memory.unified_memory import Memory
        """Initialize private memory attributes."""
        self._external_memory = (
            # External memory does not support a default value since it was
            # designed to be managed entirely externally
            self.external_memory.set_crew(self) if self.external_memory else None
        )

            embedder = None
            if self.embedder is not None:
                from crewai.rag.embeddings.factory import build_embedder
        self._long_term_memory = self.long_term_memory
        self._short_term_memory = self.short_term_memory
        self._entity_memory = self.entity_memory

                embedder = build_embedder(self.embedder)
            self._memory = Memory(embedder=embedder)
        elif self.memory:
            # User passed a Memory / MemoryScope / MemorySlice instance
            self._memory = self.memory
        else:
            self._memory = None
        if self.memory:
            self._initialize_default_memories()

        return self
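The removed create_crew_memory branches boil down to a three-way dispatch; a self-contained sketch of that logic (default_factory stands in for the Memory constructor; the helper itself is hypothetical):

from typing import Any

def resolve_memory(memory: bool | Any, default_factory) -> Any | None:
    # True -> build a default instance; falsy -> no memory; anything else is
    # assumed to be a pre-built Memory/MemoryScope/MemorySlice instance.
    if memory is True:
        return default_factory()
    if memory:
        return memory
    return None

assert resolve_memory(False, dict) is None
assert resolve_memory(True, dict) == {}
custom = object()
assert resolve_memory(custom, dict) is custom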
@@ -732,11 +759,7 @@ class Crew(FlowTrackable, BaseModel):
        except Exception as e:
            crewai_event_bus.emit(
                self,
                CrewKickoffFailedEvent(
                    error=str(e),
                    crew_name=self.name,
                    started_event_id=self._kickoff_event_id,
                ),
                CrewKickoffFailedEvent(error=str(e), crew_name=self.name),
            )
            raise
        finally:
@@ -926,11 +949,7 @@ class Crew(FlowTrackable, BaseModel):
        except Exception as e:
            crewai_event_bus.emit(
                self,
                CrewKickoffFailedEvent(
                    error=str(e),
                    crew_name=self.name,
                    started_event_id=self._kickoff_event_id,
                ),
                CrewKickoffFailedEvent(error=str(e), crew_name=self.name),
            )
            raise
        finally:
@@ -1295,11 +1314,6 @@ class Crew(FlowTrackable, BaseModel):
        if agent and (hasattr(agent, "mcps") and getattr(agent, "mcps", None)):
            tools = self._add_mcp_tools(task, tools)

        # Add memory tools if memory is available (agent or crew level)
        resolved_memory = getattr(agent, "memory", None) or self._memory
        if resolved_memory is not None:
            tools = self._add_memory_tools(tools, resolved_memory)

        files = get_all_files(self.id, task.id)
        if files:
            supported_types: list[str] = []
@@ -1407,22 +1421,6 @@ class Crew(FlowTrackable, BaseModel):
            return self._merge_tools(tools, cast(list[BaseTool], code_tools))
        return tools

    def _add_memory_tools(
        self, tools: list[BaseTool], memory: Any
    ) -> list[BaseTool]:
        """Add recall and remember tools when memory is available.

        Args:
            tools: Current list of tools.
            memory: The resolved Memory, MemoryScope, or MemorySlice instance.

        Returns:
            Updated list with memory tools added.
        """
        from crewai.tools.memory_tools import create_memory_tools

        return self._merge_tools(tools, create_memory_tools(memory))

    def _add_file_tools(
        self, tools: list[BaseTool], files: dict[str, Any]
    ) -> list[BaseTool]:
@@ -1526,7 +1524,6 @@ class Crew(FlowTrackable, BaseModel):
                crew_name=self.name,
                output=final_task_output,
                total_tokens=self.token_usage.total_tokens,
                started_event_id=self._kickoff_event_id,
            ),
        )

@@ -1667,7 +1664,10 @@ class Crew(FlowTrackable, BaseModel):
            "_execution_span",
            "_file_handler",
            "_cache_handler",
            "_memory",
            "_short_term_memory",
            "_long_term_memory",
            "_entity_memory",
            "_external_memory",
            "agents",
            "tasks",
            "knowledge_sources",
@@ -1701,8 +1701,18 @@ class Crew(FlowTrackable, BaseModel):

        copied_data = self.model_dump(exclude=exclude)
        copied_data = {k: v for k, v in copied_data.items() if v is not None}
        if getattr(self, "_memory", None):
            copied_data["memory"] = self._memory
        if self.short_term_memory:
            copied_data["short_term_memory"] = self.short_term_memory.model_copy(
                deep=True
            )
        if self.long_term_memory:
            copied_data["long_term_memory"] = self.long_term_memory.model_copy(
                deep=True
            )
        if self.entity_memory:
            copied_data["entity_memory"] = self.entity_memory.model_copy(deep=True)
        if self.external_memory:
            copied_data["external_memory"] = self.external_memory.model_copy(deep=True)

        copied_data.pop("agents", None)
        copied_data.pop("tasks", None)
@@ -1833,24 +1843,23 @@ class Crew(FlowTrackable, BaseModel):

        Args:
            command_type: Type of memory to reset.
                Valid options: 'memory', 'knowledge', 'agent_knowledge',
                'kickoff_outputs', or 'all'. Legacy names 'long', 'short',
                'entity', 'external' are treated as 'memory'.
                Valid options: 'long', 'short', 'entity', 'knowledge', 'agent_knowledge',
                'kickoff_outputs', or 'all'

        Raises:
            ValueError: If an invalid command type is provided.
            RuntimeError: If memory reset operation fails.
        """
        legacy_memory = frozenset(["long", "short", "entity", "external"])
        if command_type in legacy_memory:
            command_type = "memory"
        valid_types = frozenset(
            [
                "memory",
                "long",
                "short",
                "entity",
                "knowledge",
                "agent_knowledge",
                "kickoff_outputs",
                "all",
                "external",
            ]
        )

@@ -1956,10 +1965,25 @@ class Crew(FlowTrackable, BaseModel):
        ) + agent_knowledges

        return {
            "memory": {
                "system": getattr(self, "_memory", None),
            "short": {
                "system": getattr(self, "_short_term_memory", None),
                "reset": default_reset,
                "name": "Memory",
                "name": "Short Term",
            },
            "entity": {
                "system": getattr(self, "_entity_memory", None),
                "reset": default_reset,
                "name": "Entity",
            },
            "external": {
                "system": getattr(self, "_external_memory", None),
                "reset": default_reset,
                "name": "External",
            },
            "long": {
                "system": getattr(self, "_long_term_memory", None),
                "reset": default_reset,
                "name": "Long Term",
            },
            "kickoff_outputs": {
                "system": getattr(self, "_task_output_handler", None),
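A sketch of the registry shape assembled in the hunk above, with dummy stores standing in for the real memory systems (DummyStore and the two instances are illustrative):

class DummyStore:
    def __init__(self) -> None:
        self.cleared = False

    def reset(self) -> None:
        self.cleared = True

def default_reset(system: DummyStore) -> None:
    system.reset()

short_term, long_term = DummyStore(), DummyStore()
registry = {
    "short": {"system": short_term, "reset": default_reset, "name": "Short Term"},
    "long": {"system": long_term, "reset": default_reset, "name": "Long Term"},
}

# Each entry bundles the storage object, a reset callable, and a display name,
# so the reset command can dispatch uniformly over whatever exists.
for entry in registry.values():
    if entry["system"] is not None:
        entry["reset"](entry["system"])

assert short_term.cleared and long_term.cleared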
@@ -265,9 +265,10 @@ def prepare_kickoff(
        normalized = {}
    normalized = before_callback(normalized)

    started_event = CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized)
    crew._kickoff_event_id = started_event.event_id
    future = crewai_event_bus.emit(crew, started_event)
    future = crewai_event_bus.emit(
        crew,
        CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized),
    )
    if future is not None:
        try:
            future.result()
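The emit-then-wait shape above matters because handlers may run off-thread; a minimal sketch using a plain ThreadPoolExecutor (the bus internals here are an assumption, not crewai's implementation):

from concurrent.futures import Future, ThreadPoolExecutor

_pool = ThreadPoolExecutor(max_workers=1)

def emit(handler, payload) -> Future | None:
    # Returns a Future when the handler is scheduled on another thread,
    # or None for synchronous delivery.
    return _pool.submit(handler, payload)

future = emit(print, "CrewKickoffStartedEvent")
if future is not None:
    future.result()  # block until handlers finish; re-raises their exceptions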
@@ -1106,7 +1106,9 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
        if self.state.ask_for_human_input:
            formatted_answer = self._handle_human_feedback(formatted_answer)

        self._save_to_memory(formatted_answer)
        self._create_short_term_memory(formatted_answer)
        self._create_long_term_memory(formatted_answer)
        self._create_external_memory(formatted_answer)

        return {"output": formatted_answer.output}

@@ -1189,7 +1191,9 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
        if self.state.ask_for_human_input:
            formatted_answer = await self._ahandle_human_feedback(formatted_answer)

        self._save_to_memory(formatted_answer)
        self._create_short_term_memory(formatted_answer)
        self._create_long_term_memory(formatted_answer)
        self._create_external_memory(formatted_answer)

        return {"output": formatted_answer.output}

@@ -416,18 +416,13 @@ def and_(*conditions: str | FlowCondition | Callable[..., Any]) -> FlowCondition
    return {"type": AND_CONDITION, "conditions": processed_conditions}


class LockedListProxy(list, Generic[T]):  # type: ignore[type-arg]
class LockedListProxy(Generic[T]):
    """Thread-safe proxy for list operations.

    Subclasses ``list`` so that ``isinstance(proxy, list)`` returns True,
    which is required by libraries like LanceDB and Pydantic that do strict
    type checks. All mutations go through the lock; reads delegate to the
    underlying list.
    Wraps a list and uses a lock for all mutating operations.
    """

    def __init__(self, lst: list[T], lock: threading.Lock) -> None:
        # Do NOT call super().__init__() -- we don't want to copy data into
        # the builtin list storage. All access goes through self._list.
        self._list = lst
        self._lock = lock

@@ -481,32 +476,14 @@ class LockedListProxy(list, Generic[T]):  # type: ignore[type-arg]
    def __bool__(self) -> bool:
        return bool(self._list)

    def __eq__(self, other: object) -> bool:  # type: ignore[override]
        """Compare based on the underlying list contents."""
        if isinstance(other, LockedListProxy):
            # Avoid deadlocks by acquiring locks in a consistent order.
            first, second = (self, other) if id(self) <= id(other) else (other, self)
            with first._lock:
                with second._lock:
                    return first._list == second._list
        with self._lock:
            return self._list == other

    def __ne__(self, other: object) -> bool:  # type: ignore[override]
        return not self.__eq__(other)


class LockedDictProxy(dict, Generic[T]):  # type: ignore[type-arg]
class LockedDictProxy(Generic[T]):
    """Thread-safe proxy for dict operations.

    Subclasses ``dict`` so that ``isinstance(proxy, dict)`` returns True,
    which is required by libraries like Pydantic that do strict type checks.
    All mutations go through the lock; reads delegate to the underlying dict.
    Wraps a dict and uses a lock for all mutating operations.
    """

    def __init__(self, d: dict[str, T], lock: threading.Lock) -> None:
        # Do NOT call super().__init__() -- we don't want to copy data into
        # the builtin dict storage. All access goes through self._dict.
        self._dict = d
        self._lock = lock

@@ -564,20 +541,6 @@ class LockedDictProxy(dict, Generic[T]):  # type: ignore[type-arg]
    def __bool__(self) -> bool:
        return bool(self._dict)

    def __eq__(self, other: object) -> bool:  # type: ignore[override]
        """Compare based on the underlying dict contents."""
        if isinstance(other, LockedDictProxy):
            # Avoid deadlocks by acquiring locks in a consistent order.
            first, second = (self, other) if id(self) <= id(other) else (other, self)
            with first._lock:
                with second._lock:
                    return first._dict == second._dict
        with self._lock:
            return self._dict == other

    def __ne__(self, other: object) -> bool:  # type: ignore[override]
        return not self.__eq__(other)

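A minimal self-contained sketch of the subclassing trick the removed docstrings describe: inherit from list so isinstance() checks pass, but keep the real storage in self._list so mutations can take the lock first (only append and __iter__ are shown; the class name is illustrative):

import threading

class LockedList(list):
    def __init__(self, backing: list, lock: threading.Lock) -> None:
        # Deliberately do not copy data into the builtin list storage;
        # all reads and writes go through self._list.
        self._list = backing
        self._lock = lock

    def append(self, item) -> None:
        with self._lock:
            self._list.append(item)

    def __iter__(self):
        return iter(self._list)

proxy = LockedList([1, 2], threading.Lock())
assert isinstance(proxy, list)  # why the subclass existed at all
proxy.append(3)
assert list(proxy) == [1, 2, 3]  # reads delegate to the backing list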
class StateProxy(Generic[T]):
    """Proxy that provides thread-safe access to flow state.
@@ -737,7 +700,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
    name: str | None = None
    tracing: bool | None = None
    stream: bool = False
    memory: Any = None  # Memory | MemoryScope | MemorySlice | None; auto-created if not set

    def __class_getitem__(cls: type[Flow[T]], item: type[T]) -> type[Flow[T]]:
        class _FlowGeneric(cls):  # type: ignore
@@ -805,14 +767,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
            ),
        )

        # Auto-create memory if not provided at class or instance level.
        # Internal flows (RecallFlow, ConsolidationFlow) set _skip_auto_memory
        # to avoid creating a wasteful standalone Memory instance.
        if self.memory is None and not getattr(self, "_skip_auto_memory", False):
            from crewai.memory.unified_memory import Memory

            self.memory = Memory()

        # Register all flow-related methods
        for method_name in dir(self):
            if not method_name.startswith("_"):
@@ -823,56 +777,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
                method = method.__get__(self, self.__class__)
                self._methods[method.__name__] = method

    def recall(self, query: str, **kwargs: Any) -> Any:
        """Recall relevant memories. Delegates to this flow's memory.

        Args:
            query: Natural language query.
            **kwargs: Passed to memory.recall (e.g. scope, categories, limit, depth).

        Returns:
            Result of memory.recall(query, **kwargs).

        Raises:
            ValueError: If no memory is configured for this flow.
        """
        if self.memory is None:
            raise ValueError("No memory configured for this flow")
        return self.memory.recall(query, **kwargs)

    def remember(self, content: str, **kwargs: Any) -> Any:
        """Store content in memory. Delegates to this flow's memory.

        Args:
            content: Text to remember.
            **kwargs: Passed to memory.remember (e.g. scope, categories, metadata).

        Returns:
            Result of memory.remember(content, **kwargs).

        Raises:
            ValueError: If no memory is configured for this flow.
        """
        if self.memory is None:
            raise ValueError("No memory configured for this flow")
        return self.memory.remember(content, **kwargs)

    def extract_memories(self, content: str) -> list[str]:
        """Extract discrete memories from content. Delegates to this flow's memory.

        Args:
            content: Raw text (e.g. task + result dump).

        Returns:
            List of short, self-contained memory statements.

        Raises:
            ValueError: If no memory is configured for this flow.
        """
        if self.memory is None:
            raise ValueError("No memory configured for this flow")
        return self.memory.extract_memories(content)

    def _mark_or_listener_fired(self, listener_name: FlowMethodName) -> bool:
        """Mark an OR listener as fired atomically.

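For reference, the three delegating helpers deleted above were meant to be used from inside flow methods like this (a sketch against the pre-removal API; the flow name and scope value are illustrative):

from crewai.flow.flow import Flow, start

class NotesFlow(Flow):
    @start()
    def capture(self):
        # Store a fact, then query it back; both delegate to self.memory.
        self.remember("Team standup moved to 9:30", scope="/team")
        for match in self.recall("when is standup?", limit=3):
            print(match.record.content)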
@@ -2,7 +2,6 @@ from __future__ import annotations

import asyncio
from collections.abc import Callable
import time
from functools import wraps
import inspect
import json
@@ -49,11 +48,6 @@ from crewai.events.types.agent_events import (
    LiteAgentExecutionErrorEvent,
    LiteAgentExecutionStartedEvent,
)
from crewai.events.types.memory_events import (
    MemoryRetrievalCompletedEvent,
    MemoryRetrievalFailedEvent,
    MemoryRetrievalStartedEvent,
)
from crewai.events.types.logging_events import AgentLogsExecutionEvent
from crewai.flow.flow_trackable import FlowTrackable
from crewai.hooks.llm_hooks import get_after_llm_call_hooks, get_before_llm_call_hooks
@@ -250,10 +244,6 @@ class LiteAgent(FlowTrackable, BaseModel):
        description="A2A (Agent-to-Agent) configuration for delegating tasks to remote agents. "
        "Can be a single A2AConfig/A2AClientConfig/A2AServerConfig, or a list of configurations.",
    )
    memory: bool | Any | None = Field(
        default=None,
        description="If True, use default Memory(). If Memory/MemoryScope/MemorySlice, use it for recall and remember.",
    )
    tools_results: list[dict[str, Any]] = Field(
        default_factory=list, description="Results of the tools used by the agent."
    )
@@ -276,7 +266,6 @@ class LiteAgent(FlowTrackable, BaseModel):
    _after_llm_call_hooks: list[AfterLLMCallHookType] = PrivateAttr(
        default_factory=get_after_llm_call_hooks
    )
    _memory: Any = PrivateAttr(default=None)

    @model_validator(mode="after")
    def emit_deprecation_warning(self) -> Self:
@@ -374,19 +363,6 @@ class LiteAgent(FlowTrackable, BaseModel):

        return self

    @model_validator(mode="after")
    def resolve_memory(self) -> Self:
        """Resolve memory field to _memory: default Memory() when True, else user instance or None."""
        if self.memory is True:
            from crewai.memory.unified_memory import Memory

            object.__setattr__(self, "_memory", Memory())
        elif self.memory is not None and self.memory is not False:
            object.__setattr__(self, "_memory", self.memory)
        else:
            object.__setattr__(self, "_memory", None)
        return self

    @field_validator("guardrail", mode="before")
    @classmethod
    def validate_guardrail_function(
@@ -498,7 +474,6 @@ class LiteAgent(FlowTrackable, BaseModel):
        self._messages = self._format_messages(
            messages, response_format=response_format, input_files=input_files
        )
        self._inject_memory_context()

        return self._execute_core(
            agent_info=agent_info, response_format=response_format
@@ -521,80 +496,6 @@ class LiteAgent(FlowTrackable, BaseModel):
            )
            raise e

    def _get_last_user_content(self) -> str:
        """Get the last user message content from _messages for recall/input."""
        for msg in reversed(self._messages):
            if msg.get("role") == "user":
                content = msg.get("content")
                return content if isinstance(content, str) else ""
        return ""

    def _inject_memory_context(self) -> None:
        """Recall relevant memories and append to the system message. No-op if _memory is None."""
        if self._memory is None:
            return
        query = self._get_last_user_content()
        crewai_event_bus.emit(
            self,
            event=MemoryRetrievalStartedEvent(
                task_id=None,
                source_type="lite_agent",
            ),
        )
        start_time = time.time()
        memory_block = ""
        try:
            matches = self._memory.recall(query, limit=10)
            if matches:
                memory_block = "Relevant memories:\n" + "\n".join(
                    f"- {m.record.content}" for m in matches
                )
            if memory_block:
                formatted = self.i18n.slice("memory").format(memory=memory_block)
                if self._messages and self._messages[0].get("role") == "system":
                    self._messages[0]["content"] = (
                        self._messages[0].get("content", "") + "\n\n" + formatted
                    )
            crewai_event_bus.emit(
                self,
                event=MemoryRetrievalCompletedEvent(
                    task_id=None,
                    memory_content=memory_block,
                    retrieval_time_ms=(time.time() - start_time) * 1000,
                    source_type="lite_agent",
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryRetrievalFailedEvent(
                    task_id=None,
                    source_type="lite_agent",
                    error=str(e),
                ),
            )

    def _save_to_memory(self, output_text: str) -> None:
        """Extract discrete memories from the run and remember each. No-op if _memory is None."""
        if self._memory is None:
            return
        input_str = self._get_last_user_content() or "User request"
        try:
            raw = (
                f"Input: {input_str}\n"
                f"Agent: {self.role}\n"
                f"Result: {output_text}"
            )
            extracted = self._memory.extract_memories(raw)
            for mem in extracted:
                self._memory.remember(mem)
        except Exception as e:
            if self.verbose:
                self._printer.print(
                    content=f"Failed to save to memory: {e}",
                    color="yellow",
                )

    def _execute_core(
        self, agent_info: dict[str, Any], response_format: type[BaseModel] | None = None
    ) -> LiteAgentOutput:
@@ -610,8 +511,6 @@ class LiteAgent(FlowTrackable, BaseModel):

        # Execute the agent using invoke loop
        agent_finish = self._invoke_loop()
        if self._memory is not None:
            self._save_to_memory(agent_finish.output)
        formatted_result: BaseModel | None = None

        active_response_format = response_format or self.response_format

@@ -1580,12 +1580,10 @@ class AnthropicCompletion(BaseLLM):
            usage = response.usage
            input_tokens = getattr(usage, "input_tokens", 0)
            output_tokens = getattr(usage, "output_tokens", 0)
            cache_read_tokens = getattr(usage, "cache_read_input_tokens", 0) or 0
            return {
                "input_tokens": input_tokens,
                "output_tokens": output_tokens,
                "total_tokens": input_tokens + output_tokens,
                "cached_prompt_tokens": cache_read_tokens,
            }
        return {"total_tokens": 0}

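A self-contained sketch of the defensive extraction pattern above: getattr with a default tolerates SDK usage objects that lack the cache field, and cache_read_input_tokens may be None, hence the "or 0" (the _Usage stub is illustrative):

class _Usage:
    input_tokens = 12
    output_tokens = 30
    cache_read_input_tokens = None  # present but unset

usage = _Usage()
cached = getattr(usage, "cache_read_input_tokens", 0) or 0
summary = {
    "input_tokens": usage.input_tokens,
    "output_tokens": usage.output_tokens,
    "total_tokens": usage.input_tokens + usage.output_tokens,
    "cached_prompt_tokens": cached,
}
assert summary["total_tokens"] == 42 and summary["cached_prompt_tokens"] == 0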
@@ -4,6 +4,7 @@ import json
import logging
import os
from typing import TYPE_CHECKING, Any, TypedDict
from urllib.parse import urlparse

from pydantic import BaseModel
from typing_extensions import Self
@@ -175,11 +176,51 @@ class AzureCompletion(BaseLLM):
            prefix in model.lower() for prefix in ["gpt-", "o1-", "text-"]
        )

        self.is_azure_openai_endpoint = (
            "openai.azure.com" in self.endpoint
            and "/openai/deployments/" in self.endpoint
        self.is_azure_openai_endpoint = self._is_azure_openai_deployment_endpoint(
            self.endpoint
        )

    @staticmethod
    def _parse_endpoint_url(endpoint: str):
        parsed_endpoint = urlparse(endpoint)
        if parsed_endpoint.hostname:
            return parsed_endpoint

        # Support endpoint values without a URL scheme.
        return urlparse(f"https://{endpoint}")

    @staticmethod
    def _is_azure_openai_hostname(endpoint: str) -> bool:
        parsed_endpoint = AzureCompletion._parse_endpoint_url(endpoint)
        hostname = parsed_endpoint.hostname or ""
        labels = [label for label in hostname.lower().split(".") if label]

        return len(labels) >= 3 and labels[-3:] == ["openai", "azure", "com"]

    @staticmethod
    def _get_endpoint_path_segments(endpoint: str) -> list[str]:
        parsed_endpoint = AzureCompletion._parse_endpoint_url(endpoint)
        return [segment for segment in parsed_endpoint.path.split("/") if segment]

    @staticmethod
    def _is_azure_openai_deployment_endpoint(endpoint: str) -> bool:
        if not AzureCompletion._is_azure_openai_hostname(endpoint):
            return False

        path_segments = AzureCompletion._get_endpoint_path_segments(endpoint)
        return len(path_segments) >= 3 and path_segments[:2] == [
            "openai",
            "deployments",
        ]

    @staticmethod
    def _is_azure_openai_deployments_collection(endpoint: str) -> bool:
        if not AzureCompletion._is_azure_openai_hostname(endpoint):
            return False

        path_segments = AzureCompletion._get_endpoint_path_segments(endpoint)
        return path_segments == ["openai", "deployments"]

    @staticmethod
    def _validate_and_fix_endpoint(endpoint: str, model: str) -> str:
        """Validate and fix Azure endpoint URL format.
@@ -194,10 +235,12 @@ class AzureCompletion(BaseLLM):
        Returns:
            Validated and potentially corrected endpoint URL
        """
        if "openai.azure.com" in endpoint and "/openai/deployments/" not in endpoint:
        if AzureCompletion._is_azure_openai_hostname(
            endpoint
        ) and not AzureCompletion._is_azure_openai_deployment_endpoint(endpoint):
            endpoint = endpoint.rstrip("/")

            if not endpoint.endswith("/openai/deployments"):
            if not AzureCompletion._is_azure_openai_deployments_collection(endpoint):
                deployment_name = model.replace("azure/", "")
                endpoint = f"{endpoint}/openai/deployments/{deployment_name}"
                logging.info(f"Constructed Azure OpenAI endpoint URL: {endpoint}")
@@ -425,9 +468,8 @@ class AzureCompletion(BaseLLM):
            "stream": self.stream,
        }

        model_extras: dict[str, Any] = {}
        if self.stream:
            model_extras["stream_options"] = {"include_usage": True}
            params["model_extras"] = {"stream_options": {"include_usage": True}}

        if response_model and self.is_openai_model:
            model_description = generate_model_description(response_model)
@@ -465,13 +507,6 @@ class AzureCompletion(BaseLLM):
            params["tools"] = self._convert_tools_for_interference(tools)
            params["tool_choice"] = "auto"

        prompt_cache_key = self.additional_params.get("prompt_cache_key")
        if prompt_cache_key:
            model_extras["prompt_cache_key"] = prompt_cache_key

        if model_extras:
            params["model_extras"] = model_extras

        additional_params = self.additional_params
        additional_drop_params = additional_params.get("additional_drop_params")
        drop_params = additional_params.get("drop_params")
@@ -1071,15 +1106,10 @@ class AzureCompletion(BaseLLM):
        """Extract token usage from Azure response."""
        if hasattr(response, "usage") and response.usage:
            usage = response.usage
            cached_tokens = 0
            prompt_details = getattr(usage, "prompt_tokens_details", None)
            if prompt_details:
                cached_tokens = getattr(prompt_details, "cached_tokens", 0) or 0
            return {
                "prompt_tokens": getattr(usage, "prompt_tokens", 0),
                "completion_tokens": getattr(usage, "completion_tokens", 0),
                "total_tokens": getattr(usage, "total_tokens", 0),
                "cached_prompt_tokens": cached_tokens,
            }
        return {"total_tokens": 0}

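A standalone sketch of the hostname check introduced above: exact label comparison on the parsed hostname instead of substring matching, so a lookalike domain that merely contains "openai.azure.com" no longer passes (this is a simplified single-function version of the static helpers in the hunk):

from urllib.parse import urlparse

def is_azure_openai_hostname(endpoint: str) -> bool:
    # Tolerate endpoints without a URL scheme, as the helper above does.
    parsed = urlparse(endpoint if "://" in endpoint else f"https://{endpoint}")
    labels = [label for label in (parsed.hostname or "").lower().split(".") if label]
    return len(labels) >= 3 and labels[-3:] == ["openai", "azure", "com"]

assert is_azure_openai_hostname("https://myres.openai.azure.com")
assert not is_azure_openai_hostname("https://openai.azure.com.evil.example")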
@@ -1295,13 +1295,11 @@ class GeminiCompletion(BaseLLM):
        """Extract token usage from Gemini response."""
        if response.usage_metadata:
            usage = response.usage_metadata
            cached_tokens = getattr(usage, "cached_content_token_count", 0) or 0
            return {
                "prompt_token_count": getattr(usage, "prompt_token_count", 0),
                "candidates_token_count": getattr(usage, "candidates_token_count", 0),
                "total_token_count": getattr(usage, "total_token_count", 0),
                "total_tokens": getattr(usage, "total_token_count", 0),
                "cached_prompt_tokens": cached_tokens,
            }
        return {"total_tokens": 0}

@@ -1094,7 +1094,11 @@ class OpenAICompletion(BaseLLM):
                if reasoning_items:
                    self._last_reasoning_items = reasoning_items
                if event.response and event.response.usage:
                    usage = self._extract_responses_token_usage(event.response)
                    usage = {
                        "prompt_tokens": event.response.usage.input_tokens,
                        "completion_tokens": event.response.usage.output_tokens,
                        "total_tokens": event.response.usage.total_tokens,
                    }
                    self._track_token_usage_internal(usage)

        # If parse_tool_outputs is enabled, return structured result
@@ -1218,7 +1222,11 @@ class OpenAICompletion(BaseLLM):
                if reasoning_items:
                    self._last_reasoning_items = reasoning_items
                if event.response and event.response.usage:
                    usage = self._extract_responses_token_usage(event.response)
                    usage = {
                        "prompt_tokens": event.response.usage.input_tokens,
                        "completion_tokens": event.response.usage.output_tokens,
                        "total_tokens": event.response.usage.total_tokens,
                    }
                    self._track_token_usage_internal(usage)

        # If parse_tool_outputs is enabled, return structured result

@@ -1302,18 +1310,11 @@ class OpenAICompletion(BaseLLM):
    def _extract_responses_token_usage(self, response: Response) -> dict[str, Any]:
        """Extract token usage from Responses API response."""
        if response.usage:
            result = {
            return {
                "prompt_tokens": response.usage.input_tokens,
                "completion_tokens": response.usage.output_tokens,
                "total_tokens": response.usage.total_tokens,
            }
            # Extract cached prompt tokens from input_tokens_details
            input_details = getattr(response.usage, "input_tokens_details", None)
            if input_details:
                result["cached_prompt_tokens"] = (
                    getattr(input_details, "cached_tokens", 0) or 0
                )
            return result
        return {"total_tokens": 0}

    def _extract_builtin_tool_outputs(self, response: Response) -> ResponsesAPIResult:
@@ -1695,99 +1696,6 @@ class OpenAICompletion(BaseLLM):

        return content

    def _finalize_streaming_response(
        self,
        full_response: str,
        tool_calls: dict[int, dict[str, Any]],
        usage_data: dict[str, int],
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
    ) -> str | list[dict[str, Any]]:
        """Finalize a streaming response with usage tracking, tool call handling, and events.

        Args:
            full_response: The accumulated text response from the stream.
            tool_calls: Accumulated tool calls from the stream, keyed by index.
            usage_data: Token usage data from the stream.
            params: The completion parameters containing messages.
            available_functions: Available functions for tool calling.
            from_task: Task that initiated the call.
            from_agent: Agent that initiated the call.

        Returns:
            Tool calls list when tools were invoked without available_functions,
            tool execution result when available_functions is provided,
            or the text response string.
        """
        self._track_token_usage_internal(usage_data)

        if tool_calls and not available_functions:
            tool_calls_list = [
                {
                    "id": call_data["id"],
                    "type": "function",
                    "function": {
                        "name": call_data["name"],
                        "arguments": call_data["arguments"],
                    },
                    "index": call_data["index"],
                }
                for call_data in tool_calls.values()
            ]
            self._emit_call_completed_event(
                response=tool_calls_list,
                call_type=LLMCallType.TOOL_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=params["messages"],
            )
            return tool_calls_list

        if tool_calls and available_functions:
            for call_data in tool_calls.values():
                function_name = call_data["name"]
                arguments = call_data["arguments"]

                if not function_name or not arguments:
                    continue

                if function_name not in available_functions:
                    logging.warning(
                        f"Function '{function_name}' not found in available functions"
                    )
                    continue

                try:
                    function_args = json.loads(arguments)
                except json.JSONDecodeError as e:
                    logging.error(f"Failed to parse streamed tool arguments: {e}")
                    continue

                result = self._handle_tool_execution(
                    function_name=function_name,
                    function_args=function_args,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )

                if result is not None:
                    return result

        full_response = self._apply_stop_words(full_response)

        self._emit_call_completed_event(
            response=full_response,
            call_type=LLMCallType.LLM_CALL,
            from_task=from_task,
            from_agent=from_agent,
            messages=params["messages"],
        )

        return full_response

    def _handle_streaming_completion(
        self,
        params: dict[str, Any],
@@ -1795,7 +1703,7 @@ class OpenAICompletion(BaseLLM):
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | list[dict[str, Any]] | BaseModel:
    ) -> str | BaseModel:
        """Handle streaming chat completion."""
        full_response = ""
        tool_calls: dict[int, dict[str, Any]] = {}
@@ -1912,20 +1820,54 @@ class OpenAICompletion(BaseLLM):
                    response_id=response_id_stream,
                )

        result = self._finalize_streaming_response(
            full_response=full_response,
            tool_calls=tool_calls,
            usage_data=usage_data,
            params=params,
            available_functions=available_functions,
        self._track_token_usage_internal(usage_data)

        if tool_calls and available_functions:
            for call_data in tool_calls.values():
                function_name = call_data["name"]
                arguments = call_data["arguments"]

                # Skip if function name is empty or arguments are empty
                if not function_name or not arguments:
                    continue

                # Check if function exists in available functions
                if function_name not in available_functions:
                    logging.warning(
                        f"Function '{function_name}' not found in available functions"
                    )
                    continue

                try:
                    function_args = json.loads(arguments)
                except json.JSONDecodeError as e:
                    logging.error(f"Failed to parse streamed tool arguments: {e}")
                    continue

                result = self._handle_tool_execution(
                    function_name=function_name,
                    function_args=function_args,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )

                if result is not None:
                    return result

        full_response = self._apply_stop_words(full_response)

        self._emit_call_completed_event(
            response=full_response,
            call_type=LLMCallType.LLM_CALL,
            from_task=from_task,
            from_agent=from_agent,
            messages=params["messages"],
        )

        return self._invoke_after_llm_call_hooks(
            params["messages"], full_response, from_agent
        )
        if isinstance(result, str):
            return self._invoke_after_llm_call_hooks(
                params["messages"], result, from_agent
            )
        return result

    async def _ahandle_completion(
        self,
@@ -2074,7 +2016,7 @@ class OpenAICompletion(BaseLLM):
        from_task: Any | None = None,
        from_agent: Any | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | list[dict[str, Any]] | BaseModel:
    ) -> str | BaseModel:
        """Handle async streaming chat completion."""
        full_response = ""
        tool_calls: dict[int, dict[str, Any]] = {}
@@ -2200,16 +2142,51 @@ class OpenAICompletion(BaseLLM):
                    response_id=response_id_stream,
                )

        return self._finalize_streaming_response(
            full_response=full_response,
            tool_calls=tool_calls,
            usage_data=usage_data,
            params=params,
            available_functions=available_functions,
        self._track_token_usage_internal(usage_data)

        if tool_calls and available_functions:
            for call_data in tool_calls.values():
                function_name = call_data["name"]
                arguments = call_data["arguments"]

                if not function_name or not arguments:
                    continue

                if function_name not in available_functions:
                    logging.warning(
                        f"Function '{function_name}' not found in available functions"
                    )
                    continue

                try:
                    function_args = json.loads(arguments)
                except json.JSONDecodeError as e:
                    logging.error(f"Failed to parse streamed tool arguments: {e}")
                    continue

                result = self._handle_tool_execution(
                    function_name=function_name,
                    function_args=function_args,
                    available_functions=available_functions,
                    from_task=from_task,
                    from_agent=from_agent,
                )

                if result is not None:
                    return result

        full_response = self._apply_stop_words(full_response)

        self._emit_call_completed_event(
            response=full_response,
            call_type=LLMCallType.LLM_CALL,
            from_task=from_task,
            from_agent=from_agent,
            messages=params["messages"],
        )

        return full_response

    def supports_function_calling(self) -> bool:
        """Check if the model supports function calling."""
        return not self.is_o1_model
@@ -2263,18 +2240,11 @@ class OpenAICompletion(BaseLLM):
        """Extract token usage from OpenAI ChatCompletion or ChatCompletionChunk response."""
        if hasattr(response, "usage") and response.usage:
            usage = response.usage
            result = {
            return {
                "prompt_tokens": getattr(usage, "prompt_tokens", 0),
                "completion_tokens": getattr(usage, "completion_tokens", 0),
                "total_tokens": getattr(usage, "total_tokens", 0),
            }
            # Extract cached prompt tokens from prompt_tokens_details
            prompt_details = getattr(usage, "prompt_tokens_details", None)
            if prompt_details:
                result["cached_prompt_tokens"] = (
                    getattr(prompt_details, "cached_tokens", 0) or 0
                )
            return result
        return {"total_tokens": 0}

    def _format_messages(self, messages: str | list[LLMMessage]) -> list[LLMMessage]:
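A self-contained sketch of the prompt_tokens_details handling removed above: the details object is optional, and cached_tokens may itself be None (SimpleNamespace stands in for the SDK usage object):

from types import SimpleNamespace

usage = SimpleNamespace(
    prompt_tokens=100,
    completion_tokens=20,
    total_tokens=120,
    prompt_tokens_details=SimpleNamespace(cached_tokens=64),
)

result = {
    "prompt_tokens": getattr(usage, "prompt_tokens", 0),
    "completion_tokens": getattr(usage, "completion_tokens", 0),
    "total_tokens": getattr(usage, "total_tokens", 0),
}
details = getattr(usage, "prompt_tokens_details", None)
if details:
    # cached_tokens may be None, hence the trailing "or 0".
    result["cached_prompt_tokens"] = getattr(details, "cached_tokens", 0) or 0
assert result["cached_prompt_tokens"] == 64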
@@ -1,26 +1,13 @@
"""Memory module: unified Memory with LLM analysis and pluggable storage."""
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory


from crewai.memory.consolidation_flow import ConsolidationFlow
from crewai.memory.encoding_flow import EncodingFlow
from crewai.memory.memory_scope import MemoryScope, MemorySlice
from crewai.memory.unified_memory import Memory
from crewai.memory.types import (
    MemoryMatch,
    MemoryRecord,
    ScopeInfo,
    compute_composite_score,
    embed_text,
)

__all__ = [
    "ConsolidationFlow",
    "EncodingFlow",
    "Memory",
    "MemoryMatch",
    "MemoryRecord",
    "MemoryScope",
    "MemorySlice",
    "ScopeInfo",
    "compute_composite_score",
    "embed_text",
    "EntityMemory",
    "ExternalMemory",
    "LongTermMemory",
    "ShortTermMemory",
]
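After this change, user code importing from crewai.memory sees only the four legacy store classes; the unified-memory names above are gone:

from crewai.memory import (
    EntityMemory,
    ExternalMemory,
    LongTermMemory,
    ShortTermMemory,
)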
@@ -1,392 +0,0 @@
|
||||
"""LLM-powered analysis for memory save and recall."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
from crewai.memory.types import MemoryRecord, ScopeInfo
|
||||
from crewai.utilities.i18n import get_i18n
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ExtractedMetadata(BaseModel):
|
||||
"""Fixed schema for LLM-extracted metadata (OpenAI requires additionalProperties: false)."""
|
||||
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
|
||||
entities: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Entities (people, orgs, places) mentioned in the content.",
|
||||
)
|
||||
dates: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Dates or time references in the content.",
|
||||
)
|
||||
topics: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Topics or themes in the content.",
|
||||
)
|
||||
|
||||
|
||||
class MemoryAnalysis(BaseModel):
|
||||
"""LLM output for analyzing content before saving to memory."""
|
||||
|
||||
suggested_scope: str = Field(
|
||||
description="Best matching existing scope or new path (e.g. /company/decisions).",
|
||||
)
|
||||
categories: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Categories for the memory (prefer existing, add new if needed).",
|
||||
)
|
||||
importance: float = Field(
|
||||
default=0.5,
|
||||
ge=0.0,
|
||||
le=1.0,
|
||||
description="Importance score from 0.0 to 1.0.",
|
||||
)
|
||||
extracted_metadata: ExtractedMetadata = Field(
|
||||
default_factory=ExtractedMetadata,
|
||||
description="Entities, dates, topics extracted from the content.",
|
||||
)
|
||||
|
||||
|
||||
class QueryAnalysis(BaseModel):
|
||||
"""LLM output for analyzing a recall query."""
|
||||
|
||||
keywords: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Key entities or keywords for filtering.",
|
||||
)
|
||||
suggested_scopes: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Scope paths to search (subset of available scopes).",
|
||||
)
|
||||
complexity: str = Field(
|
||||
default="simple",
|
||||
description="One of 'simple' (single fact) or 'complex' (aggregation/reasoning).",
|
||||
)
|
||||
recall_queries: list[str] = Field(
|
||||
default_factory=list,
|
||||
description=(
|
||||
"1-3 short, targeted search phrases distilled from the query. "
|
||||
"Each should be a concise question or keyword phrase optimized "
|
||||
"for semantic vector search. If the query is already short and "
|
||||
"focused, return it as a single item."
|
||||
),
|
||||
)
|
||||
time_filter: str | None = Field(
|
||||
default=None,
|
||||
description=(
|
||||
"If the query references a specific time period (e.g. 'last week', "
|
||||
"'yesterday', 'in January'), return an ISO 8601 date string representing "
|
||||
"the earliest date that results should match (e.g. '2026-02-01'). "
|
||||
"Return null if no time constraint is implied."
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
class ExtractedMemories(BaseModel):
|
||||
"""LLM output for extracting discrete memories from raw content."""
|
||||
|
||||
memories: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="List of discrete, self-contained memory statements extracted from the content.",
|
||||
)
|
||||
|
||||
|
||||
class ConsolidationAction(BaseModel):
|
||||
"""A single action in a consolidation plan."""
|
||||
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
|
||||
action: str = Field(
|
||||
description="One of 'keep', 'update', or 'delete'.",
|
||||
)
|
||||
record_id: str = Field(
|
||||
description="ID of the existing record this action applies to.",
|
||||
)
|
||||
new_content: str | None = Field(
|
||||
default=None,
|
||||
description="Updated content text. Required when action is 'update'.",
|
||||
)
|
||||
reason: str = Field(
|
||||
default="",
|
||||
description="Brief reason for this action.",
|
||||
)
|
||||
|
||||
|
||||
class ConsolidationPlan(BaseModel):
|
||||
"""LLM output for consolidating new content with existing memories."""
|
||||
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
|
||||
actions: list[ConsolidationAction] = Field(
|
||||
default_factory=list,
|
||||
description="Actions to take on existing records (keep/update/delete).",
|
||||
)
|
||||
insert_new: bool = Field(
|
||||
default=True,
|
||||
description="Whether to also insert the new content as a separate record.",
|
||||
)
|
||||
insert_reason: str = Field(
|
||||
default="",
|
||||
description="Why the new content should or should not be inserted.",
|
||||
)
|
||||
|
||||
|
||||
def _get_prompt(key: str) -> str:
|
||||
"""Retrieve a memory prompt from the i18n translations.
|
||||
|
||||
Args:
|
||||
key: The prompt key under the "memory" section.
|
||||
|
||||
Returns:
|
||||
The prompt string.
|
||||
"""
|
||||
return get_i18n().memory(key)
|
||||
|
||||
|
||||
def extract_memories_from_content(content: str, llm: Any) -> list[str]:
|
||||
"""Use the LLM to extract discrete memory statements from raw content.
|
||||
|
||||
This is a pure helper: it does NOT store anything. Callers should call
|
||||
memory.remember() on each returned string to persist them.
|
||||
|
||||
On LLM failure, returns the full content as a single memory so callers
|
||||
still persist something rather than dropping the output.
|
||||
|
||||
Args:
|
||||
content: Raw text (e.g. task description + result dump).
|
||||
llm: The LLM instance to use.
|
||||
|
||||
Returns:
|
||||
List of short, self-contained memory statements (or [content] on failure).
|
||||
"""
|
||||
if not (content or "").strip():
|
||||
return []
|
||||
user = _get_prompt("extract_memories_user").format(content=content)
|
||||
messages = [
|
||||
{"role": "system", "content": _get_prompt("extract_memories_system")},
|
||||
{"role": "user", "content": user},
|
||||
]
|
||||
try:
|
||||
if getattr(llm, "supports_function_calling", lambda: False)():
|
||||
response = llm.call(messages, response_model=ExtractedMemories)
|
||||
if isinstance(response, ExtractedMemories):
|
||||
return response.memories
|
||||
return ExtractedMemories.model_validate(response).memories
|
||||
response = llm.call(messages)
|
||||
if isinstance(response, ExtractedMemories):
|
||||
return response.memories
|
||||
if isinstance(response, str):
|
||||
data = json.loads(response)
|
||||
return ExtractedMemories.model_validate(data).memories
|
||||
return ExtractedMemories.model_validate(response).memories
|
||||
except Exception as e:
|
||||
_logger.warning(
|
||||
"Memory extraction failed, storing full content as single memory: %s",
|
||||
e,
|
||||
exc_info=False,
|
||||
)
|
||||
return [content]
|
||||
|
||||
|
||||
async def aextract_memories_from_content(content: str, llm: Any) -> list[str]:
|
||||
"""Async variant of extract_memories_from_content."""
|
||||
return extract_memories_from_content(content, llm)
|
||||
|
||||
|
||||
def analyze_for_save(
|
||||
content: str,
|
||||
existing_scopes: list[str],
|
||||
existing_categories: list[str],
|
||||
llm: Any,
|
||||
) -> MemoryAnalysis:
|
||||
"""Use the LLM to infer scope, categories, importance, and metadata for a memory.
|
||||
|
||||
On LLM failure, returns safe defaults so remember() still persists the content.
|
||||
|
||||
Args:
|
||||
content: The memory content to analyze.
|
||||
existing_scopes: Current scope paths in the memory store.
|
||||
existing_categories: Current categories in use.
|
||||
llm: The LLM instance to use.
|
||||
|
||||
Returns:
|
||||
MemoryAnalysis with suggested_scope, categories, importance, extracted_metadata.
|
||||
"""
|
||||
user = _get_prompt("save_user").format(
|
||||
content=content,
|
||||
existing_scopes=existing_scopes or ["/"],
|
||||
existing_categories=existing_categories or [],
|
||||
)
|
||||
messages = [
|
||||
{"role": "system", "content": _get_prompt("save_system")},
|
||||
{"role": "user", "content": user},
|
||||
]
|
||||
try:
|
||||
if getattr(llm, "supports_function_calling", lambda: False)():
|
||||
response = llm.call(messages, response_model=MemoryAnalysis)
|
||||
if isinstance(response, MemoryAnalysis):
|
||||
return response
|
||||
return MemoryAnalysis.model_validate(response)
|
||||
response = llm.call(messages)
|
||||
if isinstance(response, MemoryAnalysis):
|
||||
return response
|
||||
if isinstance(response, str):
|
||||
data = json.loads(response)
|
||||
return MemoryAnalysis.model_validate(data)
|
||||
return MemoryAnalysis.model_validate(response)
|
||||
except Exception as e:
|
||||
_logger.warning(
|
||||
"Memory save analysis failed, using defaults (scope=/, importance=0.5): %s",
|
||||
e,
|
||||
exc_info=False,
|
||||
)
|
||||
return MemoryAnalysis(
|
||||
suggested_scope="/",
|
||||
categories=[],
|
||||
importance=0.5,
|
||||
extracted_metadata=ExtractedMetadata(),
|
||||
)
|
||||
|
||||
|
||||
async def aanalyze_for_save(
|
||||
content: str,
|
||||
existing_scopes: list[str],
|
||||
existing_categories: list[str],
|
||||
llm: Any,
|
||||
) -> MemoryAnalysis:
|
||||
"""Async variant of analyze_for_save."""
|
||||
# Fallback to sync if no acall
|
||||
return analyze_for_save(content, existing_scopes, existing_categories, llm)
|
||||
|
||||
|
||||
def analyze_query(
    query: str,
    available_scopes: list[str],
    scope_info: ScopeInfo | None,
    llm: Any,
) -> QueryAnalysis:
    """Use the LLM to analyze a recall query.

    On LLM failure, returns safe defaults so recall degrades to plain vector search.

    Args:
        query: The user's recall query.
        available_scopes: Scope paths that exist in the store.
        scope_info: Optional info about the current scope.
        llm: The LLM instance to use.

    Returns:
        QueryAnalysis with keywords, suggested_scopes, complexity, recall_queries, time_filter.
    """
    scope_desc = ""
    if scope_info:
        scope_desc = f"Current scope has {scope_info.record_count} records, categories: {scope_info.categories}"
    user = _get_prompt("query_user").format(
        query=query,
        available_scopes=available_scopes or ["/"],
        scope_desc=scope_desc,
    )
    messages = [
        {"role": "system", "content": _get_prompt("query_system")},
        {"role": "user", "content": user},
    ]
    try:
        if getattr(llm, "supports_function_calling", lambda: False)():
            response = llm.call(messages, response_model=QueryAnalysis)
            if isinstance(response, QueryAnalysis):
                return response
            return QueryAnalysis.model_validate(response)
        response = llm.call(messages)
        if isinstance(response, QueryAnalysis):
            return response
        if isinstance(response, str):
            data = json.loads(response)
            return QueryAnalysis.model_validate(data)
        return QueryAnalysis.model_validate(response)
    except Exception as e:
        _logger.warning(
            "Query analysis failed, using defaults (complexity=simple): %s",
            e,
            exc_info=False,
        )
        scopes = (available_scopes or ["/"])[:5]
        return QueryAnalysis(
            keywords=[],
            suggested_scopes=scopes,
            complexity="simple",
            recall_queries=[query],
        )


def analyze_for_consolidation(
    new_content: str,
    existing_records: list[MemoryRecord],
    llm: Any,
) -> ConsolidationPlan:
    """Use the LLM to decide how to consolidate new content with existing memories.

    On LLM failure, returns a safe default (insert_new=True, no actions) so save
    is never blocked.

    Args:
        new_content: The new content to store.
        existing_records: Existing records that are semantically similar.
        llm: The LLM instance to use.

    Returns:
        ConsolidationPlan with actions per record and whether to insert the new content.
    """
    if not existing_records:
        return ConsolidationPlan(actions=[], insert_new=True)
    records_lines = []
    for r in existing_records:
        created = r.created_at.isoformat() if r.created_at else ""
        records_lines.append(
            f"- id={r.id} | scope={r.scope} | importance={r.importance:.2f} | created={created}\n content: {r.content[:200]}{'...' if len(r.content) > 200 else ''}"
        )
    user = _get_prompt("consolidation_user").format(
        new_content=new_content,
        records_summary="\n\n".join(records_lines),
    )
    messages = [
        {"role": "system", "content": _get_prompt("consolidation_system")},
        {"role": "user", "content": user},
    ]
    try:
        if getattr(llm, "supports_function_calling", lambda: False)():
            response = llm.call(messages, response_model=ConsolidationPlan)
            if isinstance(response, ConsolidationPlan):
                return response
            return ConsolidationPlan.model_validate(response)
        response = llm.call(messages)
        if isinstance(response, ConsolidationPlan):
            return response
        if isinstance(response, str):
            data = json.loads(response)
            return ConsolidationPlan.model_validate(data)
        return ConsolidationPlan.model_validate(response)
    except Exception as e:
        _logger.warning(
            "Consolidation analysis failed, defaulting to insert only: %s",
            e,
            exc_info=False,
        )
        return ConsolidationPlan(actions=[], insert_new=True)


async def aanalyze_query(
    query: str,
    available_scopes: list[str],
    scope_info: ScopeInfo | None,
    llm: Any,
) -> QueryAnalysis:
    """Async variant of analyze_query."""
    return analyze_query(query, available_scopes, scope_info, llm)
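A minimal usage sketch (not part of the diff): a stub LLM without function calling exercises the JSON-string branch of analyze_query. The stub class and its payload are hypothetical; the import path follows the crewai.memory.analyze imports used later in this diff.

# Hypothetical stub LLM; a real crewai LLM instance would be passed instead.
from crewai.memory.analyze import analyze_query


class StubLLM:
    def supports_function_calling(self) -> bool:
        return False  # forces the plain-call branch that parses a JSON string

    def call(self, messages):
        # A real LLM returns JSON matching the QueryAnalysis schema.
        return (
            '{"keywords": ["deadline"], "suggested_scopes": ["/"], '
            '"complexity": "simple", "recall_queries": ["project deadline"]}'
        )


analysis = analyze_query(
    query="When is the project deadline?",
    available_scopes=["/", "/agent/1"],
    scope_info=None,
    llm=StubLLM(),
)
# On any LLM failure the same call returns the safe default instead of raising.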
@@ -1,166 +0,0 @@
"""Consolidation flow: decide insert/update/delete when new content is similar to existing memories."""

from __future__ import annotations

from datetime import datetime
from typing import Any
from uuid import uuid4

from pydantic import BaseModel, Field

from crewai.flow.flow import Flow, listen, router, start
from crewai.memory.analyze import (
    ConsolidationPlan,
    analyze_for_consolidation,
)
from crewai.memory.types import MemoryConfig, MemoryRecord, embed_text


class ConsolidationState(BaseModel):
    """State for the consolidation flow."""

    id: str = Field(default_factory=lambda: str(uuid4()))
    new_content: str = ""
    new_embedding: list[float] = Field(default_factory=list)
    scope: str = "/"
    categories: list[str] = Field(default_factory=list)
    metadata: dict[str, Any] = Field(default_factory=dict)
    importance: float = 0.5
    similar_records: list[MemoryRecord] = Field(default_factory=list)
    top_similarity: float = 0.0
    plan: ConsolidationPlan | None = None
    result_record: MemoryRecord | None = None
    records_updated: int = 0
    records_deleted: int = 0


class ConsolidationFlow(Flow[ConsolidationState]):
    """Flow that gates and runs memory consolidation on remember()."""

    _skip_auto_memory: bool = True

    initial_state = ConsolidationState

    def __init__(
        self,
        storage: Any,
        llm: Any,
        embedder: Any,
        config: MemoryConfig | None = None,
    ) -> None:
        super().__init__()
        self._storage = storage
        self._llm = llm
        self._embedder = embedder
        self._config = config or MemoryConfig()

    @start()
    def find_similar(self) -> list[MemoryRecord]:
        """Search for existing records similar to the new content (cheap pre-check)."""
        if not self.state.new_embedding:
            self.state.top_similarity = 0.0
            return []
        scope_prefix = self.state.scope if self.state.scope.strip("/") else None
        raw = self._storage.search(
            self.state.new_embedding,
            scope_prefix=scope_prefix,
            categories=None,
            limit=self._config.consolidation_limit,
            min_score=0.0,
        )
        records = [r for r, _ in raw]
        self.state.similar_records = records
        if raw:
            _, top_score = raw[0]
            self.state.top_similarity = float(top_score)
        else:
            self.state.top_similarity = 0.0
        return records

    @router(find_similar)
    def check_threshold(self) -> str:
        """Gate: only run LLM consolidation when similarity is above threshold."""
        if self.state.top_similarity < self._config.consolidation_threshold:
            return "insert_only"
        return "needs_consolidation"

    @listen("needs_consolidation")
    def analyze_conflicts(self) -> ConsolidationPlan:
        """Run LLM to decide keep/update/delete per record and whether to insert new."""
        plan = analyze_for_consolidation(
            self.state.new_content,
            self.state.similar_records,
            self._llm,
        )
        self.state.plan = plan
        return plan

    @listen("insert_only")
    def insert_only_path(self) -> None:
        """Fast path: no similar records above threshold; plan stays None."""
        return None

    @listen(insert_only_path)
    @listen(analyze_conflicts)
    def execute_plan(self) -> MemoryRecord | None:
        """Apply consolidation plan or insert new record; set state.result_record."""
        now = datetime.utcnow()
        plan = self.state.plan
        record_by_id = {r.id: r for r in self.state.similar_records}

        if plan is not None:
            for action in plan.actions:
                if action.action == "delete":
                    self._storage.delete(record_ids=[action.record_id])
                    self.state.records_deleted += 1
                elif action.action == "update" and action.new_content:
                    existing = record_by_id.get(action.record_id)
                    if existing is not None:
                        new_embedding = embed_text(
                            self._embedder, action.new_content
                        )
                        updated = MemoryRecord(
                            id=existing.id,
                            content=action.new_content,
                            scope=existing.scope,
                            categories=existing.categories,
                            metadata=existing.metadata,
                            importance=existing.importance,
                            created_at=existing.created_at,
                            last_accessed=now,
                            embedding=new_embedding if new_embedding else existing.embedding,
                        )
                        self._storage.update(updated)
                        self.state.records_updated += 1
                        record_by_id[action.record_id] = updated

            if not plan.insert_new:
                first_updated = next(
                    (
                        record_by_id[a.record_id]
                        for a in plan.actions
                        if a.action == "update" and a.record_id in record_by_id
                    ),
                    None,
                )
                if first_updated is not None:
                    self.state.result_record = first_updated
                    return first_updated
                if self.state.similar_records:
                    self.state.result_record = self.state.similar_records[0]
                    return self.state.similar_records[0]

        new_record = MemoryRecord(
            content=self.state.new_content,
            scope=self.state.scope,
            categories=self.state.categories,
            metadata=self.state.metadata,
            importance=self.state.importance,
            embedding=self.state.new_embedding if self.state.new_embedding else None,
        )
        if not new_record.embedding:
            new_embedding = embed_text(self._embedder, self.state.new_content)
            new_record = new_record.model_copy(update={"embedding": new_embedding if new_embedding else None})
        self._storage.save([new_record])
        self.state.result_record = new_record
        return new_record
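A sketch of driving ConsolidationFlow directly, mirroring how EncodingFlow invokes it later in this diff; storage, llm, and embedder are stand-ins for real backends.

# storage/llm/embedder are assumed to be constructed elsewhere.
cflow = ConsolidationFlow(storage=storage, llm=llm, embedder=embedder)
cflow.kickoff(
    inputs={
        "new_content": "User prefers morning meetings",
        "new_embedding": embed_text(embedder, "User prefers morning meetings"),
        "scope": "/agent/1",
        "categories": ["preferences"],
        "metadata": {},
        "importance": 0.7,
    },
)
# Below the similarity threshold this is a plain insert; above it, the LLM
# plan may update or delete near-duplicates before deciding on the insert.
record = cflow.state.result_record
stats = (cflow.state.records_updated, cflow.state.records_deleted)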
0    lib/crewai/src/crewai/memory/contextual/__init__.py    Normal file
254  lib/crewai/src/crewai/memory/contextual/contextual_memory.py    Normal file
@@ -0,0 +1,254 @@
from __future__ import annotations

import asyncio
from typing import TYPE_CHECKING

from crewai.memory import (
    EntityMemory,
    ExternalMemory,
    LongTermMemory,
    ShortTermMemory,
)


if TYPE_CHECKING:
    from crewai.agent import Agent
    from crewai.task import Task


class ContextualMemory:
    """Aggregates and retrieves context from multiple memory sources."""

    def __init__(
        self,
        stm: ShortTermMemory,
        ltm: LongTermMemory,
        em: EntityMemory,
        exm: ExternalMemory,
        agent: Agent | None = None,
        task: Task | None = None,
    ) -> None:
        self.stm = stm
        self.ltm = ltm
        self.em = em
        self.exm = exm
        self.agent = agent
        self.task = task

        if self.stm is not None:
            self.stm.agent = self.agent
            self.stm.task = self.task
        if self.ltm is not None:
            self.ltm.agent = self.agent
            self.ltm.task = self.task
        if self.em is not None:
            self.em.agent = self.agent
            self.em.task = self.task
        if self.exm is not None:
            self.exm.agent = self.agent
            self.exm.task = self.task

    def build_context_for_task(self, task: Task, context: str) -> str:
        """Build contextual information for a task synchronously.

        Args:
            task: The task to build context for.
            context: Additional context string.

        Returns:
            Formatted context string from all memory sources.
        """
        query = f"{task.description} {context}".strip()

        if query == "":
            return ""

        context_parts = [
            self._fetch_ltm_context(task.description),
            self._fetch_stm_context(query),
            self._fetch_entity_context(query),
            self._fetch_external_context(query),
        ]
        return "\n".join(filter(None, context_parts))

    async def abuild_context_for_task(self, task: Task, context: str) -> str:
        """Build contextual information for a task asynchronously.

        Args:
            task: The task to build context for.
            context: Additional context string.

        Returns:
            Formatted context string from all memory sources.
        """
        query = f"{task.description} {context}".strip()

        if query == "":
            return ""

        # Fetch all contexts concurrently
        results = await asyncio.gather(
            self._afetch_ltm_context(task.description),
            self._afetch_stm_context(query),
            self._afetch_entity_context(query),
            self._afetch_external_context(query),
        )

        return "\n".join(filter(None, results))

    def _fetch_stm_context(self, query: str) -> str:
        """Fetches recent relevant insights from STM related to the task's
        description and expected_output, formatted as bullet points.
        """
        if self.stm is None:
            return ""

        stm_results = self.stm.search(query)
        formatted_results = "\n".join(
            [f"- {result['content']}" for result in stm_results]
        )
        return f"Recent Insights:\n{formatted_results}" if stm_results else ""

    def _fetch_ltm_context(self, task: str) -> str | None:
        """Fetches historical data or insights from LTM that are relevant to the
        task's description and expected_output, formatted as bullet points.
        """
        if self.ltm is None:
            return ""

        ltm_results = self.ltm.search(task, latest_n=2)
        if not ltm_results:
            return None

        formatted_results = [
            suggestion
            for result in ltm_results
            for suggestion in result["metadata"]["suggestions"]
        ]
        formatted_results = list(dict.fromkeys(formatted_results))
        formatted_results = "\n".join([f"- {result}" for result in formatted_results])  # type: ignore # Incompatible types in assignment (expression has type "str", variable has type "list[str]")

        return f"Historical Data:\n{formatted_results}" if ltm_results else ""

    def _fetch_entity_context(self, query: str) -> str:
        """Fetches relevant entity information from Entity Memory related to the
        task's description and expected_output, formatted as bullet points.
        """
        if self.em is None:
            return ""

        em_results = self.em.search(query)
        formatted_results = "\n".join(
            [f"- {result['content']}" for result in em_results]
        )
        return f"Entities:\n{formatted_results}" if em_results else ""

    def _fetch_external_context(self, query: str) -> str:
        """Fetches and formats relevant information from External Memory.

        Args:
            query (str): The search query to find relevant information.

        Returns:
            str: Formatted information as bullet points, or an empty string if none found.
        """
        if self.exm is None:
            return ""

        external_memories = self.exm.search(query)

        if not external_memories:
            return ""

        formatted_memories = "\n".join(
            f"- {result['content']}" for result in external_memories
        )
        return f"External memories:\n{formatted_memories}"

    async def _afetch_stm_context(self, query: str) -> str:
        """Fetch recent relevant insights from STM asynchronously.

        Args:
            query: The search query.

        Returns:
            Formatted insights as bullet points, or empty string if none found.
        """
        if self.stm is None:
            return ""

        stm_results = await self.stm.asearch(query)
        formatted_results = "\n".join(
            [f"- {result['content']}" for result in stm_results]
        )
        return f"Recent Insights:\n{formatted_results}" if stm_results else ""

    async def _afetch_ltm_context(self, task: str) -> str | None:
        """Fetch historical data from LTM asynchronously.

        Args:
            task: The task description to search for.

        Returns:
            Formatted historical data as bullet points, or None if none found.
        """
        if self.ltm is None:
            return ""

        ltm_results = await self.ltm.asearch(task, latest_n=2)
        if not ltm_results:
            return None

        formatted_results = [
            suggestion
            for result in ltm_results
            for suggestion in result["metadata"]["suggestions"]
        ]
        formatted_results = list(dict.fromkeys(formatted_results))
        formatted_results = "\n".join([f"- {result}" for result in formatted_results])  # type: ignore # Incompatible types in assignment (expression has type "str", variable has type "list[str]")

        return f"Historical Data:\n{formatted_results}" if ltm_results else ""

    async def _afetch_entity_context(self, query: str) -> str:
        """Fetch relevant entity information asynchronously.

        Args:
            query: The search query.

        Returns:
            Formatted entity information as bullet points, or empty string if none found.
        """
        if self.em is None:
            return ""

        em_results = await self.em.asearch(query)
        formatted_results = "\n".join(
            [f"- {result['content']}" for result in em_results]
        )
        return f"Entities:\n{formatted_results}" if em_results else ""

    async def _afetch_external_context(self, query: str) -> str:
        """Fetch relevant information from External Memory asynchronously.

        Args:
            query: The search query.

        Returns:
            Formatted information as bullet points, or empty string if none found.
        """
        if self.exm is None:
            return ""

        external_memories = await self.exm.asearch(query)

        if not external_memories:
            return ""

        formatted_memories = "\n".join(
            f"- {result['content']}" for result in external_memories
        )
        return f"External memories:\n{formatted_memories}"
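A wiring sketch (not part of the diff); the memory constructors are simplified, ShortTermMemory's signature is assumed to take no required arguments here, and my_agent/my_task are placeholders.

# Simplified wiring; real construction may need crew/embedder configuration.
contextual = ContextualMemory(
    stm=ShortTermMemory(),
    ltm=LongTermMemory(),
    em=EntityMemory(),
    exm=ExternalMemory(),
    agent=my_agent,  # placeholder Agent
    task=my_task,    # placeholder Task
)
context = contextual.build_context_for_task(my_task, context="quarterly report")
# context stacks "Historical Data", "Recent Insights", "Entities", and
# "External memories" sections, skipping any source that returned nothing.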
@@ -1,216 +0,0 @@
"""Encoding flow: full save pipeline from analysis through consolidation.

Orchestrates the encoding side of memory:
1. Decide whether LLM analysis is needed (router)
2. If yes: infer scope, categories, importance, metadata via LLM
3. If no: use caller-provided values with config defaults
4. Embed the content
5. Delegate to ConsolidationFlow for conflict resolution and storage
6. Finalize: ensure a record is persisted
"""

from __future__ import annotations

from typing import Any
from uuid import uuid4

from pydantic import BaseModel, Field

from crewai.flow.flow import Flow, listen, or_, router, start
from crewai.memory.analyze import MemoryAnalysis, analyze_for_save
from crewai.memory.types import MemoryConfig, MemoryRecord, embed_text


class EncodingState(BaseModel):
    """State for the encoding flow."""

    id: str = Field(default_factory=lambda: str(uuid4()))
    content: str = ""

    # Caller-provided values (None = infer via LLM)
    scope: str | None = None
    categories: list[str] | None = None
    metadata: dict[str, Any] | None = None
    importance: float | None = None

    # Resolved values (populated by analysis or defaults step)
    resolved_scope: str = "/"
    resolved_categories: list[str] = Field(default_factory=list)
    resolved_metadata: dict[str, Any] = Field(default_factory=dict)
    resolved_importance: float = 0.5

    # Embedding and result
    embedding: list[float] = Field(default_factory=list)
    result_record: MemoryRecord | None = None
    consolidation_stats: dict[str, int] = Field(default_factory=dict)

    # Internal routing flag
    needs_analysis: bool = False


class EncodingFlow(Flow[EncodingState]):
    """Flow that owns the full encoding pipeline for memory.remember().

    Routes between LLM-powered deep analysis and a fast skip path,
    embeds the content, then delegates to ConsolidationFlow for
    conflict resolution and storage.
    """

    _skip_auto_memory: bool = True

    initial_state = EncodingState

    def __init__(
        self,
        storage: Any,
        llm: Any,
        embedder: Any,
        config: MemoryConfig | None = None,
    ) -> None:
        super().__init__()
        self._storage = storage
        self._llm = llm
        self._embedder = embedder
        self._config = config or MemoryConfig()

    # ------------------------------------------------------------------
    # Step 1: Decide whether LLM analysis is needed
    # ------------------------------------------------------------------

    @start()
    def check_inputs(self) -> bool:
        """Check if LLM analysis is needed based on caller-provided fields."""
        self.state.needs_analysis = (
            self.state.scope is None
            or self.state.categories is None
            or self.state.importance is None
            or (self.state.metadata is None and self.state.scope is None)
        )
        return self.state.needs_analysis

    @router(check_inputs)
    def route_analysis(self) -> str:
        """Route to deep analysis or skip based on whether fields are provided."""
        if self.state.needs_analysis:
            return "deep_analysis"
        return "skip_analysis"

    # ------------------------------------------------------------------
    # Step 2a: Deep analysis path (LLM infers scope/categories/importance)
    # ------------------------------------------------------------------

    @listen("deep_analysis")
    def analyze_with_llm(self) -> MemoryAnalysis:
        """Call analyze_for_save to infer scope, categories, importance, metadata."""
        existing_scopes = self._storage.list_scopes("/")
        if not existing_scopes:
            existing_scopes = ["/"]
        existing_categories = list(
            self._storage.list_categories(scope_prefix=None).keys()
        )
        analysis = analyze_for_save(
            self.state.content,
            existing_scopes,
            existing_categories,
            self._llm,
        )
        self.state.resolved_scope = self.state.scope or analysis.suggested_scope or "/"
        self.state.resolved_categories = (
            self.state.categories
            if self.state.categories is not None
            else analysis.categories
        )
        self.state.resolved_importance = (
            self.state.importance
            if self.state.importance is not None
            else analysis.importance
        )
        self.state.resolved_metadata = dict(
            self.state.metadata or {},
            **(
                analysis.extracted_metadata.model_dump()
                if analysis.extracted_metadata
                else {}
            ),
        )
        return analysis

    # ------------------------------------------------------------------
    # Step 2b: Skip path (use caller values with config defaults)
    # ------------------------------------------------------------------

    @listen("skip_analysis")
    def use_defaults(self) -> str:
        """Populate resolved fields from caller values and config defaults."""
        self.state.resolved_scope = self.state.scope or "/"
        self.state.resolved_categories = self.state.categories or []
        self.state.resolved_metadata = self.state.metadata or {}
        self.state.resolved_importance = (
            self.state.importance
            if self.state.importance is not None
            else self._config.default_importance
        )
        return "defaults_applied"

    # ------------------------------------------------------------------
    # Step 3: Embed (both paths converge here)
    # ------------------------------------------------------------------

    @listen(or_(analyze_with_llm, use_defaults))
    def embed_content(self) -> list[float]:
        """Embed the content for vector storage and similarity search."""
        self.state.embedding = embed_text(self._embedder, self.state.content)
        return self.state.embedding

    # ------------------------------------------------------------------
    # Step 4: Consolidate (delegates to ConsolidationFlow)
    # ------------------------------------------------------------------

    @listen(embed_content)
    def consolidate(self) -> MemoryRecord | None:
        """Run ConsolidationFlow for conflict resolution and storage."""
        from crewai.memory.consolidation_flow import ConsolidationFlow

        cflow = ConsolidationFlow(
            storage=self._storage,
            llm=self._llm,
            embedder=self._embedder,
            config=self._config,
        )
        cflow.kickoff(
            inputs={
                "new_content": self.state.content,
                "new_embedding": self.state.embedding,
                "scope": self.state.resolved_scope,
                "categories": self.state.resolved_categories,
                "metadata": self.state.resolved_metadata,
                "importance": self.state.resolved_importance,
            },
        )
        self.state.result_record = cflow.state.result_record
        self.state.consolidation_stats = {
            "records_updated": cflow.state.records_updated,
            "records_deleted": cflow.state.records_deleted,
        }
        return self.state.result_record

    # ------------------------------------------------------------------
    # Step 5: Finalize (fallback save if consolidation didn't persist)
    # ------------------------------------------------------------------

    @listen(consolidate)
    def finalize(self) -> MemoryRecord:
        """Ensure a record is persisted. Fallback if ConsolidationFlow returned None."""
        if self.state.result_record is not None:
            return self.state.result_record
        record = MemoryRecord(
            content=self.state.content,
            scope=self.state.resolved_scope,
            categories=self.state.resolved_categories,
            metadata=self.state.resolved_metadata,
            importance=self.state.resolved_importance,
            embedding=self.state.embedding if self.state.embedding else None,
        )
        self._storage.save([record])
        self.state.result_record = record
        return record
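A sketch of a full remember() pass through EncodingFlow. Supplying scope, categories, and importance up front makes check_inputs route to skip_analysis; passing None for any of them triggers the LLM route. storage, llm, and embedder are stand-ins.

eflow = EncodingFlow(storage=storage, llm=llm, embedder=embedder)
eflow.kickoff(
    inputs={
        "content": "The API rate limit is 100 requests per minute",
        "scope": "/project/api",       # provided, so no LLM inference needed
        "categories": ["constraints"],
        "metadata": {},
        "importance": 0.8,
    },
)
record = eflow.state.result_record       # finalize() guarantees this is set
stats = eflow.state.consolidation_stats  # {"records_updated": ..., "records_deleted": ...}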
0    lib/crewai/src/crewai/memory/entity/__init__.py    Normal file
404  lib/crewai/src/crewai/memory/entity/entity_memory.py    Normal file
@@ -0,0 +1,404 @@
import time
from typing import Any

from pydantic import PrivateAttr

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemoryQueryStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
    MemorySaveStartedEvent,
)
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.rag_storage import RAGStorage


class EntityMemory(Memory):
    """
    EntityMemory class for managing structured information about entities
    and their relationships using SQLite storage.
    Inherits from the Memory class.
    """

    _memory_provider: str | None = PrivateAttr()

    def __init__(
        self,
        crew: Any = None,
        embedder_config: Any = None,
        storage: Any = None,
        path: str | None = None,
    ) -> None:
        memory_provider = None
        if embedder_config and isinstance(embedder_config, dict):
            memory_provider = embedder_config.get("provider")

        if memory_provider == "mem0":
            try:
                from crewai.memory.storage.mem0_storage import Mem0Storage
            except ImportError as e:
                raise ImportError(
                    "Mem0 is not installed. Please install it with `pip install mem0ai`."
                ) from e
            config = (
                embedder_config.get("config")
                if embedder_config and isinstance(embedder_config, dict)
                else None
            )
            storage = Mem0Storage(type="short_term", crew=crew, config=config)  # type: ignore[no-untyped-call]
        else:
            storage = (
                storage
                if storage
                else RAGStorage(
                    type="entities",
                    allow_reset=True,
                    embedder_config=embedder_config,
                    crew=crew,
                    path=path,
                )
            )

        super().__init__(storage=storage)
        self._memory_provider = memory_provider

    def save(
        self,
        value: EntityMemoryItem | list[EntityMemoryItem],
        metadata: dict[str, Any] | None = None,
    ) -> None:
        """Saves one or more entity items into the SQLite storage.

        Args:
            value: Single EntityMemoryItem or list of EntityMemoryItems to save.
            metadata: Optional metadata dict (included for supertype compatibility but not used).

        Notes:
            The metadata parameter is included to satisfy the supertype signature but is not
            used - entity metadata is extracted from the EntityMemoryItem objects themselves.
        """
        if not value:
            return

        items = value if isinstance(value, list) else [value]
        is_batch = len(items) > 1

        metadata = {"entity_count": len(items)} if is_batch else items[0].metadata
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                metadata=metadata,
                source_type="entity_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        saved_count = 0
        errors = []

        def save_single_item(item: EntityMemoryItem) -> tuple[bool, str | None]:
            """Save a single item and return success status."""
            try:
                if self._memory_provider == "mem0":
                    data = f"""
                    Remember details about the following entity:
                    Name: {item.name}
                    Type: {item.type}
                    Entity Description: {item.description}
                    """
                else:
                    data = f"{item.name}({item.type}): {item.description}"

                super(EntityMemory, self).save(data, item.metadata)
                return True, None
            except Exception as e:
                return False, f"{item.name}: {e!s}"

        try:
            for item in items:
                success, error = save_single_item(item)
                if success:
                    saved_count += 1
                else:
                    errors.append(error)

            if is_batch:
                emit_value = f"Saved {saved_count} entities"
                metadata = {"entity_count": saved_count, "errors": errors}
            else:
                emit_value = f"{items[0].name}({items[0].type}): {items[0].description}"
                metadata = items[0].metadata

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=emit_value,
                    metadata=metadata,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="entity_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )

            if errors:
                raise Exception(
                    f"Partial save: {len(errors)} failed out of {len(items)}"
                )

        except Exception as e:
            fail_metadata = (
                {"entity_count": len(items), "saved": saved_count}
                if is_batch
                else items[0].metadata
            )
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    metadata=fail_metadata,
                    error=str(e),
                    source_type="entity_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
            raise

    def search(
        self,
        query: str,
        limit: int = 5,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search entity memory for relevant entries.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching memory entries.
        """
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=query,
                limit=limit,
                score_threshold=score_threshold,
                source_type="entity_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            results = super().search(
                query=query, limit=limit, score_threshold=score_threshold
            )

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=score_threshold,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="entity_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )

            return results
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=score_threshold,
                    error=str(e),
                    source_type="entity_memory",
                ),
            )
            raise

    async def asave(
        self,
        value: EntityMemoryItem | list[EntityMemoryItem],
        metadata: dict[str, Any] | None = None,
    ) -> None:
        """Save entity items asynchronously.

        Args:
            value: Single EntityMemoryItem or list of EntityMemoryItems to save.
            metadata: Optional metadata dict (not used, for signature compatibility).
        """
        if not value:
            return

        items = value if isinstance(value, list) else [value]
        is_batch = len(items) > 1

        metadata = {"entity_count": len(items)} if is_batch else items[0].metadata
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                metadata=metadata,
                source_type="entity_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        saved_count = 0
        errors: list[str | None] = []

        async def save_single_item(item: EntityMemoryItem) -> tuple[bool, str | None]:
            """Save a single item asynchronously."""
            try:
                if self._memory_provider == "mem0":
                    data = f"""
                    Remember details about the following entity:
                    Name: {item.name}
                    Type: {item.type}
                    Entity Description: {item.description}
                    """
                else:
                    data = f"{item.name}({item.type}): {item.description}"

                await super(EntityMemory, self).asave(data, item.metadata)
                return True, None
            except Exception as e:
                return False, f"{item.name}: {e!s}"

        try:
            for item in items:
                success, error = await save_single_item(item)
                if success:
                    saved_count += 1
                else:
                    errors.append(error)

            if is_batch:
                emit_value = f"Saved {saved_count} entities"
                metadata = {"entity_count": saved_count, "errors": errors}
            else:
                emit_value = f"{items[0].name}({items[0].type}): {items[0].description}"
                metadata = items[0].metadata

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=emit_value,
                    metadata=metadata,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="entity_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )

            if errors:
                raise Exception(
                    f"Partial save: {len(errors)} failed out of {len(items)}"
                )

        except Exception as e:
            fail_metadata = (
                {"entity_count": len(items), "saved": saved_count}
                if is_batch
                else items[0].metadata
            )
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    metadata=fail_metadata,
                    error=str(e),
                    source_type="entity_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
            raise

    async def asearch(
        self,
        query: str,
        limit: int = 5,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search entity memory asynchronously.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching memory entries.
        """
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=query,
                limit=limit,
                score_threshold=score_threshold,
                source_type="entity_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            results = await super().asearch(
                query=query, limit=limit, score_threshold=score_threshold
            )

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=score_threshold,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="entity_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )

            return results
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=score_threshold,
                    error=str(e),
                    source_type="entity_memory",
                ),
            )
            raise

    def reset(self) -> None:
        try:
            self.storage.reset()
        except Exception as e:
            raise Exception(
                f"An error occurred while resetting the entity memory: {e}"
            ) from e
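A save/search sketch using EntityMemoryItem (defined in the next file of this diff); the embedder_config payload is a placeholder.

em = EntityMemory(embedder_config={"provider": "openai", "config": {}})  # placeholder config
em.save(
    EntityMemoryItem(
        name="Acme Corp",
        type="organization",
        description="Client for the Q3 rollout",
        relationships="works with: research team",
    )
)
# A batch save takes a list and raises on partial failure after emitting events.
hits = em.search("Who is the client?", limit=3, score_threshold=0.5)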
12   lib/crewai/src/crewai/memory/entity/entity_memory_item.py    Normal file
@@ -0,0 +1,12 @@
class EntityMemoryItem:
    def __init__(
        self,
        name: str,
        type: str,
        description: str,
        relationships: str,
    ):
        self.name = name
        self.type = type
        self.description = description
        self.metadata = {"relationships": relationships}
0    lib/crewai/src/crewai/memory/external/__init__.py    vendored    Normal file
301  lib/crewai/src/crewai/memory/external/external_memory.py    vendored    Normal file
@@ -0,0 +1,301 @@
from __future__ import annotations

import time
from typing import TYPE_CHECKING, Any

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemoryQueryStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
    MemorySaveStartedEvent,
)
from crewai.memory.external.external_memory_item import ExternalMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.interface import Storage
from crewai.rag.embeddings.types import ProviderSpec


if TYPE_CHECKING:
    from crewai.memory.storage.mem0_storage import Mem0Storage


class ExternalMemory(Memory):
    def __init__(self, storage: Storage | None = None, **data: Any):
        super().__init__(storage=storage, **data)

    @staticmethod
    def _configure_mem0(crew: Any, config: dict[str, Any]) -> Mem0Storage:
        from crewai.memory.storage.mem0_storage import Mem0Storage

        return Mem0Storage(type="external", crew=crew, config=config)  # type: ignore[no-untyped-call]

    @staticmethod
    def external_supported_storages() -> dict[str, Any]:
        return {
            "mem0": ExternalMemory._configure_mem0,
        }

    @staticmethod
    def create_storage(
        crew: Any, embedder_config: dict[str, Any] | ProviderSpec | None
    ) -> Storage:
        if not embedder_config:
            raise ValueError("embedder_config is required")

        if "provider" not in embedder_config:
            raise ValueError("embedder_config must include a 'provider' key")

        provider = embedder_config["provider"]
        supported_storages = ExternalMemory.external_supported_storages()
        if provider not in supported_storages:
            raise ValueError(f"Provider {provider} not supported")

        storage: Storage = supported_storages[provider](
            crew, embedder_config.get("config", {})
        )
        return storage

    def save(
        self,
        value: Any,
        metadata: dict[str, Any] | None = None,
    ) -> None:
        """Saves a value into the external storage."""
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                value=value,
                metadata=metadata,
                source_type="external_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            item = ExternalMemoryItem(
                value=value,
                metadata=metadata,
                agent=self.agent.role if self.agent else None,
            )
            super().save(value=item.value, metadata=item.metadata)

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=value,
                    metadata=metadata,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="external_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    value=value,
                    metadata=metadata,
                    error=str(e),
                    source_type="external_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
            raise

    def search(
        self,
        query: str,
        limit: int = 5,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search external memory for relevant entries.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching memory entries.
        """
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=query,
                limit=limit,
                score_threshold=score_threshold,
                source_type="external_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            results = super().search(
                query=query, limit=limit, score_threshold=score_threshold
            )

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=score_threshold,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="external_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )

            return results
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=score_threshold,
                    error=str(e),
                    source_type="external_memory",
                ),
            )
            raise

    async def asave(
        self,
        value: Any,
        metadata: dict[str, Any] | None = None,
    ) -> None:
        """Save a value to external memory asynchronously.

        Args:
            value: The value to save.
            metadata: Optional metadata to associate with the value.
        """
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                value=value,
                metadata=metadata,
                source_type="external_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            item = ExternalMemoryItem(
                value=value,
                metadata=metadata,
                agent=self.agent.role if self.agent else None,
            )
            await super().asave(value=item.value, metadata=item.metadata)

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=value,
                    metadata=metadata,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="external_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    value=value,
                    metadata=metadata,
                    error=str(e),
                    source_type="external_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
            raise

    async def asearch(
        self,
        query: str,
        limit: int = 5,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search external memory asynchronously.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching memory entries.
        """
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=query,
                limit=limit,
                score_threshold=score_threshold,
                source_type="external_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            results = await super().asearch(
                query=query, limit=limit, score_threshold=score_threshold
            )

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=score_threshold,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="external_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )

            return results
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=score_threshold,
                    error=str(e),
                    source_type="external_memory",
                ),
            )
            raise

    def reset(self) -> None:
        self.storage.reset()

    def set_crew(self, crew: Any) -> ExternalMemory:
        super().set_crew(crew)

        if not self.storage:
            self.storage = self.create_storage(crew, self.embedder_config)  # type: ignore[arg-type]

        return self
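A sketch of the mem0-backed path, the only provider create_storage supports above; it requires the mem0ai package, and the config keys shown are placeholders.

external = ExternalMemory(
    storage=ExternalMemory.create_storage(
        crew=None,
        embedder_config={"provider": "mem0", "config": {"user_id": "john"}},  # placeholder keys
    )
)
external.save("User prefers concise answers", metadata={"topic": "style"})
results = external.search("answer style", limit=3)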
13   lib/crewai/src/crewai/memory/external/external_memory_item.py    vendored    Normal file
@@ -0,0 +1,13 @@
from typing import Any


class ExternalMemoryItem:
    def __init__(
        self,
        value: Any,
        metadata: dict[str, Any] | None = None,
        agent: str | None = None,
    ):
        self.value = value
        self.metadata = metadata
        self.agent = agent
0    lib/crewai/src/crewai/memory/long_term/__init__.py    Normal file
255  lib/crewai/src/crewai/memory/long_term/long_term_memory.py    Normal file
@@ -0,0 +1,255 @@
import time
from typing import Any

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemoryQueryStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
    MemorySaveStartedEvent,
)
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage


class LongTermMemory(Memory):
    """
    LongTermMemory class for managing cross-run data related to the overall
    crew's execution and performance.
    Inherits from the Memory class and utilizes an instance of a class that
    adheres to the Storage for data storage, specifically working with
    LongTermMemoryItem instances.
    """

    def __init__(
        self,
        storage: LTMSQLiteStorage | None = None,
        path: str | None = None,
    ) -> None:
        if not storage:
            storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
        super().__init__(storage=storage)

    def save(self, item: LongTermMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                value=item.task,
                metadata=item.metadata,
                agent_role=item.agent,
                source_type="long_term_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            metadata = item.metadata
            metadata.update(
                {"agent": item.agent, "expected_output": item.expected_output}
            )
            self.storage.save(
                task_description=item.task,
                score=metadata["quality"],
                metadata=metadata,
                datetime=item.datetime,
            )

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=item.task,
                    metadata=item.metadata,
                    agent_role=item.agent,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="long_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    value=item.task,
                    metadata=item.metadata,
                    agent_role=item.agent,
                    error=str(e),
                    source_type="long_term_memory",
                ),
            )
            raise

    def search(  # type: ignore[override]
        self,
        task: str,
        latest_n: int = 3,
    ) -> list[dict[str, Any]]:
        """Search long-term memory for relevant entries.

        Args:
            task: The task description to search for.
            latest_n: Maximum number of results to return.

        Returns:
            List of matching memory entries.
        """
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=task,
                limit=latest_n,
                source_type="long_term_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            results = self.storage.load(task, latest_n)

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=task,
                    results=results,
                    limit=latest_n,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="long_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )

            return results or []
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=task,
                    limit=latest_n,
                    error=str(e),
                    source_type="long_term_memory",
                ),
            )
            raise

    async def asave(self, item: LongTermMemoryItem) -> None:  # type: ignore[override]
        """Save an item to long-term memory asynchronously.

        Args:
            item: The LongTermMemoryItem to save.
        """
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                value=item.task,
                metadata=item.metadata,
                agent_role=item.agent,
                source_type="long_term_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            metadata = item.metadata
            metadata.update(
                {"agent": item.agent, "expected_output": item.expected_output}
            )
            await self.storage.asave(
                task_description=item.task,
                score=metadata["quality"],
                metadata=metadata,
                datetime=item.datetime,
            )

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=item.task,
                    metadata=item.metadata,
                    agent_role=item.agent,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="long_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    value=item.task,
                    metadata=item.metadata,
                    agent_role=item.agent,
                    error=str(e),
                    source_type="long_term_memory",
                ),
            )
            raise

    async def asearch(  # type: ignore[override]
        self,
        task: str,
        latest_n: int = 3,
    ) -> list[dict[str, Any]]:
        """Search long-term memory asynchronously.

        Args:
            task: The task description to search for.
            latest_n: Maximum number of results to return.

        Returns:
            List of matching memory entries.
        """
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=task,
                limit=latest_n,
                source_type="long_term_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            results = await self.storage.aload(task, latest_n)

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=task,
                    results=results,
                    limit=latest_n,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="long_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )

            return results or []
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=task,
                    limit=latest_n,
                    error=str(e),
                    source_type="long_term_memory",
                ),
            )
            raise

    def reset(self) -> None:
        """Reset long-term memory."""
        self.storage.reset()
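A sketch of storing and recalling a task evaluation. Note that save() reads the score from metadata["quality"], so the caller must include it in metadata, not only in the quality field.

ltm = LongTermMemory()  # defaults to LTMSQLiteStorage
ltm.save(
    LongTermMemoryItem(  # defined in the next hunk of this diff
        agent="researcher",
        task="Summarize Q3 findings",
        expected_output="A one-page summary",
        datetime="2025-01-01T12:00:00",
        quality=8,
        metadata={"quality": 8, "suggestions": ["Cite sources"]},
    )
)
past = ltm.search("Summarize Q3 findings", latest_n=2)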
@@ -0,0 +1,19 @@
from typing import Any


class LongTermMemoryItem:
    def __init__(
        self,
        agent: str,
        task: str,
        expected_output: str,
        datetime: str,
        quality: int | float | None = None,
        metadata: dict[str, Any] | None = None,
    ):
        self.task = task
        self.agent = agent
        self.quality = quality
        self.datetime = datetime
        self.expected_output = expected_output
        self.metadata = metadata if metadata is not None else {}
121  lib/crewai/src/crewai/memory/memory.py    Normal file
@@ -0,0 +1,121 @@
from __future__ import annotations

from typing import TYPE_CHECKING, Any

from pydantic import BaseModel

from crewai.rag.embeddings.types import EmbedderConfig


if TYPE_CHECKING:
    from crewai.agent import Agent
    from crewai.task import Task


class Memory(BaseModel):
    """Base class for memory, supporting agent tags and generic metadata."""

    embedder_config: EmbedderConfig | dict[str, Any] | None = None
    crew: Any | None = None

    storage: Any
    _agent: Agent | None = None
    _task: Task | None = None

    def __init__(self, storage: Any, **data: Any):
        super().__init__(storage=storage, **data)

    @property
    def task(self) -> Task | None:
        """Get the current task associated with this memory."""
        return self._task

    @task.setter
    def task(self, task: Task | None) -> None:
        """Set the current task associated with this memory."""
        self._task = task

    @property
    def agent(self) -> Agent | None:
        """Get the current agent associated with this memory."""
        return self._agent

    @agent.setter
    def agent(self, agent: Agent | None) -> None:
        """Set the current agent associated with this memory."""
        self._agent = agent

    def save(
        self,
        value: Any,
        metadata: dict[str, Any] | None = None,
    ) -> None:
        """Save a value to memory.

        Args:
            value: The value to save.
            metadata: Optional metadata to associate with the value.
        """
        metadata = metadata or {}
        self.storage.save(value, metadata)

    async def asave(
        self,
        value: Any,
        metadata: dict[str, Any] | None = None,
    ) -> None:
        """Save a value to memory asynchronously.

        Args:
            value: The value to save.
            metadata: Optional metadata to associate with the value.
        """
        metadata = metadata or {}
        await self.storage.asave(value, metadata)

    def search(
        self,
        query: str,
        limit: int = 5,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search memory for relevant entries.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching memory entries.
        """
        results: list[Any] = self.storage.search(
            query=query, limit=limit, score_threshold=score_threshold
        )
        return results

    async def asearch(
        self,
        query: str,
        limit: int = 5,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search memory for relevant entries asynchronously.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching memory entries.
        """
        results: list[Any] = await self.storage.asearch(
            query=query, limit=limit, score_threshold=score_threshold
        )
        return results

    def set_crew(self, crew: Any) -> Memory:
        """Set the crew for this memory instance."""
        self.crew = crew
        return self
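A sketch showing that the base Memory only needs a storage object exposing save/search (plus asave/asearch for the async paths); this toy list-backed storage is illustrative, not a real backend.

class ListStorage:
    """Toy storage; real backends are RAGStorage, LTMSQLiteStorage, Mem0Storage."""

    def __init__(self) -> None:
        self.items: list[tuple[str, dict]] = []

    def save(self, value, metadata):
        self.items.append((value, metadata))

    def search(self, query, limit, score_threshold):
        # Naive substring match standing in for vector similarity.
        return [v for v, _ in self.items if query.lower() in str(v).lower()][:limit]


mem = Memory(storage=ListStorage())
mem.save("kickoff at 9am", metadata={"source": "calendar"})
print(mem.search("kickoff"))  # ["kickoff at 9am"]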
@@ -1,256 +0,0 @@
"""Scoped and sliced views over unified Memory."""

from __future__ import annotations

from datetime import datetime
from typing import TYPE_CHECKING, Any


if TYPE_CHECKING:
    from crewai.memory.unified_memory import Memory

from crewai.memory.types import (
    _RECALL_OVERSAMPLE_FACTOR,
    MemoryMatch,
    MemoryRecord,
    ScopeInfo,
)


class MemoryScope:
    """View of Memory restricted to a root path. All operations are scoped under that path."""

    def __init__(self, memory: Memory, root_path: str) -> None:
        """Initialize scope.

        Args:
            memory: The underlying Memory instance.
            root_path: Root path for this scope (e.g. /agent/1).
        """
        self._memory = memory
        self._root = root_path.rstrip("/") or ""
        if self._root and not self._root.startswith("/"):
            self._root = "/" + self._root

    def _scope_path(self, scope: str | None) -> str:
        if not scope or scope == "/":
            return self._root or "/"
        s = scope.rstrip("/")
        if not s.startswith("/"):
            s = "/" + s
        if not self._root:
            return s
        base = self._root.rstrip("/")
        return f"{base}{s}"

    def remember(
        self,
        content: str,
        scope: str | None = "/",
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
    ) -> MemoryRecord:
        """Remember content; scope is relative to this scope's root."""
        path = self._scope_path(scope)
        return self._memory.remember(
            content,
            scope=path,
            categories=categories,
            metadata=metadata,
            importance=importance,
        )

    def recall(
        self,
        query: str,
        scope: str | None = None,
        categories: list[str] | None = None,
        limit: int = 10,
        depth: str = "deep",
    ) -> list[MemoryMatch]:
        """Recall within this scope (root path and below)."""
        search_scope = self._scope_path(scope) if scope else (self._root or "/")
        return self._memory.recall(
            query,
            scope=search_scope,
            categories=categories,
            limit=limit,
            depth=depth,
        )

    def extract_memories(self, content: str) -> list[str]:
        """Extract discrete memories from content; delegates to underlying Memory."""
        return self._memory.extract_memories(content)

    def forget(
        self,
        scope: str | None = None,
        categories: list[str] | None = None,
        older_than: datetime | None = None,
        metadata_filter: dict[str, Any] | None = None,
        record_ids: list[str] | None = None,
    ) -> int:
        """Forget within this scope."""
        prefix = self._scope_path(scope) if scope else (self._root or "/")
        return self._memory.forget(
            scope=prefix,
            categories=categories,
            older_than=older_than,
            metadata_filter=metadata_filter,
            record_ids=record_ids,
        )

    def list_scopes(self, path: str = "/") -> list[str]:
        """List child scopes under path (relative to this scope's root)."""
        full = self._scope_path(path)
        return self._memory.list_scopes(full)

    def info(self, path: str = "/") -> ScopeInfo:
        """Info for path under this scope."""
        full = self._scope_path(path)
        return self._memory.info(full)

    def tree(self, path: str = "/", max_depth: int = 3) -> str:
        """Tree under path within this scope."""
        full = self._scope_path(path)
        return self._memory.tree(full, max_depth=max_depth)

    def list_categories(self, path: str | None = None) -> dict[str, int]:
        """Categories in this scope; path None means this scope root."""
        full = self._scope_path(path) if path else (self._root or "/")
        return self._memory.list_categories(full)

    def reset(self, scope: str | None = None) -> None:
        """Reset within this scope."""
        prefix = self._scope_path(scope) if scope else (self._root or "/")
        self._memory.reset(scope=prefix)

    def subscope(self, path: str) -> MemoryScope:
        """Return a narrower scope under this scope."""
        child = path.strip("/")
        if not child:
            return MemoryScope(self._memory, self._root or "/")
        base = self._root.rstrip("/") or ""
        new_root = f"{base}/{child}" if base else f"/{child}"
        return MemoryScope(self._memory, new_root)


class MemorySlice:
    """View over multiple scopes: recall searches all, remember requires an explicit scope and is only allowed when not read_only."""

    def __init__(
        self,
        memory: Memory,
        scopes: list[str],
        categories: list[str] | None = None,
        read_only: bool = True,
    ) -> None:
        """Initialize slice.

        Args:
            memory: The underlying Memory instance.
            scopes: List of scope paths to include.
            categories: Optional category filter for recall.
            read_only: If True, remember() raises PermissionError.
        """
        self._memory = memory
        self._scopes = [s.rstrip("/") or "/" for s in scopes]
        self._categories = categories
        self._read_only = read_only

    def remember(
        self,
        content: str,
        scope: str,
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
    ) -> MemoryRecord:
        """Remember into an explicit scope; only permitted when read_only=False."""
        if self._read_only:
            raise PermissionError("This MemorySlice is read-only")
        return self._memory.remember(
            content,
            scope=scope,
            categories=categories,
            metadata=metadata,
            importance=importance,
        )

    def recall(
        self,
        query: str,
        scope: str | None = None,
        categories: list[str] | None = None,
        limit: int = 10,
        depth: str = "deep",
    ) -> list[MemoryMatch]:
        """Recall across all slice scopes; results merged and re-ranked.

        Note: the ``scope`` argument is currently ignored; the slice always
        searches its configured scopes.
        """
        cats = categories or self._categories
        all_matches: list[MemoryMatch] = []
        for sc in self._scopes:
            matches = self._memory.recall(
                query,
                scope=sc,
                categories=cats,
                limit=limit * _RECALL_OVERSAMPLE_FACTOR,
                depth=depth,
            )
            all_matches.extend(matches)
        seen_ids: set[str] = set()
        unique: list[MemoryMatch] = []
        for m in sorted(all_matches, key=lambda x: x.score, reverse=True):
            if m.record.id not in seen_ids:
                seen_ids.add(m.record.id)
                unique.append(m)
            if len(unique) >= limit:
                break
        return unique

    def extract_memories(self, content: str) -> list[str]:
        """Extract discrete memories from content; delegates to underlying Memory."""
        return self._memory.extract_memories(content)

    def list_scopes(self, path: str = "/") -> list[str]:
        """List scopes across all slice roots."""
        out: list[str] = []
        for sc in self._scopes:
            full = f"{sc.rstrip('/')}{path}" if sc != "/" else path
            out.extend(self._memory.list_scopes(full))
        return sorted(set(out))

    def info(self, path: str = "/") -> ScopeInfo:
        """Aggregate info across slice scopes (record counts summed)."""
        total_records = 0
        all_categories: set[str] = set()
        oldest: datetime | None = None
        newest: datetime | None = None
        children: list[str] = []
        for sc in self._scopes:
            full = f"{sc.rstrip('/')}{path}" if sc != "/" else path
            inf = self._memory.info(full)
            total_records += inf.record_count
            all_categories.update(inf.categories)
            if inf.oldest_record:
                oldest = inf.oldest_record if oldest is None else min(oldest, inf.oldest_record)
            if inf.newest_record:
                newest = inf.newest_record if newest is None else max(newest, inf.newest_record)
            children.extend(inf.child_scopes)
        return ScopeInfo(
            path=path,
            record_count=total_records,
            categories=sorted(all_categories),
            oldest_record=oldest,
            newest_record=newest,
            child_scopes=sorted(set(children)),
        )

    def list_categories(self, path: str | None = None) -> dict[str, int]:
        """Categories and counts across slice scopes."""
        counts: dict[str, int] = {}
        for sc in self._scopes:
            full = (f"{sc.rstrip('/')}{path}" if sc != "/" else path) if path else sc
            for k, v in self._memory.list_categories(full).items():
                counts[k] = counts.get(k, 0) + v
        return counts
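A hedged sketch of how these views compose; `memory` is assumed to be an existing unified `Memory` instance, and the paths are purely illustrative.

agent_mem = MemoryScope(memory, "/agent/1")
agent_mem.remember("Prefers concise answers", scope="/preferences")
# Stored under /agent/1/preferences; recall searches /agent/1 and below.
hits = agent_mem.recall("answer style", limit=5)

# Read-only view across two agents' scopes; remember() would raise
# PermissionError here because read_only=True.
shared = MemorySlice(memory, scopes=["/agent/1", "/agent/2"], read_only=True)
merged = shared.recall("project deadlines", limit=10)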
@@ -1,335 +0,0 @@
"""RLM-inspired intelligent recall flow for memory retrieval.

Implements adaptive-depth retrieval with:
- LLM query distillation into targeted sub-queries
- Keyword-driven category filtering
- Time-based filtering from temporal hints
- Parallel multi-query, multi-scope search
- Confidence-based routing with iterative deepening (budget loop)
- Evidence gap tracking propagated to results
"""

from __future__ import annotations

from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from typing import Any
from uuid import uuid4

from pydantic import BaseModel, Field

from crewai.flow.flow import Flow, listen, router, start
from crewai.memory.analyze import QueryAnalysis, analyze_query
from crewai.memory.types import (
    _RECALL_OVERSAMPLE_FACTOR,
    MemoryConfig,
    MemoryMatch,
    MemoryRecord,
    compute_composite_score,
    embed_text,
)


class RecallState(BaseModel):
    """State for the recall flow."""

    id: str = Field(default_factory=lambda: str(uuid4()))
    query: str = ""
    scope: str | None = None
    categories: list[str] | None = None
    inferred_categories: list[str] = Field(default_factory=list)
    time_cutoff: datetime | None = None
    limit: int = 10
    query_embeddings: list[tuple[str, list[float]]] = Field(default_factory=list)
    query_analysis: QueryAnalysis | None = None
    candidate_scopes: list[str] = Field(default_factory=list)
    chunk_findings: list[Any] = Field(default_factory=list)
    evidence_gaps: list[str] = Field(default_factory=list)
    confidence: float = 0.0
    final_results: list[MemoryMatch] = Field(default_factory=list)
    exploration_budget: int = 1


class RecallFlow(Flow[RecallState]):
    """RLM-inspired intelligent memory recall flow.

    Analyzes the query via LLM to produce targeted sub-queries and filters,
    embeds each sub-query, searches across candidate scopes in parallel,
    and iteratively deepens exploration when confidence is low.
    """

    _skip_auto_memory: bool = True

    initial_state = RecallState

    def __init__(
        self,
        storage: Any,
        llm: Any,
        embedder: Any,
        config: MemoryConfig | None = None,
    ) -> None:
        super().__init__()
        self._storage = storage
        self._llm = llm
        self._embedder = embedder
        self._config = config or MemoryConfig()

    # ------------------------------------------------------------------
    # Helpers
    # ------------------------------------------------------------------

    def _merged_categories(self) -> list[str] | None:
        """Merge caller-supplied and LLM-inferred categories."""
        merged = list(
            set((self.state.categories or []) + self.state.inferred_categories)
        )
        return merged or None

    def _do_search(self) -> list[dict[str, Any]]:
        """Run parallel search across (embeddings x scopes) with filters.

        Populates ``state.chunk_findings`` and ``state.confidence``.
        Returns the findings list.
        """
        search_categories = self._merged_categories()

        def _search_one(
            embedding: list[float], scope: str
        ) -> tuple[str, list[tuple[MemoryRecord, float]]]:
            raw = self._storage.search(
                embedding,
                scope_prefix=scope,
                categories=search_categories,
                limit=self.state.limit * _RECALL_OVERSAMPLE_FACTOR,
                min_score=0.0,
            )
            # Post-filter by time cutoff
            if self.state.time_cutoff and raw:
                raw = [
                    (r, s) for r, s in raw if r.created_at >= self.state.time_cutoff
                ]
            return scope, raw

        # Build (embedding, scope) task list
        tasks: list[tuple[list[float], str]] = [
            (embedding, scope)
            for _query_text, embedding in self.state.query_embeddings
            for scope in self.state.candidate_scopes
        ]

        findings: list[dict[str, Any]] = []

        if len(tasks) <= 1:
            for emb, sc in tasks:
                scope, results = _search_one(emb, sc)
                if results:
                    top_composite, _ = compute_composite_score(
                        results[0][0], results[0][1], self._config
                    )
                    findings.append({
                        "scope": scope,
                        "results": results,
                        "top_score": top_composite,
                    })
        else:
            with ThreadPoolExecutor(max_workers=min(len(tasks), 4)) as pool:
                futures = {
                    pool.submit(_search_one, emb, sc): (emb, sc)
                    for emb, sc in tasks
                }
                for future in as_completed(futures):
                    scope, results = future.result()
                    if results:
                        top_composite, _ = compute_composite_score(
                            results[0][0], results[0][1], self._config
                        )
                        findings.append({
                            "scope": scope,
                            "results": results,
                            "top_score": top_composite,
                        })

        self.state.chunk_findings = findings
        self.state.confidence = max(
            (f["top_score"] for f in findings), default=0.0
        )
        return findings

    # ------------------------------------------------------------------
    # Flow steps
    # ------------------------------------------------------------------

    @start()
    def analyze_query_step(self) -> QueryAnalysis:
        """Analyze the query, embed distilled sub-queries, extract filters."""
        self.state.exploration_budget = self._config.exploration_budget
        available = self._storage.list_scopes(self.state.scope or "/")
        if not available:
            available = ["/"]
        scope_info = (
            self._storage.get_scope_info(self.state.scope or "/")
            if self.state.scope
            else None
        )
        analysis = analyze_query(
            self.state.query,
            available,
            scope_info,
            self._llm,
        )
        self.state.query_analysis = analysis

        # Wire keywords -> category filter
        if analysis.keywords:
            self.state.inferred_categories = analysis.keywords

        # Parse time_filter into a datetime cutoff
        if analysis.time_filter:
            try:
                self.state.time_cutoff = datetime.fromisoformat(analysis.time_filter)
            except ValueError:
                # If the time filter isn't valid ISO format, ignore it and proceed without a cutoff.
                pass

        # Embed distilled recall queries (or fall back to the original query)
        queries = analysis.recall_queries if analysis.recall_queries else [self.state.query]
        pairs: list[tuple[str, list[float]]] = []
        for q in queries[:3]:
            emb = embed_text(self._embedder, q)
            if emb:
                pairs.append((q, emb))
        if not pairs:
            emb = embed_text(self._embedder, self.state.query)
            if emb:
                pairs.append((self.state.query, emb))
        self.state.query_embeddings = pairs
        return analysis

    @listen(analyze_query_step)
    def filter_and_chunk(self) -> list[str]:
        """Select candidate scopes based on LLM analysis."""
        analysis = self.state.query_analysis
        scope_prefix = (self.state.scope or "/").rstrip("/") or "/"
        if analysis and analysis.suggested_scopes:
            candidates = [s for s in analysis.suggested_scopes if s]
        else:
            candidates = self._storage.list_scopes(scope_prefix)
            if not candidates:
                # No child scopes: fall back to searching the prefix itself.
                candidates = [scope_prefix]
        self.state.candidate_scopes = candidates[:20]
        return self.state.candidate_scopes

    @listen(filter_and_chunk)
    def search_chunks(self) -> list[Any]:
        """Initial parallel search across (embeddings x scopes) with filters."""
        return self._do_search()

    @router(search_chunks)
    def decide_depth(self) -> str:
        """Route based on confidence, complexity, and remaining budget."""
        analysis = self.state.query_analysis
        if (
            analysis
            and analysis.complexity == "complex"
            and self.state.confidence < self._config.complex_query_threshold
        ):
            if self.state.exploration_budget > 0:
                return "explore_deeper"
        if self.state.confidence >= self._config.confidence_threshold_high:
            return "synthesize"
        if (
            self.state.exploration_budget > 0
            and self.state.confidence < self._config.confidence_threshold_low
        ):
            return "explore_deeper"
        return "synthesize"

    @listen("explore_deeper")
    def recursive_exploration(self) -> list[Any]:
        """Feed top results back to the LLM for deeper context extraction.

        Decrements the exploration budget so the loop terminates.
        """
        self.state.exploration_budget -= 1

        enhanced: list[dict[str, Any]] = []
        for finding in self.state.chunk_findings:
            if not finding.get("results"):
                continue
            content_parts = [r[0].content for r in finding["results"][:5]]
            chunk_text = "\n---\n".join(content_parts)
            prompt = (
                f"Query: {self.state.query}\n\n"
                f"Relevant memory excerpts:\n{chunk_text}\n\n"
                "Extract the most relevant information for the query. "
                "If something is missing, say what's missing in one short line."
            )
            try:
                response = self._llm.call([{"role": "user", "content": prompt}])
                if isinstance(response, str) and "missing" in response.lower():
                    self.state.evidence_gaps.append(response[:200])
                enhanced.append({
                    "scope": finding["scope"],
                    "extraction": response,
                    "results": finding["results"],
                })
            except Exception:
                enhanced.append({
                    "scope": finding["scope"],
                    "extraction": "",
                    "results": finding["results"],
                })
        self.state.chunk_findings = enhanced
        return enhanced

    @listen(recursive_exploration)
    def re_search(self) -> list[Any]:
        """Re-search after exploration to update confidence for the router loop."""
        return self._do_search()

    @router(re_search)
    def re_decide_depth(self) -> str:
        """Re-evaluate depth after re-search. Same logic as decide_depth."""
        return self.decide_depth()

    @listen("synthesize")
    def synthesize_results(self) -> list[MemoryMatch]:
        """Deduplicate, composite-score, rank, and attach evidence gaps."""
        seen_ids: set[str] = set()
        matches: list[MemoryMatch] = []
        for finding in self.state.chunk_findings:
            if not isinstance(finding, dict):
                continue
            results = finding.get("results", [])
            if not isinstance(results, list):
                continue
            for item in results:
                if isinstance(item, (list, tuple)) and len(item) >= 2:
                    record, score = item[0], item[1]
                else:
                    continue
                if isinstance(record, MemoryRecord) and record.id not in seen_ids:
                    seen_ids.add(record.id)
                    composite, reasons = compute_composite_score(
                        record, float(score), self._config
                    )
                    matches.append(
                        MemoryMatch(
                            record=record,
                            score=composite,
                            match_reasons=reasons,
                        )
                    )
        matches.sort(key=lambda m: m.score, reverse=True)
        self.state.final_results = matches[: self.state.limit]

        # Attach evidence gaps to the first result so callers can inspect them
        if self.state.evidence_gaps and self.state.final_results:
            self.state.final_results[0].evidence_gaps = list(self.state.evidence_gaps)

        return self.state.final_results
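One plausible way to exercise the flow in isolation, assuming `storage`, `llm`, and `embedder` objects compatible with the constructor above. In practice the unified `Memory` wires these up; here `kickoff(inputs=...)` is assumed to seed the matching `RecallState` fields, per the usual Flow API.

flow = RecallFlow(storage=storage, llm=llm, embedder=embedder)
matches = flow.kickoff(
    inputs={"query": "what did the user say about deadlines?", "scope": "/agent/1", "limit": 5}
)
# kickoff returns the value of the terminal step: the ranked list[MemoryMatch].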
0
lib/crewai/src/crewai/memory/short_term/__init__.py
Normal file
318
lib/crewai/src/crewai/memory/short_term/short_term_memory.py
Normal file
@@ -0,0 +1,318 @@
from __future__ import annotations

import time
from typing import Any

from pydantic import PrivateAttr

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemoryQueryStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
    MemorySaveStartedEvent,
)
from crewai.memory.memory import Memory
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
from crewai.memory.storage.rag_storage import RAGStorage


class ShortTermMemory(Memory):
    """
    ShortTermMemory class for managing transient data related to immediate tasks
    and interactions.

    Inherits from the Memory class and utilizes an instance of a class that
    adheres to the Storage interface for data storage, specifically working with
    MemoryItem instances.
    """

    _memory_provider: str | None = PrivateAttr()

    def __init__(
        self,
        crew: Any = None,
        embedder_config: Any = None,
        storage: Any = None,
        path: str | None = None,
    ) -> None:
        memory_provider = None
        if embedder_config and isinstance(embedder_config, dict):
            memory_provider = embedder_config.get("provider")

        if memory_provider == "mem0":
            try:
                from crewai.memory.storage.mem0_storage import Mem0Storage
            except ImportError as e:
                raise ImportError(
                    "Mem0 is not installed. Please install it with `pip install mem0ai`."
                ) from e
            config = (
                embedder_config.get("config")
                if embedder_config and isinstance(embedder_config, dict)
                else None
            )
            storage = Mem0Storage(type="short_term", crew=crew, config=config)  # type: ignore[no-untyped-call]
        else:
            storage = (
                storage
                if storage
                else RAGStorage(
                    type="short_term",
                    embedder_config=embedder_config,
                    crew=crew,
                    path=path,
                )
            )
        super().__init__(storage=storage)
        self._memory_provider = memory_provider

    def save(
        self,
        value: Any,
        metadata: dict[str, Any] | None = None,
    ) -> None:
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                value=value,
                metadata=metadata,
                source_type="short_term_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            item = ShortTermMemoryItem(
                data=value,
                metadata=metadata,
                agent=self.agent.role if self.agent else None,
            )
            if self._memory_provider == "mem0":
                item.data = (
                    f"Remember the following insights from Agent run: {item.data}"
                )

            super().save(value=item.data, metadata=item.metadata)

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=value,
                    metadata=metadata,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="short_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    value=value,
                    metadata=metadata,
                    error=str(e),
                    source_type="short_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
            raise

    def search(
        self,
        query: str,
        limit: int = 5,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search short-term memory for relevant entries.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching memory entries.
        """
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=query,
                limit=limit,
                score_threshold=score_threshold,
                source_type="short_term_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            results = self.storage.search(
                query=query, limit=limit, score_threshold=score_threshold
            )

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=score_threshold,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="short_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )

            return list(results)
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=score_threshold,
                    error=str(e),
                    source_type="short_term_memory",
                ),
            )
            raise

    async def asave(
        self,
        value: Any,
        metadata: dict[str, Any] | None = None,
    ) -> None:
        """Save a value to short-term memory asynchronously.

        Args:
            value: The value to save.
            metadata: Optional metadata to associate with the value.
        """
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                value=value,
                metadata=metadata,
                source_type="short_term_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            item = ShortTermMemoryItem(
                data=value,
                metadata=metadata,
                agent=self.agent.role if self.agent else None,
            )
            if self._memory_provider == "mem0":
                item.data = (
                    f"Remember the following insights from Agent run: {item.data}"
                )

            await super().asave(value=item.data, metadata=item.metadata)

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=value,
                    metadata=metadata,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="short_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    value=value,
                    metadata=metadata,
                    error=str(e),
                    source_type="short_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
            raise

    async def asearch(
        self,
        query: str,
        limit: int = 5,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search short-term memory asynchronously.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching memory entries.
        """
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=query,
                limit=limit,
                score_threshold=score_threshold,
                source_type="short_term_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            results = await self.storage.asearch(
                query=query, limit=limit, score_threshold=score_threshold
            )

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=score_threshold,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="short_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )

            return list(results)
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=score_threshold,
                    error=str(e),
                    source_type="short_term_memory",
                ),
            )
            raise

    def reset(self) -> None:
        try:
            self.storage.reset()
        except Exception as e:
            raise Exception(
                f"An error occurred while resetting the short-term memory: {e}"
            ) from e
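A minimal sketch of the event-emitting wrapper above, assuming default RAGStorage construction succeeds in your environment (it may require embedder configuration):

stm = ShortTermMemory()
stm.save("User asked for a summary of Q3 results", metadata={"task": "summary"})
hits = stm.search("Q3 results", limit=3, score_threshold=0.5)
stm.reset()  # clears the backing store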
@@ -0,0 +1,13 @@
from typing import Any


class ShortTermMemoryItem:
    def __init__(
        self,
        data: Any,
        agent: str | None = None,
        metadata: dict[str, Any] | None = None,
    ):
        self.data = data
        self.agent = agent
        self.metadata = metadata if metadata is not None else {}
@@ -1,179 +0,0 @@
"""Storage backend protocol for the unified memory system."""

from __future__ import annotations

from datetime import datetime
from typing import Any, Protocol, runtime_checkable

from crewai.memory.types import MemoryRecord, ScopeInfo


@runtime_checkable
class StorageBackend(Protocol):
    """Protocol for pluggable memory storage backends."""

    def save(self, records: list[MemoryRecord]) -> None:
        """Save memory records to storage.

        Args:
            records: List of memory records to persist.
        """
        ...

    def search(
        self,
        query_embedding: list[float],
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        metadata_filter: dict[str, Any] | None = None,
        limit: int = 10,
        min_score: float = 0.0,
    ) -> list[tuple[MemoryRecord, float]]:
        """Search for memories by vector similarity with optional filters.

        Args:
            query_embedding: Embedding vector for the query.
            scope_prefix: Optional scope path prefix to filter results.
            categories: Optional list of categories to filter by.
            metadata_filter: Optional metadata key-value filter.
            limit: Maximum number of results to return.
            min_score: Minimum similarity score threshold.

        Returns:
            List of (MemoryRecord, score) tuples ordered by relevance.
        """
        ...

    def delete(
        self,
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        record_ids: list[str] | None = None,
        older_than: datetime | None = None,
        metadata_filter: dict[str, Any] | None = None,
    ) -> int:
        """Delete memories matching the given criteria.

        Args:
            scope_prefix: Optional scope path prefix.
            categories: Optional list of categories.
            record_ids: Optional list of record IDs to delete.
            older_than: Optional cutoff datetime (delete older records).
            metadata_filter: Optional metadata key-value filter.

        Returns:
            Number of records deleted.
        """
        ...

    def update(self, record: MemoryRecord) -> None:
        """Update an existing record. Replaces the record with the same ID."""
        ...

    def get_record(self, record_id: str) -> MemoryRecord | None:
        """Return a single record by ID, or None if not found.

        Args:
            record_id: The unique ID of the record.

        Returns:
            The MemoryRecord, or None if no record with that ID exists.
        """
        ...

    def list_records(
        self,
        scope_prefix: str | None = None,
        limit: int = 200,
        offset: int = 0,
    ) -> list[MemoryRecord]:
        """List records in a scope, newest first.

        Args:
            scope_prefix: Optional scope path prefix to filter by.
            limit: Maximum number of records to return.
            offset: Number of records to skip (for pagination).

        Returns:
            List of MemoryRecord, ordered by created_at descending.
        """
        ...

    def get_scope_info(self, scope: str) -> ScopeInfo:
        """Get information about a scope.

        Args:
            scope: The scope path.

        Returns:
            ScopeInfo with record count, categories, date range, child scopes.
        """
        ...

    def list_scopes(self, parent: str = "/") -> list[str]:
        """List immediate child scopes under a parent path.

        Args:
            parent: Parent scope path (default root).

        Returns:
            List of immediate child scope paths.
        """
        ...

    def list_categories(self, scope_prefix: str | None = None) -> dict[str, int]:
        """List categories and their counts within a scope.

        Args:
            scope_prefix: Optional scope to limit to (None = global).

        Returns:
            Mapping of category name to record count.
        """
        ...

    def count(self, scope_prefix: str | None = None) -> int:
        """Count records in scope (and subscopes).

        Args:
            scope_prefix: Optional scope path (None = all).

        Returns:
            Number of records.
        """
        ...

    def reset(self, scope_prefix: str | None = None) -> None:
        """Reset (delete all) memories in scope.

        Args:
            scope_prefix: Optional scope path (None = reset all).
        """
        ...

    async def asave(self, records: list[MemoryRecord]) -> None:
        """Save memory records asynchronously."""
        ...

    async def asearch(
        self,
        query_embedding: list[float],
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        metadata_filter: dict[str, Any] | None = None,
        limit: int = 10,
        min_score: float = 0.0,
    ) -> list[tuple[MemoryRecord, float]]:
        """Search for memories asynchronously."""
        ...

    async def adelete(
        self,
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        record_ids: list[str] | None = None,
        older_than: datetime | None = None,
        metadata_filter: dict[str, Any] | None = None,
    ) -> int:
        """Delete memories asynchronously."""
        ...
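A hedged sketch of a minimal in-memory backend satisfying the core of this protocol (the remaining methods follow the same pattern and are omitted for brevity); naive cosine similarity stands in for a real vector index.

import math
from typing import Any

from crewai.memory.types import MemoryRecord


class InMemoryBackend:
    def __init__(self) -> None:
        self._records: dict[str, MemoryRecord] = {}

    def save(self, records: list[MemoryRecord]) -> None:
        for r in records:
            self._records[r.id] = r

    def search(
        self,
        query_embedding: list[float],
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        metadata_filter: dict[str, Any] | None = None,
        limit: int = 10,
        min_score: float = 0.0,
    ) -> list[tuple[MemoryRecord, float]]:
        def cosine(a: list[float], b: list[float]) -> float:
            dot = sum(x * y for x, y in zip(a, b))
            na = math.sqrt(sum(x * x for x in a))
            nb = math.sqrt(sum(y * y for y in b))
            return dot / (na * nb) if na and nb else 0.0

        # Filter by scope prefix and categories, then rank by similarity.
        scored = [
            (r, cosine(query_embedding, r.embedding))
            for r in self._records.values()
            if r.embedding
            and (not scope_prefix or r.scope.startswith(scope_prefix.rstrip("/")))
            and (not categories or any(c in r.categories for c in categories))
        ]
        scored = [(r, s) for r, s in scored if s >= min_score]
        scored.sort(key=lambda t: t[1], reverse=True)
        return scored[:limit]

    def count(self, scope_prefix: str | None = None) -> int:
        return len(self._records)

    def reset(self, scope_prefix: str | None = None) -> None:
        self._records.clear()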
16
lib/crewai/src/crewai/memory/storage/interface.py
Normal file
@@ -0,0 +1,16 @@
from typing import Any


class Storage:
    """Abstract base class defining the storage interface."""

    def save(self, value: Any, metadata: dict[str, Any]) -> None:
        pass

    def search(
        self, query: str, limit: int, score_threshold: float
    ) -> dict[str, Any] | list[Any]:
        return {}

    def reset(self) -> None:
        pass
@@ -1,447 +0,0 @@
"""LanceDB storage backend for the unified memory system."""

from __future__ import annotations

from datetime import datetime
import json
import os
from pathlib import Path
from typing import Any

import lancedb

from crewai.memory.types import MemoryRecord, ScopeInfo


# Default embedding vector dimensionality (matches OpenAI text-embedding-3-small).
# Used when creating new tables and for zero-vector placeholder scans.
# Callers can override via the ``vector_dim`` constructor parameter.
DEFAULT_VECTOR_DIM = 1536

# Safety cap on the number of rows returned by a single scan query.
# Prevents unbounded memory use when scanning large tables for scope info,
# listing, or deletion. Internal only -- not user-configurable.
_SCAN_ROWS_LIMIT = 50_000


class LanceDBStorage:
    """LanceDB-backed storage for the unified memory system."""

    def __init__(
        self,
        path: str | Path | None = None,
        table_name: str = "memories",
        vector_dim: int | None = None,
    ) -> None:
        """Initialize LanceDB storage.

        Args:
            path: Directory path for the LanceDB database. Defaults to
                ``$CREWAI_STORAGE_DIR/memory`` if the env var is set,
                otherwise ``./.crewai/memory``.
            table_name: Name of the table for memory records.
            vector_dim: Dimensionality of the embedding vector. When ``None``
                (default), the dimension is auto-detected from the existing
                table schema or from the first saved embedding.
        """
        if path is None:
            storage_dir = os.environ.get("CREWAI_STORAGE_DIR")
            path = Path(storage_dir) / "memory" if storage_dir else Path("./.crewai/memory")
        self._path = Path(path)
        self._path.mkdir(parents=True, exist_ok=True)
        self._table_name = table_name
        self._db = lancedb.connect(str(self._path))

        # Try to open an existing table and infer dimension from its schema.
        # If no table exists yet, defer creation until the first save so the
        # dimension can be auto-detected from the embedder's actual output.
        try:
            self._table: lancedb.table.Table | None = self._db.open_table(self._table_name)
            self._vector_dim: int = self._infer_dim_from_table(self._table)
        except Exception:
            self._table = None
            self._vector_dim = vector_dim or 0  # 0 = not yet known

        # Explicit dim provided: create the table immediately if it doesn't exist.
        if self._table is None and vector_dim is not None:
            self._vector_dim = vector_dim
            self._table = self._create_table(vector_dim)

    @staticmethod
    def _infer_dim_from_table(table: lancedb.table.Table) -> int:
        """Read vector dimension from an existing table's schema."""
        schema = table.schema
        for field in schema:
            if field.name == "vector":
                try:
                    return field.type.list_size
                except Exception:
                    break
        return DEFAULT_VECTOR_DIM

    def _create_table(self, vector_dim: int) -> lancedb.table.Table:
        """Create a new table with the given vector dimension."""
        placeholder = [
            {
                "id": "__schema_placeholder__",
                "content": "",
                "scope": "/",
                "categories_str": "[]",
                "metadata_str": "{}",
                "importance": 0.5,
                "created_at": datetime.utcnow().isoformat(),
                "last_accessed": datetime.utcnow().isoformat(),
                "vector": [0.0] * vector_dim,
            }
        ]
        table = self._db.create_table(self._table_name, placeholder)
        table.delete("id = '__schema_placeholder__'")
        return table

    def _ensure_table(self, vector_dim: int | None = None) -> lancedb.table.Table:
        """Return the table, creating it lazily if needed.

        Args:
            vector_dim: Dimension hint (e.g. from the first embedding).
                Falls back to the stored ``_vector_dim`` or ``DEFAULT_VECTOR_DIM``.
        """
        if self._table is not None:
            return self._table
        dim = vector_dim or self._vector_dim or DEFAULT_VECTOR_DIM
        self._vector_dim = dim
        self._table = self._create_table(dim)
        return self._table

    def _record_to_row(self, record: MemoryRecord) -> dict[str, Any]:
        return {
            "id": record.id,
            "content": record.content,
            "scope": record.scope,
            "categories_str": json.dumps(record.categories),
            "metadata_str": json.dumps(record.metadata),
            "importance": record.importance,
            "created_at": record.created_at.isoformat(),
            "last_accessed": record.last_accessed.isoformat(),
            "vector": record.embedding if record.embedding else [0.0] * self._vector_dim,
        }

    def _row_to_record(self, row: dict[str, Any]) -> MemoryRecord:
        def _parse_dt(val: Any) -> datetime:
            if val is None:
                return datetime.utcnow()
            if isinstance(val, datetime):
                return val
            s = str(val)
            return datetime.fromisoformat(s.replace("Z", "+00:00"))

        return MemoryRecord(
            id=str(row["id"]),
            content=str(row["content"]),
            scope=str(row["scope"]),
            categories=json.loads(row["categories_str"]) if row.get("categories_str") else [],
            metadata=json.loads(row["metadata_str"]) if row.get("metadata_str") else {},
            importance=float(row.get("importance", 0.5)),
            created_at=_parse_dt(row.get("created_at")),
            last_accessed=_parse_dt(row.get("last_accessed")),
            embedding=row.get("vector"),
        )

    def save(self, records: list[MemoryRecord]) -> None:
        if not records:
            return
        # Auto-detect dimension from the first real embedding.
        dim = None
        for r in records:
            if r.embedding and len(r.embedding) > 0:
                dim = len(r.embedding)
                break
        self._ensure_table(vector_dim=dim)
        rows = [self._record_to_row(r) for r in records]
        for r in rows:
            if r["vector"] is None or len(r["vector"]) != self._vector_dim:
                r["vector"] = [0.0] * self._vector_dim
        self._table.add(rows)

    def update(self, record: MemoryRecord) -> None:
        """Update a record by ID: replaces the stored row with the given record."""
        table = self._ensure_table()
        safe_id = str(record.id).replace("'", "''")
        table.delete(f"id = '{safe_id}'")
        row = self._record_to_row(record)
        if row["vector"] is None or len(row["vector"]) != self._vector_dim:
            row["vector"] = [0.0] * self._vector_dim
        table.add([row])

    def touch_records(self, record_ids: list[str]) -> None:
        """Update last_accessed to now for the given record IDs.

        Args:
            record_ids: IDs of records to touch.
        """
        if not record_ids or self._table is None:
            return
        now = datetime.utcnow().isoformat()
        for rid in record_ids:
            safe_id = str(rid).replace("'", "''")
            rows = (
                self._table.search([0.0] * self._vector_dim)
                .where(f"id = '{safe_id}'")
                .limit(1)
                .to_list()
            )
            if rows:
                rows[0]["last_accessed"] = now
                self._table.delete(f"id = '{safe_id}'")
                self._table.add([rows[0]])

    def get_record(self, record_id: str) -> MemoryRecord | None:
        """Return a single record by ID, or None if not found."""
        if self._table is None:
            return None
        safe_id = str(record_id).replace("'", "''")
        rows = (
            self._table.search([0.0] * self._vector_dim)
            .where(f"id = '{safe_id}'")
            .limit(1)
            .to_list()
        )
        if not rows:
            return None
        return self._row_to_record(rows[0])

    def search(
        self,
        query_embedding: list[float],
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        metadata_filter: dict[str, Any] | None = None,
        limit: int = 10,
        min_score: float = 0.0,
    ) -> list[tuple[MemoryRecord, float]]:
        if self._table is None:
            return []
        query = self._table.search(query_embedding)
        if scope_prefix is not None and scope_prefix.strip("/"):
            prefix = scope_prefix.rstrip("/")
            like_val = prefix + "%"
            query = query.where(f"scope LIKE '{like_val}'")
        results = query.limit(limit * 3 if (categories or metadata_filter) else limit).to_list()
        out: list[tuple[MemoryRecord, float]] = []
        for row in results:
            record = self._row_to_record(row)
            if categories and not any(c in record.categories for c in categories):
                continue
            if metadata_filter and not all(
                record.metadata.get(k) == v for k, v in metadata_filter.items()
            ):
                continue
            distance = row.get("_distance", 0.0)
            score = 1.0 / (1.0 + float(distance)) if distance is not None else 1.0
            if score >= min_score:
                out.append((record, score))
            if len(out) >= limit:
                break
        return out[:limit]

    def delete(
        self,
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        record_ids: list[str] | None = None,
        older_than: datetime | None = None,
        metadata_filter: dict[str, Any] | None = None,
    ) -> int:
        if self._table is None:
            return 0
        if record_ids and not (categories or metadata_filter):
            before = self._table.count_rows()
            ids_expr = ", ".join(f"'{rid}'" for rid in record_ids)
            self._table.delete(f"id IN ({ids_expr})")
            return before - self._table.count_rows()
        if categories or metadata_filter:
            rows = self._scan_rows(scope_prefix)
            to_delete: list[str] = []
            for row in rows:
                record = self._row_to_record(row)
                if categories and not any(c in record.categories for c in categories):
                    continue
                if metadata_filter and not all(
                    record.metadata.get(k) == v for k, v in metadata_filter.items()
                ):
                    continue
                if older_than and record.created_at >= older_than:
                    continue
                to_delete.append(record.id)
            if not to_delete:
                return 0
            before = self._table.count_rows()
            ids_expr = ", ".join(f"'{rid}'" for rid in to_delete)
            self._table.delete(f"id IN ({ids_expr})")
            return before - self._table.count_rows()
        conditions = []
        if scope_prefix is not None and scope_prefix.strip("/"):
            prefix = scope_prefix.rstrip("/")
            if not prefix.startswith("/"):
                prefix = "/" + prefix
            conditions.append(f"scope LIKE '{prefix}%' OR scope = '/'")
        if older_than is not None:
            conditions.append(f"created_at < '{older_than.isoformat()}'")
        if not conditions:
            # Delete all rows (scope_prefix is "/" or None and no older_than)
            before = self._table.count_rows()
            self._table.delete("id != ''")
            return before - self._table.count_rows()
        where_expr = " AND ".join(conditions)
        before = self._table.count_rows()
        self._table.delete(where_expr)
        return before - self._table.count_rows()

    def _scan_rows(
        self, scope_prefix: str | None = None, limit: int = _SCAN_ROWS_LIMIT
    ) -> list[dict[str, Any]]:
        """Scan rows optionally filtered by scope prefix."""
        if self._table is None:
            return []
        q = self._table.search([0.0] * self._vector_dim)
        if scope_prefix is not None and scope_prefix.strip("/"):
            q = q.where(f"scope LIKE '{scope_prefix.rstrip('/')}%'")
        return q.limit(limit).to_list()

    def list_records(
        self, scope_prefix: str | None = None, limit: int = 200, offset: int = 0
    ) -> list[MemoryRecord]:
        """List records in a scope, newest first.

        Args:
            scope_prefix: Optional scope path prefix to filter by.
            limit: Maximum number of records to return.
            offset: Number of records to skip (for pagination).

        Returns:
            List of MemoryRecord, ordered by created_at descending.
        """
        rows = self._scan_rows(scope_prefix, limit=limit + offset)
        records = [self._row_to_record(r) for r in rows]
        records.sort(key=lambda r: r.created_at, reverse=True)
        return records[offset : offset + limit]

    def get_scope_info(self, scope: str) -> ScopeInfo:
        scope = scope.rstrip("/") or "/"
        prefix = scope if scope != "/" else ""
        if prefix and not prefix.startswith("/"):
            prefix = "/" + prefix
        rows = self._scan_rows(prefix or None)
        if not rows:
            return ScopeInfo(
                path=scope or "/",
                record_count=0,
                categories=[],
                oldest_record=None,
                newest_record=None,
                child_scopes=[],
            )
        categories_set: set[str] = set()
        oldest: datetime | None = None
        newest: datetime | None = None
        child_prefix = (prefix + "/") if prefix else "/"
        children: set[str] = set()
        for row in rows:
            sc = str(row.get("scope", ""))
            if child_prefix and sc.startswith(child_prefix):
                rest = sc[len(child_prefix):]
                if "/" not in rest and rest:
                    children.add(sc)
            try:
                cat_str = row.get("categories_str") or "[]"
                categories_set.update(json.loads(cat_str))
            except Exception:  # noqa: S110
                pass
            created = row.get("created_at")
            if created:
                dt = (
                    datetime.fromisoformat(str(created).replace("Z", "+00:00"))
                    if isinstance(created, str)
                    else created
                )
                if isinstance(dt, datetime):
                    if oldest is None or dt < oldest:
                        oldest = dt
                    if newest is None or dt > newest:
                        newest = dt
        return ScopeInfo(
            path=scope or "/",
            record_count=len(rows),
            categories=sorted(categories_set),
            oldest_record=oldest,
            newest_record=newest,
            child_scopes=sorted(children),
        )

    def list_scopes(self, parent: str = "/") -> list[str]:
        parent = parent.rstrip("/") or ""
        prefix = (parent + "/") if parent else "/"
        rows = self._scan_rows(prefix if prefix != "/" else None)
        children: set[str] = set()
        for row in rows:
            sc = str(row.get("scope", ""))
            if sc.startswith(prefix) and sc != (prefix.rstrip("/") or "/"):
                rest = sc[len(prefix):]
                if "/" not in rest and rest:
                    children.add(prefix + rest)
        return sorted(children)

    def list_categories(self, scope_prefix: str | None = None) -> dict[str, int]:
        rows = self._scan_rows(scope_prefix)
        counts: dict[str, int] = {}
        for row in rows:
            cat_str = row.get("categories_str") or "[]"
            try:
                parsed = json.loads(cat_str)
            except Exception:  # noqa: S112
                continue
            for c in parsed:
                counts[c] = counts.get(c, 0) + 1
        return counts

    def count(self, scope_prefix: str | None = None) -> int:
        if self._table is None:
            return 0
        if scope_prefix is None or scope_prefix.strip("/") == "":
            return self._table.count_rows()
        info = self.get_scope_info(scope_prefix)
        return info.record_count

    def reset(self, scope_prefix: str | None = None) -> None:
        if scope_prefix is None or scope_prefix.strip("/") == "":
            if self._table is not None:
                self._db.drop_table(self._table_name)
            self._table = None
            # Dimension is preserved; table will be recreated on next save.
            return
        if self._table is None:
            return
        prefix = scope_prefix.rstrip("/")
        if prefix:
            self._table.delete(f"scope >= '{prefix}' AND scope < '{prefix}/\uFFFF'")

    async def asave(self, records: list[MemoryRecord]) -> None:
        self.save(records)

    async def asearch(
        self,
        query_embedding: list[float],
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        metadata_filter: dict[str, Any] | None = None,
        limit: int = 10,
        min_score: float = 0.0,
    ) -> list[tuple[MemoryRecord, float]]:
        return self.search(
            query_embedding,
            scope_prefix=scope_prefix,
            categories=categories,
            metadata_filter=metadata_filter,
            limit=limit,
            min_score=min_score,
        )

    async def adelete(
        self,
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        record_ids: list[str] | None = None,
        older_than: datetime | None = None,
        metadata_filter: dict[str, Any] | None = None,
    ) -> int:
        return self.delete(
            scope_prefix=scope_prefix,
            categories=categories,
            record_ids=record_ids,
            older_than=older_than,
            metadata_filter=metadata_filter,
        )
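A hedged round-trip sketch; the `MemoryRecord` keyword arguments mirror the fields read by `_row_to_record` above (the exact constructor may supply defaults), and the tiny 3-dimensional embedding is purely illustrative.

from datetime import datetime

store = LanceDBStorage(path="/tmp/crewai-mem-demo", vector_dim=3)
rec = MemoryRecord(
    id="r1",
    content="CrewAI supports scoped memory",
    scope="/agent/1",
    categories=["docs"],
    metadata={"source": "demo"},
    importance=0.8,
    created_at=datetime.utcnow(),
    last_accessed=datetime.utcnow(),
    embedding=[0.1, 0.2, 0.3],
)
store.save([rec])
print(store.search([0.1, 0.2, 0.3], scope_prefix="/agent", limit=1))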
215
lib/crewai/src/crewai/memory/storage/ltm_sqlite_storage.py
Normal file
@@ -0,0 +1,215 @@
|
||||
import json
|
||||
from pathlib import Path
|
||||
import sqlite3
|
||||
from typing import Any
|
||||
|
||||
import aiosqlite
|
||||
|
||||
from crewai.utilities import Printer
|
||||
from crewai.utilities.paths import db_storage_path
|
||||
|
||||
|
||||
class LTMSQLiteStorage:
|
||||
"""SQLite storage class for long-term memory data."""
|
||||
|
||||
def __init__(self, db_path: str | None = None, verbose: bool = True) -> None:
|
||||
"""Initialize the SQLite storage.
|
||||
|
||||
Args:
|
||||
db_path: Optional path to the database file.
|
||||
verbose: Whether to print error messages.
|
||||
"""
|
||||
if db_path is None:
|
||||
db_path = str(Path(db_storage_path()) / "long_term_memory_storage.db")
|
        self.db_path = db_path
        self._verbose = verbose
        self._printer: Printer = Printer()
        Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
        self._initialize_db()

    def _initialize_db(self) -> None:
        """Initialize the SQLite database and create LTM table."""
        try:
            with sqlite3.connect(self.db_path) as conn:
                cursor = conn.cursor()
                cursor.execute(
                    """
                    CREATE TABLE IF NOT EXISTS long_term_memories (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        task_description TEXT,
                        metadata TEXT,
                        datetime TEXT,
                        score REAL
                    )
                    """
                )
                conn.commit()
        except sqlite3.Error as e:
            if self._verbose:
                self._printer.print(
                    content=f"MEMORY ERROR: An error occurred during database initialization: {e}",
                    color="red",
                )

    def save(
        self,
        task_description: str,
        metadata: dict[str, Any],
        datetime: str,
        score: int | float,
    ) -> None:
        """Saves data to the LTM table with error handling."""
        try:
            with sqlite3.connect(self.db_path) as conn:
                cursor = conn.cursor()
                cursor.execute(
                    """
                    INSERT INTO long_term_memories (task_description, metadata, datetime, score)
                    VALUES (?, ?, ?, ?)
                    """,
                    (task_description, json.dumps(metadata), datetime, score),
                )
                conn.commit()
        except sqlite3.Error as e:
            if self._verbose:
                self._printer.print(
                    content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
                    color="red",
                )

    def load(self, task_description: str, latest_n: int) -> list[dict[str, Any]] | None:
        """Queries the LTM table by task description with error handling."""
        try:
            with sqlite3.connect(self.db_path) as conn:
                cursor = conn.cursor()
                cursor.execute(
                    f"""
                    SELECT metadata, datetime, score
                    FROM long_term_memories
                    WHERE task_description = ?
                    ORDER BY datetime DESC, score ASC
                    LIMIT {latest_n}
                    """,  # nosec # noqa: S608
                    (task_description,),
                )
                rows = cursor.fetchall()
                if rows:
                    return [
                        {
                            "metadata": json.loads(row[0]),
                            "datetime": row[1],
                            "score": row[2],
                        }
                        for row in rows
                    ]
        except sqlite3.Error as e:
            if self._verbose:
                self._printer.print(
                    content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
                    color="red",
                )
        return None

    def reset(self) -> None:
        """Resets the LTM table with error handling."""
        try:
            with sqlite3.connect(self.db_path) as conn:
                cursor = conn.cursor()
                cursor.execute("DELETE FROM long_term_memories")
                conn.commit()
        except sqlite3.Error as e:
            if self._verbose:
                self._printer.print(
                    content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
                    color="red",
                )

    async def asave(
        self,
        task_description: str,
        metadata: dict[str, Any],
        datetime: str,
        score: int | float,
    ) -> None:
        """Save data to the LTM table asynchronously.

        Args:
            task_description: Description of the task.
            metadata: Metadata associated with the memory.
            datetime: Timestamp of the memory.
            score: Quality score of the memory.
        """
        try:
            async with aiosqlite.connect(self.db_path) as conn:
                await conn.execute(
                    """
                    INSERT INTO long_term_memories (task_description, metadata, datetime, score)
                    VALUES (?, ?, ?, ?)
                    """,
                    (task_description, json.dumps(metadata), datetime, score),
                )
                await conn.commit()
        except aiosqlite.Error as e:
            if self._verbose:
                self._printer.print(
                    content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
                    color="red",
                )

    async def aload(
        self, task_description: str, latest_n: int
    ) -> list[dict[str, Any]] | None:
        """Query the LTM table by task description asynchronously.

        Args:
            task_description: Description of the task to search for.
            latest_n: Maximum number of results to return.

        Returns:
            List of matching memory entries or None if error occurs.
        """
        try:
            async with aiosqlite.connect(self.db_path) as conn:
                cursor = await conn.execute(
                    f"""
                    SELECT metadata, datetime, score
                    FROM long_term_memories
                    WHERE task_description = ?
                    ORDER BY datetime DESC, score ASC
                    LIMIT {latest_n}
                    """,  # nosec # noqa: S608
                    (task_description,),
                )
                rows = await cursor.fetchall()
                if rows:
                    return [
                        {
                            "metadata": json.loads(row[0]),
                            "datetime": row[1],
                            "score": row[2],
                        }
                        for row in rows
                    ]
        except aiosqlite.Error as e:
            if self._verbose:
                self._printer.print(
                    content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
                    color="red",
                )
        return None

    async def areset(self) -> None:
        """Reset the LTM table asynchronously."""
        try:
            async with aiosqlite.connect(self.db_path) as conn:
                await conn.execute("DELETE FROM long_term_memories")
                await conn.commit()
        except aiosqlite.Error as e:
            if self._verbose:
                self._printer.print(
                    content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
                    color="red",
                )
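For context when reading the diff above: a minimal usage sketch of the SQLite long-term-memory storage. It assumes the class is crewAI's `LTMSQLiteStorage` (the class definition sits above this excerpt), that the constructor takes `db_path` and `verbose` as the assignments at the top suggest, and that the import path matches the file shown; all three are assumptions, not part of the diff.

```python
# Illustrative sketch only -- not part of the diff.
from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage  # path assumed

storage = LTMSQLiteStorage(db_path="./memory/ltm.db", verbose=False)

# Each row stores a task description plus JSON metadata, timestamp, and score.
storage.save(
    task_description="summarize Q3 sales",
    metadata={"quality": "high"},
    datetime="2025-01-01T00:00:00",
    score=7.5,
)

# load() returns the latest_n matching rows, or None when nothing matches.
rows = storage.load(task_description="summarize Q3 sales", latest_n=3)
```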
230
lib/crewai/src/crewai/memory/storage/mem0_storage.py
Normal file
@@ -0,0 +1,230 @@
from collections import defaultdict
from collections.abc import Iterable
import os
import re
from typing import Any

from mem0 import Memory, MemoryClient  # type: ignore[import-untyped,import-not-found]

from crewai.memory.storage.interface import Storage
from crewai.rag.chromadb.utils import _sanitize_collection_name


MAX_AGENT_ID_LENGTH_MEM0 = 255


class Mem0Storage(Storage):
    """
    Extends Storage to handle embedding and searching across entities using Mem0.
    """

    def __init__(self, type, crew=None, config=None):
        super().__init__()

        self._validate_type(type)
        self.memory_type = type
        self.crew = crew
        self.config = config or {}

        self._extract_config_values()
        self._initialize_memory()

    def _validate_type(self, type):
        supported_types = {"short_term", "long_term", "entities", "external"}
        if type not in supported_types:
            raise ValueError(
                f"Invalid type '{type}' for Mem0Storage. "
                f"Must be one of: {', '.join(supported_types)}"
            )

    def _extract_config_values(self):
        self.mem0_run_id = self.config.get("run_id")
        self.includes = self.config.get("includes")
        self.excludes = self.config.get("excludes")
        self.custom_categories = self.config.get("custom_categories")
        self.infer = self.config.get("infer", True)

    def _initialize_memory(self):
        api_key = self.config.get("api_key") or os.getenv("MEM0_API_KEY")
        org_id = self.config.get("org_id")
        project_id = self.config.get("project_id")
        local_config = self.config.get("local_mem0_config")

        if api_key:
            self.memory = (
                MemoryClient(api_key=api_key, org_id=org_id, project_id=project_id)
                if org_id and project_id
                else MemoryClient(api_key=api_key)
            )
            if self.custom_categories:
                self.memory.update_project(custom_categories=self.custom_categories)
        else:
            self.memory = (
                Memory.from_config(local_config)
                if local_config and len(local_config)
                else Memory()
            )

    def _create_filter_for_search(self):
        """
        Returns:
            dict: A filter dictionary containing AND conditions for querying data.
                - Includes user_id and agent_id if both are present.
                - Includes user_id if only user_id is present.
                - Includes agent_id if only agent_id is present.
                - Includes run_id if memory_type is 'short_term' and
                  mem0_run_id is present.
        """
        filter = defaultdict(list)

        if self.memory_type == "short_term" and self.mem0_run_id:
            filter["AND"].append({"run_id": self.mem0_run_id})
        else:
            user_id = self.config.get("user_id", "")
            agent_id = self.config.get("agent_id", "")

            if user_id and agent_id:
                filter["OR"].append({"user_id": user_id})
                filter["OR"].append({"agent_id": agent_id})
            elif user_id:
                filter["AND"].append({"user_id": user_id})
            elif agent_id:
                filter["AND"].append({"agent_id": agent_id})

        return filter

    def save(self, value: Any, metadata: dict[str, Any]) -> None:
        def _last_content(messages: Iterable[dict[str, Any]], role: str) -> str:
            return next(
                (
                    m.get("content", "")
                    for m in reversed(list(messages))
                    if m.get("role") == role
                ),
                "",
            )

        conversations = []
        messages = metadata.pop("messages", None)
        if messages:
            last_user = _last_content(messages, "user")
            last_assistant = _last_content(messages, "assistant")

            if user_msg := self._get_user_message(last_user):
                conversations.append({"role": "user", "content": user_msg})

            if assistant_msg := self._get_assistant_message(last_assistant):
                conversations.append({"role": "assistant", "content": assistant_msg})
        else:
            conversations.append({"role": "assistant", "content": value})

        user_id = self.config.get("user_id", "")

        base_metadata = {
            "short_term": "short_term",
            "long_term": "long_term",
            "entities": "entity",
            "external": "external",
        }

        # Shared base params
        params: dict[str, Any] = {
            "metadata": {"type": base_metadata[self.memory_type], **metadata},
            "infer": self.infer,
        }

        # MemoryClient-specific overrides
        if isinstance(self.memory, MemoryClient):
            params["includes"] = self.includes
            params["excludes"] = self.excludes
            params["output_format"] = "v1.1"
            params["version"] = "v2"

        if self.memory_type == "short_term" and self.mem0_run_id:
            params["run_id"] = self.mem0_run_id

        if user_id:
            params["user_id"] = user_id

        if agent_id := self.config.get("agent_id", self._get_agent_name()):
            params["agent_id"] = agent_id

        self.memory.add(conversations, **params)

    def search(
        self, query: str, limit: int = 5, score_threshold: float = 0.6
    ) -> list[Any]:
        params = {
            "query": query,
            "limit": limit,
            "version": "v2",
            "output_format": "v1.1",
        }

        if user_id := self.config.get("user_id", ""):
            params["user_id"] = user_id

        memory_type_map = {
            "short_term": {"type": "short_term"},
            "long_term": {"type": "long_term"},
            "entities": {"type": "entity"},
            "external": {"type": "external"},
        }

        if self.memory_type in memory_type_map:
            params["metadata"] = memory_type_map[self.memory_type]
            if self.memory_type == "short_term":
                params["run_id"] = self.mem0_run_id

        # Discard the filters for now since we create the filters
        # automatically when the crew is created.
        params["filters"] = self._create_filter_for_search()
        params["threshold"] = score_threshold

        if isinstance(self.memory, Memory):
            del params["metadata"], params["version"], params["output_format"]
            if params.get("run_id"):
                del params["run_id"]

        results = self.memory.search(**params)

        # This makes it compatible for Contextual Memory to retrieve
        for result in results["results"]:
            result["content"] = result["memory"]

        return [r for r in results["results"]]

    def reset(self):
        if self.memory:
            self.memory.reset()

    def _sanitize_role(self, role: str) -> str:
        """
        Sanitizes agent roles to ensure valid directory names.
        """
        return role.replace("\n", "").replace(" ", "_").replace("/", "_")

    def _get_agent_name(self) -> str:
        if not self.crew:
            return ""

        agents = self.crew.agents
        agents = [self._sanitize_role(agent.role) for agent in agents]
        agents = "_".join(agents)
        return _sanitize_collection_name(
            name=agents, max_collection_length=MAX_AGENT_ID_LENGTH_MEM0
        )

    def _get_assistant_message(self, text: str) -> str:
        marker = "Final Answer:"
        if marker in text:
            return text.split(marker, 1)[1].strip()
        return text

    def _get_user_message(self, text: str) -> str:
        pattern = r"User message:\s*(.*)"
        match = re.search(pattern, text)
        if match:
            return match.group(1).strip()
        return text
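A minimal sketch of configuring the `Mem0Storage` above, based on the keys that `_extract_config_values()` and `_initialize_memory()` read. The concrete values are illustrative assumptions; with an `api_key` (or `MEM0_API_KEY` in the environment) the hosted `MemoryClient` is used, otherwise a local `Memory` instance.

```python
# Illustrative sketch only -- not part of the diff.
storage = Mem0Storage(
    type="short_term",
    config={
        "api_key": "m0-...",     # assumption: a valid Mem0 API key
        "user_id": "user_123",
        "run_id": "session_42",  # scopes short_term memories to one session
        "infer": True,
    },
)

storage.save("User prefers concise answers.", metadata={})
hits = storage.search("user preferences", limit=3, score_threshold=0.5)
```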
315
lib/crewai/src/crewai/memory/storage/rag_storage.py
Normal file
@@ -0,0 +1,315 @@
from __future__ import annotations

import logging
import traceback
from typing import TYPE_CHECKING, Any, cast
import warnings

from crewai.rag.chromadb.config import ChromaDBConfig
from crewai.rag.chromadb.types import ChromaEmbeddingFunctionWrapper
from crewai.rag.config.utils import get_rag_client
from crewai.rag.embeddings.factory import build_embedder
from crewai.rag.factory import create_client
from crewai.rag.storage.base_rag_storage import BaseRAGStorage
from crewai.utilities.constants import MAX_FILE_NAME_LENGTH
from crewai.utilities.paths import db_storage_path


if TYPE_CHECKING:
    from crewai.crew import Crew
    from crewai.rag.core.base_client import BaseClient
    from crewai.rag.core.base_embeddings_provider import BaseEmbeddingsProvider
    from crewai.rag.embeddings.types import ProviderSpec
    from crewai.rag.types import BaseRecord


class RAGStorage(BaseRAGStorage):
    """
    Extends Storage to handle embeddings for memory entries, improving
    search efficiency.
    """

    def __init__(
        self,
        type: str,
        allow_reset: bool = True,
        embedder_config: ProviderSpec | BaseEmbeddingsProvider[Any] | None = None,
        crew: Crew | None = None,
        path: str | None = None,
    ) -> None:
        super().__init__(type, allow_reset, embedder_config, crew)
        crew_agents = crew.agents if crew else []
        sanitized_roles = [self._sanitize_role(agent.role) for agent in crew_agents]
        agents_str = "_".join(sanitized_roles)
        self.agents = agents_str
        self.storage_file_name = self._build_storage_file_name(type, agents_str)

        self.type = type
        self._client: BaseClient | None = None

        self.allow_reset = allow_reset
        self.path = path

        warnings.filterwarnings(
            "ignore",
            message=r".*'model_fields'.*is deprecated.*",
            module=r"^chromadb(\.|$)",
        )

        if self.embedder_config:
            embedding_function = build_embedder(self.embedder_config)

            try:
                _ = embedding_function(["test"])
            except Exception as e:
                provider = (
                    self.embedder_config["provider"]
                    if isinstance(self.embedder_config, dict)
                    else self.embedder_config.__class__.__name__.replace(
                        "Provider", ""
                    ).lower()
                )
                raise ValueError(
                    f"Failed to initialize embedder. Please check your configuration or connection.\n"
                    f"Provider: {provider}\n"
                    f"Error: {e}"
                ) from e

            batch_size = None
            if (
                isinstance(self.embedder_config, dict)
                and "config" in self.embedder_config
            ):
                nested_config = self.embedder_config["config"]
                if isinstance(nested_config, dict):
                    batch_size = nested_config.get("batch_size")

            if batch_size is not None:
                config = ChromaDBConfig(
                    embedding_function=cast(
                        ChromaEmbeddingFunctionWrapper, embedding_function
                    ),
                    batch_size=cast(int, batch_size),
                )
            else:
                config = ChromaDBConfig(
                    embedding_function=cast(
                        ChromaEmbeddingFunctionWrapper, embedding_function
                    )
                )

            if self.path:
                config.settings.persist_directory = self.path

            self._client = create_client(config)

    def _get_client(self) -> BaseClient:
        """Get the appropriate client - instance-specific or global."""
        return self._client if self._client else get_rag_client()

    def _sanitize_role(self, role: str) -> str:
        """
        Sanitizes agent roles to ensure valid directory names.
        """
        return role.replace("\n", "").replace(" ", "_").replace("/", "_")

    @staticmethod
    def _build_storage_file_name(type: str, file_name: str) -> str:
        """
        Ensures file name does not exceed max allowed by OS
        """
        base_path = f"{db_storage_path()}/{type}"

        if len(file_name) > MAX_FILE_NAME_LENGTH:
            logging.warning(
                f"Trimming file name from {len(file_name)} to {MAX_FILE_NAME_LENGTH} characters."
            )
            file_name = file_name[:MAX_FILE_NAME_LENGTH]

        return f"{base_path}/{file_name}"

    def save(self, value: Any, metadata: dict[str, Any]) -> None:
        """Save a value to storage.

        Args:
            value: The value to save.
            metadata: Metadata to associate with the value.
        """
        try:
            client = self._get_client()
            collection_name = (
                f"memory_{self.type}_{self.agents}"
                if self.agents
                else f"memory_{self.type}"
            )
            client.get_or_create_collection(collection_name=collection_name)

            document: BaseRecord = {"content": value}
            if metadata:
                document["metadata"] = metadata

            batch_size = None
            if (
                self.embedder_config
                and isinstance(self.embedder_config, dict)
                and "config" in self.embedder_config
            ):
                nested_config = self.embedder_config["config"]
                if isinstance(nested_config, dict):
                    batch_size = nested_config.get("batch_size")

            if batch_size is not None:
                client.add_documents(
                    collection_name=collection_name,
                    documents=[document],
                    batch_size=cast(int, batch_size),
                )
            else:
                client.add_documents(
                    collection_name=collection_name, documents=[document]
                )
        except Exception as e:
            logging.error(
                f"Error during {self.type} save: {e!s}\n{traceback.format_exc()}"
            )

    async def asave(self, value: Any, metadata: dict[str, Any]) -> None:
        """Save a value to storage asynchronously.

        Args:
            value: The value to save.
            metadata: Metadata to associate with the value.
        """
        try:
            client = self._get_client()
            collection_name = (
                f"memory_{self.type}_{self.agents}"
                if self.agents
                else f"memory_{self.type}"
            )
            await client.aget_or_create_collection(collection_name=collection_name)

            document: BaseRecord = {"content": value}
            if metadata:
                document["metadata"] = metadata

            batch_size = None
            if (
                self.embedder_config
                and isinstance(self.embedder_config, dict)
                and "config" in self.embedder_config
            ):
                nested_config = self.embedder_config["config"]
                if isinstance(nested_config, dict):
                    batch_size = nested_config.get("batch_size")

            if batch_size is not None:
                await client.aadd_documents(
                    collection_name=collection_name,
                    documents=[document],
                    batch_size=cast(int, batch_size),
                )
            else:
                await client.aadd_documents(
                    collection_name=collection_name, documents=[document]
                )
        except Exception as e:
            logging.error(
                f"Error during {self.type} async save: {e!s}\n{traceback.format_exc()}"
            )

    def search(
        self,
        query: str,
        limit: int = 5,
        filter: dict[str, Any] | None = None,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search for matching entries in storage.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            filter: Optional metadata filter.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching entries.
        """
        try:
            client = self._get_client()
            collection_name = (
                f"memory_{self.type}_{self.agents}"
                if self.agents
                else f"memory_{self.type}"
            )
            return client.search(
                collection_name=collection_name,
                query=query,
                limit=limit,
                metadata_filter=filter,
                score_threshold=score_threshold,
            )
        except Exception as e:
            logging.error(
                f"Error during {self.type} search: {e!s}\n{traceback.format_exc()}"
            )
            return []

    async def asearch(
        self,
        query: str,
        limit: int = 5,
        filter: dict[str, Any] | None = None,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search for matching entries in storage asynchronously.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            filter: Optional metadata filter.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching entries.
        """
        try:
            client = self._get_client()
            collection_name = (
                f"memory_{self.type}_{self.agents}"
                if self.agents
                else f"memory_{self.type}"
            )
            return await client.asearch(
                collection_name=collection_name,
                query=query,
                limit=limit,
                metadata_filter=filter,
                score_threshold=score_threshold,
            )
        except Exception as e:
            logging.error(
                f"Error during {self.type} async search: {e!s}\n{traceback.format_exc()}"
            )
            return []

    def reset(self) -> None:
        try:
            client = self._get_client()
            collection_name = (
                f"memory_{self.type}_{self.agents}"
                if self.agents
                else f"memory_{self.type}"
            )
            client.delete_collection(collection_name=collection_name)
        except Exception as e:
            if "attempt to write a readonly database" in str(
                e
            ) or "does not exist" in str(e):
                # Ignore readonly database and collection not found errors (already reset)
                pass
            else:
                raise Exception(
                    f"An error occurred while resetting the {self.type} memory: {e}"
                ) from e
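A minimal sketch of constructing the `RAGStorage` above. The embedder config shape mirrors the nested `{"provider": ..., "config": {...}}` dict the constructor inspects; the provider and model values are illustrative assumptions, and because the constructor test-calls the embedder, valid provider credentials would be needed to actually run this.

```python
# Illustrative sketch only -- not part of the diff.
storage = RAGStorage(
    type="short_term",
    embedder_config={
        "provider": "openai",  # assumption: any supported embeddings provider
        "config": {"batch_size": 50},
    },
    path="./rag_db",  # becomes ChromaDB's persist_directory
)

storage.save("Acme prefers weekly status reports.", metadata={"task_id": "t1"})
results = storage.search("reporting preferences", limit=3, score_threshold=0.5)
```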
@@ -1,299 +0,0 @@
"""Data types for the unified memory system."""

from __future__ import annotations

from datetime import datetime
from typing import Any
from uuid import uuid4

from pydantic import BaseModel, Field


# When searching the vector store, we ask for more results than the caller
# requested so that post-search steps (composite scoring, deduplication,
# category filtering) have enough candidates to fill the final result set.
# For example, if the caller asks for 10 results and this is 2, we fetch 20
# from the vector store and then trim down after scoring.
_RECALL_OVERSAMPLE_FACTOR = 2


class MemoryRecord(BaseModel):
    """A single memory entry stored in the memory system."""

    id: str = Field(
        default_factory=lambda: str(uuid4()),
        description="Unique identifier for the memory record.",
    )
    content: str = Field(description="The textual content of the memory.")
    scope: str = Field(
        default="/",
        description="Hierarchical path organizing the memory (e.g. /company/team/user).",
    )
    categories: list[str] = Field(
        default_factory=list,
        description="Categories or tags for the memory.",
    )
    metadata: dict[str, Any] = Field(
        default_factory=dict,
        description="Arbitrary metadata associated with the memory.",
    )
    importance: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description="Importance score from 0.0 to 1.0, affects retrieval ranking.",
    )
    created_at: datetime = Field(
        default_factory=datetime.utcnow,
        description="When the memory was created.",
    )
    last_accessed: datetime = Field(
        default_factory=datetime.utcnow,
        description="When the memory was last accessed.",
    )
    embedding: list[float] | None = Field(
        default=None,
        description="Vector embedding for semantic search. Computed on save if not provided.",
    )


class MemoryMatch(BaseModel):
    """A memory record with relevance score from a recall operation."""

    record: MemoryRecord = Field(description="The matched memory record.")
    score: float = Field(
        description="Combined relevance score (semantic, recency, importance).",
    )
    match_reasons: list[str] = Field(
        default_factory=list,
        description="Reasons for the match (e.g. semantic, recency, importance).",
    )
    evidence_gaps: list[str] = Field(
        default_factory=list,
        description="Information the system looked for but could not find.",
    )


class ScopeInfo(BaseModel):
    """Information about a scope in the memory hierarchy."""

    path: str = Field(description="The scope path (e.g. /company/engineering).")
    record_count: int = Field(
        default=0,
        description="Number of records in this scope (including subscopes if applicable).",
    )
    categories: list[str] = Field(
        default_factory=list,
        description="Categories used in this scope.",
    )
    oldest_record: datetime | None = Field(
        default=None,
        description="Timestamp of the oldest record in this scope.",
    )
    newest_record: datetime | None = Field(
        default=None,
        description="Timestamp of the newest record in this scope.",
    )
    child_scopes: list[str] = Field(
        default_factory=list,
        description="Immediate child scope paths.",
    )


class MemoryConfig(BaseModel):
    """Internal configuration for memory scoring, consolidation, and recall behavior.

    Users configure these values via ``Memory(...)`` keyword arguments.
    This model is not part of the public API -- it exists so that the config
    can be passed as a single object to RecallFlow, ConsolidationFlow, and
    compute_composite_score.
    """

    # -- Composite score weights --
    # The recall composite score is:
    #   semantic_weight * similarity + recency_weight * decay + importance_weight * importance
    # These should sum to ~1.0 for intuitive 0-1 scoring.

    recency_weight: float = Field(
        default=0.3,
        ge=0.0,
        le=1.0,
        description=(
            "Weight for recency in the composite relevance score. "
            "Higher values favor recently created memories over older ones."
        ),
    )
    semantic_weight: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description=(
            "Weight for semantic similarity in the composite relevance score. "
            "Higher values make recall rely more on vector-search closeness."
        ),
    )
    importance_weight: float = Field(
        default=0.2,
        ge=0.0,
        le=1.0,
        description=(
            "Weight for explicit importance in the composite relevance score. "
            "Higher values make high-importance memories surface more often."
        ),
    )
    recency_half_life_days: int = Field(
        default=30,
        ge=1,
        description=(
            "Number of days for the recency score to halve (exponential decay). "
            "Lower values make memories lose relevance faster; higher values "
            "keep old memories relevant longer."
        ),
    )

    # -- Consolidation (on save) --

    consolidation_threshold: float = Field(
        default=0.85,
        ge=0.0,
        le=1.0,
        description=(
            "Semantic similarity above which the consolidation flow is triggered "
            "when saving new content. The LLM then decides whether to merge, "
            "update, or delete overlapping records. Set to 1.0 to disable."
        ),
    )
    consolidation_limit: int = Field(
        default=5,
        ge=1,
        description=(
            "Maximum number of existing records to compare against when checking "
            "for consolidation during a save."
        ),
    )

    # -- Save defaults --

    default_importance: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description=(
            "Importance assigned to new memories when no explicit value is given "
            "and the LLM analysis path is skipped (i.e. all fields provided by "
            "the caller)."
        ),
    )

    # -- Recall depth control --
    # The RecallFlow router uses these thresholds to decide between returning
    # results immediately ("synthesize") and doing an extra LLM-driven
    # exploration round ("explore_deeper").

    confidence_threshold_high: float = Field(
        default=0.8,
        ge=0.0,
        le=1.0,
        description=(
            "When recall confidence is at or above this value, results are "
            "returned directly without deeper exploration."
        ),
    )
    confidence_threshold_low: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description=(
            "When recall confidence is below this value and exploration budget "
            "remains, a deeper LLM-driven exploration round is triggered."
        ),
    )
    complex_query_threshold: float = Field(
        default=0.7,
        ge=0.0,
        le=1.0,
        description=(
            "For queries classified as 'complex' by the LLM, deeper exploration "
            "is triggered when confidence is below this value."
        ),
    )
    exploration_budget: int = Field(
        default=1,
        ge=0,
        description=(
            "Number of LLM-driven exploration rounds allowed during deep recall. "
            "0 means recall always uses direct vector search only; higher values "
            "allow more thorough but slower retrieval."
        ),
    )
    recall_oversample_factor: int = Field(
        default=_RECALL_OVERSAMPLE_FACTOR,
        ge=1,
        description=(
            "When searching the vector store, fetch this many times more results "
            "than the caller requested so that post-search steps (composite "
            "scoring, deduplication, category filtering) have enough candidates "
            "to fill the final result set."
        ),
    )


def embed_text(embedder: Any, text: str) -> list[float]:
    """Embed a single text string and return a list of floats.

    Args:
        embedder: Callable that accepts a list of strings and returns embeddings.
        text: The text to embed.

    Returns:
        List of floats representing the embedding, or empty list on failure.
    """
    if not text or not text.strip():
        return []
    result = embedder([text])
    if not result:
        return []
    first = result[0]
    if hasattr(first, "tolist"):
        return first.tolist()
    if isinstance(first, list):
        return [float(x) for x in first]
    return list(first)


def compute_composite_score(
    record: MemoryRecord,
    semantic_score: float,
    config: MemoryConfig,
) -> tuple[float, list[str]]:
    """Compute a weighted composite relevance score from semantic, recency, and importance.

    composite = w_semantic * semantic + w_recency * decay + w_importance * importance
    where decay = 0.5^(age_days / half_life_days).

    Args:
        record: The memory record (provides created_at and importance).
        semantic_score: Raw semantic similarity from vector search, in [0, 1].
        config: Weights and recency half-life.

    Returns:
        Tuple of (composite_score, match_reasons). match_reasons includes
        "semantic" always; "recency" if decay > 0.5; "importance" if record.importance > 0.5.
    """
    age_seconds = (datetime.utcnow() - record.created_at).total_seconds()
    age_days = max(age_seconds / 86400.0, 0.0)
    decay = 0.5 ** (age_days / config.recency_half_life_days)

    composite = (
        config.semantic_weight * semantic_score
        + config.recency_weight * decay
        + config.importance_weight * record.importance
    )

    reasons: list[str] = ["semantic"]
    if decay > 0.5:
        reasons.append("recency")
    if record.importance > 0.5:
        reasons.append("importance")

    return composite, reasons
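A quick worked example of `compute_composite_score` with the default weights (semantic 0.5, recency 0.3, importance 0.2) and the 30-day half-life, assuming the definitions above are in scope:

```python
# Worked example of the composite-score formula above.
# A 30-day-old record with similarity 0.9 and importance 0.8:
#   decay     = 0.5 ** (30 / 30)                  = 0.5
#   composite = 0.5 * 0.9 + 0.3 * 0.5 + 0.2 * 0.8
#             = 0.45 + 0.15 + 0.16                = 0.76
from datetime import datetime, timedelta

record = MemoryRecord(
    content="Customer prefers email follow-ups.",
    importance=0.8,
    created_at=datetime.utcnow() - timedelta(days=30),
)
score, reasons = compute_composite_score(record, 0.9, MemoryConfig())
# score ~ 0.76; reasons == ["semantic", "importance"]
# ("recency" is omitted since decay is not strictly greater than 0.5)
```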
@@ -1,515 +0,0 @@
"""Unified Memory class: single intelligent memory with LLM analysis and pluggable storage."""

from __future__ import annotations

from datetime import datetime
import time
from typing import Any, Literal

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
    MemoryQueryCompletedEvent,
    MemoryQueryFailedEvent,
    MemoryQueryStartedEvent,
    MemorySaveCompletedEvent,
    MemorySaveFailedEvent,
    MemorySaveStartedEvent,
)
from crewai.llms.base_llm import BaseLLM
from crewai.memory.analyze import extract_memories_from_content
from crewai.memory.recall_flow import RecallFlow
from crewai.memory.storage.backend import StorageBackend
from crewai.memory.storage.lancedb_storage import LanceDBStorage
from crewai.memory.types import (
    MemoryConfig,
    MemoryMatch,
    MemoryRecord,
    ScopeInfo,
    compute_composite_score,
    embed_text,
)


def _default_embedder() -> Any:
    """Build default OpenAI embedder for memory."""
    from crewai.rag.embeddings.factory import build_embedder

    return build_embedder({"provider": "openai", "config": {}})


class Memory:
    """Unified memory: standalone, LLM-analyzed, with intelligent recall flow.

    Works without agent/crew. Uses LLM to infer scope, categories, importance on save.
    Uses RecallFlow for adaptive-depth recall. Supports scope/slice views and
    pluggable storage (LanceDB default).
    """

    def __init__(
        self,
        llm: BaseLLM | str = "gpt-4o-mini",
        storage: StorageBackend | str = "lancedb",
        embedder: Any = None,
        # -- Scoring weights --
        # These three weights control how recall results are ranked.
        # The composite score is: semantic_weight * similarity + recency_weight * decay + importance_weight * importance.
        # They should sum to ~1.0 for intuitive scoring.
        recency_weight: float = 0.3,
        semantic_weight: float = 0.5,
        importance_weight: float = 0.2,
        # How quickly old memories lose relevance. The recency score halves every
        # N days (exponential decay). Lower = faster forgetting; higher = longer relevance.
        recency_half_life_days: int = 30,
        # -- Consolidation --
        # When remembering new content, if an existing record has similarity >= this
        # threshold, the LLM is asked to merge/update/delete. Set to 1.0 to disable.
        consolidation_threshold: float = 0.85,
        # Max existing records to compare against when checking for consolidation.
        consolidation_limit: int = 5,
        # -- Save defaults --
        # Importance assigned to new memories when no explicit value is given and
        # the LLM analysis path is skipped (all fields provided by the caller).
        default_importance: float = 0.5,
        # -- Recall depth control --
        # These thresholds govern the RecallFlow router that decides between
        # returning results immediately ("synthesize") vs. doing an extra
        # LLM-driven exploration round ("explore_deeper").
        #   confidence >= confidence_threshold_high => always synthesize
        #   confidence < confidence_threshold_low => explore deeper (if budget > 0)
        #   complex query + confidence < complex_query_threshold => explore deeper
        confidence_threshold_high: float = 0.8,
        confidence_threshold_low: float = 0.5,
        complex_query_threshold: float = 0.7,
        # How many LLM-driven exploration rounds the RecallFlow is allowed to run.
        # 0 = always shallow (vector search only); higher = more thorough but slower.
        exploration_budget: int = 1,
    ) -> None:
        """Initialize Memory.

        Args:
            llm: LLM for analysis (model name or BaseLLM instance).
            storage: Backend: "lancedb" or a StorageBackend instance.
            embedder: Embedding callable, provider config dict, or None (default OpenAI).
            recency_weight: Weight for recency in the composite relevance score.
            semantic_weight: Weight for semantic similarity in the composite relevance score.
            importance_weight: Weight for importance in the composite relevance score.
            recency_half_life_days: Recency score halves every N days (exponential decay).
            consolidation_threshold: Similarity above which consolidation is triggered on save.
            consolidation_limit: Max existing records to compare during consolidation.
            default_importance: Default importance when not provided or inferred.
            confidence_threshold_high: Recall confidence above which results are returned directly.
            confidence_threshold_low: Recall confidence below which deeper exploration is triggered.
            complex_query_threshold: For complex queries, explore deeper below this confidence.
            exploration_budget: Number of LLM-driven exploration rounds during deep recall.
        """
        from crewai.llm import LLM

        self._config = MemoryConfig(
            recency_weight=recency_weight,
            semantic_weight=semantic_weight,
            importance_weight=importance_weight,
            recency_half_life_days=recency_half_life_days,
            consolidation_threshold=consolidation_threshold,
            consolidation_limit=consolidation_limit,
            default_importance=default_importance,
            confidence_threshold_high=confidence_threshold_high,
            confidence_threshold_low=confidence_threshold_low,
            complex_query_threshold=complex_query_threshold,
            exploration_budget=exploration_budget,
        )
        self._llm = LLM(model=llm) if isinstance(llm, str) else llm
        if storage == "lancedb":
            self._storage = LanceDBStorage()
        elif isinstance(storage, str):
            self._storage = LanceDBStorage(path=storage)
        else:
            self._storage = storage
        if embedder is None:
            self._embedder = _default_embedder()
        elif isinstance(embedder, dict):
            from crewai.rag.embeddings.factory import build_embedder

            self._embedder = build_embedder(embedder)
        else:
            self._embedder = embedder

    def remember(
        self,
        content: str,
        scope: str | None = None,
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
    ) -> MemoryRecord:
        """Store content in memory. Infers scope/categories/importance via LLM when not provided.

        The full encoding pipeline is implemented as an ``EncodingFlow``:
        check whether LLM analysis is needed, infer or apply defaults,
        embed the content, then delegate to ``ConsolidationFlow`` for
        conflict resolution and storage.

        Args:
            content: Text to remember.
            scope: Optional scope path; inferred if None.
            categories: Optional categories; inferred if None.
            metadata: Optional metadata; merged with LLM-extracted if scope/categories/importance inferred.
            importance: Optional importance 0-1; inferred if None.

        Returns:
            The created MemoryRecord.

        Raises:
            Exception: On save failure (events emitted).
        """
        from crewai.memory.encoding_flow import EncodingFlow

        _source = "unified_memory"
        try:
            crewai_event_bus.emit(
                self,
                MemorySaveStartedEvent(
                    value=content,
                    metadata=metadata,
                    source_type=_source,
                ),
            )
            start = time.perf_counter()

            flow = EncodingFlow(
                storage=self._storage,
                llm=self._llm,
                embedder=self._embedder,
                config=self._config,
            )
            flow.kickoff(
                inputs={
                    "content": content,
                    "scope": scope,
                    "categories": categories,
                    "metadata": metadata,
                    "importance": importance,
                },
            )
            record = flow.state.result_record

            elapsed_ms = (time.perf_counter() - start) * 1000
            crewai_event_bus.emit(
                self,
                MemorySaveCompletedEvent(
                    value=content,
                    metadata=flow.state.resolved_metadata or metadata or {},
                    agent_role=None,
                    save_time_ms=elapsed_ms,
                    source_type=_source,
                ),
            )
            return record
        except Exception as e:
            crewai_event_bus.emit(
                self,
                MemorySaveFailedEvent(
                    value=content,
                    metadata=metadata,
                    error=str(e),
                    source_type=_source,
                ),
            )
            raise

    def extract_memories(self, content: str) -> list[str]:
        """Extract discrete memories from a raw content blob using the LLM.

        This is a pure helper -- it does NOT store anything.
        Call remember() on each returned string to persist them.

        Args:
            content: Raw text (e.g. task + result dump).

        Returns:
            List of short, self-contained memory statements.
        """
        return extract_memories_from_content(content, self._llm)

    def recall(
        self,
        query: str,
        scope: str | None = None,
        categories: list[str] | None = None,
        limit: int = 10,
        depth: Literal["shallow", "deep"] = "deep",
    ) -> list[MemoryMatch]:
        """Retrieve relevant memories.

        ``shallow`` embeds the query directly and runs a single vector search.
        ``deep`` (default) uses the RecallFlow: the LLM distills the query into
        targeted sub-queries, selects scopes, searches in parallel, and applies
        confidence-based routing for optional deeper exploration.

        Args:
            query: Natural language query.
            scope: Optional scope prefix to search within.
            categories: Optional category filter.
            limit: Max number of results.
            depth: "shallow" for direct vector search, "deep" for intelligent flow.

        Returns:
            List of MemoryMatch, ordered by relevance.
        """
        _source = "unified_memory"
        try:
            crewai_event_bus.emit(
                self,
                MemoryQueryStartedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=None,
                    source_type=_source,
                ),
            )
            start = time.perf_counter()

            if depth == "shallow":
                embedding = embed_text(self._embedder, query)
                if not embedding:
                    results: list[MemoryMatch] = []
                else:
                    raw = self._storage.search(
                        embedding,
                        scope_prefix=scope,
                        categories=categories,
                        limit=limit,
                        min_score=0.0,
                    )
                    results = []
                    for r, s in raw:
                        composite, reasons = compute_composite_score(
                            r, s, self._config
                        )
                        results.append(
                            MemoryMatch(
                                record=r,
                                score=composite,
                                match_reasons=reasons,
                            )
                        )
                    results.sort(key=lambda m: m.score, reverse=True)
            else:
                flow = RecallFlow(
                    storage=self._storage,
                    llm=self._llm,
                    embedder=self._embedder,
                    config=self._config,
                )
                flow.kickoff(
                    inputs={
                        "query": query,
                        "scope": scope,
                        "categories": categories or [],
                        "limit": limit,
                    }
                )
                results = flow.state.final_results

            # Update last_accessed for recalled records
            if results:
                try:
                    touch = getattr(self._storage, "touch_records", None)
                    if touch is not None:
                        touch([m.record.id for m in results])
                except Exception:  # noqa: S110
                    pass  # Non-critical: don't fail recall because of touch

            elapsed_ms = (time.perf_counter() - start) * 1000
            crewai_event_bus.emit(
                self,
                MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=None,
                    query_time_ms=elapsed_ms,
                    source_type=_source,
                ),
            )
            return results
        except Exception as e:
            crewai_event_bus.emit(
                self,
                MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=None,
                    error=str(e),
                    source_type=_source,
                ),
            )
            raise

    def forget(
        self,
        scope: str | None = None,
        categories: list[str] | None = None,
        older_than: datetime | None = None,
        metadata_filter: dict[str, Any] | None = None,
        record_ids: list[str] | None = None,
    ) -> int:
        """Delete memories matching criteria.

        Returns:
            Number of records deleted.
        """
        return self._storage.delete(
            scope_prefix=scope,
            categories=categories,
            record_ids=record_ids,
            older_than=older_than,
            metadata_filter=metadata_filter,
        )

    def update(
        self,
        record_id: str,
        content: str | None = None,
        scope: str | None = None,
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
    ) -> MemoryRecord:
        """Update an existing memory record by ID.

        Args:
            record_id: ID of the record to update.
            content: New content; re-embedded if provided.
            scope: New scope path.
            categories: New categories.
            metadata: New metadata.
            importance: New importance score.

        Returns:
            The updated MemoryRecord.

        Raises:
            ValueError: If the record is not found.
        """
        existing = self._storage.get_record(record_id)
        if existing is None:
            raise ValueError(f"Record not found: {record_id}")
        now = datetime.utcnow()
        updates: dict[str, Any] = {"last_accessed": now}
        if content is not None:
            updates["content"] = content
            embedding = embed_text(self._embedder, content)
            updates["embedding"] = embedding if embedding else existing.embedding
        if scope is not None:
            updates["scope"] = scope
        if categories is not None:
            updates["categories"] = categories
        if metadata is not None:
            updates["metadata"] = metadata
        if importance is not None:
            updates["importance"] = importance
        updated = existing.model_copy(update=updates)
        self._storage.update(updated)
        return updated

    def scope(self, path: str) -> Any:
        """Return a scoped view of this memory."""
        from crewai.memory.memory_scope import MemoryScope

        return MemoryScope(memory=self, root_path=path)

    def slice(
        self,
        scopes: list[str],
        categories: list[str] | None = None,
        read_only: bool = True,
    ) -> Any:
        """Return a multi-scope view (slice) of this memory."""
        from crewai.memory.memory_scope import MemorySlice

        return MemorySlice(
            memory=self,
            scopes=scopes,
            categories=categories,
            read_only=read_only,
        )

    def list_scopes(self, path: str = "/") -> list[str]:
        """List immediate child scopes under path."""
        return self._storage.list_scopes(path)

    def list_records(
        self, scope: str | None = None, limit: int = 200, offset: int = 0
    ) -> list[MemoryRecord]:
        """List records in a scope, newest first.

        Args:
            scope: Optional scope path prefix to filter by.
            limit: Maximum number of records to return.
            offset: Number of records to skip (for pagination).
        """
        return self._storage.list_records(scope_prefix=scope, limit=limit, offset=offset)

    def info(self, path: str = "/") -> ScopeInfo:
        """Return scope info for path."""
        return self._storage.get_scope_info(path)

    def tree(self, path: str = "/", max_depth: int = 3) -> str:
        """Return a formatted tree of scopes (string)."""
        lines: list[str] = []

        def _walk(p: str, depth: int, prefix: str) -> None:
            if depth > max_depth:
                return
            info = self._storage.get_scope_info(p)
            lines.append(f"{prefix}{p or '/'} ({info.record_count} records)")
            for child in info.child_scopes[:20]:
                _walk(child, depth + 1, prefix + "  ")

        _walk(path.rstrip("/") or "/", 0, "")
        return "\n".join(lines) if lines else f"{path or '/'} (0 records)"

    def list_categories(self, path: str | None = None) -> dict[str, int]:
        """List categories and counts; path=None means global."""
        return self._storage.list_categories(scope_prefix=path)

    def reset(self, scope: str | None = None) -> None:
        """Reset (delete all) memories in scope. None = all."""
        self._storage.reset(scope_prefix=scope)

    async def aextract_memories(self, content: str) -> list[str]:
        """Async variant of extract_memories."""
        return self.extract_memories(content)

    async def aremember(
        self,
        content: str,
        scope: str | None = None,
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
    ) -> MemoryRecord:
        """Async remember: delegates to sync for now."""
        return self.remember(
            content,
            scope=scope,
            categories=categories,
            metadata=metadata,
            importance=importance,
        )

    async def arecall(
        self,
        query: str,
        scope: str | None = None,
        categories: list[str] | None = None,
        limit: int = 10,
        depth: Literal["shallow", "deep"] = "deep",
    ) -> list[MemoryMatch]:
        """Async recall: delegates to sync for now."""
        return self.recall(
            query,
            scope=scope,
            categories=categories,
            limit=limit,
            depth=depth,
        )
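A minimal end-to-end sketch of the `Memory` API defined above (remember, recall, forget). It assumes an OpenAI key is available for the default analysis LLM and embedder; the stored strings and scope paths are illustrative.

```python
# Illustrative sketch only -- not part of the diff.
memory = Memory()  # defaults: gpt-4o-mini analysis LLM, LanceDB storage

# Scope/categories/importance are inferred by the LLM when omitted...
memory.remember("The Q3 launch was moved to October 15.")

# ...or can be supplied explicitly to skip the analysis step.
memory.remember(
    "Alice owns the pricing page redesign.",
    scope="/project/website",
    categories=["ownership"],
    importance=0.9,
)

# "shallow" runs a single vector search; "deep" (the default) uses RecallFlow.
matches = memory.recall("Who owns the pricing page?", depth="shallow", limit=3)
for m in matches:
    print(f"{m.score:.2f} {m.record.content}")

memory.forget(scope="/project/website")  # delete everything under that scope
```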
@@ -1,111 +0,0 @@
"""Memory tools that give agents active recall and remember capabilities."""

from __future__ import annotations

from typing import Any

from pydantic import BaseModel, Field

from crewai.tools.base_tool import BaseTool
from crewai.utilities.i18n import get_i18n


class RecallMemorySchema(BaseModel):
    """Schema for the recall memory tool."""

    query: str = Field(..., description="What to search for in memory")
    scope: str | None = Field(
        default=None,
        description="Optional scope to narrow the search (e.g. /project/alpha)",
    )
    depth: str = Field(
        default="shallow",
        description="'shallow' for fast vector search, 'deep' for LLM-analyzed retrieval",
    )


class RecallMemoryTool(BaseTool):
    """Tool that lets an agent actively search memory mid-task."""

    name: str = "Search memory"
    description: str = ""
    args_schema: type[BaseModel] = RecallMemorySchema
    memory: Any = Field(exclude=True)

    def _run(
        self,
        query: str,
        scope: str | None = None,
        depth: str = "shallow",
        **kwargs: Any,
    ) -> str:
        """Search memory for relevant information.

        Args:
            query: Natural language description of what to find.
            scope: Optional scope prefix to narrow the search.
            depth: "shallow" for fast vector search, "deep" for LLM-analyzed retrieval.

        Returns:
            Formatted string of matching memories, or a message if none found.
        """
        actual_depth = depth if depth in ("shallow", "deep") else "shallow"
        matches = self.memory.recall(query, scope=scope, limit=5, depth=actual_depth)
        if not matches:
            return "No relevant memories found."
        lines = [f"- (score={m.score:.2f}) {m.record.content}" for m in matches]
        return "Found memories:\n" + "\n".join(lines)


class RememberSchema(BaseModel):
    """Schema for the remember tool."""

    content: str = Field(
        ..., description="The fact, decision, or observation to remember"
    )


class RememberTool(BaseTool):
    """Tool that lets an agent explicitly save information to memory mid-task."""

    name: str = "Save to memory"
    description: str = ""
    args_schema: type[BaseModel] = RememberSchema
    memory: Any = Field(exclude=True)

    def _run(self, content: str, **kwargs: Any) -> str:
        """Store content in memory. The system infers scope, categories, and importance.

        Args:
            content: The information to remember.

        Returns:
            Confirmation with the inferred scope and importance.
        """
        record = self.memory.remember(content)
        return (
            f"Saved to memory (scope={record.scope}, "
            f"importance={record.importance:.1f})."
        )


def create_memory_tools(memory: Any) -> list[BaseTool]:
    """Create Recall and Remember tools for the given memory instance.

    Args:
        memory: A Memory, MemoryScope, or MemorySlice instance.

    Returns:
        List containing a RecallMemoryTool and a RememberTool.
    """
    i18n = get_i18n()
    return [
        RecallMemoryTool(
            memory=memory,
            description=i18n.tools("recall_memory"),
        ),
        RememberTool(
            memory=memory,
            description=i18n.tools("save_to_memory"),
        ),
    ]
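A sketch of wiring the tools from `create_memory_tools` onto an agent so it can search and save memory mid-task. The `Agent` fields follow crewAI's usual role/goal/backstory constructor; the `memory` variable is assumed to be a `Memory` (or scoped view) instance from the class above.

```python
# Illustrative sketch only -- not part of the diff.
from crewai import Agent

tools = create_memory_tools(memory)  # a Memory, MemoryScope, or MemorySlice

researcher = Agent(
    role="Researcher",
    goal="Answer questions using prior findings",
    backstory="Keeps and reuses institutional knowledge.",
    tools=tools,  # the agent can now call "Search memory" and "Save to memory"
)
```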
@@ -22,9 +22,9 @@
"expected_output": "\nThis is the expected criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
"human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
"getting_input": "This is the agent's final answer: {final_answer}\n\n",
"summarizer_system_message": "You are a precise assistant that creates structured summaries of agent conversations. You preserve critical context needed for seamless task continuation.",
"summarize_instruction": "Analyze the following conversation and create a structured summary that preserves all information needed to continue the task seamlessly.\n\n<conversation>\n{conversation}\n</conversation>\n\nCreate a summary with these sections:\n1. **Task Overview**: What is the agent trying to accomplish?\n2. **Current State**: What has been completed so far? What step is the agent on?\n3. **Important Discoveries**: Key facts, data, tool results, or findings that must not be lost.\n4. **Next Steps**: What should the agent do next based on the conversation?\n5. **Context to Preserve**: Any specific values, names, URLs, code snippets, or details referenced in the conversation.\n\nWrap your entire summary in <summary> tags.\n\n<summary>\n[Your structured summary here]\n</summary>",
"summary": "<summary>\n{merged_summary}\n</summary>\n\nContinue the task from where the conversation left off. The above is a structured summary of prior context.",
"summarizer_system_message": "You are a helpful assistant that summarizes text.",
"summarize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
"summary": "This is a summary of our conversation so far:\n{merged_summary}",
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
"formatted_task_instructions": "Format your final answer according to the following OpenAPI schema: {output_format}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or modify the meaning of the content. Only structure it to match the schema format.\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.",
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
@@ -55,19 +55,7 @@
"name": "Add image to content",
"description": "See image to understand its content, you can optionally ask a question about the image",
"default_action": "Please provide a detailed description of this image, including all visual elements, context, and any notable details you can observe."
},
"recall_memory": "Search through the team's shared memory for relevant information. Use this when you need to find facts, decisions, preferences, or past results that may have been stored previously. The query should describe what you're looking for in natural language.",
"save_to_memory": "Store an important fact, decision, observation, or lesson in memory so it can be recalled later by you or other agents. Use this when you encounter something worth remembering beyond the current task -- a decision made, a preference discovered, a key finding, or a correction."
},
"memory": {
"save_system": "You analyze content to be stored in a hierarchical memory system.\nGiven the content and the existing scopes and categories, output:\n1. suggested_scope: The best matching existing scope path, or a new path if none fit (use / for root).\n2. categories: A list of categories (reuse existing when relevant, add new ones if needed).\n3. importance: A number from 0.0 to 1.0 indicating how significant this memory is.\n4. extracted_metadata: A JSON object with any entities, dates, or topics you can extract.",
"query_system": "You analyze a query for searching memory.\nGiven the query and available scopes, output:\n1. keywords: Key entities or keywords that can be used to filter by category.\n2. suggested_scopes: Which available scopes are most relevant (empty for all).\n3. complexity: 'simple' or 'complex'.\n4. recall_queries: 1-3 short, targeted search phrases distilled from the query. Each should be a concise phrase optimized for semantic vector search. If the query is already short and focused, return it as-is in a single-item list. For long task descriptions, extract the distinct things worth searching for.\n5. time_filter: If the query references a time period (like 'last week', 'yesterday', 'in January'), return an ISO 8601 date string for the earliest relevant date (e.g. '2026-02-01'). Return null if no time constraint is implied.",
"extract_memories_system": "You extract discrete, reusable memory statements from raw content (e.g. a task description and its result).\n\nFor the given content, output a list of memory statements. Each memory must:\n- Be one clear sentence or short statement\n- Be understandable without the original context\n- Capture a decision, fact, outcome, preference, lesson, or observation worth remembering\n- NOT be a vague summary or a restatement of the task description\n- NOT duplicate the same idea in different words\n\nIf there is nothing worth remembering (e.g. empty result, no decisions or facts), return an empty list.\nOutput a JSON object with a single key \"memories\" whose value is a list of strings.",
"consolidation_system": "You are comparing new content against existing memories to decide how to consolidate them.\n\nFor each existing memory, choose one action:\n- 'keep': The existing memory is still accurate and not redundant with the new content.\n- 'update': The existing memory should be updated with new information. Provide the updated content.\n- 'delete': The existing memory is outdated, superseded, or contradicted by the new content.\n\nAlso decide whether the new content should be inserted as a separate memory:\n- insert_new=true: The new content adds information not fully captured by existing memories (even after updates).\n- insert_new=false: The new content is fully captured by the existing memories (after any updates).\n\nBe conservative: prefer 'keep' when unsure. Only 'update' or 'delete' when there is a clear contradiction, supersession, or redundancy.",
"extract_memories_user": "Content:\n{content}\n\nExtract memory statements as described. Return structured output.",
"save_user": "Content to store:\n{content}\n\nExisting scopes: {existing_scopes}\nExisting categories: {existing_categories}\n\nReturn the analysis as structured output.",
"query_user": "Query: {query}\n\nAvailable scopes: {available_scopes}\n{scope_desc}\n\nReturn the analysis as structured output.",
"consolidation_user": "New content to consider storing:\n{new_content}\n\nExisting similar memories:\n{records_summary}\n\nReturn the consolidation plan as structured output."
}
},
"reasoning": {
"initial_plan": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are creating a strategic plan for a task that requires your expertise and unique perspective.",

@@ -2,7 +2,6 @@ from __future__ import annotations

import asyncio
from collections.abc import Callable, Sequence
import concurrent.futures
import json
import re
from typing import TYPE_CHECKING, Any, Final, Literal, TypedDict
@@ -641,180 +640,6 @@ def handle_context_length(
)


def _estimate_token_count(text: str) -> int:
"""Estimate token count using a conservative cross-provider heuristic.

Args:
text: The text to estimate tokens for.

Returns:
Estimated token count (roughly 1 token per 4 characters).
"""
return len(text) // 4


def _format_messages_for_summary(messages: list[LLMMessage]) -> str:
"""Format messages with role labels for summarization.

Skips system messages. Handles None content, tool_calls, and
multimodal content blocks.

Args:
messages: List of messages to format.

Returns:
Role-labeled conversation text.
"""
lines: list[str] = []
for msg in messages:
role = msg.get("role", "user")
if role == "system":
continue

content = msg.get("content")
if content is None:
# Check for tool_calls on assistant messages with no content
tool_calls = msg.get("tool_calls")
if tool_calls:
tool_names = []
for tc in tool_calls:
func = tc.get("function", {})
name = (
func.get("name", "unknown")
if isinstance(func, dict)
else "unknown"
)
tool_names.append(name)
content = f"[Called tools: {', '.join(tool_names)}]"
else:
content = ""
elif isinstance(content, list):
# Multimodal content blocks — extract text parts
text_parts = [
block.get("text", "")
for block in content
if isinstance(block, dict) and block.get("type") == "text"
]
content = " ".join(text_parts) if text_parts else "[multimodal content]"

if role == "assistant":
label = "[ASSISTANT]:"
elif role == "tool":
tool_name = msg.get("name", "unknown")
label = f"[TOOL_RESULT ({tool_name})]:"
else:
label = "[USER]:"

lines.append(f"{label} {content}")

return "\n\n".join(lines)


def _split_messages_into_chunks(
messages: list[LLMMessage], max_tokens: int
) -> list[list[LLMMessage]]:
"""Split messages into chunks at message boundaries.

Excludes system messages from chunks. Each chunk stays under
max_tokens based on estimated token count.

Args:
messages: List of messages to split.
max_tokens: Maximum estimated tokens per chunk.

Returns:
List of message chunks.
"""
non_system = [m for m in messages if m.get("role") != "system"]
if not non_system:
return []

chunks: list[list[LLMMessage]] = []
current_chunk: list[LLMMessage] = []
current_tokens = 0

for msg in non_system:
content = msg.get("content")
if content is None:
msg_text = ""
elif isinstance(content, list):
msg_text = str(content)
else:
msg_text = str(content)

msg_tokens = _estimate_token_count(msg_text)

# If adding this message would exceed the limit and we already have
# messages in the current chunk, start a new chunk
if current_chunk and (current_tokens + msg_tokens) > max_tokens:
chunks.append(current_chunk)
current_chunk = []
current_tokens = 0

current_chunk.append(msg)
current_tokens += msg_tokens

if current_chunk:
chunks.append(current_chunk)

return chunks


def _extract_summary_tags(text: str) -> str:
"""Extract content between <summary></summary> tags.

Falls back to the full text if no tags are found.

Args:
text: Text potentially containing summary tags.

Returns:
Extracted summary content, or full text if no tags found.
"""
match = re.search(r"<summary>(.*?)</summary>", text, re.DOTALL)
if match:
return match.group(1).strip()
return text.strip()


async def _asummarize_chunks(
chunks: list[list[LLMMessage]],
llm: LLM | BaseLLM,
callbacks: list[TokenCalcHandler],
i18n: I18N,
) -> list[SummaryContent]:
"""Summarize multiple message chunks concurrently using asyncio.

Args:
chunks: List of message chunks to summarize.
llm: LLM instance (must support ``acall``).
callbacks: List of callbacks for the LLM.
i18n: I18N instance for prompt templates.

Returns:
Ordered list of summary contents, one per chunk.
"""

async def _summarize_one(chunk: list[LLMMessage]) -> SummaryContent:
conversation_text = _format_messages_for_summary(chunk)
summarization_messages = [
format_message_for_llm(
i18n.slice("summarizer_system_message"), role="system"
),
format_message_for_llm(
i18n.slice("summarize_instruction").format(
conversation=conversation_text
),
),
]
summary = await llm.acall(summarization_messages, callbacks=callbacks)
extracted = _extract_summary_tags(str(summary))
return {"content": extracted}

results = await asyncio.gather(*[_summarize_one(chunk) for chunk in chunks])
return list(results)


def summarize_messages(
messages: list[LLMMessage],
llm: LLM | BaseLLM,
@@ -824,10 +649,6 @@ def summarize_messages(
) -> None:
"""Summarize messages to fit within context window.

Uses structured context compaction: preserves system messages,
splits at message boundaries, formats with role labels, and
produces structured summaries for seamless task continuation.

Preserves any files attached to user messages and re-attaches them to
the summarized message. Files from all user messages are merged.

@@ -836,74 +657,49 @@
llm: LLM instance for summarization
callbacks: List of callbacks for LLM
i18n: I18N instance for messages
verbose: Whether to print progress.
"""
# 1. Extract & preserve file attachments from user messages
preserved_files: dict[str, Any] = {}
for msg in messages:
if msg.get("role") == "user" and msg.get("files"):
preserved_files.update(msg["files"])

# 2. Extract system messages — never summarize them
system_messages = [m for m in messages if m.get("role") == "system"]
non_system_messages = [m for m in messages if m.get("role") != "system"]
messages_string = " ".join(
[str(message.get("content", "")) for message in messages]
)
cut_size = llm.get_context_window_size()

# If there are only system messages (or no non-system messages), nothing to summarize
if not non_system_messages:
return
messages_groups = [
{"content": messages_string[i : i + cut_size]}
for i in range(0, len(messages_string), cut_size)
]

# 3. Split non-system messages into chunks at message boundaries
max_tokens = llm.get_context_window_size()
chunks = _split_messages_into_chunks(non_system_messages, max_tokens)
summarized_contents: list[SummaryContent] = []

# 4. Summarize each chunk with role-labeled formatting
total_chunks = len(chunks)

if total_chunks <= 1:
# Single chunk — no benefit from async overhead
summarized_contents: list[SummaryContent] = []
for idx, chunk in enumerate(chunks, 1):
if verbose:
Printer().print(
content=f"Summarizing {idx}/{total_chunks}...",
color="yellow",
)
conversation_text = _format_messages_for_summary(chunk)
summarization_messages = [
format_message_for_llm(
i18n.slice("summarizer_system_message"), role="system"
),
format_message_for_llm(
i18n.slice("summarize_instruction").format(
conversation=conversation_text
),
),
]
summary = llm.call(summarization_messages, callbacks=callbacks)
extracted = _extract_summary_tags(str(summary))
summarized_contents.append({"content": extracted})
else:
# Multiple chunks — summarize in parallel via asyncio
total_groups = len(messages_groups)
for idx, group in enumerate(messages_groups, 1):
if verbose:
Printer().print(
content=f"Summarizing {total_chunks} chunks in parallel...",
content=f"Summarizing {idx}/{total_groups}...",
color="yellow",
)
coro = _asummarize_chunks(
chunks=chunks, llm=llm, callbacks=callbacks, i18n=i18n
)

summarization_messages = [
format_message_for_llm(
i18n.slice("summarizer_system_message"), role="system"
),
format_message_for_llm(
i18n.slice("summarize_instruction").format(group=group["content"]),
),
]
summary = llm.call(
summarization_messages,
callbacks=callbacks,
)
if is_inside_event_loop():
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
summarized_contents = pool.submit(asyncio.run, coro).result()
else:
summarized_contents = asyncio.run(coro)
summarized_contents.append({"content": str(summary)})

merged_summary = "\n\n".join(content["content"] for content in summarized_contents)
merged_summary = " ".join(content["content"] for content in summarized_contents)

# 6. Reconstruct messages: [system messages...] + [summary user message]
messages.clear()
messages.extend(system_messages)

summary_message = format_message_for_llm(
i18n.slice("summary").format(merged_summary=merged_summary)
)

@@ -86,21 +86,10 @@ class I18N(BaseModel):
"""
return self.retrieve("tools", tool)

def memory(self, key: str) -> str:
"""Retrieve a memory prompt by key.

Args:
key: The key of the memory prompt to retrieve.

Returns:
The memory prompt as a string.
"""
return self.retrieve("memory", key)

def retrieve(
self,
kind: Literal[
"slices", "errors", "tools", "reasoning", "hierarchical_manager_agent", "memory"
"slices", "errors", "tools", "reasoning", "hierarchical_manager_agent"
],
key: str,
) -> str:

@@ -372,7 +372,10 @@ class TestFlowInvoke:
task.human_input = False

crew = Mock()
crew._memory = None
crew._short_term_memory = None
crew._long_term_memory = None
crew._entity_memory = None
crew._external_memory = None

agent = Mock()
agent.role = "Test"
@@ -395,10 +398,14 @@ class TestFlowInvoke:
}

@patch.object(AgentExecutor, "kickoff")
@patch.object(AgentExecutor, "_save_to_memory")
@patch.object(AgentExecutor, "_create_short_term_memory")
@patch.object(AgentExecutor, "_create_long_term_memory")
@patch.object(AgentExecutor, "_create_external_memory")
def test_invoke_success(
self,
mock_save_to_memory,
mock_external_memory,
mock_long_term_memory,
mock_short_term_memory,
mock_kickoff,
mock_dependencies,
):
@@ -418,7 +425,9 @@ class TestFlowInvoke:

assert result == {"output": "Final result"}
mock_kickoff.assert_called_once()
mock_save_to_memory.assert_called_once()
mock_short_term_memory.assert_called_once()
mock_long_term_memory.assert_called_once()
mock_external_memory.assert_called_once()

@patch.object(AgentExecutor, "kickoff")
def test_invoke_failure_no_agent_finish(self, mock_kickoff, mock_dependencies):
@@ -434,10 +443,14 @@ class TestFlowInvoke:
executor.invoke(inputs)

@patch.object(AgentExecutor, "kickoff")
@patch.object(AgentExecutor, "_save_to_memory")
@patch.object(AgentExecutor, "_create_short_term_memory")
@patch.object(AgentExecutor, "_create_long_term_memory")
@patch.object(AgentExecutor, "_create_external_memory")
def test_invoke_with_system_prompt(
self,
mock_save_to_memory,
mock_external_memory,
mock_long_term_memory,
mock_short_term_memory,
mock_kickoff,
mock_dependencies,
):
@@ -457,7 +470,9 @@ class TestFlowInvoke:

inputs = {"input": "test", "tool_names": "", "tools": ""}
result = executor.invoke(inputs)
mock_save_to_memory.assert_called_once()
mock_short_term_memory.assert_called_once()
mock_long_term_memory.assert_called_once()
mock_external_memory.assert_called_once()
mock_kickoff.assert_called_once()

assert result == {"output": "Done"}

@@ -95,14 +95,16 @@ class TestAsyncAgentExecutor:
),
):
with patch.object(executor, "_show_start_logs"):
with patch.object(executor, "_save_to_memory"):
result = await executor.ainvoke(
{
"input": "test input",
"tool_names": "",
"tools": "",
}
)
with patch.object(executor, "_create_short_term_memory"):
with patch.object(executor, "_create_long_term_memory"):
with patch.object(executor, "_create_external_memory"):
result = await executor.ainvoke(
{
"input": "test input",
"tool_names": "",
"tools": "",
}
)

assert result == {"output": expected_output}

@@ -271,14 +273,16 @@ class TestAsyncAgentExecutor:
):
with patch.object(executor, "_show_start_logs"):
with patch.object(executor, "_show_logs"):
with patch.object(executor, "_save_to_memory"):
return await executor.ainvoke(
{
"input": f"test {executor_id}",
"tool_names": "",
"tools": "",
}
)
with patch.object(executor, "_create_short_term_memory"):
with patch.object(executor, "_create_long_term_memory"):
with patch.object(executor, "_create_external_memory"):
return await executor.ainvoke(
{
"input": f"test {executor_id}",
"tool_names": "",
"tools": "",
}
)

results = await asyncio.gather(
create_and_run_executor(1),

@@ -16,7 +16,6 @@ import pytest
from crewai import LLM, Agent
from crewai.flow import Flow, start
from crewai.tools import BaseTool
from crewai.types.usage_metrics import UsageMetrics


# A simple test tool
@@ -1065,97 +1064,3 @@ def test_lite_agent_verbose_false_suppresses_printer_output():
agent2.kickoff("Say hello")

mock_printer.print.assert_not_called()


# --- LiteAgent memory integration ---


@pytest.mark.filterwarnings("ignore:LiteAgent is deprecated")
def test_lite_agent_memory_none_default():
"""With memory=None (default), _memory is None and no memory is used."""
mock_llm = Mock(spec=LLM)
mock_llm.call.return_value = "Final Answer: Ok"
mock_llm.stop = []
mock_llm.get_token_usage_summary.return_value = UsageMetrics(
total_tokens=10,
prompt_tokens=5,
completion_tokens=5,
cached_prompt_tokens=0,
successful_requests=1,
)
agent = LiteAgent(
role="Test",
goal="Test goal",
backstory="Test backstory",
llm=mock_llm,
memory=None,
verbose=False,
)
assert agent._memory is None


@pytest.mark.filterwarnings("ignore:LiteAgent is deprecated")
def test_lite_agent_memory_true_resolves_to_default_memory():
"""With memory=True, _memory is a Memory instance."""
from crewai.memory.unified_memory import Memory

mock_llm = Mock(spec=LLM)
mock_llm.call.return_value = "Final Answer: Ok"
mock_llm.stop = []
mock_llm.get_token_usage_summary.return_value = UsageMetrics(
total_tokens=10,
prompt_tokens=5,
completion_tokens=5,
cached_prompt_tokens=0,
successful_requests=1,
)
agent = LiteAgent(
role="Test",
goal="Test goal",
backstory="Test backstory",
llm=mock_llm,
memory=True,
verbose=False,
)
assert agent._memory is not None
assert isinstance(agent._memory, Memory)


@pytest.mark.filterwarnings("ignore:LiteAgent is deprecated")
def test_lite_agent_memory_instance_recall_and_save_called():
"""With a custom memory instance, kickoff calls recall and then extract_memories/remember."""
mock_llm = Mock(spec=LLM)
mock_llm.call.return_value = "Final Answer: The answer is 42."
mock_llm.stop = []
mock_llm.supports_stop_words.return_value = False
mock_llm.get_token_usage_summary.return_value = UsageMetrics(
total_tokens=10,
prompt_tokens=5,
completion_tokens=5,
cached_prompt_tokens=0,
successful_requests=1,
)
mock_memory = Mock()
mock_memory.recall.return_value = []
mock_memory.extract_memories.return_value = ["Fact one.", "Fact two."]

agent = LiteAgent(
role="Test",
goal="Test goal",
backstory="Test backstory",
llm=mock_llm,
memory=mock_memory,
verbose=False,
)
assert agent._memory is mock_memory

agent.kickoff("What is the answer?")

mock_memory.recall.assert_called_once()
call_kw = mock_memory.recall.call_args[1]
assert call_kw.get("limit") == 10
# depth is not passed explicitly; Memory.recall() defaults to "deep"
mock_memory.extract_memories.assert_called_once()
assert mock_memory.remember.call_count == 2
mock_memory.remember.assert_any_call("Fact one.")
mock_memory.remember.assert_any_call("Fact two.")

@@ -1,332 +0,0 @@
interactions:
- request:
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"Say
hello in one word.","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","stream":false,"system":"You
are a helpful assistant. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. "}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-length:
- '5918'
content-type:
- application/json
host:
- api.anthropic.com
x-api-key:
- X-API-KEY-XXX
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 0.73.0
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
x-stainless-timeout:
- NOT_GIVEN
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_013xTaKq41TFn6drdxt1mFdx","type":"message","role":"assistant","content":[{"type":"text","text":"Hello!"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":5,"service_tier":"standard","inference_geo":"not_available"}}'
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Security-Policy:
- CSP-FILTERED
Content-Type:
- application/json
Date:
- Tue, 10 Feb 2026 18:27:40 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
anthropic-organization-id:
- ANTHROPIC-ORGANIZATION-ID-XXX
anthropic-ratelimit-input-tokens-limit:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-input-tokens-remaining:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-input-tokens-reset:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
anthropic-ratelimit-output-tokens-limit:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-output-tokens-remaining:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-output-tokens-reset:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
anthropic-ratelimit-tokens-limit:
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
anthropic-ratelimit-tokens-remaining:
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
anthropic-ratelimit-tokens-reset:
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
cf-cache-status:
- DYNAMIC
request-id:
- REQUEST-ID-XXX
strict-transport-security:
- STS-XXX
x-envoy-upstream-service-time:
- '726'
status:
code: 200
message: OK
- request:
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"Say
goodbye in one word.","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","stream":false,"system":"You
are a helpful assistant. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. "}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-length:
- '5920'
content-type:
- application/json
host:
- api.anthropic.com
x-api-key:
- X-API-KEY-XXX
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 0.73.0
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
x-stainless-timeout:
- NOT_GIVEN
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_01LdueHX7nvf19wD8Uxn4EZD","type":"message","role":"assistant","content":[{"type":"text","text":"Goodbye"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":5,"service_tier":"standard","inference_geo":"not_available"}}'
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Security-Policy:
- CSP-FILTERED
Content-Type:
- application/json
Date:
- Tue, 10 Feb 2026 18:27:41 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
anthropic-organization-id:
- ANTHROPIC-ORGANIZATION-ID-XXX
anthropic-ratelimit-input-tokens-limit:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-input-tokens-remaining:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-input-tokens-reset:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
anthropic-ratelimit-output-tokens-limit:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-output-tokens-remaining:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-output-tokens-reset:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
anthropic-ratelimit-tokens-limit:
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
anthropic-ratelimit-tokens-remaining:
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
anthropic-ratelimit-tokens-reset:
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
cf-cache-status:
- DYNAMIC
request-id:
- REQUEST-ID-XXX
strict-transport-security:
- STS-XXX
x-envoy-upstream-service-time:
- '759'
status:
code: 200
message: OK
version: 1
@@ -1,336 +0,0 @@
interactions:
- request:
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"What
is the weather in Tokyo?","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","stream":false,"system":"You
are a helpful assistant that uses tools. This is padding text to ensure the
prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. ","tool_choice":{"type":"tool","name":"get_weather"},"tools":[{"name":"get_weather","description":"Get
the current weather for a location","input_schema":{"type":"object","properties":{"location":{"type":"string","description":"The
city name"}},"required":["location"]}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-length:
- '6211'
content-type:
- application/json
host:
- api.anthropic.com
x-api-key:
- X-API-KEY-XXX
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 0.73.0
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
x-stainless-timeout:
- NOT_GIVEN
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_01WhFk2ppoz43nbh4uNhXBfL","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_01CX1yZuJ5MQaJbXNSrnCiqf","name":"get_weather","input":{"location":"Tokyo"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":24,"cache_creation_input_tokens":0,"cache_read_input_tokens":1857,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":33,"service_tier":"standard","inference_geo":"not_available"}}'
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Security-Policy:
- CSP-FILTERED
Content-Type:
- application/json
Date:
- Tue, 10 Feb 2026 18:27:38 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
anthropic-organization-id:
- ANTHROPIC-ORGANIZATION-ID-XXX
anthropic-ratelimit-input-tokens-limit:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-input-tokens-remaining:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-input-tokens-reset:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
anthropic-ratelimit-output-tokens-limit:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-output-tokens-remaining:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-output-tokens-reset:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
anthropic-ratelimit-tokens-limit:
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
anthropic-ratelimit-tokens-remaining:
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
anthropic-ratelimit-tokens-reset:
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
cf-cache-status:
- DYNAMIC
request-id:
- REQUEST-ID-XXX
strict-transport-security:
- STS-XXX
x-envoy-upstream-service-time:
- '1390'
status:
code: 200
message: OK
- request:
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"What
is the weather in Paris?","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","stream":false,"system":"You
are a helpful assistant that uses tools. This is padding text to ensure the
prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. This is padding text
to ensure the prompt is large enough for caching. This is padding text to ensure
the prompt is large enough for caching. This is padding text to ensure the prompt
is large enough for caching. This is padding text to ensure the prompt is large
enough for caching. This is padding text to ensure the prompt is large enough
for caching. This is padding text to ensure the prompt is large enough for caching.
This is padding text to ensure the prompt is large enough for caching. This
is padding text to ensure the prompt is large enough for caching. This is padding
text to ensure the prompt is large enough for caching. ","tool_choice":{"type":"tool","name":"get_weather"},"tools":[{"name":"get_weather","description":"Get
the current weather for a location","input_schema":{"type":"object","properties":{"location":{"type":"string","description":"The
city name"}},"required":["location"]}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-length:
- '6211'
content-type:
- application/json
host:
- api.anthropic.com
x-api-key:
- X-API-KEY-XXX
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 0.73.0
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
x-stainless-timeout:
- NOT_GIVEN
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_01Nmw5NyAEwCLGjpVnf15rh4","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_01DEe9K7N4EfhPFqxHhqEHCE","name":"get_weather","input":{"location":"Paris"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":24,"cache_creation_input_tokens":0,"cache_read_input_tokens":1857,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":33,"service_tier":"standard","inference_geo":"not_available"}}'
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Security-Policy:
- CSP-FILTERED
Content-Type:
- application/json
Date:
- Tue, 10 Feb 2026 18:27:40 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
anthropic-organization-id:
- ANTHROPIC-ORGANIZATION-ID-XXX
anthropic-ratelimit-input-tokens-limit:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-input-tokens-remaining:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-input-tokens-reset:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
anthropic-ratelimit-output-tokens-limit:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-output-tokens-remaining:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-output-tokens-reset:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
anthropic-ratelimit-tokens-limit:
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
anthropic-ratelimit-tokens-remaining:
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
anthropic-ratelimit-tokens-reset:
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
cf-cache-status:
- DYNAMIC
request-id:
- REQUEST-ID-XXX
strict-transport-security:
- STS-XXX
x-envoy-upstream-service-time:
- '1259'
status:
code: 200
message: OK
version: 1
@@ -1,411 +0,0 @@
interactions:
- request:
    body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"Say
      hello in one word.","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","system":"You
      are a helpful assistant. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. ","stream":true}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      anthropic-version:
      - '2023-06-01'
      connection:
      - keep-alive
      content-length:
      - '5917'
      content-type:
      - application/json
      host:
      - api.anthropic.com
      x-api-key:
      - X-API-KEY-XXX
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 0.73.0
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
      x-stainless-stream-helper:
      - messages
      x-stainless-timeout:
      - NOT_GIVEN
    method: POST
    uri: https://api.anthropic.com/v1/messages
  response:
    body:
      string: 'event: message_start

        data: {"type":"message_start","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_01LshZroyEGgd3HfDrKdQMLm","type":"message","role":"assistant","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":4,"service_tier":"standard","inference_geo":"not_available"}} }


        event: content_block_start

        data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} }


        event: ping

        data: {"type": "ping"}


        event: content_block_delta

        data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hello"} }


        event: content_block_stop

        data: {"type":"content_block_stop","index":0 }


        event: message_delta

        data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"output_tokens":4}
        }


        event: message_stop

        data: {"type":"message_stop" }


        '
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Cache-Control:
      - no-cache
      Connection:
      - keep-alive
      Content-Security-Policy:
      - CSP-FILTERED
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Tue, 10 Feb 2026 18:27:43 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Robots-Tag:
      - none
      anthropic-organization-id:
      - ANTHROPIC-ORGANIZATION-ID-XXX
      anthropic-ratelimit-input-tokens-limit:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-input-tokens-remaining:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-input-tokens-reset:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-output-tokens-limit:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-output-tokens-remaining:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-output-tokens-reset:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-tokens-limit:
      - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-tokens-remaining:
      - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-tokens-reset:
      - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
      cf-cache-status:
      - DYNAMIC
      request-id:
      - REQUEST-ID-XXX
      strict-transport-security:
      - STS-XXX
      x-envoy-upstream-service-time:
      - '837'
    status:
      code: 200
      message: OK
- request:
    body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"Say
      goodbye in one word.","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","system":"You
      are a helpful assistant. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. This is padding text to ensure the prompt
      is large enough for caching. This is padding text to ensure the prompt is large
      enough for caching. This is padding text to ensure the prompt is large enough
      for caching. This is padding text to ensure the prompt is large enough for caching.
      This is padding text to ensure the prompt is large enough for caching. This
      is padding text to ensure the prompt is large enough for caching. This is padding
      text to ensure the prompt is large enough for caching. This is padding text
      to ensure the prompt is large enough for caching. This is padding text to ensure
      the prompt is large enough for caching. ","stream":true}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      anthropic-version:
      - '2023-06-01'
      connection:
      - keep-alive
      content-length:
      - '5919'
      content-type:
      - application/json
      host:
      - api.anthropic.com
      x-api-key:
      - X-API-KEY-XXX
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 0.73.0
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
      x-stainless-stream-helper:
      - messages
      x-stainless-timeout:
      - NOT_GIVEN
    method: POST
    uri: https://api.anthropic.com/v1/messages
  response:
    body:
      string: 'event: message_start

        data: {"type":"message_start","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_01MZSWarEUbFXmek8aEpwKDu","type":"message","role":"assistant","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":6,"service_tier":"standard","inference_geo":"not_available"}} }


        event: content_block_start

        data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}}


        event: ping

        data: {"type": "ping"}


        event: content_block_delta

        data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Goodbye."} }


        event: content_block_stop

        data: {"type":"content_block_stop","index":0 }


        event: message_delta

        data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"output_tokens":6} }


        event: message_stop

        data: {"type":"message_stop" }


        '
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Cache-Control:
      - no-cache
      Connection:
      - keep-alive
      Content-Security-Policy:
      - CSP-FILTERED
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Tue, 10 Feb 2026 18:27:44 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Robots-Tag:
      - none
      anthropic-organization-id:
      - ANTHROPIC-ORGANIZATION-ID-XXX
      anthropic-ratelimit-input-tokens-limit:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-input-tokens-remaining:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-input-tokens-reset:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-output-tokens-limit:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-output-tokens-remaining:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-output-tokens-reset:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-tokens-limit:
      - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-tokens-remaining:
      - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-tokens-reset:
      - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
      cf-cache-status:
      - DYNAMIC
      request-id:
      - REQUEST-ID-XXX
      strict-transport-security:
      - STS-XXX
      x-envoy-upstream-service-time:
      - '870'
    status:
      code: 200
      message: OK
version: 1
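Cassettes like the two above are replayed by vcrpy during tests, so the suite can exercise the Anthropic client's prompt-caching usage counters without ever calling api.anthropic.com. The following is a minimal sketch of that replay pattern; the cassette path, test name, and assertion are illustrative assumptions, not code from this diff.

# Minimal sketch of replaying a recorded cassette with vcrpy (replay-only mode).
# The cassette path below is hypothetical -- substitute the real fixture file.
import vcr
from anthropic import Anthropic

replay = vcr.VCR(record_mode="none")  # "none": only replay recorded interactions, never record


def test_prompt_caching_is_replayed() -> None:
    with replay.use_cassette("tests/cassettes/anthropic_prompt_caching.yaml"):
        client = Anthropic(api_key="X-API-KEY-XXX")  # matches the scrubbed header value
        response = client.messages.create(
            model="claude-sonnet-4-5-20250929",
            max_tokens=4096,
            messages=[{"role": "user", "content": "Say hello in one word."}],
        )
    # The recorded usage block carries the cache counters under test,
    # e.g. cache_read_input_tokens=1217 in the streamed responses above.
    assert response.usage.cache_read_input_tokens > 0

Because record_mode is "none", a request that does not match the recorded ones raises an error instead of hitting the live API, which keeps the fixture authoritative.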