Compare commits
37 Commits
copilot/fi
...
devin/1755
| Author | SHA1 | Date |
|---|---|---|
| | 774dbe3474 | |
| | f96b779df5 | |
| | 842bed4e9c | |
| | 1217935b31 | |
| | 641c156c17 | |
| | 7fdf9f9290 | |
| | c0d2bf4c12 | |
| | ed187b495b | |
| | 2773996b49 | |
| | 95923b78c6 | |
| | 7065ad4336 | |
| | d6254918fd | |
| | 95e3d6db7a | |
| | d7f8002baa | |
| | d743e12a06 | |
| | 6068fe941f | |
| | 2a0cefc98b | |
| | a4f65e4870 | |
| | a1b3edd79c | |
| | 80b3d9689a | |
| | ec03a53121 | |
| | 2fdf3f3a6a | |
| | 1d3d7ebf5e | |
| | 2c2196f415 | |
| | c9f30b175c | |
| | a17b93a7f8 | |
| | 0d3e462791 | |
| | 947c9552f0 | |
| | 04a03d332f | |
| | 992e093610 | |
| | 07f8e73958 | |
| | 66c2fa1623 | |
| | 7a52cc9667 | |
| | 8b686fb0c6 | |
| | dc6771ae95 | |
| | e9b1e5a8f6 | |
| | 57c787f919 | |
.gitignore (vendored) · 1 change

@@ -21,7 +21,6 @@ crew_tasks_output.json
.mypy_cache
.ruff_cache
.venv
agentops.log
test_flow.html
crewairules.mdc
plan.md
@@ -226,7 +226,6 @@
"group": "Observability",
"pages": [
"en/observability/overview",
"en/observability/agentops",
"en/observability/arize-phoenix",
"en/observability/langdb",
"en/observability/langfuse",
@@ -238,7 +237,8 @@
"en/observability/opik",
"en/observability/patronus-evaluation",
"en/observability/portkey",
"en/observability/weave"
"en/observability/weave",
"en/observability/truefoundry"
]
},
{
@@ -281,6 +281,7 @@
{
"group": "Features",
"pages": [
"en/enterprise/features/rbac",
"en/enterprise/features/tool-repository",
"en/enterprise/features/webhook-streaming",
"en/enterprise/features/traces",
@@ -319,6 +320,7 @@
"en/enterprise/guides/update-crew",
"en/enterprise/guides/enable-crew-studio",
"en/enterprise/guides/azure-openai-setup",
"en/enterprise/guides/automation-triggers",
"en/enterprise/guides/hubspot-trigger",
"en/enterprise/guides/react-component-export",
"en/enterprise/guides/salesforce-trigger",
@@ -340,11 +342,12 @@
"groups": [
{
"group": "Getting Started",
"pages": ["en/api-reference/introduction"]
},
{
"group": "Endpoints",
"openapi": "enterprise-api.yaml"
"pages": [
"en/api-reference/introduction",
"en/api-reference/inputs",
"en/api-reference/kickoff",
"en/api-reference/status"
]
}
]
},
@@ -353,7 +356,7 @@
"groups": [
{
"group": "Examples",
"pages": ["en/examples/example"]
"pages": ["en/examples/example", "en/examples/cookbooks"]
}
]
}
@@ -564,7 +567,6 @@
"group": "Observabilidade",
"pages": [
"pt-BR/observability/overview",
"pt-BR/observability/agentops",
"pt-BR/observability/arize-phoenix",
"pt-BR/observability/langdb",
"pt-BR/observability/langfuse",
@@ -575,7 +577,8 @@
"pt-BR/observability/opik",
"pt-BR/observability/patronus-evaluation",
"pt-BR/observability/portkey",
"pt-BR/observability/weave"
"pt-BR/observability/weave",
"pt-BR/observability/truefoundry"
]
},
{
@@ -618,6 +621,7 @@
{
"group": "Funcionalidades",
"pages": [
"pt-BR/enterprise/features/rbac",
"pt-BR/enterprise/features/tool-repository",
"pt-BR/enterprise/features/webhook-streaming",
"pt-BR/enterprise/features/traces",
@@ -655,6 +659,7 @@
"pt-BR/enterprise/guides/update-crew",
"pt-BR/enterprise/guides/enable-crew-studio",
"pt-BR/enterprise/guides/azure-openai-setup",
"pt-BR/enterprise/guides/automation-triggers",
"pt-BR/enterprise/guides/hubspot-trigger",
"pt-BR/enterprise/guides/react-component-export",
"pt-BR/enterprise/guides/salesforce-trigger",
@@ -678,11 +683,12 @@
"groups": [
{
"group": "Começando",
"pages": ["pt-BR/api-reference/introduction"]
},
{
"group": "Endpoints",
"openapi": "enterprise-api.yaml"
"pages": [
"pt-BR/api-reference/introduction",
"pt-BR/api-reference/inputs",
"pt-BR/api-reference/kickoff",
"pt-BR/api-reference/status"
]
}
]
},
@@ -691,7 +697,7 @@
"groups": [
{
"group": "Exemplos",
"pages": ["pt-BR/examples/example"]
"pages": ["pt-BR/examples/example", "pt-BR/examples/cookbooks"]
}
]
}
@@ -707,7 +713,7 @@
"icon": "globe"
},
{
"anchor": "법정",
"anchor": "포럼",
"href": "https://community.crewai.com",
"icon": "discourse"
},
@@ -717,7 +723,7 @@
"icon": "robot"
},
{
"anchor": "출시",
"anchor": "릴리스",
"href": "https://github.com/crewAIInc/crewAI/releases",
"icon": "tag"
}
@@ -732,22 +738,22 @@
"pages": ["ko/introduction", "ko/installation", "ko/quickstart"]
},
{
"group": "안내서",
"group": "가이드",
"pages": [
{
"group": "전략",
"pages": ["ko/guides/concepts/evaluating-use-cases"]
},
{
"group": "Agents",
"group": "에이전트 (Agents)",
"pages": ["ko/guides/agents/crafting-effective-agents"]
},
{
"group": "Crews",
"group": "크루 (Crews)",
"pages": ["ko/guides/crews/first-crew"]
},
{
"group": "Flows",
"group": "플로우 (Flows)",
"pages": [
"ko/guides/flows/first-flow",
"ko/guides/flows/mastering-flow-state"
@@ -795,7 +801,7 @@
]
},
{
"group": "도구",
"group": "도구 (Tools)",
"pages": [
"ko/tools/overview",
{
@@ -885,7 +891,7 @@
]
},
{
"group": "클라우드 & 저장",
"group": "클라우드 & 스토리지",
"pages": [
"ko/tools/cloud-storage/overview",
"ko/tools/cloud-storage/s3readertool",
@@ -907,10 +913,9 @@
]
},
{
"group": "오브저버빌리티",
"group": "Observability",
"pages": [
"ko/observability/overview",
"ko/observability/agentops",
"ko/observability/arize-phoenix",
"ko/observability/langdb",
"ko/observability/langfuse",
@@ -926,7 +931,7 @@
]
},
{
"group": "익히다",
"group": "학습",
"pages": [
"ko/learn/overview",
"ko/learn/llm-selection-guide",
@@ -950,13 +955,13 @@
]
},
{
"group": "원격측정",
"group": "Telemetry",
"pages": ["ko/telemetry"]
}
]
},
{
"tab": "기업",
"tab": "엔터프라이즈",
"groups": [
{
"group": "시작 안내",
@@ -965,6 +970,7 @@
{
"group": "특징",
"pages": [
"ko/enterprise/features/rbac",
"ko/enterprise/features/tool-repository",
"ko/enterprise/features/webhook-streaming",
"ko/enterprise/features/traces",
@@ -995,7 +1001,7 @@
]
},
{
"group": "사용 안내서",
"group": "How-To Guides",
"pages": [
"ko/enterprise/guides/build-crew",
"ko/enterprise/guides/deploy-crew",
@@ -1003,6 +1009,7 @@
"ko/enterprise/guides/update-crew",
"ko/enterprise/guides/enable-crew-studio",
"ko/enterprise/guides/azure-openai-setup",
"ko/enterprise/guides/automation-triggers",
"ko/enterprise/guides/hubspot-trigger",
"ko/enterprise/guides/react-component-export",
"ko/enterprise/guides/salesforce-trigger",
@@ -1024,11 +1031,12 @@
"groups": [
{
"group": "시작 안내",
"pages": ["ko/api-reference/introduction"]
},
{
"group": "Endpoints",
"openapi": "enterprise-api.yaml"
"pages": [
"ko/api-reference/introduction",
"ko/api-reference/inputs",
"ko/api-reference/kickoff",
"ko/api-reference/status"
]
}
]
},
@@ -1037,7 +1045,7 @@
"groups": [
{
"group": "예시",
"pages": ["ko/examples/example"]
"pages": ["ko/examples/example", "ko/examples/cookbooks"]
}
]
}
@@ -1079,6 +1087,10 @@
"indexing": "all"
},
"redirects": [
{
"source": "/api-reference",
"destination": "/en/api-reference/introduction"
},
{
"source": "/introduction",
"destination": "/en/introduction"
@@ -1131,6 +1143,18 @@
"source": "/api-reference/:path*",
"destination": "/en/api-reference/:path*"
},
{
"source": "/en/api-reference",
"destination": "/en/api-reference/introduction"
},
{
"source": "/pt-BR/api-reference",
"destination": "/pt-BR/api-reference/introduction"
},
{
"source": "/ko/api-reference",
"destination": "/ko/api-reference/introduction"
},
{
"source": "/examples/:path*",
"destination": "/en/examples/:path*"
docs/en/api-reference/inputs.mdx (new file) · 7 lines

@@ -0,0 +1,7 @@
---
title: "GET /inputs"
description: "Get required inputs for your crew"
openapi: "/enterprise-api.en.yaml GET /inputs"
---

docs/en/api-reference/kickoff.mdx (new file) · 7 lines

@@ -0,0 +1,7 @@
---
title: "POST /kickoff"
description: "Start a crew execution"
openapi: "/enterprise-api.en.yaml POST /kickoff"
---

docs/en/api-reference/status.mdx (new file) · 7 lines

@@ -0,0 +1,7 @@
---
title: "GET /status/{kickoff_id}"
description: "Get execution status"
openapi: "/enterprise-api.en.yaml GET /status/{kickoff_id}"
---
@@ -177,14 +177,7 @@ class MyCustomCrew:
    # Your crew implementation...
```

This is exactly how CrewAI's built-in `agentops_listener` is registered. In the CrewAI codebase, you'll find:

```python
# src/crewai/utilities/events/third_party/__init__.py
from .agentops_listener import agentops_listener
```

This ensures the `agentops_listener` is loaded when the `crewai.utilities.events` package is imported.
This is how third-party event listeners are registered in the CrewAI codebase.

## Available Event Types

@@ -280,77 +273,6 @@ The structure of the event object depends on the event type, but all events inhe

Additional fields vary by event type. For example, `CrewKickoffCompletedEvent` includes `crew_name` and `output` fields.
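To make the field access concrete, here is a minimal, hedged sketch of a listener that logs those two fields when a crew finishes. It reuses the `BaseEventListener` and event-bus registration pattern shown elsewhere on this page; the listener name is illustrative, and the field names come from the sentence above rather than from every CrewAI release.

```python
from crewai.utilities.events import CrewKickoffCompletedEvent
from crewai.utilities.events.base_event_listener import BaseEventListener


class KickoffLoggingListener(BaseEventListener):
    """Logs the crew name and final output when a kickoff completes."""

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(CrewKickoffCompletedEvent)
        def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
            # `crew_name` and `output` are the event-specific fields mentioned
            # above; other event types carry different fields.
            print(f"Crew '{event.crew_name}' finished with output: {event.output}")


# Instantiate at module level so handlers are registered on import,
# mirroring how the built-in agentops_listener is wired up.
kickoff_logging_listener = KickoffLoggingListener()
```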

## Real-World Example: Integration with AgentOps

CrewAI includes an example of a third-party integration with [AgentOps](https://github.com/AgentOps-AI/agentops), a monitoring and observability platform for AI agents. Here's how it's implemented:

```python
from typing import Optional

from crewai.utilities.events import (
    CrewKickoffCompletedEvent,
    ToolUsageErrorEvent,
    ToolUsageStartedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
from crewai.utilities.events.task_events import TaskEvaluationEvent

try:
    import agentops
    AGENTOPS_INSTALLED = True
except ImportError:
    AGENTOPS_INSTALLED = False

class AgentOpsListener(BaseEventListener):
    tool_event: Optional["agentops.ToolEvent"] = None
    session: Optional["agentops.Session"] = None

    def __init__(self):
        super().__init__()

    def setup_listeners(self, crewai_event_bus):
        if not AGENTOPS_INSTALLED:
            return

        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
            self.session = agentops.init()
            for agent in source.agents:
                if self.session:
                    self.session.create_agent(
                        name=agent.role,
                        agent_id=str(agent.id),
                    )

        @crewai_event_bus.on(CrewKickoffCompletedEvent)
        def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
            if self.session:
                self.session.end_session(
                    end_state="Success",
                    end_state_reason="Finished Execution",
                )

        @crewai_event_bus.on(ToolUsageStartedEvent)
        def on_tool_usage_started(source, event: ToolUsageStartedEvent):
            self.tool_event = agentops.ToolEvent(name=event.tool_name)
            if self.session:
                self.session.record(self.tool_event)

        @crewai_event_bus.on(ToolUsageErrorEvent)
        def on_tool_usage_error(source, event: ToolUsageErrorEvent):
            agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
```

This listener initializes an AgentOps session when a Crew starts, registers agents with AgentOps, tracks tool usage, and ends the session when the Crew completes.

The AgentOps listener is registered in CrewAI's event system through the import in `src/crewai/utilities/events/third_party/__init__.py`:

```python
from .agentops_listener import agentops_listener
```

This ensures the `agentops_listener` is loaded when the `crewai.utilities.events` package is imported.

## Advanced Usage: Scoped Handlers

@@ -539,16 +539,71 @@ crew = Crew(
)
```

### Mem0 Provider

Short-Term Memory and Entity Memory both support tight integration with Mem0 OSS and the Mem0 Client as a provider. Here is how you can use Mem0 as a provider.

```python
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.memory.entity.entity_memory import EntityMemory

mem0_oss_embedder_config = {
    "provider": "mem0",
    "config": {
        "user_id": "john",
        "local_mem0_config": {
            "vector_store": {"provider": "qdrant", "config": {"host": "localhost", "port": 6333}},
            "llm": {"provider": "openai", "config": {"api_key": "your-api-key", "model": "gpt-4"}},
            "embedder": {"provider": "openai", "config": {"api_key": "your-api-key", "model": "text-embedding-3-small"}}
        },
        "infer": True  # Optional, defaults to True
    },
}


mem0_client_embedder_config = {
    "provider": "mem0",
    "config": {
        "user_id": "john",
        "org_id": "my_org_id",  # Optional
        "project_id": "my_project_id",  # Optional
        "api_key": "custom-api-key",  # Optional - overrides env var
        "run_id": "my_run_id",  # Optional - for short-term memory
        "includes": "include1",  # Optional
        "excludes": "exclude1",  # Optional
        "infer": True,  # Optional, defaults to True
        "custom_categories": new_categories  # Optional - custom categories for user memory
    },
}


short_term_memory_mem0_oss = ShortTermMemory(embedder_config=mem0_oss_embedder_config)  # Short-Term Memory with Mem0 OSS
short_term_memory_mem0_client = ShortTermMemory(embedder_config=mem0_client_embedder_config)  # Short-Term Memory with Mem0 Client
entity_memory_mem0_oss = EntityMemory(embedder_config=mem0_oss_embedder_config)  # Entity Memory with Mem0 OSS
entity_memory_mem0_client = EntityMemory(embedder_config=mem0_client_embedder_config)  # Entity Memory with Mem0 Client

crew = Crew(
    memory=True,
    short_term_memory=short_term_memory_mem0_oss,  # or short_term_memory_mem0_client
    entity_memory=entity_memory_mem0_oss  # or entity_memory_mem0_client
)
```

### Choosing the Right Embedding Provider

| Provider | Best For | Pros | Cons |
|:---------|:----------|:------|:------|
| **OpenAI** | General use, reliability | High quality, well-tested | Cost, requires API key |
| **Ollama** | Privacy, cost savings | Free, local, private | Requires local setup |
| **Google AI** | Google ecosystem | Good performance | Requires Google account |
| **Azure OpenAI** | Enterprise, compliance | Enterprise features | Complex setup |
| **Cohere** | Multilingual content | Great language support | Specialized use case |
| **VoyageAI** | Retrieval tasks | Optimized for search | Newer provider |
When selecting an embedding provider, consider factors like performance, privacy, cost, and integration needs.
Below is a comparison to help you decide:

| Provider | Best For | Pros | Cons |
| -------------- | ------------------------------ | --------------------------------- | ------------------------- |
| **OpenAI** | General use, high reliability | High quality, widely tested | Paid service, API key required |
| **Ollama** | Privacy-focused, cost savings | Free, runs locally, fully private | Requires local installation/setup |
| **Google AI** | Integration in Google ecosystem| Strong performance, good support | Google account required |
| **Azure OpenAI** | Enterprise & compliance needs| Enterprise-grade features, security | More complex setup process |
| **Cohere** | Multilingual content handling | Excellent language support | More niche use cases |
| **VoyageAI** | Information retrieval & search | Optimized for retrieval tasks | Relatively new provider |
| **Mem0** | Per-user personalization | Search-optimized embeddings | Paid service, API key required |

### Environment Variable Configuration

@@ -21,13 +21,17 @@ To use the training feature, follow these steps:
3. Run the following command:

```shell
crewai train -n <n_iterations> <filename> (optional)
crewai train -n <n_iterations> -f <filename.pkl>
```
<Tip>
Replace `<n_iterations>` with the desired number of training iterations and `<filename>` with the appropriate filename ending with `.pkl`.
</Tip>

### Training Your Crew Programmatically
<Note>
If you omit `-f`, the output defaults to `trained_agents_data.pkl` in the current working directory. You can pass an absolute path to control where the file is written.
</Note>

### Training your Crew programmatically

To train your crew programmatically, use the following steps:
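The code for those steps is collapsed out of this hunk, so here is a hedged sketch of the programmatic call, assuming a project-generated crew class named `YourCrewName` (the import path is hypothetical); the `n_iterations`, `inputs`, and `filename` keyword arguments mirror the CLI options described above.

```python
# Minimal sketch of programmatic training (assumes a generated crew
# class named YourCrewName; adjust the import to your project layout).
from your_project.crew import YourCrewName

n_iterations = 2                       # must be a positive integer
filename = "trained_agents_data.pkl"   # must end with .pkl
inputs = {"topic": "CrewAI Training"}  # any inputs your crew expects

try:
    YourCrewName().crew().train(
        n_iterations=n_iterations,
        inputs=inputs,
        filename=filename,
    )
except Exception as e:
    raise Exception(f"An error occurred while training the crew: {e}")
```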

@@ -51,19 +55,65 @@ except Exception as e:
    raise Exception(f"An error occurred while training the crew: {e}")
```

### Key Points to Note
## How trained data is used by agents

- **Positive Integer Requirement:** Ensure that the number of iterations (`n_iterations`) is a positive integer. The code will raise a `ValueError` if this condition is not met.
- **Filename Requirement:** Ensure that the filename ends with `.pkl`. The code will raise a `ValueError` if this condition is not met.
- **Error Handling:** The code handles subprocess errors and unexpected exceptions, providing error messages to the user.
CrewAI uses the training artifacts in two ways: during training to incorporate your human feedback, and after training to guide agents with consolidated suggestions.

It is important to note that the training process may take some time, depending on the complexity of your agents and will also require your feedback on each iteration.
### Training data flow

Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.
```mermaid
flowchart TD
A["Start training<br/>CLI: crewai train -n -f<br/>or Python: crew.train(...)"] --> B["Setup training mode<br/>- task.human_input = true<br/>- disable delegation<br/>- init training_data.pkl + trained file"]

Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.
subgraph "Iterations"
direction LR
C["Iteration i<br/>initial_output"] --> D["User human_feedback"]
D --> E["improved_output"]
E --> F["Append to training_data.pkl<br/>by agent_id and iteration"]
end

Happy training with CrewAI! 🚀
B --> C
F --> G{"More iterations?"}
G -- "Yes" --> C
G -- "No" --> H["Evaluate per agent<br/>aggregate iterations"]

H --> I["Consolidate<br/>suggestions[] + quality + final_summary"]
I --> J["Save by agent role to trained file<br/>(default: trained_agents_data.pkl)"]

J --> K["Normal (non-training) runs"]
K --> L["Auto-load suggestions<br/>from trained_agents_data.pkl"]
L --> M["Append to prompt<br/>for consistent improvements"]
```

### During training runs

- On each iteration, the system records for every agent:
  - `initial_output`: the agent’s first answer
  - `human_feedback`: your inline feedback when prompted
  - `improved_output`: the agent’s follow-up answer after feedback
- This data is stored in a working file named `training_data.pkl` keyed by the agent’s internal ID and iteration.
- While training is active, the agent automatically appends your prior human feedback to its prompt to enforce those instructions on subsequent attempts within the training session.
Training is interactive: tasks set `human_input = true`, so running in a non-interactive environment will block on user input.

### After training completes

- When `train(...)` finishes, CrewAI evaluates the collected training data per agent and produces a consolidated result containing:
  - `suggestions`: clear, actionable instructions distilled from your feedback and the difference between initial/improved outputs
  - `quality`: a 0–10 score capturing improvement
  - `final_summary`: a step-by-step set of action items for future tasks
- These consolidated results are saved to the filename you pass to `train(...)` (default via CLI is `trained_agents_data.pkl`). Entries are keyed by the agent’s `role` so they can be applied across sessions.
- During normal (non-training) execution, each agent automatically loads its consolidated `suggestions` and appends them to the task prompt as mandatory instructions. This gives you consistent improvements without changing your agent definitions.

### File summary

- `training_data.pkl` (ephemeral, per-session):
  - Structure: `agent_id -> { iteration_number: { initial_output, human_feedback, improved_output } }`
  - Purpose: capture raw data and human feedback during training
  - Location: saved in the current working directory (CWD)
- `trained_agents_data.pkl` (or your custom filename; see the loading sketch after this list):
  - Structure: `agent_role -> { suggestions: string[], quality: number, final_summary: string }`
  - Purpose: persist consolidated guidance for future runs
  - Location: written to the CWD by default; use `-f` to set a custom (including absolute) path
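To see what ends up in the consolidated file, here is a small, hedged inspection sketch; it relies only on the structure described in the summary above (role-keyed entries with `suggestions`, `quality`, and `final_summary`) and on the standard-library `pickle` module.

```python
import pickle
from pathlib import Path

# Assumes you trained to the default filename in the current working directory.
trained_file = Path("trained_agents_data.pkl")

with trained_file.open("rb") as f:
    trained = pickle.load(f)

# Per the file summary above: agent_role -> {suggestions, quality, final_summary}
for role, result in trained.items():
    print(f"Agent role: {role}")
    print(f"  quality score: {result['quality']}")
    for suggestion in result["suggestions"]:
        print(f"  - {suggestion}")
```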

## Small Language Model Considerations

@@ -129,3 +179,18 @@ Happy training with CrewAI! 🚀
</Warning>
</Tab>
</Tabs>

### Key Points to Note

- **Positive Integer Requirement:** Ensure that the number of iterations (`n_iterations`) is a positive integer. The code will raise a `ValueError` if this condition is not met.
- **Filename Requirement:** Ensure that the filename ends with `.pkl`. The code will raise a `ValueError` if this condition is not met.
- **Error Handling:** The code handles subprocess errors and unexpected exceptions, providing error messages to the user.
- Trained guidance is applied at prompt time; it does not modify your Python/YAML agent configuration.
- Agents automatically load trained suggestions from a file named `trained_agents_data.pkl` located in the current working directory. If you trained to a different filename, either rename it to `trained_agents_data.pkl` before running, or adjust the loader in code.
- You can change the output filename when calling `crewai train` with `-f/--filename`. Absolute paths are supported if you want to save outside the CWD.

It is important to note that the training process may take some time, depending on the complexity of your agents and will also require your feedback on each iteration.

Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.

Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.
docs/en/enterprise/features/rbac.mdx (new file) · 103 lines

@@ -0,0 +1,103 @@
---
title: "Role-Based Access Control (RBAC)"
description: "Control access to crews, tools, and data with roles, scopes, and granular permissions."
icon: "shield"
---

## Overview

RBAC in CrewAI Enterprise enables secure, scalable access management through a combination of organization‑level roles and automation‑level visibility controls.

<Frame>
<img src="/images/enterprise/users_and_roles.png" alt="RBAC overview in CrewAI Enterprise" />
</Frame>

## Users and Roles

Each member in your CrewAI workspace is assigned a role, which determines their access across various features.

You can:

- Use predefined roles (Owner, Member)
- Create custom roles tailored to specific permissions
- Assign roles at any time through the settings panel

You can configure users and roles in Settings → Roles.

<Steps>
<Step title="Open Roles settings">
Go to <b>Settings → Roles</b> in CrewAI Enterprise.
</Step>
<Step title="Choose a role type">
Use a predefined role (<b>Owner</b>, <b>Member</b>) or click <b>Create role</b> to define a custom one.
</Step>
<Step title="Assign to members">
Select users and assign the role. You can change this anytime.
</Step>
</Steps>

### Configuration summary

| Area | Where to configure | Options |
|:---|:---|:---|
| Users & Roles | Settings → Roles | Predefined: Owner, Member; Custom roles |
| Automation visibility | Automation → Settings → Visibility | Private; Whitelist users/roles |

## Automation‑level Access Control

In addition to organization‑wide roles, CrewAI Automations support fine‑grained visibility settings that let you restrict access to specific automations by user or role.

This is useful for:

- Keeping sensitive or experimental automations private
- Managing visibility across large teams or external collaborators
- Testing automations in isolated contexts

Deployments can be configured as private, meaning only whitelisted users and roles will be able to:

- View the deployment
- Run it or interact with its API
- Access its logs, metrics, and settings

The organization owner always has access, regardless of visibility settings.

You can configure automation‑level access control in Automation → Settings → Visibility tab.

<Steps>
<Step title="Open Visibility tab">
Navigate to <b>Automation → Settings → Visibility</b>.
</Step>
<Step title="Set visibility">
Choose <b>Private</b> to restrict access. The organization owner always retains access.
</Step>
<Step title="Whitelist access">
Add specific users and roles allowed to view, run, and access logs/metrics/settings.
</Step>
<Step title="Save and verify">
Save changes, then confirm that non‑whitelisted users cannot view or run the automation.
</Step>
</Steps>

### Private visibility: access outcomes

| Action | Owner | Whitelisted user/role | Not whitelisted |
|:---|:---|:---|:---|
| View automation | ✓ | ✓ | ✗ |
| Run automation/API | ✓ | ✓ | ✗ |
| Access logs/metrics/settings | ✓ | ✓ | ✗ |

<Tip>
The organization owner always has access. In private mode, only whitelisted users and roles can view, run, and access logs/metrics/settings.
</Tip>

<Frame>
<img src="/images/enterprise/visibility.png" alt="Automation Visibility settings in CrewAI Enterprise" />
</Frame>

<Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
Contact our support team for assistance with RBAC questions.
</Card>
@@ -35,6 +35,22 @@ crewai tool install <tool-name>

This installs the tool and adds it to `pyproject.toml`.

You can use the tool by importing it and adding it to your agents:

```python
from your_tool.tool import YourTool

custom_tool = YourTool()

researcher = Agent(
    role='Market Research Analyst',
    goal='Provide up-to-date market analysis of the AI industry',
    backstory='An expert analyst with a keen eye for market trends.',
    tools=[custom_tool],
    verbose=True
)
```

## Creating and Publishing Tools

To create a new tool project:
docs/en/enterprise/guides/automation-triggers.mdx (new file) · 171 lines

@@ -0,0 +1,171 @@
---
title: "Automation Triggers"
description: "Automatically execute your CrewAI workflows when specific events occur in connected integrations"
icon: "bolt"
---

Automation triggers enable you to automatically run your CrewAI deployments when specific events occur in your connected integrations, creating powerful event-driven workflows that respond to real-time changes in your business systems.

## Overview

With automation triggers, you can:

- **Respond to real-time events** - Automatically execute workflows when specific conditions are met
- **Integrate with external systems** - Connect with platforms like Gmail, Outlook, OneDrive, JIRA, Slack, Stripe and more
- **Scale your automation** - Handle high-volume events without manual intervention
- **Maintain context** - Access trigger data within your crews and flows

## Managing Automation Triggers

### Viewing Available Triggers

To access and manage your automation triggers:

1. Navigate to your deployment in the CrewAI dashboard
2. Click on the **Triggers** tab to view all available trigger integrations

<Frame>
<img src="/images/enterprise/list-available-triggers.png" alt="List of available automation triggers" />
</Frame>

This view shows all the trigger integrations available for your deployment, along with their current connection status.

### Enabling and Disabling Triggers

Each trigger can be easily enabled or disabled using the toggle switch:

<Frame>
<img src="/images/enterprise/trigger-selected.png" alt="Enable or disable triggers with toggle" />
</Frame>

- **Enabled (blue toggle)**: The trigger is active and will automatically execute your deployment when the specified events occur
- **Disabled (gray toggle)**: The trigger is inactive and will not respond to events

Simply click the toggle to change the trigger state. Changes take effect immediately.

### Monitoring Trigger Executions

Track the performance and history of your triggered executions:

<Frame>
<img src="/images/enterprise/list-executions.png" alt="List of executions triggered by automation" />
</Frame>

## Building Automation

Before building your automation, it's helpful to understand the structure of trigger payloads that your crews and flows will receive.

### Payload Samples Repository

We maintain a comprehensive repository with sample payloads from various trigger sources to help you build and test your automations:

**🔗 [CrewAI Enterprise Trigger Payload Samples](https://github.com/crewAIInc/crewai-enterprise-trigger-payload-samples)**

This repository contains:

- **Real payload examples** from different trigger sources (Gmail, Google Drive, etc.)
- **Payload structure documentation** showing the format and available fields

### Triggers with Crew

Your existing crew definitions work seamlessly with triggers; you just need a task that parses the received payload:

```python
@CrewBase
class MyAutomatedCrew:
    @agent
    def researcher(self) -> Agent:
        return Agent(
            config=self.agents_config['researcher'],
        )

    @task
    def parse_trigger_payload(self) -> Task:
        return Task(
            config=self.tasks_config['parse_trigger_payload'],
            agent=self.researcher(),
        )

    @task
    def analyze_trigger_content(self) -> Task:
        return Task(
            config=self.tasks_config['analyze_trigger_data'],
            agent=self.researcher(),
        )
```

The crew will automatically receive and can access the trigger payload through the standard CrewAI context mechanisms.
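For local testing, a sample payload from the repository above can be passed straight into `kickoff`. This is a hedged sketch: the payload fields shown are illustrative rather than a documented schema, and it mirrors the `crewai_trigger_payload` input used later on this page.

```python
# Hypothetical local test: feed a sample trigger payload to the crew
# exactly as the platform would, via the kickoff inputs.
sample_trigger_payload = {
    "id": "evt_123",  # illustrative values only
    "payload": {"subject": "New lead", "from": "someone@example.com"},
}

result = MyAutomatedCrew().crew().kickoff(
    inputs={"crewai_trigger_payload": sample_trigger_payload}
)
print(result)
```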

### Integration with Flows

For flows, you have more control over how trigger data is handled:

#### Accessing Trigger Payload

All `@start()` methods in your flows will accept an additional parameter called `crewai_trigger_payload`:

```python
from crewai.flow import Flow, start, listen

class MyAutomatedFlow(Flow):
    @start()
    def handle_trigger(self, crewai_trigger_payload: dict = None):
        """
        This start method can receive trigger data
        """
        if crewai_trigger_payload:
            # Process the trigger data
            trigger_id = crewai_trigger_payload.get('id')
            event_data = crewai_trigger_payload.get('payload', {})

            # Store in flow state for use by other methods
            self.state.trigger_id = trigger_id
            self.state.trigger_type = event_data

            return event_data

        # Handle manual execution
        return None

    @listen(handle_trigger)
    def process_data(self, trigger_data):
        """
        Process the data from the trigger
        """
        # ... process the trigger
```

#### Triggering Crews from Flows

When kicking off a crew within a flow that was triggered, pass the trigger payload through as-is:

```python
@start()
def delegate_to_crew(self, crewai_trigger_payload: dict = None):
    """
    Delegate processing to a specialized crew
    """
    crew = MySpecializedCrew()

    # Pass the trigger payload to the crew
    result = crew.crew().kickoff(
        inputs={
            'a_custom_parameter': "custom_value",
            'crewai_trigger_payload': crewai_trigger_payload
        },
    )

    return result
```

## Troubleshooting

**Trigger not firing:**
- Verify the trigger is enabled
- Check integration connection status

**Execution failures:**
- Check the execution logs for error details
- If you are developing, make sure the inputs include the `crewai_trigger_payload` parameter with the correct payload

Automation triggers transform your CrewAI deployments into responsive, event-driven systems that can seamlessly integrate with your existing business processes and tools.
docs/en/examples/cookbooks.mdx (new file) · 22 lines

@@ -0,0 +1,22 @@
---
title: CrewAI Cookbooks
description: Feature-focused quickstarts and notebooks for learning patterns fast.
icon: book
---

## Quickstarts & Demos

<CardGroup cols={2}>
<Card title="Task Guardrails" icon="shield-check" href="https://github.com/crewAIInc/crewAI-quickstarts/tree/main/Task%20Guardrails">
Interactive notebooks for hands-on exploration.
</Card>
<Card title="Browse Quickstarts" icon="bolt" href="https://github.com/crewAIInc/crewAI-quickstarts">
Feature demos and small projects showcasing specific CrewAI capabilities.
</Card>
</CardGroup>

<Tip>
Use Cookbooks to learn a pattern quickly, then jump to Full Examples for production‑grade implementations.
</Tip>
@@ -1,62 +1,85 @@
---
title: CrewAI Examples
description: A collection of examples that show how to use CrewAI framework to automate workflows.
description: Explore curated examples organized by Crews, Flows, Integrations, and Notebooks.
icon: rocket-launch
---

## Crews

<CardGroup cols={3}>
<Card
title="Marketing Strategy"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/marketing_strategy"
icon="bullhorn"
iconType="solid"
>
Automate marketing strategy creation with CrewAI.
<Card title="Marketing Strategy" icon="bullhorn" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/marketing_strategy">
Multi‑agent marketing campaign planning.
</Card>
<Card title="Surprise Trip" icon="plane" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/surprise_trip">
Personalized surprise travel planning.
</Card>
<Card title="Match Profile to Positions" icon="id-card" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/match_profile_to_positions">
CV‑to‑job matching with vector search.
</Card>
<Card title="Job Posting" icon="newspaper" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/job-posting">
Automated job description creation.
</Card>
<Card title="Game Builder Crew" icon="gamepad" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/game-builder-crew">
Multi‑agent team that designs and builds Python games.
</Card>
<Card title="Recruitment" icon="user-group" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/recruitment">
Candidate sourcing and evaluation.
</Card>
<Card title="Browse all Crews" icon="users" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews">
See the full list of crew examples.
</Card>
</CardGroup>

## Flows

<CardGroup cols={3}>
<Card title="Content Creator Flow" icon="pen" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/content_creator_flow">
Multi‑crew content generation with routing.
</Card>
<Card title="Email Auto Responder" icon="envelope" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/email_auto_responder_flow">
Automated email monitoring and replies.
</Card>
<Card title="Lead Score Flow" icon="chart-line" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/lead_score_flow">
Lead qualification with human‑in‑the‑loop.
</Card>
<Card title="Meeting Assistant Flow" icon="calendar" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/meeting_assistant_flow">
Notes processing with integrations.
</Card>
<Card title="Self Evaluation Loop" icon="rotate" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/self_evaluation_loop_flow">
Iterative self‑improvement workflows.
</Card>
<Card title="Write a Book (Flows)" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/write_a_book_with_flows">
Parallel chapter generation.
</Card>
<Card title="Browse all Flows" icon="diagram-project" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows">
See the full list of flow examples.
</Card>
</CardGroup>

## Integrations

<CardGroup cols={3}>
<Card title="CrewAI ↔ LangGraph" icon="link" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/crewai-langgraph">
Integration with LangGraph framework.
</Card>
<Card title="Azure OpenAI" icon="cloud" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/azure_model">
Using CrewAI with Azure OpenAI.
</Card>
<Card title="NVIDIA Models" icon="microchip" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/nvidia_models">
NVIDIA ecosystem integrations.
</Card>
<Card title="Browse Integrations" icon="puzzle-piece" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations">
See all integration examples.
</Card>
</CardGroup>

## Notebooks

<CardGroup cols={2}>
<Card title="Simple QA Crew + Flow" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks/Simple%20QA%20Crew%20%2B%20Flow">
Simple QA Crew + Flow.
</Card>
<Card title="All Notebooks" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks">
Interactive examples for learning and experimentation.
</Card>
<Card
title="Surprise Trip"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/surprise_trip"
icon="plane"
iconType="duotone"
>
Create a surprise trip itinerary with CrewAI.
</Card>
<Card
title="Match Profile to Positions"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/match_profile_to_positions"
icon="linkedin"
iconType="duotone"
>
Match a profile to job positions with CrewAI.
</Card>
<Card
title="Create Job Posting"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/job-posting"
icon="newspaper"
iconType="duotone"
>
Create a job posting with CrewAI.
</Card>
<Card
title="Game Generator"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/game-builder-crew"
icon="gamepad"
iconType="duotone"
>
Create a game with CrewAI.
</Card>
<Card
title="Find Job Candidates"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/recruitment"
icon="user-group"
iconType="duotone"
>
Find job candidates with CrewAI.
</Card>
</CardGroup>
@@ -1,126 +0,0 @@
---
title: AgentOps Integration
description: Understanding and logging your agent performance with AgentOps.
icon: paperclip
---

# Introduction

Observability is a key aspect of developing and deploying conversational AI agents. It allows developers to understand how their agents are performing,
how their agents are interacting with users, and how their agents use external tools and APIs.
AgentOps is a product independent of CrewAI that provides a comprehensive observability solution for agents.

## AgentOps

[AgentOps](https://agentops.ai/?=crew) provides session replays, metrics, and monitoring for agents.

At a high level, AgentOps gives you the ability to monitor cost, token usage, latency, agent failures, session-wide statistics, and more.
For more info, check out the [AgentOps Repo](https://github.com/AgentOps-AI/agentops).

### Overview

AgentOps provides monitoring for agents in development and production.
It provides a dashboard for tracking agent performance, session replays, and custom reporting.

Additionally, AgentOps provides session drilldowns for viewing Crew agent interactions, LLM calls, and tool usage in real-time.
This feature is useful for debugging and understanding how agents interact with users as well as other agents.





### Features

- **LLM Cost Management and Tracking**: Track spend with foundation model providers.
- **Replay Analytics**: Watch step-by-step agent execution graphs.
- **Recursive Thought Detection**: Identify when agents fall into infinite loops.
- **Custom Reporting**: Create custom analytics on agent performance.
- **Analytics Dashboard**: Monitor high-level statistics about agents in development and production.
- **Public Model Testing**: Test your agents against benchmarks and leaderboards.
- **Custom Tests**: Run your agents against domain-specific tests.
- **Time Travel Debugging**: Restart your sessions from checkpoints.
- **Compliance and Security**: Create audit logs and detect potential threats such as profanity and PII leaks.
- **Prompt Injection Detection**: Identify potential code injection and secret leaks.

### Using AgentOps

<Steps>
<Step title="Create an API Key">
Create a user API key here: [Create API Key](https://app.agentops.ai/account)
</Step>
<Step title="Configure Your Environment">
Add your API key to your environment variables:
```bash
AGENTOPS_API_KEY=<YOUR_AGENTOPS_API_KEY>
```
</Step>
<Step title="Install AgentOps">
Install AgentOps with:
```bash
pip install 'crewai[agentops]'
```
or
```bash
pip install agentops
```
</Step>
<Step title="Initialize AgentOps">
Before using `Crew` in your script, include these lines:

```python
import agentops
agentops.init()
```

This will initiate an AgentOps session as well as automatically track Crew agents. For further info on how to outfit more complex agentic systems,
check out the [AgentOps documentation](https://docs.agentops.ai) or join the [Discord](https://discord.gg/j4f3KbeH).
</Step>
</Steps>

### Crew + AgentOps Examples

<CardGroup cols={3}>
<Card
title="Job Posting"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/job-posting"
icon="briefcase"
iconType="solid"
>
Example of a Crew agent that generates job posts.
</Card>
<Card
title="Markdown Validator"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/markdown_validator"
icon="markdown"
iconType="solid"
>
Example of a Crew agent that validates Markdown files.
</Card>
<Card
title="Instagram Post"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/instagram_post"
icon="square-instagram"
iconType="brands"
>
Example of a Crew agent that generates Instagram posts.
</Card>
</CardGroup>

### Further Information

To get started, create an [AgentOps account](https://agentops.ai/?=crew).

For feature requests or bug reports, please reach out to the AgentOps team on the [AgentOps Repo](https://github.com/AgentOps-AI/agentops).

#### Extra links

<a href="https://twitter.com/agentopsai/">🐦 Twitter</a>
<span> • </span>
<a href="https://discord.gg/JHPt4C7r">📢 Discord</a>
<span> • </span>
<a href="https://app.agentops.ai/?=crew">🖇️ AgentOps Dashboard</a>
<span> • </span>
<a href="https://docs.agentops.ai/introduction">📙 Documentation</a>
@@ -21,9 +21,6 @@ Observability is crucial for understanding how your CrewAI agents perform, ident
### Monitoring & Tracing Platforms

<CardGroup cols={2}>
<Card title="AgentOps" icon="paperclip" href="/en/observability/agentops">
Session replays, metrics, and monitoring for agent development and production.
</Card>

<Card title="LangDB" icon="database" href="/en/observability/langdb">
End-to-end tracing for CrewAI workflows with automatic agent interaction capture.
docs/en/observability/truefoundry.mdx (new file) · 146 lines

@@ -0,0 +1,146 @@
---
title: TrueFoundry Integration
icon: chart-line
---

TrueFoundry provides an enterprise-ready [AI Gateway](https://www.truefoundry.com/ai-gateway) which can integrate with agentic frameworks like CrewAI and provides governance and observability for your AI Applications. TrueFoundry AI Gateway serves as a unified interface for LLM access, providing:

- **Unified API Access**: Connect to 250+ LLMs (OpenAI, Claude, Gemini, Groq, Mistral) through one API
- **Low Latency**: Sub-3ms internal latency with intelligent routing and load balancing
- **Enterprise Security**: SOC 2, HIPAA, GDPR compliance with RBAC and audit logging
- **Quota and cost management**: Token-based quotas, rate limiting, and comprehensive usage tracking
- **Observability**: Full request/response logging, metrics, and traces with customizable retention

## How TrueFoundry Integrates with CrewAI

### Installation & Setup

<Steps>
<Step title="Install CrewAI">
```bash
pip install crewai
```
</Step>

<Step title="Get TrueFoundry Access Token">
1. Sign up for a [TrueFoundry account](https://www.truefoundry.com/register)
2. Follow the steps here in [Quick start](https://docs.truefoundry.com/gateway/quick-start)
</Step>

<Step title="Configure CrewAI with TrueFoundry">


```python
from crewai import LLM

# Create an LLM instance with TrueFoundry AI Gateway
truefoundry_llm = LLM(
    model="openai-main/gpt-4o",  # Similarly, you can call any model from any provider
    base_url="your_truefoundry_gateway_base_url",
    api_key="your_truefoundry_api_key"
)

# Use in your CrewAI agents
from crewai import Agent

@agent
def researcher(self) -> Agent:
    return Agent(
        config=self.agents_config['researcher'],
        llm=truefoundry_llm,
        verbose=True
    )
```
</Step>
</Steps>

### Complete CrewAI Example

```python
from crewai import Agent, Task, Crew, LLM

# Configure LLM with TrueFoundry
llm = LLM(
    model="openai-main/gpt-4o",
    base_url="your_truefoundry_gateway_base_url",
    api_key="your_truefoundry_api_key"
)

# Create agents
researcher = Agent(
    role='Research Analyst',
    goal='Conduct detailed market research',
    backstory='Expert market analyst with attention to detail',
    llm=llm,
    verbose=True
)

writer = Agent(
    role='Content Writer',
    goal='Create comprehensive reports',
    backstory='Experienced technical writer',
    llm=llm,
    verbose=True
)

# Create tasks
research_task = Task(
    description='Research AI market trends for 2024',
    agent=researcher,
    expected_output='Comprehensive research summary'
)

writing_task = Task(
    description='Create a market research report',
    agent=writer,
    expected_output='Well-structured report with insights',
    context=[research_task]
)

# Create and execute crew
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, writing_task],
    verbose=True
)

result = crew.kickoff()
```

### Observability and Governance

Monitor your CrewAI agents through TrueFoundry's metrics tab:


With TrueFoundry's AI Gateway, you can monitor and analyze:

- **Performance Metrics**: Track key latency metrics like Request Latency, Time to First Token (TTFT), and Inter-Token Latency (ITL) with P99, P90, and P50 percentiles
- **Cost and Token Usage**: Gain visibility into your application's costs with detailed breakdowns of input/output tokens and the associated expenses for each model
- **Usage Patterns**: Understand how your application is being used with detailed analytics on user activity, model distribution, and team-based usage
- **Rate limit and Load balancing**: You can set up rate limiting, load balancing and fallback for your models

## Tracing

For a more detailed understanding of tracing, please see [getting-started-tracing](https://docs.truefoundry.com/docs/tracing/tracing-getting-started). For tracing, you can add the Traceloop SDK:

```bash
pip install traceloop-sdk
```

```python
from traceloop.sdk import Traceloop

# Initialize enhanced tracing
Traceloop.init(
    api_endpoint="https://your-truefoundry-endpoint/api/tracing",
    headers={
        "Authorization": f"Bearer {your_truefoundry_pat_token}",
        "TFY-Tracing-Project": "your_project_name",
    },
)
```

This provides additional trace correlation across your entire CrewAI workflow.

@@ -117,4 +117,19 @@ agent = Agent(
)
```

## **Max Usage Count**

You can set a maximum usage count for a tool to prevent it from being used more than a certain number of times.
By default, the max usage count is unlimited.

```python
from crewai_tools import FileReadTool

tool = FileReadTool(max_usage_count=5, ...)
```

Ready to explore? Pick a category above to discover tools that fit your use case!
435
docs/enterprise-api.en.yaml
Normal file
@@ -0,0 +1,435 @@
openapi: 3.0.3
info:
  title: CrewAI Enterprise API
  description: |
    REST API for interacting with your deployed CrewAI crews on CrewAI Enterprise.

    ## Getting Started

    1. **Find your crew URL**: Get your unique crew URL from the CrewAI Enterprise dashboard
    2. **Copy examples**: Use the code examples from each endpoint page as templates
    3. **Replace placeholders**: Update URLs and tokens with your actual values
    4. **Test with your tools**: Use cURL, Postman, or your preferred API client

    ## Authentication

    All API requests require a bearer token for authentication. There are two types of tokens:

    - **Bearer Token**: Organization-level token for full crew operations
    - **User Bearer Token**: User-scoped token for individual access with limited permissions

    You can find your bearer tokens in the Status tab of your crew's detail page in the CrewAI Enterprise dashboard.

    ## Reference Documentation

    This documentation provides comprehensive examples for each endpoint:

    - **Request formats** with all required and optional parameters
    - **Response examples** for success and error scenarios
    - **Code samples** in multiple programming languages
    - **Authentication patterns** with proper Bearer token usage

    Copy the examples and customize them with your actual crew URL and authentication tokens.

    ## Workflow

    1. **Discover inputs** using `GET /inputs`
    2. **Start execution** using `POST /kickoff`
    3. **Monitor progress** using `GET /status/{kickoff_id}`
  version: 1.0.0
  contact:
    name: CrewAI Support
    email: support@crewai.com
    url: https://crewai.com
servers:
  - url: https://your-actual-crew-name.crewai.com
    description: Replace with your actual deployed crew URL from the CrewAI Enterprise dashboard
  - url: https://my-travel-crew.crewai.com
    description: Example travel planning crew (replace with your URL)
  - url: https://content-creation-crew.crewai.com
    description: Example content creation crew (replace with your URL)
  - url: https://research-assistant-crew.crewai.com
    description: Example research assistant crew (replace with your URL)
security:
  - BearerAuth: []
paths:
  /inputs:
    get:
      summary: Get Required Inputs
      description: |
        **📋 Reference Example Only** - *This shows the request format. To test with your actual crew, copy the cURL example and replace the URL + token with your real values.*

        Retrieves the list of all required input parameters that your crew expects for execution.
        Use this endpoint to discover what inputs you need to provide when starting a crew execution.
      operationId: getRequiredInputs
      responses:
        '200':
          description: Successfully retrieved required inputs
          content:
            application/json:
              schema:
                type: object
                properties:
                  inputs:
                    type: array
                    items:
                      type: string
                    description: Array of required input parameter names
                    example: ["budget", "interests", "duration", "age"]
              examples:
                travel_crew:
                  summary: Travel planning crew inputs
                  value:
                    inputs: ["budget", "interests", "duration", "age"]
                outreach_crew:
                  summary: Outreach crew inputs
                  value:
                    inputs: ["name", "title", "company", "industry", "our_product", "linkedin_url"]
        '401':
          $ref: '#/components/responses/UnauthorizedError'
        '404':
          $ref: '#/components/responses/NotFoundError'
        '500':
          $ref: '#/components/responses/ServerError'

  /kickoff:
    post:
      summary: Start Crew Execution
      description: |
        **📋 Reference Example Only** - *This shows the request format. To test with your actual crew, copy the cURL example and replace the URL + token with your real values.*

        Initiates a new crew execution with the provided inputs. Returns a kickoff ID that can be used
        to track the execution progress and retrieve results.

        Crew executions can take anywhere from seconds to minutes depending on their complexity.
        Consider using webhooks for real-time notifications or implement polling with the status endpoint.
      operationId: startCrewExecution
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              required:
                - inputs
              properties:
                inputs:
                  type: object
                  description: Key-value pairs of all required inputs for your crew
                  additionalProperties:
                    type: string
                  example:
                    budget: "1000 USD"
                    interests: "games, tech, ai, relaxing hikes, amazing food"
                    duration: "7 days"
                    age: "35"
                meta:
                  type: object
                  description: Additional metadata to pass to the crew
                  additionalProperties: true
                  example:
                    requestId: "user-request-12345"
                    source: "mobile-app"
                taskWebhookUrl:
                  type: string
                  format: uri
                  description: Callback URL executed after each task completion
                  example: "https://your-server.com/webhooks/task"
                stepWebhookUrl:
                  type: string
                  format: uri
                  description: Callback URL executed after each agent thought/action
                  example: "https://your-server.com/webhooks/step"
                crewWebhookUrl:
                  type: string
                  format: uri
                  description: Callback URL executed when the crew execution completes
                  example: "https://your-server.com/webhooks/crew"
            examples:
              travel_planning:
                summary: Travel planning crew
                value:
                  inputs:
                    budget: "1000 USD"
                    interests: "games, tech, ai, relaxing hikes, amazing food"
                    duration: "7 days"
                    age: "35"
                  meta:
                    requestId: "travel-req-123"
                    source: "web-app"
              outreach_campaign:
                summary: Outreach crew with webhooks
                value:
                  inputs:
                    name: "John Smith"
                    title: "CTO"
                    company: "TechCorp"
                    industry: "Software"
                    our_product: "AI Development Platform"
                    linkedin_url: "https://linkedin.com/in/johnsmith"
                  taskWebhookUrl: "https://api.example.com/webhooks/task"
                  crewWebhookUrl: "https://api.example.com/webhooks/crew"
      responses:
        '200':
          description: Crew execution started successfully
          content:
            application/json:
              schema:
                type: object
                properties:
                  kickoff_id:
                    type: string
                    format: uuid
                    description: Unique identifier for tracking this execution
                    example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
        '400':
          description: Invalid request body or missing required inputs
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Error'
        '401':
          $ref: '#/components/responses/UnauthorizedError'
        '422':
          description: Validation error - ensure all required inputs are provided
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ValidationError'
        '500':
          $ref: '#/components/responses/ServerError'

  /status/{kickoff_id}:
    get:
      summary: Get Execution Status
      description: |
        **📋 Reference Example Only** - *This shows the request format. To test with your actual crew, copy the cURL example and replace the URL + token with your real values.*

        Retrieves the current status and results of a crew execution using its kickoff ID.

        The response structure varies depending on the execution state:
        - **running**: Execution in progress with current task info
        - **completed**: Execution finished with full results
        - **error**: Execution failed with error details
      operationId: getExecutionStatus
      parameters:
        - name: kickoff_id
          in: path
          required: true
          description: The kickoff ID returned from the /kickoff endpoint
          schema:
            type: string
            format: uuid
          example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
      responses:
        '200':
          description: Successfully retrieved execution status
          content:
            application/json:
              schema:
                oneOf:
                  - $ref: '#/components/schemas/ExecutionRunning'
                  - $ref: '#/components/schemas/ExecutionCompleted'
                  - $ref: '#/components/schemas/ExecutionError'
              examples:
                running:
                  summary: Execution in progress
                  value:
                    status: "running"
                    current_task: "research_task"
                    progress:
                      completed_tasks: 1
                      total_tasks: 3
                completed:
                  summary: Execution completed successfully
                  value:
                    status: "completed"
                    result:
                      output: "Comprehensive travel itinerary for 7 days in Japan focusing on tech culture..."
                      tasks:
                        - task_id: "research_task"
                          output: "Research findings on tech destinations in Japan..."
                          agent: "Travel Researcher"
                          execution_time: 45.2
                        - task_id: "planning_task"
                          output: "7-day detailed itinerary with activities and recommendations..."
                          agent: "Trip Planner"
                          execution_time: 62.8
                    execution_time: 108.5
                error:
                  summary: Execution failed
                  value:
                    status: "error"
                    error: "Task execution failed: Invalid API key for external service"
                    execution_time: 23.1
        '401':
          $ref: '#/components/responses/UnauthorizedError'
        '404':
          description: Kickoff ID not found
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Error'
              example:
                error: "Execution not found"
                message: "No execution found with ID: abcd1234-5678-90ef-ghij-klmnopqrstuv"
        '500':
          $ref: '#/components/responses/ServerError'

components:
  securitySchemes:
    BearerAuth:
      type: http
      scheme: bearer
      description: |
        **📋 Reference Documentation** - *The tokens shown in examples are placeholders for reference only.*

        Use your actual Bearer Token or User Bearer Token from the CrewAI Enterprise dashboard for real API calls.

        **Bearer Token**: Organization-level access for full crew operations
        **User Bearer Token**: User-scoped access with limited permissions

  schemas:
    ExecutionRunning:
      type: object
      properties:
        status:
          type: string
          enum: ["running"]
          example: "running"
        current_task:
          type: string
          description: Name of the currently executing task
          example: "research_task"
        progress:
          type: object
          properties:
            completed_tasks:
              type: integer
              description: Number of completed tasks
              example: 1
            total_tasks:
              type: integer
              description: Total number of tasks in the crew
              example: 3

    ExecutionCompleted:
      type: object
      properties:
        status:
          type: string
          enum: ["completed"]
          example: "completed"
        result:
          type: object
          properties:
            output:
              type: string
              description: Final output from the crew execution
              example: "Comprehensive travel itinerary..."
            tasks:
              type: array
              items:
                $ref: '#/components/schemas/TaskResult'
        execution_time:
          type: number
          description: Total execution time in seconds
          example: 108.5

    ExecutionError:
      type: object
      properties:
        status:
          type: string
          enum: ["error"]
          example: "error"
        error:
          type: string
          description: Error message describing what went wrong
          example: "Task execution failed: Invalid API key"
        execution_time:
          type: number
          description: Time until error occurred in seconds
          example: 23.1

    TaskResult:
      type: object
      properties:
        task_id:
          type: string
          description: Unique identifier for the task
          example: "research_task"
        output:
          type: string
          description: Output generated by this task
          example: "Research findings..."
        agent:
          type: string
          description: Name of the agent that executed this task
          example: "Travel Researcher"
        execution_time:
          type: number
          description: Time taken to execute this task in seconds
          example: 45.2

    Error:
      type: object
      properties:
        error:
          type: string
          description: Error type or title
          example: "Authentication Error"
        message:
          type: string
          description: Detailed error message
          example: "Invalid bearer token provided"

    ValidationError:
      type: object
      properties:
        error:
          type: string
          example: "Validation Error"
        message:
          type: string
          example: "Missing required inputs"
        details:
          type: object
          properties:
            missing_inputs:
              type: array
              items:
                type: string
              example: ["budget", "interests"]

  responses:
    UnauthorizedError:
      description: Authentication failed - check your bearer token
      content:
        application/json:
          schema:
            $ref: '#/components/schemas/Error'
          example:
            error: "Unauthorized"
            message: "Invalid or missing bearer token"

    NotFoundError:
      description: Resource not found
      content:
        application/json:
          schema:
            $ref: '#/components/schemas/Error'
          example:
            error: "Not Found"
            message: "The requested resource was not found"

    ServerError:
      description: Internal server error
      content:
        application/json:
          schema:
            $ref: '#/components/schemas/Error'
          example:
            error: "Internal Server Error"
            message: "An unexpected error occurred"
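The spec's Workflow section boils down to three calls. Below is a minimal Python sketch of that loop under the spec's own placeholder conventions (crew URL and bearer token); the environment variable names are illustrative only.

```python
import os
import time

import requests

# Placeholder values -- replace with your deployed crew URL and bearer token
BASE_URL = os.environ.get("CREW_URL", "https://your-actual-crew-name.crewai.com")
TOKEN = os.environ.get("CREW_TOKEN", "<your bearer token>")
HEADERS = {"Authorization": f"Bearer {TOKEN}"}

# 1. Discover which inputs the crew expects
required = requests.get(f"{BASE_URL}/inputs", headers=HEADERS).json()["inputs"]
print("Required inputs:", required)

# 2. Start an execution (example inputs for the travel crew from the spec)
kickoff = requests.post(
    f"{BASE_URL}/kickoff",
    headers=HEADERS,
    json={"inputs": {"budget": "1000 USD", "interests": "tech, food", "duration": "7 days", "age": "35"}},
)
kickoff_id = kickoff.json()["kickoff_id"]

# 3. Poll the status endpoint until the execution completes or fails
while True:
    status = requests.get(f"{BASE_URL}/status/{kickoff_id}", headers=HEADERS).json()
    if status["status"] in ("completed", "error"):
        break
    time.sleep(5)

print(status)
```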
231
docs/enterprise-api.ko.yaml
Normal file
@@ -0,0 +1,231 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: CrewAI 엔터프라이즈 API
|
||||
description: |
|
||||
CrewAI Enterprise에 배포된 crew와 상호작용하기 위한 REST API입니다.
|
||||
|
||||
## 시작하기
|
||||
1. **Crew URL 확인**: 대시보드에서 고유한 crew URL을 확인하세요
|
||||
2. **예제 복사**: 각 엔드포인트의 예제를 템플릿으로 사용하세요
|
||||
3. **플레이스홀더 교체**: 실제 URL과 토큰으로 바꾸세요
|
||||
4. **도구로 테스트**: cURL, Postman 등 선호하는 도구로 테스트하세요
|
||||
version: 1.0.0
|
||||
contact:
|
||||
name: CrewAI 지원
|
||||
email: support@crewai.com
|
||||
url: https://crewai.com
|
||||
servers:
|
||||
- url: https://your-actual-crew-name.crewai.com
|
||||
description: 대시보드의 실제 crew URL로 교체하세요
|
||||
security:
|
||||
- BearerAuth: []
|
||||
paths:
|
||||
/inputs:
|
||||
get:
|
||||
summary: 필요 입력값 조회
|
||||
description: |
|
||||
**📋 참조 예제만 제공** - *요청 형식을 보여줍니다. 실제 호출은 cURL 예제를 복사해 URL과 토큰을 교체하세요.*
|
||||
|
||||
실행에 필요한 입력 파라미터 목록을 반환합니다.
|
||||
operationId: getRequiredInputs
|
||||
responses:
|
||||
'200':
|
||||
description: 입력값을 성공적으로 조회
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
inputs:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
'401':
|
||||
$ref: '#/components/responses/UnauthorizedError'
|
||||
'404':
|
||||
$ref: '#/components/responses/NotFoundError'
|
||||
'500':
|
||||
$ref: '#/components/responses/ServerError'
|
||||
|
||||
/kickoff:
|
||||
post:
|
||||
summary: Crew 실행 시작
|
||||
description: |
|
||||
**📋 참조 예제만 제공** - *요청 형식을 보여줍니다. 실제 호출은 cURL 예제를 복사해 URL과 토큰을 교체하세요.*
|
||||
|
||||
제공된 입력으로 새로운 실행을 시작하고 kickoff ID를 반환합니다.
|
||||
operationId: startCrewExecution
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
required:
|
||||
- inputs
|
||||
properties:
|
||||
inputs:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
description: 실행이 성공적으로 시작됨
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
kickoff_id:
|
||||
type: string
|
||||
format: uuid
|
||||
'401':
|
||||
$ref: '#/components/responses/UnauthorizedError'
|
||||
'500':
|
||||
$ref: '#/components/responses/ServerError'
|
||||
|
||||
/status/{kickoff_id}:
|
||||
get:
|
||||
summary: 실행 상태 조회
|
||||
description: |
|
||||
**📋 참조 예제만 제공** - *요청 형식을 보여줍니다. 실제 호출은 cURL 예제를 복사해 URL과 토큰을 교체하세요.*
|
||||
|
||||
kickoff ID로 실행 상태와 결과를 조회합니다.
|
||||
operationId: getExecutionStatus
|
||||
parameters:
|
||||
- name: kickoff_id
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
format: uuid
|
||||
responses:
|
||||
'200':
|
||||
description: 상태를 성공적으로 조회
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
oneOf:
|
||||
- $ref: '#/components/schemas/ExecutionRunning'
|
||||
- $ref: '#/components/schemas/ExecutionCompleted'
|
||||
- $ref: '#/components/schemas/ExecutionError'
|
||||
'401':
|
||||
$ref: '#/components/responses/UnauthorizedError'
|
||||
'404':
|
||||
description: Kickoff ID를 찾을 수 없음
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'500':
|
||||
$ref: '#/components/responses/ServerError'
|
||||
|
||||
components:
|
||||
securitySchemes:
|
||||
BearerAuth:
|
||||
type: http
|
||||
scheme: bearer
|
||||
description: |
|
||||
**📋 참고** - *예시의 토큰은 자리 표시자입니다.* 실제 토큰을 사용하세요.
|
||||
|
||||
schemas:
|
||||
ExecutionRunning:
|
||||
type: object
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
enum: ["running"]
|
||||
current_task:
|
||||
type: string
|
||||
progress:
|
||||
type: object
|
||||
properties:
|
||||
completed_tasks:
|
||||
type: integer
|
||||
total_tasks:
|
||||
type: integer
|
||||
|
||||
ExecutionCompleted:
|
||||
type: object
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
enum: ["completed"]
|
||||
result:
|
||||
type: object
|
||||
properties:
|
||||
output:
|
||||
type: string
|
||||
tasks:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/TaskResult'
|
||||
execution_time:
|
||||
type: number
|
||||
|
||||
ExecutionError:
|
||||
type: object
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
enum: ["error"]
|
||||
error:
|
||||
type: string
|
||||
execution_time:
|
||||
type: number
|
||||
|
||||
TaskResult:
|
||||
type: object
|
||||
properties:
|
||||
task_id:
|
||||
type: string
|
||||
output:
|
||||
type: string
|
||||
agent:
|
||||
type: string
|
||||
execution_time:
|
||||
type: number
|
||||
|
||||
Error:
|
||||
type: object
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
message:
|
||||
type: string
|
||||
|
||||
ValidationError:
|
||||
type: object
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
message:
|
||||
type: string
|
||||
details:
|
||||
type: object
|
||||
properties:
|
||||
missing_inputs:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
|
||||
responses:
|
||||
UnauthorizedError:
|
||||
description: 인증 실패
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
NotFoundError:
|
||||
description: 리소스를 찾을 수 없음
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
ServerError:
|
||||
description: 서버 내부 오류
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
268
docs/enterprise-api.pt-BR.yaml
Normal file
@@ -0,0 +1,268 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: CrewAI Enterprise API
|
||||
description: |
|
||||
REST API para interagir com suas crews implantadas no CrewAI Enterprise.
|
||||
|
||||
## Introdução
|
||||
|
||||
1. **Encontre a URL da sua crew**: Obtenha sua URL única no painel do CrewAI Enterprise
|
||||
2. **Copie os exemplos**: Use os exemplos de cada endpoint como modelo
|
||||
3. **Substitua os placeholders**: Atualize URLs e tokens com seus valores reais
|
||||
4. **Teste com suas ferramentas**: Use cURL, Postman ou seu cliente preferido
|
||||
|
||||
## Autenticação
|
||||
|
||||
Todas as requisições exigem um token bearer. Existem dois tipos:
|
||||
|
||||
- **Bearer Token**: Token em nível de organização para operações completas
|
||||
- **User Bearer Token**: Token com escopo de usuário com permissões limitadas
|
||||
|
||||
Você encontra os tokens na aba Status da sua crew no painel do CrewAI Enterprise.
|
||||
|
||||
## Documentação de Referência
|
||||
|
||||
Este documento fornece exemplos completos para cada endpoint:
|
||||
|
||||
- **Formatos de requisição** com parâmetros obrigatórios e opcionais
|
||||
- **Exemplos de resposta** para sucesso e erro
|
||||
- **Amostras de código** em várias linguagens
|
||||
- **Padrões de autenticação** com uso correto de Bearer token
|
||||
|
||||
Copie os exemplos e personalize com sua URL e tokens reais.
|
||||
|
||||
## Fluxo
|
||||
|
||||
1. **Descubra os inputs** usando `GET /inputs`
|
||||
2. **Inicie a execução** usando `POST /kickoff`
|
||||
3. **Monitore o progresso** usando `GET /status/{kickoff_id}`
|
||||
version: 1.0.0
|
||||
contact:
|
||||
name: CrewAI Suporte
|
||||
email: support@crewai.com
|
||||
url: https://crewai.com
|
||||
servers:
|
||||
- url: https://your-actual-crew-name.crewai.com
|
||||
description: Substitua pela URL real da sua crew no painel do CrewAI Enterprise
|
||||
security:
|
||||
- BearerAuth: []
|
||||
paths:
|
||||
/inputs:
|
||||
get:
|
||||
summary: Obter Inputs Requeridos
|
||||
description: |
|
||||
**📋 Exemplo de Referência** - *Mostra o formato da requisição. Para testar com sua crew real, copie o cURL e substitua URL + token.*
|
||||
|
||||
Retorna a lista de parâmetros de entrada que sua crew espera.
|
||||
operationId: getRequiredInputs
|
||||
responses:
|
||||
'200':
|
||||
description: Inputs requeridos obtidos com sucesso
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
inputs:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Nomes dos parâmetros de entrada
|
||||
example: ["budget", "interests", "duration", "age"]
|
||||
'401':
|
||||
$ref: '#/components/responses/UnauthorizedError'
|
||||
'404':
|
||||
$ref: '#/components/responses/NotFoundError'
|
||||
'500':
|
||||
$ref: '#/components/responses/ServerError'
|
||||
|
||||
/kickoff:
|
||||
post:
|
||||
summary: Iniciar Execução da Crew
|
||||
description: |
|
||||
**📋 Exemplo de Referência** - *Mostra o formato da requisição. Para testar com sua crew real, copie o cURL e substitua URL + token.*
|
||||
|
||||
Inicia uma nova execução da crew com os inputs fornecidos e retorna um kickoff ID.
|
||||
operationId: startCrewExecution
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
required:
|
||||
- inputs
|
||||
properties:
|
||||
inputs:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
example:
|
||||
budget: "1000 USD"
|
||||
interests: "games, tech, ai, relaxing hikes, amazing food"
|
||||
duration: "7 days"
|
||||
age: "35"
|
||||
|
||||
responses:
|
||||
'200':
|
||||
description: Execução iniciada com sucesso
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
kickoff_id:
|
||||
type: string
|
||||
format: uuid
|
||||
example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
|
||||
'401':
|
||||
$ref: '#/components/responses/UnauthorizedError'
|
||||
'500':
|
||||
$ref: '#/components/responses/ServerError'
|
||||
|
||||
/status/{kickoff_id}:
|
||||
get:
|
||||
summary: Obter Status da Execução
|
||||
description: |
|
||||
**📋 Exemplo de Referência** - *Mostra o formato da requisição. Para testar com sua crew real, copie o cURL e substitua URL + token.*
|
||||
|
||||
Retorna o status atual e os resultados de uma execução usando o kickoff ID.
|
||||
operationId: getExecutionStatus
|
||||
parameters:
|
||||
- name: kickoff_id
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
format: uuid
|
||||
responses:
|
||||
'200':
|
||||
description: Status recuperado com sucesso
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
oneOf:
|
||||
- $ref: '#/components/schemas/ExecutionRunning'
|
||||
- $ref: '#/components/schemas/ExecutionCompleted'
|
||||
- $ref: '#/components/schemas/ExecutionError'
|
||||
'401':
|
||||
$ref: '#/components/responses/UnauthorizedError'
|
||||
'404':
|
||||
description: Kickoff ID não encontrado
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'500':
|
||||
$ref: '#/components/responses/ServerError'
|
||||
|
||||
components:
|
||||
securitySchemes:
|
||||
BearerAuth:
|
||||
type: http
|
||||
scheme: bearer
|
||||
description: |
|
||||
**📋 Referência** - *Os tokens mostrados são apenas exemplos.*
|
||||
Use seus tokens reais do painel do CrewAI Enterprise.
|
||||
|
||||
schemas:
|
||||
ExecutionRunning:
|
||||
type: object
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
enum: ["running"]
|
||||
current_task:
|
||||
type: string
|
||||
progress:
|
||||
type: object
|
||||
properties:
|
||||
completed_tasks:
|
||||
type: integer
|
||||
total_tasks:
|
||||
type: integer
|
||||
|
||||
ExecutionCompleted:
|
||||
type: object
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
enum: ["completed"]
|
||||
result:
|
||||
type: object
|
||||
properties:
|
||||
output:
|
||||
type: string
|
||||
tasks:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/TaskResult'
|
||||
execution_time:
|
||||
type: number
|
||||
|
||||
ExecutionError:
|
||||
type: object
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
enum: ["error"]
|
||||
error:
|
||||
type: string
|
||||
execution_time:
|
||||
type: number
|
||||
|
||||
TaskResult:
|
||||
type: object
|
||||
properties:
|
||||
task_id:
|
||||
type: string
|
||||
output:
|
||||
type: string
|
||||
agent:
|
||||
type: string
|
||||
execution_time:
|
||||
type: number
|
||||
|
||||
Error:
|
||||
type: object
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
message:
|
||||
type: string
|
||||
|
||||
ValidationError:
|
||||
type: object
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
message:
|
||||
type: string
|
||||
details:
|
||||
type: object
|
||||
properties:
|
||||
missing_inputs:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
|
||||
responses:
|
||||
UnauthorizedError:
|
||||
description: Autenticação falhou
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
NotFoundError:
|
||||
description: Recurso não encontrado
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
ServerError:
|
||||
description: Erro interno do servidor
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
Before Width: | Height: | Size: 288 KiB |
Before Width: | Height: | Size: 419 KiB |
Before Width: | Height: | Size: 263 KiB |
BIN
docs/images/enterprise/list-available-triggers.png
Normal file
After Width: | Height: | Size: 142 KiB |
BIN
docs/images/enterprise/list-executions.png
Normal file
After Width: | Height: | Size: 330 KiB |
BIN
docs/images/enterprise/trigger-selected.png
Normal file
After Width: | Height: | Size: 133 KiB |
BIN
docs/images/enterprise/users_and_roles.png
Normal file
After Width: | Height: | Size: 248 KiB |
BIN
docs/images/enterprise/visibility.png
Normal file
After Width: | Height: | Size: 472 KiB |
BIN
docs/images/gateway-metrics.png
Normal file
After Width: | Height: | Size: 530 KiB |
BIN
docs/images/new-code-snippet.png
Normal file
After Width: | Height: | Size: 554 KiB |
BIN
docs/images/tracing_crewai.png
Normal file
After Width: | Height: | Size: 128 KiB |
7
docs/ko/api-reference/inputs.mdx
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: "GET /inputs"
|
||||
description: "크루가 필요로 하는 입력 확인"
|
||||
openapi: "/enterprise-api.ko.yaml GET /inputs"
|
||||
---
|
||||
|
||||
|
||||
7
docs/ko/api-reference/kickoff.mdx
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: "POST /kickoff"
|
||||
description: "크루 실행 시작"
|
||||
openapi: "/enterprise-api.ko.yaml POST /kickoff"
|
||||
---
|
||||
|
||||
|
||||
7
docs/ko/api-reference/status.mdx
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: "GET /status/{kickoff_id}"
|
||||
description: "실행 상태 조회"
|
||||
openapi: "/enterprise-api.ko.yaml GET /status/{kickoff_id}"
|
||||
---
|
||||
|
||||
|
||||
@@ -177,14 +177,7 @@ class MyCustomCrew:
|
||||
# Your crew implementation...
|
||||
```
|
||||
|
||||
이것이 바로 CrewAI의 내장 `agentops_listener`가 등록되는 방식과 동일합니다. CrewAI 코드베이스에서는 다음과 같이 되어 있습니다:
|
||||
|
||||
```python
|
||||
# src/crewai/utilities/events/third_party/__init__.py
|
||||
from .agentops_listener import agentops_listener
|
||||
```
|
||||
|
||||
이렇게 하면 `crewai.utilities.events` 패키지가 임포트될 때 `agentops_listener`가 자동으로 로드됩니다.
|
||||
이것이 CrewAI 코드베이스에서 서드파티 이벤트 리스너가 등록되는 방식입니다.
|
||||
|
||||
## 사용 가능한 이벤트 유형
|
||||
|
||||
@@ -280,77 +273,6 @@ CrewAI는 여러분이 청취할 수 있는 다양한 이벤트를 제공합니
|
||||
|
||||
추가 필드는 이벤트 타입에 따라 다릅니다. 예를 들어, `CrewKickoffCompletedEvent`에는 `crew_name`과 `output` 필드가 포함됩니다.
|
||||
|
||||
## 실제 예시: AgentOps와의 통합
|
||||
|
||||
CrewAI는 AI 에이전트를 위한 모니터링 및 관찰 플랫폼인 [AgentOps](https://github.com/AgentOps-AI/agentops)와의 서드파티 통합 예시를 포함하고 있습니다. 구현 방식은 다음과 같습니다:
|
||||
|
||||
```python
|
||||
from typing import Optional
|
||||
|
||||
from crewai.utilities.events import (
|
||||
CrewKickoffCompletedEvent,
|
||||
ToolUsageErrorEvent,
|
||||
ToolUsageStartedEvent,
|
||||
)
|
||||
from crewai.utilities.events.base_event_listener import BaseEventListener
|
||||
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
|
||||
from crewai.utilities.events.task_events import TaskEvaluationEvent
|
||||
|
||||
try:
|
||||
import agentops
|
||||
AGENTOPS_INSTALLED = True
|
||||
except ImportError:
|
||||
AGENTOPS_INSTALLED = False
|
||||
|
||||
class AgentOpsListener(BaseEventListener):
|
||||
tool_event: Optional["agentops.ToolEvent"] = None
|
||||
session: Optional["agentops.Session"] = None
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
def setup_listeners(self, crewai_event_bus):
|
||||
if not AGENTOPS_INSTALLED:
|
||||
return
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffStartedEvent)
|
||||
def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
|
||||
self.session = agentops.init()
|
||||
for agent in source.agents:
|
||||
if self.session:
|
||||
self.session.create_agent(
|
||||
name=agent.role,
|
||||
agent_id=str(agent.id),
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffCompletedEvent)
|
||||
def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
|
||||
if self.session:
|
||||
self.session.end_session(
|
||||
end_state="Success",
|
||||
end_state_reason="Finished Execution",
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(ToolUsageStartedEvent)
|
||||
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
|
||||
self.tool_event = agentops.ToolEvent(name=event.tool_name)
|
||||
if self.session:
|
||||
self.session.record(self.tool_event)
|
||||
|
||||
@crewai_event_bus.on(ToolUsageErrorEvent)
|
||||
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
|
||||
agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
|
||||
```
|
||||
|
||||
이 listener는 crew가 시작될 때 AgentOps 세션을 초기화하고, agent를 AgentOps에 등록하며, 도구 사용을 추적하고, crew가 완료되면 세션을 종료합니다.
|
||||
|
||||
AgentOps listener는 `src/crewai/utilities/events/third_party/__init__.py` 파일의 import를 통해 CrewAI 이벤트 시스템에 등록됩니다:
|
||||
|
||||
```python
|
||||
from .agentops_listener import agentops_listener
|
||||
```
|
||||
|
||||
이렇게 하면 `crewai.utilities.events` 패키지가 import될 때 `agentops_listener`가 로드되는 것이 보장됩니다.
|
||||
|
||||
## 고급 사용법: Scoped Handlers
|
||||
|
||||
|
||||
103
docs/ko/enterprise/features/rbac.mdx
Normal file
@@ -0,0 +1,103 @@
|
||||
---
|
||||
title: "역할 기반 접근 제어 (RBAC)"
|
||||
description: "역할과 자동화별 가시성으로 crews, 도구, 데이터 접근을 제어합니다."
|
||||
icon: "shield"
|
||||
---
|
||||
|
||||
## 개요
|
||||
|
||||
CrewAI Enterprise의 RBAC는 **조직 수준 역할**과 **자동화(Automation) 수준 가시성**을 결합하여 안전하고 확장 가능한 접근 제어를 제공합니다.
|
||||
|
||||
<Frame>
|
||||
<img src="/images/enterprise/users_and_roles.png" alt="CrewAI Enterprise RBAC 개요" />
|
||||
|
||||
</Frame>
|
||||
|
||||
## 사용자와 역할
|
||||
|
||||
워크스페이스의 각 구성원은 역할이 있으며, 이는 기능 접근 범위를 결정합니다.
|
||||
|
||||
가능한 작업:
|
||||
|
||||
- 사전 정의된 역할 사용 (Owner, Member)
|
||||
- 권한을 세분화한 커스텀 역할 생성
|
||||
- 설정 화면에서 언제든 역할 할당/변경
|
||||
|
||||
설정 위치: Settings → Roles
|
||||
|
||||
<Steps>
|
||||
<Step title="Roles 열기">
|
||||
<b>Settings → Roles</b>로 이동합니다.
|
||||
</Step>
|
||||
<Step title="역할 선택">
|
||||
<b>Owner</b> 또는 <b>Member</b>를 사용하거나 <b>Create role</b>로 커스텀 역할을 만듭니다.
|
||||
</Step>
|
||||
<Step title="멤버에 할당">
|
||||
사용자들을 선택하여 역할을 지정합니다. 언제든 변경할 수 있습니다.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
### 구성 요약
|
||||
|
||||
| 영역 | 위치 | 옵션 |
|
||||
|:---|:---|:---|
|
||||
| 사용자 & 역할 | Settings → Roles | Owner, Member; 커스텀 역할 |
|
||||
| 자동화 가시성 | Automation → Settings → Visibility | Private; 사용자/역할 화이트리스트 |
|
||||
|
||||
## 자동화 수준 접근 제어
|
||||
|
||||
조직 역할과 별개로, **Automations**는 사용자/역할별로 특정 자동화 접근을 제한하는 가시성 설정을 제공합니다.
|
||||
|
||||
유용한 경우:
|
||||
|
||||
- 민감/실험 자동화를 비공개로 유지
|
||||
- 대규모 팀/외부 협업에서 가시성 관리
|
||||
- 격리된 컨텍스트에서 자동화 테스트
|
||||
|
||||
Private 모드에서는 화이트리스트에 포함된 사용자/역할만 다음 작업이 가능합니다:
|
||||
|
||||
- 자동화 보기
|
||||
- 실행/API 사용
|
||||
- 로그, 메트릭, 설정 접근
|
||||
|
||||
조직 Owner는 항상 접근 가능하며, 가시성 설정에 영향을 받지 않습니다.
|
||||
|
||||
설정 위치: Automation → Settings → Visibility
|
||||
|
||||
<Steps>
|
||||
<Step title="Visibility 탭 열기">
|
||||
<b>Automation → Settings → Visibility</b>로 이동합니다.
|
||||
</Step>
|
||||
<Step title="가시성 설정">
|
||||
<b>Private</b>를 선택합니다. Owner는 항상 접근 가능합니다.
|
||||
</Step>
|
||||
<Step title="허용 대상 추가">
|
||||
보기/실행/로그·메트릭·설정 접근이 가능한 사용자/역할을 추가합니다.
|
||||
</Step>
|
||||
<Step title="저장 및 확인">
|
||||
저장 후, 목록에 없는 사용자가 보거나 실행할 수 없는지 확인합니다.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
### Private 모드 접근 결과
|
||||
|
||||
| 동작 | Owner | 화이트리스트 사용자/역할 | 비포함 |
|
||||
|:---|:---|:---|:---|
|
||||
| 자동화 보기 | ✓ | ✓ | ✗ |
|
||||
| 실행/API | ✓ | ✓ | ✗ |
|
||||
| 로그/메트릭/설정 | ✓ | ✓ | ✗ |
|
||||
|
||||
<Tip>
|
||||
Owner는 항상 접근 가능하며, Private 모드에서는 화이트리스트에 포함된 사용자/역할만 권한이 부여됩니다.
|
||||
</Tip>
|
||||
|
||||
<Frame>
|
||||
<img src="/images/enterprise/visibility.png" alt="CrewAI Enterprise 가시성 설정" />
|
||||
|
||||
</Frame>
|
||||
|
||||
<Card title="도움이 필요하신가요?" icon="headset" href="mailto:support@crewai.com">
|
||||
RBAC 구성과 점검에 대한 지원이 필요하면 연락해 주세요.
|
||||
</Card>
|
||||
|
||||
|
||||
171
docs/ko/enterprise/guides/automation-triggers.mdx
Normal file
@@ -0,0 +1,171 @@
|
||||
---
|
||||
title: "자동화 트리거"
|
||||
description: "연결된 통합에서 특정 이벤트가 발생할 때 CrewAI 워크플로우를 자동으로 실행합니다"
|
||||
icon: "bolt"
|
||||
---
|
||||
|
||||
자동화 트리거를 사용하면 연결된 통합에서 특정 이벤트가 발생할 때 CrewAI 배포를 자동으로 실행할 수 있어, 비즈니스 시스템의 실시간 변화에 반응하는 강력한 이벤트 기반 워크플로우를 만들 수 있습니다.
|
||||
|
||||
## 개요
|
||||
|
||||
자동화 트리거를 사용하면 다음을 수행할 수 있습니다:
|
||||
|
||||
- **실시간 이벤트에 응답** - 특정 조건이 충족될 때 워크플로우를 자동으로 실행
|
||||
- **외부 시스템과 통합** - Gmail, Outlook, OneDrive, JIRA, Slack, Stripe 등의 플랫폼과 연결
|
||||
- **자동화 확장** - 수동 개입 없이 대용량 이벤트 처리
|
||||
- **컨텍스트 유지** - crew와 flow 내에서 트리거 데이터에 액세스
|
||||
|
||||
## 자동화 트리거 관리
|
||||
|
||||
### 사용 가능한 트리거 보기
|
||||
|
||||
자동화 트리거에 액세스하고 관리하려면:
|
||||
|
||||
1. CrewAI 대시보드에서 배포로 이동
|
||||
2. **트리거** 탭을 클릭하여 사용 가능한 모든 트리거 통합 보기
|
||||
|
||||
<Frame>
|
||||
<img src="/images/enterprise/list-available-triggers.png" alt="사용 가능한 자동화 트리거 목록" />
|
||||
</Frame>
|
||||
|
||||
이 보기는 배포에 사용 가능한 모든 트리거 통합과 현재 연결 상태를 보여줍니다.
|
||||
|
||||
### 트리거 활성화 및 비활성화
|
||||
|
||||
각 트리거는 토글 스위치를 사용하여 쉽게 활성화하거나 비활성화할 수 있습니다:
|
||||
|
||||
<Frame>
|
||||
<img src="/images/enterprise/trigger-selected.png" alt="토글로 트리거 활성화 또는 비활성화" />
|
||||
</Frame>
|
||||
|
||||
- **활성화됨 (파란색 토글)**: 트리거가 활성 상태이며 지정된 이벤트가 발생할 때 배포를 자동으로 실행합니다
|
||||
- **비활성화됨 (회색 토글)**: 트리거가 비활성 상태이며 이벤트에 응답하지 않습니다
|
||||
|
||||
토글을 클릭하기만 하면 트리거 상태를 변경할 수 있습니다. 변경 사항은 즉시 적용됩니다.
|
||||
|
||||
### 트리거 실행 모니터링
|
||||
|
||||
트리거된 실행의 성능과 기록을 추적합니다:
|
||||
|
||||
<Frame>
|
||||
<img src="/images/enterprise/list-executions.png" alt="자동화에 의해 트리거된 실행 목록" />
|
||||
</Frame>
|
||||
|
||||
## 자동화 구축
|
||||
|
||||
자동화를 구축하기 전에 crew와 flow가 받을 트리거 페이로드의 구조를 이해하는 것이 도움이 됩니다.
|
||||
|
||||
### 페이로드 샘플 저장소
|
||||
|
||||
자동화를 구축하고 테스트하는 데 도움이 되도록 다양한 트리거 소스의 샘플 페이로드가 포함된 포괄적인 저장소를 유지 관리하고 있습니다:
|
||||
|
||||
**🔗 [CrewAI Enterprise 트리거 페이로드 샘플](https://github.com/crewAIInc/crewai-enterprise-trigger-payload-samples)**
|
||||
|
||||
이 저장소에는 다음이 포함되어 있습니다:
|
||||
|
||||
- **실제 페이로드 예제** - 다양한 트리거 소스(Gmail, Google Drive 등)에서 가져온 예제
|
||||
- **페이로드 구조 문서** - 형식과 사용 가능한 필드를 보여주는 문서
|
||||
|
||||
### Crew와 트리거
|
||||
|
||||
기존 crew 정의는 트리거와 완벽하게 작동하며, 받은 페이로드를 분석하는 작업만 있으면 됩니다:
|
||||
|
||||
```python
|
||||
@CrewBase
|
||||
class MyAutomatedCrew:
|
||||
@agent
|
||||
def researcher(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config['researcher'],
|
||||
)
|
||||
|
||||
@task
|
||||
def parse_trigger_payload(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config['parse_trigger_payload'],
|
||||
agent=self.researcher(),
|
||||
)
|
||||
|
||||
@task
|
||||
def analyze_trigger_content(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config['analyze_trigger_data'],
|
||||
agent=self.researcher(),
|
||||
)
|
||||
```
|
||||
|
||||
crew는 자동으로 트리거 페이로드를 받고 표준 CrewAI 컨텍스트 메커니즘을 통해 액세스할 수 있습니다.
|
||||
|
||||
### Flow와의 통합
|
||||
|
||||
flow의 경우 트리거 데이터 처리 방법을 더 세밀하게 제어할 수 있습니다:
|
||||
|
||||
#### 트리거 페이로드 액세스
|
||||
|
||||
flow의 모든 `@start()` 메서드는 `crewai_trigger_payload`라는 추가 매개변수를 허용합니다:
|
||||
|
||||
```python
|
||||
from crewai.flow import Flow, start, listen
|
||||
|
||||
class MyAutomatedFlow(Flow):
|
||||
@start()
|
||||
def handle_trigger(self, crewai_trigger_payload: dict = None):
|
||||
"""
|
||||
이 start 메서드는 트리거 데이터를 받을 수 있습니다
|
||||
"""
|
||||
if crewai_trigger_payload:
|
||||
# 트리거 데이터 처리
|
||||
trigger_id = crewai_trigger_payload.get('id')
|
||||
event_data = crewai_trigger_payload.get('payload', {})
|
||||
|
||||
# 다른 메서드에서 사용할 수 있도록 flow 상태에 저장
|
||||
self.state.trigger_id = trigger_id
|
||||
self.state.trigger_type = event_data
|
||||
|
||||
return event_data
|
||||
|
||||
# 수동 실행 처리
|
||||
return None
|
||||
|
||||
@listen(handle_trigger)
|
||||
def process_data(self, trigger_data):
|
||||
"""
|
||||
트리거 데이터 처리
|
||||
"""
|
||||
# ... 트리거 처리
|
||||
```
|
||||
|
||||
#### Flow에서 Crew 트리거하기
|
||||
|
||||
트리거된 flow 내에서 crew를 시작할 때 트리거 페이로드를 전달합니다:
|
||||
|
||||
```python
|
||||
@start()
|
||||
def delegate_to_crew(self, crewai_trigger_payload: dict = None):
|
||||
"""
|
||||
전문 crew에 처리 위임
|
||||
"""
|
||||
crew = MySpecializedCrew()
|
||||
|
||||
# crew에 트리거 페이로드 전달
|
||||
result = crew.crew().kickoff(
|
||||
inputs={
|
||||
'a_custom_parameter': "custom_value",
|
||||
'crewai_trigger_payload': crewai_trigger_payload
|
||||
},
|
||||
)
|
||||
|
||||
return result
|
||||
```
|
||||
|
||||
## 문제 해결
|
||||
|
||||
**트리거가 작동하지 않는 경우:**
|
||||
- 트리거가 활성화되어 있는지 확인
|
||||
- 통합 연결 상태 확인
|
||||
|
||||
**실행 실패:**
|
||||
- 오류 세부 정보는 실행 로그 확인
|
||||
- 개발 중인 경우 입력에 올바른 페이로드가 포함된 `crewai_trigger_payload` 매개변수가 포함되어 있는지 확인
|
||||
|
||||
자동화 트리거는 CrewAI 배포를 기존 비즈니스 프로세스 및 도구와 완벽하게 통합할 수 있는 반응형 이벤트 기반 시스템으로 변환합니다.
|
||||
22
docs/ko/examples/cookbooks.mdx
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
title: CrewAI Cookbooks
|
||||
description: 패턴을 빠르게 익히기 위한 기능 중심 Quickstarts와 노트북.
|
||||
icon: book
|
||||
---
|
||||
|
||||
## Quickstarts & Demos
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="Quickstarts 저장소" icon="bolt" href="https://github.com/crewAIInc/crewAI-quickstarts">
|
||||
특정 CrewAI 기능을 보여주는 데모와 소규모 프로젝트.
|
||||
</Card>
|
||||
<Card title="예시의 노트북" icon="book-open" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks">
|
||||
실습을 위한 인터랙티브 노트북.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
<Tip>
|
||||
Cookbooks로 패턴을 빠르게 익힌 뒤, 프로덕션급 구현은 Full Examples에서 확인하세요.
|
||||
</Tip>
|
||||
|
||||
|
||||
@@ -1,62 +1,85 @@
|
||||
---
|
||||
title: CrewAI 예시
|
||||
description: CrewAI 프레임워크를 사용하여 워크플로우를 자동화하는 방법을 보여주는 예시 모음입니다.
|
||||
description: Crews, Flows, 통합, Notebooks로 구성된 예시 모음입니다.
|
||||
icon: rocket-launch
|
||||
---
|
||||
|
||||
## Crews
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card
|
||||
title="마케팅 전략"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/crewAIInc/crewAI-examples/tree/main/marketing_strategy"
|
||||
icon="bullhorn"
|
||||
iconType="solid"
|
||||
>
|
||||
CrewAI로 마케팅 전략 생성을 자동화하세요.
|
||||
<Card title="마케팅 전략" icon="bullhorn" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/marketing_strategy">
|
||||
다중 에이전트 마케팅 캠페인 기획.
|
||||
</Card>
|
||||
<Card title="깜짝 여행" icon="plane" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/surprise_trip">
|
||||
개인화된 여행 계획.
|
||||
</Card>
|
||||
<Card title="프로필-포지션 매칭" icon="id-card" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/match_profile_to_positions">
|
||||
벡터 검색 기반 이력서 매칭.
|
||||
</Card>
|
||||
<Card title="채용 공고" icon="newspaper" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/job-posting">
|
||||
채용 공고 자동 생성.
|
||||
</Card>
|
||||
<Card title="게임 빌더 Crew" icon="gamepad" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/game-builder-crew">
|
||||
파이썬 게임을 설계·구축하는 멀티 에이전트 팀.
|
||||
</Card>
|
||||
<Card title="리크루팅" icon="user-group" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/recruitment">
|
||||
후보자 소싱 및 평가.
|
||||
</Card>
|
||||
<Card title="모든 Crews 보기" icon="users" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews">
|
||||
전체 crew 예시 목록.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Flows
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="Content Creator Flow" icon="pen" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/content_creator_flow">
|
||||
라우팅 기반 콘텐츠 생성.
|
||||
</Card>
|
||||
<Card title="이메일 자동 응답" icon="envelope" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/email_auto_responder_flow">
|
||||
이메일 모니터링과 자동 응답.
|
||||
</Card>
|
||||
<Card title="리드 점수 Flow" icon="chart-line" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/lead_score_flow">
|
||||
휴먼‑인‑더‑루프 리드 평가.
|
||||
</Card>
|
||||
<Card title="미팅 어시스턴트 Flow" icon="calendar" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/meeting_assistant_flow">
|
||||
노트 처리 및 연동.
|
||||
</Card>
|
||||
<Card title="Self Evaluation Loop" icon="rotate" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/self_evaluation_loop_flow">
|
||||
반복적 자가 개선 워크플로우.
|
||||
</Card>
|
||||
<Card title="책 쓰기 (Flows)" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/write_a_book_with_flows">
|
||||
병렬 챕터 생성.
|
||||
</Card>
|
||||
<Card title="모든 Flows 보기" icon="diagram-project" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows">
|
||||
전체 flow 예시 목록.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## 통합 (Integrations)
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="CrewAI ↔ LangGraph" icon="link" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/crewai-langgraph">
|
||||
LangGraph 프레임워크 연동.
|
||||
</Card>
|
||||
<Card title="Azure OpenAI" icon="cloud" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/azure_model">
|
||||
Azure OpenAI와 함께 사용.
|
||||
</Card>
|
||||
<Card title="NVIDIA 모델" icon="microchip" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/nvidia_models">
|
||||
NVIDIA 생태계 연동.
|
||||
</Card>
|
||||
<Card title="모든 통합 보기" icon="puzzle-piece" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations">
|
||||
전체 통합 예시.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## 노트북 (Notebooks)
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="Simple QA Crew + Flow" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks/Simple%20QA%20Crew%20%2B%20Flow">
|
||||
Simple QA Crew + Flow.
|
||||
</Card>
|
||||
<Card title="모든 노트북" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks">
|
||||
학습과 실험을 위한 인터랙티브 예시 모음.
|
||||
</Card>
|
||||
<Card
|
||||
title="깜짝 여행"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/crewAIInc/crewAI-examples/tree/main/surprise_trip"
|
||||
icon="plane"
|
||||
iconType="duotone"
|
||||
>
|
||||
CrewAI로 깜짝 여행 일정표를 만들어보세요.
|
||||
</Card>
|
||||
<Card
|
||||
title="프로필과 포지션 매칭"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/crewAIInc/crewAI-examples/tree/main/match_profile_to_positions"
|
||||
icon="linkedin"
|
||||
iconType="duotone"
|
||||
>
|
||||
CrewAI로 프로필을 채용 포지션에 매칭하세요.
|
||||
</Card>
|
||||
<Card
|
||||
title="채용 공고 생성"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/crewAIInc/crewAI-examples/tree/main/job-posting"
|
||||
icon="newspaper"
|
||||
iconType="duotone"
|
||||
>
|
||||
CrewAI로 채용 공고를 만드세요.
|
||||
</Card>
|
||||
<Card
|
||||
title="게임 생성기"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/crewAIInc/crewAI-examples/tree/main/game-builder-crew"
|
||||
icon="gamepad"
|
||||
iconType="duotone"
|
||||
>
|
||||
CrewAI로 게임을 만들어보세요.
|
||||
</Card>
|
||||
<Card
|
||||
title="채용 후보자 찾기"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/crewAIInc/crewAI-examples/tree/main/recruitment"
|
||||
icon="user-group"
|
||||
iconType="duotone"
|
||||
>
|
||||
CrewAI로 채용 후보자를 찾으세요.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -1,65 +1,65 @@
|
||||
---
|
||||
title: 소개
|
||||
description: 함께 협력하여 복잡한 작업을 해결하는 AI 에이전트 팀 구축
|
||||
description: 함께 협력하여 복잡한 작업을 해결하는 AI agent 팀 구축
|
||||
icon: handshake
|
||||
---
|
||||
|
||||
# CrewAI란 무엇인가?
|
||||
|
||||
**CrewAI는 완전히 독립적으로, LangChain이나 기타 agent 프레임워크에 의존하지 않고 처음부터 스크래치로 개발된 가볍고 매우 빠른 Python 프레임워크입니다.**
|
||||
**CrewAI는 LangChain이나 기타 agent 프레임워크에 의존하지 않고, 완전히 독립적으로 처음부터 스크래치로 개발된 가볍고 매우 빠른 Python 프레임워크입니다.**
|
||||
|
||||
CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공하여, 어떤 시나리오에도 맞춤화된 자율 AI agent를 만드는 데 이상적입니다:
|
||||
|
||||
- **[CrewAI Crews](/ko/guides/crews/first-crew)**: 자율성과 협업 지능을 극대화하여, 각 agent가 특정 역할, 도구, 목표를 가진 AI 팀을 만들 수 있습니다.
|
||||
- **[CrewAI Flows](/ko/guides/flows/first-flow)**: 세밀한 이벤트 기반 제어와 단일 LLM 호출을 통한 정확한 작업 오케스트레이션을 가능하게 하며 Crews를 네이티브로 지원합니다.
|
||||
- **[CrewAI Flows](/ko/guides/flows/first-flow)**: 이벤트 기반의 세밀한 제어와 단일 LLM 호출을 통한 정확한 작업 orchestration을 지원하며, Crews와 네이티브로 통합됩니다.
|
||||
|
||||
10만 명이 넘는 개발자가 커뮤니티 과정을 통해 인증을 받았으며, CrewAI는 기업용 AI 자동화의 표준으로 빠르게 자리잡고 있습니다.
|
||||
|
||||
## 크루 작동 방식
|
||||
## Crew의 작동 방식
|
||||
|
||||
<Note>
|
||||
회사가 비즈니스 목표를 달성하기 위해 여러 부서(영업, 엔지니어링, 마케팅 등)가 리더십 아래에서 함께 일하는 것처럼, CrewAI는 복잡한 작업을 달성하기 위해 전문화된 역할의 AI 에이전트들이 협력하는 조직을 만들 수 있도록 도와줍니다.
|
||||
회사가 비즈니스 목표를 달성하기 위해 여러 부서(영업, 엔지니어링, 마케팅 등)가 리더십 아래에서 함께 일하는 것처럼, CrewAI는 복잡한 작업을 달성하기 위해 전문화된 역할의 AI agent들이 협력하는 조직을 만들 수 있도록 도와줍니다.
|
||||
</Note>
|
||||
|
||||
<Frame caption="CrewAI 프레임워크 개요">
|
||||
<Frame caption="CrewAI Framework Overview">
|
||||
<img src="/images/crews.png" alt="CrewAI Framework Overview" />
|
||||
</Frame>
|
||||
|
||||
| 구성 요소 | 설명 | 주요 특징 |
|
||||
|:--------------|:---------------------:|:----------|
|
||||
| **크루** | 최상위 조직 | • AI 에이전트 팀 관리<br/>• 워크플로우 감독<br/>• 협업 보장<br/>• 결과 전달 |
|
||||
| **AI 에이전트** | 전문 팀원 | • 특정 역할 보유(연구원, 작가 등)<br/>• 지정된 도구 사용<br/>• 작업 위임 가능<br/>• 자율적 의사결정 가능 |
|
||||
| **프로세스** | 워크플로우 관리 시스템 | • 협업 패턴 정의<br/>• 작업 할당 제어<br/>• 상호작용 관리<br/>• 효율적 실행 보장 |
|
||||
| **작업** | 개별 할당 | • 명확한 목표 보유<br/>• 특정 도구 사용<br/>• 더 큰 프로세스에 기여<br/>• 실행 가능한 결과 도출 |
|
||||
| 구성 요소 | 설명 | 주요 특징 |
|
||||
|:----------|:----:|:----------|
|
||||
| **Crew** | 최상위 조직 | • AI agent 팀 관리<br/>• workflow 감독<br/>• 협업 보장<br/>• 결과 전달 |
|
||||
| **AI agents** | 전문 팀원 | • 특정 역할 보유(Researcher, Writer 등)<br/>• 지정된 도구 사용<br/>• 작업 위임 가능<br/>• 자율적 의사결정 가능 |
|
||||
| **Process** | workflow 관리 시스템 | • 협업 패턴 정의<br/>• 작업 할당 제어<br/>• 상호작용 관리<br/>• 효율적 실행 보장 |
|
||||
| **Task** | 개별 할당 | • 명확한 목표 보유<br/>• 특정 도구 사용<br/>• 더 큰 프로세스에 기여<br/>• 실행 가능한 결과 도출 |
|
||||
|
||||
### 어떻게 모두 함께 작동하는가
|
||||
### 전체 구조의 동작 방식
|
||||
|
||||
1. **Crew**가 전체 운영을 조직합니다
|
||||
2. **AI Agents**가 자신들의 전문 작업을 수행합니다
|
||||
2. **AI agents**가 자신들의 전문 작업을 수행합니다
|
||||
3. **Process**가 원활한 협업을 보장합니다
|
||||
4. **Tasks**가 완료되어 목표를 달성합니다
|
||||
|
||||
## 주요 기능
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="역할 기반 에이전트" icon="users">
|
||||
연구원, 분석가, 작가 등 다양한 역할, 전문성, 목표를 가진 전문 에이전트를 생성할 수 있습니다
|
||||
<Card title="역할 기반 agent" icon="users">
|
||||
Researcher, Analyst, Writer 등 다양한 역할과 전문성, 목표를 가진 agent를 생성할 수 있습니다
|
||||
</Card>
|
||||
<Card title="유연한 도구" icon="screwdriver-wrench">
|
||||
에이전트에게 외부 서비스 및 데이터 소스와 상호작용할 수 있는 맞춤형 도구와 API를 제공합니다
|
||||
agent에게 외부 서비스 및 데이터 소스와 상호작용할 수 있는 맞춤형 도구와 API를 제공합니다
|
||||
</Card>
|
||||
<Card title="지능형 협업" icon="people-arrows">
|
||||
에이전트가 함께 작업하며, 인사이트를 공유하고 작업을 조율하여 복잡한 목표를 달성합니다
|
||||
agent들이 함께 작업하며, 인사이트를 공유하고 작업을 조율하여 복잡한 목표를 달성합니다
|
||||
</Card>
|
||||
<Card title="작업 관리" icon="list-check">
|
||||
순차적 또는 병렬 워크플로우를 정의할 수 있으며, 에이전트가 작업 의존성을 자동으로 처리합니다
|
||||
순차적 또는 병렬 workflow를 정의할 수 있으며, agent가 작업 의존성을 자동으로 처리합니다
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## 플로우의 작동 원리
|
||||
## Flow의 작동 원리
|
||||
|
||||
<Note>
|
||||
crew는 자율 협업에 탁월한 반면, 플로우는 구조화된 자동화를 제공하여 워크플로우 실행에 대한 세밀한 제어를 제공합니다. 플로우는 조건부 로직, 반복문, 동적 상태 관리를 정확하게 처리하면서 작업이 신뢰성 있게, 안전하게, 효율적으로 실행되도록 보장합니다. 플로우는 crew와 원활하게 통합되어 높은 자율성과 엄격한 제어의 균형을 이룰 수 있게 해줍니다.
|
||||
Crew가 자율 협업에 탁월하다면, Flow는 구조화된 자동화를 제공하여 workflow 실행에 대한 세밀한 제어를 제공합니다. Flow는 조건부 로직, 반복문, 동적 상태 관리를 정확하게 처리하면서 작업이 신뢰성 있게, 안전하게, 효율적으로 실행되도록 보장합니다. Flow는 Crew와 원활하게 통합되어 높은 자율성과 엄격한 제어의 균형을 이룰 수 있게 해줍니다.
|
||||
</Note>
|
||||
|
||||
<Frame caption="CrewAI Framework Overview">
|
||||
@@ -68,41 +68,41 @@ CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공
|
||||
|
||||
| 구성 요소 | 설명 | 주요 기능 |
|
||||
|:----------|:-----------:|:------------|
|
||||
| **Flow** | 구조화된 워크플로우 오케스트레이션 | • 실행 경로 관리<br/>• 상태 전환 처리<br/>• 작업 순서 제어<br/>• 신뢰성 있는 실행 보장 |
|
||||
| **Events** | 워크플로우 액션 트리거 | • 특정 프로세스 시작<br/>• 동적 응답 가능<br/>• 조건부 분기 지원<br/>• 실시간 적응 허용 |
|
||||
| **States** | 워크플로우 실행 컨텍스트 | • 실행 데이터 유지<br/>• 데이터 영속성 지원<br/>• 재개 가능성 보장<br/>• 실행 무결성 확보 |
|
||||
| **Crew Support** | 워크플로우 자동화 강화 | • 필요할 때 agency 삽입<br/>• 구조화된 워크플로우 보완<br/>• 자동화와 인텔리전스의 균형<br/>• 적응적 의사결정 지원 |
|
||||
| **Flow** | 구조화된 workflow orchestration | • 실행 경로 관리<br/>• 상태 전환 처리<br/>• 작업 순서 제어<br/>• 신뢰성 있는 실행 보장 |
|
||||
| **Events** | workflow 액션 트리거 | • 특정 프로세스 시작<br/>• 동적 응답 가능<br/>• 조건부 분기 지원<br/>• 실시간 적응 허용 |
|
||||
| **States** | workflow 실행 컨텍스트 | • 실행 데이터 유지<br/>• 데이터 영속성 지원<br/>• 재개 가능성 보장<br/>• 실행 무결성 확보 |
|
||||
| **Crew Support** | workflow 자동화 강화 | • 필요할 때 agency 삽입<br/>• 구조화된 workflow 보완<br/>• 자동화와 인텔리전스의 균형<br/>• 적응적 의사결정 지원 |
|
||||
|
||||
### 주요 기능
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="이벤트 기반 오케스트레이션" icon="bolt">
|
||||
이벤트에 동적으로 반응하여 정밀한 실행 경로 정의
|
||||
<Card title="이벤트 기반 orchestration" icon="bolt">
|
||||
이벤트에 동적으로 반응하여 정밀한 실행 경로를 정의합니다
|
||||
</Card>
|
||||
<Card title="세밀한 제어" icon="sliders">
|
||||
워크플로우 상태와 조건부 실행을 안전하고 효율적으로 관리
|
||||
workflow 상태와 조건부 실행을 안전하고 효율적으로 관리합니다
|
||||
</Card>
|
||||
<Card title="네이티브 Crew 통합" icon="puzzle-piece">
|
||||
Crews와 손쉽게 결합하여 자율성과 지능 강화
|
||||
Crews와 손쉽게 결합하여 자율성과 지능을 강화합니다
|
||||
</Card>
|
||||
<Card title="결정론적 실행" icon="route">
|
||||
명시적 제어 흐름과 오류 처리로 예측 가능한 결과 보장
|
||||
명시적 제어 흐름과 오류 처리로 예측 가능한 결과를 보장합니다
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## 크루(Crews)와 플로우(Flows)를 언제 사용할까
|
||||
## Crew와 Flow를 언제 사용할까
|
||||
|
||||
<Note>
|
||||
[크루](/ko/guides/crews/first-crew)와 [플로우](/ko/guides/flows/first-flow)를 언제 사용할지 이해하는 것은 CrewAI의 잠재력을 애플리케이션에서 극대화하는 데 핵심적입니다.
|
||||
[Crew](/ko/guides/crews/first-crew)와 [Flow](/ko/guides/flows/first-flow)를 언제 사용할지 이해하는 것은 CrewAI의 잠재력을 애플리케이션에서 극대화하는 데 핵심적입니다.
|
||||
</Note>
|
||||
|
||||
| 사용 사례 | 권장 접근 방식 | 이유 |
|
||||
|:---------|:---------------------|:-----|
|
||||
| **개방형 연구** | [크루](/ko/guides/crews/first-crew) | 과제가 창의적인 사고, 탐색, 적응이 필요할 때 |
|
||||
| **콘텐츠 생성** | [크루](/ko/guides/crews/first-crew) | 기사, 보고서, 마케팅 자료 등 협업형 생성 시 |
|
||||
| **의사결정 워크플로우** | [플로우](/ko/guides/flows/first-flow) | 예측 가능하고 감사 가능한 의사결정 경로 및 정밀 제어가 필요할 때 |
|
||||
| **API 오케스트레이션** | [플로우](/ko/guides/flows/first-flow) | 특정 순서로 여러 외부 서비스에 신뢰성 있게 통합할 때 |
|
||||
| **하이브리드 애플리케이션** | 혼합 접근 방식 | [플로우](/ko/guides/flows/first-flow)로 전체 프로세스를 오케스트레이션하고, [크루](/ko/guides/crews/first-crew)로 복잡한 하위 작업을 처리 |
|
||||
| **개방형 연구** | [Crew](/ko/guides/crews/first-crew) | 창의적 사고, 탐색, 적응이 필요한 작업에 적합 |
|
||||
| **콘텐츠 생성** | [Crew](/ko/guides/crews/first-crew) | 기사, 보고서, 마케팅 자료 등 협업형 생성에 적합 |
|
||||
| **의사결정 workflow** | [Flow](/ko/guides/flows/first-flow) | 예측 가능하고 감사 가능한 의사결정 경로 및 정밀 제어가 필요할 때 |
|
||||
| **API orchestration** | [Flow](/ko/guides/flows/first-flow) | 특정 순서로 여러 외부 서비스에 신뢰성 있게 통합할 때 |
|
||||
| **하이브리드 애플리케이션** | 혼합 접근 방식 | [Flow](/ko/guides/flows/first-flow)로 전체 프로세스를 orchestration하고, [Crew](/ko/guides/crews/first-crew)로 복잡한 하위 작업을 처리 |
|
||||
|
||||
### 의사결정 프레임워크
|
||||
|
||||
@@ -112,8 +112,8 @@ CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공
|
||||
|
||||
## CrewAI를 선택해야 하는 이유?
|
||||
|
||||
- 🧠 **자율적 운영**: 에이전트가 자신의 역할과 사용 가능한 도구를 바탕으로 지능적인 결정을 내립니다
|
||||
- 📝 **자연스러운 상호작용**: 에이전트가 인간 팀원처럼 소통하고 협업합니다
|
||||
- 🧠 **자율적 운영**: agent가 자신의 역할과 사용 가능한 도구를 바탕으로 지능적인 결정을 내립니다
|
||||
- 📝 **자연스러운 상호작용**: agent가 인간 팀원처럼 소통하고 협업합니다
|
||||
- 🛠️ **확장 가능한 설계**: 새로운 도구, 역할, 기능을 쉽게 추가할 수 있습니다
|
||||
- 🚀 **프로덕션 준비 완료**: 실제 환경에서의 신뢰성과 확장성을 고려하여 구축되었습니다
|
||||
- 🔒 **보안 중심**: 엔터프라이즈 보안 요구 사항을 고려하여 설계되었습니다
|
||||
@@ -134,7 +134,7 @@ CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공
|
||||
icon="diagram-project"
|
||||
href="/ko/guides/flows/first-flow"
|
||||
>
|
||||
실행을 정밀하게 제어할 수 있는 구조화된, 이벤트 기반 워크플로우를 만드는 방법을 배워보세요.
|
||||
실행을 정밀하게 제어할 수 있는 구조화된, 이벤트 기반 workflow를 만드는 방법을 배워보세요.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
@@ -151,7 +151,7 @@ CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공
|
||||
icon="bolt"
|
||||
href="ko/quickstart"
|
||||
>
|
||||
빠른 시작 가이드를 따라 첫 번째 CrewAI 에이전트를 만들고 직접 경험해 보세요.
|
||||
빠른 시작 가이드를 따라 첫 번째 CrewAI agent를 만들고 직접 경험해 보세요.
|
||||
</Card>
|
||||
<Card
|
||||
title="커뮤니티 가입하기"
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
---
|
||||
title: AgentOps 통합
|
||||
description: AgentOps를 사용하여 에이전트 성능을 이해하고 로깅하기
|
||||
icon: paperclip
|
||||
---
|
||||
|
||||
# 소개
|
||||
|
||||
Observability는 대화형 AI 에이전트를 개발하고 배포하는 데 있어 핵심적인 요소입니다. 이는 개발자가 에이전트의 성능을 이해하고, 에이전트가 사용자와 어떻게 상호작용하는지, 그리고 에이전트가 외부 도구와 API를 어떻게 사용하는지를 파악할 수 있게 해줍니다.
|
||||
AgentOps는 CrewAI와 독립적인 제품으로, 에이전트를 위한 종합적인 observability 솔루션을 제공합니다.
|
||||
|
||||
## AgentOps
|
||||
|
||||
[AgentOps](https://agentops.ai/?=crew)은 에이전트에 대한 세션 리플레이, 메트릭, 모니터링을 제공합니다.
|
||||
|
||||
AgentOps는 높은 수준에서 비용, 토큰 사용량, 대기 시간, 에이전트 실패, 세션 전체 통계 등 다양한 항목을 모니터링할 수 있는 기능을 제공합니다.
|
||||
더 자세한 내용은 [AgentOps Repo](https://github.com/AgentOps-AI/agentops)를 확인하세요.
|
||||
|
||||
### 개요
|
||||
|
||||
AgentOps는 개발 및 프로덕션 환경에서 에이전트에 대한 모니터링을 제공합니다.
|
||||
에이전트 성능, 세션 리플레이, 맞춤형 리포팅을 추적할 수 있는 대시보드를 제공합니다.
|
||||
|
||||
또한, AgentOps는 Crew 에이전트 상호작용, LLM 호출, 툴 사용을 실시간으로 볼 수 있는 세션 드릴다운 기능을 제공합니다.
|
||||
이 기능은 에이전트가 사용자 및 다른 에이전트와 어떻게 상호작용하는지 디버깅하고 이해하는 데 유용합니다.
|
||||
|
||||

|
||||

|
||||

|
||||
|
||||
### 특징
|
||||
|
||||
- **LLM 비용 관리 및 추적**: 기반 모델 공급자와의 지출을 추적합니다.
|
||||
- **재생 분석**: 단계별 에이전트 실행 그래프를 시청할 수 있습니다.
|
||||
- **재귀적 사고 감지**: 에이전트가 무한 루프에 빠졌는지 식별합니다.
|
||||
- **맞춤형 보고서**: 에이전트 성능에 대한 맞춤형 분석을 생성합니다.
|
||||
- **분석 대시보드**: 개발 및 운영 중인 에이전트에 대한 상위 수준 통계를 모니터링합니다.
|
||||
- **공개 모델 테스트**: 벤치마크 및 리더보드를 통해 에이전트를 테스트할 수 있습니다.
|
||||
- **맞춤형 테스트**: 도메인별 테스트로 에이전트를 실행합니다.
|
||||
- **타임 트래블 디버깅**: 체크포인트에서 세션을 재시작합니다.
|
||||
- **컴플라이언스 및 보안**: 감사 로그를 생성하고 욕설 및 PII 유출과 같은 잠재적 위협을 감지합니다.
|
||||
- **프롬프트 인젝션 감지**: 잠재적 코드 인젝션 및 시크릿 유출을 식별합니다.
|
||||
|
||||
### AgentOps 사용하기
|
||||
|
||||
<Steps>
|
||||
<Step title="API 키 생성">
|
||||
사용자 API 키를 여기서 생성하세요: [API 키 생성](https://app.agentops.ai/account)
|
||||
</Step>
|
||||
<Step title="환경 설정">
|
||||
API 키를 환경 변수에 추가하세요:
|
||||
```bash
|
||||
AGENTOPS_API_KEY=<YOUR_AGENTOPS_API_KEY>
|
||||
```
|
||||
</Step>
|
||||
<Step title="AgentOps 설치">
|
||||
다음 명령어로 AgentOps를 설치하세요:
|
||||
```bash
|
||||
pip install 'crewai[agentops]'
|
||||
```
|
||||
또는
|
||||
```bash
|
||||
pip install agentops
|
||||
```
|
||||
</Step>
|
||||
<Step title="AgentOps 초기화">
|
||||
스크립트에서 `Crew`를 사용하기 전에 다음 코드를 포함하세요:
|
||||
|
||||
```python
|
||||
import agentops
|
||||
agentops.init()
|
||||
```
|
||||
|
||||
이렇게 하면 AgentOps 세션이 시작되고 Crew 에이전트가 자동으로 추적됩니다. 더 복잡한 agentic 시스템을 구성하는 방법에 대한 자세한 정보는 [AgentOps 문서](https://docs.agentops.ai) 또는 [Discord](https://discord.gg/j4f3KbeH)를 참조하세요.
|
||||
</Step>
|
||||
</Steps>

### Crew + AgentOps Examples

<CardGroup cols={3}>
<Card
title="Job Posting"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/job-posting"
icon="briefcase"
iconType="solid"
>
Example of a Crew agent that generates job posts.
</Card>
<Card
title="Markdown Validator"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/markdown_validator"
icon="markdown"
iconType="solid"
>
Example of a Crew agent that validates Markdown files.
</Card>
<Card
title="Instagram Post"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/instagram_post"
icon="square-instagram"
iconType="brands"
>
Example of a Crew agent that generates Instagram posts.
</Card>
</CardGroup>

### Further Information

To get started, create an [AgentOps account](https://agentops.ai/?=crew).

For feature requests or bug reports, please reach out to the AgentOps team on the [AgentOps Repo](https://github.com/AgentOps-AI/agentops).

#### Extra links

<a href="https://twitter.com/agentopsai/">🐦 Twitter</a>
<span> • </span>
<a href="https://discord.gg/JHPt4C7r">📢 Discord</a>
<span> • </span>
<a href="https://app.agentops.ai/?=crew">🖇️ AgentOps Dashboard</a>
<span> • </span>
<a href="https://docs.agentops.ai/introduction">📙 Documentation</a>

@@ -21,9 +21,6 @@ icon: "face-smile"
### Monitoring & Tracing Platforms

<CardGroup cols={2}>
<Card title="AgentOps" icon="paperclip" href="/ko/observability/agentops">
Session replays, metrics, and monitoring for agent development and production.
</Card>

<Card title="LangDB" icon="database" href="/ko/observability/langdb">
End-to-end tracing of CrewAI workflows with automatic agent interaction capture.

7
docs/pt-BR/api-reference/inputs.mdx
Normal file
@@ -0,0 +1,7 @@
---
title: "GET /inputs"
description: "Get the inputs required by your crew"
openapi: "/enterprise-api.pt-BR.yaml GET /inputs"
---

7
docs/pt-BR/api-reference/kickoff.mdx
Normal file
@@ -0,0 +1,7 @@
---
title: "POST /kickoff"
description: "Start the crew execution"
openapi: "/enterprise-api.pt-BR.yaml POST /kickoff"
---

7
docs/pt-BR/api-reference/status.mdx
Normal file
@@ -0,0 +1,7 @@
---
title: "GET /status/{kickoff_id}"
description: "Get the execution status"
openapi: "/enterprise-api.pt-BR.yaml GET /status/{kickoff_id}"
---

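The three stubs above only point at the OpenAPI spec, so here is a hedged sketch of how these endpoints are typically used together. The deployment base URL, bearer-token header, request body shape, and response field names (`kickoff_id`, `state`) are assumptions for illustration; the authoritative shapes live in `enterprise-api.pt-BR.yaml`.

```python
import time
import requests

# Assumed values; both come from your CrewAI Enterprise deployment page.
BASE_URL = "https://your-crew-deployment.example.com"
HEADERS = {"Authorization": "Bearer YOUR_API_TOKEN"}  # auth scheme assumed

# 1. Discover which inputs the crew expects.
inputs_schema = requests.get(f"{BASE_URL}/inputs", headers=HEADERS, timeout=30).json()
print(inputs_schema)

# 2. Start an execution (input keys depend on your crew).
kickoff = requests.post(
    f"{BASE_URL}/kickoff",
    headers=HEADERS,
    json={"inputs": {"topic": "AI observability"}},  # body shape assumed
    timeout=30,
)
kickoff_id = kickoff.json()["kickoff_id"]  # field name assumed from the path parameter

# 3. Poll the status endpoint until the run finishes.
while True:
    status = requests.get(f"{BASE_URL}/status/{kickoff_id}", headers=HEADERS, timeout=30).json()
    print(status)
    if status.get("state") in ("SUCCESS", "FAILED"):  # terminal states assumed
        break
    time.sleep(5)
```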
@@ -177,14 +177,7 @@ class MyCustomCrew:
    # Your crew implementation...
```

This is exactly how CrewAI's built-in `agentops_listener` is registered. In the CrewAI source code you will find:

```python
# src/crewai/utilities/events/third_party/__init__.py
from .agentops_listener import agentops_listener
```

This ensures the `agentops_listener` is loaded when the `crewai.utilities.events` package is imported.
This is how third-party event listeners are registered in the CrewAI codebase (a minimal custom-listener sketch follows below).
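
For comparison, here is a minimal sketch of a custom listener built on the same pattern as the AgentOps integration shown later on this page. It only logs crew start and completion events; the module placement and logger name are illustrative, not part of CrewAI's API.

```python
import logging

from crewai.utilities.events import CrewKickoffCompletedEvent
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent

logger = logging.getLogger("my_listeners")


class LoggingListener(BaseEventListener):
    """Minimal example: log when a crew starts and finishes."""

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_started(source, event: CrewKickoffStartedEvent):
            logger.info("Crew kickoff started")

        @crewai_event_bus.on(CrewKickoffCompletedEvent)
        def on_completed(source, event: CrewKickoffCompletedEvent):
            # CrewKickoffCompletedEvent exposes crew_name and output.
            logger.info("Crew finished: %s", event.crew_name)


# Instantiating the listener registers its handlers on the event bus,
# mirroring how `agentops_listener` is exposed as a module-level instance.
logging_listener = LoggingListener()
```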

## Available Event Types

@@ -269,77 +262,6 @@ The structure of the event object depends on the event type, but all inherit from `

Additional fields vary by event type. For example, `CrewKickoffCompletedEvent` includes the `crew_name` and `output` fields.

## Real-World Example: AgentOps Integration

CrewAI includes an example integration with [AgentOps](https://github.com/AgentOps-AI/agentops), a monitoring and observability platform for AI agents. Here is how it is implemented:

```python
from typing import Optional

from crewai.utilities.events import (
    CrewKickoffCompletedEvent,
    ToolUsageErrorEvent,
    ToolUsageStartedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
from crewai.utilities.events.task_events import TaskEvaluationEvent

try:
    import agentops
    AGENTOPS_INSTALLED = True
except ImportError:
    AGENTOPS_INSTALLED = False

class AgentOpsListener(BaseEventListener):
    tool_event: Optional["agentops.ToolEvent"] = None
    session: Optional["agentops.Session"] = None

    def __init__(self):
        super().__init__()

    def setup_listeners(self, crewai_event_bus):
        if not AGENTOPS_INSTALLED:
            return

        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
            self.session = agentops.init()
            for agent in source.agents:
                if self.session:
                    self.session.create_agent(
                        name=agent.role,
                        agent_id=str(agent.id),
                    )

        @crewai_event_bus.on(CrewKickoffCompletedEvent)
        def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
            if self.session:
                self.session.end_session(
                    end_state="Success",
                    end_state_reason="Finished Execution",
                )

        @crewai_event_bus.on(ToolUsageStartedEvent)
        def on_tool_usage_started(source, event: ToolUsageStartedEvent):
            self.tool_event = agentops.ToolEvent(name=event.tool_name)
            if self.session:
                self.session.record(self.tool_event)

        @crewai_event_bus.on(ToolUsageErrorEvent)
        def on_tool_usage_error(source, event: ToolUsageErrorEvent):
            agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
```

This listener starts an AgentOps session when a Crew kicks off, registers the agents with AgentOps, tracks tool usage, and ends the session when the Crew completes.

The AgentOps listener is registered with CrewAI's event system via an import in `src/crewai/utilities/events/third_party/__init__.py`:

```python
from .agentops_listener import agentops_listener
```

This ensures the `agentops_listener` is loaded when the `crewai.utilities.events` package is imported.

## Advanced Usage: Scoped Handlers

103
docs/pt-BR/enterprise/features/rbac.mdx
Normal file
@@ -0,0 +1,103 @@
---
title: "Role-Based Access Control (RBAC)"
description: "Control access to crews, tools, and data with roles and per-automation visibility."
icon: "shield"
---

## Overview

RBAC in CrewAI Enterprise lets you manage access securely and at scale by combining **organization-level roles** with **automation-level visibility controls**.

<Frame>
<img src="/images/enterprise/users_and_roles.png" alt="Overview of RBAC in CrewAI Enterprise" />
</Frame>

## Users and Roles

Every member of your workspace has a role, which determines their access to resources.

You can:

- Use the predefined roles (Owner, Member)
- Create custom roles with specific permissions
- Assign roles at any time from the settings panel

Users and roles are configured under Settings → Roles.

<Steps>
<Step title="Open Roles">
Go to <b>Settings → Roles</b> in CrewAI Enterprise.
</Step>
<Step title="Choose a role">
Use <b>Owner</b> or <b>Member</b>, or click <b>Create role</b> to create a custom role.
</Step>
<Step title="Assign to members">
Select the users and assign the role. You can change it later.
</Step>
</Steps>

### Configuration summary

| Area | Where to configure | Options |
|:---|:---|:---|
| Users & Roles | Settings → Roles | Predefined: Owner, Member; custom roles |
| Automation visibility | Automation → Settings → Visibility | Private; list of users/roles |

## Automation-Level Access Control

Beyond organization roles, **Automations** support fine-grained visibility to restrict access by user or role.

This is useful for:

- Keeping sensitive or experimental automations private
- Managing visibility across large teams or external collaborators
- Testing automations in an isolated context

In private mode, only whitelisted users/roles can:

- See the automation
- Run it / use the API
- Access logs, metrics, and settings

The organization owner always has access, regardless of visibility.

Configure this under Automation → Settings → Visibility.

<Steps>
<Step title="Open the Visibility tab">
Go to <b>Automation → Settings → Visibility</b>.
</Step>
<Step title="Set visibility">
Select <b>Private</b> to restrict access. The owner keeps access.
</Step>
<Step title="Grant access">
Add the users and roles that may view/run the automation and access its logs/metrics/settings.
</Step>
<Step title="Save and verify">
Save and confirm that non-listed users cannot see or run the automation.
</Step>
</Steps>

### Access outcome in Private mode

| Action | Owner | Whitelisted user/role | Not listed |
|:---|:---|:---|:---|
| View automation | ✓ | ✓ | ✗ |
| Run / API | ✓ | ✓ | ✗ |
| Logs/metrics/settings | ✓ | ✓ | ✗ |

<Tip>
The owner always has access. In private mode, only whitelisted users/roles are allowed.
</Tip>

<Frame>
<img src="/images/enterprise/visibility.png" alt="Visibility configuration in CrewAI Enterprise" />
</Frame>

<Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
Reach out to our team for support with RBAC configuration and auditing.
</Card>

171
docs/pt-BR/enterprise/guides/automation-triggers.mdx
Normal file
@@ -0,0 +1,171 @@
---
title: "Automation Triggers"
description: "Automatically run your CrewAI workflows when specific events occur in connected integrations"
icon: "bolt"
---

Automation triggers let you run your CrewAI deployments automatically when specific events occur in your connected integrations, creating powerful event-driven workflows that react to real-time changes in your business systems.

## Overview

With automation triggers, you can:

- **React to real-time events** - Run workflows automatically when specific conditions are met
- **Integrate with external systems** - Connect to platforms such as Gmail, Outlook, OneDrive, JIRA, Slack, Stripe, and more
- **Scale your automation** - Handle high-volume events without manual intervention
- **Keep context** - Access trigger data inside your crews and flows

## Managing Automation Triggers

### Viewing Available Triggers

To access and manage your automation triggers:

1. Navigate to your deployment in the CrewAI dashboard
2. Click the **Triggers** tab to view all available trigger integrations

<Frame>
<img src="/images/enterprise/list-available-triggers.png" alt="List of available automation triggers" />
</Frame>

This view shows every trigger integration available for your deployment, along with its current connection status.

### Enabling and Disabling Triggers

Each trigger can easily be enabled or disabled using the toggle:

<Frame>
<img src="/images/enterprise/trigger-selected.png" alt="Enable or disable triggers with the toggle" />
</Frame>

- **Enabled (blue toggle)**: The trigger is active and will automatically run your deployment when the specified events occur
- **Disabled (gray toggle)**: The trigger is inactive and will not react to events

Simply click the toggle to change the trigger's state. Changes take effect immediately.

### Monitoring Trigger Executions

Track the performance and history of your triggered executions:

<Frame>
<img src="/images/enterprise/list-executions.png" alt="List of executions started by automation" />
</Frame>

## Building Automation

Before building your automation, it helps to understand the structure of the trigger payloads your crews and flows will receive.

### Payload Samples Repository

We maintain a comprehensive repository with payload samples from several trigger sources to help you build and test your automations:

**🔗 [CrewAI Enterprise Trigger Payload Samples](https://github.com/crewAIInc/crewai-enterprise-trigger-payload-samples)**

This repository contains:

- **Real payload examples** from different trigger sources (Gmail, Google Drive, etc.)
- **Payload structure documentation** showing the format and available fields (see the hypothetical sketch below)
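
The exact payload shape depends on the trigger source and is documented in the repository above. The dictionary below is only a hypothetical illustration of the two top-level fields that the flow example later on this page reads (`id` and `payload`); every key and value inside it is a placeholder.

```python
# Hypothetical trigger payload, for illustration only; real payloads are
# source-specific -- see the samples repository for authoritative examples.
crewai_trigger_payload = {
    "id": "trigger-run-123",       # identifier of the trigger event (assumed)
    "payload": {                   # source-specific event data (assumed)
        "subject": "New invoice received",
        "from": "billing@example.com",
    },
}

# The flow example below accesses it like this:
trigger_id = crewai_trigger_payload.get("id")
event_data = crewai_trigger_payload.get("payload", {})
```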

### Crew Triggers

Your existing crew definitions work with triggers out of the box; you just need a task that parses the incoming payload:

```python
@CrewBase
class MinhaCrewAutomatizada:
    @agent
    def pesquisador(self) -> Agent:
        return Agent(
            config=self.agents_config['pesquisador'],
        )

    @task
    def analisar_payload_trigger(self) -> Task:
        return Task(
            config=self.tasks_config['analisar_payload_trigger'],
            agent=self.pesquisador(),
        )

    @task
    def analisar_conteudo_trigger(self) -> Task:
        return Task(
            config=self.tasks_config['analisar_dados_trigger'],
            agent=self.pesquisador(),
        )
```

The crew automatically receives the trigger payload and can access it through CrewAI's standard context mechanisms.

### Integration with Flows

For flows, you have more control over how the trigger data is handled:

#### Accessing the Trigger Payload

All `@start()` methods in your flows accept an additional parameter called `crewai_trigger_payload`:

```python
from crewai.flow import Flow, start, listen

class MeuFlowAutomatizado(Flow):
    @start()
    def lidar_com_trigger(self, crewai_trigger_payload: dict = None):
        """
        This start method can receive trigger data
        """
        if crewai_trigger_payload:
            # Process the trigger data
            trigger_id = crewai_trigger_payload.get('id')
            dados_evento = crewai_trigger_payload.get('payload', {})

            # Store it in the flow state for use by other methods
            self.state.trigger_id = trigger_id
            self.state.trigger_type = dados_evento

            return dados_evento

        # Handle manual execution
        return None

    @listen(lidar_com_trigger)
    def processar_dados(self, dados_trigger):
        """
        Process the trigger data
        """
        # ... process the trigger
```

#### Kicking Off Crews from Flows

When starting a crew inside a flow that was triggered, pass the trigger payload along:

```python
@start()
def delegar_para_crew(self, crewai_trigger_payload: dict = None):
    """
    Delegate processing to a specialized crew
    """
    crew = MinhaCrewEspecializada()

    # Pass the trigger payload to the crew
    resultado = crew.crew().kickoff(
        inputs={
            'parametro_personalizado': "valor_personalizado",
            'crewai_trigger_payload': crewai_trigger_payload
        },
    )

    return resultado
```

## Troubleshooting

**Trigger is not firing:**
- Check that the trigger is enabled
- Check the integration's connection status

**Execution failures:**
- Check the execution logs for error details
- If you are developing locally, make sure your inputs include the `crewai_trigger_payload` parameter with the correct payload (a sketch follows this list)
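
A quick way to reproduce trigger behaviour while developing is to pass a sample payload yourself when kicking the crew off locally. This mirrors the `kickoff(inputs=...)` pattern shown above; the payload contents are placeholders and would ideally be taken from the samples repository.

```python
# Local development sketch: simulate a triggered run by supplying the
# payload manually (placeholder data; use a real sample from the repository).
sample_payload = {"id": "local-test", "payload": {"example": "data"}}

crew = MinhaCrewAutomatizada()
result = crew.crew().kickoff(
    inputs={
        "crewai_trigger_payload": sample_payload,
    },
)
print(result)
```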

Automation triggers turn your CrewAI deployments into responsive, event-driven systems that integrate seamlessly with your existing business processes and tools.
22
docs/pt-BR/examples/cookbooks.mdx
Normal file
@@ -0,0 +1,22 @@
---
title: CrewAI Cookbooks
description: Quickstarts and feature-focused notebooks for learning patterns quickly.
icon: book
---

## Quickstarts & Demos

<CardGroup cols={2}>
<Card title="Quickstarts Repository" icon="bolt" href="https://github.com/crewAIInc/crewAI-quickstarts">
Demos and small projects that showcase specific CrewAI capabilities.
</Card>
<Card title="Notebooks in the Examples" icon="book-open" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks">
Interactive notebooks for hands-on learning.
</Card>
</CardGroup>

<Tip>
Use the Cookbooks to learn a pattern quickly, then move on to the full Examples for production implementations.
</Tip>

@@ -1,62 +1,85 @@
|
||||
---
|
||||
title: Exemplos CrewAI
|
||||
description: Uma coleção de exemplos que mostram como usar o framework CrewAI para automatizar fluxos de trabalho.
|
||||
description: Explore exemplos organizados por Crews, Flows, Integrações e Notebooks.
|
||||
icon: rocket-launch
|
||||
---
|
||||
|
||||
## Crews
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card
|
||||
title="Estratégia de Marketing"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/crewAIInc/crewAI-examples/tree/main/marketing_strategy"
|
||||
icon="bullhorn"
|
||||
iconType="solid"
|
||||
>
|
||||
Automatize a criação de estratégias de marketing com CrewAI.
|
||||
<Card title="Estratégia de Marketing" icon="bullhorn" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/marketing_strategy">
|
||||
Planejamento de campanhas com múltiplos agentes.
|
||||
</Card>
|
||||
<Card title="Viagem Surpresa" icon="plane" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/surprise_trip">
|
||||
Planejamento de viagens personalizadas.
|
||||
</Card>
|
||||
<Card title="Relacionar Perfil a Posições" icon="id-card" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/match_profile_to_positions">
|
||||
Correspondência de CV para vagas com busca vetorial.
|
||||
</Card>
|
||||
<Card title="Criar Vaga" icon="newspaper" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/job-posting">
|
||||
Criação automatizada de descrições de vagas.
|
||||
</Card>
|
||||
<Card title="Crew Construtor de Jogos" icon="gamepad" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/game-builder-crew">
|
||||
Equipe multiagente que projeta e constrói jogos em Python.
|
||||
</Card>
|
||||
<Card title="Recrutamento" icon="user-group" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/recruitment">
|
||||
Prospecção e avaliação de candidatos.
|
||||
</Card>
|
||||
<Card title="Ver todos os Crews" icon="users" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews">
|
||||
Lista completa de exemplos de crews.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Flows
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="Content Creator Flow" icon="pen" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/content_creator_flow">
|
||||
Geração de conteúdo com roteamento.
|
||||
</Card>
|
||||
<Card title="Email Auto Responder" icon="envelope" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/email_auto_responder_flow">
|
||||
Monitoramento e respostas de e‑mail.
|
||||
</Card>
|
||||
<Card title="Lead Score Flow" icon="chart-line" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/lead_score_flow">
|
||||
Qualificação de leads com revisão humana.
|
||||
</Card>
|
||||
<Card title="Meeting Assistant Flow" icon="calendar" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/meeting_assistant_flow">
|
||||
Processamento de notas com integrações.
|
||||
</Card>
|
||||
<Card title="Self Evaluation Loop" icon="rotate" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/self_evaluation_loop_flow">
|
||||
Fluxos de autoaperfeiçoamento iterativo.
|
||||
</Card>
|
||||
<Card title="Escrever um Livro (Flows)" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/write_a_book_with_flows">
|
||||
Geração paralela de capítulos.
|
||||
</Card>
|
||||
<Card title="Ver todos os Flows" icon="diagram-project" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows">
|
||||
Lista completa de exemplos de flows.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Integrações
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="CrewAI ↔ LangGraph" icon="link" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/crewai-langgraph">
|
||||
Integração com o framework LangGraph.
|
||||
</Card>
|
||||
<Card title="Azure OpenAI" icon="cloud" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/azure_model">
|
||||
Usando CrewAI com Azure OpenAI.
|
||||
</Card>
|
||||
<Card title="Modelos NVIDIA" icon="microchip" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/nvidia_models">
|
||||
Integrações com o ecossistema NVIDIA.
|
||||
</Card>
|
||||
<Card title="Ver todas as Integrações" icon="puzzle-piece" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations">
|
||||
Todos os exemplos de integrações.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Notebooks
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="Simple QA Crew + Flow" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks/Simple%20QA%20Crew%20%2B%20Flow">
|
||||
Simple QA Crew + Flow.
|
||||
</Card>
|
||||
<Card title="Todos os Notebooks" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks">
|
||||
Exemplos interativos para aprendizado e experimentação.
|
||||
</Card>
|
||||
<Card
|
||||
title="Viagem Surpresa"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/crewAIInc/crewAI-examples/tree/main/surprise_trip"
|
||||
icon="plane"
|
||||
iconType="duotone"
|
||||
>
|
||||
Crie um roteiro de viagem surpresa com CrewAI.
|
||||
</Card>
|
||||
<Card
|
||||
title="Relacionar Perfil a Posições"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/crewAIInc/crewAI-examples/tree/main/match_profile_to_positions"
|
||||
icon="linkedin"
|
||||
iconType="duotone"
|
||||
>
|
||||
Relacione um perfil a vagas de emprego com CrewAI.
|
||||
</Card>
|
||||
<Card
|
||||
title="Criar Vaga"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/crewAIInc/crewAI-examples/tree/main/job-posting"
|
||||
icon="newspaper"
|
||||
iconType="duotone"
|
||||
>
|
||||
Crie uma vaga de emprego com CrewAI.
|
||||
</Card>
|
||||
<Card
|
||||
title="Gerador de Jogos"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/crewAIInc/crewAI-examples/tree/main/game-builder-crew"
|
||||
icon="gamepad"
|
||||
iconType="duotone"
|
||||
>
|
||||
Crie um jogo com CrewAI.
|
||||
</Card>
|
||||
<Card
|
||||
title="Encontrar Candidatos"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/crewAIInc/crewAI-examples/tree/main/recruitment"
|
||||
icon="user-group"
|
||||
iconType="duotone"
|
||||
>
|
||||
Encontre candidatos a vagas com CrewAI.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -1,126 +0,0 @@
|
||||
---
|
||||
title: Integração com AgentOps
|
||||
description: Entendendo e registrando a performance do seu agente com AgentOps.
|
||||
icon: paperclip
|
||||
---
|
||||
|
||||
# Introdução
|
||||
|
||||
Observabilidade é um aspecto fundamental no desenvolvimento e implantação de agentes de IA conversacional. Ela permite que desenvolvedores compreendam como seus agentes estão performando,
|
||||
como eles estão interagindo com os usuários e como utilizam ferramentas externas e APIs.
|
||||
AgentOps é um produto independente do CrewAI que fornece uma solução completa de observabilidade para agentes.
|
||||
|
||||
## AgentOps
|
||||
|
||||
[AgentOps](https://agentops.ai/?=crew) oferece replay de sessões, métricas e monitoramento para agentes.
|
||||
|
||||
Em um alto nível, o AgentOps oferece a capacidade de monitorar custos, uso de tokens, latência, falhas do agente, estatísticas de sessão e muito mais.
|
||||
Para mais informações, confira o [Repositório do AgentOps](https://github.com/AgentOps-AI/agentops).
|
||||
|
||||
### Visão Geral
|
||||
|
||||
AgentOps fornece monitoramento para agentes em desenvolvimento e produção.
|
||||
Disponibiliza um dashboard para acompanhamento de performance dos agentes, replay de sessões e relatórios personalizados.
|
||||
|
||||
Além disso, o AgentOps traz análises detalhadas das sessões para visualizar interações do agente Crew, chamadas LLM e uso de ferramentas em tempo real.
|
||||
Esse recurso é útil para depuração e entendimento de como os agentes interagem com usuários e entre si.
|
||||
|
||||

|
||||

|
||||

|
||||
|
||||
### Funcionalidades
|
||||
|
||||
- **Gerenciamento e Rastreamento de Custos de LLM**: Acompanhe gastos com provedores de modelos fundamentais.
|
||||
- **Análises de Replay**: Assista gráficos de execução do agente, passo a passo.
|
||||
- **Detecção de Pensamento Recursivo**: Identifique quando agentes entram em loops infinitos.
|
||||
- **Relatórios Personalizados**: Crie análises customizadas sobre a performance dos agentes.
|
||||
- **Dashboard Analítico**: Monitore estatísticas gerais de agentes em desenvolvimento e produção.
|
||||
- **Teste de Modelos Públicos**: Teste seus agentes em benchmarks e rankings.
|
||||
- **Testes Personalizados**: Execute seus agentes em testes específicos de domínio.
|
||||
- **Depuração com Viagem no Tempo**: Reinicie suas sessões a partir de checkpoints.
|
||||
- **Conformidade e Segurança**: Crie registros de auditoria e detecte possíveis ameaças como uso de palavrões e vazamento de dados pessoais.
|
||||
- **Detecção de Prompt Injection**: Identifique possíveis injeções de código e vazamentos de segredos.
|
||||
|
||||
### Utilizando o AgentOps
|
||||
|
||||
<Steps>
|
||||
<Step title="Crie uma Chave de API">
|
||||
Crie uma chave de API de usuário aqui: [Create API Key](https://app.agentops.ai/account)
|
||||
</Step>
|
||||
<Step title="Configure seu Ambiente">
|
||||
Adicione sua chave API nas variáveis de ambiente:
|
||||
```bash
|
||||
AGENTOPS_API_KEY=<YOUR_AGENTOPS_API_KEY>
|
||||
```
|
||||
</Step>
|
||||
<Step title="Instale o AgentOps">
|
||||
Instale o AgentOps com:
|
||||
```bash
|
||||
pip install 'crewai[agentops]'
|
||||
```
|
||||
ou
|
||||
```bash
|
||||
pip install agentops
|
||||
```
|
||||
</Step>
|
||||
<Step title="Inicialize o AgentOps">
|
||||
Antes de utilizar o `Crew` no seu script, inclua estas linhas:
|
||||
|
||||
```python
|
||||
import agentops
|
||||
agentops.init()
|
||||
```
|
||||
|
||||
Isso irá iniciar uma sessão do AgentOps e também rastrear automaticamente os agentes Crew. Para mais detalhes sobre como adaptar sistemas de agentes mais complexos,
|
||||
confira a [documentação do AgentOps](https://docs.agentops.ai) ou participe do [Discord](https://discord.gg/j4f3KbeH).
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
### Exemplos de Crew + AgentOps
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card
|
||||
title="Vaga de Emprego"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/job-posting"
|
||||
icon="briefcase"
|
||||
iconType="solid"
|
||||
>
|
||||
Exemplo de um agente Crew que gera vagas de emprego.
|
||||
</Card>
|
||||
<Card
|
||||
title="Validador de Markdown"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/markdown_validator"
|
||||
icon="markdown"
|
||||
iconType="solid"
|
||||
>
|
||||
Exemplo de um agente Crew que valida arquivos Markdown.
|
||||
</Card>
|
||||
<Card
|
||||
title="Post no Instagram"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/instagram_post"
|
||||
icon="square-instagram"
|
||||
iconType="brands"
|
||||
>
|
||||
Exemplo de um agente Crew que gera posts para Instagram.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
### Mais Informações
|
||||
|
||||
Para começar, crie uma [conta AgentOps](https://agentops.ai/?=crew).
|
||||
|
||||
Para sugestões de funcionalidades ou relatos de bugs, entre em contato com o time do AgentOps pelo [Repositório do AgentOps](https://github.com/AgentOps-AI/agentops).
|
||||
|
||||
#### Links Extras
|
||||
|
||||
<a href="https://twitter.com/agentopsai/">🐦 Twitter</a>
|
||||
<span> • </span>
|
||||
<a href="https://discord.gg/JHPt4C7r">📢 Discord</a>
|
||||
<span> • </span>
|
||||
<a href="https://app.agentops.ai/?=crew">🖇️ Dashboard AgentOps</a>
|
||||
<span> • </span>
|
||||
<a href="https://docs.agentops.ai/introduction">📙 Documentação</a>
|
||||
@@ -21,9 +21,6 @@ A observabilidade é fundamental para entender como seus agentes CrewAI estão d
|
||||
### Plataformas de Monitoramento e Rastreamento
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="AgentOps" icon="paperclip" href="/pt-BR/observability/agentops">
|
||||
Replays de sessões, métricas e monitoramento para desenvolvimento e produção de agentes.
|
||||
</Card>
|
||||
|
||||
<Card title="LangDB" icon="database" href="/pt-BR/observability/langdb">
|
||||
Rastreamento ponta a ponta para fluxos de trabalho CrewAI com captura automática de interações de agentes.
|
||||
|
||||
145
docs/pt-BR/observability/truefoundry.mdx
Normal file
@@ -0,0 +1,145 @@
|
||||
---
|
||||
title: Integração com a TrueFoundry
|
||||
icon: chart-line
|
||||
---
|
||||
|
||||
A TrueFoundry fornece um [AI Gateway](https://www.truefoundry.com/ai-gateway) pronto para uso empresarial, que pode ser usado para governança e observabilidade em frameworks agentivos como o CrewAI. O AI Gateway da TrueFoundry funciona como uma interface unificada para acesso a LLMs, oferecendo:
|
||||
|
||||
- **Acesso unificado à API**: Conecte-se a 250+ LLMs (OpenAI, Claude, Gemini, Groq, Mistral) por meio de uma única API
|
||||
- **Baixa latência**: Latência interna abaixo de 3 ms com roteamento inteligente e balanceamento de carga
|
||||
- **Segurança corporativa**: Conformidade com SOC 2, HIPAA e GDPR, com RBAC e auditoria de logs
|
||||
- **Gestão de cotas e custos**: Cotas baseadas em tokens, rate limiting e rastreamento abrangente de uso
|
||||
- **Observabilidade**: Registro completo de requisições/respostas, métricas e traces com retenção personalizável
|
||||
|
||||
## Como a TrueFoundry se integra ao CrewAI
|
||||
|
||||
|
||||
### Instalação e configuração
|
||||
|
||||
<Steps>
|
||||
<Step title="Instalar o CrewAI">
|
||||
```bash
|
||||
pip install crewai
|
||||
```
|
||||
</Step>
|
||||
|
||||
<Step title="Obter o token de acesso da TrueFoundry">
|
||||
1. Crie uma conta na [TrueFoundry](https://www.truefoundry.com/register)
|
||||
2. Siga os passos do [Início rápido](https://docs.truefoundry.com/gateway/quick-start)
|
||||
</Step>
|
||||
|
||||
<Step title="Configurar o CrewAI com a TrueFoundry">
|
||||

|
||||
|
||||
```python
|
||||
from crewai import LLM
|
||||
|
||||
# Criar uma instância de LLM com o AI Gateway da TrueFoundry
|
||||
truefoundry_llm = LLM(
|
||||
model="openai-main/gpt-4o", # Da mesma forma, você pode chamar qualquer modelo de qualquer provedor
|
||||
base_url="your_truefoundry_gateway_base_url",
|
||||
api_key="your_truefoundry_api_key"
|
||||
)
|
||||
|
||||
# Usar nos seus agentes do CrewAI
|
||||
from crewai import Agent
|
||||
|
||||
@agent
|
||||
def researcher(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config['researcher'],
|
||||
llm=truefoundry_llm,
|
||||
verbose=True
|
||||
)
|
||||
```
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
### Exemplo completo do CrewAI
|
||||
|
||||
```python
|
||||
from crewai import Agent, Task, Crew, LLM
|
||||
|
||||
# Configurar o LLM com a TrueFoundry
|
||||
llm = LLM(
|
||||
model="openai-main/gpt-4o",
|
||||
base_url="your_truefoundry_gateway_base_url",
|
||||
api_key="your_truefoundry_api_key"
|
||||
)
|
||||
|
||||
# Criar agentes
|
||||
researcher = Agent(
|
||||
role='Analista de Pesquisa',
|
||||
goal='Conduzir pesquisa de mercado detalhada',
|
||||
backstory='Analista de mercado especialista com atenção aos detalhes',
|
||||
llm=llm,
|
||||
verbose=True
|
||||
)
|
||||
|
||||
writer = Agent(
|
||||
role='Redator de Conteúdo',
|
||||
goal='Criar relatórios abrangentes',
|
||||
backstory='Redator técnico experiente',
|
||||
llm=llm,
|
||||
verbose=True
|
||||
)
|
||||
|
||||
# Criar tarefas
|
||||
research_task = Task(
|
||||
description='Pesquisar tendências do mercado de IA para 2024',
|
||||
agent=researcher,
|
||||
expected_output='Resumo de pesquisa abrangente'
|
||||
)
|
||||
|
||||
writing_task = Task(
|
||||
description='Criar um relatório de pesquisa de mercado',
|
||||
agent=writer,
|
||||
expected_output='Relatório bem estruturado com insights',
|
||||
context=[research_task]
|
||||
)
|
||||
|
||||
# Criar e executar a crew
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
tasks=[research_task, writing_task],
|
||||
verbose=True
|
||||
)
|
||||
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
### Observabilidade e governança
|
||||
|
||||
Monitore seus agentes do CrewAI pela aba de métricas da TrueFoundry:
|
||||

|
||||
|
||||
Com o AI Gateway da TrueFoundry, você pode monitorar e analisar:
|
||||
|
||||
- **Métricas de desempenho**: Acompanhe métricas-chave de latência como Latência da Requisição, Tempo até o Primeiro Token (TTFS) e Latência entre Tokens (ITL), com percentis P99, P90 e P50
|
||||
- **Custos e uso de tokens**: Tenha visibilidade dos custos da sua aplicação com detalhamento de tokens de entrada/saída e das despesas associadas a cada modelo
|
||||
- **Padrões de uso**: Entenda como sua aplicação está sendo utilizada com análises detalhadas sobre atividade de usuários, distribuição de modelos e uso por equipe
|
||||
- **Limite de taxa e balanceamento de carga**: Você pode configurar rate limiting, balanceamento de carga e fallback para seus modelos
|
||||
|
||||
## Rastreamento
|
||||
|
||||
Para uma compreensão mais detalhada sobre rastreamento, consulte [getting-started-tracing](https://docs.truefoundry.com/docs/tracing/tracing-getting-started). Para rastreamento, você pode adicionar o SDK do Traceloop:
|
||||
|
||||
```bash
|
||||
pip install traceloop-sdk
|
||||
```
|
||||
|
||||
```python
|
||||
from traceloop.sdk import Traceloop
|
||||
|
||||
# Inicializar rastreamento avançado
|
||||
Traceloop.init(
|
||||
api_endpoint="https://your-truefoundry-endpoint/api/tracing",
|
||||
headers={
|
||||
"Authorization": f"Bearer {your_truefoundry_pat_token}",
|
||||
"TFY-Tracing-Project": "your_project_name",
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
Isso oferece correlação adicional de rastreamentos em todo o seu fluxo de trabalho com o CrewAI.
|
||||

|
||||
@@ -48,11 +48,10 @@ Documentation = "https://docs.crewai.com"
|
||||
Repository = "https://github.com/crewAIInc/crewAI"
|
||||
|
||||
[project.optional-dependencies]
|
||||
tools = ["crewai-tools~=0.60.0"]
|
||||
tools = ["crewai-tools~=0.62.1"]
|
||||
embeddings = [
|
||||
"tiktoken~=0.8.0"
|
||||
]
|
||||
agentops = ["agentops==0.3.18"]
|
||||
pdfplumber = [
|
||||
"pdfplumber>=0.11.4",
|
||||
]
|
||||
@@ -99,6 +98,11 @@ exclude = ["cli/templates"]
|
||||
[tool.bandit]
|
||||
exclude_dirs = ["src/crewai/cli/templates"]
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
markers = [
|
||||
"telemetry: mark test as a telemetry test (don't mock telemetry)",
|
||||
]
|
||||
|
||||
# PyTorch index configuration, since torch 2.5.0 is not compatible with python 3.13
|
||||
[[tool.uv.index]]
|
||||
name = "pytorch-nightly"
|
||||
|
||||
@@ -54,7 +54,7 @@ def _track_install_async():
|
||||
|
||||
_track_install_async()
|
||||
|
||||
__version__ = "0.157.0"
|
||||
__version__ = "0.165.1"
|
||||
__all__ = [
|
||||
"Agent",
|
||||
"Crew",
|
||||
|
||||
@@ -1,7 +1,18 @@
|
||||
import shutil
|
||||
import subprocess
|
||||
import time
|
||||
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Type, Union
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
List,
|
||||
Literal,
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
Type,
|
||||
Union,
|
||||
)
|
||||
|
||||
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
|
||||
|
||||
@@ -162,7 +173,7 @@ class Agent(BaseAgent):
|
||||
)
|
||||
guardrail: Optional[Union[Callable[[Any], Tuple[bool, Any]], str]] = Field(
|
||||
default=None,
|
||||
description="Function or string description of a guardrail to validate agent output"
|
||||
description="Function or string description of a guardrail to validate agent output",
|
||||
)
|
||||
guardrail_max_retries: int = Field(
|
||||
default=3, description="Maximum number of retries when guardrail fails"
|
||||
@@ -276,7 +287,7 @@ class Agent(BaseAgent):
|
||||
self._inject_date_to_task(task)
|
||||
|
||||
if self.tools_handler:
|
||||
self.tools_handler.last_used_tool = {} # type: ignore # Incompatible types in assignment (expression has type "dict[Never, Never]", variable has type "ToolCalling")
|
||||
self.tools_handler.last_used_tool = None
|
||||
|
||||
task_prompt = task.prompt()
|
||||
|
||||
@@ -336,7 +347,6 @@ class Agent(BaseAgent):
|
||||
self.knowledge_config.model_dump() if self.knowledge_config else {}
|
||||
)
|
||||
|
||||
|
||||
if self.knowledge or (self.crew and self.crew.knowledge):
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import time
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import TYPE_CHECKING, Dict, List
|
||||
|
||||
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
|
||||
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
|
||||
@@ -21,6 +21,7 @@ class CrewAgentExecutorMixin:
|
||||
task: "Task"
|
||||
iterations: int
|
||||
max_iter: int
|
||||
messages: List[Dict[str, str]]
|
||||
_i18n: I18N
|
||||
_printer: Printer = Printer()
|
||||
|
||||
@@ -62,6 +63,7 @@ class CrewAgentExecutorMixin:
|
||||
value=output.text,
|
||||
metadata={
|
||||
"description": self.task.description,
|
||||
"messages": self.messages,
|
||||
},
|
||||
agent=self.agent.role,
|
||||
)
|
||||
@@ -127,7 +129,6 @@ class CrewAgentExecutorMixin:
|
||||
def _ask_human_input(self, final_answer: str) -> str:
|
||||
"""Prompt human input with mode-appropriate messaging."""
|
||||
event_listener.formatter.pause_live_updates()
|
||||
|
||||
try:
|
||||
self._printer.print(
|
||||
content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"
|
||||
|
||||
@@ -8,13 +8,13 @@ from .cache.cache_handler import CacheHandler
|
||||
class ToolsHandler:
|
||||
"""Callback handler for tool usage."""
|
||||
|
||||
last_used_tool: ToolCalling = {} # type: ignore # BUG?: Incompatible types in assignment (expression has type "Dict[...]", variable has type "ToolCalling")
|
||||
last_used_tool: Optional[ToolCalling] = None
|
||||
cache: Optional[CacheHandler]
|
||||
|
||||
def __init__(self, cache: Optional[CacheHandler] = None):
|
||||
"""Initialize the callback handler."""
|
||||
self.cache = cache
|
||||
self.last_used_tool = {} # type: ignore # BUG?: same as above
|
||||
self.last_used_tool = None
|
||||
|
||||
def on_tool_use(
|
||||
self,
|
||||
|
||||
@@ -7,7 +7,8 @@ from rich.console import Console
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
from .utils import TokenManager, validate_jwt_token
|
||||
from .utils import validate_jwt_token
|
||||
from crewai.cli.shared.token_manager import TokenManager
|
||||
from urllib.parse import quote
|
||||
from crewai.cli.plus_api import PlusAPI
|
||||
from crewai.cli.config import Settings
|
||||
@@ -21,10 +22,19 @@ console = Console()
|
||||
|
||||
|
||||
class Oauth2Settings(BaseModel):
|
||||
provider: str = Field(description="OAuth2 provider used for authentication (e.g., workos, okta, auth0).")
|
||||
client_id: str = Field(description="OAuth2 client ID issued by the provider, used during authentication requests.")
|
||||
domain: str = Field(description="OAuth2 provider's domain (e.g., your-org.auth0.com) used for issuing tokens.")
|
||||
audience: Optional[str] = Field(description="OAuth2 audience value, typically used to identify the target API or resource.", default=None)
|
||||
provider: str = Field(
|
||||
description="OAuth2 provider used for authentication (e.g., workos, okta, auth0)."
|
||||
)
|
||||
client_id: str = Field(
|
||||
description="OAuth2 client ID issued by the provider, used during authentication requests."
|
||||
)
|
||||
domain: str = Field(
|
||||
description="OAuth2 provider's domain (e.g., your-org.auth0.com) used for issuing tokens."
|
||||
)
|
||||
audience: Optional[str] = Field(
|
||||
description="OAuth2 audience value, typically used to identify the target API or resource.",
|
||||
default=None,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_settings(cls):
|
||||
@@ -44,11 +54,15 @@ class ProviderFactory:
|
||||
settings = settings or Oauth2Settings.from_settings()
|
||||
|
||||
import importlib
|
||||
module = importlib.import_module(f"crewai.cli.authentication.providers.{settings.provider.lower()}")
|
||||
|
||||
module = importlib.import_module(
|
||||
f"crewai.cli.authentication.providers.{settings.provider.lower()}"
|
||||
)
|
||||
provider = getattr(module, f"{settings.provider.capitalize()}Provider")
|
||||
|
||||
return provider(settings)
|
||||
|
||||
|
||||
class AuthenticationCommand:
|
||||
def __init__(self):
|
||||
self.token_manager = TokenManager()
|
||||
@@ -65,7 +79,7 @@ class AuthenticationCommand:
|
||||
provider="auth0",
|
||||
client_id=AUTH0_CLIENT_ID,
|
||||
domain=AUTH0_DOMAIN,
|
||||
audience=AUTH0_AUDIENCE
|
||||
audience=AUTH0_AUDIENCE,
|
||||
)
|
||||
self.oauth2_provider = ProviderFactory.from_settings(settings)
|
||||
# End of temporary code.
|
||||
@@ -75,9 +89,7 @@ class AuthenticationCommand:
|
||||
|
||||
return self._poll_for_token(device_code_data)
|
||||
|
||||
def _get_device_code(
|
||||
self
|
||||
) -> Dict[str, Any]:
|
||||
def _get_device_code(self) -> Dict[str, Any]:
|
||||
"""Get the device code to authenticate the user."""
|
||||
|
||||
device_code_payload = {
|
||||
@@ -86,7 +98,9 @@ class AuthenticationCommand:
|
||||
"audience": self.oauth2_provider.get_audience(),
|
||||
}
|
||||
response = requests.post(
|
||||
url=self.oauth2_provider.get_authorize_url(), data=device_code_payload, timeout=20
|
||||
url=self.oauth2_provider.get_authorize_url(),
|
||||
data=device_code_payload,
|
||||
timeout=20,
|
||||
)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
@@ -97,9 +111,7 @@ class AuthenticationCommand:
|
||||
console.print("2. Enter the following code: ", device_code_data["user_code"])
|
||||
webbrowser.open(device_code_data["verification_uri_complete"])
|
||||
|
||||
def _poll_for_token(
|
||||
self, device_code_data: Dict[str, Any]
|
||||
) -> None:
|
||||
def _poll_for_token(self, device_code_data: Dict[str, Any]) -> None:
|
||||
"""Polls the server for the token until it is received, or max attempts are reached."""
|
||||
|
||||
token_payload = {
|
||||
@@ -112,7 +124,9 @@ class AuthenticationCommand:
|
||||
|
||||
attempts = 0
|
||||
while True and attempts < 10:
|
||||
response = requests.post(self.oauth2_provider.get_token_url(), data=token_payload, timeout=30)
|
||||
response = requests.post(
|
||||
self.oauth2_provider.get_token_url(), data=token_payload, timeout=30
|
||||
)
|
||||
token_data = response.json()
|
||||
|
||||
if response.status_code == 200:
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
from .utils import TokenManager
|
||||
from crewai.cli.shared.token_manager import TokenManager
|
||||
|
||||
|
||||
class AuthError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def get_auth_token() -> str:
|
||||
"""Get the authentication token."""
|
||||
access_token = TokenManager().get_token()
|
||||
if not access_token:
|
||||
raise Exception("No token found, make sure you are logged in")
|
||||
raise AuthError("No token found, make sure you are logged in")
|
||||
return access_token
|
||||
|
||||
@@ -1,12 +1,5 @@
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
import jwt
|
||||
from jwt import PyJWKClient
|
||||
from cryptography.fernet import Fernet
|
||||
|
||||
|
||||
def validate_jwt_token(
|
||||
@@ -67,118 +60,3 @@ def validate_jwt_token(
|
||||
raise Exception(f"JWKS or key processing error: {str(e)}")
|
||||
except jwt.InvalidTokenError as e:
|
||||
raise Exception(f"Invalid token: {str(e)}")
|
||||
|
||||
|
||||
class TokenManager:
|
||||
def __init__(self, file_path: str = "tokens.enc") -> None:
|
||||
"""
|
||||
Initialize the TokenManager class.
|
||||
|
||||
:param file_path: The file path to store the encrypted tokens. Default is "tokens.enc".
|
||||
"""
|
||||
self.file_path = file_path
|
||||
self.key = self._get_or_create_key()
|
||||
self.fernet = Fernet(self.key)
|
||||
|
||||
def _get_or_create_key(self) -> bytes:
|
||||
"""
|
||||
Get or create the encryption key.
|
||||
|
||||
:return: The encryption key.
|
||||
"""
|
||||
key_filename = "secret.key"
|
||||
key = self.read_secure_file(key_filename)
|
||||
|
||||
if key is not None:
|
||||
return key
|
||||
|
||||
new_key = Fernet.generate_key()
|
||||
self.save_secure_file(key_filename, new_key)
|
||||
return new_key
|
||||
|
||||
def save_tokens(self, access_token: str, expires_at: int) -> None:
|
||||
"""
|
||||
Save the access token and its expiration time.
|
||||
|
||||
:param access_token: The access token to save.
|
||||
:param expires_at: The UNIX timestamp of the expiration time.
|
||||
"""
|
||||
expiration_time = datetime.fromtimestamp(expires_at)
|
||||
data = {
|
||||
"access_token": access_token,
|
||||
"expiration": expiration_time.isoformat(),
|
||||
}
|
||||
encrypted_data = self.fernet.encrypt(json.dumps(data).encode())
|
||||
self.save_secure_file(self.file_path, encrypted_data)
|
||||
|
||||
def get_token(self) -> Optional[str]:
|
||||
"""
|
||||
Get the access token if it is valid and not expired.
|
||||
|
||||
:return: The access token if valid and not expired, otherwise None.
|
||||
"""
|
||||
encrypted_data = self.read_secure_file(self.file_path)
|
||||
|
||||
decrypted_data = self.fernet.decrypt(encrypted_data) # type: ignore
|
||||
data = json.loads(decrypted_data)
|
||||
|
||||
expiration = datetime.fromisoformat(data["expiration"])
|
||||
if expiration <= datetime.now():
|
||||
return None
|
||||
|
||||
return data["access_token"]
|
||||
|
||||
def get_secure_storage_path(self) -> Path:
|
||||
"""
|
||||
Get the secure storage path based on the operating system.
|
||||
|
||||
:return: The secure storage path.
|
||||
"""
|
||||
if sys.platform == "win32":
|
||||
# Windows: Use %LOCALAPPDATA%
|
||||
base_path = os.environ.get("LOCALAPPDATA")
|
||||
elif sys.platform == "darwin":
|
||||
# macOS: Use ~/Library/Application Support
|
||||
base_path = os.path.expanduser("~/Library/Application Support")
|
||||
else:
|
||||
# Linux and other Unix-like: Use ~/.local/share
|
||||
base_path = os.path.expanduser("~/.local/share")
|
||||
|
||||
app_name = "crewai/credentials"
|
||||
storage_path = Path(base_path) / app_name
|
||||
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
return storage_path
|
||||
|
||||
def save_secure_file(self, filename: str, content: bytes) -> None:
|
||||
"""
|
||||
Save the content to a secure file.
|
||||
|
||||
:param filename: The name of the file.
|
||||
:param content: The content to save.
|
||||
"""
|
||||
storage_path = self.get_secure_storage_path()
|
||||
file_path = storage_path / filename
|
||||
|
||||
with open(file_path, "wb") as f:
|
||||
f.write(content)
|
||||
|
||||
# Set appropriate permissions (read/write for owner only)
|
||||
os.chmod(file_path, 0o600)
|
||||
|
||||
def read_secure_file(self, filename: str) -> Optional[bytes]:
|
||||
"""
|
||||
Read the content of a secure file.
|
||||
|
||||
:param filename: The name of the file.
|
||||
:return: The content of the file if it exists, otherwise None.
|
||||
"""
|
||||
storage_path = self.get_secure_storage_path()
|
||||
file_path = storage_path / filename
|
||||
|
||||
if not file_path.exists():
|
||||
return None
|
||||
|
||||
with open(file_path, "rb") as f:
|
||||
return f.read()
|
||||
|
||||
@@ -11,6 +11,7 @@ from crewai.cli.constants import (
|
||||
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_CLIENT_ID,
|
||||
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_DOMAIN,
|
||||
)
|
||||
from crewai.cli.shared.token_manager import TokenManager
|
||||
|
||||
DEFAULT_CONFIG_PATH = Path.home() / ".config" / "crewai" / "settings.json"
|
||||
|
||||
@@ -53,6 +54,7 @@ HIDDEN_SETTINGS_KEYS = [
|
||||
"tool_repository_password",
|
||||
]
|
||||
|
||||
|
||||
class Settings(BaseModel):
|
||||
enterprise_base_url: Optional[str] = Field(
|
||||
default=DEFAULT_CLI_SETTINGS["enterprise_base_url"],
|
||||
@@ -74,12 +76,12 @@ class Settings(BaseModel):
|
||||
|
||||
oauth2_provider: str = Field(
|
||||
description="OAuth2 provider used for authentication (e.g., workos, okta, auth0).",
|
||||
default=DEFAULT_CLI_SETTINGS["oauth2_provider"]
|
||||
default=DEFAULT_CLI_SETTINGS["oauth2_provider"],
|
||||
)
|
||||
|
||||
oauth2_audience: Optional[str] = Field(
|
||||
description="OAuth2 audience value, typically used to identify the target API or resource.",
|
||||
default=DEFAULT_CLI_SETTINGS["oauth2_audience"]
|
||||
default=DEFAULT_CLI_SETTINGS["oauth2_audience"],
|
||||
)
|
||||
|
||||
oauth2_client_id: str = Field(
|
||||
@@ -89,7 +91,7 @@ class Settings(BaseModel):
|
||||
|
||||
oauth2_domain: str = Field(
|
||||
description="OAuth2 provider's domain (e.g., your-org.auth0.com) used for issuing tokens.",
|
||||
default=DEFAULT_CLI_SETTINGS["oauth2_domain"]
|
||||
default=DEFAULT_CLI_SETTINGS["oauth2_domain"],
|
||||
)
|
||||
|
||||
def __init__(self, config_path: Path = DEFAULT_CONFIG_PATH, **data):
|
||||
@@ -116,6 +118,7 @@ class Settings(BaseModel):
|
||||
"""Reset all settings to default values"""
|
||||
self._reset_user_settings()
|
||||
self._reset_cli_settings()
|
||||
self._clear_auth_tokens()
|
||||
self.dump()
|
||||
|
||||
def dump(self) -> None:
|
||||
@@ -139,3 +142,7 @@ class Settings(BaseModel):
|
||||
"""Reset all CLI settings to default values"""
|
||||
for key in CLI_SETTINGS_KEYS:
|
||||
setattr(self, key, DEFAULT_CLI_SETTINGS.get(key))
|
||||
|
||||
def _clear_auth_tokens(self) -> None:
|
||||
"""Clear all authentication tokens"""
|
||||
TokenManager().clear_tokens()
|
||||
|
||||
@@ -18,6 +18,7 @@ class PlusAPI:
|
||||
CREWS_RESOURCE = "/crewai_plus/api/v1/crews"
|
||||
AGENTS_RESOURCE = "/crewai_plus/api/v1/agents"
|
||||
TRACING_RESOURCE = "/crewai_plus/api/v1/tracing"
|
||||
EPHEMERAL_TRACING_RESOURCE = "/crewai_plus/api/v1/tracing/ephemeral"
|
||||
|
||||
def __init__(self, api_key: str) -> None:
|
||||
self.api_key = api_key
|
||||
@@ -124,6 +125,11 @@ class PlusAPI:
|
||||
"POST", f"{self.TRACING_RESOURCE}/batches", json=payload
|
||||
)
|
||||
|
||||
def initialize_ephemeral_trace_batch(self, payload) -> requests.Response:
|
||||
return self._make_request(
|
||||
"POST", f"{self.EPHEMERAL_TRACING_RESOURCE}/batches", json=payload
|
||||
)
|
||||
|
||||
def send_trace_events(self, trace_batch_id: str, payload) -> requests.Response:
|
||||
return self._make_request(
|
||||
"POST",
|
||||
@@ -131,9 +137,27 @@ class PlusAPI:
|
||||
json=payload,
|
||||
)
|
||||
|
||||
def send_ephemeral_trace_events(
|
||||
self, trace_batch_id: str, payload
|
||||
) -> requests.Response:
|
||||
return self._make_request(
|
||||
"POST",
|
||||
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches/{trace_batch_id}/events",
|
||||
json=payload,
|
||||
)
|
||||
|
||||
def finalize_trace_batch(self, trace_batch_id: str, payload) -> requests.Response:
|
||||
return self._make_request(
|
||||
"PATCH",
|
||||
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/finalize",
|
||||
json=payload,
|
||||
)
|
||||
|
||||
def finalize_ephemeral_trace_batch(
|
||||
self, trace_batch_id: str, payload
|
||||
) -> requests.Response:
|
||||
return self._make_request(
|
||||
"PATCH",
|
||||
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches/{trace_batch_id}/finalize",
|
||||
json=payload,
|
||||
)
|
||||
|
||||
0
src/crewai/cli/shared/__init__.py
Normal file
139
src/crewai/cli/shared/token_manager.py
Normal file
@@ -0,0 +1,139 @@
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
from cryptography.fernet import Fernet
|
||||
|
||||
|
||||
class TokenManager:
|
||||
def __init__(self, file_path: str = "tokens.enc") -> None:
|
||||
"""
|
||||
Initialize the TokenManager class.
|
||||
|
||||
:param file_path: The file path to store the encrypted tokens. Default is "tokens.enc".
|
||||
"""
|
||||
self.file_path = file_path
|
||||
self.key = self._get_or_create_key()
|
||||
self.fernet = Fernet(self.key)
|
||||
|
||||
def _get_or_create_key(self) -> bytes:
|
||||
"""
|
||||
Get or create the encryption key.
|
||||
|
||||
:return: The encryption key.
|
||||
"""
|
||||
key_filename = "secret.key"
|
||||
key = self.read_secure_file(key_filename)
|
||||
|
||||
if key is not None:
|
||||
return key
|
||||
|
||||
new_key = Fernet.generate_key()
|
||||
self.save_secure_file(key_filename, new_key)
|
||||
return new_key
|
||||
|
||||
def save_tokens(self, access_token: str, expires_at: int) -> None:
|
||||
"""
|
||||
Save the access token and its expiration time.
|
||||
|
||||
:param access_token: The access token to save.
|
||||
:param expires_at: The UNIX timestamp of the expiration time.
|
||||
"""
|
||||
expiration_time = datetime.fromtimestamp(expires_at)
|
||||
data = {
|
||||
"access_token": access_token,
|
||||
"expiration": expiration_time.isoformat(),
|
||||
}
|
||||
encrypted_data = self.fernet.encrypt(json.dumps(data).encode())
|
||||
self.save_secure_file(self.file_path, encrypted_data)
|
||||
|
||||
def get_token(self) -> Optional[str]:
|
||||
"""
|
||||
Get the access token if it is valid and not expired.
|
||||
|
||||
:return: The access token if valid and not expired, otherwise None.
|
||||
"""
|
||||
encrypted_data = self.read_secure_file(self.file_path)
|
||||
|
||||
decrypted_data = self.fernet.decrypt(encrypted_data) # type: ignore
|
||||
data = json.loads(decrypted_data)
|
||||
|
||||
expiration = datetime.fromisoformat(data["expiration"])
|
||||
if expiration <= datetime.now():
|
||||
return None
|
||||
|
||||
return data["access_token"]
|
||||
|
||||
def clear_tokens(self) -> None:
|
||||
"""
|
||||
Clear the tokens.
|
||||
"""
|
||||
self.delete_secure_file(self.file_path)
|
||||
|
||||
def get_secure_storage_path(self) -> Path:
|
||||
"""
|
||||
Get the secure storage path based on the operating system.
|
||||
|
||||
:return: The secure storage path.
|
||||
"""
|
||||
if sys.platform == "win32":
|
||||
# Windows: Use %LOCALAPPDATA%
|
||||
base_path = os.environ.get("LOCALAPPDATA")
|
||||
elif sys.platform == "darwin":
|
||||
# macOS: Use ~/Library/Application Support
|
||||
base_path = os.path.expanduser("~/Library/Application Support")
|
||||
else:
|
||||
# Linux and other Unix-like: Use ~/.local/share
|
||||
base_path = os.path.expanduser("~/.local/share")
|
||||
|
||||
app_name = "crewai/credentials"
|
||||
storage_path = Path(base_path) / app_name
|
||||
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
return storage_path
|
||||
|
||||
    def save_secure_file(self, filename: str, content: bytes) -> None:
        """
        Save the content to a secure file.

        :param filename: The name of the file.
        :param content: The content to save.
        """
        storage_path = self.get_secure_storage_path()
        file_path = storage_path / filename

        with open(file_path, "wb") as f:
            f.write(content)

        # Set appropriate permissions (read/write for owner only)
        os.chmod(file_path, 0o600)

    def read_secure_file(self, filename: str) -> Optional[bytes]:
        """
        Read the content of a secure file.

        :param filename: The name of the file.
        :return: The content of the file if it exists, otherwise None.
        """
        storage_path = self.get_secure_storage_path()
        file_path = storage_path / filename

        if not file_path.exists():
            return None

        with open(file_path, "rb") as f:
            return f.read()

    def delete_secure_file(self, filename: str) -> None:
        """
        Delete the secure file.

        :param filename: The name of the file.
        """
        storage_path = self.get_secure_storage_path()
        file_path = storage_path / filename
        if file_path.exists():
            file_path.unlink(missing_ok=True)
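For context, a minimal usage sketch of the secure token helper above. The class name, module path, and default constructor are assumed from the surrounding CLI code, which is not part of this hunk:

# Sketch only: TokenManager and its import path are assumed, not shown in this diff.
from datetime import datetime, timedelta

from crewai.cli.authentication.token_manager import TokenManager  # assumed path

manager = TokenManager()  # default token file name assumed

# Persist a token that expires in one hour; it is Fernet-encrypted on disk
# under the per-OS directory resolved by get_secure_storage_path().
expires_at = int((datetime.now() + timedelta(hours=1)).timestamp())
manager.save_tokens("my-access-token", expires_at)

print(manager.get_token())  # "my-access-token" while valid, None once expired
manager.clear_tokens()      # deletes the encrypted token file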
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]>=0.157.0,<1.0.0"
+    "crewai[tools]>=0.165.1,<1.0.0"
 ]

 [project.scripts]

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]>=0.157.0,<1.0.0",
+    "crewai[tools]>=0.165.1,<1.0.0",
 ]

 [project.scripts]

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
 readme = "README.md"
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]>=0.157.0"
+    "crewai[tools]>=0.165.1"
 ]

 [tool.crewai]

@@ -44,8 +44,9 @@ def migrate_pyproject(input_file, output_file):
         ]
         new_pyproject["project"]["requires-python"] = poetry_data.get("python")
     else:
-        # If it's already in the new format, just copy the project section
+        # If it's already in the new format, just copy the project and tool sections
         new_pyproject["project"] = pyproject_data.get("project", {})
+        new_pyproject["tool"] = pyproject_data.get("tool", {})

     # Migrate or copy dependencies
     if "dependencies" in new_pyproject["project"]:

@@ -77,7 +77,9 @@ from crewai.utilities.events.listeners.tracing.trace_listener import (
 )


-from crewai.utilities.events.listeners.tracing.utils import is_tracing_enabled
+from crewai.utilities.events.listeners.tracing.utils import (
+    is_tracing_enabled,
+)
 from crewai.utilities.formatter import (
     aggregate_raw_outputs_from_task_outputs,
     aggregate_raw_outputs_from_tasks,
@@ -283,8 +285,9 @@ class Crew(FlowTrackable, BaseModel):
|
||||
|
||||
self._cache_handler = CacheHandler()
|
||||
event_listener = EventListener()
|
||||
|
||||
if is_tracing_enabled() or self.tracing:
|
||||
trace_listener = TraceCollectionListener(tracing=self.tracing)
|
||||
trace_listener = TraceCollectionListener()
|
||||
trace_listener.setup_listeners(crewai_event_bus)
|
||||
event_listener.verbose = self.verbose
|
||||
event_listener.formatter.verbose = self.verbose
|
||||
@@ -633,6 +636,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
self._inputs = inputs
|
||||
self._interpolate_inputs(inputs)
|
||||
self._set_tasks_callbacks()
|
||||
self._set_allow_crewai_trigger_context_for_first_task()
|
||||
|
||||
i18n = I18N(prompt_file=self.prompt_file)
|
||||
|
||||
@@ -1502,3 +1506,18 @@ class Crew(FlowTrackable, BaseModel):
|
||||
"""Reset crew and agent knowledge storage."""
|
||||
for ks in knowledges:
|
||||
ks.reset()
|
||||
|
||||
def _set_allow_crewai_trigger_context_for_first_task(self):
|
||||
crewai_trigger_payload = self._inputs and self._inputs.get(
|
||||
"crewai_trigger_payload"
|
||||
)
|
||||
able_to_inject = (
|
||||
self.tasks and self.tasks[0].allow_crewai_trigger_context is None
|
||||
)
|
||||
|
||||
if (
|
||||
self.process == Process.sequential
|
||||
and crewai_trigger_payload
|
||||
and able_to_inject
|
||||
):
|
||||
self.tasks[0].allow_crewai_trigger_context = True
|
||||
|
||||
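A hedged illustration of what the new helper enables (the crew object here is a placeholder, not part of this diff): when a sequential crew is kicked off with a crewai_trigger_payload input and the first task has not set allow_crewai_trigger_context explicitly, the flag is switched on so the trigger context reaches that task.

# Sketch only: `crew` is assumed to be an already-configured Crew with
# process=Process.sequential and at least one task.
result = crew.kickoff(
    inputs={
        "topic": "Q3 revenue",
        "crewai_trigger_payload": {"source": "hubspot", "event": "deal.updated"},
    }
)
# _set_allow_crewai_trigger_context_for_first_task() runs during kickoff and sets
# crew.tasks[0].allow_crewai_trigger_context = True, so the payload is injected
# into the first task's context.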
@@ -17,10 +17,13 @@ from typing import (
|
||||
)
|
||||
from uuid import uuid4
|
||||
|
||||
from opentelemetry import baggage
|
||||
from opentelemetry.context import attach, detach
|
||||
from pydantic import BaseModel, Field, ValidationError
|
||||
|
||||
from crewai.flow.flow_visualizer import plot_flow
|
||||
from crewai.flow.persistence.base import FlowPersistence
|
||||
from crewai.flow.types import FlowExecutionData
|
||||
from crewai.flow.utils import get_possible_return_constants
|
||||
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
|
||||
from crewai.utilities.events.flow_events import (
|
||||
@@ -35,7 +38,9 @@ from crewai.utilities.events.flow_events import (
|
||||
from crewai.utilities.events.listeners.tracing.trace_listener import (
|
||||
TraceCollectionListener,
|
||||
)
|
||||
from crewai.utilities.events.listeners.tracing.utils import is_tracing_enabled
|
||||
from crewai.utilities.events.listeners.tracing.utils import (
|
||||
is_tracing_enabled,
|
||||
)
|
||||
from crewai.utilities.printer import Printer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -467,13 +472,15 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
self._method_execution_counts: Dict[str, int] = {}
|
||||
self._pending_and_listeners: Dict[str, Set[str]] = {}
|
||||
self._method_outputs: List[Any] = [] # List to store all method outputs
|
||||
self._completed_methods: Set[str] = set() # Track completed methods for reload
|
||||
self._persistence: Optional[FlowPersistence] = persistence
|
||||
self._is_execution_resuming: bool = False
|
||||
|
||||
# Initialize state with initial values
|
||||
self._state = self._create_initial_state()
|
||||
self.tracing = tracing
|
||||
if is_tracing_enabled() or tracing:
|
||||
trace_listener = TraceCollectionListener(tracing=tracing)
|
||||
if is_tracing_enabled() or self.tracing:
|
||||
trace_listener = TraceCollectionListener()
|
||||
trace_listener.setup_listeners(crewai_event_bus)
|
||||
# Apply any additional kwargs
|
||||
if kwargs:
|
||||
@@ -718,6 +725,73 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
else:
|
||||
raise TypeError(f"State must be dict or BaseModel, got {type(self._state)}")
|
||||
|
||||
def reload(self, execution_data: FlowExecutionData) -> None:
|
||||
"""Reloads the flow from an execution data dict.
|
||||
|
||||
This method restores the flow's execution ID, completed methods, and state,
|
||||
allowing it to resume from where it left off.
|
||||
|
||||
Args:
|
||||
execution_data: Flow execution data containing:
|
||||
- id: Flow execution ID
|
||||
- flow: Flow structure
|
||||
- completed_methods: List of successfully completed methods
|
||||
- execution_methods: All execution methods with their status
|
||||
"""
|
||||
flow_id = execution_data.get("id")
|
||||
if flow_id:
|
||||
self._update_state_field("id", flow_id)
|
||||
|
||||
self._completed_methods = {
|
||||
name
|
||||
for method_data in execution_data.get("completed_methods", [])
|
||||
if (name := method_data.get("flow_method", {}).get("name")) is not None
|
||||
}
|
||||
|
||||
execution_methods = execution_data.get("execution_methods", [])
|
||||
if not execution_methods:
|
||||
return
|
||||
|
||||
sorted_methods = sorted(
|
||||
execution_methods,
|
||||
key=lambda m: m.get("started_at", ""),
|
||||
)
|
||||
|
||||
state_to_apply = None
|
||||
for method in reversed(sorted_methods):
|
||||
if method.get("final_state"):
|
||||
state_to_apply = method["final_state"]
|
||||
break
|
||||
|
||||
if not state_to_apply and sorted_methods:
|
||||
last_method = sorted_methods[-1]
|
||||
if last_method.get("initial_state"):
|
||||
state_to_apply = last_method["initial_state"]
|
||||
|
||||
if state_to_apply:
|
||||
self._apply_state_updates(state_to_apply)
|
||||
|
||||
for i, method in enumerate(sorted_methods[:-1]):
|
||||
method_name = method.get("flow_method", {}).get("name")
|
||||
if method_name:
|
||||
self._completed_methods.add(method_name)
|
||||
|
||||
def _update_state_field(self, field_name: str, value: Any) -> None:
|
||||
"""Update a single field in the state."""
|
||||
if isinstance(self._state, dict):
|
||||
self._state[field_name] = value
|
||||
elif hasattr(self._state, field_name):
|
||||
object.__setattr__(self._state, field_name, value)
|
||||
|
||||
def _apply_state_updates(self, updates: Dict[str, Any]) -> None:
|
||||
"""Apply multiple state updates efficiently."""
|
||||
if isinstance(self._state, dict):
|
||||
self._state.update(updates)
|
||||
elif hasattr(self._state, "__dict__"):
|
||||
for key, value in updates.items():
|
||||
if hasattr(self._state, key):
|
||||
object.__setattr__(self._state, key, value)
|
||||
|
||||
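To make the restore path concrete, a minimal sketch of feeding execution data back into reload(). The record below is invented for illustration; its shape follows FlowExecutionData defined in src/crewai/flow/types.py later in this diff, and how a resumed run is then continued is up to the caller:

from crewai.flow.flow import Flow, start


class ResearchFlow(Flow):  # unstructured (dict) state for simplicity
    @start()
    def gather_sources(self):
        self.state["sources"] = ["a", "b"]


execution_data = {
    "id": "9c1f8a2e-0000-0000-0000-000000000000",  # hypothetical execution id
    "flow": {"name": "ResearchFlow", "flow_methods_attributes": []},
    "inputs": {},
    "completed_methods": [
        {"flow_method": {"name": "gather_sources"}, "status": "completed"},
    ],
    "execution_methods": [
        {
            "flow_method": {"name": "gather_sources"},
            "started_at": "2024-01-01T10:00:00",
            "status": "completed",
            "final_state": {"sources": ["a", "b"]},
        },
    ],
}

flow = ResearchFlow()
flow.reload(execution_data)          # restores the id, completed methods, and the
                                     # most recent final_state found in the history
print(flow.state.get("sources"))     # -> ["a", "b"]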
def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
||||
"""
|
||||
Start the flow execution in a synchronous context.
|
||||
@@ -746,68 +820,87 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
Returns:
|
||||
The final output from the flow, which is the result of the last executed method.
|
||||
"""
|
||||
if inputs:
|
||||
# Override the id in the state if it exists in inputs
|
||||
if "id" in inputs:
|
||||
if isinstance(self._state, dict):
|
||||
self._state["id"] = inputs["id"]
|
||||
elif isinstance(self._state, BaseModel):
|
||||
setattr(self._state, "id", inputs["id"])
|
||||
ctx = baggage.set_baggage("flow_inputs", inputs or {})
|
||||
flow_token = attach(ctx)
|
||||
|
||||
# If persistence is enabled, attempt to restore the stored state using the provided id.
|
||||
if "id" in inputs and self._persistence is not None:
|
||||
restore_uuid = inputs["id"]
|
||||
stored_state = self._persistence.load_state(restore_uuid)
|
||||
if stored_state:
|
||||
self._log_flow_event(
|
||||
f"Loading flow state from memory for UUID: {restore_uuid}",
|
||||
color="yellow",
|
||||
)
|
||||
self._restore_state(stored_state)
|
||||
else:
|
||||
self._log_flow_event(
|
||||
f"No flow state found for UUID: {restore_uuid}", color="red"
|
||||
)
|
||||
try:
|
||||
# Reset flow state for fresh execution unless restoring from persistence
|
||||
is_restoring = inputs and "id" in inputs and self._persistence is not None
|
||||
if not is_restoring:
|
||||
# Clear completed methods and outputs for a fresh start
|
||||
self._completed_methods.clear()
|
||||
self._method_outputs.clear()
|
||||
else:
|
||||
# We're restoring from persistence, set the flag
|
||||
self._is_execution_resuming = True
|
||||
|
||||
# Update state with any additional inputs (ignoring the 'id' key)
|
||||
filtered_inputs = {k: v for k, v in inputs.items() if k != "id"}
|
||||
if filtered_inputs:
|
||||
self._initialize_state(filtered_inputs)
|
||||
if inputs:
|
||||
# Override the id in the state if it exists in inputs
|
||||
if "id" in inputs:
|
||||
if isinstance(self._state, dict):
|
||||
self._state["id"] = inputs["id"]
|
||||
elif isinstance(self._state, BaseModel):
|
||||
setattr(self._state, "id", inputs["id"])
|
||||
|
||||
# Emit FlowStartedEvent and log the start of the flow.
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
FlowStartedEvent(
|
||||
type="flow_started",
|
||||
flow_name=self.name or self.__class__.__name__,
|
||||
inputs=inputs,
|
||||
),
|
||||
)
|
||||
self._log_flow_event(
|
||||
f"Flow started with ID: {self.flow_id}", color="bold_magenta"
|
||||
)
|
||||
# If persistence is enabled, attempt to restore the stored state using the provided id.
|
||||
if "id" in inputs and self._persistence is not None:
|
||||
restore_uuid = inputs["id"]
|
||||
stored_state = self._persistence.load_state(restore_uuid)
|
||||
if stored_state:
|
||||
self._log_flow_event(
|
||||
f"Loading flow state from memory for UUID: {restore_uuid}",
|
||||
color="yellow",
|
||||
)
|
||||
self._restore_state(stored_state)
|
||||
else:
|
||||
self._log_flow_event(
|
||||
f"No flow state found for UUID: {restore_uuid}", color="red"
|
||||
)
|
||||
|
||||
if inputs is not None and "id" not in inputs:
|
||||
self._initialize_state(inputs)
|
||||
# Update state with any additional inputs (ignoring the 'id' key)
|
||||
filtered_inputs = {k: v for k, v in inputs.items() if k != "id"}
|
||||
if filtered_inputs:
|
||||
self._initialize_state(filtered_inputs)
|
||||
|
||||
tasks = [
|
||||
self._execute_start_method(start_method)
|
||||
for start_method in self._start_methods
|
||||
]
|
||||
await asyncio.gather(*tasks)
|
||||
# Emit FlowStartedEvent and log the start of the flow.
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
FlowStartedEvent(
|
||||
type="flow_started",
|
||||
flow_name=self.name or self.__class__.__name__,
|
||||
inputs=inputs,
|
||||
),
|
||||
)
|
||||
self._log_flow_event(
|
||||
f"Flow started with ID: {self.flow_id}", color="bold_magenta"
|
||||
)
|
||||
|
||||
final_output = self._method_outputs[-1] if self._method_outputs else None
|
||||
if inputs is not None and "id" not in inputs:
|
||||
self._initialize_state(inputs)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
FlowFinishedEvent(
|
||||
type="flow_finished",
|
||||
flow_name=self.name or self.__class__.__name__,
|
||||
result=final_output,
|
||||
),
|
||||
)
|
||||
tasks = [
|
||||
self._execute_start_method(start_method)
|
||||
for start_method in self._start_methods
|
||||
]
|
||||
await asyncio.gather(*tasks)
|
||||
|
||||
return final_output
|
||||
# Clear the resumption flag after initial execution completes
|
||||
self._is_execution_resuming = False
|
||||
|
||||
final_output = self._method_outputs[-1] if self._method_outputs else None
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
FlowFinishedEvent(
|
||||
type="flow_finished",
|
||||
flow_name=self.name or self.__class__.__name__,
|
||||
result=final_output,
|
||||
),
|
||||
)
|
||||
|
||||
return final_output
|
||||
finally:
|
||||
detach(flow_token)
|
||||
|
||||
async def _execute_start_method(self, start_method_name: str) -> None:
|
||||
"""
|
||||
@@ -826,12 +919,57 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
- Executes the start method and captures its result
|
||||
- Triggers execution of any listeners waiting on this start method
|
||||
- Part of the flow's initialization sequence
|
||||
- Skips execution if method was already completed (e.g., after reload)
|
||||
- Automatically injects crewai_trigger_payload if available in flow inputs
|
||||
"""
|
||||
if start_method_name in self._completed_methods:
|
||||
if self._is_execution_resuming:
|
||||
# During resumption, skip execution but continue listeners
|
||||
last_output = self._method_outputs[-1] if self._method_outputs else None
|
||||
await self._execute_listeners(start_method_name, last_output)
|
||||
return
|
||||
# For cyclic flows, clear from completed to allow re-execution
|
||||
self._completed_methods.discard(start_method_name)
|
||||
|
||||
method = self._methods[start_method_name]
|
||||
enhanced_method = self._inject_trigger_payload_for_start_method(method)
|
||||
|
||||
result = await self._execute_method(
|
||||
start_method_name, self._methods[start_method_name]
|
||||
start_method_name, enhanced_method
|
||||
)
|
||||
await self._execute_listeners(start_method_name, result)
|
||||
|
||||
def _inject_trigger_payload_for_start_method(self, original_method: Callable) -> Callable:
|
||||
def prepare_kwargs(*args, **kwargs):
|
||||
inputs = baggage.get_baggage("flow_inputs") or {}
|
||||
trigger_payload = inputs.get("crewai_trigger_payload")
|
||||
|
||||
sig = inspect.signature(original_method)
|
||||
accepts_trigger_payload = "crewai_trigger_payload" in sig.parameters
|
||||
|
||||
if trigger_payload is not None and accepts_trigger_payload:
|
||||
kwargs["crewai_trigger_payload"] = trigger_payload
|
||||
elif trigger_payload is not None:
|
||||
self._log_flow_event(
|
||||
f"Trigger payload available but {original_method.__name__} doesn't accept crewai_trigger_payload parameter",
|
||||
color="yellow"
|
||||
)
|
||||
return args, kwargs
|
||||
|
||||
if asyncio.iscoroutinefunction(original_method):
|
||||
async def enhanced_method(*args, **kwargs):
|
||||
args, kwargs = prepare_kwargs(*args, **kwargs)
|
||||
return await original_method(*args, **kwargs)
|
||||
else:
|
||||
def enhanced_method(*args, **kwargs):
|
||||
args, kwargs = prepare_kwargs(*args, **kwargs)
|
||||
return original_method(*args, **kwargs)
|
||||
|
||||
enhanced_method.__name__ = original_method.__name__
|
||||
enhanced_method.__doc__ = original_method.__doc__
|
||||
|
||||
return enhanced_method
|
||||
|
||||
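For reference, a start method only receives the injected payload when it declares the parameter in its signature; otherwise the wrapper above just logs a warning. A minimal sketch reflecting the behavior added in this diff (payload contents are illustrative):

from crewai.flow.flow import Flow, start


class TriggeredFlow(Flow):
    @start()
    def begin(self, crewai_trigger_payload: dict | None = None):
        # prepare_kwargs() finds "crewai_trigger_payload" in this signature and
        # forwards the value taken from the flow inputs via the baggage context.
        if crewai_trigger_payload:
            return f"triggered by {crewai_trigger_payload.get('source', 'unknown')}"
        return "started manually"


flow = TriggeredFlow()
flow.kickoff(inputs={"crewai_trigger_payload": {"source": "webhook"}})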
async def _execute_method(
|
||||
self, method_name: str, method: Callable, *args: Any, **kwargs: Any
|
||||
) -> Any:
|
||||
@@ -861,6 +999,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
self._method_execution_counts.get(method_name, 0) + 1
|
||||
)
|
||||
|
||||
self._completed_methods.add(method_name)
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
MethodExecutionFinishedEvent(
|
||||
@@ -922,11 +1061,15 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
for router_name in routers_triggered:
|
||||
await self._execute_single_listener(router_name, result)
|
||||
# After executing router, the router's result is the path
|
||||
router_result = self._method_outputs[-1]
|
||||
router_result = (
|
||||
self._method_outputs[-1] if self._method_outputs else None
|
||||
)
|
||||
if router_result: # Only add non-None results
|
||||
router_results.append(router_result)
|
||||
current_trigger = (
|
||||
router_result # Update for next iteration of router chain
|
||||
str(router_result)
|
||||
if router_result is not None
|
||||
else "" # Update for next iteration of router chain
|
||||
)
|
||||
|
||||
# Now execute normal listeners for all router results and the original trigger
|
||||
@@ -944,6 +1087,24 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
]
|
||||
await asyncio.gather(*tasks)
|
||||
|
||||
if current_trigger in router_results:
|
||||
# Find start methods triggered by this router result
|
||||
for method_name in self._start_methods:
|
||||
# Check if this start method is triggered by the current trigger
|
||||
if method_name in self._listeners:
|
||||
condition_type, trigger_methods = self._listeners[
|
||||
method_name
|
||||
]
|
||||
if current_trigger in trigger_methods:
|
||||
# Only execute if this is a cycle (method was already completed)
|
||||
if method_name in self._completed_methods:
|
||||
# For router-triggered start methods in cycles, temporarily clear resumption flag
|
||||
# to allow cyclic execution
|
||||
was_resuming = self._is_execution_resuming
|
||||
self._is_execution_resuming = False
|
||||
await self._execute_start_method(method_name)
|
||||
self._is_execution_resuming = was_resuming
|
||||
|
||||
def _find_triggered_methods(
|
||||
self, trigger_method: str, router_only: bool
|
||||
) -> List[str]:
|
||||
@@ -981,6 +1142,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
if router_only != is_router:
|
||||
continue
|
||||
|
||||
if not router_only and listener_name in self._start_methods:
|
||||
continue
|
||||
|
||||
if condition_type == "OR":
|
||||
# If the trigger_method matches any in methods, run this
|
||||
if trigger_method in methods:
|
||||
@@ -1023,12 +1187,21 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
- Handles errors gracefully with detailed logging
|
||||
- Recursively triggers listeners of this listener
|
||||
- Supports both parameterized and parameter-less listeners
|
||||
- Skips execution if method was already completed (e.g., after reload)
|
||||
|
||||
Error Handling
|
||||
-------------
|
||||
Catches and logs any exceptions during execution, preventing
|
||||
individual listener failures from breaking the entire flow.
|
||||
"""
|
||||
if listener_name in self._completed_methods:
|
||||
if self._is_execution_resuming:
|
||||
# During resumption, skip execution but continue listeners
|
||||
await self._execute_listeners(listener_name, None)
|
||||
return
|
||||
# For cyclic flows, clear from completed to allow re-execution
|
||||
self._completed_methods.discard(listener_name)
|
||||
|
||||
try:
|
||||
method = self._methods[listener_name]
|
||||
|
||||
@@ -1047,12 +1220,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
await self._execute_listeners(listener_name, listener_result)
|
||||
|
||||
except Exception as e:
|
||||
print(
|
||||
f"[Flow._execute_single_listener] Error in method {listener_name}: {e}"
|
||||
)
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
logger.error(f"Error executing listener {listener_name}: {e}")
|
||||
raise
|
||||
|
||||
def _log_flow_event(
|
||||
|
||||
src/crewai/flow/types.py (new file, +95 lines)
@@ -0,0 +1,95 @@
|
||||
"""Type definitions for CrewAI Flow module.
|
||||
|
||||
This module contains TypedDict definitions and type aliases used throughout
|
||||
the Flow system.
|
||||
"""
|
||||
|
||||
from typing import Any, TypedDict
|
||||
from typing_extensions import NotRequired, Required
|
||||
|
||||
|
||||
class FlowMethodData(TypedDict):
|
||||
"""Flow method information.
|
||||
|
||||
Attributes:
|
||||
name: The name of the flow method.
|
||||
starting_point: Whether this method is a starting point for the flow.
|
||||
"""
|
||||
|
||||
name: str
|
||||
starting_point: NotRequired[bool]
|
||||
|
||||
|
||||
class CompletedMethodData(TypedDict):
|
||||
"""Completed method information.
|
||||
|
||||
Represents a flow method that has been successfully executed.
|
||||
|
||||
Attributes:
|
||||
flow_method: The flow method information.
|
||||
status: The completion status of the method.
|
||||
"""
|
||||
|
||||
flow_method: FlowMethodData
|
||||
status: str
|
||||
|
||||
|
||||
class ExecutionMethodData(TypedDict, total=False):
|
||||
"""Execution method information.
|
||||
|
||||
Contains detailed information about a method's execution, including
|
||||
timing, state, and any error details.
|
||||
|
||||
Attributes:
|
||||
flow_method: The flow method information.
|
||||
started_at: ISO timestamp when the method started execution.
|
||||
finished_at: ISO timestamp when the method finished execution, if completed.
|
||||
status: Current status of the method execution.
|
||||
initial_state: The state before method execution.
|
||||
final_state: The state after method execution.
|
||||
error_details: Details about any error that occurred during execution.
|
||||
"""
|
||||
|
||||
flow_method: Required[FlowMethodData]
|
||||
started_at: Required[str]
|
||||
status: Required[str]
|
||||
finished_at: str
|
||||
initial_state: dict[str, Any]
|
||||
final_state: dict[str, Any]
|
||||
error_details: dict[str, Any]
|
||||
|
||||
|
||||
class FlowData(TypedDict):
|
||||
"""Flow structure information.
|
||||
|
||||
Contains metadata about the flow structure and its methods.
|
||||
|
||||
Attributes:
|
||||
name: The name of the flow.
|
||||
flow_methods_attributes: List of all flow methods and their attributes.
|
||||
"""
|
||||
|
||||
name: str
|
||||
flow_methods_attributes: list[FlowMethodData]
|
||||
|
||||
|
||||
class FlowExecutionData(TypedDict):
|
||||
"""Flow execution data.
|
||||
|
||||
Complete execution data for a flow, including its current state,
|
||||
completed methods, and execution history. Used for resuming flows
|
||||
from a previous state.
|
||||
|
||||
Attributes:
|
||||
id: Unique identifier for the flow execution.
|
||||
flow: Flow structure and metadata.
|
||||
inputs: Input data provided to the flow.
|
||||
completed_methods: List of methods that have been successfully completed.
|
||||
execution_methods: Detailed execution history for all methods.
|
||||
"""
|
||||
|
||||
id: str
|
||||
flow: FlowData
|
||||
inputs: dict[str, Any]
|
||||
completed_methods: list[CompletedMethodData]
|
||||
execution_methods: list[ExecutionMethodData]
|
||||
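A small illustration of the totality rules above (values are made up): ExecutionMethodData is declared with total=False, so only the keys marked Required must be present, while the remaining fields can be added as the method progresses.

from crewai.flow.types import ExecutionMethodData

minimal: ExecutionMethodData = {
    "flow_method": {"name": "fetch_data"},
    "started_at": "2024-01-01T10:00:00",
    "status": "running",
}

finished: ExecutionMethodData = {
    "flow_method": {"name": "fetch_data", "starting_point": True},
    "started_at": "2024-01-01T10:00:00",
    "finished_at": "2024-01-01T10:00:07",
    "status": "completed",
    "final_state": {"rows": 42},
}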
@@ -11,6 +11,7 @@ import chromadb.errors
|
||||
from chromadb.api import ClientAPI
|
||||
from chromadb.api.types import OneOrMany
|
||||
from chromadb.config import Settings
|
||||
import warnings
|
||||
|
||||
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
|
||||
from crewai.rag.embeddings.configurator import EmbeddingConfigurator
|
||||
@@ -85,6 +86,14 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
||||
raise Exception("Collection not initialized")
|
||||
|
||||
def initialize_knowledge_storage(self):
|
||||
# Suppress deprecation warnings from chromadb, which are not relevant to us
|
||||
# TODO: Remove this once we upgrade chromadb to at least 1.0.8.
|
||||
warnings.filterwarnings(
|
||||
"ignore",
|
||||
message=r".*'model_fields'.*is deprecated.*",
|
||||
module=r"^chromadb(\.|$)",
|
||||
)
|
||||
|
||||
self.app = create_persistent_client(
|
||||
path=os.path.join(db_storage_path(), "knowledge"),
|
||||
settings=Settings(allow_reset=True),
|
||||
|
||||
@@ -7,6 +7,7 @@ from crewai.utilities.chromadb import sanitize_collection_name
|
||||
from crewai.memory.storage.interface import Storage
|
||||
|
||||
MAX_AGENT_ID_LENGTH_MEM0 = 255
|
||||
MAX_METADATA_SIZE_MEM0 = 2000
|
||||
|
||||
|
||||
class Mem0Storage(Storage):
|
||||
@@ -98,8 +99,11 @@ class Mem0Storage(Storage):
|
||||
}
|
||||
|
||||
# Shared base params
|
||||
raw_metadata = {"type": base_metadata[self.memory_type], **metadata}
|
||||
truncated_metadata = self._truncate_metadata_if_needed(raw_metadata)
|
||||
|
||||
params: dict[str, Any] = {
|
||||
"metadata": {"type": base_metadata[self.memory_type], **metadata},
|
||||
"metadata": truncated_metadata,
|
||||
"infer": self.infer
|
||||
}
|
||||
|
||||
@@ -181,3 +185,30 @@ class Mem0Storage(Storage):
|
||||
agents = [self._sanitize_role(agent.role) for agent in agents]
|
||||
agents = "_".join(agents)
|
||||
return sanitize_collection_name(name=agents, max_collection_length=MAX_AGENT_ID_LENGTH_MEM0)
|
||||
|
||||
def _truncate_metadata_if_needed(self, metadata: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""
|
||||
Truncate metadata to stay within Mem0's size limits.
|
||||
Prioritizes essential fields and truncates messages if needed.
|
||||
"""
|
||||
import json
|
||||
|
||||
metadata_str = json.dumps(metadata, default=str)
|
||||
|
||||
if len(metadata_str) <= MAX_METADATA_SIZE_MEM0:
|
||||
return metadata
|
||||
|
||||
truncated_metadata = metadata.copy()
|
||||
|
||||
if "messages" in truncated_metadata:
|
||||
messages = truncated_metadata["messages"]
|
||||
if isinstance(messages, list) and len(messages) > 0:
|
||||
while len(json.dumps(truncated_metadata, default=str)) > MAX_METADATA_SIZE_MEM0 and len(messages) > 1:
|
||||
messages = messages[-len(messages)//2:] # Keep last half
|
||||
truncated_metadata["messages"] = messages
|
||||
|
||||
if len(json.dumps(truncated_metadata, default=str)) > MAX_METADATA_SIZE_MEM0:
|
||||
truncated_metadata.pop("messages", None)
|
||||
truncated_metadata["_truncated"] = True
|
||||
|
||||
return truncated_metadata
|
||||
|
||||
@@ -12,6 +12,7 @@ from crewai.rag.embeddings.configurator import EmbeddingConfigurator
|
||||
from crewai.utilities.chromadb import create_persistent_client
|
||||
from crewai.utilities.constants import MAX_FILE_NAME_LENGTH
|
||||
from crewai.utilities.paths import db_storage_path
|
||||
import warnings
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
@@ -62,6 +63,14 @@ class RAGStorage(BaseRAGStorage):
|
||||
def _initialize_app(self):
|
||||
from chromadb.config import Settings
|
||||
|
||||
# Suppress deprecation warnings from chromadb, which are not relevant to us
|
||||
# TODO: Remove this once we upgrade chromadb to at least 1.0.8.
|
||||
warnings.filterwarnings(
|
||||
"ignore",
|
||||
message=r".*'model_fields'.*is deprecated.*",
|
||||
module=r"^chromadb(\.|$)",
|
||||
)
|
||||
|
||||
self._set_embedder_config()
|
||||
|
||||
self.app = create_persistent_client(
|
||||
|
||||
src/crewai/rag/chromadb/__init__.py (new file, +0 lines)

src/crewai/rag/chromadb/client.py (new file, +556 lines)
@@ -0,0 +1,556 @@
|
||||
"""ChromaDB client implementation."""
|
||||
|
||||
from typing import Any
|
||||
|
||||
from chromadb.api.types import (
|
||||
Embeddable,
|
||||
EmbeddingFunction as ChromaEmbeddingFunction,
|
||||
QueryResult,
|
||||
)
|
||||
from typing_extensions import Unpack
|
||||
|
||||
from crewai.rag.chromadb.types import (
|
||||
ChromaDBClientType,
|
||||
ChromaDBCollectionCreateParams,
|
||||
ChromaDBCollectionSearchParams,
|
||||
)
|
||||
from crewai.rag.chromadb.utils import (
|
||||
_extract_search_params,
|
||||
_is_async_client,
|
||||
_is_sync_client,
|
||||
_prepare_documents_for_chromadb,
|
||||
_process_query_results,
|
||||
)
|
||||
from crewai.rag.core.base_client import (
|
||||
BaseClient,
|
||||
BaseCollectionParams,
|
||||
BaseCollectionAddParams,
|
||||
)
|
||||
from crewai.rag.types import SearchResult
|
||||
|
||||
|
||||
class ChromaDBClient(BaseClient):
|
||||
"""ChromaDB implementation of the BaseClient protocol.
|
||||
|
||||
Provides vector database operations for ChromaDB, supporting both
|
||||
synchronous and asynchronous clients.
|
||||
|
||||
Attributes:
|
||||
client: ChromaDB client instance (ClientAPI or AsyncClientAPI).
|
||||
embedding_function: Function to generate embeddings for documents.
|
||||
"""
|
||||
|
||||
client: ChromaDBClientType
|
||||
embedding_function: ChromaEmbeddingFunction[Embeddable]
|
||||
|
||||
def create_collection(
|
||||
self, **kwargs: Unpack[ChromaDBCollectionCreateParams]
|
||||
) -> None:
|
||||
"""Create a new collection in ChromaDB.
|
||||
|
||||
Uses the client's default embedding function if none provided.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: Name of the collection to create. Must be unique.
|
||||
configuration: Optional collection configuration specifying distance metrics,
|
||||
HNSW parameters, or other backend-specific settings.
|
||||
metadata: Optional metadata dictionary to attach to the collection.
|
||||
embedding_function: Optional custom embedding function. If not provided,
|
||||
uses the client's default embedding function.
|
||||
data_loader: Optional data loader for batch loading data into the collection.
|
||||
get_or_create: If True, returns existing collection if it already exists
|
||||
instead of raising an error. Defaults to False.
|
||||
|
||||
Raises:
|
||||
TypeError: If AsyncClientAPI is used instead of ClientAPI for sync operations.
|
||||
ValueError: If collection with the same name already exists and get_or_create
|
||||
is False.
|
||||
ConnectionError: If unable to connect to ChromaDB server.
|
||||
|
||||
Example:
|
||||
>>> client = ChromaDBClient()
|
||||
>>> client.create_collection(
|
||||
... collection_name="documents",
|
||||
... metadata={"description": "Product documentation"},
|
||||
... get_or_create=True
|
||||
... )
|
||||
"""
|
||||
if not _is_sync_client(self.client):
|
||||
raise TypeError(
|
||||
"Synchronous method create_collection() requires a ClientAPI. "
|
||||
"Use acreate_collection() for AsyncClientAPI."
|
||||
)
|
||||
|
||||
metadata = kwargs.get("metadata", {})
|
||||
if "hnsw:space" not in metadata:
|
||||
metadata["hnsw:space"] = "cosine"
|
||||
|
||||
self.client.create_collection(
|
||||
name=kwargs["collection_name"],
|
||||
configuration=kwargs.get("configuration"),
|
||||
metadata=metadata,
|
||||
embedding_function=kwargs.get(
|
||||
"embedding_function", self.embedding_function
|
||||
),
|
||||
data_loader=kwargs.get("data_loader"),
|
||||
get_or_create=kwargs.get("get_or_create", False),
|
||||
)
|
||||
|
||||
async def acreate_collection(
|
||||
self, **kwargs: Unpack[ChromaDBCollectionCreateParams]
|
||||
) -> None:
|
||||
"""Create a new collection in ChromaDB asynchronously.
|
||||
|
||||
Creates a new collection with the specified name and optional configuration.
|
||||
If an embedding function is not provided, uses the client's default embedding function.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: Name of the collection to create. Must be unique.
|
||||
configuration: Optional collection configuration specifying distance metrics,
|
||||
HNSW parameters, or other backend-specific settings.
|
||||
metadata: Optional metadata dictionary to attach to the collection.
|
||||
embedding_function: Optional custom embedding function. If not provided,
|
||||
uses the client's default embedding function.
|
||||
data_loader: Optional data loader for batch loading data into the collection.
|
||||
get_or_create: If True, returns existing collection if it already exists
|
||||
instead of raising an error. Defaults to False.
|
||||
|
||||
Raises:
|
||||
TypeError: If ClientAPI is used instead of AsyncClientAPI for async operations.
|
||||
ValueError: If collection with the same name already exists and get_or_create
|
||||
is False.
|
||||
ConnectionError: If unable to connect to ChromaDB server.
|
||||
|
||||
Example:
|
||||
>>> import asyncio
|
||||
>>> async def main():
|
||||
... client = ChromaDBClient()
|
||||
... await client.acreate_collection(
|
||||
... collection_name="documents",
|
||||
... metadata={"description": "Product documentation"},
|
||||
... get_or_create=True
|
||||
... )
|
||||
>>> asyncio.run(main())
|
||||
"""
|
||||
if not _is_async_client(self.client):
|
||||
raise TypeError(
|
||||
"Asynchronous method acreate_collection() requires an AsyncClientAPI. "
|
||||
"Use create_collection() for ClientAPI."
|
||||
)
|
||||
|
||||
metadata = kwargs.get("metadata", {})
|
||||
if "hnsw:space" not in metadata:
|
||||
metadata["hnsw:space"] = "cosine"
|
||||
|
||||
await self.client.create_collection(
|
||||
name=kwargs["collection_name"],
|
||||
configuration=kwargs.get("configuration"),
|
||||
metadata=metadata,
|
||||
embedding_function=kwargs.get(
|
||||
"embedding_function", self.embedding_function
|
||||
),
|
||||
data_loader=kwargs.get("data_loader"),
|
||||
get_or_create=kwargs.get("get_or_create", False),
|
||||
)
|
||||
|
||||
def get_or_create_collection(
|
||||
self, **kwargs: Unpack[ChromaDBCollectionCreateParams]
|
||||
) -> Any:
|
||||
"""Get an existing collection or create it if it doesn't exist.
|
||||
|
||||
Returns existing collection if found, otherwise creates a new one.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: Name of the collection to get or create.
|
||||
configuration: Optional collection configuration specifying distance metrics,
|
||||
HNSW parameters, or other backend-specific settings.
|
||||
metadata: Optional metadata dictionary to attach to the collection.
|
||||
embedding_function: Optional custom embedding function. If not provided,
|
||||
uses the client's default embedding function.
|
||||
data_loader: Optional data loader for batch loading data into the collection.
|
||||
|
||||
Returns:
|
||||
A ChromaDB Collection object.
|
||||
|
||||
Raises:
|
||||
TypeError: If AsyncClientAPI is used instead of ClientAPI for sync operations.
|
||||
ConnectionError: If unable to connect to ChromaDB server.
|
||||
|
||||
Example:
|
||||
>>> client = ChromaDBClient()
|
||||
>>> collection = client.get_or_create_collection(
|
||||
... collection_name="documents",
|
||||
... metadata={"description": "Product documentation"}
|
||||
... )
|
||||
"""
|
||||
if not _is_sync_client(self.client):
|
||||
raise TypeError(
|
||||
"Synchronous method get_or_create_collection() requires a ClientAPI. "
|
||||
"Use aget_or_create_collection() for AsyncClientAPI."
|
||||
)
|
||||
|
||||
metadata = kwargs.get("metadata", {})
|
||||
if "hnsw:space" not in metadata:
|
||||
metadata["hnsw:space"] = "cosine"
|
||||
|
||||
return self.client.get_or_create_collection(
|
||||
name=kwargs["collection_name"],
|
||||
configuration=kwargs.get("configuration"),
|
||||
metadata=metadata,
|
||||
embedding_function=kwargs.get(
|
||||
"embedding_function", self.embedding_function
|
||||
),
|
||||
data_loader=kwargs.get("data_loader"),
|
||||
)
|
||||
|
||||
async def aget_or_create_collection(
|
||||
self, **kwargs: Unpack[ChromaDBCollectionCreateParams]
|
||||
) -> Any:
|
||||
"""Get an existing collection or create it if it doesn't exist asynchronously.
|
||||
|
||||
Returns existing collection if found, otherwise creates a new one.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: Name of the collection to get or create.
|
||||
configuration: Optional collection configuration specifying distance metrics,
|
||||
HNSW parameters, or other backend-specific settings.
|
||||
metadata: Optional metadata dictionary to attach to the collection.
|
||||
embedding_function: Optional custom embedding function. If not provided,
|
||||
uses the client's default embedding function.
|
||||
data_loader: Optional data loader for batch loading data into the collection.
|
||||
|
||||
Returns:
|
||||
A ChromaDB AsyncCollection object.
|
||||
|
||||
Raises:
|
||||
TypeError: If ClientAPI is used instead of AsyncClientAPI for async operations.
|
||||
ConnectionError: If unable to connect to ChromaDB server.
|
||||
|
||||
Example:
|
||||
>>> import asyncio
|
||||
>>> async def main():
|
||||
... client = ChromaDBClient()
|
||||
... collection = await client.aget_or_create_collection(
|
||||
... collection_name="documents",
|
||||
... metadata={"description": "Product documentation"}
|
||||
... )
|
||||
>>> asyncio.run(main())
|
||||
"""
|
||||
if not _is_async_client(self.client):
|
||||
raise TypeError(
|
||||
"Asynchronous method aget_or_create_collection() requires an AsyncClientAPI. "
|
||||
"Use get_or_create_collection() for ClientAPI."
|
||||
)
|
||||
|
||||
metadata = kwargs.get("metadata", {})
|
||||
if "hnsw:space" not in metadata:
|
||||
metadata["hnsw:space"] = "cosine"
|
||||
|
||||
return await self.client.get_or_create_collection(
|
||||
name=kwargs["collection_name"],
|
||||
configuration=kwargs.get("configuration"),
|
||||
metadata=metadata,
|
||||
embedding_function=kwargs.get(
|
||||
"embedding_function", self.embedding_function
|
||||
),
|
||||
data_loader=kwargs.get("data_loader"),
|
||||
)
|
||||
|
||||
def add_documents(self, **kwargs: Unpack[BaseCollectionAddParams]) -> None:
|
||||
"""Add documents with their embeddings to a collection.
|
||||
|
||||
Performs an upsert operation - documents with existing IDs are updated.
|
||||
Generates embeddings automatically using the configured embedding function.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: The name of the collection to add documents to.
|
||||
documents: List of BaseRecord dicts containing:
|
||||
- content: The text content (required)
|
||||
- doc_id: Optional unique identifier (auto-generated if missing)
|
||||
- metadata: Optional metadata dictionary
|
||||
|
||||
Raises:
|
||||
TypeError: If AsyncClientAPI is used instead of ClientAPI for sync operations.
|
||||
ValueError: If collection doesn't exist or documents list is empty.
|
||||
ConnectionError: If unable to connect to ChromaDB server.
|
||||
"""
|
||||
if not _is_sync_client(self.client):
|
||||
raise TypeError(
|
||||
"Synchronous method add_documents() requires a ClientAPI. "
|
||||
"Use aadd_documents() for AsyncClientAPI."
|
||||
)
|
||||
|
||||
collection_name = kwargs["collection_name"]
|
||||
documents = kwargs["documents"]
|
||||
|
||||
if not documents:
|
||||
raise ValueError("Documents list cannot be empty")
|
||||
|
||||
collection = self.client.get_collection(
|
||||
name=collection_name,
|
||||
embedding_function=self.embedding_function,
|
||||
)
|
||||
|
||||
prepared = _prepare_documents_for_chromadb(documents)
|
||||
collection.add(
|
||||
ids=prepared.ids,
|
||||
documents=prepared.texts,
|
||||
metadatas=prepared.metadatas,
|
||||
)
|
||||
|
||||
async def aadd_documents(self, **kwargs: Unpack[BaseCollectionAddParams]) -> None:
|
||||
"""Add documents with their embeddings to a collection asynchronously.
|
||||
|
||||
Performs an upsert operation - documents with existing IDs are updated.
|
||||
Generates embeddings automatically using the configured embedding function.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: The name of the collection to add documents to.
|
||||
documents: List of BaseRecord dicts containing:
|
||||
- content: The text content (required)
|
||||
- doc_id: Optional unique identifier (auto-generated if missing)
|
||||
- metadata: Optional metadata dictionary
|
||||
|
||||
Raises:
|
||||
TypeError: If ClientAPI is used instead of AsyncClientAPI for async operations.
|
||||
ValueError: If collection doesn't exist or documents list is empty.
|
||||
ConnectionError: If unable to connect to ChromaDB server.
|
||||
"""
|
||||
if not _is_async_client(self.client):
|
||||
raise TypeError(
|
||||
"Asynchronous method aadd_documents() requires an AsyncClientAPI. "
|
||||
"Use add_documents() for ClientAPI."
|
||||
)
|
||||
|
||||
collection_name = kwargs["collection_name"]
|
||||
documents = kwargs["documents"]
|
||||
|
||||
if not documents:
|
||||
raise ValueError("Documents list cannot be empty")
|
||||
|
||||
collection = await self.client.get_collection(
|
||||
name=collection_name,
|
||||
embedding_function=self.embedding_function,
|
||||
)
|
||||
prepared = _prepare_documents_for_chromadb(documents)
|
||||
await collection.add(
|
||||
ids=prepared.ids,
|
||||
documents=prepared.texts,
|
||||
metadatas=prepared.metadatas,
|
||||
)
|
||||
|
||||
def search(
|
||||
self, **kwargs: Unpack[ChromaDBCollectionSearchParams]
|
||||
) -> list[SearchResult]:
|
||||
"""Search for similar documents using a query.
|
||||
|
||||
Performs semantic search to find documents similar to the query text.
|
||||
Uses the configured embedding function to generate query embeddings.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: Name of the collection to search in.
|
||||
query: The text query to search for.
|
||||
limit: Maximum number of results to return (default: 10).
|
||||
metadata_filter: Optional filter for metadata fields.
|
||||
score_threshold: Optional minimum similarity score (0-1) for results.
|
||||
where: Optional ChromaDB where clause for metadata filtering.
|
||||
where_document: Optional ChromaDB where clause for document content filtering.
|
||||
include: Optional list of fields to include in results.
|
||||
|
||||
Returns:
|
||||
List of SearchResult dicts containing id, content, metadata, and score.
|
||||
|
||||
Raises:
|
||||
TypeError: If AsyncClientAPI is used instead of ClientAPI for sync operations.
|
||||
ValueError: If collection doesn't exist.
|
||||
ConnectionError: If unable to connect to ChromaDB server.
|
||||
"""
|
||||
if not _is_sync_client(self.client):
|
||||
raise TypeError(
|
||||
"Synchronous method search() requires a ClientAPI. "
|
||||
"Use asearch() for AsyncClientAPI."
|
||||
)
|
||||
|
||||
params = _extract_search_params(kwargs)
|
||||
|
||||
collection = self.client.get_collection(
|
||||
name=params.collection_name,
|
||||
embedding_function=self.embedding_function,
|
||||
)
|
||||
|
||||
where = params.where if params.where is not None else params.metadata_filter
|
||||
|
||||
results: QueryResult = collection.query(
|
||||
query_texts=[params.query],
|
||||
n_results=params.limit,
|
||||
where=where,
|
||||
where_document=params.where_document,
|
||||
include=params.include,
|
||||
)
|
||||
|
||||
return _process_query_results(
|
||||
collection=collection,
|
||||
results=results,
|
||||
params=params,
|
||||
)
|
||||
|
||||
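A combined usage sketch for the synchronous path. ChromaDBClient construction is not shown in this hunk, so assume `client` already holds an instance wired with a sync ClientAPI and an embedding function; the documents and query are illustrative:

# `client` is assumed to be a ChromaDBClient with a synchronous ClientAPI
# and a configured embedding function (construction is outside this hunk).
client.get_or_create_collection(collection_name="docs")
client.add_documents(
    collection_name="docs",
    documents=[
        {"content": "Flows coordinate event-driven steps in CrewAI."},
        {
            "doc_id": "crew-1",
            "content": "Crews orchestrate role-playing agents.",
            "metadata": {"source": "readme"},
        },
    ],
)

for hit in client.search(collection_name="docs", query="How do flows work?", limit=3):
    print(hit["id"], round(hit["score"], 3), hit["content"][:60])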
async def asearch(
|
||||
self, **kwargs: Unpack[ChromaDBCollectionSearchParams]
|
||||
) -> list[SearchResult]:
|
||||
"""Search for similar documents using a query asynchronously.
|
||||
|
||||
Performs semantic search to find documents similar to the query text.
|
||||
Uses the configured embedding function to generate query embeddings.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: Name of the collection to search in.
|
||||
query: The text query to search for.
|
||||
limit: Maximum number of results to return (default: 10).
|
||||
metadata_filter: Optional filter for metadata fields.
|
||||
score_threshold: Optional minimum similarity score (0-1) for results.
|
||||
where: Optional ChromaDB where clause for metadata filtering.
|
||||
where_document: Optional ChromaDB where clause for document content filtering.
|
||||
include: Optional list of fields to include in results.
|
||||
|
||||
Returns:
|
||||
List of SearchResult dicts containing id, content, metadata, and score.
|
||||
|
||||
Raises:
|
||||
TypeError: If ClientAPI is used instead of AsyncClientAPI for async operations.
|
||||
ValueError: If collection doesn't exist.
|
||||
ConnectionError: If unable to connect to ChromaDB server.
|
||||
"""
|
||||
if not _is_async_client(self.client):
|
||||
raise TypeError(
|
||||
"Asynchronous method asearch() requires an AsyncClientAPI. "
|
||||
"Use search() for ClientAPI."
|
||||
)
|
||||
|
||||
params = _extract_search_params(kwargs)
|
||||
|
||||
collection = await self.client.get_collection(
|
||||
name=params.collection_name,
|
||||
embedding_function=self.embedding_function,
|
||||
)
|
||||
|
||||
where = params.where if params.where is not None else params.metadata_filter
|
||||
|
||||
results: QueryResult = await collection.query(
|
||||
query_texts=[params.query],
|
||||
n_results=params.limit,
|
||||
where=where,
|
||||
where_document=params.where_document,
|
||||
include=params.include,
|
||||
)
|
||||
|
||||
return _process_query_results(
|
||||
collection=collection,
|
||||
results=results,
|
||||
params=params,
|
||||
)
|
||||
|
||||
def delete_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
|
||||
"""Delete a collection and all its data.
|
||||
|
||||
Permanently removes a collection and all documents, embeddings, and metadata it contains.
|
||||
This operation cannot be undone.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: Name of the collection to delete.
|
||||
|
||||
Raises:
|
||||
TypeError: If AsyncClientAPI is used instead of ClientAPI for sync operations.
|
||||
ValueError: If collection doesn't exist.
|
||||
ConnectionError: If unable to connect to ChromaDB server.
|
||||
|
||||
Example:
|
||||
>>> client = ChromaDBClient()
|
||||
>>> client.delete_collection(collection_name="old_documents")
|
||||
"""
|
||||
if not _is_sync_client(self.client):
|
||||
raise TypeError(
|
||||
"Synchronous method delete_collection() requires a ClientAPI. "
|
||||
"Use adelete_collection() for AsyncClientAPI."
|
||||
)
|
||||
|
||||
collection_name = kwargs["collection_name"]
|
||||
self.client.delete_collection(name=collection_name)
|
||||
|
||||
async def adelete_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
|
||||
"""Delete a collection and all its data asynchronously.
|
||||
|
||||
Permanently removes a collection and all documents, embeddings, and metadata it contains.
|
||||
This operation cannot be undone.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: Name of the collection to delete.
|
||||
|
||||
Raises:
|
||||
TypeError: If ClientAPI is used instead of AsyncClientAPI for async operations.
|
||||
ValueError: If collection doesn't exist.
|
||||
ConnectionError: If unable to connect to ChromaDB server.
|
||||
|
||||
Example:
|
||||
>>> import asyncio
|
||||
>>> async def main():
|
||||
... client = ChromaDBClient()
|
||||
... await client.adelete_collection(collection_name="old_documents")
|
||||
>>> asyncio.run(main())
|
||||
"""
|
||||
if not _is_async_client(self.client):
|
||||
raise TypeError(
|
||||
"Asynchronous method adelete_collection() requires an AsyncClientAPI. "
|
||||
"Use delete_collection() for ClientAPI."
|
||||
)
|
||||
|
||||
collection_name = kwargs["collection_name"]
|
||||
await self.client.delete_collection(name=collection_name)
|
||||
|
||||
def reset(self) -> None:
|
||||
"""Reset the vector database by deleting all collections and data.
|
||||
|
||||
Completely clears the ChromaDB instance, removing all collections,
|
||||
documents, embeddings, and metadata. This operation cannot be undone.
|
||||
Use with extreme caution in production environments.
|
||||
|
||||
Raises:
|
||||
TypeError: If AsyncClientAPI is used instead of ClientAPI for sync operations.
|
||||
ConnectionError: If unable to connect to ChromaDB server.
|
||||
|
||||
Example:
|
||||
>>> client = ChromaDBClient()
|
||||
>>> client.reset() # Removes ALL data from ChromaDB
|
||||
"""
|
||||
if not _is_sync_client(self.client):
|
||||
raise TypeError(
|
||||
"Synchronous method reset() requires a ClientAPI. "
|
||||
"Use areset() for AsyncClientAPI."
|
||||
)
|
||||
|
||||
self.client.reset()
|
||||
|
||||
async def areset(self) -> None:
|
||||
"""Reset the vector database by deleting all collections and data asynchronously.
|
||||
|
||||
Completely clears the ChromaDB instance, removing all collections,
|
||||
documents, embeddings, and metadata. This operation cannot be undone.
|
||||
Use with extreme caution in production environments.
|
||||
|
||||
Raises:
|
||||
TypeError: If ClientAPI is used instead of AsyncClientAPI for async operations.
|
||||
ConnectionError: If unable to connect to ChromaDB server.
|
||||
|
||||
Example:
|
||||
>>> import asyncio
|
||||
>>> async def main():
|
||||
... client = ChromaDBClient()
|
||||
... await client.areset() # Removes ALL data from ChromaDB
|
||||
>>> asyncio.run(main())
|
||||
"""
|
||||
if not _is_async_client(self.client):
|
||||
raise TypeError(
|
||||
"Asynchronous method areset() requires an AsyncClientAPI. "
|
||||
"Use reset() for ClientAPI."
|
||||
)
|
||||
|
||||
await self.client.reset()
|
||||
src/crewai/rag/chromadb/types.py (new file, +85 lines)
@@ -0,0 +1,85 @@
|
||||
"""Type definitions specific to ChromaDB implementation."""
|
||||
|
||||
from collections.abc import Mapping
|
||||
from typing import Any, NamedTuple
|
||||
|
||||
from chromadb.api import ClientAPI, AsyncClientAPI
|
||||
from chromadb.api.configuration import CollectionConfigurationInterface
|
||||
from chromadb.api.types import (
|
||||
CollectionMetadata,
|
||||
DataLoader,
|
||||
Embeddable,
|
||||
EmbeddingFunction as ChromaEmbeddingFunction,
|
||||
Include,
|
||||
Loadable,
|
||||
Where,
|
||||
WhereDocument,
|
||||
)
|
||||
|
||||
from crewai.rag.core.base_client import BaseCollectionParams, BaseCollectionSearchParams
|
||||
|
||||
ChromaDBClientType = ClientAPI | AsyncClientAPI
|
||||
|
||||
|
||||
class PreparedDocuments(NamedTuple):
|
||||
"""Prepared documents ready for ChromaDB insertion.
|
||||
|
||||
Attributes:
|
||||
ids: List of document IDs
|
||||
texts: List of document texts
|
||||
metadatas: List of document metadata mappings
|
||||
"""
|
||||
|
||||
ids: list[str]
|
||||
texts: list[str]
|
||||
metadatas: list[Mapping[str, str | int | float | bool]]
|
||||
|
||||
|
||||
class ExtractedSearchParams(NamedTuple):
|
||||
"""Extracted search parameters for ChromaDB queries.
|
||||
|
||||
Attributes:
|
||||
collection_name: Name of the collection to search
|
||||
query: Search query text
|
||||
limit: Maximum number of results
|
||||
metadata_filter: Optional metadata filter
|
||||
score_threshold: Optional minimum similarity score
|
||||
where: Optional ChromaDB where clause
|
||||
where_document: Optional ChromaDB document filter
|
||||
include: Fields to include in results
|
||||
"""
|
||||
|
||||
collection_name: str
|
||||
query: str
|
||||
limit: int
|
||||
metadata_filter: dict[str, Any] | None
|
||||
score_threshold: float | None
|
||||
where: Where | None
|
||||
where_document: WhereDocument | None
|
||||
include: Include
|
||||
|
||||
|
||||
class ChromaDBCollectionCreateParams(BaseCollectionParams, total=False):
|
||||
"""Parameters for creating a ChromaDB collection.
|
||||
|
||||
This class extends BaseCollectionParams to include any additional
|
||||
parameters specific to ChromaDB collection creation.
|
||||
"""
|
||||
|
||||
configuration: CollectionConfigurationInterface
|
||||
metadata: CollectionMetadata
|
||||
embedding_function: ChromaEmbeddingFunction[Embeddable]
|
||||
data_loader: DataLoader[Loadable]
|
||||
get_or_create: bool
|
||||
|
||||
|
||||
class ChromaDBCollectionSearchParams(BaseCollectionSearchParams, total=False):
|
||||
"""Parameters for searching a ChromaDB collection.
|
||||
|
||||
This class extends BaseCollectionSearchParams to include ChromaDB-specific
|
||||
search parameters like where clauses and include options.
|
||||
"""
|
||||
|
||||
where: Where
|
||||
where_document: WhereDocument
|
||||
include: Include
|
||||
src/crewai/rag/chromadb/utils.py (new file, +220 lines)
@@ -0,0 +1,220 @@
|
||||
"""Utility functions for ChromaDB client implementation."""
|
||||
|
||||
import hashlib
|
||||
from collections.abc import Mapping
|
||||
from typing import Literal, TypeGuard, cast
|
||||
|
||||
from chromadb.api import AsyncClientAPI, ClientAPI
|
||||
from chromadb.api.types import (
|
||||
Include,
|
||||
IncludeEnum,
|
||||
QueryResult,
|
||||
)
|
||||
|
||||
from chromadb.api.models.AsyncCollection import AsyncCollection
|
||||
from chromadb.api.models.Collection import Collection
|
||||
|
||||
from crewai.rag.chromadb.types import (
|
||||
ChromaDBClientType,
|
||||
ChromaDBCollectionSearchParams,
|
||||
ExtractedSearchParams,
|
||||
PreparedDocuments,
|
||||
)
|
||||
from crewai.rag.types import BaseRecord, SearchResult
|
||||
|
||||
|
||||
def _is_sync_client(client: ChromaDBClientType) -> TypeGuard[ClientAPI]:
|
||||
"""Type guard to check if the client is a synchronous ClientAPI.
|
||||
|
||||
Args:
|
||||
client: The client to check.
|
||||
|
||||
Returns:
|
||||
True if the client is a ClientAPI, False otherwise.
|
||||
"""
|
||||
return isinstance(client, ClientAPI)
|
||||
|
||||
|
||||
def _is_async_client(client: ChromaDBClientType) -> TypeGuard[AsyncClientAPI]:
|
||||
"""Type guard to check if the client is an asynchronous AsyncClientAPI.
|
||||
|
||||
Args:
|
||||
client: The client to check.
|
||||
|
||||
Returns:
|
||||
True if the client is an AsyncClientAPI, False otherwise.
|
||||
"""
|
||||
return isinstance(client, AsyncClientAPI)
|
||||
|
||||
|
||||
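A small sketch of how these guards narrow the ClientAPI/AsyncClientAPI union for static checkers (the in-memory client below is only for illustration):

import chromadb

client: ChromaDBClientType = chromadb.Client()  # ephemeral, synchronous client
if _is_sync_client(client):
    # Type checkers narrow `client` to ClientAPI here, so sync-only calls are allowed.
    client.heartbeat()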
def _prepare_documents_for_chromadb(
|
||||
documents: list[BaseRecord],
|
||||
) -> PreparedDocuments:
|
||||
"""Prepare documents for ChromaDB by extracting IDs, texts, and metadata.
|
||||
|
||||
Args:
|
||||
documents: List of BaseRecord documents to prepare.
|
||||
|
||||
Returns:
|
||||
PreparedDocuments with ids, texts, and metadatas ready for ChromaDB.
|
||||
"""
|
||||
ids: list[str] = []
|
||||
texts: list[str] = []
|
||||
metadatas: list[Mapping[str, str | int | float | bool]] = []
|
||||
|
||||
for doc in documents:
|
||||
if "doc_id" in doc:
|
||||
ids.append(doc["doc_id"])
|
||||
else:
|
||||
content_hash = hashlib.sha256(doc["content"].encode()).hexdigest()[:16]
|
||||
ids.append(content_hash)
|
||||
|
||||
texts.append(doc["content"])
|
||||
metadata = doc.get("metadata")
|
||||
if metadata:
|
||||
if isinstance(metadata, list):
|
||||
metadatas.append(metadata[0] if metadata else {})
|
||||
else:
|
||||
metadatas.append(metadata)
|
||||
else:
|
||||
metadatas.append({})
|
||||
|
||||
return PreparedDocuments(ids, texts, metadatas)
|
||||
|
||||
|
||||
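For instance (values illustrative), a record without a doc_id gets a deterministic id derived from the first 16 hex characters of the SHA-256 of its content, and missing metadata becomes an empty mapping:

docs = [
    {"content": "CrewAI supports RAG over ChromaDB."},
    {"doc_id": "manual-1", "content": "Custom ids are preserved.", "metadata": {"lang": "en"}},
]
prepared = _prepare_documents_for_chromadb(docs)
print(prepared.ids)        # ['<16-char sha256 prefix>', 'manual-1']
print(prepared.metadatas)  # [{}, {'lang': 'en'}]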
def _extract_search_params(
|
||||
kwargs: ChromaDBCollectionSearchParams,
|
||||
) -> ExtractedSearchParams:
|
||||
"""Extract search parameters from kwargs.
|
||||
|
||||
Args:
|
||||
kwargs: Keyword arguments containing search parameters.
|
||||
|
||||
Returns:
|
||||
ExtractedSearchParams with all extracted parameters.
|
||||
"""
|
||||
return ExtractedSearchParams(
|
||||
collection_name=kwargs["collection_name"],
|
||||
query=kwargs["query"],
|
||||
limit=kwargs.get("limit", 10),
|
||||
metadata_filter=kwargs.get("metadata_filter"),
|
||||
score_threshold=kwargs.get("score_threshold"),
|
||||
where=kwargs.get("where"),
|
||||
where_document=kwargs.get("where_document"),
|
||||
include=kwargs.get(
|
||||
"include",
|
||||
[IncludeEnum.metadatas, IncludeEnum.documents, IncludeEnum.distances],
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def _convert_distance_to_score(
|
||||
distance: float,
|
||||
distance_metric: Literal["l2", "cosine", "ip"],
|
||||
) -> float:
|
||||
"""Convert ChromaDB distance to similarity score.
|
||||
|
||||
Notes:
|
||||
Assuming all embeddings are unit-normalized for now, including custom embeddings.
|
||||
|
||||
Args:
|
||||
distance: The distance value from ChromaDB.
|
||||
distance_metric: The distance metric used ("l2", "cosine", or "ip").
|
||||
|
||||
Returns:
|
||||
Similarity score in range [0, 1] where 1 is most similar.
|
||||
"""
|
||||
if distance_metric == "cosine":
|
||||
score = 1.0 - 0.5 * distance
|
||||
return max(0.0, min(1.0, score))
|
||||
raise ValueError(f"Unsupported distance metric: {distance_metric}")
|
||||
|
||||
|
||||
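Since ChromaDB's cosine distance ranges over [0, 2] for unit-normalized embeddings, the linear map above places identical vectors at 1.0, orthogonal vectors at 0.5, and opposite vectors at 0.0:

assert _convert_distance_to_score(0.0, "cosine") == 1.0   # identical direction
assert _convert_distance_to_score(1.0, "cosine") == 0.5   # orthogonal
assert _convert_distance_to_score(2.0, "cosine") == 0.0   # opposite direction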
def _convert_chromadb_results_to_search_results(
    results: QueryResult,
    include: Include,
    distance_metric: Literal["l2", "cosine", "ip"],
    score_threshold: float | None = None,
) -> list[SearchResult]:
    """Convert ChromaDB query results to SearchResult format.

    Args:
        results: ChromaDB query results.
        include: List of fields that were included in the query.
        distance_metric: The distance metric used by the collection.
        score_threshold: Optional minimum similarity score (0-1) for results.

    Returns:
        List of SearchResult dicts containing id, content, metadata, and score.
    """
    search_results: list[SearchResult] = []

    include_strings = [item.value for item in include]

    ids = results["ids"][0] if results.get("ids") else []

    documents_list = results.get("documents")
    documents = (
        documents_list[0] if documents_list and "documents" in include_strings else []
    )

    metadatas_list = results.get("metadatas")
    metadatas = (
        metadatas_list[0] if metadatas_list and "metadatas" in include_strings else []
    )

    distances_list = results.get("distances")
    distances = (
        distances_list[0] if distances_list and "distances" in include_strings else []
    )

    for i, doc_id in enumerate(ids):
        if not distances or i >= len(distances):
            continue

        distance = distances[i]
        score = _convert_distance_to_score(
            distance=distance, distance_metric=distance_metric
        )

        if score_threshold and score < score_threshold:
            continue

        result: SearchResult = {
            "id": doc_id,
            "content": documents[i] if documents and i < len(documents) else "",
            "metadata": dict(metadatas[i]) if metadatas and i < len(metadatas) else {},
            "score": score,
        }
        search_results.append(result)

    return search_results

def _process_query_results(
    collection: Collection | AsyncCollection,
    results: QueryResult,
    params: ExtractedSearchParams,
) -> list[SearchResult]:
    """Process ChromaDB query results and convert to SearchResult format.

    Args:
        collection: The ChromaDB collection (sync or async) that was queried.
        results: Raw query results from ChromaDB.
        params: The search parameters used for the query.

    Returns:
        List of SearchResult dicts containing id, content, metadata, and score.
    """

    distance_metric = cast(
        Literal["l2", "cosine", "ip"],
        collection.metadata.get("hnsw:space", "l2") if collection.metadata else "l2",
    )

    return _convert_chromadb_results_to_search_results(
        results=results,
        include=params.include,
        distance_metric=distance_metric,
        score_threshold=params.score_threshold,
    )
1
src/crewai/rag/core/__init__.py
Normal file
@@ -0,0 +1 @@
"""Core abstract base classes and protocols for RAG systems."""
433
src/crewai/rag/core/base_client.py
Normal file
@@ -0,0 +1,433 @@
"""Protocol for vector database client implementations."""

from abc import abstractmethod
from typing import Any, Protocol, runtime_checkable, TypedDict, Annotated
from typing_extensions import Unpack, Required


from crewai.rag.types import (
    EmbeddingFunction,
    BaseRecord,
    SearchResult,
)


class BaseCollectionParams(TypedDict):
    """Base parameters for collection operations.

    Attributes:
        collection_name: The name of the collection/index to operate on.
    """

    collection_name: Required[
        Annotated[
            str,
            "Name of the collection/index. Implementations may have specific constraints (e.g., character limits, allowed characters, case sensitivity).",
        ]
    ]


class BaseCollectionAddParams(BaseCollectionParams):
    """Parameters for adding documents to a collection.

    Extends BaseCollectionParams with document-specific fields.

    Attributes:
        collection_name: The name of the collection to add documents to.
        documents: List of BaseRecord dictionaries containing document data.
    """

    documents: list[BaseRecord]


class BaseCollectionSearchParams(BaseCollectionParams, total=False):
    """Parameters for searching within a collection.

    Extends BaseCollectionParams with search-specific optional fields.
    All fields except collection_name and query are optional.

    Attributes:
        query: The text query to search for (required).
        limit: Maximum number of results to return.
        metadata_filter: Filter results by metadata fields.
        score_threshold: Minimum similarity score for results (0-1).
    """

    query: Required[str]
    limit: int
    metadata_filter: dict[str, Any]
    score_threshold: float


@runtime_checkable
class BaseClient(Protocol):
    """Protocol for vector store client implementations.

    This protocol defines the interface that all vector store client implementations
    must follow. It provides a consistent API for storing and retrieving
    documents with their vector embeddings across different vector database
    backends (e.g., Qdrant, ChromaDB, Weaviate). Implementing classes should
    handle connection management, data persistence, and vector similarity
    search operations specific to their backend.

    Implementation Guidelines:
        Implementations should accept BaseClientParams in their constructor to allow
        passing pre-configured client instances:

        class MyVectorClient:
            def __init__(self, client: Any | None = None, **kwargs):
                if client:
                    self.client = client
                else:
                    self.client = self._create_default_client(**kwargs)

    Notes:
        This protocol replaces the former BaseRAGStorage abstraction,
        providing a cleaner interface for vector store operations.

    Attributes:
        embedding_function: Callable that takes a list of text strings
            and returns a list of embedding vectors. Implementations
            should always provide a default embedding function.
        client: The underlying vector database client instance. This could be
            passed via BaseClientParams during initialization or created internally.
    """

    client: Any
    embedding_function: EmbeddingFunction

    @abstractmethod
    def create_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
        """Create a new collection/index in the vector database.

        Keyword Args:
            collection_name: The name of the collection to create. Must be unique within
                the vector database instance.

        Raises:
            ValueError: If collection name already exists.
            ConnectionError: If unable to connect to the vector database backend.
        """
        ...

    @abstractmethod
    async def acreate_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
        """Create a new collection/index in the vector database asynchronously.

        Keyword Args:
            collection_name: The name of the collection to create. Must be unique within
                the vector database instance.

        Raises:
            ValueError: If collection name already exists.
            ConnectionError: If unable to connect to the vector database backend.
        """
        ...

    @abstractmethod
    def get_or_create_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> Any:
        """Get an existing collection or create it if it doesn't exist.

        This method provides a convenient way to ensure a collection exists
        without having to check for its existence first.

        Keyword Args:
            collection_name: The name of the collection to get or create.

        Returns:
            A collection object whose type depends on the backend implementation.
            This could be a collection reference, ID, or client object.

        Raises:
            ValueError: If unable to create the collection.
            ConnectionError: If unable to connect to the vector database backend.
        """
        ...

    @abstractmethod
    async def aget_or_create_collection(
        self, **kwargs: Unpack[BaseCollectionParams]
    ) -> Any:
        """Get an existing collection or create it if it doesn't exist asynchronously.

        Keyword Args:
            collection_name: The name of the collection to get or create.

        Returns:
            A collection object whose type depends on the backend implementation.

        Raises:
            ValueError: If unable to create the collection.
            ConnectionError: If unable to connect to the vector database backend.
        """
        ...

    @abstractmethod
    def add_documents(self, **kwargs: Unpack[BaseCollectionAddParams]) -> None:
        """Add documents with their embeddings to a collection.

        This method performs an upsert operation - if a document with the same ID
        already exists, it will be updated with the new content and metadata.

        Implementations should handle embedding generation internally based on
        the configured embedding function.

        Keyword Args:
            collection_name: The name of the collection to add documents to.
            documents: List of BaseRecord dicts containing:
                - content: The text content (required)
                - doc_id: Optional unique identifier (auto-generated from content hash if missing)
                - metadata: Optional metadata dictionary
                Embeddings will be generated automatically.

        Raises:
            ValueError: If collection doesn't exist or documents list is empty.
            TypeError: If documents are not BaseRecord dict instances.
            ConnectionError: If unable to connect to the vector database backend.

        Example:
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>> from crewai.rag.types import BaseRecord
            >>> client = ChromaDBClient()
            >>>
            >>> records: list[BaseRecord] = [
            ...     {
            ...         "content": "Machine learning basics",
            ...         "metadata": {"source": "file3", "topic": "ML"}
            ...     },
            ...     {
            ...         "doc_id": "custom_id",
            ...         "content": "Deep learning fundamentals",
            ...         "metadata": {"source": "file4", "topic": "DL"}
            ...     }
            ... ]
            >>> client.add_documents(collection_name="my_docs", documents=records)
            >>>
            >>> records_with_id: list[BaseRecord] = [
            ...     {
            ...         "doc_id": "nlp_001",
            ...         "content": "Advanced NLP techniques",
            ...         "metadata": {"source": "file5", "topic": "NLP"}
            ...     }
            ... ]
            >>> client.add_documents(collection_name="my_docs", documents=records_with_id)
        """
        ...

    @abstractmethod
    async def aadd_documents(self, **kwargs: Unpack[BaseCollectionAddParams]) -> None:
        """Add documents with their embeddings to a collection asynchronously.

        Implementations should handle embedding generation internally based on
        the configured embedding function.

        Keyword Args:
            collection_name: The name of the collection to add documents to.
            documents: List of BaseRecord dicts containing:
                - content: The text content (required)
                - doc_id: Optional unique identifier (auto-generated from content hash if missing)
                - metadata: Optional metadata dictionary
                Embeddings will be generated automatically.

        Raises:
            ValueError: If collection doesn't exist or documents list is empty.
            TypeError: If documents are not BaseRecord dict instances.
            ConnectionError: If unable to connect to the vector database backend.

        Example:
            >>> import asyncio
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>> from crewai.rag.types import BaseRecord
            >>>
            >>> async def add_documents():
            ...     client = ChromaDBClient()
            ...
            ...     records: list[BaseRecord] = [
            ...         {
            ...             "doc_id": "doc2",
            ...             "content": "Async operations in Python",
            ...             "metadata": {"source": "file2", "topic": "async"}
            ...         }
            ...     ]
            ...     await client.aadd_documents(collection_name="my_docs", documents=records)
            ...
            >>> asyncio.run(add_documents())
        """
        ...

    @abstractmethod
    def search(
        self, **kwargs: Unpack[BaseCollectionSearchParams]
    ) -> list[SearchResult]:
        """Search for similar documents using a query.

        Performs a vector similarity search to find the most similar documents
        to the provided query.

        Keyword Args:
            collection_name: The name of the collection to search in.
            query: The text query to search for. The implementation handles
                embedding generation internally.
            limit: Maximum number of results to return. Defaults to 10.
            metadata_filter: Optional metadata filter to apply to the search. The exact
                format depends on the backend, but typically supports equality
                and range queries on metadata fields.
            score_threshold: Optional minimum similarity score threshold. Only
                results with scores >= this threshold will be returned. The
                score interpretation depends on the distance metric used.

        Returns:
            A list of SearchResult dictionaries ordered by similarity score in
            descending order. Each result contains:
                - id: Document ID
                - content: Document text content
                - metadata: Document metadata
                - score: Similarity score (0-1, higher is better)

        Raises:
            ValueError: If collection doesn't exist.
            ConnectionError: If unable to connect to the vector database backend.

        Example:
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>> client = ChromaDBClient()
            >>>
            >>> results = client.search(
            ...     collection_name="my_docs",
            ...     query="What is machine learning?",
            ...     limit=5,
            ...     metadata_filter={"source": "file1"},
            ...     score_threshold=0.7
            ... )
            >>> for result in results:
            ...     print(f"{result['id']}: {result['score']:.2f}")
        """
        ...

    @abstractmethod
    async def asearch(
        self, **kwargs: Unpack[BaseCollectionSearchParams]
    ) -> list[SearchResult]:
        """Search for similar documents using a query asynchronously.

        Keyword Args:
            collection_name: The name of the collection to search in.
            query: The text query to search for. The implementation handles
                embedding generation internally.
            limit: Maximum number of results to return. Defaults to 10.
            metadata_filter: Optional metadata filter to apply to the search.
            score_threshold: Optional minimum similarity score threshold.

        Returns:
            A list of SearchResult dictionaries ordered by similarity score.

        Raises:
            ValueError: If collection doesn't exist.
            ConnectionError: If unable to connect to the vector database backend.

        Example:
            >>> import asyncio
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>>
            >>> async def search_documents():
            ...     client = ChromaDBClient()
            ...     results = await client.asearch(
            ...         collection_name="my_docs",
            ...         query="Python programming best practices",
            ...         limit=5,
            ...         metadata_filter={"source": "file1"},
            ...         score_threshold=0.7
            ...     )
            ...     for result in results:
            ...         print(f"{result['id']}: {result['score']:.2f}")
            ...
            >>> asyncio.run(search_documents())
        """
        ...

    @abstractmethod
    def delete_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
        """Delete a collection and all its data.

        This operation is irreversible and will permanently remove all documents,
        embeddings, and metadata associated with the collection.

        Keyword Args:
            collection_name: The name of the collection to delete.

        Raises:
            ValueError: If the collection doesn't exist.
            ConnectionError: If unable to connect to the vector database backend.

        Example:
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>> client = ChromaDBClient()
            >>> client.delete_collection(collection_name="old_docs")
            >>> print("Collection 'old_docs' deleted successfully")
        """
        ...

    @abstractmethod
    async def adelete_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
        """Delete a collection and all its data asynchronously.

        Keyword Args:
            collection_name: The name of the collection to delete.

        Raises:
            ValueError: If the collection doesn't exist.
            ConnectionError: If unable to connect to the vector database backend.

        Example:
            >>> import asyncio
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>>
            >>> async def delete_old_collection():
            ...     client = ChromaDBClient()
            ...     await client.adelete_collection(collection_name="old_docs")
            ...     print("Collection 'old_docs' deleted successfully")
            ...
            >>> asyncio.run(delete_old_collection())
        """
        ...

    @abstractmethod
    def reset(self) -> None:
        """Reset the vector database by deleting all collections and data.

        This method provides a way to completely clear the vector database,
        removing all collections and their contents. Use with caution as
        this operation is irreversible.

        Raises:
            ConnectionError: If unable to connect to the vector database backend.
            PermissionError: If the operation is not allowed by the backend.

        Example:
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>> client = ChromaDBClient()
            >>> client.reset()
            >>> print("Vector database completely reset - all data deleted")
        """
        ...

    @abstractmethod
    async def areset(self) -> None:
        """Reset the vector database by deleting all collections and data asynchronously.

        Raises:
            ConnectionError: If unable to connect to the vector database backend.
            PermissionError: If the operation is not allowed by the backend.

        Example:
            >>> import asyncio
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>>
            >>> async def reset_database():
            ...     client = ChromaDBClient()
            ...     await client.areset()
            ...     print("Vector database completely reset - all data deleted")
            ...
            >>> asyncio.run(reset_database())
        """
        ...
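A rough sketch (editorial illustration, not part of the diff) of how a backend could be checked against the protocol above; the stub class and its no-op methods are hypothetical, and the runtime_checkable isinstance check only verifies that the members exist, not their signatures:

from crewai.rag.core.base_client import BaseClient  # module path added by this PR

class InMemoryClient:
    client = None
    embedding_function = staticmethod(lambda texts: [[0.0] * 3 for _ in texts])  # toy embedder
    def create_collection(self, **kwargs): ...
    async def acreate_collection(self, **kwargs): ...
    def get_or_create_collection(self, **kwargs): ...
    async def aget_or_create_collection(self, **kwargs): ...
    def add_documents(self, **kwargs): ...
    async def aadd_documents(self, **kwargs): ...
    def search(self, **kwargs): return []
    async def asearch(self, **kwargs): return []
    def delete_collection(self, **kwargs): ...
    async def adelete_collection(self, **kwargs): ...
    def reset(self): ...
    async def areset(self): ...

assert isinstance(InMemoryClient(), BaseClient)  # structural presence check only
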
30
src/crewai/rag/core/base_provider.py
Normal file
@@ -0,0 +1,30 @@
"""Base provider protocol for vector database client creation."""

from abc import ABC
from typing import Any, Protocol, runtime_checkable, Union
from pydantic import BaseModel, Field

from crewai.rag.types import EmbeddingFunction
from crewai.rag.embeddings.types import EmbeddingOptions


class BaseProviderOptions(BaseModel, ABC):
    """Base configuration for all provider options."""

    client_type: str = Field(..., description="Type of client to create")
    embedding_config: Union[EmbeddingOptions, EmbeddingFunction, None] = Field(
        default=None,
        description="Embedding configuration - either options for built-in providers or a custom function",
    )
    options: Any = Field(
        default=None, description="Additional provider-specific options"
    )


@runtime_checkable
class BaseProvider(Protocol):
    """Protocol for vector database client providers."""

    def __call__(self, options: BaseProviderOptions) -> Any:
        """Create and return a configured client instance."""
        ...
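An illustrative sketch (not from the diff; the "fake" names are placeholders) of a callable provider conforming to BaseProvider:

class FakeProviderOptions(BaseProviderOptions):
    client_type: str = "fake"

class FakeProvider:
    def __call__(self, options: BaseProviderOptions):
        # Return whatever client object the options describe.
        return object()

assert isinstance(FakeProvider(), BaseProvider)  # runtime_checkable structural check
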
148
src/crewai/rag/embeddings/factory.py
Normal file
@@ -0,0 +1,148 @@
"""Minimal embedding function factory for CrewAI."""

import os

from chromadb import EmbeddingFunction
from chromadb.utils.embedding_functions.amazon_bedrock_embedding_function import (
    AmazonBedrockEmbeddingFunction,
)
from chromadb.utils.embedding_functions.cohere_embedding_function import (
    CohereEmbeddingFunction,
)
from chromadb.utils.embedding_functions.google_embedding_function import (
    GooglePalmEmbeddingFunction,
    GoogleGenerativeAiEmbeddingFunction,
    GoogleVertexEmbeddingFunction,
)
from chromadb.utils.embedding_functions.huggingface_embedding_function import (
    HuggingFaceEmbeddingFunction,
)
from chromadb.utils.embedding_functions.instructor_embedding_function import (
    InstructorEmbeddingFunction,
)
from chromadb.utils.embedding_functions.jina_embedding_function import (
    JinaEmbeddingFunction,
)
from chromadb.utils.embedding_functions.ollama_embedding_function import (
    OllamaEmbeddingFunction,
)
from chromadb.utils.embedding_functions.onnx_mini_lm_l6_v2 import ONNXMiniLM_L6_V2
from chromadb.utils.embedding_functions.open_clip_embedding_function import (
    OpenCLIPEmbeddingFunction,
)
from chromadb.utils.embedding_functions.openai_embedding_function import (
    OpenAIEmbeddingFunction,
)
from chromadb.utils.embedding_functions.roboflow_embedding_function import (
    RoboflowEmbeddingFunction,
)
from chromadb.utils.embedding_functions.sentence_transformer_embedding_function import (
    SentenceTransformerEmbeddingFunction,
)
from chromadb.utils.embedding_functions.text2vec_embedding_function import (
    Text2VecEmbeddingFunction,
)

from crewai.rag.embeddings.types import EmbeddingOptions


def get_embedding_function(
    config: EmbeddingOptions | dict | None = None,
) -> EmbeddingFunction:
    """Get embedding function - delegates to ChromaDB.

    Args:
        config: Optional configuration - either an EmbeddingOptions object or a dict with:
            - provider: The embedding provider to use (default: "openai")
            - Any other provider-specific parameters

    Returns:
        EmbeddingFunction instance ready for use with ChromaDB

    Supported providers:
        - openai: OpenAI embeddings (default)
        - cohere: Cohere embeddings
        - ollama: Ollama local embeddings
        - huggingface: HuggingFace embeddings
        - sentence-transformer: Local sentence transformers
        - instructor: Instructor embeddings for specialized tasks
        - google-palm: Google PaLM embeddings
        - google-generativeai: Google Generative AI embeddings
        - google-vertex: Google Vertex AI embeddings
        - amazon-bedrock: AWS Bedrock embeddings
        - jina: Jina AI embeddings
        - roboflow: Roboflow embeddings for vision tasks
        - openclip: OpenCLIP embeddings for multimodal tasks
        - text2vec: Text2Vec embeddings
        - onnx: ONNX MiniLM-L6-v2 (no API key needed, included with ChromaDB)

    Examples:
        # Use default OpenAI with retry logic
        >>> embedder = get_embedding_function()

        # Use Cohere with dict
        >>> embedder = get_embedding_function({
        ...     "provider": "cohere",
        ...     "api_key": "your-key",
        ...     "model_name": "embed-english-v3.0"
        ... })

        # Use with EmbeddingOptions
        >>> embedder = get_embedding_function(
        ...     EmbeddingOptions(provider="sentence-transformer", model_name="all-MiniLM-L6-v2")
        ... )

        # Use local sentence transformers (no API key needed)
        >>> embedder = get_embedding_function({
        ...     "provider": "sentence-transformer",
        ...     "model_name": "all-MiniLM-L6-v2"
        ... })

        # Use Ollama for local embeddings
        >>> embedder = get_embedding_function({
        ...     "provider": "ollama",
        ...     "model_name": "nomic-embed-text"
        ... })

        # Use ONNX (no API key needed)
        >>> embedder = get_embedding_function({
        ...     "provider": "onnx"
        ... })
    """
    if config is None:
        return OpenAIEmbeddingFunction(
            api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
        )

    # Handle EmbeddingOptions object
    if isinstance(config, EmbeddingOptions):
        config_dict = config.model_dump(exclude_none=True)
    else:
        config_dict = config.copy()

    provider = config_dict.pop("provider", "openai")

    embedding_functions = {
        "openai": OpenAIEmbeddingFunction,
        "cohere": CohereEmbeddingFunction,
        "ollama": OllamaEmbeddingFunction,
        "huggingface": HuggingFaceEmbeddingFunction,
        "sentence-transformer": SentenceTransformerEmbeddingFunction,
        "instructor": InstructorEmbeddingFunction,
        "google-palm": GooglePalmEmbeddingFunction,
        "google-generativeai": GoogleGenerativeAiEmbeddingFunction,
        "google-vertex": GoogleVertexEmbeddingFunction,
        "amazon-bedrock": AmazonBedrockEmbeddingFunction,
        "jina": JinaEmbeddingFunction,
        "roboflow": RoboflowEmbeddingFunction,
        "openclip": OpenCLIPEmbeddingFunction,
        "text2vec": Text2VecEmbeddingFunction,
        "onnx": ONNXMiniLM_L6_V2,
    }

    if provider not in embedding_functions:
        raise ValueError(
            f"Unsupported provider: {provider}. "
            f"Available providers: {list(embedding_functions.keys())}"
        )
    return embedding_functions[provider](**config_dict)
62
src/crewai/rag/embeddings/types.py
Normal file
@@ -0,0 +1,62 @@
"""Type definitions for the embeddings module."""

from typing import Literal
from pydantic import BaseModel, Field, SecretStr

from crewai.rag.types import EmbeddingFunction


EmbeddingProvider = Literal[
    "openai",
    "cohere",
    "ollama",
    "huggingface",
    "sentence-transformer",
    "instructor",
    "google-palm",
    "google-generativeai",
    "google-vertex",
    "amazon-bedrock",
    "jina",
    "roboflow",
    "openclip",
    "text2vec",
    "onnx",
]
"""Supported embedding providers.

These correspond to the embedding functions available in ChromaDB's
embedding_functions module. Each provider has specific requirements
and configuration options.
"""


class EmbeddingOptions(BaseModel):
    """Configuration options for embedding providers.

    Generic attributes that can be passed to get_embedding_function
    to configure various embedding providers.
    """

    provider: EmbeddingProvider = Field(
        ..., description="Embedding provider name (e.g., 'openai', 'cohere', 'onnx')"
    )
    model_name: str | None = Field(
        default=None, description="Model name for the embedding provider"
    )
    api_key: SecretStr | None = Field(
        default=None, description="API key for the embedding provider"
    )


class EmbeddingConfig(BaseModel):
    """Configuration wrapper for embedding functions.

    Accepts either a pre-configured EmbeddingFunction or EmbeddingOptions
    to create one. This provides flexibility in how embeddings are configured.

    Attributes:
        function: Either a callable EmbeddingFunction or EmbeddingOptions to create one
    """

    function: EmbeddingFunction | EmbeddingOptions
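A short usage sketch (not part of the diff; the key value is a placeholder) of the options model above:

from pydantic import SecretStr

opts = EmbeddingOptions(
    provider="openai",
    model_name="text-embedding-3-small",
    api_key=SecretStr("sk-..."),
)
print(opts.model_dump(exclude_none=True)["provider"])  # "openai"
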
50
src/crewai/rag/types.py
Normal file
@@ -0,0 +1,50 @@
"""Type definitions for RAG (Retrieval-Augmented Generation) systems."""

from collections.abc import Callable, Mapping
from typing import TypeAlias, TypedDict, Any

from typing_extensions import Required


class BaseRecord(TypedDict, total=False):
    """A typed dictionary representing a document record.

    Attributes:
        doc_id: Optional unique identifier for the document. If not provided,
            a content-based ID will be generated using SHA256 hash.
        content: The text content of the document (required)
        metadata: Optional metadata associated with the document
    """

    doc_id: str
    content: Required[str]
    metadata: (
        Mapping[str, str | int | float | bool]
        | list[Mapping[str, str | int | float | bool]]
    )


DenseVector: TypeAlias = list[float]
IntVector: TypeAlias = list[int]

EmbeddingFunction: TypeAlias = Callable[..., Any]


class SearchResult(TypedDict):
    """Standard search result format for vector store queries.

    This provides a consistent interface for search results across different
    vector store implementations. Each implementation should convert their
    native result format to this standard format.

    Attributes:
        id: Unique identifier of the document
        content: The text content of the document
        metadata: Optional metadata associated with the document
        score: Similarity score (higher is better, typically between 0 and 1)
    """

    id: str
    content: str
    metadata: dict[str, Any]
    score: float
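An illustrative sketch (not in the diff) of the two TypedDicts above, with made-up values:

record: BaseRecord = {
    "content": "CrewAI supports RAG pipelines.",
    "metadata": {"source": "docs", "year": 2024},
}
hit: SearchResult = {
    "id": "doc-1",
    "content": record["content"],
    "metadata": {"source": "docs"},
    "score": 0.92,
}
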
@@ -72,6 +72,10 @@ class Task(BaseModel):
        output_pydantic: Pydantic model for task output.
        security_config: Security configuration including fingerprinting.
        tools: List of tools/resources limited for task execution.
        allow_crewai_trigger_context: Optional flag to control crewai_trigger_payload injection.
            None (default): Auto-inject for first task only.
            True: Always inject trigger payload for this task.
            False: Never inject trigger payload, even for first task.
    """

    __hash__ = object.__hash__  # type: ignore
@@ -163,6 +167,10 @@ class Task(BaseModel):
    end_time: Optional[datetime.datetime] = Field(
        default=None, description="End time of the task execution"
    )
    allow_crewai_trigger_context: Optional[bool] = Field(
        default=None,
        description="Whether this task should append 'Trigger Payload: {crewai_trigger_payload}' to the task description when crewai_trigger_payload exists in crew inputs.",
    )
    model_config = {"arbitrary_types_allowed": True}

    @field_validator("guardrail")
@@ -548,12 +556,23 @@ class Task(BaseModel):
            str: The formatted prompt string containing the task description,
                expected output, and optional markdown formatting instructions.
        """
        tasks_slices = [self.description]
        description = self.description

        should_inject = self.allow_crewai_trigger_context

        if should_inject and self.agent:
            crew = getattr(self.agent, 'crew', None)
            if crew and hasattr(crew, '_inputs') and crew._inputs:
                trigger_payload = crew._inputs.get("crewai_trigger_payload")
                if trigger_payload is not None:
                    description += f"\n\nTrigger Payload: {trigger_payload}"

        tasks_slices = [description]

        output = self.i18n.slice("expected_output").format(
            expected_output=self.expected_output
        )
        tasks_slices = [self.description, output]
        tasks_slices = [description, output]

        if self.markdown:
            markdown_instruction = """Your final answer MUST be formatted in Markdown syntax.

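A simplified sketch (assumption-level, not the actual Task internals) of the injection rule introduced above: when allow_crewai_trigger_context is truthy and the crew inputs carry crewai_trigger_payload, the payload is appended to the task description.

def build_description(description: str, allow: bool | None, inputs: dict) -> str:
    payload = inputs.get("crewai_trigger_payload")
    if allow and payload is not None:
        return f"{description}\n\nTrigger Payload: {payload}"
    return description

print(build_description("Summarize the lead", True, {"crewai_trigger_payload": {"id": 42}}))
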
@@ -14,12 +14,14 @@ from pydantic import BaseModel as PydanticBaseModel

from crewai.tools.structured_tool import CrewStructuredTool


class EnvVar(BaseModel):
    name: str
    description: str
    required: bool = True
    default: Optional[str] = None


class BaseTool(BaseModel, ABC):
    class _ArgsSchemaPlaceholder(PydanticBaseModel):
        pass
@@ -108,7 +110,7 @@ class BaseTool(BaseModel, ABC):
    def to_structured_tool(self) -> CrewStructuredTool:
        """Convert this tool to a CrewStructuredTool instance."""
        self._set_args_schema()
        return CrewStructuredTool(
        structured_tool = CrewStructuredTool(
            name=self.name,
            description=self.description,
            args_schema=self.args_schema,
@@ -117,6 +119,8 @@ class BaseTool(BaseModel, ABC):
            max_usage_count=self.max_usage_count,
            current_usage_count=self.current_usage_count,
        )
        structured_tool._original_tool = self
        return structured_tool

    @classmethod
    def from_langchain(cls, tool: Any) -> "BaseTool":
@@ -276,7 +280,9 @@ def to_langchain(
    return [t.to_structured_tool() if isinstance(t, BaseTool) else t for t in tools]


def tool(*args, result_as_answer: bool = False, max_usage_count: int | None = None) -> Callable:
def tool(
    *args, result_as_answer: bool = False, max_usage_count: int | None = None
) -> Callable:
    """
    Decorator to create a tool from a function.

@@ -10,6 +10,11 @@ from pydantic import BaseModel, Field, create_model

from crewai.utilities.logger import Logger

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from crewai.tools.base_tool import BaseTool


class CrewStructuredTool:
    """A structured tool that can operate on any number of inputs.
@@ -18,6 +23,8 @@ class CrewStructuredTool:
    that integrates better with CrewAI's ecosystem.
    """

    _original_tool: BaseTool | None = None

    def __init__(
        self,
        name: str,
@@ -47,6 +54,7 @@ class CrewStructuredTool:
        self.result_as_answer = result_as_answer
        self.max_usage_count = max_usage_count
        self.current_usage_count = current_usage_count
        self._original_tool = None

        # Validate the function signature matches the schema
        self._validate_function_signature()
@@ -219,6 +227,8 @@ class CrewStructuredTool:
        """
        parsed_args = self._parse_args(input)

        self._increment_usage_count()

        if inspect.iscoroutinefunction(self.func):
            return await self.func(**parsed_args, **kwargs)
        else:
@@ -242,6 +252,8 @@ class CrewStructuredTool:
        """Main method for tool execution."""
        parsed_args = self._parse_args(input)

        self._increment_usage_count()

        if inspect.iscoroutinefunction(self.func):
            result = asyncio.run(self.func(**parsed_args, **kwargs))
            return result
@@ -253,6 +265,12 @@ class CrewStructuredTool:

        return result

    def _increment_usage_count(self) -> None:
        """Increment the usage count."""
        self.current_usage_count += 1
        if self._original_tool is not None:
            self._original_tool.current_usage_count = self.current_usage_count

    @property
    def args(self) -> dict:
        """Get the tool's input arguments schema."""

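A minimal sketch (hypothetical classes, not the real ones) of the back-reference added above: the structured tool keeps _original_tool so each invocation increments both counters in step.

class Original:
    current_usage_count = 0

class Structured:
    def __init__(self, original):
        self.current_usage_count = 0
        self._original_tool = original
    def _increment_usage_count(self):
        self.current_usage_count += 1
        if self._original_tool is not None:
            self._original_tool.current_usage_count = self.current_usage_count

orig = Original()
tool = Structured(orig)
tool._increment_usage_count()
print(orig.current_usage_count)  # 1
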
18
src/crewai/types/hitl.py
Normal file
@@ -0,0 +1,18 @@
from typing import List, Dict, TypedDict


class HITLResumeInfo(TypedDict, total=False):
    """HITL resume information passed from flow to crew."""

    task_id: str
    crew_execution_id: str
    task_key: str
    task_output: str
    human_feedback: str
    previous_messages: List[Dict[str, str]]


class CrewInputsWithHITL(TypedDict, total=False):
    """Crew inputs that may contain HITL resume information."""

    _hitl_resume: HITLResumeInfo
@@ -1,9 +1,10 @@
import os
import re
import portalocker
from chromadb import PersistentClient
from hashlib import md5
from typing import Optional

from crewai.utilities.paths import db_storage_path

MIN_COLLECTION_LENGTH = 3
MAX_COLLECTION_LENGTH = 63
@@ -27,7 +28,9 @@ def is_ipv4_pattern(name: str) -> bool:
    return bool(IPV4_PATTERN.match(name))


def sanitize_collection_name(name: Optional[str], max_collection_length: int = MAX_COLLECTION_LENGTH) -> str:
def sanitize_collection_name(
    name: Optional[str], max_collection_length: int = MAX_COLLECTION_LENGTH
) -> str:
    """
    Sanitize a collection name to meet ChromaDB requirements:
    1. 3-63 characters long
@@ -72,7 +75,8 @@ def create_persistent_client(path: str, **kwargs):
    concurrent creations. Works for both multi-threads and multi-processes
    environments.
    """
    lockfile = f"chromadb-{md5(path.encode(), usedforsecurity=False).hexdigest()}.lock"
    lock_id = md5(path.encode(), usedforsecurity=False).hexdigest()
    lockfile = os.path.join(db_storage_path(), f"chromadb-{lock_id}.lock")
    with portalocker.Lock(lockfile):
        client = PersistentClient(path=path, **kwargs)

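A quick illustration (not from the diff; the path is an example) of the new lock-file naming: the lock now lives under db_storage_path() and is keyed by an MD5 of the ChromaDB path, so concurrent creators of the same path share one lock.

from hashlib import md5

path = "/tmp/chroma"  # example path
lock_id = md5(path.encode(), usedforsecurity=False).hexdigest()
print(f"chromadb-{lock_id}.lock")
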
@@ -67,11 +67,9 @@ from .memory_events (

# events
from .event_listener import EventListener
from .third_party.agentops_listener import agentops_listener

__all__ = [
    "EventListener",
    "agentops_listener",
    "CrewAIEventsBus",
    "crewai_event_bus",
    "AgentExecutionStartedEvent",
@@ -105,7 +103,6 @@ __all__ = [
    "MemoryRetrievalStartedEvent",
    "MemoryRetrievalCompletedEvent",
    "EventListener",
    "agentops_listener",
    "CrewKickoffStartedEvent",
    "CrewKickoffCompletedEvent",
    "CrewKickoffFailedEvent",

@@ -161,8 +161,10 @@ class EventListener(BaseEventListener):
        def on_task_started(source, event: TaskStartedEvent):
            span = self._telemetry.task_started(crew=source.agent.crew, task=source)
            self.execution_spans[source] = span
            # Pass both task ID and task name (if set)
            task_name = source.name if hasattr(source, 'name') and source.name else None
            self.formatter.create_task_branch(
                self.formatter.current_crew_tree, source.id
                self.formatter.current_crew_tree, source.id, task_name
            )

        @crewai_event_bus.on(TaskCompletedEvent)
@@ -173,11 +175,14 @@ class EventListener(BaseEventListener):
            self._telemetry.task_ended(span, source, source.agent.crew)
            self.execution_spans[source] = None

            # Pass task name if it exists
            task_name = source.name if hasattr(source, 'name') and source.name else None
            self.formatter.update_task_status(
                self.formatter.current_crew_tree,
                source.id,
                source.agent.role,
                "completed",
                task_name
            )

        @crewai_event_bus.on(TaskFailedEvent)
@@ -188,11 +193,14 @@ class EventListener(BaseEventListener):
            self._telemetry.task_ended(span, source, source.agent.crew)
            self.execution_spans[source] = None

            # Pass task name if it exists
            task_name = source.name if hasattr(source, 'name') and source.name else None
            self.formatter.update_task_status(
                self.formatter.current_crew_tree,
                source.id,
                source.agent.role,
                "failed",
                task_name
            )

        # ----------- AGENT EVENTS -----------

@@ -4,7 +4,7 @@ from typing import Dict, List, Any, Optional
from dataclasses import dataclass, field

from crewai.utilities.constants import CREWAI_BASE_URL
from crewai.cli.authentication.token import get_auth_token
from crewai.cli.authentication.token import AuthError, get_auth_token

from crewai.cli.version import get_crewai_version
from crewai.cli.plus_api import PlusAPI
@@ -40,30 +40,42 @@ class TraceBatch:
class TraceBatchManager:
    """Single responsibility: Manage batches and event buffering"""

    is_current_batch_ephemeral: bool = False

    def __init__(self):
        self.plus_api = PlusAPI(api_key=get_auth_token())
        try:
            self.plus_api = PlusAPI(api_key=get_auth_token())
        except AuthError:
            self.plus_api = PlusAPI(api_key="")

        self.trace_batch_id: Optional[str] = None  # Backend ID
        self.current_batch: Optional[TraceBatch] = None
        self.event_buffer: List[TraceEvent] = []
        self.execution_start_times: Dict[str, datetime] = {}

    def initialize_batch(
        self, user_context: Dict[str, str], execution_metadata: Dict[str, Any]
        self,
        user_context: Dict[str, str],
        execution_metadata: Dict[str, Any],
        use_ephemeral: bool = False,
    ) -> TraceBatch:
        """Initialize a new trace batch"""
        self.current_batch = TraceBatch(
            user_context=user_context, execution_metadata=execution_metadata
        )
        self.event_buffer.clear()
        self.is_current_batch_ephemeral = use_ephemeral

        self.record_start_time("execution")

        self._initialize_backend_batch(user_context, execution_metadata)
        self._initialize_backend_batch(user_context, execution_metadata, use_ephemeral)

        return self.current_batch

    def _initialize_backend_batch(
        self, user_context: Dict[str, str], execution_metadata: Dict[str, Any]
        self,
        user_context: Dict[str, str],
        execution_metadata: Dict[str, Any],
        use_ephemeral: bool = False,
    ):
        """Send batch initialization to backend"""

@@ -74,6 +86,7 @@ class TraceBatchManager:
        payload = {
            "trace_id": self.current_batch.batch_id,
            "execution_type": execution_metadata.get("execution_type", "crew"),
            "user_identifier": execution_metadata.get("user_context", None),
            "execution_context": {
                "crew_fingerprint": execution_metadata.get("crew_fingerprint"),
                "crew_name": execution_metadata.get("crew_name", None),
@@ -91,12 +104,22 @@ class TraceBatchManager:
                "execution_started_at": datetime.now(timezone.utc).isoformat(),
            },
        }
        if use_ephemeral:
            payload["ephemeral_trace_id"] = self.current_batch.batch_id

        response = self.plus_api.initialize_trace_batch(payload)
        response = (
            self.plus_api.initialize_ephemeral_trace_batch(payload)
            if use_ephemeral
            else self.plus_api.initialize_trace_batch(payload)
        )

        if response.status_code == 201 or response.status_code == 200:
            response_data = response.json()
            self.trace_batch_id = response_data["trace_id"]
            self.trace_batch_id = (
                response_data["trace_id"]
                if not use_ephemeral
                else response_data["ephemeral_trace_id"]
            )
            console = Console()
            panel = Panel(
                f"✅ Trace batch initialized with session ID: {self.trace_batch_id}",
@@ -134,7 +157,11 @@ class TraceBatchManager:
        if not self.trace_batch_id:
            raise Exception("❌ Trace batch ID not found")

        response = self.plus_api.send_trace_events(self.trace_batch_id, payload)
        response = (
            self.plus_api.send_ephemeral_trace_events(self.trace_batch_id, payload)
            if self.is_current_batch_ephemeral
            else self.plus_api.send_trace_events(self.trace_batch_id, payload)
        )

        if response.status_code == 200 or response.status_code == 201:
            self.event_buffer.clear()
@@ -153,7 +180,6 @@ class TraceBatchManager:

        if self.event_buffer:
            self._send_events_to_backend()

        self._finalize_backend_batch()

        self.current_batch.events = self.event_buffer.copy()
@@ -163,6 +189,7 @@ class TraceBatchManager:
        self.current_batch = None
        self.event_buffer.clear()
        self.trace_batch_id = None
        self.is_current_batch_ephemeral = False

        self._cleanup_batch_data()

@@ -182,12 +209,24 @@ class TraceBatchManager:
            "final_event_count": total_events,
        }

        response = self.plus_api.finalize_trace_batch(self.trace_batch_id, payload)
        response = (
            self.plus_api.finalize_ephemeral_trace_batch(
                self.trace_batch_id, payload
            )
            if self.is_current_batch_ephemeral
            else self.plus_api.finalize_trace_batch(self.trace_batch_id, payload)
        )

        if response.status_code == 200:
            access_code = response.json().get("access_code", None)
            console = Console()
            return_link = (
                f"{CREWAI_BASE_URL}/crewai_plus/trace_batches/{self.trace_batch_id}"
                if not self.is_current_batch_ephemeral and access_code is None
                else f"{CREWAI_BASE_URL}/crewai_plus/ephemeral_trace_batches/{self.trace_batch_id}?access_code={access_code}"
            )
            panel = Panel(
                f"✅ Trace batch finalized with session ID: {self.trace_batch_id}. View here: {CREWAI_BASE_URL}/crewai_plus/trace_batches/{self.trace_batch_id}",
                f"✅ Trace batch finalized with session ID: {self.trace_batch_id}. View here: {return_link} {f', Access Code: {access_code}' if access_code else ''}",
                title="Trace Batch Finalization",
                border_style="green",
            )

@@ -13,7 +13,6 @@ from crewai.utilities.events.agent_events (
    AgentExecutionErrorEvent,
)
from crewai.utilities.events.listeners.tracing.types import TraceEvent
from crewai.utilities.events.listeners.tracing.utils import is_tracing_enabled
from crewai.utilities.events.reasoning_events import (
    AgentReasoningStartedEvent,
    AgentReasoningCompletedEvent,
@@ -67,7 +66,7 @@ from crewai.utilities.events.memory_events import (
    MemorySaveFailedEvent,
)

from crewai.cli.authentication.token import get_auth_token
from crewai.cli.authentication.token import AuthError, get_auth_token
from crewai.cli.version import get_crewai_version


@@ -76,13 +75,12 @@ class TraceCollectionListener(BaseEventListener):
    Trace collection listener that orchestrates trace collection
    """

    trace_enabled: Optional[bool] = False
    complex_events = ["task_started", "llm_call_started", "llm_call_completed"]

    _instance = None
    _initialized = False

    def __new__(cls, batch_manager=None, tracing: Optional[bool] = False):
    def __new__(cls, batch_manager=None):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance
@@ -90,25 +88,22 @@ class TraceCollectionListener(BaseEventListener):
    def __init__(
        self,
        batch_manager: Optional[TraceBatchManager] = None,
        tracing: Optional[bool] = False,
    ):
        if self._initialized:
            return

        super().__init__()
        self.batch_manager = batch_manager or TraceBatchManager()
        self.tracing = tracing or False
        self.trace_enabled = self._check_trace_enabled()
        self._initialized = True

    def _check_trace_enabled(self) -> bool:
    def _check_authenticated(self) -> bool:
        """Check if tracing should be enabled"""
        auth_token = get_auth_token()
        if not auth_token:
        try:
            res = bool(get_auth_token())
            return res
        except AuthError:
            return False

        return is_tracing_enabled() or self.tracing

    def _get_user_context(self) -> Dict[str, str]:
        """Extract user context for tracing"""
        return {
@@ -120,8 +115,6 @@ class TraceCollectionListener(BaseEventListener):

    def setup_listeners(self, crewai_event_bus):
        """Setup event listeners - delegates to specific handlers"""
        if not self.trace_enabled:
            return

        self._register_flow_event_handlers(crewai_event_bus)
        self._register_context_event_handlers(crewai_event_bus)
@@ -167,7 +160,7 @@ class TraceCollectionListener(BaseEventListener):
        @event_bus.on(CrewKickoffStartedEvent)
        def on_crew_started(source, event):
            if not self.batch_manager.is_batch_initialized():
                self._initialize_batch(source, event)
                self._initialize_crew_batch(source, event)
            self._handle_trace_event("crew_kickoff_started", source, event)

        @event_bus.on(CrewKickoffCompletedEvent)
@@ -287,7 +280,7 @@ class TraceCollectionListener(BaseEventListener):
        def on_agent_reasoning_failed(source, event):
            self._handle_action_event("agent_reasoning_failed", source, event)

    def _initialize_batch(self, source: Any, event: Any):
    def _initialize_crew_batch(self, source: Any, event: Any):
        """Initialize trace batch"""
        user_context = self._get_user_context()
        execution_metadata = {
@@ -296,7 +289,7 @@ class TraceCollectionListener(BaseEventListener):
            "crewai_version": get_crewai_version(),
        }

        self.batch_manager.initialize_batch(user_context, execution_metadata)
        self._initialize_batch(user_context, execution_metadata)

    def _initialize_flow_batch(self, source: Any, event: Any):
        """Initialize trace batch for Flow execution"""
@@ -308,7 +301,20 @@ class TraceCollectionListener(BaseEventListener):
            "execution_type": "flow",
        }

        self.batch_manager.initialize_batch(user_context, execution_metadata)
        self._initialize_batch(user_context, execution_metadata)

    def _initialize_batch(
        self, user_context: Dict[str, str], execution_metadata: Dict[str, Any]
    ):
        """Initialize trace batch if ephemeral"""
        if not self._check_authenticated():
            self.batch_manager.initialize_batch(
                user_context, execution_metadata, use_ephemeral=True
            )
        else:
            self.batch_manager.initialize_batch(
                user_context, execution_metadata, use_ephemeral=False
            )

    def _handle_trace_event(self, event_type: str, source: Any, event: Any):
        """Generic handler for context end events"""

@@ -1,5 +1,153 @@
import os
import platform
import uuid
import hashlib
import subprocess
import getpass
from pathlib import Path
from datetime import datetime
import re
import json

import click

from crewai.utilities.paths import db_storage_path


def is_tracing_enabled() -> bool:
    return os.getenv("CREWAI_TRACING_ENABLED", "false").lower() == "true"


def on_first_execution_tracing_confirmation() -> bool:
    if _is_test_environment():
        return False

    if is_first_execution():
        mark_first_execution_done()
        return click.confirm(
            "This is the first execution of CrewAI. Do you want to enable tracing?",
            default=True,
            show_default=True,
        )
    return False


def _is_test_environment() -> bool:
    """Detect if we're running in a test environment."""
    return os.environ.get("CREWAI_TESTING", "").lower() == "true"


def _get_machine_id() -> str:
    """Stable, privacy-preserving machine fingerprint (cross-platform)."""
    parts = []

    try:
        mac = ":".join(
            ["{:02x}".format((uuid.getnode() >> b) & 0xFF) for b in range(0, 12, 2)][
                ::-1
            ]
        )
        parts.append(mac)
    except Exception:
        pass

    sysname = platform.system()
    parts.append(sysname)

    try:
        if sysname == "Darwin":
            res = subprocess.run(
                ["system_profiler", "SPHardwareDataType"],
                capture_output=True,
                text=True,
                timeout=2,
            )
            m = re.search(r"Hardware UUID:\s*([A-Fa-f0-9\-]+)", res.stdout)
            if m:
                parts.append(m.group(1))
        elif sysname == "Linux":
            try:
                parts.append(Path("/etc/machine-id").read_text().strip())
            except Exception:
                parts.append(Path("/sys/class/dmi/id/product_uuid").read_text().strip())
        elif sysname == "Windows":
            res = subprocess.run(
                ["wmic", "csproduct", "get", "UUID"],
                capture_output=True,
                text=True,
                timeout=2,
            )
            lines = [line.strip() for line in res.stdout.splitlines() if line.strip()]
            if len(lines) >= 2:
                parts.append(lines[1])
    except Exception:
        pass

    return hashlib.sha256("".join(parts).encode()).hexdigest()


def _user_data_file() -> Path:
    base = Path(db_storage_path())
    base.mkdir(parents=True, exist_ok=True)
    return base / ".crewai_user.json"


def _load_user_data() -> dict:
    p = _user_data_file()
    if p.exists():
        try:
            return json.loads(p.read_text())
        except Exception:
            pass
    return {}


def _save_user_data(data: dict) -> None:
    try:
        p = _user_data_file()
        p.write_text(json.dumps(data, indent=2))
    except Exception:
        pass


def get_user_id() -> str:
    """Stable, anonymized user identifier with caching."""
    data = _load_user_data()

    if "user_id" in data:
        return data["user_id"]

    try:
        username = getpass.getuser()
    except Exception:
        username = "unknown"

    seed = f"{username}|{_get_machine_id()}"
    uid = hashlib.sha256(seed.encode()).hexdigest()

    data["user_id"] = uid
    _save_user_data(data)
    return uid


def is_first_execution() -> bool:
    """True if this is the first execution for this user."""
    data = _load_user_data()
    return not data.get("first_execution_done", False)


def mark_first_execution_done() -> None:
    """Mark that the first execution has been completed."""
    data = _load_user_data()
    if data.get("first_execution_done", False):
        return

    data.update(
        {
            "first_execution_done": True,
            "first_execution_at": datetime.now().timestamp(),
            "user_id": get_user_id(),
            "machine_id": _get_machine_id(),
        }
    )
    _save_user_data(data)

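A worked sketch (illustrative values only) of the identifier derivation above: the user ID is a SHA-256 of "<username>|<machine fingerprint>", so it is stable per user and machine while revealing neither directly.

import hashlib

seed = "alice|" + hashlib.sha256(b"example-machine-parts").hexdigest()
print(hashlib.sha256(seed.encode()).hexdigest()[:12])  # stable, anonymized prefix
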
@@ -1 +0,0 @@
from .agentops_listener import agentops_listener

@@ -1,67 +0,0 @@
from typing import Optional

from crewai.utilities.events import (
    CrewKickoffCompletedEvent,
    ToolUsageErrorEvent,
    ToolUsageStartedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
from crewai.utilities.events.task_events import TaskEvaluationEvent

try:
    import agentops

    AGENTOPS_INSTALLED = True
except ImportError:
    AGENTOPS_INSTALLED = False


class AgentOpsListener(BaseEventListener):
    tool_event: Optional["agentops.ToolEvent"] = None
    session: Optional["agentops.Session"] = None

    def __init__(self):
        super().__init__()

    def setup_listeners(self, crewai_event_bus):
        if not AGENTOPS_INSTALLED:
            return

        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
            self.session = agentops.init()
            for agent in source.agents:
                if self.session:
                    self.session.create_agent(
                        name=agent.role,
                        agent_id=str(agent.id),
                    )

        @crewai_event_bus.on(CrewKickoffCompletedEvent)
        def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
            if self.session:
                self.session.end_session(
                    end_state="Success",
                    end_state_reason="Finished Execution",
                )

        @crewai_event_bus.on(ToolUsageStartedEvent)
        def on_tool_usage_started(source, event: ToolUsageStartedEvent):
            self.tool_event = agentops.ToolEvent(name=event.tool_name)
            if self.session:
                self.session.record(self.tool_event)

        @crewai_event_bus.on(ToolUsageErrorEvent)
        def on_tool_usage_error(source, event: ToolUsageErrorEvent):
            agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)

        @crewai_event_bus.on(TaskEvaluationEvent)
        def on_task_evaluation(source, event: TaskEvaluationEvent):
            if self.session:
                self.session.create_agent(
                    name="Task Evaluator", agent_id=str(source.original_agent.id)
                )


agentops_listener = AgentOpsListener()
@@ -220,14 +220,22 @@ class ConsoleFormatter:
        return tree

    def create_task_branch(
        self, crew_tree: Optional[Tree], task_id: str
        self, crew_tree: Optional[Tree], task_id: str, task_name: Optional[str] = None
    ) -> Optional[Tree]:
        """Create and initialize a task branch."""
        if not self.verbose:
            return None

        task_content = Text()
        task_content.append(f"📋 Task: {task_id}", style="yellow bold")

        # Display task name if available, otherwise just the ID
        if task_name:
            task_content.append("📋 Task: ", style="yellow bold")
            task_content.append(f"{task_name}", style="yellow bold")
            task_content.append(f" (ID: {task_id})", style="yellow dim")
        else:
            task_content.append(f"📋 Task: {task_id}", style="yellow bold")

        task_content.append("\nStatus: ", style="white")
        task_content.append("Executing Task...", style="yellow dim")

@@ -251,6 +259,7 @@ class ConsoleFormatter:
        task_id: str,
        agent_role: str,
        status: str = "completed",
        task_name: Optional[str] = None,
    ) -> None:
        """Update task status in the tree."""
        if not self.verbose or crew_tree is None:
@@ -270,8 +279,13 @@ class ConsoleFormatter:
            if str(task_id) in str(branch.label):
                # Build label without introducing stray blank lines
                task_content = Text()
                # First line: Task ID
                task_content.append(f"📋 Task: {task_id}", style=f"{style} bold")
                # First line: Task ID/name
                if task_name:
                    task_content.append("📋 Task: ", style=f"{style} bold")
                    task_content.append(f"{task_name}", style=f"{style} bold")
                    task_content.append(f" (ID: {task_id})", style=f"{style} dim")
                else:
                    task_content.append(f"📋 Task: {task_id}", style=f"{style} bold")

                # Second line: Assigned to
                task_content.append("\nAssigned to: ", style="white")
@@ -285,8 +299,9 @@ class ConsoleFormatter:
                break

        # Show status panel
        display_name = task_name if task_name else str(task_id)
        content = self.create_status_content(
            f"Task {status.title()}", str(task_id), style, Agent=agent_role
            f"Task {status.title()}", display_name, style, Agent=agent_role
        )
        self.print_panel(content, panel_title, style)
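For reference (an editorial illustration, not part of the diff), the label shape the formatter now produces: with a task_name it renders "📋 Task: <name> (ID: <id>)", otherwise it falls back to "📋 Task: <id>".

def task_label(task_id: str, task_name: str | None = None) -> str:
    if task_name:
        return f"📋 Task: {task_name} (ID: {task_id})"
    return f"📋 Task: {task_id}"

print(task_label("abc123", "Research Topic"))  # 📋 Task: Research Topic (ID: abc123)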