Compare commits

..

4 Commits

Author SHA1 Message Date
Devin AI
d9babfae81 Fix lint: Remove unused variable assignment
Co-Authored-By: João <joao@crewai.com>
2025-08-13 13:02:31 +00:00
Devin AI
2c98d0c1d9 Add test scripts for manual verification
Co-Authored-By: João <joao@crewai.com>
2025-08-13 13:00:25 +00:00
Devin AI
ac69573afa Fix lint: Remove unused pytest import
Co-Authored-By: João <joao@crewai.com>
2025-08-13 12:58:26 +00:00
Devin AI
70104e9651 Fix verbose output to show task names instead of UUIDs
- Modified event listener to pass task objects instead of IDs to console formatter
- Updated console formatter methods to display task names with ID fallback
- Added helper method _get_task_display_name with fallback logic:
  1. Task name + partial ID: 'Research Analysis (ID: 12345678...)'
  2. Description + partial ID: 'Analyze market trends... (ID: 12345678...)'
  3. Full ID as final fallback: '12345678-1234-5678-9012-123456789abc'
- Added comprehensive tests for verbose output functionality

Fixes #3310

Co-Authored-By: João <joao@crewai.com>
2025-08-13 12:55:13 +00:00
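For context, the task-name fallback described in the last commit message could be implemented roughly as sketched below. This is a hypothetical reconstruction based only on the commit description, not the code actually merged; the `name`, `description`, and `id` attributes on the task object are assumptions.

```python
def get_task_display_name(task) -> str:
    """Sketch of the fallback logic described above (in the PR this is a
    console-formatter method named `_get_task_display_name`)."""
    task_id = str(getattr(task, "id", "")) or "unknown"
    short_id = f"{task_id[:8]}..." if len(task_id) > 8 else task_id

    # 1. Prefer an explicit task name plus a partial ID
    name = getattr(task, "name", None)
    if name:
        return f"{name} (ID: {short_id})"

    # 2. Fall back to a truncated description plus a partial ID
    description = getattr(task, "description", None)
    if description:
        snippet = f"{description[:30]}..." if len(description) > 30 else description
        return f"{snippet} (ID: {short_id})"

    # 3. Final fallback: the full ID
    return task_id
```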
72 changed files with 1319 additions and 3875 deletions

.gitignore vendored
View File

@@ -21,12 +21,10 @@ crew_tasks_output.json
.mypy_cache
.ruff_cache
.venv
agentops.log
test_flow.html
crewairules.mdc
plan.md
conceptual_plan.md
build_image
chromadb-*.lock
# AgentOps
agentops.log

View File

@@ -238,8 +238,7 @@
"en/observability/opik",
"en/observability/patronus-evaluation",
"en/observability/portkey",
"en/observability/weave",
"en/observability/truefoundry"
"en/observability/weave"
]
},
{
@@ -282,7 +281,6 @@
{
"group": "Features",
"pages": [
"en/enterprise/features/rbac",
"en/enterprise/features/tool-repository",
"en/enterprise/features/webhook-streaming",
"en/enterprise/features/traces",
@@ -346,7 +344,7 @@
},
{
"group": "Endpoints",
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.en.yaml"
"openapi": "enterprise-api.yaml"
}
]
},
@@ -355,7 +353,7 @@
"groups": [
{
"group": "Examples",
"pages": ["en/examples/example", "en/examples/cookbooks"]
"pages": ["en/examples/example"]
}
]
}
@@ -577,8 +575,7 @@
"pt-BR/observability/opik",
"pt-BR/observability/patronus-evaluation",
"pt-BR/observability/portkey",
"pt-BR/observability/weave",
"pt-BR/observability/truefoundry"
"pt-BR/observability/weave"
]
},
{
@@ -621,7 +618,6 @@
{
"group": "Funcionalidades",
"pages": [
"pt-BR/enterprise/features/rbac",
"pt-BR/enterprise/features/tool-repository",
"pt-BR/enterprise/features/webhook-streaming",
"pt-BR/enterprise/features/traces",
@@ -686,7 +682,7 @@
},
{
"group": "Endpoints",
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.pt-BR.yaml"
"openapi": "enterprise-api.yaml"
}
]
},
@@ -695,7 +691,7 @@
"groups": [
{
"group": "Exemplos",
"pages": ["pt-BR/examples/example", "pt-BR/examples/cookbooks"]
"pages": ["pt-BR/examples/example"]
}
]
}
@@ -711,7 +707,7 @@
"icon": "globe"
},
{
"anchor": "포럼",
"anchor": "법정",
"href": "https://community.crewai.com",
"icon": "discourse"
},
@@ -721,7 +717,7 @@
"icon": "robot"
},
{
"anchor": "릴리스",
"anchor": "출시",
"href": "https://github.com/crewAIInc/crewAI/releases",
"icon": "tag"
}
@@ -736,22 +732,22 @@
"pages": ["ko/introduction", "ko/installation", "ko/quickstart"]
},
{
"group": "가이드",
"group": "안내서",
"pages": [
{
"group": "전략",
"pages": ["ko/guides/concepts/evaluating-use-cases"]
},
{
"group": "에이전트 (Agents)",
"group": "Agents",
"pages": ["ko/guides/agents/crafting-effective-agents"]
},
{
"group": "크루 (Crews)",
"group": "Crews",
"pages": ["ko/guides/crews/first-crew"]
},
{
"group": "플로우 (Flows)",
"group": "Flows",
"pages": [
"ko/guides/flows/first-flow",
"ko/guides/flows/mastering-flow-state"
@@ -799,7 +795,7 @@
]
},
{
"group": "도구 (Tools)",
"group": "도구",
"pages": [
"ko/tools/overview",
{
@@ -889,7 +885,7 @@
]
},
{
"group": "클라우드 & 스토리지",
"group": "클라우드 & 저장",
"pages": [
"ko/tools/cloud-storage/overview",
"ko/tools/cloud-storage/s3readertool",
@@ -911,7 +907,7 @@
]
},
{
"group": "Observability",
"group": "오브저버빌리티",
"pages": [
"ko/observability/overview",
"ko/observability/agentops",
@@ -930,7 +926,7 @@
]
},
{
"group": "학습",
"group": "익히다",
"pages": [
"ko/learn/overview",
"ko/learn/llm-selection-guide",
@@ -954,13 +950,13 @@
]
},
{
"group": "Telemetry",
"group": "원격측정",
"pages": ["ko/telemetry"]
}
]
},
{
"tab": "엔터프라이즈",
"tab": "기업",
"groups": [
{
"group": "시작 안내",
@@ -969,7 +965,6 @@
{
"group": "특징",
"pages": [
"ko/enterprise/features/rbac",
"ko/enterprise/features/tool-repository",
"ko/enterprise/features/webhook-streaming",
"ko/enterprise/features/traces",
@@ -1000,7 +995,7 @@
]
},
{
"group": "How-To Guides",
"group": "사용 안내서",
"pages": [
"ko/enterprise/guides/build-crew",
"ko/enterprise/guides/deploy-crew",
@@ -1033,7 +1028,7 @@
},
{
"group": "Endpoints",
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.ko.yaml"
"openapi": "enterprise-api.yaml"
}
]
},
@@ -1042,7 +1037,7 @@
"groups": [
{
"group": "예시",
"pages": ["ko/examples/example", "ko/examples/cookbooks"]
"pages": ["ko/examples/example"]
}
]
}

View File

@@ -177,7 +177,14 @@ class MyCustomCrew:
# Your crew implementation...
```
This is how third-party event listeners are registered in the CrewAI codebase.
This is exactly how CrewAI's built-in `agentops_listener` is registered. In the CrewAI codebase, you'll find:
```python
# src/crewai/utilities/events/third_party/__init__.py
from .agentops_listener import agentops_listener
```
This ensures the `agentops_listener` is loaded when the `crewai.utilities.events` package is imported.
## Available Event Types
@@ -273,6 +280,77 @@ The structure of the event object depends on the event type, but all events inhe
Additional fields vary by event type. For example, `CrewKickoffCompletedEvent` includes `crew_name` and `output` fields.
## Real-World Example: Integration with AgentOps
CrewAI includes an example of a third-party integration with [AgentOps](https://github.com/AgentOps-AI/agentops), a monitoring and observability platform for AI agents. Here's how it's implemented:
```python
from typing import Optional
from crewai.utilities.events import (
CrewKickoffCompletedEvent,
ToolUsageErrorEvent,
ToolUsageStartedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
from crewai.utilities.events.task_events import TaskEvaluationEvent
try:
import agentops
AGENTOPS_INSTALLED = True
except ImportError:
AGENTOPS_INSTALLED = False
class AgentOpsListener(BaseEventListener):
tool_event: Optional["agentops.ToolEvent"] = None
session: Optional["agentops.Session"] = None
def __init__(self):
super().__init__()
def setup_listeners(self, crewai_event_bus):
if not AGENTOPS_INSTALLED:
return
@crewai_event_bus.on(CrewKickoffStartedEvent)
def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
self.session = agentops.init()
for agent in source.agents:
if self.session:
self.session.create_agent(
name=agent.role,
agent_id=str(agent.id),
)
@crewai_event_bus.on(CrewKickoffCompletedEvent)
def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
if self.session:
self.session.end_session(
end_state="Success",
end_state_reason="Finished Execution",
)
@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
self.tool_event = agentops.ToolEvent(name=event.tool_name)
if self.session:
self.session.record(self.tool_event)
@crewai_event_bus.on(ToolUsageErrorEvent)
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
```
This listener initializes an AgentOps session when a Crew starts, registers agents with AgentOps, tracks tool usage, and ends the session when the Crew completes.
The AgentOps listener is registered in CrewAI's event system through the import in `src/crewai/utilities/events/third_party/__init__.py`:
```python
from .agentops_listener import agentops_listener
```
This ensures the `agentops_listener` is loaded when the `crewai.utilities.events` package is imported.
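For completeness, the `agentops_listener` object imported above is presumably just a module-level instance of the `AgentOpsListener` class shown earlier, along the lines of this sketch (the actual module contents may differ):
```python
# src/crewai/utilities/events/third_party/agentops_listener.py (sketch)
agentops_listener = AgentOpsListener()
```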
## Advanced Usage: Scoped Handlers

View File

@@ -539,71 +539,16 @@ crew = Crew(
)
```
### Mem0 Provider
Short-Term Memory and Entity Memory both support tight integration with Mem0 OSS and the Mem0 Client as a provider. Here is how you can use Mem0 as a provider.
```python
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.memory.entity.entity_memory import EntityMemory
mem0_oss_embedder_config = {
"provider": "mem0",
"config": {
"user_id": "john",
"local_mem0_config": {
"vector_store": {"provider": "qdrant","config": {"host": "localhost", "port": 6333}},
"llm": {"provider": "openai","config": {"api_key": "your-api-key", "model": "gpt-4"}},
"embedder": {"provider": "openai","config": {"api_key": "your-api-key", "model": "text-embedding-3-small"}}
},
"infer": True # Optional defaults to True
},
}
mem0_client_embedder_config = {
"provider": "mem0",
"config": {
"user_id": "john",
"org_id": "my_org_id", # Optional
"project_id": "my_project_id", # Optional
"api_key": "custom-api-key" # Optional - overrides env var
"run_id": "my_run_id", # Optional - for short-term memory
"includes": "include1", # Optional
"excludes": "exclude1", # Optional
"infer": True # Optional defaults to True
"custom_categories": new_categories # Optional - custom categories for user memory
},
}
short_term_memory_mem0_oss = ShortTermMemory(embedder_config=mem0_oss_embedder_config) # Short Term Memory with Mem0 OSS
short_term_memory_mem0_client = ShortTermMemory(embedder_config=mem0_client_embedder_config) # Short Term Memory with Mem0 Client
entity_memory_mem0_oss = EntityMemory(embedder_config=mem0_oss_embedder_config) # Entity Memory with Mem0 OSS
entity_memory_mem0_client = EntityMemory(embedder_config=mem0_client_embedder_config) # Entity Memory with Mem0 Client
crew = Crew(
memory=True,
short_term_memory=short_term_memory_mem0_oss, # or short_term_memory_mem0_client
entity_memory=entity_memory_mem0_oss # or entity_memory_mem0_client
)
```
### Choosing the Right Embedding Provider
When selecting an embedding provider, consider factors like performance, privacy, cost, and integration needs.
Below is a comparison to help you decide:
| Provider | Best For | Pros | Cons |
| -------------- | ------------------------------ | --------------------------------- | ------------------------- |
| **OpenAI** | General use, high reliability | High quality, widely tested | Paid service, API key required |
| **Ollama** | Privacy-focused, cost savings | Free, runs locally, fully private | Requires local installation/setup |
| **Google AI** | Integration in Google ecosystem| Strong performance, good support | Google account required |
| **Azure OpenAI** | Enterprise & compliance needs| Enterprise-grade features, security | More complex setup process |
| **Cohere** | Multilingual content handling | Excellent language support | More niche use cases |
| **VoyageAI** | Information retrieval & search | Optimized for retrieval tasks | Relatively new provider |
| **Mem0** | Per-user personalization | Search-optimized embeddings | Paid service, API key required |
| Provider | Best For | Pros | Cons |
|:---------|:----------|:------|:------|
| **OpenAI** | General use, reliability | High quality, well-tested | Cost, requires API key |
| **Ollama** | Privacy, cost savings | Free, local, private | Requires local setup |
| **Google AI** | Google ecosystem | Good performance | Requires Google account |
| **Azure OpenAI** | Enterprise, compliance | Enterprise features | Complex setup |
| **Cohere** | Multilingual content | Great language support | Specialized use case |
| **VoyageAI** | Retrieval tasks | Optimized for search | Newer provider |
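Switching providers generally comes down to changing the embedder configuration dictionary. As a rough sketch following the Mem0 examples above — the provider name and config keys shown here are assumptions, so check the provider-specific sections of this page for the exact options:
```python
from crewai.memory.short_term.short_term_memory import ShortTermMemory

# Hypothetical example: same embedder_config pattern as above, but with OpenAI
openai_embedder_config = {
    "provider": "openai",
    "config": {
        "api_key": "your-api-key",
        "model": "text-embedding-3-small",
    },
}

short_term_memory = ShortTermMemory(embedder_config=openai_embedder_config)
```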
### Environment Variable Configuration

View File

@@ -1,103 +0,0 @@
---
title: "Role-Based Access Control (RBAC)"
description: "Control access to crews, tools, and data with roles, scopes, and granular permissions."
icon: "shield"
---
## Overview
RBAC in CrewAI Enterprise enables secure, scalable access management through a combination of organization-level roles and automation-level visibility controls.
<Frame>
<img src="/images/enterprise/users_and_roles.png" alt="RBAC overview in CrewAI Enterprise" />
</Frame>
## Users and Roles
Each member in your CrewAI workspace is assigned a role, which determines their access across various features.
You can:
- Use predefined roles (Owner, Member)
- Create custom roles tailored to specific permissions
- Assign roles at any time through the settings panel
You can configure users and roles in Settings → Roles.
<Steps>
<Step title="Open Roles settings">
Go to <b>Settings → Roles</b> in CrewAI Enterprise.
</Step>
<Step title="Choose a role type">
Use a predefined role (<b>Owner</b>, <b>Member</b>) or click <b>Create role</b> to define a custom one.
</Step>
<Step title="Assign to members">
Select users and assign the role. You can change this anytime.
</Step>
</Steps>
### Configuration summary
| Area | Where to configure | Options |
|:---|:---|:---|
| Users & Roles | Settings → Roles | Predefined: Owner, Member; Custom roles |
| Automation visibility | Automation → Settings → Visibility | Private; Whitelist users/roles |
## Automation-level Access Control
In addition to organization-wide roles, CrewAI Automations support fine-grained visibility settings that let you restrict access to specific automations by user or role.
This is useful for:
- Keeping sensitive or experimental automations private
- Managing visibility across large teams or external collaborators
- Testing automations in isolated contexts
Deployments can be configured as private, meaning only whitelisted users and roles will be able to:
- View the deployment
- Run it or interact with its API
- Access its logs, metrics, and settings
The organization owner always has access, regardless of visibility settings.
You can configure automation-level access control in the Automation → Settings → Visibility tab.
<Steps>
<Step title="Open Visibility tab">
Navigate to <b>Automation → Settings → Visibility</b>.
</Step>
<Step title="Set visibility">
Choose <b>Private</b> to restrict access. The organization owner always retains access.
</Step>
<Step title="Whitelist access">
Add specific users and roles allowed to view, run, and access logs/metrics/settings.
</Step>
<Step title="Save and verify">
Save changes, then confirm that non-whitelisted users cannot view or run the automation.
</Step>
</Steps>
### Private visibility: access outcomes
| Action | Owner | Whitelisted user/role | Not whitelisted |
|:---|:---|:---|:---|
| View automation | ✓ | ✓ | ✗ |
| Run automation/API | ✓ | ✓ | ✗ |
| Access logs/metrics/settings | ✓ | ✓ | ✗ |
<Tip>
The organization owner always has access. In private mode, only whitelisted users and roles can view, run, and access logs/metrics/settings.
</Tip>
<Frame>
<img src="/images/enterprise/visibility.png" alt="Automation Visibility settings in CrewAI Enterprise" />
</Frame>
<Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
Contact our support team for assistance with RBAC questions.
</Card>

View File

@@ -1,22 +0,0 @@
---
title: CrewAI Cookbooks
description: Feature-focused quickstarts and notebooks for learning patterns fast.
icon: book
---
## Quickstarts & Demos
<CardGroup cols={2}>
<Card title="Task Guardrails" icon="shield-check" href="https://github.com/crewAIInc/crewAI-quickstarts/tree/main/Task%20Guardrails">
Interactive notebooks for hands-on exploration.
</Card>
<Card title="Browse Quickstarts" icon="bolt" href="https://github.com/crewAIInc/crewAI-quickstarts">
Feature demos and small projects showcasing specific CrewAI capabilities.
</Card>
</CardGroup>
<Tip>
Use Cookbooks to learn a pattern quickly, then jump to Full Examples for production-grade implementations.
</Tip>

View File

@@ -1,85 +1,62 @@
---
title: CrewAI Examples
description: Explore curated examples organized by Crews, Flows, Integrations, and Notebooks.
description: A collection of examples that show how to use the CrewAI framework to automate workflows.
icon: rocket-launch
---
## Crews
<CardGroup cols={3}>
<Card title="Marketing Strategy" icon="bullhorn" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/marketing_strategy">
Multi-agent marketing campaign planning.
</Card>
<Card title="Surprise Trip" icon="plane" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/surprise_trip">
Personalized surprise travel planning.
</Card>
<Card title="Match Profile to Positions" icon="id-card" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/match_profile_to_positions">
CV-to-job matching with vector search.
</Card>
<Card title="Job Posting" icon="newspaper" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/job-posting">
Automated job description creation.
</Card>
<Card title="Game Builder Crew" icon="gamepad" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/game-builder-crew">
Multi-agent team that designs and builds Python games.
</Card>
<Card title="Recruitment" icon="user-group" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/recruitment">
Candidate sourcing and evaluation.
</Card>
<Card title="Browse all Crews" icon="users" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews">
See the full list of crew examples.
</Card>
</CardGroup>
## Flows
<CardGroup cols={3}>
<Card title="Content Creator Flow" icon="pen" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/content_creator_flow">
Multi-crew content generation with routing.
</Card>
<Card title="Email Auto Responder" icon="envelope" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/email_auto_responder_flow">
Automated email monitoring and replies.
</Card>
<Card title="Lead Score Flow" icon="chart-line" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/lead_score_flow">
Lead qualification with human-in-the-loop review.
</Card>
<Card title="Meeting Assistant Flow" icon="calendar" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/meeting_assistant_flow">
Notes processing with integrations.
</Card>
<Card title="Self Evaluation Loop" icon="rotate" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/self_evaluation_loop_flow">
Iterative self-improvement workflows.
</Card>
<Card title="Write a Book (Flows)" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/write_a_book_with_flows">
Parallel chapter generation.
</Card>
<Card title="Browse all Flows" icon="diagram-project" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows">
See the full list of flow examples.
</Card>
</CardGroup>
## Integrations
<CardGroup cols={3}>
<Card title="CrewAI ↔ LangGraph" icon="link" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/crewai-langgraph">
Integration with LangGraph framework.
</Card>
<Card title="Azure OpenAI" icon="cloud" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/azure_model">
Using CrewAI with Azure OpenAI.
</Card>
<Card title="NVIDIA Models" icon="microchip" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/nvidia_models">
NVIDIA ecosystem integrations.
</Card>
<Card title="Browse Integrations" icon="puzzle-piece" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations">
See all integration examples.
</Card>
</CardGroup>
## Notebooks
<CardGroup cols={2}>
<Card title="Simple QA Crew + Flow" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks/Simple%20QA%20Crew%20%2B%20Flow">
Simple QA Crew + Flow.
</Card>
<Card title="All Notebooks" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks">
Interactive examples for learning and experimentation.
<Card
title="Marketing Strategy"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/marketing_strategy"
icon="bullhorn"
iconType="solid"
>
Automate marketing strategy creation with CrewAI.
</Card>
<Card
title="Surprise Trip"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/surprise_trip"
icon="plane"
iconType="duotone"
>
Create a surprise trip itinerary with CrewAI.
</Card>
<Card
title="Match Profile to Positions"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/match_profile_to_positions"
icon="linkedin"
iconType="duotone"
>
Match a profile to job positions with CrewAI.
</Card>
<Card
title="Create Job Posting"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/job-posting"
icon="newspaper"
iconType="duotone"
>
Create a job posting with CrewAI.
</Card>
<Card
title="Game Generator"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/game-builder-crew"
icon="gamepad"
iconType="duotone"
>
Create a game with CrewAI.
</Card>
<Card
title="Find Job Candidates"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/recruitment"
icon="user-group"
iconType="duotone"
>
Find job candidates with CrewAI.
</Card>
</CardGroup>

View File

@@ -1,184 +1,126 @@
---
title: "AgentOps Integration"
description: "Monitor and analyze your CrewAI agents with AgentOps observability platform"
title: AgentOps Integration
description: Understanding and logging your agent performance with AgentOps.
icon: paperclip
---
# AgentOps Integration
# Introduction
AgentOps is a powerful observability platform designed specifically for AI agents. It provides comprehensive monitoring, analytics, and debugging capabilities for your CrewAI crews.
Observability is a key aspect of developing and deploying conversational AI agents. It allows developers to understand how their agents perform, how they interact with users, and how they use external tools and APIs.
AgentOps is a product independent of CrewAI that provides a comprehensive observability solution for agents.
## Features
## AgentOps
- **Real-time Monitoring**: Track agent performance and behavior in real-time
- **Session Replay**: Review complete agent sessions with detailed execution traces
- **Performance Analytics**: Analyze crew efficiency, tool usage, and task completion rates
- **Error Tracking**: Identify and debug issues in agent workflows
- **Cost Tracking**: Monitor LLM usage and associated costs
- **Team Collaboration**: Share insights and collaborate on agent optimization
[AgentOps](https://agentops.ai/?=crew) provides session replays, metrics, and monitoring for agents.
## Installation
At a high level, AgentOps gives you the ability to monitor cost, token usage, latency, agent failures, session-wide statistics, and more.
For more info, check out the [AgentOps Repo](https://github.com/AgentOps-AI/agentops).
Install AgentOps alongside CrewAI:
### Overview
```bash
pip install crewai[agentops]
```
AgentOps provides monitoring for agents in development and production, along with a dashboard for tracking agent performance, session replays, and custom reporting.
Or install AgentOps separately:
Additionally, AgentOps provides session drilldowns for viewing Crew agent interactions, LLM calls, and tool usage in real-time.
This feature is useful for debugging and understanding how agents interact with users as well as other agents.
```bash
pip install agentops
```
![Overview of a select series of agent session runs](/images/agentops-overview.png)
![Overview of session drilldowns for examining agent runs](/images/agentops-session.png)
![Viewing a step-by-step agent replay execution graph](/images/agentops-replay.png)
## Setup
### Features
1. **Get your API Key**: Sign up at [AgentOps](https://agentops.ai) and get your API key
- **LLM Cost Management and Tracking**: Track spend with foundation model providers.
- **Replay Analytics**: Watch step-by-step agent execution graphs.
- **Recursive Thought Detection**: Identify when agents fall into infinite loops.
- **Custom Reporting**: Create custom analytics on agent performance.
- **Analytics Dashboard**: Monitor high-level statistics about agents in development and production.
- **Public Model Testing**: Test your agents against benchmarks and leaderboards.
- **Custom Tests**: Run your agents against domain-specific tests.
- **Time Travel Debugging**: Restart your sessions from checkpoints.
- **Compliance and Security**: Create audit logs and detect potential threats such as profanity and PII leaks.
- **Prompt Injection Detection**: Identify potential code injection and secret leaks.
2. **Configure your environment**: Set your AgentOps API key as an environment variable:
### Using AgentOps
```bash
export AGENTOPS_API_KEY="your-api-key-here"
```
<Steps>
<Step title="Create an API Key">
Create a user API key here: [Create API Key](https://app.agentops.ai/account)
</Step>
<Step title="Configure Your Environment">
Add your API key to your environment variables:
```bash
AGENTOPS_API_KEY=<YOUR_AGENTOPS_API_KEY>
```
</Step>
<Step title="Install AgentOps">
Install AgentOps with:
```bash
pip install 'crewai[agentops]'
```
or
```bash
pip install agentops
```
</Step>
<Step title="Initialize AgentOps">
Before using `Crew` in your script, include these lines:
3. **Initialize AgentOps**: Add this to your CrewAI script:
```python
import agentops
agentops.init()
```
```python
import agentops
from crewai import Agent, Task, Crew
This will initiate an AgentOps session as well as automatically track Crew agents. For further info on how to outfit more complex agentic systems,
check out the [AgentOps documentation](https://docs.agentops.ai) or join the [Discord](https://discord.gg/j4f3KbeH).
</Step>
</Steps>
# Initialize AgentOps
agentops.init()
### Crew + AgentOps Examples
# Your CrewAI code here
agent = Agent(
role="Data Analyst",
goal="Analyze data and provide insights",
backstory="You are an expert data analyst...",
)
<CardGroup cols={3}>
<Card
title="Job Posting"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/job-posting"
icon="briefcase"
iconType="solid"
>
Example of a Crew agent that generates job posts.
</Card>
<Card
title="Markdown Validator"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/markdown_validator"
icon="markdown"
iconType="solid"
>
Example of a Crew agent that validates Markdown files.
</Card>
<Card
title="Instagram Post"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/instagram_post"
icon="square-instagram"
iconType="brands"
>
Example of a Crew agent that generates Instagram posts.
</Card>
</CardGroup>
task = Task(
description="Analyze the sales data and provide insights",
agent=agent,
)
### Further Information
crew = Crew(
agents=[agent],
tasks=[task],
)
To get started, create an [AgentOps account](https://agentops.ai/?=crew).
# Run your crew
result = crew.kickoff()
For feature requests or bug reports, please reach out to the AgentOps team on the [AgentOps Repo](https://github.com/AgentOps-AI/agentops).
# End the AgentOps session
agentops.end_session("Success")
```
#### Extra links
## Automatic Integration
CrewAI automatically integrates with AgentOps when the library is installed. The integration captures:
- **Crew Kickoff Events**: Start and completion of crew executions
- **Tool Usage**: All tool calls and their results
- **Task Evaluations**: Task performance metrics and feedback
- **Error Events**: Any errors that occur during execution
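As a point of reference, a minimal custom listener that subscribes to the same kinds of events might look like the sketch below. It reuses `BaseEventListener` and the event classes shown in the event-listener docs; the handler names and print statements are illustrative only:
```python
from crewai.utilities.events import CrewKickoffCompletedEvent, ToolUsageStartedEvent
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent


class LoggingListener(BaseEventListener):
    """Tiny sketch: log the same event categories the automatic integration captures."""

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_started(source, event):
            print("Crew kickoff started")

        @crewai_event_bus.on(CrewKickoffCompletedEvent)
        def on_completed(source, event):
            print(f"Crew '{event.crew_name}' finished")

        @crewai_event_bus.on(ToolUsageStartedEvent)
        def on_tool_used(source, event):
            print(f"Tool used: {event.tool_name}")


# Instantiating the listener registers its handlers on the event bus
logging_listener = LoggingListener()
```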
## Configuration Options
You can customize the AgentOps integration:
```python
import agentops
# Configure AgentOps with custom settings
agentops.init(
api_key="your-api-key",
tags=["production", "data-analysis"],
auto_start_session=True,
instrument_llm_calls=True,
)
```
## Viewing Your Data
1. **Dashboard**: Visit the AgentOps dashboard to view your agent sessions
2. **Session Details**: Click on any session to see detailed execution traces
3. **Analytics**: Use the analytics tab to identify performance trends
4. **Errors**: Monitor the errors tab for debugging information
## Best Practices
- **Tag Your Sessions**: Use meaningful tags to organize your agent runs
- **Monitor Costs**: Keep track of LLM usage and associated costs
- **Review Errors**: Regularly check for and address any errors
- **Optimize Performance**: Use analytics to identify bottlenecks and optimization opportunities
## Troubleshooting
### AgentOps Not Recording Data
1. Verify your API key is set correctly
2. Check that AgentOps is properly initialized
3. Ensure you're calling `agentops.end_session()` at the end of your script
### Missing Events
If some events aren't being captured:
1. Make sure you have the latest version of both CrewAI and AgentOps
2. Check that the AgentOps listener is properly registered
3. Review the logs for any error messages
## Example: Complete Integration
```python
import os
import agentops
from crewai import Agent, Task, Crew, Process
# Initialize AgentOps
agentops.init(
api_key=os.getenv("AGENTOPS_API_KEY"),
tags=["example", "tutorial"],
)
# Define your agents
researcher = Agent(
role="Research Specialist",
goal="Conduct thorough research on given topics",
backstory="You are an expert researcher with access to various tools...",
)
writer = Agent(
role="Content Writer",
goal="Create engaging content based on research",
backstory="You are a skilled writer who can transform research into compelling content...",
)
# Define your tasks
research_task = Task(
description="Research the latest trends in AI and machine learning",
agent=researcher,
)
writing_task = Task(
description="Write a blog post about AI trends based on the research",
agent=writer,
)
# Create and run your crew
crew = Crew(
agents=[researcher, writer],
tasks=[research_task, writing_task],
process=Process.sequential,
)
try:
result = crew.kickoff()
print(result)
agentops.end_session("Success")
except Exception as e:
print(f"Error: {e}")
agentops.end_session("Fail")
```
This integration provides comprehensive observability for your CrewAI agents, helping you monitor, debug, and optimize your AI workflows.
<a href="https://twitter.com/agentopsai/">🐦 Twitter</a>
<span>&nbsp;&nbsp;•&nbsp;&nbsp;</span>
<a href="https://discord.gg/JHPt4C7r">📢 Discord</a>
<span>&nbsp;&nbsp;•&nbsp;&nbsp;</span>
<a href="https://app.agentops.ai/?=crew">🖇️ AgentOps Dashboard</a>
<span>&nbsp;&nbsp;•&nbsp;&nbsp;</span>
<a href="https://docs.agentops.ai/introduction">📙 Documentation</a>

View File

@@ -21,6 +21,9 @@ Observability is crucial for understanding how your CrewAI agents perform, ident
### Monitoring & Tracing Platforms
<CardGroup cols={2}>
<Card title="AgentOps" icon="paperclip" href="/en/observability/agentops">
Session replays, metrics, and monitoring for agent development and production.
</Card>
<Card title="LangDB" icon="database" href="/en/observability/langdb">
End-to-end tracing for CrewAI workflows with automatic agent interaction capture.

View File

@@ -1,146 +0,0 @@
---
title: TrueFoundry Integration
icon: chart-line
---
TrueFoundry provides an enterprise-ready [AI Gateway](https://www.truefoundry.com/ai-gateway) that integrates with agentic frameworks like CrewAI and provides governance and observability for your AI applications. The TrueFoundry AI Gateway serves as a unified interface for LLM access, providing:
- **Unified API Access**: Connect to 250+ LLMs (OpenAI, Claude, Gemini, Groq, Mistral) through one API
- **Low Latency**: Sub-3ms internal latency with intelligent routing and load balancing
- **Enterprise Security**: SOC 2, HIPAA, GDPR compliance with RBAC and audit logging
- **Quota and Cost Management**: Token-based quotas, rate limiting, and comprehensive usage tracking
- **Observability**: Full request/response logging, metrics, and traces with customizable retention
## How TrueFoundry Integrates with CrewAI
### Installation & Setup
<Steps>
<Step title="Install CrewAI">
```bash
pip install crewai
```
</Step>
<Step title="Get TrueFoundry Access Token">
1. Sign up for a [TrueFoundry account](https://www.truefoundry.com/register)
2. Follow the steps here in [Quick start](https://docs.truefoundry.com/gateway/quick-start)
</Step>
<Step title="Configure CrewAI with TrueFoundry">
![TrueFoundry Code Configuration](/images/new-code-snippet.png)
```python
from crewai import LLM
# Create an LLM instance with TrueFoundry AI Gateway
truefoundry_llm = LLM(
model="openai-main/gpt-4o", # Similarly, you can call any model from any provider
base_url="your_truefoundry_gateway_base_url",
api_key="your_truefoundry_api_key"
)
# Use in your CrewAI agents
from crewai import Agent
@agent
def researcher(self) -> Agent:
return Agent(
config=self.agents_config['researcher'],
llm=truefoundry_llm,
verbose=True
)
```
</Step>
</Steps>
### Complete CrewAI Example
```python
from crewai import Agent, Task, Crew, LLM
# Configure LLM with TrueFoundry
llm = LLM(
model="openai-main/gpt-4o",
base_url="your_truefoundry_gateway_base_url",
api_key="your_truefoundry_api_key"
)
# Create agents
researcher = Agent(
role='Research Analyst',
goal='Conduct detailed market research',
backstory='Expert market analyst with attention to detail',
llm=llm,
verbose=True
)
writer = Agent(
role='Content Writer',
goal='Create comprehensive reports',
backstory='Experienced technical writer',
llm=llm,
verbose=True
)
# Create tasks
research_task = Task(
description='Research AI market trends for 2024',
agent=researcher,
expected_output='Comprehensive research summary'
)
writing_task = Task(
description='Create a market research report',
agent=writer,
expected_output='Well-structured report with insights',
context=[research_task]
)
# Create and execute crew
crew = Crew(
agents=[researcher, writer],
tasks=[research_task, writing_task],
verbose=True
)
result = crew.kickoff()
```
### Observability and Governance
Monitor your CrewAI agents through TrueFoundry's metrics tab:
![TrueFoundry metrics](/images/gateway-metrics.png)
With TrueFoundry's AI Gateway, you can monitor and analyze:
- **Performance Metrics**: Track key latency metrics like Request Latency, Time to First Token (TTFT), and Inter-Token Latency (ITL) with P99, P90, and P50 percentiles
- **Cost and Token Usage**: Gain visibility into your application's costs with detailed breakdowns of input/output tokens and the associated expenses for each model
- **Usage Patterns**: Understand how your application is being used with detailed analytics on user activity, model distribution, and team-based usage
- **Rate Limiting and Load Balancing**: Set up rate limiting, load balancing, and fallbacks for your models
## Tracing
For a more detailed understanding of tracing, please see [getting-started-tracing](https://docs.truefoundry.com/docs/tracing/tracing-getting-started). For tracing, you can add the Traceloop SDK:
```bash
pip install traceloop-sdk
```
```python
from traceloop.sdk import Traceloop
# Initialize enhanced tracing
Traceloop.init(
api_endpoint="https://your-truefoundry-endpoint/api/tracing",
headers={
"Authorization": f"Bearer {your_truefoundry_pat_token}",
"TFY-Tracing-Project": "your_project_name",
},
)
```
This provides additional trace correlation across your entire CrewAI workflow.
![TrueFoundry CrewAI Tracing](/images/tracing_crewai.png)

View File

@@ -1,435 +0,0 @@
openapi: 3.0.3
info:
title: CrewAI Enterprise API
description: |
REST API for interacting with your deployed CrewAI crews on CrewAI Enterprise.
## Getting Started
1. **Find your crew URL**: Get your unique crew URL from the CrewAI Enterprise dashboard
2. **Copy examples**: Use the code examples from each endpoint page as templates
3. **Replace placeholders**: Update URLs and tokens with your actual values
4. **Test with your tools**: Use cURL, Postman, or your preferred API client
## Authentication
All API requests require a bearer token for authentication. There are two types of tokens:
- **Bearer Token**: Organization-level token for full crew operations
- **User Bearer Token**: User-scoped token for individual access with limited permissions
You can find your bearer tokens in the Status tab of your crew's detail page in the CrewAI Enterprise dashboard.
## Reference Documentation
This documentation provides comprehensive examples for each endpoint:
- **Request formats** with all required and optional parameters
- **Response examples** for success and error scenarios
- **Code samples** in multiple programming languages
- **Authentication patterns** with proper Bearer token usage
Copy the examples and customize them with your actual crew URL and authentication tokens.
## Workflow
1. **Discover inputs** using `GET /inputs`
2. **Start execution** using `POST /kickoff`
3. **Monitor progress** using `GET /status/{kickoff_id}`
version: 1.0.0
contact:
name: CrewAI Support
email: support@crewai.com
url: https://crewai.com
servers:
- url: https://your-actual-crew-name.crewai.com
description: Replace with your actual deployed crew URL from the CrewAI Enterprise dashboard
- url: https://my-travel-crew.crewai.com
description: Example travel planning crew (replace with your URL)
- url: https://content-creation-crew.crewai.com
description: Example content creation crew (replace with your URL)
- url: https://research-assistant-crew.crewai.com
description: Example research assistant crew (replace with your URL)
security:
- BearerAuth: []
paths:
/inputs:
get:
summary: Get Required Inputs
description: |
**📋 Reference Example Only** - *This shows the request format. To test with your actual crew, copy the cURL example and replace the URL + token with your real values.*
Retrieves the list of all required input parameters that your crew expects for execution.
Use this endpoint to discover what inputs you need to provide when starting a crew execution.
operationId: getRequiredInputs
responses:
'200':
description: Successfully retrieved required inputs
content:
application/json:
schema:
type: object
properties:
inputs:
type: array
items:
type: string
description: Array of required input parameter names
example: ["budget", "interests", "duration", "age"]
examples:
travel_crew:
summary: Travel planning crew inputs
value:
inputs: ["budget", "interests", "duration", "age"]
outreach_crew:
summary: Outreach crew inputs
value:
inputs: ["name", "title", "company", "industry", "our_product", "linkedin_url"]
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
$ref: '#/components/responses/NotFoundError'
'500':
$ref: '#/components/responses/ServerError'
/kickoff:
post:
summary: Start Crew Execution
description: |
**📋 Reference Example Only** - *This shows the request format. To test with your actual crew, copy the cURL example and replace the URL + token with your real values.*
Initiates a new crew execution with the provided inputs. Returns a kickoff ID that can be used
to track the execution progress and retrieve results.
Crew executions can take anywhere from seconds to minutes depending on their complexity.
Consider using webhooks for real-time notifications or implement polling with the status endpoint.
operationId: startCrewExecution
requestBody:
required: true
content:
application/json:
schema:
type: object
required:
- inputs
properties:
inputs:
type: object
description: Key-value pairs of all required inputs for your crew
additionalProperties:
type: string
example:
budget: "1000 USD"
interests: "games, tech, ai, relaxing hikes, amazing food"
duration: "7 days"
age: "35"
meta:
type: object
description: Additional metadata to pass to the crew
additionalProperties: true
example:
requestId: "user-request-12345"
source: "mobile-app"
taskWebhookUrl:
type: string
format: uri
description: Callback URL executed after each task completion
example: "https://your-server.com/webhooks/task"
stepWebhookUrl:
type: string
format: uri
description: Callback URL executed after each agent thought/action
example: "https://your-server.com/webhooks/step"
crewWebhookUrl:
type: string
format: uri
description: Callback URL executed when the crew execution completes
example: "https://your-server.com/webhooks/crew"
examples:
travel_planning:
summary: Travel planning crew
value:
inputs:
budget: "1000 USD"
interests: "games, tech, ai, relaxing hikes, amazing food"
duration: "7 days"
age: "35"
meta:
requestId: "travel-req-123"
source: "web-app"
outreach_campaign:
summary: Outreach crew with webhooks
value:
inputs:
name: "John Smith"
title: "CTO"
company: "TechCorp"
industry: "Software"
our_product: "AI Development Platform"
linkedin_url: "https://linkedin.com/in/johnsmith"
taskWebhookUrl: "https://api.example.com/webhooks/task"
crewWebhookUrl: "https://api.example.com/webhooks/crew"
responses:
'200':
description: Crew execution started successfully
content:
application/json:
schema:
type: object
properties:
kickoff_id:
type: string
format: uuid
description: Unique identifier for tracking this execution
example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
'400':
description: Invalid request body or missing required inputs
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
'401':
$ref: '#/components/responses/UnauthorizedError'
'422':
description: Validation error - ensure all required inputs are provided
content:
application/json:
schema:
$ref: '#/components/schemas/ValidationError'
'500':
$ref: '#/components/responses/ServerError'
/status/{kickoff_id}:
get:
summary: Get Execution Status
description: |
**📋 Reference Example Only** - *This shows the request format. To test with your actual crew, copy the cURL example and replace the URL + token with your real values.*
Retrieves the current status and results of a crew execution using its kickoff ID.
The response structure varies depending on the execution state:
- **running**: Execution in progress with current task info
- **completed**: Execution finished with full results
- **error**: Execution failed with error details
operationId: getExecutionStatus
parameters:
- name: kickoff_id
in: path
required: true
description: The kickoff ID returned from the /kickoff endpoint
schema:
type: string
format: uuid
example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
responses:
'200':
description: Successfully retrieved execution status
content:
application/json:
schema:
oneOf:
- $ref: '#/components/schemas/ExecutionRunning'
- $ref: '#/components/schemas/ExecutionCompleted'
- $ref: '#/components/schemas/ExecutionError'
examples:
running:
summary: Execution in progress
value:
status: "running"
current_task: "research_task"
progress:
completed_tasks: 1
total_tasks: 3
completed:
summary: Execution completed successfully
value:
status: "completed"
result:
output: "Comprehensive travel itinerary for 7 days in Japan focusing on tech culture..."
tasks:
- task_id: "research_task"
output: "Research findings on tech destinations in Japan..."
agent: "Travel Researcher"
execution_time: 45.2
- task_id: "planning_task"
output: "7-day detailed itinerary with activities and recommendations..."
agent: "Trip Planner"
execution_time: 62.8
execution_time: 108.5
error:
summary: Execution failed
value:
status: "error"
error: "Task execution failed: Invalid API key for external service"
execution_time: 23.1
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
description: Kickoff ID not found
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
example:
error: "Execution not found"
message: "No execution found with ID: abcd1234-5678-90ef-ghij-klmnopqrstuv"
'500':
$ref: '#/components/responses/ServerError'
components:
securitySchemes:
BearerAuth:
type: http
scheme: bearer
description: |
**📋 Reference Documentation** - *The tokens shown in examples are placeholders for reference only.*
Use your actual Bearer Token or User Bearer Token from the CrewAI Enterprise dashboard for real API calls.
**Bearer Token**: Organization-level access for full crew operations
**User Bearer Token**: User-scoped access with limited permissions
schemas:
ExecutionRunning:
type: object
properties:
status:
type: string
enum: ["running"]
example: "running"
current_task:
type: string
description: Name of the currently executing task
example: "research_task"
progress:
type: object
properties:
completed_tasks:
type: integer
description: Number of completed tasks
example: 1
total_tasks:
type: integer
description: Total number of tasks in the crew
example: 3
ExecutionCompleted:
type: object
properties:
status:
type: string
enum: ["completed"]
example: "completed"
result:
type: object
properties:
output:
type: string
description: Final output from the crew execution
example: "Comprehensive travel itinerary..."
tasks:
type: array
items:
$ref: '#/components/schemas/TaskResult'
execution_time:
type: number
description: Total execution time in seconds
example: 108.5
ExecutionError:
type: object
properties:
status:
type: string
enum: ["error"]
example: "error"
error:
type: string
description: Error message describing what went wrong
example: "Task execution failed: Invalid API key"
execution_time:
type: number
description: Time until error occurred in seconds
example: 23.1
TaskResult:
type: object
properties:
task_id:
type: string
description: Unique identifier for the task
example: "research_task"
output:
type: string
description: Output generated by this task
example: "Research findings..."
agent:
type: string
description: Name of the agent that executed this task
example: "Travel Researcher"
execution_time:
type: number
description: Time taken to execute this task in seconds
example: 45.2
Error:
type: object
properties:
error:
type: string
description: Error type or title
example: "Authentication Error"
message:
type: string
description: Detailed error message
example: "Invalid bearer token provided"
ValidationError:
type: object
properties:
error:
type: string
example: "Validation Error"
message:
type: string
example: "Missing required inputs"
details:
type: object
properties:
missing_inputs:
type: array
items:
type: string
example: ["budget", "interests"]
responses:
UnauthorizedError:
description: Authentication failed - check your bearer token
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
example:
error: "Unauthorized"
message: "Invalid or missing bearer token"
NotFoundError:
description: Resource not found
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
example:
error: "Not Found"
message: "The requested resource was not found"
ServerError:
description: Internal server error
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
example:
error: "Internal Server Error"
message: "An unexpected error occurred"

View File

@@ -1,231 +0,0 @@
openapi: 3.0.3
info:
title: CrewAI 엔터프라이즈 API
description: |
CrewAI Enterprise에 배포된 crew와 상호작용하기 위한 REST API입니다.
## 시작하기
1. **Crew URL 확인**: 대시보드에서 고유한 crew URL을 확인하세요
2. **예제 복사**: 각 엔드포인트의 예제를 템플릿으로 사용하세요
3. **플레이스홀더 교체**: 실제 URL과 토큰으로 바꾸세요
4. **도구로 테스트**: cURL, Postman 등 선호하는 도구로 테스트하세요
version: 1.0.0
contact:
name: CrewAI 지원
email: support@crewai.com
url: https://crewai.com
servers:
- url: https://your-actual-crew-name.crewai.com
description: 대시보드의 실제 crew URL로 교체하세요
security:
- BearerAuth: []
paths:
/inputs:
get:
summary: 필요 입력값 조회
description: |
**📋 참조 예제만 제공** - *요청 형식을 보여줍니다. 실제 호출은 cURL 예제를 복사해 URL과 토큰을 교체하세요.*
실행에 필요한 입력 파라미터 목록을 반환합니다.
operationId: getRequiredInputs
responses:
'200':
description: 입력값을 성공적으로 조회
content:
application/json:
schema:
type: object
properties:
inputs:
type: array
items:
type: string
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
$ref: '#/components/responses/NotFoundError'
'500':
$ref: '#/components/responses/ServerError'
/kickoff:
post:
summary: Crew 실행 시작
description: |
**📋 참조 예제만 제공** - *요청 형식을 보여줍니다. 실제 호출은 cURL 예제를 복사해 URL과 토큰을 교체하세요.*
제공된 입력으로 새로운 실행을 시작하고 kickoff ID를 반환합니다.
operationId: startCrewExecution
requestBody:
required: true
content:
application/json:
schema:
type: object
required:
- inputs
properties:
inputs:
type: object
additionalProperties:
type: string
responses:
'200':
description: 실행이 성공적으로 시작됨
content:
application/json:
schema:
type: object
properties:
kickoff_id:
type: string
format: uuid
'401':
$ref: '#/components/responses/UnauthorizedError'
'500':
$ref: '#/components/responses/ServerError'
/status/{kickoff_id}:
get:
summary: 실행 상태 조회
description: |
**📋 참조 예제만 제공** - *요청 형식을 보여줍니다. 실제 호출은 cURL 예제를 복사해 URL과 토큰을 교체하세요.*
kickoff ID로 실행 상태와 결과를 조회합니다.
operationId: getExecutionStatus
parameters:
- name: kickoff_id
in: path
required: true
schema:
type: string
format: uuid
responses:
'200':
description: 상태를 성공적으로 조회
content:
application/json:
schema:
oneOf:
- $ref: '#/components/schemas/ExecutionRunning'
- $ref: '#/components/schemas/ExecutionCompleted'
- $ref: '#/components/schemas/ExecutionError'
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
description: Kickoff ID를 찾을 수 없음
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
'500':
$ref: '#/components/responses/ServerError'
components:
securitySchemes:
BearerAuth:
type: http
scheme: bearer
description: |
**📋 참고** - *예시의 토큰은 자리 표시자입니다.* 실제 토큰을 사용하세요.
schemas:
ExecutionRunning:
type: object
properties:
status:
type: string
enum: ["running"]
current_task:
type: string
progress:
type: object
properties:
completed_tasks:
type: integer
total_tasks:
type: integer
ExecutionCompleted:
type: object
properties:
status:
type: string
enum: ["completed"]
result:
type: object
properties:
output:
type: string
tasks:
type: array
items:
$ref: '#/components/schemas/TaskResult'
execution_time:
type: number
ExecutionError:
type: object
properties:
status:
type: string
enum: ["error"]
error:
type: string
execution_time:
type: number
TaskResult:
type: object
properties:
task_id:
type: string
output:
type: string
agent:
type: string
execution_time:
type: number
Error:
type: object
properties:
error:
type: string
message:
type: string
ValidationError:
type: object
properties:
error:
type: string
message:
type: string
details:
type: object
properties:
missing_inputs:
type: array
items:
type: string
responses:
UnauthorizedError:
description: 인증 실패
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
NotFoundError:
description: 리소스를 찾을 수 없음
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
ServerError:
description: 서버 내부 오류
content:
application/json:
schema:
$ref: '#/components/schemas/Error'

View File

@@ -1,268 +0,0 @@
openapi: 3.0.3
info:
title: CrewAI Enterprise API
description: |
REST API para interagir com suas crews implantadas no CrewAI Enterprise.
## Introdução
1. **Encontre a URL da sua crew**: Obtenha sua URL única no painel do CrewAI Enterprise
2. **Copie os exemplos**: Use os exemplos de cada endpoint como modelo
3. **Substitua os placeholders**: Atualize URLs e tokens com seus valores reais
4. **Teste com suas ferramentas**: Use cURL, Postman ou seu cliente preferido
## Autenticação
Todas as requisições exigem um token bearer. Existem dois tipos:
- **Bearer Token**: Token em nível de organização para operações completas
- **User Bearer Token**: Token com escopo de usuário com permissões limitadas
Você encontra os tokens na aba Status da sua crew no painel do CrewAI Enterprise.
## Documentação de Referência
Este documento fornece exemplos completos para cada endpoint:
- **Formatos de requisição** com parâmetros obrigatórios e opcionais
- **Exemplos de resposta** para sucesso e erro
- **Amostras de código** em várias linguagens
- **Padrões de autenticação** com uso correto de Bearer token
Copie os exemplos e personalize com sua URL e tokens reais.
## Fluxo
1. **Descubra os inputs** usando `GET /inputs`
2. **Inicie a execução** usando `POST /kickoff`
3. **Monitore o progresso** usando `GET /status/{kickoff_id}`
version: 1.0.0
contact:
name: CrewAI Suporte
email: support@crewai.com
url: https://crewai.com
servers:
- url: https://your-actual-crew-name.crewai.com
description: Substitua pela URL real da sua crew no painel do CrewAI Enterprise
security:
- BearerAuth: []
paths:
/inputs:
get:
summary: Obter Inputs Requeridos
description: |
**📋 Exemplo de Referência** - *Mostra o formato da requisição. Para testar com sua crew real, copie o cURL e substitua URL + token.*
Retorna a lista de parâmetros de entrada que sua crew espera.
operationId: getRequiredInputs
responses:
'200':
description: Inputs requeridos obtidos com sucesso
content:
application/json:
schema:
type: object
properties:
inputs:
type: array
items:
type: string
description: Nomes dos parâmetros de entrada
example: ["budget", "interests", "duration", "age"]
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
$ref: '#/components/responses/NotFoundError'
'500':
$ref: '#/components/responses/ServerError'
/kickoff:
post:
summary: Iniciar Execução da Crew
description: |
**📋 Exemplo de Referência** - *Mostra o formato da requisição. Para testar com sua crew real, copie o cURL e substitua URL + token.*
Inicia uma nova execução da crew com os inputs fornecidos e retorna um kickoff ID.
operationId: startCrewExecution
requestBody:
required: true
content:
application/json:
schema:
type: object
required:
- inputs
properties:
inputs:
type: object
additionalProperties:
type: string
example:
budget: "1000 USD"
interests: "games, tech, ai, relaxing hikes, amazing food"
duration: "7 days"
age: "35"
responses:
'200':
description: Execução iniciada com sucesso
content:
application/json:
schema:
type: object
properties:
kickoff_id:
type: string
format: uuid
example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
'401':
$ref: '#/components/responses/UnauthorizedError'
'500':
$ref: '#/components/responses/ServerError'
/status/{kickoff_id}:
get:
summary: Obter Status da Execução
description: |
**📋 Exemplo de Referência** - *Mostra o formato da requisição. Para testar com sua crew real, copie o cURL e substitua URL + token.*
Retorna o status atual e os resultados de uma execução usando o kickoff ID.
operationId: getExecutionStatus
parameters:
- name: kickoff_id
in: path
required: true
schema:
type: string
format: uuid
responses:
'200':
description: Status recuperado com sucesso
content:
application/json:
schema:
oneOf:
- $ref: '#/components/schemas/ExecutionRunning'
- $ref: '#/components/schemas/ExecutionCompleted'
- $ref: '#/components/schemas/ExecutionError'
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
description: Kickoff ID não encontrado
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
'500':
$ref: '#/components/responses/ServerError'
components:
securitySchemes:
BearerAuth:
type: http
scheme: bearer
description: |
**📋 Referência** - *Os tokens mostrados são apenas exemplos.*
Use seus tokens reais do painel do CrewAI Enterprise.
schemas:
ExecutionRunning:
type: object
properties:
status:
type: string
enum: ["running"]
current_task:
type: string
progress:
type: object
properties:
completed_tasks:
type: integer
total_tasks:
type: integer
ExecutionCompleted:
type: object
properties:
status:
type: string
enum: ["completed"]
result:
type: object
properties:
output:
type: string
tasks:
type: array
items:
$ref: '#/components/schemas/TaskResult'
execution_time:
type: number
ExecutionError:
type: object
properties:
status:
type: string
enum: ["error"]
error:
type: string
execution_time:
type: number
TaskResult:
type: object
properties:
task_id:
type: string
output:
type: string
agent:
type: string
execution_time:
type: number
Error:
type: object
properties:
error:
type: string
message:
type: string
ValidationError:
type: object
properties:
error:
type: string
message:
type: string
details:
type: object
properties:
missing_inputs:
type: array
items:
type: string
responses:
UnauthorizedError:
description: Autenticação falhou
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
NotFoundError:
description: Recurso não encontrado
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
ServerError:
description: Erro interno do servidor
content:
application/json:
schema:
$ref: '#/components/schemas/Error'

Binary file not shown.

After

Width:  |  Height:  |  Size: 288 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 419 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 263 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 248 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 472 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 530 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 554 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 128 KiB

View File

@@ -177,7 +177,14 @@ class MyCustomCrew:
# Your crew implementation...
```
이것이 CrewAI 코드베이스에서 서드파티 이벤트 리스너가 등록되는 방식입니다.
이것이 바로 CrewAI의 내장 `agentops_listener`가 등록되는 방식과 동일합니다. CrewAI 코드베이스에서는 다음과 같이 되어 있습니다:
```python
# src/crewai/utilities/events/third_party/__init__.py
from .agentops_listener import agentops_listener
```
이렇게 하면 `crewai.utilities.events` 패키지가 임포트될 때 `agentops_listener`가 자동으로 로드됩니다.
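같은 패턴으로 자신만의 listener도 등록할 수 있습니다. 아래는 가정에 기반한 최소 스케치이며, `my_project/listeners.py` 경로와 `MyCustomListener`라는 이름은 설명을 위한 예시입니다:
```python
# my_project/listeners.py — 예시용 가상의 모듈 경로
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent


class MyCustomListener(BaseEventListener):
    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_crew_started(source, event: CrewKickoffStartedEvent):
            # crew 실행이 시작될 때 호출됩니다
            print(f"Crew '{event.crew_name}' 실행이 시작되었습니다")


# 패키지의 __init__.py에서 이 인스턴스를 import하면 자동으로 등록됩니다
my_custom_listener = MyCustomListener()
```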
## 사용 가능한 이벤트 유형
@@ -273,6 +280,77 @@ CrewAI는 여러분이 청취할 수 있는 다양한 이벤트를 제공합니
추가 필드는 이벤트 타입에 따라 다릅니다. 예를 들어, `CrewKickoffCompletedEvent`에는 `crew_name`과 `output` 필드가 포함됩니다.
## 실제 예시: AgentOps와의 통합
CrewAI는 AI 에이전트를 위한 모니터링 및 관찰 플랫폼인 [AgentOps](https://github.com/AgentOps-AI/agentops)와의 서드파티 통합 예시를 포함하고 있습니다. 구현 방식은 다음과 같습니다:
```python
from typing import Optional
from crewai.utilities.events import (
CrewKickoffCompletedEvent,
ToolUsageErrorEvent,
ToolUsageStartedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
from crewai.utilities.events.task_events import TaskEvaluationEvent
try:
import agentops
AGENTOPS_INSTALLED = True
except ImportError:
AGENTOPS_INSTALLED = False
class AgentOpsListener(BaseEventListener):
tool_event: Optional["agentops.ToolEvent"] = None
session: Optional["agentops.Session"] = None
def __init__(self):
super().__init__()
def setup_listeners(self, crewai_event_bus):
if not AGENTOPS_INSTALLED:
return
@crewai_event_bus.on(CrewKickoffStartedEvent)
def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
self.session = agentops.init()
for agent in source.agents:
if self.session:
self.session.create_agent(
name=agent.role,
agent_id=str(agent.id),
)
@crewai_event_bus.on(CrewKickoffCompletedEvent)
def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
if self.session:
self.session.end_session(
end_state="Success",
end_state_reason="Finished Execution",
)
@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
self.tool_event = agentops.ToolEvent(name=event.tool_name)
if self.session:
self.session.record(self.tool_event)
@crewai_event_bus.on(ToolUsageErrorEvent)
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
```
이 listener는 crew가 시작될 때 AgentOps 세션을 초기화하고, agent를 AgentOps에 등록하며, 도구 사용을 추적하고, crew가 완료되면 세션을 종료합니다.
AgentOps listener는 `src/crewai/utilities/events/third_party/__init__.py` 파일의 import를 통해 CrewAI 이벤트 시스템에 등록됩니다:
```python
from .agentops_listener import agentops_listener
```
이렇게 하면 `crewai.utilities.events` 패키지가 import될 때 `agentops_listener`가 로드되는 것이 보장됩니다.
## 고급 사용법: Scoped Handlers

View File

@@ -1,103 +0,0 @@
---
title: "역할 기반 접근 제어 (RBAC)"
description: "역할과 자동화별 가시성으로 crews, 도구, 데이터 접근을 제어합니다."
icon: "shield"
---
## 개요
CrewAI Enterprise의 RBAC는 **조직 수준 역할**과 **자동화(Automation) 수준 가시성**을 결합하여 안전하고 확장 가능한 접근 제어를 제공합니다.
<Frame>
<img src="/images/enterprise/users_and_roles.png" alt="CrewAI Enterprise RBAC 개요" />
</Frame>
## 사용자와 역할
워크스페이스의 각 구성원은 역할이 있으며, 이는 기능 접근 범위를 결정합니다.
가능한 작업:
- 사전 정의된 역할 사용 (Owner, Member)
- 권한을 세분화한 커스텀 역할 생성
- 설정 화면에서 언제든 역할 할당/변경
설정 위치: Settings → Roles
<Steps>
<Step title="Roles 열기">
<b>Settings → Roles</b>로 이동합니다.
</Step>
<Step title="역할 선택">
<b>Owner</b> 또는 <b>Member</b>를 사용하거나 <b>Create role</b>로 커스텀 역할을 만듭니다.
</Step>
<Step title="멤버에 할당">
사용자들을 선택하여 역할을 지정합니다. 언제든 변경할 수 있습니다.
</Step>
</Steps>
### 구성 요약
| 영역 | 위치 | 옵션 |
|:---|:---|:---|
| 사용자 & 역할 | Settings → Roles | Owner, Member; 커스텀 역할 |
| 자동화 가시성 | Automation → Settings → Visibility | Private; 사용자/역할 화이트리스트 |
## 자동화 수준 접근 제어
조직 역할과 별개로, **Automations**는 사용자/역할별로 특정 자동화 접근을 제한하는 가시성 설정을 제공합니다.
유용한 경우:
- 민감/실험 자동화를 비공개로 유지
- 대규모 팀/외부 협업에서 가시성 관리
- 격리된 컨텍스트에서 자동화 테스트
Private 모드에서는 화이트리스트에 포함된 사용자/역할만 다음 작업이 가능합니다:
- 자동화 보기
- 실행/API 사용
- 로그, 메트릭, 설정 접근
조직 Owner는 항상 접근 가능하며, 가시성 설정에 영향을 받지 않습니다.
설정 위치: Automation → Settings → Visibility
<Steps>
<Step title="Visibility 탭 열기">
<b>Automation → Settings → Visibility</b>로 이동합니다.
</Step>
<Step title="가시성 설정">
<b>Private</b>를 선택합니다. Owner는 항상 접근 가능합니다.
</Step>
<Step title="허용 대상 추가">
보기/실행/로그·메트릭·설정 접근이 가능한 사용자/역할을 추가합니다.
</Step>
<Step title="저장 및 확인">
저장 후, 목록에 없는 사용자가 보거나 실행할 수 없는지 확인합니다.
</Step>
</Steps>
### Private 모드 접근 결과
| 동작 | Owner | 화이트리스트 사용자/역할 | 비포함 |
|:---|:---|:---|:---|
| 자동화 보기 | ✓ | ✓ | ✗ |
| 실행/API | ✓ | ✓ | ✗ |
| 로그/메트릭/설정 | ✓ | ✓ | ✗ |
<Tip>
Owner는 항상 접근 가능하며, Private 모드에서는 화이트리스트에 포함된 사용자/역할만 권한이 부여됩니다.
</Tip>
<Frame>
<img src="/images/enterprise/visibility.png" alt="CrewAI Enterprise 가시성 설정" />
</Frame>
<Card title="도움이 필요하신가요?" icon="headset" href="mailto:support@crewai.com">
RBAC 구성과 점검에 대한 지원이 필요하면 연락해 주세요.
</Card>

View File

@@ -1,22 +0,0 @@
---
title: CrewAI Cookbooks
description: 패턴을 빠르게 익히기 위한 기능 중심 Quickstarts와 노트북.
icon: book
---
## Quickstarts & Demos
<CardGroup cols={2}>
<Card title="Quickstarts 저장소" icon="bolt" href="https://github.com/crewAIInc/crewAI-quickstarts">
특정 CrewAI 기능을 보여주는 데모와 소규모 프로젝트.
</Card>
<Card title="예시의 노트북" icon="book-open" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks">
실습을 위한 인터랙티브 노트북.
</Card>
</CardGroup>
<Tip>
Cookbooks로 패턴을 빠르게 익힌 뒤, 프로덕션급 구현은 Full Examples에서 확인하세요.
</Tip>

View File

@@ -1,85 +1,62 @@
---
title: CrewAI 예시
description: Crews, Flows, 통합, Notebooks로 구성된 예시 모음입니다.
description: CrewAI 프레임워크를 사용하여 워크플로우를 자동화하는 방법을 보여주는 예시 모음입니다.
icon: rocket-launch
---
## Crews
<CardGroup cols={3}>
<Card title="마케팅 전략" icon="bullhorn" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/marketing_strategy">
다중 에이전트 마케팅 캠페인 기획.
</Card>
<Card title="깜짝 여행" icon="plane" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/surprise_trip">
개인화된 여행 계획.
</Card>
<Card title="프로필-포지션 매칭" icon="id-card" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/match_profile_to_positions">
벡터 검색 기반 이력서 매칭.
</Card>
<Card title="채용 공고" icon="newspaper" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/job-posting">
채용 공고 자동 생성.
</Card>
<Card title="게임 빌더 Crew" icon="gamepad" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/game-builder-crew">
파이썬 게임을 설계·구축하는 멀티 에이전트 팀.
</Card>
<Card title="리크루팅" icon="user-group" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/recruitment">
후보자 소싱 및 평가.
</Card>
<Card title="모든 Crews 보기" icon="users" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews">
전체 crew 예시 목록.
</Card>
</CardGroup>
## Flows
<CardGroup cols={3}>
<Card title="Content Creator Flow" icon="pen" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/content_creator_flow">
라우팅 기반 콘텐츠 생성.
</Card>
<Card title="이메일 자동 응답" icon="envelope" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/email_auto_responder_flow">
이메일 모니터링과 자동 응답.
</Card>
<Card title="리드 점수 Flow" icon="chart-line" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/lead_score_flow">
휴먼‑인‑더‑루프 리드 평가.
</Card>
<Card title="미팅 어시스턴트 Flow" icon="calendar" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/meeting_assistant_flow">
노트 처리 및 연동.
</Card>
<Card title="Self Evaluation Loop" icon="rotate" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/self_evaluation_loop_flow">
반복적 자가 개선 워크플로우.
</Card>
<Card title="책 쓰기 (Flows)" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/write_a_book_with_flows">
병렬 챕터 생성.
</Card>
<Card title="모든 Flows 보기" icon="diagram-project" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows">
전체 flow 예시 목록.
</Card>
</CardGroup>
## 통합 (Integrations)
<CardGroup cols={3}>
<Card title="CrewAI ↔ LangGraph" icon="link" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/crewai-langgraph">
LangGraph 프레임워크 연동.
</Card>
<Card title="Azure OpenAI" icon="cloud" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/azure_model">
Azure OpenAI와 함께 사용.
</Card>
<Card title="NVIDIA 모델" icon="microchip" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/nvidia_models">
NVIDIA 생태계 연동.
</Card>
<Card title="모든 통합 보기" icon="puzzle-piece" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations">
전체 통합 예시.
</Card>
</CardGroup>
## 노트북 (Notebooks)
<CardGroup cols={2}>
<Card title="Simple QA Crew + Flow" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks/Simple%20QA%20Crew%20%2B%20Flow">
Simple QA Crew + Flow.
</Card>
<Card title="모든 노트북" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks">
학습과 실험을 위한 인터랙티브 예시 모음.
<Card
title="마케팅 전략"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/marketing_strategy"
icon="bullhorn"
iconType="solid"
>
CrewAI로 마케팅 전략 생성을 자동화하세요.
</Card>
<Card
title="깜짝 여행"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/surprise_trip"
icon="plane"
iconType="duotone"
>
CrewAI로 깜짝 여행 일정표를 만들어보세요.
</Card>
<Card
title="프로필과 포지션 매칭"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/match_profile_to_positions"
icon="linkedin"
iconType="duotone"
>
CrewAI로 프로필을 채용 포지션에 매칭하세요.
</Card>
<Card
title="채용 공고 생성"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/job-posting"
icon="newspaper"
iconType="duotone"
>
CrewAI로 채용 공고를 만드세요.
</Card>
<Card
title="게임 생성기"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/game-builder-crew"
icon="gamepad"
iconType="duotone"
>
CrewAI로 게임을 만들어보세요.
</Card>
<Card
title="채용 후보자 찾기"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/recruitment"
icon="user-group"
iconType="duotone"
>
CrewAI로 채용 후보자를 찾으세요.
</Card>
</CardGroup>

View File

@@ -1,65 +1,65 @@
---
title: 소개
description: 함께 협력하여 복잡한 작업을 해결하는 AI agent 팀 구축
description: 함께 협력하여 복잡한 작업을 해결하는 AI 에이전트 팀 구축
icon: handshake
---
# CrewAI란 무엇인가?
**CrewAI는 LangChain이나 기타 agent 프레임워크에 의존하지 않고, 완전히 독립적으로 처음부터 스크래치로 개발된 가볍고 매우 빠른 Python 프레임워크입니다.**
**CrewAI는 완전히 독립적으로, LangChain이나 기타 agent 프레임워크에 의존하지 않고 처음부터 스크래치로 개발된 가볍고 매우 빠른 Python 프레임워크입니다.**
CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공하여, 어떤 시나리오에도 맞춤화된 자율 AI agent를 만드는 데 이상적입니다:
- **[CrewAI Crews](/ko/guides/crews/first-crew)**: 자율성과 협업 지능을 극대화하여, 각 agent가 특정 역할, 도구, 목표를 가진 AI 팀을 만들 수 있습니다.
- **[CrewAI Flows](/ko/guides/flows/first-flow)**: 이벤트 기반의 세밀한 제어와 단일 LLM 호출을 통한 정확한 작업 orchestration을 지원하며, Crews 네이티브로 통합됩니다.
- **[CrewAI Flows](/ko/guides/flows/first-flow)**: 세밀한 이벤트 기반 제어와 단일 LLM 호출을 통한 정확한 작업 오케스트레이션을 가능하게 하며 Crews 네이티브로 지원합니다.
10만 명이 넘는 개발자가 커뮤니티 과정을 통해 인증을 받았으며, CrewAI는 기업용 AI 자동화의 표준으로 빠르게 자리잡고 있습니다.
## Crew의 작동 방식
## 크루 작동 방식
<Note>
회사가 비즈니스 목표를 달성하기 위해 여러 부서(영업, 엔지니어링, 마케팅 등)가 리더십 아래에서 함께 일하는 것처럼, CrewAI는 복잡한 작업을 달성하기 위해 전문화된 역할의 AI agent들이 협력하는 조직을 만들 수 있도록 도와줍니다.
회사가 비즈니스 목표를 달성하기 위해 여러 부서(영업, 엔지니어링, 마케팅 등)가 리더십 아래에서 함께 일하는 것처럼, CrewAI는 복잡한 작업을 달성하기 위해 전문화된 역할의 AI 에이전트들이 협력하는 조직을 만들 수 있도록 도와줍니다.
</Note>
<Frame caption="CrewAI Framework Overview">
<Frame caption="CrewAI 프레임워크 개요">
<img src="/images/crews.png" alt="CrewAI Framework Overview" />
</Frame>
| 구성 요소 | 설명 | 주요 특징 |
|:----------|:----:|:----------|
| **Crew** | 최상위 조직 | • AI agent 팀 관리<br/>• workflow 감독<br/>• 협업 보장<br/>• 결과 전달 |
| **AI agents** | 전문 팀원 | • 특정 역할 보유(Researcher, Writer 등)<br/>• 지정된 도구 사용<br/>• 작업 위임 가능<br/>• 자율적 의사결정 가능 |
| **Process** | workflow 관리 시스템 | • 협업 패턴 정의<br/>• 작업 할당 제어<br/>• 상호작용 관리<br/>• 효율적 실행 보장 |
| **Task** | 개별 할당 | • 명확한 목표 보유<br/>• 특정 도구 사용<br/>• 더 큰 프로세스에 기여<br/>• 실행 가능한 결과 도출 |
| 구성 요소 | 설명 | 주요 특징 |
|:--------------|:---------------------:|:----------|
| **크루** | 최상위 조직 | • AI 에이전트 팀 관리<br/>• 워크플로우 감독<br/>• 협업 보장<br/>• 결과 전달 |
| **AI 에이전트** | 전문 팀원 | • 특정 역할 보유(연구원, 작가 등)<br/>• 지정된 도구 사용<br/>• 작업 위임 가능<br/>• 자율적 의사결정 가능 |
| **프로세스** | 워크플로우 관리 시스템 | • 협업 패턴 정의<br/>• 작업 할당 제어<br/>• 상호작용 관리<br/>• 효율적 실행 보장 |
| **작업** | 개별 할당 | • 명확한 목표 보유<br/>• 특정 도구 사용<br/>• 더 큰 프로세스에 기여<br/>• 실행 가능한 결과 도출 |
### 전체 구조의 동작 방식
### 어떻게 모두 함께 작동하는가
1. **Crew**가 전체 운영을 조직합니다
2. **AI agents**가 자신들의 전문 작업을 수행합니다
2. **AI Agents**가 자신들의 전문 작업을 수행합니다
3. **Process**가 원활한 협업을 보장합니다
4. **Tasks**가 완료되어 목표를 달성합니다
## 주요 기능
<CardGroup cols={2}>
<Card title="역할 기반 agent" icon="users">
Researcher, Analyst, Writer 등 다양한 역할 전문성, 목표를 가진 agent를 생성할 수 있습니다
<Card title="역할 기반 에이전트" icon="users">
연구원, 분석가, 작가 등 다양한 역할, 전문성, 목표를 가진 전문 에이전트를 생성할 수 있습니다
</Card>
<Card title="유연한 도구" icon="screwdriver-wrench">
agent에게 외부 서비스 및 데이터 소스와 상호작용할 수 있는 맞춤형 도구와 API를 제공합니다
에이전트에게 외부 서비스 및 데이터 소스와 상호작용할 수 있는 맞춤형 도구와 API를 제공합니다
</Card>
<Card title="지능형 협업" icon="people-arrows">
agent들이 함께 작업하며, 인사이트를 공유하고 작업을 조율하여 복잡한 목표를 달성합니다
에이전트가 함께 작업하며, 인사이트를 공유하고 작업을 조율하여 복잡한 목표를 달성합니다
</Card>
<Card title="작업 관리" icon="list-check">
순차적 또는 병렬 workflow를 정의할 수 있으며, agent가 작업 의존성을 자동으로 처리합니다
순차적 또는 병렬 워크플로우를 정의할 수 있으며, 에이전트가 작업 의존성을 자동으로 처리합니다
</Card>
</CardGroup>
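아래는 위에서 설명한 구성 요소(agent, task, crew)가 코드에서 어떻게 결합되는지 보여주는 최소 스케치입니다. 역할과 설명 문자열은 예시를 위한 가정입니다:
```python
from crewai import Agent, Crew, Task

# 예시용 최소 구성 — 역할과 설명은 가정입니다
researcher = Agent(
    role="Researcher",
    goal="주어진 주제의 핵심 정보를 조사",
    backstory="꼼꼼한 리서치 분석가",
)

summary_task = Task(
    description="CrewAI의 주요 기능을 요약",
    expected_output="불릿 3개로 정리된 요약",
    agent=researcher,
)

crew = Crew(agents=[researcher], tasks=[summary_task])
result = crew.kickoff()
print(result)
```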
## Flow의 작동 원리
## 플로우의 작동 원리
<Note>
Crew가 자율 협업에 탁월하다면, Flow는 구조화된 자동화를 제공하여 workflow 실행에 대한 세밀한 제어를 제공합니다. Flow는 조건부 로직, 반복문, 동적 상태 관리를 정확하게 처리하면서 작업이 신뢰성 있게, 안전하게, 효율적으로 실행되도록 보장합니다. Flow는 Crew와 원활하게 통합되어 높은 자율성과 엄격한 제어의 균형을 이룰 수 있게 해줍니다.
crew 자율 협업에 탁월한 반면, 플로우는 구조화된 자동화를 제공하여 워크플로우 실행에 대한 세밀한 제어를 제공합니다. 플로우는 조건부 로직, 반복문, 동적 상태 관리를 정확하게 처리하면서 작업이 신뢰성 있게, 안전하게, 효율적으로 실행되도록 보장합니다. 플로우crew와 원활하게 통합되어 높은 자율성과 엄격한 제어의 균형을 이룰 수 있게 해줍니다.
</Note>
<Frame caption="CrewAI Framework Overview">
@@ -68,41 +68,41 @@ CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공
| 구성 요소 | 설명 | 주요 기능 |
|:----------|:-----------:|:------------|
| **Flow** | 구조화된 workflow orchestration | • 실행 경로 관리<br/>• 상태 전환 처리<br/>• 작업 순서 제어<br/>• 신뢰성 있는 실행 보장 |
| **Events** | workflow 액션 트리거 | • 특정 프로세스 시작<br/>• 동적 응답 가능<br/>• 조건부 분기 지원<br/>• 실시간 적응 허용 |
| **States** | workflow 실행 컨텍스트 | • 실행 데이터 유지<br/>• 데이터 영속성 지원<br/>• 재개 가능성 보장<br/>• 실행 무결성 확보 |
| **Crew Support** | workflow 자동화 강화 | • 필요할 때 agency 삽입<br/>• 구조화된 workflow 보완<br/>• 자동화와 인텔리전스의 균형<br/>• 적응적 의사결정 지원 |
| **Flow** | 구조화된 워크플로우 오케스트레이션 | • 실행 경로 관리<br/>• 상태 전환 처리<br/>• 작업 순서 제어<br/>• 신뢰성 있는 실행 보장 |
| **Events** | 워크플로우 액션 트리거 | • 특정 프로세스 시작<br/>• 동적 응답 가능<br/>• 조건부 분기 지원<br/>• 실시간 적응 허용 |
| **States** | 워크플로우 실행 컨텍스트 | • 실행 데이터 유지<br/>• 데이터 영속성 지원<br/>• 재개 가능성 보장<br/>• 실행 무결성 확보 |
| **Crew Support** | 워크플로우 자동화 강화 | • 필요할 때 agency 삽입<br/>• 구조화된 워크플로우 보완<br/>• 자동화와 인텔리전스의 균형<br/>• 적응적 의사결정 지원 |
### 주요 기능
<CardGroup cols={2}>
<Card title="이벤트 기반 orchestration" icon="bolt">
이벤트에 동적으로 반응하여 정밀한 실행 경로 정의합니다
<Card title="이벤트 기반 오케스트레이션" icon="bolt">
이벤트에 동적으로 반응하여 정밀한 실행 경로 정의
</Card>
<Card title="세밀한 제어" icon="sliders">
workflow 상태와 조건부 실행을 안전하고 효율적으로 관리합니다
워크플로우 상태와 조건부 실행을 안전하고 효율적으로 관리
</Card>
<Card title="네이티브 Crew 통합" icon="puzzle-piece">
Crews와 손쉽게 결합하여 자율성과 지능 강화합니다
Crews와 손쉽게 결합하여 자율성과 지능 강화
</Card>
<Card title="결정론적 실행" icon="route">
명시적 제어 흐름과 오류 처리로 예측 가능한 결과 보장합니다
명시적 제어 흐름과 오류 처리로 예측 가능한 결과 보장
</Card>
</CardGroup>
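다음은 이벤트 기반 실행 흐름을 보여주는 최소 Flow 스케치입니다. 메서드 이름과 반환 값은 예시를 위한 가정입니다:
```python
from crewai.flow.flow import Flow, listen, start


class MinimalFlow(Flow):
    @start()
    def begin(self):
        # 흐름의 시작점 — 다음 단계로 전달할 데이터를 반환합니다
        return "시작 데이터"

    @listen(begin)
    def process(self, data):
        # begin이 완료되면 자동으로 호출됩니다
        return f"처리됨: {data}"


result = MinimalFlow().kickoff()
print(result)
```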
## Crew와 Flow를 언제 사용할까
## 크루(Crews)와 플로우(Flows)를 언제 사용할까
<Note>
[Crew](/ko/guides/crews/first-crew)와 [Flow](/ko/guides/flows/first-flow)를 언제 사용할지 이해하는 것은 CrewAI의 잠재력을 애플리케이션에서 극대화하는 데 핵심적입니다.
[크루](/ko/guides/crews/first-crew)와 [플로우](/ko/guides/flows/first-flow)를 언제 사용할지 이해하는 것은 CrewAI의 잠재력을 애플리케이션에서 극대화하는 데 핵심적입니다.
</Note>
| 사용 사례 | 권장 접근 방식 | 이유 |
|:---------|:---------------------|:-----|
| **개방형 연구** | [Crew](/ko/guides/crews/first-crew) | 창의적 사고, 탐색, 적응이 필요한 작업에 적합 |
| **콘텐츠 생성** | [Crew](/ko/guides/crews/first-crew) | 기사, 보고서, 마케팅 자료 등 협업형 생성에 적합 |
| **의사결정 workflow** | [Flow](/ko/guides/flows/first-flow) | 예측 가능하고 감사 가능한 의사결정 경로 및 정밀 제어가 필요할 때 |
| **API orchestration** | [Flow](/ko/guides/flows/first-flow) | 특정 순서로 여러 외부 서비스에 신뢰성 있게 통합할 때 |
| **하이브리드 애플리케이션** | 혼합 접근 방식 | [Flow](/ko/guides/flows/first-flow)로 전체 프로세스를 orchestration하고, [Crew](/ko/guides/crews/first-crew)로 복잡한 하위 작업을 처리 |
| **개방형 연구** | [크루](/ko/guides/crews/first-crew) | 과제가 창의적 사고, 탐색, 적응이 필요할 때 |
| **콘텐츠 생성** | [크루](/ko/guides/crews/first-crew) | 기사, 보고서, 마케팅 자료 등 협업형 생성 |
| **의사결정 워크플로우** | [플로우](/ko/guides/flows/first-flow) | 예측 가능하고 감사 가능한 의사결정 경로 및 정밀 제어가 필요할 때 |
| **API 오케스트레이션** | [플로우](/ko/guides/flows/first-flow) | 특정 순서로 여러 외부 서비스에 신뢰성 있게 통합할 때 |
| **하이브리드 애플리케이션** | 혼합 접근 방식 | [플로우](/ko/guides/flows/first-flow)로 전체 프로세스를 오케스트레이션하고, [크루](/ko/guides/crews/first-crew)로 복잡한 하위 작업을 처리 |
### 의사결정 프레임워크
@@ -112,8 +112,8 @@ CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공
## CrewAI를 선택해야 하는 이유?
- 🧠 **자율적 운영**: agent가 자신의 역할과 사용 가능한 도구를 바탕으로 지능적인 결정을 내립니다
- 📝 **자연스러운 상호작용**: agent가 인간 팀원처럼 소통하고 협업합니다
- 🧠 **자율적 운영**: 에이전트가 자신의 역할과 사용 가능한 도구를 바탕으로 지능적인 결정을 내립니다
- 📝 **자연스러운 상호작용**: 에이전트가 인간 팀원처럼 소통하고 협업합니다
- 🛠️ **확장 가능한 설계**: 새로운 도구, 역할, 기능을 쉽게 추가할 수 있습니다
- 🚀 **프로덕션 준비 완료**: 실제 환경에서의 신뢰성과 확장성을 고려하여 구축되었습니다
- 🔒 **보안 중심**: 엔터프라이즈 보안 요구 사항을 고려하여 설계되었습니다
@@ -134,7 +134,7 @@ CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공
icon="diagram-project"
href="/ko/guides/flows/first-flow"
>
실행을 정밀하게 제어할 수 있는 구조화된, 이벤트 기반 workflow를 만드는 방법을 배워보세요.
실행을 정밀하게 제어할 수 있는 구조화된, 이벤트 기반 워크플로우를 만드는 방법을 배워보세요.
</Card>
</CardGroup>
@@ -151,7 +151,7 @@ CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공
icon="bolt"
href="ko/quickstart"
>
빠른 시작 가이드를 따라 첫 번째 CrewAI agent를 만들고 직접 경험해 보세요.
빠른 시작 가이드를 따라 첫 번째 CrewAI 에이전트를 만들고 직접 경험해 보세요.
</Card>
<Card
title="커뮤니티 가입하기"

View File

@@ -1,131 +1,124 @@
---
title: "AgentOps 통합"
description: "AgentOps 관찰 가능성 플랫폼으로 CrewAI 에이전트를 모니터링하고 분석하세요"
title: AgentOps 통합
description: AgentOps를 사용하여 에이전트 성능을 이해하고 로깅하기
icon: paperclip
---
# AgentOps 통합
# 소개
AgentOps는 AI 에이전트를 위해 특별히 설계된 강력한 관찰 가능성 플랫폼입니다. CrewAI 크루를 위한 포괄적인 모니터링, 분석 및 디버깅 기능을 제공합니다.
Observability는 대화형 AI 에이전트를 개발하고 배포하는 데 있어 핵심적인 요소입니다. 이는 개발자가 에이전트의 성능을 이해하고, 에이전트가 사용자와 어떻게 상호작용하는지, 그리고 에이전트가 외부 도구와 API를 어떻게 사용하는지를 파악할 수 있게 해줍니다.
AgentOps는 CrewAI와 독립적인 제품으로, 에이전트를 위한 종합적인 observability 솔루션을 제공합니다.
## 기능
## AgentOps
- **실시간 모니터링**: 에이전트 성능과 동작을 실시간으로 추적
- **세션 재생**: 상세한 실행 추적과 함께 완전한 에이전트 세션 검토
- **성능 분석**: 크루 효율성, 도구 사용량 및 작업 완료율 분석
- **오류 추적**: 에이전트 워크플로우의 문제 식별 및 디버그
- **비용 추적**: LLM 사용량 및 관련 비용 모니터링
- **팀 협업**: 인사이트 공유 및 에이전트 최적화 협업
[AgentOps](https://agentops.ai/?=crew)는 에이전트에 대한 세션 리플레이, 메트릭, 모니터링을 제공합니다.
## 설치
AgentOps는 높은 수준에서 비용, 토큰 사용량, 대기 시간, 에이전트 실패, 세션 전체 통계 등 다양한 항목을 모니터링할 수 있는 기능을 제공합니다.
더 자세한 내용은 [AgentOps Repo](https://github.com/AgentOps-AI/agentops)를 확인하세요.
CrewAI와 함께 AgentOps 설치:
### 개요
```bash
pip install crewai[agentops]
```
AgentOps는 개발 및 프로덕션 환경에서 에이전트에 대한 모니터링을 제공합니다.
에이전트 성능, 세션 리플레이, 맞춤형 리포팅을 추적할 수 있는 대시보드를 제공합니다.
AgentOps를 별도로 설치:
또한, AgentOps는 Crew 에이전트 상호작용, LLM 호출, 툴 사용을 실시간으로 볼 수 있는 세션 드릴다운 기능을 제공합니다.
이 기능은 에이전트가 사용자 및 다른 에이전트와 어떻게 상호작용하는지 디버깅하고 이해하는 데 유용합니다.
```bash
pip install agentops
```
![선택된 에이전트 세션 실행 시리즈의 개요](/images/agentops-overview.png)
![에이전트 실행을 조사하기 위한 세션 드릴다운 개요](/images/agentops-session.png)
![단계별 에이전트 리플레이 실행 그래프 보기](/images/agentops-replay.png)
## 설정
### 특징
1. **API 키 받기**: [AgentOps](https://agentops.ai)에 가입하고 API 키를 받으세요
- **LLM 비용 관리 및 추적**: 기반 모델 공급자와의 지출을 추적합니다.
- **재생 분석**: 단계별 에이전트 실행 그래프를 시청할 수 있습니다.
- **재귀적 사고 감지**: 에이전트가 무한 루프에 빠졌는지 식별합니다.
- **맞춤형 보고서**: 에이전트 성능에 대한 맞춤형 분석을 생성합니다.
- **분석 대시보드**: 개발 및 운영 중인 에이전트에 대한 상위 수준 통계를 모니터링합니다.
- **공개 모델 테스트**: 벤치마크 및 리더보드를 통해 에이전트를 테스트할 수 있습니다.
- **맞춤형 테스트**: 도메인별 테스트로 에이전트를 실행합니다.
- **타임 트래블 디버깅**: 체크포인트에서 세션을 재시작합니다.
- **컴플라이언스 및 보안**: 감사 로그를 생성하고 욕설 및 PII 유출과 같은 잠재적 위협을 감지합니다.
- **프롬프트 인젝션 감지**: 잠재적 코드 인젝션 및 시크릿 유출을 식별합니다.
2. **환경 구성**: AgentOps API 키를 환경 변수로 설정:
### AgentOps 사용하기
```bash
export AGENTOPS_API_KEY="여기에-api-키-입력"
```
<Steps>
<Step title="API 키 생성">
사용자 API 키를 여기서 생성하세요: [API 키 생성](https://app.agentops.ai/account)
</Step>
<Step title="환경 설정">
API 키를 환경 변수에 추가하세요:
```bash
AGENTOPS_API_KEY=<YOUR_AGENTOPS_API_KEY>
```
</Step>
<Step title="AgentOps 설치">
다음 명령어로 AgentOps를 설치하세요:
```bash
pip install 'crewai[agentops]'
```
또는
```bash
pip install agentops
```
</Step>
<Step title="AgentOps 초기화">
스크립트에서 `Crew`를 사용하기 전에 다음 코드를 포함하세요:
3. **AgentOps 초기화**: CrewAI 스크립트에 다음을 추가:
```python
import agentops
agentops.init()
```
```python
import agentops
from crewai import Agent, Task, Crew
이렇게 하면 AgentOps 세션이 시작되고 Crew 에이전트가 자동으로 추적됩니다. 더 복잡한 agentic 시스템을 구성하는 방법에 대한 자세한 정보는 [AgentOps 문서](https://docs.agentops.ai) 또는 [Discord](https://discord.gg/j4f3KbeH)를 참조하세요.
</Step>
</Steps>
# AgentOps 초기화
agentops.init()
### Crew + AgentOps 예시
# 여기에 CrewAI 코드
agent = Agent(
role="데이터 분석가",
goal="데이터를 분석하고 인사이트 제공",
backstory="당신은 전문 데이터 분석가입니다...",
)
<CardGroup cols={3}>
<Card
title="Job Posting"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/job-posting"
icon="briefcase"
iconType="solid"
>
채용 공고를 생성하는 Crew agent의 예시입니다.
</Card>
<Card
title="Markdown Validator"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/markdown_validator"
icon="markdown"
iconType="solid"
>
Markdown 파일을 검증하는 Crew agent의 예시입니다.
</Card>
<Card
title="Instagram Post"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/instagram_post"
icon="square-instagram"
iconType="brands"
>
Instagram 게시물을 생성하는 Crew agent의 예시입니다.
</Card>
</CardGroup>
task = Task(
description="판매 데이터를 분석하고 인사이트를 제공하세요",
agent=agent,
)
### 추가 정보
crew = Crew(
agents=[agent],
tasks=[task],
)
시작하려면 [AgentOps 계정](https://agentops.ai/?=crew)을 생성하세요.
# 크루 실행
result = crew.kickoff()
기능 요청이나 버그 보고가 필요하시면 [AgentOps Repo](https://github.com/AgentOps-AI/agentops)에서 AgentOps 팀에 문의해 주세요.
# AgentOps 세션 종료
agentops.end_session("Success")
```
#### 추가 링크
## 자동 통합
CrewAI는 라이브러리가 설치되면 AgentOps와 자동으로 통합됩니다. 통합은 다음을 캡처합니다:
- **크루 킥오프 이벤트**: 크루 실행의 시작과 완료
- **도구 사용**: 모든 도구 호출과 결과
- **작업 평가**: 작업 성능 메트릭과 피드백
- **오류 이벤트**: 실행 중 발생하는 모든 오류
## 구성 옵션
AgentOps 통합을 사용자 정의할 수 있습니다:
```python
import agentops
# 사용자 정의 설정으로 AgentOps 구성
agentops.init(
api_key="당신의-api-키",
tags=["프로덕션", "데이터-분석"],
auto_start_session=True,
instrument_llm_calls=True,
)
```
## 데이터 보기
1. **대시보드**: AgentOps 대시보드를 방문하여 에이전트 세션 보기
2. **세션 세부사항**: 세션을 클릭하여 상세한 실행 추적 보기
3. **분석**: 분석 탭을 사용하여 성능 트렌드 식별
4. **오류**: 디버깅 정보를 위해 오류 탭 모니터링
## 모범 사례
- **세션 태그 지정**: 의미 있는 태그를 사용하여 에이전트 실행 정리
- **비용 모니터링**: LLM 사용량과 관련 비용 추적
- **오류 검토**: 정기적으로 오류 확인 및 해결
- **성능 최적화**: 분석을 사용하여 병목 현상과 최적화 기회 식별
## 문제 해결
### AgentOps가 데이터를 기록하지 않음
1. API 키가 올바르게 설정되었는지 확인
2. AgentOps가 제대로 초기화되었는지 확인
3. 스크립트 끝에서 `agentops.end_session()`을 호출하는지 확인
### 누락된 이벤트
일부 이벤트가 캡처되지 않는 경우:
1. CrewAI와 AgentOps의 최신 버전이 있는지 확인
2. AgentOps 리스너가 제대로 등록되었는지 확인
3. 오류 메시지에 대한 로그 검토
이 통합은 CrewAI 에이전트에 대한 포괄적인 관찰 가능성을 제공하여 AI 워크플로우를 모니터링, 디버그 및 최적화하는 데 도움이 됩니다.
<a href="https://twitter.com/agentopsai/">🐦 트위터</a>
<span>&nbsp;&nbsp;•&nbsp;&nbsp;</span>
<a href="https://discord.gg/JHPt4C7r">📢 디스코드</a>
<span>&nbsp;&nbsp;•&nbsp;&nbsp;</span>
<a href="https://app.agentops.ai/?=crew">🖇️ AgentOps 대시보드</a>
<span>&nbsp;&nbsp;•&nbsp;&nbsp;</span>
<a href="https://docs.agentops.ai/introduction">📙 문서화</a>

View File

@@ -21,6 +21,9 @@ icon: "face-smile"
### 모니터링 & 트레이싱 플랫폼
<CardGroup cols={2}>
<Card title="AgentOps" icon="paperclip" href="/ko/observability/agentops">
에이전트 개발 및 운영을 위한 세션 리플레이, 메트릭, 모니터링 제공.
</Card>
<Card title="LangDB" icon="database" href="/ko/observability/langdb">
자동 에이전트 상호작용 캡처를 포함한 CrewAI 워크플로의 엔드-투-엔드 트레이싱.

View File

@@ -177,7 +177,14 @@ class MyCustomCrew:
# Sua implementação do crew...
```
É assim que listeners de eventos de terceiros são registrados no código do CrewAI.
É exatamente assim que o `agentops_listener` integrado do CrewAI é registrado. No código-fonte do CrewAI, você encontrará:
```python
# src/crewai/utilities/events/third_party/__init__.py
from .agentops_listener import agentops_listener
```
Isso garante que o `agentops_listener` seja carregado quando o pacote `crewai.utilities.events` for importado.
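Você pode registrar seu próprio listener seguindo o mesmo padrão. Abaixo está um esboço mínimo e hipotético — o caminho `my_project/listeners.py` e o nome `MyCustomListener` são apenas exemplos ilustrativos:
```python
# my_project/listeners.py — caminho de módulo hipotético, apenas para ilustração
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent


class MyCustomListener(BaseEventListener):
    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_crew_started(source, event: CrewKickoffStartedEvent):
            # Chamado quando a execução de um crew é iniciada
            print(f"Crew '{event.crew_name}' iniciou a execução")


# Importar esta instância no __init__.py do seu pacote registra o listener automaticamente
my_custom_listener = MyCustomListener()
```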
## Tipos de Eventos Disponíveis
@@ -262,6 +269,77 @@ A estrutura do objeto de evento depende do tipo do evento, mas todos herdam de `
Campos adicionais variam pelo tipo de evento. Por exemplo, `CrewKickoffCompletedEvent` inclui os campos `crew_name` e `output`.
## Exemplo Real: Integração com AgentOps
O CrewAI inclui um exemplo de integração com [AgentOps](https://github.com/AgentOps-AI/agentops), uma plataforma de monitoramento e observabilidade para agentes de IA. Veja como é implementado:
```python
from typing import Optional
from crewai.utilities.events import (
CrewKickoffCompletedEvent,
ToolUsageErrorEvent,
ToolUsageStartedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
from crewai.utilities.events.task_events import TaskEvaluationEvent
try:
import agentops
AGENTOPS_INSTALLED = True
except ImportError:
AGENTOPS_INSTALLED = False
class AgentOpsListener(BaseEventListener):
tool_event: Optional["agentops.ToolEvent"] = None
session: Optional["agentops.Session"] = None
def __init__(self):
super().__init__()
def setup_listeners(self, crewai_event_bus):
if not AGENTOPS_INSTALLED:
return
@crewai_event_bus.on(CrewKickoffStartedEvent)
def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
self.session = agentops.init()
for agent in source.agents:
if self.session:
self.session.create_agent(
name=agent.role,
agent_id=str(agent.id),
)
@crewai_event_bus.on(CrewKickoffCompletedEvent)
def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
if self.session:
self.session.end_session(
end_state="Success",
end_state_reason="Finished Execution",
)
@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
self.tool_event = agentops.ToolEvent(name=event.tool_name)
if self.session:
self.session.record(self.tool_event)
@crewai_event_bus.on(ToolUsageErrorEvent)
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
```
Esse listener inicializa uma sessão do AgentOps quando um Crew inicia, cadastra agentes no AgentOps, rastreia o uso de ferramentas e finaliza a sessão quando o Crew é concluído.
O listener AgentOps é registrado no sistema de eventos do CrewAI via importação em `src/crewai/utilities/events/third_party/__init__.py`:
```python
from .agentops_listener import agentops_listener
```
Isso garante que o `agentops_listener` seja carregado quando o pacote `crewai.utilities.events` for importado.
## Uso Avançado: Handlers Escopados

View File

@@ -1,103 +0,0 @@
---
title: "Controle de Acesso Baseado em Funções (RBAC)"
description: "Controle o acesso a crews, ferramentas e dados com funções e visibilidade por automação."
icon: "shield"
---
## Visão Geral
O RBAC no CrewAI Enterprise permite gerenciar acesso de forma segura e escalável combinando **funções em nível de organização** com **controles de visibilidade em nível de automação**.
<Frame>
<img src="/images/enterprise/users_and_roles.png" alt="Visão geral de RBAC no CrewAI Enterprise" />
</Frame>
## Usuários e Funções
Cada membro da sua workspace possui uma função, que determina o acesso aos recursos.
Você pode:
- Usar funções pré-definidas (Owner, Member)
- Criar funções personalizadas com permissões específicas
- Atribuir funções a qualquer momento no painel de configurações
A configuração de usuários e funções é feita em Settings → Roles.
<Steps>
<Step title="Abrir Roles">
Vá em <b>Settings → Roles</b> no CrewAI Enterprise.
</Step>
<Step title="Escolher a função">
Use <b>Owner</b> ou <b>Member</b>, ou clique em <b>Create role</b> para criar uma função personalizada.
</Step>
<Step title="Atribuir aos membros">
Selecione os usuários e atribua a função. Você pode alterar depois.
</Step>
</Steps>
### Resumo de configuração
| Área | Onde configurar | Opções |
|:---|:---|:---|
| Usuários & Funções | Settings → Roles | Pré-definidas: Owner, Member; Funções personalizadas |
| Visibilidade da automação | Automation → Settings → Visibility | Private; Lista de usuários/funções |
## Controle de Acesso em Nível de Automação
Além das funções na organização, as **Automations** suportam visibilidade refinada para restringir acesso por usuário ou função.
Útil para:
- Manter automações sensíveis/experimentais privadas
- Gerenciar visibilidade em equipes grandes ou colaboradores externos
- Testar automações em contexto isolado
Em modo privado, somente usuários/funções na whitelist poderão:
- Ver a automação
- Executar/usar a API
- Acessar logs, métricas e configurações
O owner da organização sempre tem acesso, independente da visibilidade.
Configure em Automation → Settings → Visibility.
<Steps>
<Step title="Abrir a aba Visibility">
Acesse <b>Automation → Settings → Visibility</b>.
</Step>
<Step title="Definir visibilidade">
Selecione <b>Private</b> para restringir o acesso. O owner mantém acesso.
</Step>
<Step title="Permitir acesso">
Adicione usuários e funções que poderão ver/executar e acessar logs/métricas/configurações.
</Step>
<Step title="Salvar e verificar">
Salve e confirme que não listados não conseguem ver ou executar a automação.
</Step>
</Steps>
### Resultado de acesso no modo Private
| Ação | Owner | Usuário/função na whitelist | Não listado |
|:---|:---|:---|:---|
| Ver automação | ✓ | ✓ | ✗ |
| Executar/API | ✓ | ✓ | ✗ |
| Logs/métricas/configurações | ✓ | ✓ | ✗ |
<Tip>
O owner sempre possui acesso. Em modo privado, somente usuários/funções na whitelist têm permissão.
</Tip>
<Frame>
<img src="/images/enterprise/visibility.png" alt="Configuração de visibilidade no CrewAI Enterprise" />
</Frame>
<Card title="Precisa de Ajuda?" icon="headset" href="mailto:support@crewai.com">
Fale com o nosso time para suporte em configuração e auditoria de RBAC.
</Card>

View File

@@ -1,22 +0,0 @@
---
title: CrewAI Cookbooks
description: Quickstarts e notebooks focados em recursos para aprender padrões rapidamente.
icon: book
---
## Quickstarts & Demos
<CardGroup cols={2}>
<Card title="Repositório de Quickstarts" icon="bolt" href="https://github.com/crewAIInc/crewAI-quickstarts">
Demos e projetos pequenos que mostram capacidades específicas do CrewAI.
</Card>
<Card title="Notebooks nos Exemplos" icon="book-open" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks">
Notebooks interativos para aprendizado prático.
</Card>
</CardGroup>
<Tip>
Use Cookbooks para aprender um padrão rapidamente e, em seguida, avance para os Exemplos completos para implementações de produção.
</Tip>

View File

@@ -1,85 +1,62 @@
---
title: Exemplos CrewAI
description: Explore exemplos organizados por Crews, Flows, Integrações e Notebooks.
description: Uma coleção de exemplos que mostram como usar o framework CrewAI para automatizar fluxos de trabalho.
icon: rocket-launch
---
## Crews
<CardGroup cols={3}>
<Card title="Estratégia de Marketing" icon="bullhorn" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/marketing_strategy">
Planejamento de campanhas com múltiplos agentes.
</Card>
<Card title="Viagem Surpresa" icon="plane" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/surprise_trip">
Planejamento de viagens personalizadas.
</Card>
<Card title="Relacionar Perfil a Posições" icon="id-card" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/match_profile_to_positions">
Correspondência de CV para vagas com busca vetorial.
</Card>
<Card title="Criar Vaga" icon="newspaper" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/job-posting">
Criação automatizada de descrições de vagas.
</Card>
<Card title="Crew Construtor de Jogos" icon="gamepad" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/game-builder-crew">
Equipe multiagente que projeta e constrói jogos em Python.
</Card>
<Card title="Recrutamento" icon="user-group" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews/recruitment">
Prospecção e avaliação de candidatos.
</Card>
<Card title="Ver todos os Crews" icon="users" href="https://github.com/crewAIInc/crewAI-examples/tree/main/crews">
Lista completa de exemplos de crews.
</Card>
</CardGroup>
## Flows
<CardGroup cols={3}>
<Card title="Content Creator Flow" icon="pen" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/content_creator_flow">
Geração de conteúdo com roteamento.
</Card>
<Card title="Email Auto Responder" icon="envelope" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/email_auto_responder_flow">
Monitoramento e respostas de email.
</Card>
<Card title="Lead Score Flow" icon="chart-line" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/lead_score_flow">
Qualificação de leads com revisão humana.
</Card>
<Card title="Meeting Assistant Flow" icon="calendar" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/meeting_assistant_flow">
Processamento de notas com integrações.
</Card>
<Card title="Self Evaluation Loop" icon="rotate" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/self_evaluation_loop_flow">
Fluxos de autoaperfeiçoamento iterativo.
</Card>
<Card title="Escrever um Livro (Flows)" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/write_a_book_with_flows">
Geração paralela de capítulos.
</Card>
<Card title="Ver todos os Flows" icon="diagram-project" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows">
Lista completa de exemplos de flows.
</Card>
</CardGroup>
## Integrações
<CardGroup cols={3}>
<Card title="CrewAI ↔ LangGraph" icon="link" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/crewai-langgraph">
Integração com o framework LangGraph.
</Card>
<Card title="Azure OpenAI" icon="cloud" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/azure_model">
Usando CrewAI com Azure OpenAI.
</Card>
<Card title="Modelos NVIDIA" icon="microchip" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations/nvidia_models">
Integrações com o ecossistema NVIDIA.
</Card>
<Card title="Ver todas as Integrações" icon="puzzle-piece" href="https://github.com/crewAIInc/crewAI-examples/tree/main/integrations">
Todos os exemplos de integrações.
</Card>
</CardGroup>
## Notebooks
<CardGroup cols={2}>
<Card title="Simple QA Crew + Flow" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks/Simple%20QA%20Crew%20%2B%20Flow">
Simple QA Crew + Flow.
</Card>
<Card title="Todos os Notebooks" icon="book" href="https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks">
Exemplos interativos para aprendizado e experimentação.
<Card
title="Estratégia de Marketing"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/marketing_strategy"
icon="bullhorn"
iconType="solid"
>
Automatize a criação de estratégias de marketing com CrewAI.
</Card>
<Card
title="Viagem Surpresa"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/surprise_trip"
icon="plane"
iconType="duotone"
>
Crie um roteiro de viagem surpresa com CrewAI.
</Card>
<Card
title="Relacionar Perfil a Posições"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/match_profile_to_positions"
icon="linkedin"
iconType="duotone"
>
Relacione um perfil a vagas de emprego com CrewAI.
</Card>
<Card
title="Criar Vaga"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/job-posting"
icon="newspaper"
iconType="duotone"
>
Crie uma vaga de emprego com CrewAI.
</Card>
<Card
title="Gerador de Jogos"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/game-builder-crew"
icon="gamepad"
iconType="duotone"
>
Crie um jogo com CrewAI.
</Card>
<Card
title="Encontrar Candidatos"
color="#F3A78B"
href="https://github.com/crewAIInc/crewAI-examples/tree/main/recruitment"
icon="user-group"
iconType="duotone"
>
Encontre candidatos a vagas com CrewAI.
</Card>
</CardGroup>

View File

@@ -1,131 +1,126 @@
---
title: "Integração AgentOps"
description: "Monitore e analise seus agentes CrewAI com a plataforma de observabilidade AgentOps"
title: Integração com AgentOps
description: Entendendo e registrando a performance do seu agente com AgentOps.
icon: paperclip
---
# Integração AgentOps
# Introdução
AgentOps é uma poderosa plataforma de observabilidade projetada especificamente para agentes de IA. Ela fornece capacidades abrangentes de monitoramento, análise e depuração para suas crews CrewAI.
Observabilidade é um aspecto fundamental no desenvolvimento e implantação de agentes de IA conversacional. Ela permite que desenvolvedores compreendam como seus agentes estão performando,
como eles estão interagindo com os usuários e como utilizam ferramentas externas e APIs.
AgentOps é um produto independente do CrewAI que fornece uma solução completa de observabilidade para agentes.
## Recursos
## AgentOps
- **Monitoramento em Tempo Real**: Acompanhe o desempenho e comportamento dos agentes em tempo real
- **Replay de Sessão**: Revise sessões completas de agentes com rastreamentos detalhados de execução
- **Análise de Desempenho**: Analise eficiência da crew, uso de ferramentas e taxas de conclusão de tarefas
- **Rastreamento de Erros**: Identifique e depure problemas em fluxos de trabalho de agentes
- **Rastreamento de Custos**: Monitore o uso de LLM e custos associados
- **Colaboração em Equipe**: Compartilhe insights e colabore na otimização de agentes
[AgentOps](https://agentops.ai/?=crew) oferece replay de sessões, métricas e monitoramento para agentes.
## Instalação
Em um alto nível, o AgentOps oferece a capacidade de monitorar custos, uso de tokens, latência, falhas do agente, estatísticas de sessão e muito mais.
Para mais informações, confira o [Repositório do AgentOps](https://github.com/AgentOps-AI/agentops).
Instale o AgentOps junto com o CrewAI:
### Visão Geral
```bash
pip install crewai[agentops]
```
AgentOps fornece monitoramento para agentes em desenvolvimento e produção.
Disponibiliza um dashboard para acompanhamento de performance dos agentes, replay de sessões e relatórios personalizados.
Ou instale o AgentOps separadamente:
Além disso, o AgentOps traz análises detalhadas das sessões para visualizar interações do agente Crew, chamadas LLM e uso de ferramentas em tempo real.
Esse recurso é útil para depuração e entendimento de como os agentes interagem com usuários e entre si.
```bash
pip install agentops
```
![Visão geral de uma série selecionada de execuções de sessões do agente](/images/agentops-overview.png)
![Visão geral das análises detalhadas de sessões para examinar execuções de agentes](/images/agentops-session.png)
![Visualizando um gráfico de execução passo a passo do replay do agente](/images/agentops-replay.png)
## Configuração
### Funcionalidades
1. **Obtenha sua Chave API**: Cadastre-se no [AgentOps](https://agentops.ai) e obtenha sua chave API
- **Gerenciamento e Rastreamento de Custos de LLM**: Acompanhe gastos com provedores de modelos fundamentais.
- **Análises de Replay**: Assista gráficos de execução do agente, passo a passo.
- **Detecção de Pensamento Recursivo**: Identifique quando agentes entram em loops infinitos.
- **Relatórios Personalizados**: Crie análises customizadas sobre a performance dos agentes.
- **Dashboard Analítico**: Monitore estatísticas gerais de agentes em desenvolvimento e produção.
- **Teste de Modelos Públicos**: Teste seus agentes em benchmarks e rankings.
- **Testes Personalizados**: Execute seus agentes em testes específicos de domínio.
- **Depuração com Viagem no Tempo**: Reinicie suas sessões a partir de checkpoints.
- **Conformidade e Segurança**: Crie registros de auditoria e detecte possíveis ameaças como uso de palavrões e vazamento de dados pessoais.
- **Detecção de Prompt Injection**: Identifique possíveis injeções de código e vazamentos de segredos.
2. **Configure seu ambiente**: Defina sua chave API do AgentOps como uma variável de ambiente:
### Utilizando o AgentOps
```bash
export AGENTOPS_API_KEY="sua-chave-api-aqui"
```
<Steps>
<Step title="Crie uma Chave de API">
Crie uma chave de API de usuário aqui: [Create API Key](https://app.agentops.ai/account)
</Step>
<Step title="Configure seu Ambiente">
Adicione sua chave API nas variáveis de ambiente:
```bash
AGENTOPS_API_KEY=<YOUR_AGENTOPS_API_KEY>
```
</Step>
<Step title="Instale o AgentOps">
Instale o AgentOps com:
```bash
pip install 'crewai[agentops]'
```
ou
```bash
pip install agentops
```
</Step>
<Step title="Inicialize o AgentOps">
Antes de utilizar o `Crew` no seu script, inclua estas linhas:
3. **Inicialize o AgentOps**: Adicione isso ao seu script CrewAI:
```python
import agentops
agentops.init()
```
```python
import agentops
from crewai import Agent, Task, Crew
Isso irá iniciar uma sessão do AgentOps e também rastrear automaticamente os agentes Crew. Para mais detalhes sobre como adaptar sistemas de agentes mais complexos,
confira a [documentação do AgentOps](https://docs.agentops.ai) ou participe do [Discord](https://discord.gg/j4f3KbeH).
</Step>
</Steps>
# Inicializar AgentOps
agentops.init()
### Exemplos de Crew + AgentOps
# Seu código CrewAI aqui
agent = Agent(
role="Analista de Dados",
goal="Analisar dados e fornecer insights",
backstory="Você é um analista de dados especialista...",
)
<CardGroup cols={3}>
<Card
title="Vaga de Emprego"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/job-posting"
icon="briefcase"
iconType="solid"
>
Exemplo de um agente Crew que gera vagas de emprego.
</Card>
<Card
title="Validador de Markdown"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/markdown_validator"
icon="markdown"
iconType="solid"
>
Exemplo de um agente Crew que valida arquivos Markdown.
</Card>
<Card
title="Post no Instagram"
color="#F3A78B"
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/instagram_post"
icon="square-instagram"
iconType="brands"
>
Exemplo de um agente Crew que gera posts para Instagram.
</Card>
</CardGroup>
task = Task(
description="Analise os dados de vendas e forneça insights",
agent=agent,
)
### Mais Informações
crew = Crew(
agents=[agent],
tasks=[task],
)
Para começar, crie uma [conta AgentOps](https://agentops.ai/?=crew).
# Execute sua crew
result = crew.kickoff()
Para sugestões de funcionalidades ou relatos de bugs, entre em contato com o time do AgentOps pelo [Repositório do AgentOps](https://github.com/AgentOps-AI/agentops).
# Finalize a sessão AgentOps
agentops.end_session("Success")
```
#### Links Extras
## Integração Automática
O CrewAI se integra automaticamente com o AgentOps quando a biblioteca está instalada. A integração captura:
- **Eventos de Kickoff da Crew**: Início e conclusão de execuções da crew
- **Uso de Ferramentas**: Todas as chamadas de ferramentas e seus resultados
- **Avaliações de Tarefas**: Métricas de desempenho de tarefas e feedback
- **Eventos de Erro**: Quaisquer erros que ocorram durante a execução
## Opções de Configuração
Você pode personalizar a integração do AgentOps:
```python
import agentops
# Configure AgentOps com configurações personalizadas
agentops.init(
api_key="sua-chave-api",
tags=["producao", "analise-dados"],
auto_start_session=True,
instrument_llm_calls=True,
)
```
## Visualizando Seus Dados
1. **Dashboard**: Visite o dashboard do AgentOps para ver suas sessões de agentes
2. **Detalhes da Sessão**: Clique em qualquer sessão para ver rastreamentos detalhados de execução
3. **Análises**: Use a aba de análises para identificar tendências de desempenho
4. **Erros**: Monitore a aba de erros para informações de depuração
## Melhores Práticas
- **Marque Suas Sessões**: Use tags significativas para organizar suas execuções de agentes
- **Monitore Custos**: Acompanhe o uso de LLM e custos associados
- **Revise Erros**: Verifique e resolva regularmente quaisquer erros
- **Otimize Desempenho**: Use análises para identificar gargalos e oportunidades de otimização
## Solução de Problemas
### AgentOps Não Está Gravando Dados
1. Verifique se sua chave API está definida corretamente
2. Verifique se o AgentOps está inicializado adequadamente
3. Certifique-se de estar chamando `agentops.end_session()` no final do seu script
### Eventos Ausentes
Se alguns eventos não estão sendo capturados:
1. Certifique-se de ter a versão mais recente do CrewAI e AgentOps
2. Verifique se o listener do AgentOps está registrado adequadamente
3. Revise os logs para quaisquer mensagens de erro
Esta integração fornece observabilidade abrangente para seus agentes CrewAI, ajudando você a monitorar, depurar e otimizar seus fluxos de trabalho de IA.
<a href="https://twitter.com/agentopsai/">🐦 Twitter</a>
<span>&nbsp;&nbsp;•&nbsp;&nbsp;</span>
<a href="https://discord.gg/JHPt4C7r">📢 Discord</a>
<span>&nbsp;&nbsp;•&nbsp;&nbsp;</span>
<a href="https://app.agentops.ai/?=crew">🖇️ Dashboard AgentOps</a>
<span>&nbsp;&nbsp;•&nbsp;&nbsp;</span>
<a href="https://docs.agentops.ai/introduction">📙 Documentação</a>

View File

@@ -21,6 +21,9 @@ A observabilidade é fundamental para entender como seus agentes CrewAI estão d
### Plataformas de Monitoramento e Rastreamento
<CardGroup cols={2}>
<Card title="AgentOps" icon="paperclip" href="/pt-BR/observability/agentops">
Replays de sessões, métricas e monitoramento para desenvolvimento e produção de agentes.
</Card>
<Card title="LangDB" icon="database" href="/pt-BR/observability/langdb">
Rastreamento ponta a ponta para fluxos de trabalho CrewAI com captura automática de interações de agentes.

View File

@@ -1,145 +0,0 @@
---
title: Integração com a TrueFoundry
icon: chart-line
---
A TrueFoundry fornece um [AI Gateway](https://www.truefoundry.com/ai-gateway) pronto para uso empresarial, que pode ser usado para governança e observabilidade em frameworks agentivos como o CrewAI. O AI Gateway da TrueFoundry funciona como uma interface unificada para acesso a LLMs, oferecendo:
- **Acesso unificado à API**: Conecte-se a 250+ LLMs (OpenAI, Claude, Gemini, Groq, Mistral) por meio de uma única API
- **Baixa latência**: Latência interna abaixo de 3 ms com roteamento inteligente e balanceamento de carga
- **Segurança corporativa**: Conformidade com SOC 2, HIPAA e GDPR, com RBAC e auditoria de logs
- **Gestão de cotas e custos**: Cotas baseadas em tokens, rate limiting e rastreamento abrangente de uso
- **Observabilidade**: Registro completo de requisições/respostas, métricas e traces com retenção personalizável
## Como a TrueFoundry se integra ao CrewAI
### Instalação e configuração
<Steps>
<Step title="Instalar o CrewAI">
```bash
pip install crewai
```
</Step>
<Step title="Obter o token de acesso da TrueFoundry">
1. Crie uma conta na [TrueFoundry](https://www.truefoundry.com/register)
2. Siga os passos do [Início rápido](https://docs.truefoundry.com/gateway/quick-start)
</Step>
<Step title="Configurar o CrewAI com a TrueFoundry">
![Configuração de código da TrueFoundry](/images/new-code-snippet.png)
```python
from crewai import LLM
# Criar uma instância de LLM com o AI Gateway da TrueFoundry
truefoundry_llm = LLM(
model="openai-main/gpt-4o", # Da mesma forma, você pode chamar qualquer modelo de qualquer provedor
base_url="your_truefoundry_gateway_base_url",
api_key="your_truefoundry_api_key"
)
# Usar nos seus agentes do CrewAI
from crewai import Agent
from crewai.project import agent  # o decorador @agent vem de crewai.project

# Dentro de uma classe decorada com @CrewBase:
@agent
def researcher(self) -> Agent:
return Agent(
config=self.agents_config['researcher'],
llm=truefoundry_llm,
verbose=True
)
```
</Step>
</Steps>
### Exemplo completo do CrewAI
```python
from crewai import Agent, Task, Crew, LLM
# Configurar o LLM com a TrueFoundry
llm = LLM(
model="openai-main/gpt-4o",
base_url="your_truefoundry_gateway_base_url",
api_key="your_truefoundry_api_key"
)
# Criar agentes
researcher = Agent(
role='Analista de Pesquisa',
goal='Conduzir pesquisa de mercado detalhada',
backstory='Analista de mercado especialista com atenção aos detalhes',
llm=llm,
verbose=True
)
writer = Agent(
role='Redator de Conteúdo',
goal='Criar relatórios abrangentes',
backstory='Redator técnico experiente',
llm=llm,
verbose=True
)
# Criar tarefas
research_task = Task(
description='Pesquisar tendências do mercado de IA para 2024',
agent=researcher,
expected_output='Resumo de pesquisa abrangente'
)
writing_task = Task(
description='Criar um relatório de pesquisa de mercado',
agent=writer,
expected_output='Relatório bem estruturado com insights',
context=[research_task]
)
# Criar e executar a crew
crew = Crew(
agents=[researcher, writer],
tasks=[research_task, writing_task],
verbose=True
)
result = crew.kickoff()
```
### Observabilidade e governança
Monitore seus agentes do CrewAI pela aba de métricas da TrueFoundry:
![Métricas da TrueFoundry](/images/gateway-metrics.png)
Com o AI Gateway da TrueFoundry, você pode monitorar e analisar:
- **Métricas de desempenho**: Acompanhe métricas-chave de latência como Latência da Requisição, Tempo até o Primeiro Token (TTFS) e Latência entre Tokens (ITL), com percentis P99, P90 e P50
- **Custos e uso de tokens**: Tenha visibilidade dos custos da sua aplicação com detalhamento de tokens de entrada/saída e das despesas associadas a cada modelo
- **Padrões de uso**: Entenda como sua aplicação está sendo utilizada com análises detalhadas sobre atividade de usuários, distribuição de modelos e uso por equipe
- **Limite de taxa e balanceamento de carga**: Você pode configurar rate limiting, balanceamento de carga e fallback para seus modelos
## Rastreamento
Para uma compreensão mais detalhada sobre rastreamento, consulte [getting-started-tracing](https://docs.truefoundry.com/docs/tracing/tracing-getting-started). Para rastreamento, você pode adicionar o SDK do Traceloop:
```bash
pip install traceloop-sdk
```
```python
from traceloop.sdk import Traceloop
# Inicializar rastreamento avançado
Traceloop.init(
api_endpoint="https://your-truefoundry-endpoint/api/tracing",
headers={
"Authorization": f"Bearer {your_truefoundry_pat_token}",
"TFY-Tracing-Project": "your_project_name",
},
)
```
Isso oferece correlação adicional de rastreamentos em todo o seu fluxo de trabalho com o CrewAI.
![Rastreamento do CrewAI na TrueFoundry](/images/tracing_crewai.png)

View File

@@ -48,10 +48,11 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = ["crewai-tools~=0.62.0"]
tools = ["crewai-tools~=0.60.0"]
embeddings = [
"tiktoken~=0.8.0"
]
agentops = ["agentops==0.3.18"]
pdfplumber = [
"pdfplumber>=0.11.4",
]
@@ -68,7 +69,6 @@ docling = [
aisuite = [
"aisuite>=0.1.10",
]
agentops = ["agentops==0.3.18"]
[tool.uv]
dev-dependencies = [
@@ -99,11 +99,6 @@ exclude = ["cli/templates"]
[tool.bandit]
exclude_dirs = ["src/crewai/cli/templates"]
[tool.pytest.ini_options]
markers = [
"telemetry: mark test as a telemetry test (don't mock telemetry)",
]
# PyTorch index configuration, since torch 2.5.0 is not compatible with python 3.13
[[tool.uv.index]]
name = "pytorch-nightly"

57
simple_test_verbose.py Normal file
View File

@@ -0,0 +1,57 @@
#!/usr/bin/env python3
"""Simple test to verify the verbose task name fix works."""
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
from unittest.mock import Mock
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
def test_task_display_name():
"""Test the _get_task_display_name method directly."""
print("Testing _get_task_display_name method...")
formatter = ConsoleFormatter(verbose=True)
task1 = Mock()
task1.name = "Research Market Trends"
task1.id = "12345678-1234-5678-9012-123456789abc"
result1 = formatter._get_task_display_name(task1)
print(f"Test 1 - Task with name: {result1}")
assert "Research Market Trends" in result1
assert "12345678" in result1
print("✅ Test 1 passed")
task2 = Mock()
task2.name = None
task2.description = "Analyze current market trends and provide insights"
task2.id = "87654321-4321-8765-2109-987654321abc"
result2 = formatter._get_task_display_name(task2)
print(f"Test 2 - Task with description: {result2}")
assert "Analyze current market trends" in result2
assert "87654321" in result2
print("✅ Test 2 passed")
task3 = Mock()
task3.name = None
task3.description = None
task3.id = "abcdef12-3456-7890-abcd-ef1234567890"
result3 = formatter._get_task_display_name(task3)
print(f"Test 3 - Task with ID only: {result3}")
assert result3 == "abcdef12-3456-7890-abcd-ef1234567890"
print("✅ Test 3 passed")
print("\n🎉 All tests passed! The verbose task name fix is working correctly.")
return True
if __name__ == "__main__":
try:
test_task_display_name()
print("\n✅ Implementation verified successfully!")
except Exception as e:
print(f"\n❌ Test failed: {e}")
sys.exit(1)

View File

@@ -54,7 +54,7 @@ def _track_install_async():
_track_install_async()
__version__ = "0.159.0"
__version__ = "0.157.0"
__all__ = [
"Agent",
"Crew",

View File

@@ -1,5 +1,5 @@
import time
from typing import TYPE_CHECKING, Dict, List
from typing import TYPE_CHECKING
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
@@ -21,7 +21,6 @@ class CrewAgentExecutorMixin:
task: "Task"
iterations: int
max_iter: int
messages: List[Dict[str, str]]
_i18n: I18N
_printer: Printer = Printer()
@@ -63,7 +62,6 @@ class CrewAgentExecutorMixin:
value=output.text,
metadata={
"description": self.task.description,
"messages": self.messages,
},
agent=self.agent.role,
)
@@ -129,6 +127,7 @@ class CrewAgentExecutorMixin:
def _ask_human_input(self, final_answer: str) -> str:
"""Prompt human input with mode-appropriate messaging."""
event_listener.formatter.pause_live_updates()
try:
self._printer.print(
content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"

View File

@@ -1,13 +1,9 @@
from .utils import TokenManager
class AuthError(Exception):
pass
def get_auth_token() -> str:
"""Get the authentication token."""
access_token = TokenManager().get_token()
if not access_token:
raise AuthError("No token found, make sure you are logged in")
raise Exception("No token found, make sure you are logged in")
return access_token
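# Hypothetical usage sketch (not part of this diff): callers can now catch the
# narrower AuthError instead of a bare Exception, e.g.:
#   try:
#       token = get_auth_token()
#   except AuthError:
#       print("Please log in (e.g. via `crewai login`) and retry")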

View File

@@ -18,7 +18,6 @@ class PlusAPI:
CREWS_RESOURCE = "/crewai_plus/api/v1/crews"
AGENTS_RESOURCE = "/crewai_plus/api/v1/agents"
TRACING_RESOURCE = "/crewai_plus/api/v1/tracing"
EPHEMERAL_TRACING_RESOURCE = "/crewai_plus/api/v1/tracing/ephemeral"
def __init__(self, api_key: str) -> None:
self.api_key = api_key
@@ -125,11 +124,6 @@ class PlusAPI:
"POST", f"{self.TRACING_RESOURCE}/batches", json=payload
)
def initialize_ephemeral_trace_batch(self, payload) -> requests.Response:
return self._make_request(
"POST", f"{self.EPHEMERAL_TRACING_RESOURCE}/batches", json=payload
)
def send_trace_events(self, trace_batch_id: str, payload) -> requests.Response:
return self._make_request(
"POST",
@@ -137,27 +131,9 @@ class PlusAPI:
json=payload,
)
def send_ephemeral_trace_events(
self, trace_batch_id: str, payload
) -> requests.Response:
return self._make_request(
"POST",
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches/{trace_batch_id}/events",
json=payload,
)
def finalize_trace_batch(self, trace_batch_id: str, payload) -> requests.Response:
return self._make_request(
"PATCH",
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/finalize",
json=payload,
)
def finalize_ephemeral_trace_batch(
self, trace_batch_id: str, payload
) -> requests.Response:
return self._make_request(
"PATCH",
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches/{trace_batch_id}/finalize",
json=payload,
)

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.159.0,<1.0.0"
"crewai[tools]>=0.157.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.159.0,<1.0.0",
"crewai[tools]>=0.157.0,<1.0.0",
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.159.0"
"crewai[tools]>=0.157.0"
]
[tool.crewai]

View File

@@ -77,10 +77,7 @@ from crewai.utilities.events.listeners.tracing.trace_listener import (
)
from crewai.utilities.events.listeners.tracing.utils import (
is_tracing_enabled,
on_first_execution_tracing_confirmation,
)
from crewai.utilities.events.listeners.tracing.utils import is_tracing_enabled
from crewai.utilities.formatter import (
aggregate_raw_outputs_from_task_outputs,
aggregate_raw_outputs_from_tasks,
@@ -286,11 +283,8 @@ class Crew(FlowTrackable, BaseModel):
self._cache_handler = CacheHandler()
event_listener = EventListener()
if on_first_execution_tracing_confirmation():
self.tracing = True
if is_tracing_enabled() or self.tracing:
trace_listener = TraceCollectionListener()
trace_listener = TraceCollectionListener(tracing=self.tracing)
trace_listener.setup_listeners(crewai_event_bus)
event_listener.verbose = self.verbose
event_listener.formatter.verbose = self.verbose

View File

@@ -17,13 +17,10 @@ from typing import (
)
from uuid import uuid4
from opentelemetry import baggage
from opentelemetry.context import attach, detach
from pydantic import BaseModel, Field, ValidationError
from crewai.flow.flow_visualizer import plot_flow
from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.types import FlowExecutionData
from crewai.flow.utils import get_possible_return_constants
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.flow_events import (
@@ -38,10 +35,7 @@ from crewai.utilities.events.flow_events import (
from crewai.utilities.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)
from crewai.utilities.events.listeners.tracing.utils import (
is_tracing_enabled,
on_first_execution_tracing_confirmation,
)
from crewai.utilities.events.listeners.tracing.utils import is_tracing_enabled
from crewai.utilities.printer import Printer
logger = logging.getLogger(__name__)
@@ -473,18 +467,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
self._method_execution_counts: Dict[str, int] = {}
self._pending_and_listeners: Dict[str, Set[str]] = {}
self._method_outputs: List[Any] = [] # List to store all method outputs
self._completed_methods: Set[str] = set() # Track completed methods for reload
self._persistence: Optional[FlowPersistence] = persistence
# Initialize state with initial values
self._state = self._create_initial_state()
self.tracing = tracing
if (
on_first_execution_tracing_confirmation()
or is_tracing_enabled()
or self.tracing
):
trace_listener = TraceCollectionListener()
if is_tracing_enabled() or tracing:
trace_listener = TraceCollectionListener(tracing=tracing)
trace_listener.setup_listeners(crewai_event_bus)
# Apply any additional kwargs
if kwargs:
@@ -729,73 +718,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
else:
raise TypeError(f"State must be dict or BaseModel, got {type(self._state)}")
def reload(self, execution_data: FlowExecutionData) -> None:
"""Reloads the flow from an execution data dict.
This method restores the flow's execution ID, completed methods, and state,
allowing it to resume from where it left off.
Args:
execution_data: Flow execution data containing:
- id: Flow execution ID
- flow: Flow structure
- completed_methods: List of successfully completed methods
- execution_methods: All execution methods with their status
"""
flow_id = execution_data.get("id")
if flow_id:
self._update_state_field("id", flow_id)
self._completed_methods = {
name
for method_data in execution_data.get("completed_methods", [])
if (name := method_data.get("flow_method", {}).get("name")) is not None
}
execution_methods = execution_data.get("execution_methods", [])
if not execution_methods:
return
sorted_methods = sorted(
execution_methods,
key=lambda m: m.get("started_at", ""),
)
state_to_apply = None
for method in reversed(sorted_methods):
if method.get("final_state"):
state_to_apply = method["final_state"]
break
if not state_to_apply and sorted_methods:
last_method = sorted_methods[-1]
if last_method.get("initial_state"):
state_to_apply = last_method["initial_state"]
if state_to_apply:
self._apply_state_updates(state_to_apply)
for i, method in enumerate(sorted_methods[:-1]):
method_name = method.get("flow_method", {}).get("name")
if method_name:
self._completed_methods.add(method_name)
def _update_state_field(self, field_name: str, value: Any) -> None:
"""Update a single field in the state."""
if isinstance(self._state, dict):
self._state[field_name] = value
elif hasattr(self._state, field_name):
object.__setattr__(self._state, field_name, value)
def _apply_state_updates(self, updates: Dict[str, Any]) -> None:
"""Apply multiple state updates efficiently."""
if isinstance(self._state, dict):
self._state.update(updates)
elif hasattr(self._state, "__dict__"):
for key, value in updates.items():
if hasattr(self._state, key):
object.__setattr__(self._state, key, value)
def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
"""
Start the flow execution in a synchronous context.
@@ -824,81 +746,68 @@ class Flow(Generic[T], metaclass=FlowMeta):
Returns:
The final output from the flow, which is the result of the last executed method.
"""
ctx = baggage.set_baggage("flow_inputs", inputs or {})
flow_token = attach(ctx)
if inputs:
# Override the id in the state if it exists in inputs
if "id" in inputs:
if isinstance(self._state, dict):
self._state["id"] = inputs["id"]
elif isinstance(self._state, BaseModel):
setattr(self._state, "id", inputs["id"])
try:
# Reset flow state for fresh execution unless restoring from persistence
is_restoring = inputs and "id" in inputs and self._persistence is not None
if not is_restoring:
# Clear completed methods and outputs for a fresh start
self._completed_methods.clear()
self._method_outputs.clear()
# If persistence is enabled, attempt to restore the stored state using the provided id.
if "id" in inputs and self._persistence is not None:
restore_uuid = inputs["id"]
stored_state = self._persistence.load_state(restore_uuid)
if stored_state:
self._log_flow_event(
f"Loading flow state from memory for UUID: {restore_uuid}",
color="yellow",
)
self._restore_state(stored_state)
else:
self._log_flow_event(
f"No flow state found for UUID: {restore_uuid}", color="red"
)
if inputs:
# Override the id in the state if it exists in inputs
if "id" in inputs:
if isinstance(self._state, dict):
self._state["id"] = inputs["id"]
elif isinstance(self._state, BaseModel):
setattr(self._state, "id", inputs["id"])
# Update state with any additional inputs (ignoring the 'id' key)
filtered_inputs = {k: v for k, v in inputs.items() if k != "id"}
if filtered_inputs:
self._initialize_state(filtered_inputs)
# If persistence is enabled, attempt to restore the stored state using the provided id.
if "id" in inputs and self._persistence is not None:
restore_uuid = inputs["id"]
stored_state = self._persistence.load_state(restore_uuid)
if stored_state:
self._log_flow_event(
f"Loading flow state from memory for UUID: {restore_uuid}",
color="yellow",
)
self._restore_state(stored_state)
else:
self._log_flow_event(
f"No flow state found for UUID: {restore_uuid}", color="red"
)
# Emit FlowStartedEvent and log the start of the flow.
crewai_event_bus.emit(
self,
FlowStartedEvent(
type="flow_started",
flow_name=self.name or self.__class__.__name__,
inputs=inputs,
),
)
self._log_flow_event(
f"Flow started with ID: {self.flow_id}", color="bold_magenta"
)
# Update state with any additional inputs (ignoring the 'id' key)
filtered_inputs = {k: v for k, v in inputs.items() if k != "id"}
if filtered_inputs:
self._initialize_state(filtered_inputs)
if inputs is not None and "id" not in inputs:
self._initialize_state(inputs)
# Emit FlowStartedEvent and log the start of the flow.
crewai_event_bus.emit(
self,
FlowStartedEvent(
type="flow_started",
flow_name=self.name or self.__class__.__name__,
inputs=inputs,
),
)
self._log_flow_event(
f"Flow started with ID: {self.flow_id}", color="bold_magenta"
)
tasks = [
self._execute_start_method(start_method)
for start_method in self._start_methods
]
await asyncio.gather(*tasks)
if inputs is not None and "id" not in inputs:
self._initialize_state(inputs)
final_output = self._method_outputs[-1] if self._method_outputs else None
tasks = [
self._execute_start_method(start_method)
for start_method in self._start_methods
]
await asyncio.gather(*tasks)
crewai_event_bus.emit(
self,
FlowFinishedEvent(
type="flow_finished",
flow_name=self.name or self.__class__.__name__,
result=final_output,
),
)
final_output = self._method_outputs[-1] if self._method_outputs else None
crewai_event_bus.emit(
self,
FlowFinishedEvent(
type="flow_finished",
flow_name=self.name or self.__class__.__name__,
result=final_output,
),
)
return final_output
finally:
detach(flow_token)
return final_output
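
The kickoff path above wraps execution in an OpenTelemetry baggage context via set_baggage / attach / detach. A minimal standalone sketch of that pattern using only opentelemetry-api; the "flow_inputs" key comes from the diff, while the reader function and sample inputs are illustrative.

from opentelemetry import baggage
from opentelemetry.context import attach, detach

def read_flow_inputs():
    # Anything attached to the current context is visible downstream.
    return baggage.get_baggage("flow_inputs")

inputs = {"topic": "dog"}
token = attach(baggage.set_baggage("flow_inputs", inputs))
try:
    print(read_flow_inputs())  # {'topic': 'dog'}
finally:
    # Always detach, mirroring the try/finally around kickoff_async above.
    detach(token)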
async def _execute_start_method(self, start_method_name: str) -> None:
"""
@@ -917,13 +826,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
- Executes the start method and captures its result
- Triggers execution of any listeners waiting on this start method
- Part of the flow's initialization sequence
- Skips execution if method was already completed (e.g., after reload)
"""
if start_method_name in self._completed_methods:
last_output = self._method_outputs[-1] if self._method_outputs else None
await self._execute_listeners(start_method_name, last_output)
return
result = await self._execute_method(
start_method_name, self._methods[start_method_name]
)
@@ -958,7 +861,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
self._method_execution_counts.get(method_name, 0) + 1
)
self._completed_methods.add(method_name)
crewai_event_bus.emit(
self,
MethodExecutionFinishedEvent(
@@ -1121,18 +1023,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
- Handles errors gracefully with detailed logging
- Recursively triggers listeners of this listener
- Supports both parameterized and parameter-less listeners
- Skips execution if method was already completed (e.g., after reload)
Error Handling
-------------
Catches and logs any exceptions during execution, preventing
individual listener failures from breaking the entire flow.
"""
# TODO: greyson fix
# if listener_name in self._completed_methods:
# await self._execute_listeners(listener_name, None)
# return
try:
method = self._methods[listener_name]
@@ -1151,7 +1047,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
await self._execute_listeners(listener_name, listener_result)
except Exception as e:
logger.error(f"Error executing listener {listener_name}: {e}")
print(
f"[Flow._execute_single_listener] Error in method {listener_name}: {e}"
)
import traceback
traceback.print_exc()
raise
def _log_flow_event(

View File

@@ -1,95 +0,0 @@
"""Type definitions for CrewAI Flow module.
This module contains TypedDict definitions and type aliases used throughout
the Flow system.
"""
from typing import Any, TypedDict
from typing_extensions import NotRequired, Required
class FlowMethodData(TypedDict):
"""Flow method information.
Attributes:
name: The name of the flow method.
starting_point: Whether this method is a starting point for the flow.
"""
name: str
starting_point: NotRequired[bool]
class CompletedMethodData(TypedDict):
"""Completed method information.
Represents a flow method that has been successfully executed.
Attributes:
flow_method: The flow method information.
status: The completion status of the method.
"""
flow_method: FlowMethodData
status: str
class ExecutionMethodData(TypedDict, total=False):
"""Execution method information.
Contains detailed information about a method's execution, including
timing, state, and any error details.
Attributes:
flow_method: The flow method information.
started_at: ISO timestamp when the method started execution.
finished_at: ISO timestamp when the method finished execution, if completed.
status: Current status of the method execution.
initial_state: The state before method execution.
final_state: The state after method execution.
error_details: Details about any error that occurred during execution.
"""
flow_method: Required[FlowMethodData]
started_at: Required[str]
status: Required[str]
finished_at: str
initial_state: dict[str, Any]
final_state: dict[str, Any]
error_details: dict[str, Any]
class FlowData(TypedDict):
"""Flow structure information.
Contains metadata about the flow structure and its methods.
Attributes:
name: The name of the flow.
flow_methods_attributes: List of all flow methods and their attributes.
"""
name: str
flow_methods_attributes: list[FlowMethodData]
class FlowExecutionData(TypedDict):
"""Flow execution data.
Complete execution data for a flow, including its current state,
completed methods, and execution history. Used for resuming flows
from a previous state.
Attributes:
id: Unique identifier for the flow execution.
flow: Flow structure and metadata.
inputs: Input data provided to the flow.
completed_methods: List of methods that have been successfully completed.
execution_methods: Detailed execution history for all methods.
"""
id: str
flow: FlowData
inputs: dict[str, Any]
completed_methods: list[CompletedMethodData]
execution_methods: list[ExecutionMethodData]
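
To make the shape concrete, a hand-built payload matching these TypedDicts, plus the state-selection rule the reload() method applies to it: take the most recent method's final_state, falling back to the last method's initial_state. Method names and state values are invented for illustration.

from typing import Any

execution_data: dict[str, Any] = {
    "id": "exec-123",
    "flow": {
        "name": "research_flow",
        "flow_methods_attributes": [
            {"name": "fetch", "starting_point": True},
            {"name": "summarize"},
        ],
    },
    "inputs": {"topic": "dog"},
    "completed_methods": [
        {"flow_method": {"name": "fetch"}, "status": "completed"},
    ],
    "execution_methods": [
        {
            "flow_method": {"name": "fetch"},
            "started_at": "2025-08-13T12:00:00Z",
            "status": "completed",
            "final_state": {"docs": 3},
        },
        {
            "flow_method": {"name": "summarize"},
            "started_at": "2025-08-13T12:01:00Z",
            "status": "failed",
            "initial_state": {"docs": 3},
        },
    ],
}

def pick_state_to_apply(methods: list[dict[str, Any]]) -> dict[str, Any] | None:
    # Same rule as reload(): newest final_state wins, else the last initial_state.
    ordered = sorted(methods, key=lambda m: m.get("started_at", ""))
    for method in reversed(ordered):
        if method.get("final_state"):
            return method["final_state"]
    return ordered[-1].get("initial_state") if ordered else None

print(pick_state_to_apply(execution_data["execution_methods"]))  # {'docs': 3}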

View File

@@ -11,8 +11,6 @@ import chromadb.errors
from chromadb.api import ClientAPI
from chromadb.api.types import OneOrMany
from chromadb.config import Settings
from pydantic.warnings import PydanticDeprecatedSince211
import warnings
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
from crewai.rag.embeddings.configurator import EmbeddingConfigurator
@@ -87,15 +85,6 @@ class KnowledgeStorage(BaseKnowledgeStorage):
raise Exception("Collection not initialized")
def initialize_knowledge_storage(self):
# Suppress deprecation warnings from chromadb, which are not relevant to us
# TODO: Remove this once we upgrade chromadb to at least 1.0.8.
warnings.filterwarnings(
"ignore",
category=PydanticDeprecatedSince211,
message=r".*'model_fields'.*is deprecated.*",
module=r"^chromadb(\.|$)",
)
self.app = create_persistent_client(
path=os.path.join(db_storage_path(), "knowledge"),
settings=Settings(allow_reset=True),
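
The suppression being removed here is an instance of a general stdlib pattern: silence one specific warning keyed on category, message regex, and emitting module, while everything else still surfaces. A self-contained sketch using a generic DeprecationWarning; the pydantic/chromadb specifics above are just one application of it.

import warnings

# Ignore a single deprecation notice by category, message, and source module;
# unrelated warnings are unaffected.
warnings.filterwarnings(
    "ignore",
    category=DeprecationWarning,
    message=r".*'model_fields'.*is deprecated.*",
    module=r"^chromadb(\.|$)",
)

warnings.warn("still visible", UserWarning)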

View File

@@ -12,8 +12,6 @@ from crewai.rag.embeddings.configurator import EmbeddingConfigurator
from crewai.utilities.chromadb import create_persistent_client
from crewai.utilities.constants import MAX_FILE_NAME_LENGTH
from crewai.utilities.paths import db_storage_path
import warnings
from pydantic.warnings import PydanticDeprecatedSince211
@contextlib.contextmanager
@@ -64,15 +62,6 @@ class RAGStorage(BaseRAGStorage):
def _initialize_app(self):
from chromadb.config import Settings
# Suppress deprecation warnings from chromadb, which are not relevant to us
# TODO: Remove this once we upgrade chromadb to at least 1.0.8.
warnings.filterwarnings(
"ignore",
category=PydanticDeprecatedSince211,
message=r".*'model_fields'.*is deprecated.*",
module=r"^chromadb(\.|$)",
)
self._set_embedder_config()
self.app = create_persistent_client(

View File

@@ -1,18 +0,0 @@
from typing import List, Dict, TypedDict
class HITLResumeInfo(TypedDict, total=False):
"""HITL resume information passed from flow to crew."""
task_id: str
crew_execution_id: str
task_key: str
task_output: str
human_feedback: str
previous_messages: List[Dict[str, str]]
class CrewInputsWithHITL(TypedDict, total=False):
"""Crew inputs that may contain HITL resume information."""
_hitl_resume: HITLResumeInfo
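
A sketch of what a crew input dict carrying this resume information might look like; the keys follow the TypedDicts above, while every value is invented for illustration.

hitl_resume = {
    "task_id": "3f9c2e10-0000-0000-0000-000000000000",        # placeholder IDs
    "crew_execution_id": "a1b2c3d4-0000-0000-0000-000000000000",
    "task_key": "review_report",
    "task_output": "Draft report v1",
    "human_feedback": "Tighten the summary section.",
    "previous_messages": [
        {"role": "assistant", "content": "Here is the draft."},
        {"role": "user", "content": "Please revise."},
    ],
}

crew_inputs = {"topic": "dog", "_hitl_resume": hitl_resume}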

View File

@@ -71,6 +71,7 @@ from .third_party.agentops_listener import agentops_listener
__all__ = [
"EventListener",
"agentops_listener",
"CrewAIEventsBus",
"crewai_event_bus",
"AgentExecutionStartedEvent",
@@ -104,6 +105,7 @@ __all__ = [
"MemoryRetrievalStartedEvent",
"MemoryRetrievalCompletedEvent",
"EventListener",
"agentops_listener",
"CrewKickoffStartedEvent",
"CrewKickoffCompletedEvent",
"CrewKickoffFailedEvent",
@@ -122,5 +124,4 @@ __all__ = [
"ToolSelectionErrorEvent",
"ToolUsageEvent",
"ToolValidateInputErrorEvent",
"agentops_listener",
]

View File

@@ -162,7 +162,7 @@ class EventListener(BaseEventListener):
span = self._telemetry.task_started(crew=source.agent.crew, task=source)
self.execution_spans[source] = span
self.formatter.create_task_branch(
self.formatter.current_crew_tree, source.id
self.formatter.current_crew_tree, source
)
@crewai_event_bus.on(TaskCompletedEvent)
@@ -175,7 +175,7 @@ class EventListener(BaseEventListener):
self.formatter.update_task_status(
self.formatter.current_crew_tree,
source.id,
source,
source.agent.role,
"completed",
)
@@ -190,7 +190,7 @@ class EventListener(BaseEventListener):
self.formatter.update_task_status(
self.formatter.current_crew_tree,
source.id,
source,
source.agent.role,
"failed",
)

View File

@@ -4,7 +4,7 @@ from typing import Dict, List, Any, Optional
from dataclasses import dataclass, field
from crewai.utilities.constants import CREWAI_BASE_URL
from crewai.cli.authentication.token import AuthError, get_auth_token
from crewai.cli.authentication.token import get_auth_token
from crewai.cli.version import get_crewai_version
from crewai.cli.plus_api import PlusAPI
@@ -41,21 +41,14 @@ class TraceBatchManager:
"""Single responsibility: Manage batches and event buffering"""
def __init__(self):
try:
self.plus_api = PlusAPI(api_key=get_auth_token())
except AuthError:
self.plus_api = PlusAPI(api_key="")
self.plus_api = PlusAPI(api_key=get_auth_token())
self.trace_batch_id: Optional[str] = None # Backend ID
self.current_batch: Optional[TraceBatch] = None
self.event_buffer: List[TraceEvent] = []
self.execution_start_times: Dict[str, datetime] = {}
def initialize_batch(
self,
user_context: Dict[str, str],
execution_metadata: Dict[str, Any],
use_ephemeral: bool = False,
self, user_context: Dict[str, str], execution_metadata: Dict[str, Any]
) -> TraceBatch:
"""Initialize a new trace batch"""
self.current_batch = TraceBatch(
@@ -64,15 +57,13 @@ class TraceBatchManager:
self.event_buffer.clear()
self.record_start_time("execution")
self._initialize_backend_batch(user_context, execution_metadata, use_ephemeral)
self._initialize_backend_batch(user_context, execution_metadata)
return self.current_batch
def _initialize_backend_batch(
self,
user_context: Dict[str, str],
execution_metadata: Dict[str, Any],
use_ephemeral: bool = False,
self, user_context: Dict[str, str], execution_metadata: Dict[str, Any]
):
"""Send batch initialization to backend"""
@@ -83,7 +74,6 @@ class TraceBatchManager:
payload = {
"trace_id": self.current_batch.batch_id,
"execution_type": execution_metadata.get("execution_type", "crew"),
"user_identifier": execution_metadata.get("user_context", None),
"execution_context": {
"crew_fingerprint": execution_metadata.get("crew_fingerprint"),
"crew_name": execution_metadata.get("crew_name", None),
@@ -101,22 +91,12 @@ class TraceBatchManager:
"execution_started_at": datetime.now(timezone.utc).isoformat(),
},
}
if use_ephemeral:
payload["ephemeral_trace_id"] = self.current_batch.batch_id
response = (
self.plus_api.initialize_ephemeral_trace_batch(payload)
if use_ephemeral
else self.plus_api.initialize_trace_batch(payload)
)
response = self.plus_api.initialize_trace_batch(payload)
if response.status_code == 201 or response.status_code == 200:
response_data = response.json()
self.trace_batch_id = (
response_data["trace_id"]
if not use_ephemeral
else response_data["ephemeral_trace_id"]
)
self.trace_batch_id = response_data["trace_id"]
console = Console()
panel = Panel(
f"✅ Trace batch initialized with session ID: {self.trace_batch_id}",
@@ -136,7 +116,7 @@ class TraceBatchManager:
"""Add event to buffer"""
self.event_buffer.append(trace_event)
def _send_events_to_backend(self, ephemeral: bool = True):
def _send_events_to_backend(self):
"""Send buffered events to backend"""
if not self.plus_api or not self.trace_batch_id or not self.event_buffer:
return
@@ -154,11 +134,7 @@ class TraceBatchManager:
if not self.trace_batch_id:
raise Exception("❌ Trace batch ID not found")
response = (
self.plus_api.send_ephemeral_trace_events(self.trace_batch_id, payload)
if ephemeral
else self.plus_api.send_trace_events(self.trace_batch_id, payload)
)
response = self.plus_api.send_trace_events(self.trace_batch_id, payload)
if response.status_code == 200 or response.status_code == 201:
self.event_buffer.clear()
@@ -170,15 +146,15 @@ class TraceBatchManager:
except Exception as e:
logger.error(f"❌ Error sending events to backend: {str(e)}")
def finalize_batch(self, ephemeral: bool = True) -> Optional[TraceBatch]:
def finalize_batch(self) -> Optional[TraceBatch]:
"""Finalize batch and return it for sending"""
if not self.current_batch:
return None
if self.event_buffer:
self._send_events_to_backend(ephemeral)
self._send_events_to_backend()
self._finalize_backend_batch(ephemeral)
self._finalize_backend_batch()
self.current_batch.events = self.event_buffer.copy()
@@ -192,7 +168,7 @@ class TraceBatchManager:
return finalized_batch
def _finalize_backend_batch(self, ephemeral: bool = True):
def _finalize_backend_batch(self):
"""Send batch finalization to backend"""
if not self.plus_api or not self.trace_batch_id:
return
@@ -206,24 +182,12 @@ class TraceBatchManager:
"final_event_count": total_events,
}
response = (
self.plus_api.finalize_ephemeral_trace_batch(
self.trace_batch_id, payload
)
if ephemeral
else self.plus_api.finalize_trace_batch(self.trace_batch_id, payload)
)
response = self.plus_api.finalize_trace_batch(self.trace_batch_id, payload)
if response.status_code == 200:
access_code = response.json().get("access_code", None)
console = Console()
return_link = (
f"{CREWAI_BASE_URL}/crewai_plus/trace_batches/{self.trace_batch_id}"
if not ephemeral and access_code
else f"{CREWAI_BASE_URL}/crewai_plus/ephemeral_trace_batches/{self.trace_batch_id}?access_code={access_code}"
)
panel = Panel(
f"✅ Trace batch finalized with session ID: {self.trace_batch_id}. View here: {return_link} {f', Access Code: {access_code}' if access_code else ''}",
f"✅ Trace batch finalized with session ID: {self.trace_batch_id}. View here: {CREWAI_BASE_URL}/crewai_plus/trace_batches/{self.trace_batch_id}",
title="Trace Batch Finalization",
border_style="green",
)

View File

@@ -13,6 +13,7 @@ from crewai.utilities.events.agent_events import (
AgentExecutionErrorEvent,
)
from crewai.utilities.events.listeners.tracing.types import TraceEvent
from crewai.utilities.events.listeners.tracing.utils import is_tracing_enabled
from crewai.utilities.events.reasoning_events import (
AgentReasoningStartedEvent,
AgentReasoningCompletedEvent,
@@ -66,7 +67,7 @@ from crewai.utilities.events.memory_events import (
MemorySaveFailedEvent,
)
from crewai.cli.authentication.token import AuthError, get_auth_token
from crewai.cli.authentication.token import get_auth_token
from crewai.cli.version import get_crewai_version
@@ -75,12 +76,13 @@ class TraceCollectionListener(BaseEventListener):
Trace collection listener that orchestrates trace collection
"""
trace_enabled: Optional[bool] = False
complex_events = ["task_started", "llm_call_started", "llm_call_completed"]
_instance = None
_initialized = False
def __new__(cls, batch_manager=None):
def __new__(cls, batch_manager=None, tracing: Optional[bool] = False):
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
@@ -88,22 +90,25 @@ class TraceCollectionListener(BaseEventListener):
def __init__(
self,
batch_manager: Optional[TraceBatchManager] = None,
tracing: Optional[bool] = False,
):
if self._initialized:
return
super().__init__()
self.batch_manager = batch_manager or TraceBatchManager()
self.tracing = tracing or False
self.trace_enabled = self._check_trace_enabled()
self._initialized = True
def _check_authenticated(self) -> bool:
def _check_trace_enabled(self) -> bool:
"""Check if tracing should be enabled"""
try:
res = bool(get_auth_token())
return res
except AuthError:
auth_token = get_auth_token()
if not auth_token:
return False
return is_tracing_enabled() or self.tracing
def _get_user_context(self) -> Dict[str, str]:
"""Extract user context for tracing"""
return {
@@ -115,6 +120,8 @@ class TraceCollectionListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):
"""Setup event listeners - delegates to specific handlers"""
if not self.trace_enabled:
return
self._register_flow_event_handlers(crewai_event_bus)
self._register_context_event_handlers(crewai_event_bus)
@@ -160,13 +167,13 @@ class TraceCollectionListener(BaseEventListener):
@event_bus.on(CrewKickoffStartedEvent)
def on_crew_started(source, event):
if not self.batch_manager.is_batch_initialized():
self._initialize_crew_batch(source, event)
self._initialize_batch(source, event)
self._handle_trace_event("crew_kickoff_started", source, event)
@event_bus.on(CrewKickoffCompletedEvent)
def on_crew_completed(source, event):
self._handle_trace_event("crew_kickoff_completed", source, event)
self.batch_manager.finalize_batch(ephemeral=True)
self.batch_manager.finalize_batch()
@event_bus.on(CrewKickoffFailedEvent)
def on_crew_failed(source, event):
@@ -280,7 +287,7 @@ class TraceCollectionListener(BaseEventListener):
def on_agent_reasoning_failed(source, event):
self._handle_action_event("agent_reasoning_failed", source, event)
def _initialize_crew_batch(self, source: Any, event: Any):
def _initialize_batch(self, source: Any, event: Any):
"""Initialize trace batch"""
user_context = self._get_user_context()
execution_metadata = {
@@ -289,7 +296,7 @@ class TraceCollectionListener(BaseEventListener):
"crewai_version": get_crewai_version(),
}
self._initialize_batch(user_context, execution_metadata)
self.batch_manager.initialize_batch(user_context, execution_metadata)
def _initialize_flow_batch(self, source: Any, event: Any):
"""Initialize trace batch for Flow execution"""
@@ -301,20 +308,7 @@ class TraceCollectionListener(BaseEventListener):
"execution_type": "flow",
}
self._initialize_batch(user_context, execution_metadata)
def _initialize_batch(
self, user_context: Dict[str, str], execution_metadata: Dict[str, Any]
):
"""Initialize trace batch if ephemeral"""
if not self._check_authenticated():
self.batch_manager.initialize_batch(
user_context, execution_metadata, use_ephemeral=True
)
else:
self.batch_manager.initialize_batch(
user_context, execution_metadata, use_ephemeral=False
)
self.batch_manager.initialize_batch(user_context, execution_metadata)
def _handle_trace_event(self, event_type: str, source: Any, event: Any):
"""Generic handler for context end events"""

View File

@@ -1,153 +1,5 @@
import os
import platform
import uuid
import hashlib
import subprocess
import getpass
from pathlib import Path
from datetime import datetime
import re
import json
import click
from crewai.utilities.paths import db_storage_path
def is_tracing_enabled() -> bool:
return os.getenv("CREWAI_TRACING_ENABLED", "false").lower() == "true"
def on_first_execution_tracing_confirmation() -> bool:
if _is_test_environment():
return False
if is_first_execution():
mark_first_execution_done()
return click.confirm(
"This is the first execution of CrewAI. Do you want to enable tracing?",
default=True,
show_default=True,
)
return False
def _is_test_environment() -> bool:
"""Detect if we're running in a test environment."""
return os.environ.get("CREWAI_TESTING", "").lower() == "true"
def _get_machine_id() -> str:
"""Stable, privacy-preserving machine fingerprint (cross-platform)."""
parts = []
try:
mac = ":".join(
["{:02x}".format((uuid.getnode() >> b) & 0xFF) for b in range(0, 12, 2)][
::-1
]
)
parts.append(mac)
except Exception:
pass
sysname = platform.system()
parts.append(sysname)
try:
if sysname == "Darwin":
res = subprocess.run(
["system_profiler", "SPHardwareDataType"],
capture_output=True,
text=True,
timeout=2,
)
m = re.search(r"Hardware UUID:\s*([A-Fa-f0-9\-]+)", res.stdout)
if m:
parts.append(m.group(1))
elif sysname == "Linux":
try:
parts.append(Path("/etc/machine-id").read_text().strip())
except Exception:
parts.append(Path("/sys/class/dmi/id/product_uuid").read_text().strip())
elif sysname == "Windows":
res = subprocess.run(
["wmic", "csproduct", "get", "UUID"],
capture_output=True,
text=True,
timeout=2,
)
lines = [line.strip() for line in res.stdout.splitlines() if line.strip()]
if len(lines) >= 2:
parts.append(lines[1])
except Exception:
pass
return hashlib.sha256("".join(parts).encode()).hexdigest()
def _user_data_file() -> Path:
base = Path(db_storage_path())
base.mkdir(parents=True, exist_ok=True)
return base / ".crewai_user.json"
def _load_user_data() -> dict:
p = _user_data_file()
if p.exists():
try:
return json.loads(p.read_text())
except Exception:
pass
return {}
def _save_user_data(data: dict) -> None:
try:
p = _user_data_file()
p.write_text(json.dumps(data, indent=2))
except Exception:
pass
def get_user_id() -> str:
"""Stable, anonymized user identifier with caching."""
data = _load_user_data()
if "user_id" in data:
return data["user_id"]
try:
username = getpass.getuser()
except Exception:
username = "unknown"
seed = f"{username}|{_get_machine_id()}"
uid = hashlib.sha256(seed.encode()).hexdigest()
data["user_id"] = uid
_save_user_data(data)
return uid
def is_first_execution() -> bool:
"""True if this is the first execution for this user."""
data = _load_user_data()
return not data.get("first_execution_done", False)
def mark_first_execution_done() -> None:
"""Mark that the first execution has been completed."""
data = _load_user_data()
if data.get("first_execution_done", False):
return
data.update(
{
"first_execution_done": True,
"first_execution_at": datetime.now().timestamp(),
"user_id": get_user_id(),
"machine_id": _get_machine_id(),
}
)
_save_user_data(data)
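
The helpers above reduce to one idea: hash the username together with a machine fingerprint so the identifier is stable across runs but not reversible. A stdlib-only sketch of that idea, with uuid.getnode() (the MAC address) standing in for the richer per-OS lookups in _get_machine_id.

import getpass
import hashlib
import uuid

def anonymous_user_id() -> str:
    try:
        username = getpass.getuser()
    except Exception:
        username = "unknown"
    # uuid.getnode() returns the MAC as an int; stable on most machines.
    machine = f"{uuid.getnode():012x}"
    seed = f"{username}|{machine}"
    return hashlib.sha256(seed.encode()).hexdigest()

print(anonymous_user_id())  # same value on every run for this user/machine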

View File

@@ -1 +1 @@
from .agentops_listener import agentops_listener as agentops_listener
from .agentops_listener import agentops_listener

View File

@@ -1,137 +1,67 @@
import logging
from typing import Optional
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus
from crewai.utilities.events.crew_events import (
from crewai.utilities.events import (
CrewKickoffCompletedEvent,
CrewKickoffStartedEvent,
)
from crewai.utilities.events.task_events import TaskEvaluationEvent
from crewai.utilities.events.tool_usage_events import (
ToolUsageErrorEvent,
ToolUsageStartedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
from crewai.utilities.events.task_events import TaskEvaluationEvent
logger = logging.getLogger(__name__)
try:
import agentops
AGENTOPS_INSTALLED = True
except ImportError:
AGENTOPS_INSTALLED = False
class AgentOpsListener(BaseEventListener):
tool_event: Optional["agentops.ToolEvent"] = None
session: Optional["agentops.Session"] = None
def __init__(self):
self.agentops = None
try:
import agentops
self.agentops = agentops
logger.info("AgentOps integration enabled")
except ImportError:
logger.debug("AgentOps not installed, skipping AgentOps integration")
super().__init__()
def setup_listeners(self, crewai_event_bus: CrewAIEventsBus):
if self.agentops is None:
def setup_listeners(self, crewai_event_bus):
if not AGENTOPS_INSTALLED:
return
@crewai_event_bus.on(CrewKickoffStartedEvent)
def on_crew_kickoff_started(source, event):
self._handle_crew_kickoff_started(source, event)
def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
self.session = agentops.init()
for agent in source.agents:
if self.session:
self.session.create_agent(
name=agent.role,
agent_id=str(agent.id),
)
@crewai_event_bus.on(CrewKickoffCompletedEvent)
def on_crew_kickoff_completed(source, event):
self._handle_crew_kickoff_completed(source, event)
def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
if self.session:
self.session.end_session(
end_state="Success",
end_state_reason="Finished Execution",
)
@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_usage_started(source, event):
self._handle_tool_usage_started(source, event)
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
self.tool_event = agentops.ToolEvent(name=event.tool_name)
if self.session:
self.session.record(self.tool_event)
@crewai_event_bus.on(ToolUsageErrorEvent)
def on_tool_usage_error(source, event):
self._handle_tool_usage_error(source, event)
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
@crewai_event_bus.on(TaskEvaluationEvent)
def on_task_evaluation(source, event):
self._handle_task_evaluation(source, event)
def _handle_crew_kickoff_started(self, source, event: CrewKickoffStartedEvent):
if self.agentops is None:
return
try:
self.agentops.start_session(
tags=["crewai", "crew_kickoff"],
config=self.agentops.Configuration(
auto_start_session=False,
instrument_llm_calls=True,
),
)
logger.debug("AgentOps session started for crew kickoff")
except Exception as e:
logger.warning(f"Failed to start AgentOps session: {e}")
def _handle_crew_kickoff_completed(self, source, event: CrewKickoffCompletedEvent):
if self.agentops is None:
return
try:
self.agentops.end_session("Success")
logger.debug("AgentOps session ended for crew kickoff completion")
except Exception as e:
logger.warning(f"Failed to end AgentOps session: {e}")
def _handle_tool_usage_started(self, source, event: ToolUsageStartedEvent):
if self.agentops is None:
return
try:
self.agentops.record(
self.agentops.ActionEvent(
action_type="tool_usage",
params={
"tool_name": event.tool_name,
"tool_args": event.tool_args,
},
def on_task_evaluation(source, event: TaskEvaluationEvent):
if self.session:
self.session.create_agent(
name="Task Evaluator", agent_id=str(source.original_agent.id)
)
)
logger.debug(f"AgentOps recorded tool usage: {event.tool_name}")
except Exception as e:
logger.warning(f"Failed to record tool usage in AgentOps: {e}")
def _handle_tool_usage_error(self, source, event: ToolUsageErrorEvent):
if self.agentops is None:
return
try:
self.agentops.record(
self.agentops.ErrorEvent(
message=f"Tool usage error: {event.error}",
error_type="ToolUsageError",
details={
"tool_name": event.tool_name,
"tool_args": event.tool_args,
},
)
)
logger.debug(f"AgentOps recorded tool usage error: {event.tool_name}")
except Exception as e:
logger.warning(f"Failed to record tool usage error in AgentOps: {e}")
def _handle_task_evaluation(self, source, event: TaskEvaluationEvent):
if self.agentops is None:
return
try:
self.agentops.record(
self.agentops.ActionEvent(
action_type="task_evaluation",
params={
"evaluation_type": event.evaluation_type,
"task": str(event.task) if event.task else None,
},
)
)
logger.debug(f"AgentOps recorded task evaluation: {event.evaluation_type}")
except Exception as e:
logger.warning(f"Failed to record task evaluation in AgentOps: {e}")
agentops_listener = AgentOpsListener()
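
Both versions of this listener rely on the same optional-dependency guard: attempt the import once, remember the result, and make every handler a no-op when the package is absent. A generic sketch of that pattern; the somepkg name and its init() call are hypothetical.

import logging

logger = logging.getLogger(__name__)

try:
    import somepkg  # hypothetical optional dependency
    SOMEPKG_INSTALLED = True
except ImportError:
    somepkg = None
    SOMEPKG_INSTALLED = False

class OptionalIntegration:
    def setup(self) -> None:
        if not SOMEPKG_INSTALLED:
            logger.debug("somepkg not installed, skipping integration")
            return
        # Register handlers only when the dependency is actually available.
        somepkg.init()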

View File

@@ -220,14 +220,16 @@ class ConsoleFormatter:
return tree
def create_task_branch(
self, crew_tree: Optional[Tree], task_id: str
self, crew_tree: Optional[Tree], task: Any
) -> Optional[Tree]:
"""Create and initialize a task branch."""
if not self.verbose:
return None
task_display = self._get_task_display_name(task)
task_content = Text()
task_content.append(f"📋 Task: {task_id}", style="yellow bold")
task_content.append(f"📋 Task: {task_display}", style="yellow bold")
task_content.append("\nStatus: ", style="white")
task_content.append("Executing Task...", style="yellow dim")
@@ -248,7 +250,7 @@ class ConsoleFormatter:
def update_task_status(
self,
crew_tree: Optional[Tree],
task_id: str,
task: Any,
agent_role: str,
status: str = "completed",
) -> None:
@@ -256,6 +258,9 @@ class ConsoleFormatter:
if not self.verbose or crew_tree is None:
return
task_display = self._get_task_display_name(task)
task_id = str(task.id)
if status == "completed":
style = "green"
status_text = "✅ Completed"
@@ -267,11 +272,11 @@ class ConsoleFormatter:
# Update tree label
for branch in crew_tree.children:
if str(task_id) in str(branch.label):
if task_id in str(branch.label):
# Build label without introducing stray blank lines
task_content = Text()
# First line: Task ID
task_content.append(f"📋 Task: {task_id}", style=f"{style} bold")
# First line: Task name/description
task_content.append(f"📋 Task: {task_display}", style=f"{style} bold")
# Second line: Assigned to
task_content.append("\nAssigned to: ", style="white")
@@ -286,7 +291,7 @@ class ConsoleFormatter:
# Show status panel
content = self.create_status_content(
f"Task {status.title()}", str(task_id), style, Agent=agent_role
f"Task {status.title()}", task_display, style, Agent=agent_role
)
self.print_panel(content, panel_title, style)
@@ -1754,3 +1759,13 @@ class ConsoleFormatter:
Attempts=f"{retry_count + 1}",
)
self.print_panel(content, "🛡️ Guardrail Failed", "red")
def _get_task_display_name(self, task: Any) -> str:
"""Get display name for a task, with fallback logic."""
if hasattr(task, 'name') and task.name:
return f"{task.name} (ID: {str(task.id)[:8]}...)"
elif hasattr(task, 'description') and task.description:
desc = task.description[:50] + "..." if len(task.description) > 50 else task.description
return f"{desc} (ID: {str(task.id)[:8]}...)"
else:
return str(task.id)
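
A standalone replica of the fallback order introduced above, using a small stand-in object so the three cases can be seen without constructing real crewai Task instances; the truncation length and formatting mirror the helper.

import uuid
from dataclasses import dataclass, field
from typing import Any, Optional

@dataclass
class FakeTask:
    id: uuid.UUID = field(default_factory=uuid.uuid4)
    name: Optional[str] = None
    description: Optional[str] = None

def task_display_name(task: Any) -> str:
    # Same order as _get_task_display_name: name, then description, then raw id.
    if getattr(task, "name", None):
        return f"{task.name} (ID: {str(task.id)[:8]}...)"
    if getattr(task, "description", None):
        desc = task.description
        desc = desc[:50] + "..." if len(desc) > 50 else desc
        return f"{desc} (ID: {str(task.id)[:8]}...)"
    return str(task.id)

print(task_display_name(FakeTask(name="Market Research Analysis")))
print(task_display_name(FakeTask(description="Research current market trends in AI technology")))
print(task_display_name(FakeTask()))  # falls back to the full UUID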

44
test_verbose_fix.py Normal file
View File

@@ -0,0 +1,44 @@
#!/usr/bin/env python3
"""Test script to verify verbose output shows task names instead of IDs."""
from crewai import Agent, Task, Crew
def test_verbose_output():
"""Test that verbose output shows task names instead of UUIDs."""
print("Testing verbose output with task names...")
agent = Agent(
role="Research Analyst",
goal="Analyze data and provide insights",
backstory="You are an experienced data analyst.",
verbose=True
)
task = Task(
name="Market Research Analysis",
description="Research current market trends in AI technology",
expected_output="A comprehensive report on AI market trends",
agent=agent
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True
)
print("Task name:", task.name)
print("Task ID:", task.id)
print("\nRunning crew with verbose=True...")
print("Expected: Should show task name 'Market Research Analysis' instead of UUID")
try:
crew.kickoff()
print("\nCrew execution completed successfully!")
return True
except Exception as e:
print(f"Error during execution: {e}")
return False
if __name__ == "__main__":
test_verbose_output()

View File

@@ -1896,7 +1896,7 @@ def test_agent_with_knowledge_sources_generate_search_query():
assert "red" in result.raw.lower()
@pytest.mark.vcr(record_mode="none", filter_headers=["authorization"])
@pytest.mark.vcr(record_mode='none', filter_headers=["authorization"])
def test_agent_with_knowledge_with_no_crewai_knowledge():
mock_knowledge = MagicMock(spec=Knowledge)
@@ -1904,11 +1904,8 @@ def test_agent_with_knowledge_with_no_crewai_knowledge():
role="Information Agent",
goal="Provide information based on knowledge sources",
backstory="You have access to specific knowledge sources.",
llm=LLM(
model="openrouter/openai/gpt-4o-mini",
api_key=os.getenv("OPENROUTER_API_KEY"),
),
knowledge=mock_knowledge,
llm=LLM(model="openrouter/openai/gpt-4o-mini",api_key=os.getenv('OPENROUTER_API_KEY')),
knowledge=mock_knowledge
)
# Create a task that requires the agent to use the knowledge
@@ -1923,7 +1920,7 @@ def test_agent_with_knowledge_with_no_crewai_knowledge():
mock_knowledge.query.assert_called_once()
@pytest.mark.vcr(record_mode="none", filter_headers=["authorization"])
@pytest.mark.vcr(record_mode='none', filter_headers=["authorization"])
def test_agent_with_only_crewai_knowledge():
mock_knowledge = MagicMock(spec=Knowledge)
@@ -1931,38 +1928,33 @@ def test_agent_with_only_crewai_knowledge():
role="Information Agent",
goal="Provide information based on knowledge sources",
backstory="You have access to specific knowledge sources.",
llm=LLM(
model="openrouter/openai/gpt-4o-mini",
api_key=os.getenv("OPENROUTER_API_KEY"),
),
llm=LLM(model="openrouter/openai/gpt-4o-mini",api_key=os.getenv('OPENROUTER_API_KEY'))
)
# Create a task that requires the agent to use the knowledge
task = Task(
description="What is Vidit's favorite color?",
expected_output="Vidit's favorclearite color.",
agent=agent,
agent=agent
)
crew = Crew(agents=[agent], tasks=[task], knowledge=mock_knowledge)
crew = Crew(agents=[agent], tasks=[task],knowledge=mock_knowledge)
crew.kickoff()
mock_knowledge.query.assert_called_once()
@pytest.mark.vcr(record_mode="none", filter_headers=["authorization"])
@pytest.mark.vcr(record_mode='none', filter_headers=["authorization"])
def test_agent_knowledege_with_crewai_knowledge():
crew_knowledge = MagicMock(spec=Knowledge)
agent_knowledge = MagicMock(spec=Knowledge)
agent = Agent(
role="Information Agent",
goal="Provide information based on knowledge sources",
backstory="You have access to specific knowledge sources.",
llm=LLM(
model="openrouter/openai/gpt-4o-mini",
api_key=os.getenv("OPENROUTER_API_KEY"),
),
knowledge=agent_knowledge,
llm=LLM(model="openrouter/openai/gpt-4o-mini",api_key=os.getenv('OPENROUTER_API_KEY')),
knowledge=agent_knowledge
)
# Create a task that requires the agent to use the knowledge
@@ -1972,7 +1964,7 @@ def test_agent_knowledege_with_crewai_knowledge():
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task], knowledge=crew_knowledge)
crew = Crew(agents=[agent],tasks=[task],knowledge=crew_knowledge)
crew.kickoff()
agent_knowledge.query.assert_called_once()
crew_knowledge.query.assert_called_once()
@@ -2172,12 +2164,7 @@ def mock_get_auth_token():
@patch("crewai.cli.plus_api.PlusAPI.get_agent")
def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
from crewai_tools import (
SerperDevTool,
XMLSearchTool,
CSVSearchTool,
EnterpriseActionTool,
)
from crewai_tools import SerperDevTool, XMLSearchTool, CSVSearchTool, EnterpriseActionTool
mock_get_response = MagicMock()
mock_get_response.status_code = 200
@@ -2186,23 +2173,12 @@ def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
"goal": "test goal",
"backstory": "test backstory",
"tools": [
{
"module": "crewai_tools",
"name": "SerperDevTool",
"init_params": {"n_results": "30"},
},
{
"module": "crewai_tools",
"name": "XMLSearchTool",
"init_params": {"summarize": "true"},
},
{"module": "crewai_tools", "name": "SerperDevTool", "init_params": {"n_results": 30}},
{"module": "crewai_tools", "name": "XMLSearchTool", "init_params": {"summarize": True}},
{"module": "crewai_tools", "name": "CSVSearchTool", "init_params": {}},
# using a tool that returns a list of BaseTools
{
"module": "crewai_tools",
"name": "CrewaiEnterpriseTools",
"init_params": {"actions_list": [], "enterprise_token": "test_key"},
},
{"module": "crewai_tools", "name": "CrewaiEnterpriseTools", "init_params": {"actions_list": [], "enterprise_token": "test_key"}},
],
}
mock_get_agent.return_value = mock_get_response
@@ -2245,9 +2221,7 @@ def test_agent_from_repository_override_attributes(mock_get_agent, mock_get_auth
"role": "test role",
"goal": "test goal",
"backstory": "test backstory",
"tools": [
{"name": "SerperDevTool", "module": "crewai_tools", "init_params": {}}
],
"tools": [{"name": "SerperDevTool", "module": "crewai_tools", "init_params": {}}],
}
mock_get_agent.return_value = mock_get_response
agent = Agent(from_repository="test_agent", role="Custom Role")

File diff suppressed because one or more lines are too long

View File

@@ -2,7 +2,6 @@
import os
import tempfile
from pathlib import Path
from unittest.mock import Mock, patch
import pytest
from dotenv import load_dotenv
@@ -35,130 +34,14 @@ def setup_test_environment():
f"Test storage directory {storage_dir} is not writable: {e}"
)
# Set environment variable to point to the test storage directory
os.environ["CREWAI_STORAGE_DIR"] = str(storage_dir)
os.environ["CREWAI_TESTING"] = "true"
yield
os.environ.pop("CREWAI_TESTING", None)
# Cleanup is handled automatically when tempfile context exits
def pytest_configure(config):
config.addinivalue_line(
"markers", "telemetry: mark test as a telemetry test (don't mock telemetry)"
)
@pytest.fixture(autouse=True)
def auto_mock_telemetry(request):
if request.node.get_closest_marker("telemetry"):
telemetry_env = {
key: value
for key, value in os.environ.items()
if key not in ["CREWAI_DISABLE_TELEMETRY", "OTEL_SDK_DISABLED"]
}
with patch.dict(os.environ, telemetry_env, clear=True):
yield
return
if "telemetry" in str(request.fspath):
telemetry_env = {
key: value
for key, value in os.environ.items()
if key not in ["CREWAI_DISABLE_TELEMETRY", "OTEL_SDK_DISABLED"]
}
with patch.dict(os.environ, telemetry_env, clear=True):
yield
return
with patch.dict(
os.environ, {"CREWAI_DISABLE_TELEMETRY": "true", "OTEL_SDK_DISABLED": "true"}
):
with patch("crewai.telemetry.Telemetry") as mock_telemetry_class:
mock_instance = create_mock_telemetry_instance()
mock_telemetry_class.return_value = mock_instance
with (
patch(
"crewai.utilities.events.event_listener.Telemetry",
mock_telemetry_class,
),
patch("crewai.tools.tool_usage.Telemetry", mock_telemetry_class),
patch("crewai.cli.command.Telemetry", mock_telemetry_class),
patch("crewai.cli.create_flow.Telemetry", mock_telemetry_class),
):
yield mock_instance
def create_mock_telemetry_instance():
mock_instance = Mock()
mock_instance.ready = False
mock_instance.trace_set = False
mock_instance._initialized = True
mock_instance._is_telemetry_disabled.return_value = True
mock_instance._should_execute_telemetry.return_value = False
telemetry_methods = [
"set_tracer",
"crew_creation",
"task_started",
"task_ended",
"tool_usage",
"tool_repeated_usage",
"tool_usage_error",
"crew_execution_span",
"end_crew",
"flow_creation_span",
"flow_execution_span",
"individual_test_result_span",
"test_execution_span",
"deploy_signup_error_span",
"start_deployment_span",
"create_crew_deployment_span",
"get_crew_logs_span",
"remove_crew_span",
"flow_plotting_span",
"_add_attribute",
"_safe_telemetry_operation",
]
for method in telemetry_methods:
setattr(mock_instance, method, Mock(return_value=None))
mock_instance.task_started.return_value = None
return mock_instance
@pytest.fixture
def mock_opentelemetry_components():
with (
patch("opentelemetry.trace.get_tracer") as mock_get_tracer,
patch("opentelemetry.trace.set_tracer_provider") as mock_set_provider,
patch("opentelemetry.baggage.set_baggage") as mock_set_baggage,
patch("opentelemetry.baggage.get_baggage") as mock_get_baggage,
patch("opentelemetry.context.attach") as mock_attach,
patch("opentelemetry.context.detach") as mock_detach,
):
mock_tracer = Mock()
mock_span = Mock()
mock_tracer.start_span.return_value = mock_span
mock_get_tracer.return_value = mock_tracer
yield {
"get_tracer": mock_get_tracer,
"set_tracer_provider": mock_set_provider,
"tracer": mock_tracer,
"span": mock_span,
"set_baggage": mock_set_baggage,
"get_baggage": mock_get_baggage,
"attach": mock_attach,
"detach": mock_detach,
}
@pytest.fixture(scope="module")
def vcr_config(request) -> dict:
return {

View File

@@ -1,4 +1,5 @@
"""Test Agent creation and execution basic functionality."""
import hashlib
import json
from concurrent.futures import Future
@@ -50,7 +51,7 @@ from crewai.utilities.events.memory_events import (
MemoryRetrievalStartedEvent,
MemoryRetrievalCompletedEvent,
)
from crewai.memory.external.external_memory import ExternalMemory
@pytest.fixture
def ceo():
@@ -311,6 +312,7 @@ def test_crew_creation(researcher, writer):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_sync_task_execution(researcher, writer):
from unittest.mock import patch
tasks = [
Task(
@@ -959,6 +961,7 @@ def test_cache_hitting_between_agents(researcher, writer, ceo):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_api_calls_throttling(capsys):
from unittest.mock import patch
from crewai.tools import tool
@@ -1393,6 +1396,7 @@ def test_kickoff_for_each_invalid_input():
def test_kickoff_for_each_error_handling():
"""Tests error handling in kickoff_for_each when kickoff raises an error."""
from unittest.mock import patch
inputs = [
{"topic": "dog"},
@@ -1429,6 +1433,7 @@ def test_kickoff_for_each_error_handling():
@pytest.mark.asyncio
async def test_kickoff_async_basic_functionality_and_output():
"""Tests the basic functionality and output of kickoff_async."""
from unittest.mock import patch
inputs = {"topic": "dog"}
@@ -1535,6 +1540,7 @@ async def test_async_kickoff_for_each_async_empty_input():
def test_set_agents_step_callback():
from unittest.mock import patch
researcher_agent = Agent(
role="Researcher",
@@ -1564,6 +1570,7 @@ def test_set_agents_step_callback():
def test_dont_set_agents_step_callback_if_already_set():
from unittest.mock import patch
def agent_callback(_):
pass
@@ -2028,6 +2035,7 @@ def test_crew_inputs_interpolate_both_agents_and_tasks():
def test_crew_inputs_interpolate_both_agents_and_tasks_diff():
from unittest.mock import patch
agent = Agent(
role="{topic} Researcher",
@@ -2060,6 +2068,7 @@ def test_crew_inputs_interpolate_both_agents_and_tasks_diff():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_does_not_interpolate_without_inputs():
from unittest.mock import patch
agent = Agent(
role="{topic} Researcher",
@@ -2194,6 +2203,7 @@ def test_task_same_callback_both_on_task_and_crew():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_tools_with_custom_caching():
from unittest.mock import patch
from crewai.tools import tool
@@ -2474,6 +2484,7 @@ def test_multiple_conditional_tasks(researcher, writer):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_using_contextual_memory():
from unittest.mock import patch
math_researcher = Agent(
role="Researcher",
@@ -2572,6 +2583,7 @@ def test_memory_events_are_emitted():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_using_contextual_memory_with_long_term_memory():
from unittest.mock import patch
math_researcher = Agent(
role="Researcher",
@@ -2602,6 +2614,7 @@ def test_using_contextual_memory_with_long_term_memory():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_warning_long_term_memory_without_entity_memory():
from unittest.mock import patch
math_researcher = Agent(
role="Researcher",
@@ -2638,6 +2651,7 @@ def test_warning_long_term_memory_without_entity_memory():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_long_term_memory_with_memory_flag():
from unittest.mock import patch
math_researcher = Agent(
role="Researcher",
@@ -2672,6 +2686,7 @@ def test_long_term_memory_with_memory_flag():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_using_contextual_memory_with_short_term_memory():
from unittest.mock import patch
math_researcher = Agent(
role="Researcher",
@@ -2702,6 +2717,7 @@ def test_using_contextual_memory_with_short_term_memory():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_disabled_memory_using_contextual_memory():
from unittest.mock import patch
math_researcher = Agent(
role="Researcher",
@@ -2829,6 +2845,7 @@ def test_crew_output_file_validation_failures():
def test_manager_agent(researcher, writer):
from unittest.mock import patch
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
@@ -4735,43 +4752,3 @@ def test_default_crew_name(researcher, writer):
],
)
assert crew.name == "crew"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_ensure_exchanged_messages_are_propagated_to_external_memory():
external_memory = ExternalMemory(storage=MagicMock())
math_researcher = Agent(
role="Researcher",
goal="You research about math.",
backstory="You're an expert in research and you love to learn new things.",
allow_delegation=False,
)
task1 = Task(
description="Research a topic to teach a kid aged 6 about math.",
expected_output="A topic, explanation, angle, and examples.",
agent=math_researcher,
)
crew = Crew(
agents=[math_researcher],
tasks=[task1],
external_memory=external_memory,
)
with patch.object(
ExternalMemory, "save", return_value=None
) as external_memory_save:
crew.kickoff()
expected_messages = [
{'role': 'system', 'content': "You are Researcher. You're an expert in research and you love to learn new things.\nYour personal goal is: You research about math.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},
{'role': 'user', 'content': '\nCurrent Task: Research a topic to teach a kid aged 6 about math.\n\nThis is the expected criteria for your final answer: A topic, explanation, angle, and examples.\nyou MUST return the actual complete content as the final answer, not a summary.\n\n# Useful context: \nExternal memories:\n\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:'},
{'role': 'assistant', 'content': 'I now can give a great answer \nFinal Answer: \n\n**Topic: Understanding Shapes (Geometry)**\n\n**Explanation:** \nShapes are everywhere around us! They are the special forms that we can see in everyday objects. Teaching a 6-year-old about shapes is not only fun but also a way to help them think about the world around them and develop their spatial awareness. We will focus on basic shapes: circle, square, triangle, and rectangle. Understanding these shapes helps kids recognize and describe their environment.\n\n**Angle:** \nLets make learning about shapes an adventure! We can turn it into a treasure hunt where the child has to find objects around the house or outside that match the shapes we learn. This hands-on approach helps make the learning stick!\n\n**Examples:** \n1. **Circle:** \n - Explanation: A circle is round and has no corners. It looks like a wheel or a cookie! \n - Activity: Find objects that are circles, such as a clock, a dinner plate, or a ball. Draw a big circle on a paper and then try to draw smaller circles inside it.\n\n2. **Square:** \n - Explanation: A square has four equal sides and four corners. It looks like a box! \n - Activity: Look for squares in books, in windows, or in building blocks. Try to build a tall tower using square blocks!\n\n3. **Triangle:** \n - Explanation: A triangle has three sides and three corners. It looks like a slice of pizza or a roof! \n - Activity: Use crayons to draw a big triangle and then find things that are shaped like a triangle, like a slice of cheese or a traffic sign.\n\n4. **Rectangle:** \n - Explanation: A rectangle has four sides but only opposite sides are equal. Its like a stretched square! \n - Activity: Search for rectangles, such as a book cover or a door. You can cut out rectangles from colored paper and create a collage!\n\nBy relating the shapes to fun activities and using real-world examples, we not only make learning more enjoyable but also help the child better remember and understand the concept of shapes in math. This foundation forms the basis of their future learning in geometry!'}
]
external_memory_save.assert_called_once_with(
value=ANY,
metadata={"description": ANY, "messages": expected_messages},
agent=ANY,
)

View File

@@ -39,7 +39,6 @@ def test_short_term_memory_search_events(short_term_memory):
events = defaultdict(list)
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MemoryQueryStartedEvent)
def on_search_started(source, event):
events["MemoryQueryStartedEvent"].append(event)
@@ -60,34 +59,33 @@ def test_short_term_memory_search_events(short_term_memory):
assert len(events["MemoryQueryFailedEvent"]) == 0
assert dict(events["MemoryQueryStartedEvent"][0]) == {
"timestamp": ANY,
"type": "memory_query_started",
"source_fingerprint": None,
"source_type": "short_term_memory",
"fingerprint_metadata": None,
"query": "test value",
"limit": 3,
"score_threshold": 0.35,
'timestamp': ANY,
'type': 'memory_query_started',
'source_fingerprint': None,
'source_type': 'short_term_memory',
'fingerprint_metadata': None,
'query': 'test value',
'limit': 3,
'score_threshold': 0.35
}
assert dict(events["MemoryQueryCompletedEvent"][0]) == {
"timestamp": ANY,
"type": "memory_query_completed",
"source_fingerprint": None,
"source_type": "short_term_memory",
"fingerprint_metadata": None,
"query": "test value",
"results": [],
"limit": 3,
"score_threshold": 0.35,
"query_time_ms": ANY,
'timestamp': ANY,
'type': 'memory_query_completed',
'source_fingerprint': None,
'source_type': 'short_term_memory',
'fingerprint_metadata': None,
'query': 'test value',
'results': [],
'limit': 3,
'score_threshold': 0.35,
'query_time_ms': ANY
}
def test_short_term_memory_save_events(short_term_memory):
events = defaultdict(list)
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MemorySaveStartedEvent)
def on_save_started(source, event):
events["MemorySaveStartedEvent"].append(event)
@@ -107,29 +105,28 @@ def test_short_term_memory_save_events(short_term_memory):
assert len(events["MemorySaveFailedEvent"]) == 0
assert dict(events["MemorySaveStartedEvent"][0]) == {
"timestamp": ANY,
"type": "memory_save_started",
"source_fingerprint": None,
"source_type": "short_term_memory",
"fingerprint_metadata": None,
"value": "test value",
"metadata": {"task": "test_task"},
"agent_role": "test_agent",
'timestamp': ANY,
'type': 'memory_save_started',
'source_fingerprint': None,
'source_type': 'short_term_memory',
'fingerprint_metadata': None,
'value': 'test value',
'metadata': {'task': 'test_task'},
'agent_role': "test_agent"
}
assert dict(events["MemorySaveCompletedEvent"][0]) == {
"timestamp": ANY,
"type": "memory_save_completed",
"source_fingerprint": None,
"source_type": "short_term_memory",
"fingerprint_metadata": None,
"value": "test value",
"metadata": {"task": "test_task", "agent": "test_agent"},
"agent_role": "test_agent",
"save_time_ms": ANY,
'timestamp': ANY,
'type': 'memory_save_completed',
'source_fingerprint': None,
'source_type': 'short_term_memory',
'fingerprint_metadata': None,
'value': 'test value',
'metadata': {'task': 'test_task', 'agent': 'test_agent'},
'agent_role': "test_agent",
'save_time_ms': ANY
}
def test_save_and_search(short_term_memory):
memory = ShortTermMemoryItem(
data="""test value test value test value test value test value test value

View File

@@ -1,5 +1,4 @@
import os
import threading
from unittest.mock import patch
import pytest
@@ -12,16 +11,12 @@ from opentelemetry import trace
@pytest.fixture(autouse=True)
def cleanup_telemetry():
"""Automatically clean up Telemetry singleton between tests."""
Telemetry._instance = None
if hasattr(Telemetry, "_lock"):
Telemetry._lock = threading.Lock()
yield
Telemetry._instance = None
if hasattr(Telemetry, "_lock"):
Telemetry._lock = threading.Lock()
@pytest.mark.telemetry
@pytest.mark.parametrize(
"env_var,value,expected_ready",
[
@@ -41,7 +36,6 @@ def test_telemetry_environment_variables(env_var, value, expected_ready):
assert telemetry.ready is expected_ready
@pytest.mark.telemetry
def test_telemetry_enabled_by_default():
"""Test that telemetry is enabled by default."""
with patch.dict(os.environ, {}, clear=True):
@@ -50,7 +44,6 @@ def test_telemetry_enabled_by_default():
assert telemetry.ready is True
@pytest.mark.telemetry
@patch("crewai.telemetry.telemetry.logger.error")
@patch(
"opentelemetry.exporter.otlp.proto.http.trace_exporter.OTLPSpanExporter.export",
@@ -83,7 +76,6 @@ def test_telemetry_fails_due_connect_timeout(export_mock, logger_mock):
logger_mock.assert_called_once_with(error)
@pytest.mark.telemetry
def test_telemetry_singleton_pattern():
"""Test that Telemetry uses the singleton pattern correctly."""
Telemetry._instance = None

View File

@@ -14,18 +14,14 @@ def cleanup_telemetry():
Telemetry._instance = None
@pytest.mark.telemetry
@pytest.mark.parametrize(
"env_var,value,expected_ready",
[
("OTEL_SDK_DISABLED", "true", False),
("OTEL_SDK_DISABLED", "TRUE", False),
("CREWAI_DISABLE_TELEMETRY", "true", False),
("CREWAI_DISABLE_TELEMETRY", "TRUE", False),
("OTEL_SDK_DISABLED", "false", True),
("CREWAI_DISABLE_TELEMETRY", "false", True),
],
)
@pytest.mark.parametrize("env_var,value,expected_ready", [
("OTEL_SDK_DISABLED", "true", False),
("OTEL_SDK_DISABLED", "TRUE", False),
("CREWAI_DISABLE_TELEMETRY", "true", False),
("CREWAI_DISABLE_TELEMETRY", "TRUE", False),
("OTEL_SDK_DISABLED", "false", True),
("CREWAI_DISABLE_TELEMETRY", "false", True),
])
def test_telemetry_environment_variables(env_var, value, expected_ready):
"""Test telemetry state with different environment variable configurations."""
with patch.dict(os.environ, {env_var: value}):
@@ -34,7 +30,6 @@ def test_telemetry_environment_variables(env_var, value, expected_ready):
assert telemetry.ready is expected_ready
@pytest.mark.telemetry
def test_telemetry_enabled_by_default():
"""Test that telemetry is enabled by default."""
with patch.dict(os.environ, {}, clear=True):
@@ -43,60 +38,57 @@ def test_telemetry_enabled_by_default():
assert telemetry.ready is True
@pytest.mark.telemetry
def test_telemetry_disable_after_singleton_creation():
"""Test that telemetry operations are disabled when env var is set after singleton creation."""
with patch.dict(os.environ, {}, clear=True):
with patch("crewai.telemetry.telemetry.TracerProvider"):
telemetry = Telemetry()
assert telemetry.ready is True
mock_operation = MagicMock()
telemetry._safe_telemetry_operation(mock_operation)
mock_operation.assert_called_once()
mock_operation.reset_mock()
os.environ["CREWAI_DISABLE_TELEMETRY"] = "true"
os.environ['CREWAI_DISABLE_TELEMETRY'] = 'true'
telemetry._safe_telemetry_operation(mock_operation)
mock_operation.assert_not_called()
@pytest.mark.telemetry
def test_telemetry_disable_with_multiple_instances():
"""Test that multiple telemetry instances respect dynamically changed env vars."""
with patch.dict(os.environ, {}, clear=True):
with patch("crewai.telemetry.telemetry.TracerProvider"):
telemetry1 = Telemetry()
assert telemetry1.ready is True
os.environ["CREWAI_DISABLE_TELEMETRY"] = "true"
os.environ['CREWAI_DISABLE_TELEMETRY'] = 'true'
telemetry2 = Telemetry()
assert telemetry2 is telemetry1
assert telemetry2.ready is True
mock_operation = MagicMock()
telemetry2._safe_telemetry_operation(mock_operation)
mock_operation.assert_not_called()
@pytest.mark.telemetry
def test_telemetry_otel_sdk_disabled_after_creation():
"""Test that OTEL_SDK_DISABLED also works when set after singleton creation."""
with patch.dict(os.environ, {}, clear=True):
with patch("crewai.telemetry.telemetry.TracerProvider"):
telemetry = Telemetry()
assert telemetry.ready is True
mock_operation = MagicMock()
telemetry._safe_telemetry_operation(mock_operation)
mock_operation.assert_called_once()
mock_operation.reset_mock()
os.environ["OTEL_SDK_DISABLED"] = "true"
os.environ['OTEL_SDK_DISABLED'] = 'true'
telemetry._safe_telemetry_operation(mock_operation)
mock_operation.assert_not_called()

View File

@@ -2,8 +2,8 @@ import os
import pytest
from unittest.mock import patch, MagicMock
# Remove the module-level patch
from crewai import Agent, Task, Crew
from crewai.flow.flow import Flow, start
from crewai.utilities.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)
@@ -284,42 +284,29 @@ class TestTraceListenerSetup:
f"Found {len(trace_handlers)} trace handlers when tracing should be disabled"
)
def test_trace_listener_setup_correctly_for_crew(self):
def test_trace_listener_setup_correctly(self):
"""Test that trace listener is set up correctly when enabled"""
with patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}):
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm="gpt-4o-mini",
)
task = Task(
description="Say hello to the world",
expected_output="hello world",
agent=agent,
)
with patch.object(
TraceCollectionListener, "setup_listeners"
) as mock_listener_setup:
Crew(agents=[agent], tasks=[task], verbose=True)
assert mock_listener_setup.call_count >= 1
trace_listener = TraceCollectionListener()
def test_trace_listener_setup_correctly_for_flow(self):
assert trace_listener.trace_enabled is True
assert trace_listener.batch_manager is not None
@pytest.mark.vcr(filter_headers=["authorization"])
def test_trace_listener_setup_correctly_with_tracing_flag(self):
"""Test that trace listener is set up correctly when enabled"""
with patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}):
class FlowExample(Flow):
@start()
def start(self):
pass
with patch.object(
TraceCollectionListener, "setup_listeners"
) as mock_listener_setup:
FlowExample()
assert mock_listener_setup.call_count >= 1
agent = Agent(role="Test Agent", goal="Test goal", backstory="Test backstory")
task = Task(
description="Say hello to the world",
expected_output="hello world",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task], verbose=True, tracing=True)
crew.kickoff()
trace_listener = TraceCollectionListener(tracing=True)
assert trace_listener.trace_enabled is True
assert trace_listener.batch_manager is not None
# Helper method to ensure cleanup
def teardown_method(self):

View File

@@ -1 +0,0 @@

View File

@@ -1,196 +0,0 @@
from unittest.mock import Mock, patch
from crewai.utilities.events.third_party.agentops_listener import AgentOpsListener
from crewai.utilities.events.crew_events import (
CrewKickoffStartedEvent,
CrewKickoffCompletedEvent,
)
from crewai.utilities.events.task_events import TaskEvaluationEvent
from crewai.utilities.events.tool_usage_events import (
ToolUsageStartedEvent,
ToolUsageErrorEvent,
)
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus
class TestAgentOpsListener:
def test_agentops_listener_initialization_with_agentops_installed(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops"):
listener = AgentOpsListener()
assert listener.agentops is not None
def test_agentops_listener_initialization_without_agentops_installed(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops", side_effect=ImportError):
listener = AgentOpsListener()
assert listener.agentops is None
def test_setup_listeners_with_agentops_installed(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops"):
listener = AgentOpsListener()
mock_event_bus = Mock(spec=CrewAIEventsBus)
listener.setup_listeners(mock_event_bus)
assert mock_event_bus.register_handler.call_count == 5
mock_event_bus.register_handler.assert_any_call(
CrewKickoffStartedEvent, listener._handle_crew_kickoff_started
)
mock_event_bus.register_handler.assert_any_call(
CrewKickoffCompletedEvent, listener._handle_crew_kickoff_completed
)
mock_event_bus.register_handler.assert_any_call(
ToolUsageStartedEvent, listener._handle_tool_usage_started
)
mock_event_bus.register_handler.assert_any_call(
ToolUsageErrorEvent, listener._handle_tool_usage_error
)
mock_event_bus.register_handler.assert_any_call(
TaskEvaluationEvent, listener._handle_task_evaluation
)
def test_setup_listeners_without_agentops_installed(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops", side_effect=ImportError):
listener = AgentOpsListener()
mock_event_bus = Mock(spec=CrewAIEventsBus)
listener.setup_listeners(mock_event_bus)
mock_event_bus.register_handler.assert_not_called()
def test_handle_crew_kickoff_started_with_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
listener = AgentOpsListener()
event = CrewKickoffStartedEvent(crew_id="test-crew")
listener._handle_crew_kickoff_started(event)
mock_agentops.start_session.assert_called_once()
call_args = mock_agentops.start_session.call_args
assert call_args[1]["tags"] == ["crewai", "crew_kickoff"]
def test_handle_crew_kickoff_started_without_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops", side_effect=ImportError):
listener = AgentOpsListener()
event = CrewKickoffStartedEvent(crew_id="test-crew")
listener._handle_crew_kickoff_started(event)
def test_handle_crew_kickoff_completed_with_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
listener = AgentOpsListener()
event = CrewKickoffCompletedEvent(crew_id="test-crew", crew_output=Mock())
listener._handle_crew_kickoff_completed(event)
mock_agentops.end_session.assert_called_once_with("Success")
def test_handle_crew_kickoff_completed_without_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops", side_effect=ImportError):
listener = AgentOpsListener()
event = CrewKickoffCompletedEvent(crew_id="test-crew", crew_output=Mock())
listener._handle_crew_kickoff_completed(event)
def test_handle_tool_usage_started_with_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
listener = AgentOpsListener()
event = ToolUsageStartedEvent(
tool_name="test_tool",
arguments={"arg1": "value1"},
agent_id="test-agent",
task_id="test-task"
)
listener._handle_tool_usage_started(event)
mock_agentops.record.assert_called_once()
call_args = mock_agentops.record.call_args[0][0]
assert hasattr(call_args, "action_type")
def test_handle_tool_usage_error_with_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
listener = AgentOpsListener()
event = ToolUsageErrorEvent(
tool_name="test_tool",
arguments={"arg1": "value1"},
error="Test error",
agent_id="test-agent",
task_id="test-task"
)
listener._handle_tool_usage_error(event)
mock_agentops.record.assert_called_once()
def test_handle_task_evaluation_with_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
listener = AgentOpsListener()
event = TaskEvaluationEvent(
task_id="test-task",
score=0.85,
feedback="Good performance"
)
listener._handle_task_evaluation(event)
mock_agentops.record.assert_called_once()
def test_handle_crew_kickoff_started_with_exception(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
mock_agentops.start_session.side_effect = Exception("Test exception")
listener = AgentOpsListener()
event = CrewKickoffStartedEvent(crew_id="test-crew")
listener._handle_crew_kickoff_started(event)
def test_handle_crew_kickoff_completed_with_exception(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
mock_agentops.end_session.side_effect = Exception("Test exception")
listener = AgentOpsListener()
event = CrewKickoffCompletedEvent(crew_id="test-crew", crew_output=Mock())
listener._handle_crew_kickoff_completed(event)
def test_handle_tool_usage_started_with_exception(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
mock_agentops.record.side_effect = Exception("Test exception")
listener = AgentOpsListener()
event = ToolUsageStartedEvent(
tool_name="test_tool",
arguments={"arg1": "value1"},
agent_id="test-agent",
task_id="test-task"
)
listener._handle_tool_usage_started(event)
def test_handle_tool_usage_error_with_exception(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
mock_agentops.record.side_effect = Exception("Test exception")
listener = AgentOpsListener()
event = ToolUsageErrorEvent(
tool_name="test_tool",
arguments={"arg1": "value1"},
error="Test error",
agent_id="test-agent",
task_id="test-task"
)
listener._handle_tool_usage_error(event)
def test_handle_task_evaluation_with_exception(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
mock_agentops.record.side_effect = Exception("Test exception")
listener = AgentOpsListener()
event = TaskEvaluationEvent(
task_id="test-task",
score=0.85,
feedback="Good performance"
)
listener._handle_task_evaluation(event)
def test_agentops_listener_instance_creation(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops"):
from crewai.utilities.events.third_party.agentops_listener import agentops_listener
assert agentops_listener is not None
assert isinstance(agentops_listener, AgentOpsListener)

View File

@@ -0,0 +1,95 @@
from unittest.mock import Mock, patch
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
class TestConsoleFormatterVerbose:
"""Test verbose output functionality in console formatter."""
def setup_method(self):
"""Set up test fixtures."""
self.formatter = ConsoleFormatter(verbose=True)
def test_get_task_display_name_with_name(self):
"""Test task display name when task has a name."""
task = Mock()
task.name = "Research Market Trends"
task.id = "12345678-1234-5678-9012-123456789abc"
result = self.formatter._get_task_display_name(task)
assert "Research Market Trends" in result
assert "12345678" in result
def test_get_task_display_name_with_description_only(self):
"""Test task display name when task has no name but has description."""
task = Mock()
task.name = None
task.description = "Analyze current market trends and provide insights"
task.id = "12345678-1234-5678-9012-123456789abc"
result = self.formatter._get_task_display_name(task)
assert "Analyze current market trends" in result
assert "12345678" in result
def test_get_task_display_name_long_description_truncated(self):
"""Test task display name truncates long descriptions."""
task = Mock()
task.name = None
task.description = "This is a very long task description that should be truncated because it exceeds the maximum length"
task.id = "12345678-1234-5678-9012-123456789abc"
result = self.formatter._get_task_display_name(task)
assert len(result.split("(ID:")[0].strip()) <= 53
assert "..." in result
def test_get_task_display_name_fallback_to_id(self):
"""Test task display name falls back to ID when no name or description."""
task = Mock()
task.name = None
task.description = None
task.id = "12345678-1234-5678-9012-123456789abc"
result = self.formatter._get_task_display_name(task)
assert result == "12345678-1234-5678-9012-123456789abc"
@patch('crewai.utilities.events.utils.console_formatter.ConsoleFormatter.print')
def test_create_task_branch_uses_task_name(self, mock_print):
"""Test create_task_branch displays task name instead of ID."""
task = Mock()
task.name = "Write Blog Post"
task.id = "12345678-1234-5678-9012-123456789abc"
crew_tree = Mock()
crew_tree.add.return_value = Mock()
self.formatter.create_task_branch(crew_tree, task)
call_args = crew_tree.add.call_args[0][0]
assert "Write Blog Post" in str(call_args)
assert "12345678" in str(call_args)
@patch('crewai.utilities.events.utils.console_formatter.ConsoleFormatter.print')
def test_update_task_status_uses_task_name(self, mock_print):
"""Test update_task_status displays task name instead of ID."""
task = Mock()
task.name = "Data Analysis"
task.id = "12345678-1234-5678-9012-123456789abc"
crew_tree = Mock()
branch = Mock()
branch.label = "12345678-1234-5678-9012-123456789abc"
crew_tree.children = [branch]
self.formatter.update_task_status(crew_tree, task, "Data Analyst", "completed")
updated_label = branch.label
assert "Data Analysis" in str(updated_label)
def test_verbose_disabled_returns_none(self):
"""Test that methods return None when verbose is disabled."""
formatter = ConsoleFormatter(verbose=False)
task = Mock()
result = formatter.create_task_branch(Mock(), task)
assert result is None
formatter.update_task_status(Mock(), task, "Agent", "completed")

53
uv.lock generated
View File

@@ -34,6 +34,22 @@ resolution-markers = [
"(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version < '3.11' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
[[package]]
name = "agentops"
version = "0.3.18"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "packaging" },
{ name = "psutil" },
{ name = "pyyaml" },
{ name = "requests" },
{ name = "termcolor" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c5/52/46bb2f29b9e5f2e1d8b124296b7794934a9048de635d9e7d6a95e791ad7b/agentops-0.3.18.tar.gz", hash = "sha256:4d509754df7be52579597cc9f53939c5218131a0379463e0ff6f6f40cde9fcc4", size = 55394, upload-time = "2024-11-19T19:06:21.306Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/97/8d/bd4cad95dad722dc2d3e4179feab1058ef846828c0e15e51e8bfaea373ee/agentops-0.3.18-py3-none-any.whl", hash = "sha256:8b48d8a1662f276653430fd541c77fa4f9a15a43e881b518ff88ea56925afcf7", size = 58032, upload-time = "2024-11-19T19:06:19.068Z" },
]
[[package]]
name = "aiohappyeyeballs"
version = "2.6.1"
@@ -728,6 +744,9 @@ dependencies = [
]
[package.optional-dependencies]
agentops = [
{ name = "agentops" },
]
aisuite = [
{ name = "aisuite" },
]
@@ -773,12 +792,13 @@ dev = [
[package.metadata]
requires-dist = [
{ name = "agentops", marker = "extra == 'agentops'", specifier = "==0.3.18" },
{ name = "aisuite", marker = "extra == 'aisuite'", specifier = ">=0.1.10" },
{ name = "appdirs", specifier = ">=1.4.4" },
{ name = "blinker", specifier = ">=1.9.0" },
{ name = "chromadb", specifier = ">=0.5.23" },
{ name = "click", specifier = ">=8.1.7" },
{ name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.62.0" },
{ name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.60.0" },
{ name = "docling", marker = "extra == 'docling'", specifier = ">=2.12.0" },
{ name = "instructor", specifier = ">=1.3.3" },
{ name = "json-repair", specifier = "==0.25.2" },
@@ -808,7 +828,7 @@ requires-dist = [
{ name = "tomli-w", specifier = ">=1.1.0" },
{ name = "uv", specifier = ">=0.4.25" },
]
provides-extras = ["aisuite", "docling", "embeddings", "mem0", "openpyxl", "pandas", "pdfplumber", "tools"]
provides-extras = ["agentops", "aisuite", "docling", "embeddings", "mem0", "openpyxl", "pandas", "pdfplumber", "tools"]
[package.metadata.requires-dev]
dev = [
@@ -830,7 +850,7 @@ dev = [
[[package]]
name = "crewai-tools"
version = "0.62.0"
version = "0.60.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "chromadb" },
@@ -848,9 +868,9 @@ dependencies = [
{ name = "stagehand" },
{ name = "tiktoken" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e9/24/0287edbaa33c52b5541a564d92d3324d4839a76a4b023540c0fd5c7ee330/crewai_tools-0.62.0.tar.gz", hash = "sha256:71a24c173677f108516e1cde286e476e9aeb60da78d911bec0f15caa3c6af15a", size = 1059534, upload-time = "2025-08-13T21:13:49.879Z" }
sdist = { url = "https://files.pythonhosted.org/packages/bb/60/04fd70a8a15eaf4147ff648ada44f1d4afd453a528cf8facd618ef32e576/crewai_tools-0.60.0.tar.gz", hash = "sha256:9234f6912b65495afe5e1bfa330abca09a40725d47fe2c71a22387bf6eeb8e72", size = 1032373, upload-time = "2025-08-06T20:27:16.003Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a9/16/cf26f4eaf1edc2f62d0c9cb1ec97f573f8d7401663da047c52b3ce4c4628/crewai_tools-0.62.0-py3-none-any.whl", hash = "sha256:b5e7035563cb00601431286b1c56933966acea1f220052cd33e1d4ee35590017", size = 677008, upload-time = "2025-08-13T21:13:46.903Z" },
{ url = "https://files.pythonhosted.org/packages/6d/55/984f3d2d5afbcfa87c380c7c17b728804e80617b768b3748f25220b2b32c/crewai_tools-0.60.0-py3-none-any.whl", hash = "sha256:a54277c973753de4a3269da17e5a7e4995d4c70fc331eb2872189b5f92cfdaaf", size = 657128, upload-time = "2025-08-06T20:27:14.295Z" },
]
[[package]]
@@ -3993,6 +4013,20 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/7e/cc/7e77861000a0691aeea8f4566e5d3aa716f2b1dece4a24439437e41d3d25/protobuf-5.29.5-py3-none-any.whl", hash = "sha256:6cf42630262c59b2d8de33954443d94b746c952b01434fc58a417fdbd2e84bd5", size = 172823, upload-time = "2025-05-28T23:51:58.157Z" },
]
[[package]]
name = "psutil"
version = "5.9.8"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/90/c7/6dc0a455d111f68ee43f27793971cf03fe29b6ef972042549db29eec39a2/psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c", size = 503247, upload-time = "2024-01-19T20:47:09.517Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e7/e3/07ae864a636d70a8a6f58da27cb1179192f1140d5d1da10886ade9405797/psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81", size = 248702, upload-time = "2024-01-19T20:47:36.303Z" },
{ url = "https://files.pythonhosted.org/packages/b3/bd/28c5f553667116b2598b9cc55908ec435cb7f77a34f2bff3e3ca765b0f78/psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421", size = 285242, upload-time = "2024-01-19T20:47:39.65Z" },
{ url = "https://files.pythonhosted.org/packages/c5/4f/0e22aaa246f96d6ac87fe5ebb9c5a693fbe8877f537a1022527c47ca43c5/psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4", size = 288191, upload-time = "2024-01-19T20:47:43.078Z" },
{ url = "https://files.pythonhosted.org/packages/6e/f5/2aa3a4acdc1e5940b59d421742356f133185667dd190b166dbcfcf5d7b43/psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0", size = 251252, upload-time = "2024-01-19T20:47:52.88Z" },
{ url = "https://files.pythonhosted.org/packages/93/52/3e39d26feae7df0aa0fd510b14012c3678b36ed068f7d78b8d8784d61f0e/psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf", size = 255090, upload-time = "2024-01-19T20:47:56.019Z" },
{ url = "https://files.pythonhosted.org/packages/05/33/2d74d588408caedd065c2497bdb5ef83ce6082db01289a1e1147f6639802/psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8", size = 249898, upload-time = "2024-01-19T20:47:59.238Z" },
]
[[package]]
name = "ptyprocess"
version = "0.7.0"
@@ -5468,6 +5502,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" },
]
[[package]]
name = "termcolor"
version = "2.4.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/10/56/d7d66a84f96d804155f6ff2873d065368b25a07222a6fd51c4f24ef6d764/termcolor-2.4.0.tar.gz", hash = "sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a", size = 12664, upload-time = "2023-12-01T11:04:51.66Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d9/5f/8c716e47b3a50cbd7c146f45881e11d9414def768b7cd9c5e6650ec2a80a/termcolor-2.4.0-py3-none-any.whl", hash = "sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63", size = 7719, upload-time = "2023-12-01T11:04:50.019Z" },
]
[[package]]
name = "tifffile"
version = "2025.5.10"