Mirror of https://github.com/crewAIInc/crewAI.git (synced 2025-12-25 00:38:30 +00:00)

Compare commits: 0.159.0...docs/train (28 commits)
Commits in this comparison:

- e08d210bb8
- 34100d290b
- 46f8fa59c1
- 4867dced0e
- ed187b495b
- 2773996b49
- 95923b78c6
- 7065ad4336
- d6254918fd
- 95e3d6db7a
- d7f8002baa
- d743e12a06
- 6068fe941f
- 2a0cefc98b
- a4f65e4870
- a1b3edd79c
- 80b3d9689a
- ec03a53121
- 2fdf3f3a6a
- 1d3d7ebf5e
- 2c2196f415
- c9f30b175c
- a17b93a7f8
- 0d3e462791
- 947c9552f0
- 04a03d332f
- 992e093610
- 07f8e73958
.gitignore (vendored, 1 change)
@@ -21,7 +21,6 @@ crew_tasks_output.json
|
||||
.mypy_cache
|
||||
.ruff_cache
|
||||
.venv
|
||||
agentops.log
|
||||
test_flow.html
|
||||
crewairules.mdc
|
||||
plan.md
|
||||
|
||||
@@ -226,7 +226,6 @@
|
||||
"group": "Observability",
|
||||
"pages": [
|
||||
"en/observability/overview",
|
||||
"en/observability/agentops",
|
||||
"en/observability/arize-phoenix",
|
||||
"en/observability/langdb",
|
||||
"en/observability/langfuse",
|
||||
@@ -342,11 +341,12 @@
|
||||
"groups": [
|
||||
{
|
||||
"group": "Getting Started",
|
||||
"pages": ["en/api-reference/introduction"]
|
||||
},
|
||||
{
|
||||
"group": "Endpoints",
|
||||
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.en.yaml"
|
||||
"pages": [
|
||||
"en/api-reference/introduction",
|
||||
"en/api-reference/inputs",
|
||||
"en/api-reference/kickoff",
|
||||
"en/api-reference/status"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -566,7 +566,6 @@
|
||||
"group": "Observabilidade",
|
||||
"pages": [
|
||||
"pt-BR/observability/overview",
|
||||
"pt-BR/observability/agentops",
|
||||
"pt-BR/observability/arize-phoenix",
|
||||
"pt-BR/observability/langdb",
|
||||
"pt-BR/observability/langfuse",
|
||||
@@ -682,11 +681,12 @@
|
||||
"groups": [
|
||||
{
|
||||
"group": "Começando",
|
||||
"pages": ["pt-BR/api-reference/introduction"]
|
||||
},
|
||||
{
|
||||
"group": "Endpoints",
|
||||
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.pt-BR.yaml"
|
||||
"pages": [
|
||||
"pt-BR/api-reference/introduction",
|
||||
"pt-BR/api-reference/inputs",
|
||||
"pt-BR/api-reference/kickoff",
|
||||
"pt-BR/api-reference/status"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -711,7 +711,7 @@
|
||||
"icon": "globe"
|
||||
},
|
||||
{
|
||||
"anchor": "법정",
|
||||
"anchor": "포럼",
|
||||
"href": "https://community.crewai.com",
|
||||
"icon": "discourse"
|
||||
},
|
||||
@@ -721,7 +721,7 @@
|
||||
"icon": "robot"
|
||||
},
|
||||
{
|
||||
"anchor": "출시",
|
||||
"anchor": "릴리스",
|
||||
"href": "https://github.com/crewAIInc/crewAI/releases",
|
||||
"icon": "tag"
|
||||
}
|
||||
@@ -736,22 +736,22 @@
|
||||
"pages": ["ko/introduction", "ko/installation", "ko/quickstart"]
|
||||
},
|
||||
{
|
||||
"group": "안내서",
|
||||
"group": "가이드",
|
||||
"pages": [
|
||||
{
|
||||
"group": "전략",
|
||||
"pages": ["ko/guides/concepts/evaluating-use-cases"]
|
||||
},
|
||||
{
|
||||
"group": "Agents",
|
||||
"group": "에이전트 (Agents)",
|
||||
"pages": ["ko/guides/agents/crafting-effective-agents"]
|
||||
},
|
||||
{
|
||||
"group": "Crews",
|
||||
"group": "크루 (Crews)",
|
||||
"pages": ["ko/guides/crews/first-crew"]
|
||||
},
|
||||
{
|
||||
"group": "Flows",
|
||||
"group": "플로우 (Flows)",
|
||||
"pages": [
|
||||
"ko/guides/flows/first-flow",
|
||||
"ko/guides/flows/mastering-flow-state"
|
||||
@@ -799,7 +799,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "도구",
|
||||
"group": "도구 (Tools)",
|
||||
"pages": [
|
||||
"ko/tools/overview",
|
||||
{
|
||||
@@ -889,7 +889,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "클라우드 & 저장",
|
||||
"group": "클라우드 & 스토리지",
|
||||
"pages": [
|
||||
"ko/tools/cloud-storage/overview",
|
||||
"ko/tools/cloud-storage/s3readertool",
|
||||
@@ -911,10 +911,9 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "오브저버빌리티",
|
||||
"group": "Observability",
|
||||
"pages": [
|
||||
"ko/observability/overview",
|
||||
"ko/observability/agentops",
|
||||
"ko/observability/arize-phoenix",
|
||||
"ko/observability/langdb",
|
||||
"ko/observability/langfuse",
|
||||
@@ -930,7 +929,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "익히다",
|
||||
"group": "학습",
|
||||
"pages": [
|
||||
"ko/learn/overview",
|
||||
"ko/learn/llm-selection-guide",
|
||||
@@ -954,13 +953,13 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "원격측정",
|
||||
"group": "Telemetry",
|
||||
"pages": ["ko/telemetry"]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"tab": "기업",
|
||||
"tab": "엔터프라이즈",
|
||||
"groups": [
|
||||
{
|
||||
"group": "시작 안내",
|
||||
@@ -1000,7 +999,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "사용 안내서",
|
||||
"group": "How-To Guides",
|
||||
"pages": [
|
||||
"ko/enterprise/guides/build-crew",
|
||||
"ko/enterprise/guides/deploy-crew",
|
||||
@@ -1029,11 +1028,12 @@
|
||||
"groups": [
|
||||
{
|
||||
"group": "시작 안내",
|
||||
"pages": ["ko/api-reference/introduction"]
|
||||
},
|
||||
{
|
||||
"group": "Endpoints",
|
||||
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.ko.yaml"
|
||||
"pages": [
|
||||
"ko/api-reference/introduction",
|
||||
"ko/api-reference/inputs",
|
||||
"ko/api-reference/kickoff",
|
||||
"ko/api-reference/status"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1084,6 +1084,10 @@
|
||||
"indexing": "all"
|
||||
},
|
||||
"redirects": [
|
||||
{
|
||||
"source": "/api-reference",
|
||||
"destination": "/en/api-reference/introduction"
|
||||
},
|
||||
{
|
||||
"source": "/introduction",
|
||||
"destination": "/en/introduction"
|
||||
@@ -1136,6 +1140,18 @@
|
||||
"source": "/api-reference/:path*",
|
||||
"destination": "/en/api-reference/:path*"
|
||||
},
|
||||
{
|
||||
"source": "/en/api-reference",
|
||||
"destination": "/en/api-reference/introduction"
|
||||
},
|
||||
{
|
||||
"source": "/pt-BR/api-reference",
|
||||
"destination": "/pt-BR/api-reference/introduction"
|
||||
},
|
||||
{
|
||||
"source": "/ko/api-reference",
|
||||
"destination": "/ko/api-reference/introduction"
|
||||
},
|
||||
{
|
||||
"source": "/examples/:path*",
|
||||
"destination": "/en/examples/:path*"
|
||||
|
||||
docs/en/api-reference/inputs.mdx (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: "GET /inputs"
|
||||
description: "Get required inputs for your crew"
|
||||
openapi: "/enterprise-api.en.yaml GET /inputs"
|
||||
---
|
||||
|
||||
|
||||
docs/en/api-reference/kickoff.mdx (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: "POST /kickoff"
|
||||
description: "Start a crew execution"
|
||||
openapi: "/enterprise-api.en.yaml POST /kickoff"
|
||||
---
|
||||
|
||||
|
||||
docs/en/api-reference/status.mdx (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: "GET /status/{kickoff_id}"
|
||||
description: "Get execution status"
|
||||
openapi: "/enterprise-api.en.yaml GET /status/{kickoff_id}"
|
||||
---
|
||||
|
||||
|
||||
@@ -177,14 +177,7 @@ class MyCustomCrew:
|
||||
# Your crew implementation...
|
||||
```
|
||||
|
||||
This is exactly how CrewAI's built-in `agentops_listener` is registered. In the CrewAI codebase, you'll find:
|
||||
|
||||
```python
|
||||
# src/crewai/utilities/events/third_party/__init__.py
|
||||
from .agentops_listener import agentops_listener
|
||||
```
|
||||
|
||||
This ensures the `agentops_listener` is loaded when the `crewai.utilities.events` package is imported.
|
||||
This is how third-party event listeners are registered in the CrewAI codebase.
|
||||
|
||||
## Available Event Types
|
||||
|
||||
@@ -280,77 +273,6 @@ The structure of the event object depends on the event type, but all events inhe
|
||||
|
||||
Additional fields vary by event type. For example, `CrewKickoffCompletedEvent` includes `crew_name` and `output` fields.
|
||||
|
||||
## Real-World Example: Integration with AgentOps
|
||||
|
||||
CrewAI includes an example of a third-party integration with [AgentOps](https://github.com/AgentOps-AI/agentops), a monitoring and observability platform for AI agents. Here's how it's implemented:
|
||||
|
||||
```python
|
||||
from typing import Optional
|
||||
|
||||
from crewai.utilities.events import (
|
||||
CrewKickoffCompletedEvent,
|
||||
ToolUsageErrorEvent,
|
||||
ToolUsageStartedEvent,
|
||||
)
|
||||
from crewai.utilities.events.base_event_listener import BaseEventListener
|
||||
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
|
||||
from crewai.utilities.events.task_events import TaskEvaluationEvent
|
||||
|
||||
try:
|
||||
import agentops
|
||||
AGENTOPS_INSTALLED = True
|
||||
except ImportError:
|
||||
AGENTOPS_INSTALLED = False
|
||||
|
||||
class AgentOpsListener(BaseEventListener):
|
||||
tool_event: Optional["agentops.ToolEvent"] = None
|
||||
session: Optional["agentops.Session"] = None
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
def setup_listeners(self, crewai_event_bus):
|
||||
if not AGENTOPS_INSTALLED:
|
||||
return
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffStartedEvent)
|
||||
def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
|
||||
self.session = agentops.init()
|
||||
for agent in source.agents:
|
||||
if self.session:
|
||||
self.session.create_agent(
|
||||
name=agent.role,
|
||||
agent_id=str(agent.id),
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffCompletedEvent)
|
||||
def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
|
||||
if self.session:
|
||||
self.session.end_session(
|
||||
end_state="Success",
|
||||
end_state_reason="Finished Execution",
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(ToolUsageStartedEvent)
|
||||
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
|
||||
self.tool_event = agentops.ToolEvent(name=event.tool_name)
|
||||
if self.session:
|
||||
self.session.record(self.tool_event)
|
||||
|
||||
@crewai_event_bus.on(ToolUsageErrorEvent)
|
||||
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
|
||||
agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
|
||||
```
|
||||
|
||||
This listener initializes an AgentOps session when a Crew starts, registers agents with AgentOps, tracks tool usage, and ends the session when the Crew completes.
|
||||
|
||||
The AgentOps listener is registered in CrewAI's event system through the import in `src/crewai/utilities/events/third_party/__init__.py`:
|
||||
|
||||
```python
|
||||
from .agentops_listener import agentops_listener
|
||||
```
|
||||
|
||||
This ensures the `agentops_listener` is loaded when the `crewai.utilities.events` package is imported.
|
||||
|
||||
## Advanced Usage: Scoped Handlers
|
||||
|
||||
|
||||
@@ -539,16 +539,71 @@ crew = Crew(
|
||||
)
|
||||
```
|
||||
|
||||
### Mem0 Provider
|
||||
|
||||
Short-Term Memory and Entity Memory both support tight integration with Mem0 OSS and the Mem0 Client as a provider. Here is how to use Mem0 as a provider.
|
||||
|
||||
```python
|
||||
from crewai.memory.short_term.short_term_memory import ShortTermMemory
|
||||
from crewai.memory.entity.entity_memory import EntityMemory
|
||||
|
||||
mem0_oss_embedder_config = {
|
||||
"provider": "mem0",
|
||||
"config": {
|
||||
"user_id": "john",
|
||||
"local_mem0_config": {
|
||||
"vector_store": {"provider": "qdrant","config": {"host": "localhost", "port": 6333}},
|
||||
"llm": {"provider": "openai","config": {"api_key": "your-api-key", "model": "gpt-4"}},
|
||||
"embedder": {"provider": "openai","config": {"api_key": "your-api-key", "model": "text-embedding-3-small"}}
|
||||
},
|
||||
"infer": True # Optional defaults to True
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
mem0_client_embedder_config = {
|
||||
"provider": "mem0",
|
||||
"config": {
|
||||
"user_id": "john",
|
||||
"org_id": "my_org_id", # Optional
|
||||
"project_id": "my_project_id", # Optional
|
||||
"api_key": "custom-api-key" # Optional - overrides env var
|
||||
"run_id": "my_run_id", # Optional - for short-term memory
|
||||
"includes": "include1", # Optional
|
||||
"excludes": "exclude1", # Optional
|
||||
"infer": True # Optional defaults to True
|
||||
"custom_categories": new_categories # Optional - custom categories for user memory
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
short_term_memory_mem0_oss = ShortTermMemory(embedder_config=mem0_oss_embedder_config) # Short Term Memory with Mem0 OSS
|
||||
short_term_memory_mem0_client = ShortTermMemory(embedder_config=mem0_client_embedder_config) # Short Term Memory with Mem0 Client
|
||||
entity_memory_mem0_oss = EntityMemory(embedder_config=mem0_oss_embedder_config) # Entity Memory with Mem0 OSS
|
||||
entity_memory_mem0_client = EntityMemory(embedder_config=mem0_client_embedder_config) # Entity Memory with Mem0 Client
|
||||
|
||||
crew = Crew(
|
||||
memory=True,
|
||||
short_term_memory=short_term_memory_mem0_oss, # or short_term_memory_mem0_client
|
||||
entity_memory=entity_memory_mem0_oss # or entity_memory_mem0_client
|
||||
)
|
||||
```
|
||||
|
||||
### Choosing the Right Embedding Provider
|
||||
|
||||
| Provider | Best For | Pros | Cons |
|
||||
|:---------|:----------|:------|:------|
|
||||
| **OpenAI** | General use, reliability | High quality, well-tested | Cost, requires API key |
|
||||
| **Ollama** | Privacy, cost savings | Free, local, private | Requires local setup |
|
||||
| **Google AI** | Google ecosystem | Good performance | Requires Google account |
|
||||
| **Azure OpenAI** | Enterprise, compliance | Enterprise features | Complex setup |
|
||||
| **Cohere** | Multilingual content | Great language support | Specialized use case |
|
||||
| **VoyageAI** | Retrieval tasks | Optimized for search | Newer provider |
|
||||
When selecting an embedding provider, consider factors like performance, privacy, cost, and integration needs.
|
||||
Below is a comparison to help you decide:
|
||||
|
||||
| Provider | Best For | Pros | Cons |
|
||||
| -------------- | ------------------------------ | --------------------------------- | ------------------------- |
|
||||
| **OpenAI** | General use, high reliability | High quality, widely tested | Paid service, API key required |
|
||||
| **Ollama** | Privacy-focused, cost savings | Free, runs locally, fully private | Requires local installation/setup |
|
||||
| **Google AI** | Integration in Google ecosystem| Strong performance, good support | Google account required |
|
||||
| **Azure OpenAI** | Enterprise & compliance needs| Enterprise-grade features, security | More complex setup process |
|
||||
| **Cohere** | Multilingual content handling | Excellent language support | More niche use cases |
|
||||
| **VoyageAI** | Information retrieval & search | Optimized for retrieval tasks | Relatively new provider |
|
||||
| **Mem0** | Per-user personalization | Search-optimized embeddings | Paid service, API key required |
|
||||
|
||||
|
||||
### Environment Variable Configuration
|
||||
|
||||
|
||||
@@ -21,13 +21,17 @@ To use the training feature, follow these steps:
|
||||
3. Run the following command:
|
||||
|
||||
```shell
|
||||
crewai train -n <n_iterations> <filename> (optional)
|
||||
crewai train -n <n_iterations> -f <filename.pkl>
|
||||
```
|
||||
<Tip>
|
||||
Replace `<n_iterations>` with the desired number of training iterations and `<filename>` with the appropriate filename ending with `.pkl`.
|
||||
</Tip>
|
||||
|
||||
### Training Your Crew Programmatically
|
||||
<Note>
|
||||
If you omit `-f`, the output defaults to `trained_agents_data.pkl` in the current working directory. You can pass an absolute path to control where the file is written.
|
||||
</Note>
|
||||
|
||||
### Training your Crew programmatically
|
||||
|
||||
To train your crew programmatically, use the following steps:
|
||||
|
||||
@@ -51,19 +55,65 @@ except Exception as e:
|
||||
raise Exception(f"An error occurred while training the crew: {e}")
|
||||
```
|
||||
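For orientation, the elided portion of the snippet above typically reduces to a single `train(...)` call on your crew object. The sketch below uses placeholder names for the project and crew class:

```python
# Minimal sketch of the programmatic call described above; the crew class,
# inputs, and filename are placeholders for your own project.
from my_project.crew import MyProjectCrew  # hypothetical import path

MyProjectCrew().crew().train(
    n_iterations=3,
    inputs={"topic": "CrewAI Training"},
    filename="trained_agents_data.pkl",
)
```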
|
||||
### Key Points to Note
|
||||
## How trained data is used by agents
|
||||
|
||||
- **Positive Integer Requirement:** Ensure that the number of iterations (`n_iterations`) is a positive integer. The code will raise a `ValueError` if this condition is not met.
|
||||
- **Filename Requirement:** Ensure that the filename ends with `.pkl`. The code will raise a `ValueError` if this condition is not met.
|
||||
- **Error Handling:** The code handles subprocess errors and unexpected exceptions, providing error messages to the user.
|
||||
CrewAI uses the training artifacts in two ways: during training to incorporate your human feedback, and after training to guide agents with consolidated suggestions.
|
||||
|
||||
It is important to note that the training process may take some time, depending on the complexity of your agents and will also require your feedback on each iteration.
|
||||
### Training data flow
|
||||
|
||||
Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A["Start training<br/>CLI: crewai train -n -f<br/>or Python: crew.train(...)"] --> B["Setup training mode<br/>- task.human_input = true<br/>- disable delegation<br/>- init training_data.pkl + trained file"]
|
||||
|
||||
Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.
|
||||
subgraph "Iterations"
|
||||
direction LR
|
||||
C["Iteration i<br/>initial_output"] --> D["User human_feedback"]
|
||||
D --> E["improved_output"]
|
||||
E --> F["Append to training_data.pkl<br/>by agent_id and iteration"]
|
||||
end
|
||||
|
||||
Happy training with CrewAI! 🚀
|
||||
B --> C
|
||||
F --> G{"More iterations?"}
|
||||
G -- "Yes" --> C
|
||||
G -- "No" --> H["Evaluate per agent<br/>aggregate iterations"]
|
||||
|
||||
H --> I["Consolidate<br/>suggestions[] + quality + final_summary"]
|
||||
I --> J["Save by agent role to trained file<br/>(default: trained_agents_data.pkl)"]
|
||||
|
||||
J --> K["Normal (non-training) runs"]
|
||||
K --> L["Auto-load suggestions<br/>from trained_agents_data.pkl"]
|
||||
L --> M["Append to prompt<br/>for consistent improvements"]
|
||||
```
|
||||
|
||||
### During training runs
|
||||
|
||||
- On each iteration, the system records for every agent:
|
||||
- `initial_output`: the agent’s first answer
|
||||
- `human_feedback`: your inline feedback when prompted
|
||||
- `improved_output`: the agent’s follow-up answer after feedback
|
||||
- This data is stored in a working file named `training_data.pkl` keyed by the agent’s internal ID and iteration.
|
||||
- While training is active, the agent automatically appends your prior human feedback to its prompt to enforce those instructions on subsequent attempts within the training session.
|
||||
Training is interactive: tasks set `human_input = true`, so running in a non-interactive environment will block on user input.
|
||||
|
||||
### After training completes
|
||||
|
||||
- When `train(...)` finishes, CrewAI evaluates the collected training data per agent and produces a consolidated result containing:
|
||||
- `suggestions`: clear, actionable instructions distilled from your feedback and the difference between initial/improved outputs
|
||||
- `quality`: a 0–10 score capturing improvement
|
||||
- `final_summary`: a step-by-step set of action items for future tasks
|
||||
- These consolidated results are saved to the filename you pass to `train(...)` (default via CLI is `trained_agents_data.pkl`). Entries are keyed by the agent’s `role` so they can be applied across sessions.
|
||||
- During normal (non-training) execution, each agent automatically loads its consolidated `suggestions` and appends them to the task prompt as mandatory instructions. This gives you consistent improvements without changing your agent definitions.
|
||||
|
||||
### File summary
|
||||
|
||||
- `training_data.pkl` (ephemeral, per-session):
|
||||
- Structure: `agent_id -> { iteration_number: { initial_output, human_feedback, improved_output } }`
|
||||
- Purpose: capture raw data and human feedback during training
|
||||
- Location: saved in the current working directory (CWD)
|
||||
- `trained_agents_data.pkl` (or your custom filename):
|
||||
- Structure: `agent_role -> { suggestions: string[], quality: number, final_summary: string }`
|
||||
- Purpose: persist consolidated guidance for future runs
|
||||
- Location: written to the CWD by default; use `-f` to set a custom (including absolute) path
|
||||
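A quick way to verify what was written is to load these files with `pickle`; this is only an inspection sketch and assumes the default filenames described in the list above.

```python
import pickle
from pathlib import Path

# Consolidated guidance keyed by agent role (written when training finishes)
trained = pickle.loads(Path("trained_agents_data.pkl").read_bytes())
for role, result in trained.items():
    print(role, result["quality"], result["suggestions"])

# Raw per-iteration data keyed by internal agent id (training-session working file)
if Path("training_data.pkl").exists():
    raw = pickle.loads(Path("training_data.pkl").read_bytes())
    print(list(raw.keys()))
```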
|
||||
## Small Language Model Considerations
|
||||
|
||||
@@ -129,3 +179,18 @@ Happy training with CrewAI! 🚀
|
||||
</Warning>
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
### Key Points to Note
|
||||
|
||||
- **Positive Integer Requirement:** Ensure that the number of iterations (`n_iterations`) is a positive integer. The code will raise a `ValueError` if this condition is not met.
|
||||
- **Filename Requirement:** Ensure that the filename ends with `.pkl`. The code will raise a `ValueError` if this condition is not met.
|
||||
- **Error Handling:** The code handles subprocess errors and unexpected exceptions, providing error messages to the user.
|
||||
- Trained guidance is applied at prompt time; it does not modify your Python/YAML agent configuration.
|
||||
- Agents automatically load trained suggestions from a file named `trained_agents_data.pkl` located in the current working directory. If you trained to a different filename, either rename it to `trained_agents_data.pkl` before running, or adjust the loader in code.
|
||||
- You can change the output filename when calling `crewai train` with `-f/--filename`. Absolute paths are supported if you want to save outside the CWD.
|
||||
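If you trained to a custom filename, one way to follow the renaming note above is a small copy step before your next run (paths are illustrative):

```python
import shutil

# Copy a custom training artifact to the default name the loader expects in the CWD.
shutil.copy("my_custom_trained.pkl", "trained_agents_data.pkl")
```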
|
||||
It is important to note that the training process may take some time, depending on the complexity of your agents, and it will also require your feedback on each iteration.
|
||||
|
||||
Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.
|
||||
|
||||
Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.
|
||||
|
||||
@@ -35,6 +35,22 @@ crewai tool install <tool-name>
|
||||
|
||||
This installs the tool and adds it to `pyproject.toml`.
|
||||
|
||||
You can use the tool by importing it and adding it to your agents:
|
||||
|
||||
```python
|
||||
from crewai import Agent
from your_tool.tool import YourTool
|
||||
|
||||
custom_tool = YourTool()
|
||||
|
||||
researcher = Agent(
|
||||
role='Market Research Analyst',
|
||||
goal='Provide up-to-date market analysis of the AI industry',
|
||||
backstory='An expert analyst with a keen eye for market trends.',
|
||||
tools=[custom_tool],
|
||||
verbose=True
|
||||
)
|
||||
```
|
||||
|
||||
## Creating and Publishing Tools
|
||||
|
||||
To create a new tool project:
|
||||
|
||||
@@ -1,126 +0,0 @@
|
||||
---
|
||||
title: AgentOps Integration
|
||||
description: Understanding and logging your agent performance with AgentOps.
|
||||
icon: paperclip
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
Observability is a key aspect of developing and deploying conversational AI agents. It allows developers to understand how their agents are performing,
|
||||
how their agents are interacting with users, and how their agents use external tools and APIs.
|
||||
AgentOps is a product independent of CrewAI that provides a comprehensive observability solution for agents.
|
||||
|
||||
## AgentOps
|
||||
|
||||
[AgentOps](https://agentops.ai/?=crew) provides session replays, metrics, and monitoring for agents.
|
||||
|
||||
At a high level, AgentOps gives you the ability to monitor cost, token usage, latency, agent failures, session-wide statistics, and more.
|
||||
For more info, check out the [AgentOps Repo](https://github.com/AgentOps-AI/agentops).
|
||||
|
||||
### Overview
|
||||
|
||||
AgentOps provides monitoring for agents in development and production.
|
||||
It provides a dashboard for tracking agent performance, session replays, and custom reporting.
|
||||
|
||||
Additionally, AgentOps provides session drilldowns for viewing Crew agent interactions, LLM calls, and tool usage in real-time.
|
||||
This feature is useful for debugging and understanding how agents interact with users as well as other agents.
|
||||
|
||||

|
||||

|
||||

|
||||
|
||||
### Features
|
||||
|
||||
- **LLM Cost Management and Tracking**: Track spend with foundation model providers.
|
||||
- **Replay Analytics**: Watch step-by-step agent execution graphs.
|
||||
- **Recursive Thought Detection**: Identify when agents fall into infinite loops.
|
||||
- **Custom Reporting**: Create custom analytics on agent performance.
|
||||
- **Analytics Dashboard**: Monitor high-level statistics about agents in development and production.
|
||||
- **Public Model Testing**: Test your agents against benchmarks and leaderboards.
|
||||
- **Custom Tests**: Run your agents against domain-specific tests.
|
||||
- **Time Travel Debugging**: Restart your sessions from checkpoints.
|
||||
- **Compliance and Security**: Create audit logs and detect potential threats such as profanity and PII leaks.
|
||||
- **Prompt Injection Detection**: Identify potential code injection and secret leaks.
|
||||
|
||||
### Using AgentOps
|
||||
|
||||
<Steps>
|
||||
<Step title="Create an API Key">
|
||||
Create a user API key here: [Create API Key](https://app.agentops.ai/account)
|
||||
</Step>
|
||||
<Step title="Configure Your Environment">
|
||||
Add your API key to your environment variables:
|
||||
```bash
|
||||
AGENTOPS_API_KEY=<YOUR_AGENTOPS_API_KEY>
|
||||
```
|
||||
</Step>
|
||||
<Step title="Install AgentOps">
|
||||
Install AgentOps with:
|
||||
```bash
|
||||
pip install 'crewai[agentops]'
|
||||
```
|
||||
or
|
||||
```bash
|
||||
pip install agentops
|
||||
```
|
||||
</Step>
|
||||
<Step title="Initialize AgentOps">
|
||||
Before using `Crew` in your script, include these lines:
|
||||
|
||||
```python
|
||||
import agentops
|
||||
agentops.init()
|
||||
```
|
||||
|
||||
This will initiate an AgentOps session as well as automatically track Crew agents. For further info on how to outfit more complex agentic systems,
|
||||
check out the [AgentOps documentation](https://docs.agentops.ai) or join the [Discord](https://discord.gg/j4f3KbeH).
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
### Crew + AgentOps Examples
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card
|
||||
title="Job Posting"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/job-posting"
|
||||
icon="briefcase"
|
||||
iconType="solid"
|
||||
>
|
||||
Example of a Crew agent that generates job posts.
|
||||
</Card>
|
||||
<Card
|
||||
title="Markdown Validator"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/markdown_validator"
|
||||
icon="markdown"
|
||||
iconType="solid"
|
||||
>
|
||||
Example of a Crew agent that validates Markdown files.
|
||||
</Card>
|
||||
<Card
|
||||
title="Instagram Post"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/instagram_post"
|
||||
icon="square-instagram"
|
||||
iconType="brands"
|
||||
>
|
||||
Example of a Crew agent that generates Instagram posts.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
### Further Information
|
||||
|
||||
To get started, create an [AgentOps account](https://agentops.ai/?=crew).
|
||||
|
||||
For feature requests or bug reports, please reach out to the AgentOps team on the [AgentOps Repo](https://github.com/AgentOps-AI/agentops).
|
||||
|
||||
#### Extra links
|
||||
|
||||
<a href="https://twitter.com/agentopsai/">🐦 Twitter</a>
|
||||
<span> • </span>
|
||||
<a href="https://discord.gg/JHPt4C7r">📢 Discord</a>
|
||||
<span> • </span>
|
||||
<a href="https://app.agentops.ai/?=crew">🖇️ AgentOps Dashboard</a>
|
||||
<span> • </span>
|
||||
<a href="https://docs.agentops.ai/introduction">📙 Documentation</a>
|
||||
@@ -21,9 +21,6 @@ Observability is crucial for understanding how your CrewAI agents perform, ident
|
||||
### Monitoring & Tracing Platforms
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="AgentOps" icon="paperclip" href="/en/observability/agentops">
|
||||
Session replays, metrics, and monitoring for agent development and production.
|
||||
</Card>
|
||||
|
||||
<Card title="LangDB" icon="database" href="/en/observability/langdb">
|
||||
End-to-end tracing for CrewAI workflows with automatic agent interaction capture.
|
||||
|
||||
@@ -117,4 +117,19 @@ agent = Agent(
|
||||
)
|
||||
```
|
||||
|
||||
## **Max Usage Count**
|
||||
|
||||
You can set a maximum usage count for a tool to prevent it from being used more than a certain number of times.
|
||||
By default, the max usage count is unlimited.
|
||||
|
||||
|
||||
|
||||
```python
|
||||
from crewai_tools import FileReadTool
|
||||
|
||||
tool = FileReadTool(max_usage_count=5, ...)
|
||||
```
|
||||
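As a rough sketch of wiring a usage-capped tool into an agent (the role, goal, and backstory strings are illustrative):

```python
from crewai import Agent
from crewai_tools import FileReadTool

file_tool = FileReadTool(max_usage_count=5)  # per the note above, limits this tool to 5 uses

analyst = Agent(
    role="Data Analyst",
    goal="Summarize local data files",
    backstory="Methodical about avoiding repeated file reads.",
    tools=[file_tool],
    verbose=True,
)
```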
|
||||
|
||||
|
||||
Ready to explore? Pick a category above to discover tools that fit your use case!
|
||||
|
||||
Binary file not shown (before: 288 KiB).
Binary file not shown (before: 419 KiB).
Binary file not shown (before: 263 KiB).
docs/ko/api-reference/inputs.mdx (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: "GET /inputs"
|
||||
description: "크루가 필요로 하는 입력 확인"
|
||||
openapi: "/enterprise-api.ko.yaml GET /inputs"
|
||||
---
|
||||
|
||||
|
||||
docs/ko/api-reference/kickoff.mdx (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: "POST /kickoff"
|
||||
description: "크루 실행 시작"
|
||||
openapi: "/enterprise-api.ko.yaml POST /kickoff"
|
||||
---
|
||||
|
||||
|
||||
docs/ko/api-reference/status.mdx (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: "GET /status/{kickoff_id}"
|
||||
description: "실행 상태 조회"
|
||||
openapi: "/enterprise-api.ko.yaml GET /status/{kickoff_id}"
|
||||
---
|
||||
|
||||
|
||||
@@ -177,14 +177,7 @@ class MyCustomCrew:
|
||||
# Your crew implementation...
|
||||
```
|
||||
|
||||
이것이 바로 CrewAI의 내장 `agentops_listener`가 등록되는 방식과 동일합니다. CrewAI 코드베이스에서는 다음과 같이 되어 있습니다:
|
||||
|
||||
```python
|
||||
# src/crewai/utilities/events/third_party/__init__.py
|
||||
from .agentops_listener import agentops_listener
|
||||
```
|
||||
|
||||
이렇게 하면 `crewai.utilities.events` 패키지가 임포트될 때 `agentops_listener`가 자동으로 로드됩니다.
|
||||
이것이 CrewAI 코드베이스에서 서드파티 이벤트 리스너가 등록되는 방식입니다.
|
||||
|
||||
## 사용 가능한 이벤트 유형
|
||||
|
||||
@@ -280,77 +273,6 @@ CrewAI는 여러분이 청취할 수 있는 다양한 이벤트를 제공합니
|
||||
|
||||
추가 필드는 이벤트 타입에 따라 다릅니다. 예를 들어, `CrewKickoffCompletedEvent`에는 `crew_name`과 `output` 필드가 포함됩니다.
|
||||
|
||||
## 실제 예시: AgentOps와의 통합
|
||||
|
||||
CrewAI는 AI 에이전트를 위한 모니터링 및 관찰 플랫폼인 [AgentOps](https://github.com/AgentOps-AI/agentops)와의 서드파티 통합 예시를 포함하고 있습니다. 구현 방식은 다음과 같습니다:
|
||||
|
||||
```python
|
||||
from typing import Optional
|
||||
|
||||
from crewai.utilities.events import (
|
||||
CrewKickoffCompletedEvent,
|
||||
ToolUsageErrorEvent,
|
||||
ToolUsageStartedEvent,
|
||||
)
|
||||
from crewai.utilities.events.base_event_listener import BaseEventListener
|
||||
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
|
||||
from crewai.utilities.events.task_events import TaskEvaluationEvent
|
||||
|
||||
try:
|
||||
import agentops
|
||||
AGENTOPS_INSTALLED = True
|
||||
except ImportError:
|
||||
AGENTOPS_INSTALLED = False
|
||||
|
||||
class AgentOpsListener(BaseEventListener):
|
||||
tool_event: Optional["agentops.ToolEvent"] = None
|
||||
session: Optional["agentops.Session"] = None
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
def setup_listeners(self, crewai_event_bus):
|
||||
if not AGENTOPS_INSTALLED:
|
||||
return
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffStartedEvent)
|
||||
def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
|
||||
self.session = agentops.init()
|
||||
for agent in source.agents:
|
||||
if self.session:
|
||||
self.session.create_agent(
|
||||
name=agent.role,
|
||||
agent_id=str(agent.id),
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffCompletedEvent)
|
||||
def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
|
||||
if self.session:
|
||||
self.session.end_session(
|
||||
end_state="Success",
|
||||
end_state_reason="Finished Execution",
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(ToolUsageStartedEvent)
|
||||
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
|
||||
self.tool_event = agentops.ToolEvent(name=event.tool_name)
|
||||
if self.session:
|
||||
self.session.record(self.tool_event)
|
||||
|
||||
@crewai_event_bus.on(ToolUsageErrorEvent)
|
||||
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
|
||||
agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
|
||||
```
|
||||
|
||||
이 listener는 crew가 시작될 때 AgentOps 세션을 초기화하고, agent를 AgentOps에 등록하며, 도구 사용을 추적하고, crew가 완료되면 세션을 종료합니다.
|
||||
|
||||
AgentOps listener는 `src/crewai/utilities/events/third_party/__init__.py` 파일의 import를 통해 CrewAI 이벤트 시스템에 등록됩니다:
|
||||
|
||||
```python
|
||||
from .agentops_listener import agentops_listener
|
||||
```
|
||||
|
||||
이렇게 하면 `crewai.utilities.events` 패키지가 import될 때 `agentops_listener`가 로드되는 것이 보장됩니다.
|
||||
|
||||
## 고급 사용법: Scoped Handlers
|
||||
|
||||
|
||||
@@ -1,65 +1,65 @@
|
||||
---
|
||||
title: 소개
|
||||
description: 함께 협력하여 복잡한 작업을 해결하는 AI 에이전트 팀 구축
|
||||
description: 함께 협력하여 복잡한 작업을 해결하는 AI agent 팀 구축
|
||||
icon: handshake
|
||||
---
|
||||
|
||||
# CrewAI란 무엇인가?
|
||||
|
||||
**CrewAI는 완전히 독립적으로, LangChain이나 기타 agent 프레임워크에 의존하지 않고 처음부터 스크래치로 개발된 가볍고 매우 빠른 Python 프레임워크입니다.**
|
||||
**CrewAI는 LangChain이나 기타 agent 프레임워크에 의존하지 않고, 완전히 독립적으로 처음부터 스크래치로 개발된 가볍고 매우 빠른 Python 프레임워크입니다.**
|
||||
|
||||
CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공하여, 어떤 시나리오에도 맞춤화된 자율 AI agent를 만드는 데 이상적입니다:
|
||||
|
||||
- **[CrewAI Crews](/ko/guides/crews/first-crew)**: 자율성과 협업 지능을 극대화하여, 각 agent가 특정 역할, 도구, 목표를 가진 AI 팀을 만들 수 있습니다.
|
||||
- **[CrewAI Flows](/ko/guides/flows/first-flow)**: 세밀한 이벤트 기반 제어와 단일 LLM 호출을 통한 정확한 작업 오케스트레이션을 가능하게 하며 Crews를 네이티브로 지원합니다.
|
||||
- **[CrewAI Flows](/ko/guides/flows/first-flow)**: 이벤트 기반의 세밀한 제어와 단일 LLM 호출을 통한 정확한 작업 orchestration을 지원하며, Crews와 네이티브로 통합됩니다.
|
||||
|
||||
10만 명이 넘는 개발자가 커뮤니티 과정을 통해 인증을 받았으며, CrewAI는 기업용 AI 자동화의 표준으로 빠르게 자리잡고 있습니다.
|
||||
|
||||
## 크루 작동 방식
|
||||
## Crew의 작동 방식
|
||||
|
||||
<Note>
|
||||
회사가 비즈니스 목표를 달성하기 위해 여러 부서(영업, 엔지니어링, 마케팅 등)가 리더십 아래에서 함께 일하는 것처럼, CrewAI는 복잡한 작업을 달성하기 위해 전문화된 역할의 AI 에이전트들이 협력하는 조직을 만들 수 있도록 도와줍니다.
|
||||
회사가 비즈니스 목표를 달성하기 위해 여러 부서(영업, 엔지니어링, 마케팅 등)가 리더십 아래에서 함께 일하는 것처럼, CrewAI는 복잡한 작업을 달성하기 위해 전문화된 역할의 AI agent들이 협력하는 조직을 만들 수 있도록 도와줍니다.
|
||||
</Note>
|
||||
|
||||
<Frame caption="CrewAI 프레임워크 개요">
|
||||
<Frame caption="CrewAI Framework Overview">
|
||||
<img src="/images/crews.png" alt="CrewAI Framework Overview" />
|
||||
</Frame>
|
||||
|
||||
| 구성 요소 | 설명 | 주요 특징 |
|
||||
|:--------------|:---------------------:|:----------|
|
||||
| **크루** | 최상위 조직 | • AI 에이전트 팀 관리<br/>• 워크플로우 감독<br/>• 협업 보장<br/>• 결과 전달 |
|
||||
| **AI 에이전트** | 전문 팀원 | • 특정 역할 보유(연구원, 작가 등)<br/>• 지정된 도구 사용<br/>• 작업 위임 가능<br/>• 자율적 의사결정 가능 |
|
||||
| **프로세스** | 워크플로우 관리 시스템 | • 협업 패턴 정의<br/>• 작업 할당 제어<br/>• 상호작용 관리<br/>• 효율적 실행 보장 |
|
||||
| **작업** | 개별 할당 | • 명확한 목표 보유<br/>• 특정 도구 사용<br/>• 더 큰 프로세스에 기여<br/>• 실행 가능한 결과 도출 |
|
||||
| 구성 요소 | 설명 | 주요 특징 |
|
||||
|:----------|:----:|:----------|
|
||||
| **Crew** | 최상위 조직 | • AI agent 팀 관리<br/>• workflow 감독<br/>• 협업 보장<br/>• 결과 전달 |
|
||||
| **AI agents** | 전문 팀원 | • 특정 역할 보유(Researcher, Writer 등)<br/>• 지정된 도구 사용<br/>• 작업 위임 가능<br/>• 자율적 의사결정 가능 |
|
||||
| **Process** | workflow 관리 시스템 | • 협업 패턴 정의<br/>• 작업 할당 제어<br/>• 상호작용 관리<br/>• 효율적 실행 보장 |
|
||||
| **Task** | 개별 할당 | • 명확한 목표 보유<br/>• 특정 도구 사용<br/>• 더 큰 프로세스에 기여<br/>• 실행 가능한 결과 도출 |
|
||||
|
||||
### 어떻게 모두 함께 작동하는가
|
||||
### 전체 구조의 동작 방식
|
||||
|
||||
1. **Crew**가 전체 운영을 조직합니다
|
||||
2. **AI Agents**가 자신들의 전문 작업을 수행합니다
|
||||
2. **AI agents**가 자신들의 전문 작업을 수행합니다
|
||||
3. **Process**가 원활한 협업을 보장합니다
|
||||
4. **Tasks**가 완료되어 목표를 달성합니다
|
||||
|
||||
## 주요 기능
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="역할 기반 에이전트" icon="users">
|
||||
연구원, 분석가, 작가 등 다양한 역할, 전문성, 목표를 가진 전문 에이전트를 생성할 수 있습니다
|
||||
<Card title="역할 기반 agent" icon="users">
|
||||
Researcher, Analyst, Writer 등 다양한 역할과 전문성, 목표를 가진 agent를 생성할 수 있습니다
|
||||
</Card>
|
||||
<Card title="유연한 도구" icon="screwdriver-wrench">
|
||||
에이전트에게 외부 서비스 및 데이터 소스와 상호작용할 수 있는 맞춤형 도구와 API를 제공합니다
|
||||
agent에게 외부 서비스 및 데이터 소스와 상호작용할 수 있는 맞춤형 도구와 API를 제공합니다
|
||||
</Card>
|
||||
<Card title="지능형 협업" icon="people-arrows">
|
||||
에이전트가 함께 작업하며, 인사이트를 공유하고 작업을 조율하여 복잡한 목표를 달성합니다
|
||||
agent들이 함께 작업하며, 인사이트를 공유하고 작업을 조율하여 복잡한 목표를 달성합니다
|
||||
</Card>
|
||||
<Card title="작업 관리" icon="list-check">
|
||||
순차적 또는 병렬 워크플로우를 정의할 수 있으며, 에이전트가 작업 의존성을 자동으로 처리합니다
|
||||
순차적 또는 병렬 workflow를 정의할 수 있으며, agent가 작업 의존성을 자동으로 처리합니다
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## 플로우의 작동 원리
|
||||
## Flow의 작동 원리
|
||||
|
||||
<Note>
|
||||
crew는 자율 협업에 탁월한 반면, 플로우는 구조화된 자동화를 제공하여 워크플로우 실행에 대한 세밀한 제어를 제공합니다. 플로우는 조건부 로직, 반복문, 동적 상태 관리를 정확하게 처리하면서 작업이 신뢰성 있게, 안전하게, 효율적으로 실행되도록 보장합니다. 플로우는 crew와 원활하게 통합되어 높은 자율성과 엄격한 제어의 균형을 이룰 수 있게 해줍니다.
|
||||
Crew가 자율 협업에 탁월하다면, Flow는 구조화된 자동화를 제공하여 workflow 실행에 대한 세밀한 제어를 제공합니다. Flow는 조건부 로직, 반복문, 동적 상태 관리를 정확하게 처리하면서 작업이 신뢰성 있게, 안전하게, 효율적으로 실행되도록 보장합니다. Flow는 Crew와 원활하게 통합되어 높은 자율성과 엄격한 제어의 균형을 이룰 수 있게 해줍니다.
|
||||
</Note>
|
||||
|
||||
<Frame caption="CrewAI Framework Overview">
|
||||
@@ -68,41 +68,41 @@ CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공
|
||||
|
||||
| 구성 요소 | 설명 | 주요 기능 |
|
||||
|:----------|:-----------:|:------------|
|
||||
| **Flow** | 구조화된 워크플로우 오케스트레이션 | • 실행 경로 관리<br/>• 상태 전환 처리<br/>• 작업 순서 제어<br/>• 신뢰성 있는 실행 보장 |
|
||||
| **Events** | 워크플로우 액션 트리거 | • 특정 프로세스 시작<br/>• 동적 응답 가능<br/>• 조건부 분기 지원<br/>• 실시간 적응 허용 |
|
||||
| **States** | 워크플로우 실행 컨텍스트 | • 실행 데이터 유지<br/>• 데이터 영속성 지원<br/>• 재개 가능성 보장<br/>• 실행 무결성 확보 |
|
||||
| **Crew Support** | 워크플로우 자동화 강화 | • 필요할 때 agency 삽입<br/>• 구조화된 워크플로우 보완<br/>• 자동화와 인텔리전스의 균형<br/>• 적응적 의사결정 지원 |
|
||||
| **Flow** | 구조화된 workflow orchestration | • 실행 경로 관리<br/>• 상태 전환 처리<br/>• 작업 순서 제어<br/>• 신뢰성 있는 실행 보장 |
|
||||
| **Events** | workflow 액션 트리거 | • 특정 프로세스 시작<br/>• 동적 응답 가능<br/>• 조건부 분기 지원<br/>• 실시간 적응 허용 |
|
||||
| **States** | workflow 실행 컨텍스트 | • 실행 데이터 유지<br/>• 데이터 영속성 지원<br/>• 재개 가능성 보장<br/>• 실행 무결성 확보 |
|
||||
| **Crew Support** | workflow 자동화 강화 | • 필요할 때 agency 삽입<br/>• 구조화된 workflow 보완<br/>• 자동화와 인텔리전스의 균형<br/>• 적응적 의사결정 지원 |
|
||||
|
||||
### 주요 기능
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="이벤트 기반 오케스트레이션" icon="bolt">
|
||||
이벤트에 동적으로 반응하여 정밀한 실행 경로 정의
|
||||
<Card title="이벤트 기반 orchestration" icon="bolt">
|
||||
이벤트에 동적으로 반응하여 정밀한 실행 경로를 정의합니다
|
||||
</Card>
|
||||
<Card title="세밀한 제어" icon="sliders">
|
||||
워크플로우 상태와 조건부 실행을 안전하고 효율적으로 관리
|
||||
workflow 상태와 조건부 실행을 안전하고 효율적으로 관리합니다
|
||||
</Card>
|
||||
<Card title="네이티브 Crew 통합" icon="puzzle-piece">
|
||||
Crews와 손쉽게 결합하여 자율성과 지능 강화
|
||||
Crews와 손쉽게 결합하여 자율성과 지능을 강화합니다
|
||||
</Card>
|
||||
<Card title="결정론적 실행" icon="route">
|
||||
명시적 제어 흐름과 오류 처리로 예측 가능한 결과 보장
|
||||
명시적 제어 흐름과 오류 처리로 예측 가능한 결과를 보장합니다
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## 크루(Crews)와 플로우(Flows)를 언제 사용할까
|
||||
## Crew와 Flow를 언제 사용할까
|
||||
|
||||
<Note>
|
||||
[크루](/ko/guides/crews/first-crew)와 [플로우](/ko/guides/flows/first-flow)를 언제 사용할지 이해하는 것은 CrewAI의 잠재력을 애플리케이션에서 극대화하는 데 핵심적입니다.
|
||||
[Crew](/ko/guides/crews/first-crew)와 [Flow](/ko/guides/flows/first-flow)를 언제 사용할지 이해하는 것은 CrewAI의 잠재력을 애플리케이션에서 극대화하는 데 핵심적입니다.
|
||||
</Note>
|
||||
|
||||
| 사용 사례 | 권장 접근 방식 | 이유 |
|
||||
|:---------|:---------------------|:-----|
|
||||
| **개방형 연구** | [크루](/ko/guides/crews/first-crew) | 과제가 창의적인 사고, 탐색, 적응이 필요할 때 |
|
||||
| **콘텐츠 생성** | [크루](/ko/guides/crews/first-crew) | 기사, 보고서, 마케팅 자료 등 협업형 생성 시 |
|
||||
| **의사결정 워크플로우** | [플로우](/ko/guides/flows/first-flow) | 예측 가능하고 감사 가능한 의사결정 경로 및 정밀 제어가 필요할 때 |
|
||||
| **API 오케스트레이션** | [플로우](/ko/guides/flows/first-flow) | 특정 순서로 여러 외부 서비스에 신뢰성 있게 통합할 때 |
|
||||
| **하이브리드 애플리케이션** | 혼합 접근 방식 | [플로우](/ko/guides/flows/first-flow)로 전체 프로세스를 오케스트레이션하고, [크루](/ko/guides/crews/first-crew)로 복잡한 하위 작업을 처리 |
|
||||
| **개방형 연구** | [Crew](/ko/guides/crews/first-crew) | 창의적 사고, 탐색, 적응이 필요한 작업에 적합 |
|
||||
| **콘텐츠 생성** | [Crew](/ko/guides/crews/first-crew) | 기사, 보고서, 마케팅 자료 등 협업형 생성에 적합 |
|
||||
| **의사결정 workflow** | [Flow](/ko/guides/flows/first-flow) | 예측 가능하고 감사 가능한 의사결정 경로 및 정밀 제어가 필요할 때 |
|
||||
| **API orchestration** | [Flow](/ko/guides/flows/first-flow) | 특정 순서로 여러 외부 서비스에 신뢰성 있게 통합할 때 |
|
||||
| **하이브리드 애플리케이션** | 혼합 접근 방식 | [Flow](/ko/guides/flows/first-flow)로 전체 프로세스를 orchestration하고, [Crew](/ko/guides/crews/first-crew)로 복잡한 하위 작업을 처리 |
|
||||
|
||||
### 의사결정 프레임워크
|
||||
|
||||
@@ -112,8 +112,8 @@ CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공
|
||||
|
||||
## CrewAI를 선택해야 하는 이유?
|
||||
|
||||
- 🧠 **자율적 운영**: 에이전트가 자신의 역할과 사용 가능한 도구를 바탕으로 지능적인 결정을 내립니다
|
||||
- 📝 **자연스러운 상호작용**: 에이전트가 인간 팀원처럼 소통하고 협업합니다
|
||||
- 🧠 **자율적 운영**: agent가 자신의 역할과 사용 가능한 도구를 바탕으로 지능적인 결정을 내립니다
|
||||
- 📝 **자연스러운 상호작용**: agent가 인간 팀원처럼 소통하고 협업합니다
|
||||
- 🛠️ **확장 가능한 설계**: 새로운 도구, 역할, 기능을 쉽게 추가할 수 있습니다
|
||||
- 🚀 **프로덕션 준비 완료**: 실제 환경에서의 신뢰성과 확장성을 고려하여 구축되었습니다
|
||||
- 🔒 **보안 중심**: 엔터프라이즈 보안 요구 사항을 고려하여 설계되었습니다
|
||||
@@ -134,7 +134,7 @@ CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공
|
||||
icon="diagram-project"
|
||||
href="/ko/guides/flows/first-flow"
|
||||
>
|
||||
실행을 정밀하게 제어할 수 있는 구조화된, 이벤트 기반 워크플로우를 만드는 방법을 배워보세요.
|
||||
실행을 정밀하게 제어할 수 있는 구조화된, 이벤트 기반 workflow를 만드는 방법을 배워보세요.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
@@ -151,7 +151,7 @@ CrewAI는 고수준의 간편함과 정밀한 저수준 제어를 모두 제공
|
||||
icon="bolt"
|
||||
href="ko/quickstart"
|
||||
>
|
||||
빠른 시작 가이드를 따라 첫 번째 CrewAI 에이전트를 만들고 직접 경험해 보세요.
|
||||
빠른 시작 가이드를 따라 첫 번째 CrewAI agent를 만들고 직접 경험해 보세요.
|
||||
</Card>
|
||||
<Card
|
||||
title="커뮤니티 가입하기"
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
---
|
||||
title: AgentOps 통합
|
||||
description: AgentOps를 사용하여 에이전트 성능을 이해하고 로깅하기
|
||||
icon: paperclip
|
||||
---
|
||||
|
||||
# 소개
|
||||
|
||||
Observability는 대화형 AI 에이전트를 개발하고 배포하는 데 있어 핵심적인 요소입니다. 이는 개발자가 에이전트의 성능을 이해하고, 에이전트가 사용자와 어떻게 상호작용하는지, 그리고 에이전트가 외부 도구와 API를 어떻게 사용하는지를 파악할 수 있게 해줍니다.
|
||||
AgentOps는 CrewAI와 독립적인 제품으로, 에이전트를 위한 종합적인 observability 솔루션을 제공합니다.
|
||||
|
||||
## AgentOps
|
||||
|
||||
[AgentOps](https://agentops.ai/?=crew)은 에이전트에 대한 세션 리플레이, 메트릭, 모니터링을 제공합니다.
|
||||
|
||||
AgentOps는 높은 수준에서 비용, 토큰 사용량, 대기 시간, 에이전트 실패, 세션 전체 통계 등 다양한 항목을 모니터링할 수 있는 기능을 제공합니다.
|
||||
더 자세한 내용은 [AgentOps Repo](https://github.com/AgentOps-AI/agentops)를 확인하세요.
|
||||
|
||||
### 개요
|
||||
|
||||
AgentOps는 개발 및 프로덕션 환경에서 에이전트에 대한 모니터링을 제공합니다.
|
||||
에이전트 성능, 세션 리플레이, 맞춤형 리포팅을 추적할 수 있는 대시보드를 제공합니다.
|
||||
|
||||
또한, AgentOps는 Crew 에이전트 상호작용, LLM 호출, 툴 사용을 실시간으로 볼 수 있는 세션 드릴다운 기능을 제공합니다.
|
||||
이 기능은 에이전트가 사용자 및 다른 에이전트와 어떻게 상호작용하는지 디버깅하고 이해하는 데 유용합니다.
|
||||
|
||||

|
||||

|
||||

|
||||
|
||||
### 특징
|
||||
|
||||
- **LLM 비용 관리 및 추적**: 기반 모델 공급자와의 지출을 추적합니다.
|
||||
- **재생 분석**: 단계별 에이전트 실행 그래프를 시청할 수 있습니다.
|
||||
- **재귀적 사고 감지**: 에이전트가 무한 루프에 빠졌는지 식별합니다.
|
||||
- **맞춤형 보고서**: 에이전트 성능에 대한 맞춤형 분석을 생성합니다.
|
||||
- **분석 대시보드**: 개발 및 운영 중인 에이전트에 대한 상위 수준 통계를 모니터링합니다.
|
||||
- **공개 모델 테스트**: 벤치마크 및 리더보드를 통해 에이전트를 테스트할 수 있습니다.
|
||||
- **맞춤형 테스트**: 도메인별 테스트로 에이전트를 실행합니다.
|
||||
- **타임 트래블 디버깅**: 체크포인트에서 세션을 재시작합니다.
|
||||
- **컴플라이언스 및 보안**: 감사 로그를 생성하고 욕설 및 PII 유출과 같은 잠재적 위협을 감지합니다.
|
||||
- **프롬프트 인젝션 감지**: 잠재적 코드 인젝션 및 시크릿 유출을 식별합니다.
|
||||
|
||||
### AgentOps 사용하기
|
||||
|
||||
<Steps>
|
||||
<Step title="API 키 생성">
|
||||
사용자 API 키를 여기서 생성하세요: [API 키 생성](https://app.agentops.ai/account)
|
||||
</Step>
|
||||
<Step title="환경 설정">
|
||||
API 키를 환경 변수에 추가하세요:
|
||||
```bash
|
||||
AGENTOPS_API_KEY=<YOUR_AGENTOPS_API_KEY>
|
||||
```
|
||||
</Step>
|
||||
<Step title="AgentOps 설치">
|
||||
다음 명령어로 AgentOps를 설치하세요:
|
||||
```bash
|
||||
pip install 'crewai[agentops]'
|
||||
```
|
||||
또는
|
||||
```bash
|
||||
pip install agentops
|
||||
```
|
||||
</Step>
|
||||
<Step title="AgentOps 초기화">
|
||||
스크립트에서 `Crew`를 사용하기 전에 다음 코드를 포함하세요:
|
||||
|
||||
```python
|
||||
import agentops
|
||||
agentops.init()
|
||||
```
|
||||
|
||||
이렇게 하면 AgentOps 세션이 시작되고 Crew 에이전트가 자동으로 추적됩니다. 더 복잡한 agentic 시스템을 구성하는 방법에 대한 자세한 정보는 [AgentOps 문서](https://docs.agentops.ai) 또는 [Discord](https://discord.gg/j4f3KbeH)를 참조하세요.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
### Crew + AgentOps 예시
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card
|
||||
title="Job Posting"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/job-posting"
|
||||
icon="briefcase"
|
||||
iconType="solid"
|
||||
>
|
||||
채용 공고를 생성하는 Crew agent의 예시입니다.
|
||||
</Card>
|
||||
<Card
|
||||
title="Markdown Validator"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/markdown_validator"
|
||||
icon="markdown"
|
||||
iconType="solid"
|
||||
>
|
||||
Markdown 파일을 검증하는 Crew agent의 예시입니다.
|
||||
</Card>
|
||||
<Card
|
||||
title="Instagram Post"
|
||||
color="#F3A78B"
|
||||
href="https://github.com/joaomdmoura/crewAI-examples/tree/main/instagram_post"
|
||||
icon="square-instagram"
|
||||
iconType="brands"
|
||||
>
|
||||
Instagram 게시물을 생성하는 Crew agent의 예시입니다.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
### 추가 정보
|
||||
|
||||
시작하려면 [AgentOps 계정](https://agentops.ai/?=crew)을 생성하세요.
|
||||
|
||||
기능 요청이나 버그 보고가 필요하시면 [AgentOps Repo](https://github.com/AgentOps-AI/agentops)에서 AgentOps 팀에 문의해 주세요.
|
||||
|
||||
#### 추가 링크
|
||||
|
||||
<a href="https://twitter.com/agentopsai/">🐦 트위터</a>
|
||||
<span> • </span>
|
||||
<a href="https://discord.gg/JHPt4C7r">📢 디스코드</a>
|
||||
<span> • </span>
|
||||
<a href="https://app.agentops.ai/?=crew">🖇️ AgentOps 대시보드</a>
|
||||
<span> • </span>
|
||||
<a href="https://docs.agentops.ai/introduction">📙 문서화</a>
|
||||
@@ -21,9 +21,6 @@ icon: "face-smile"
|
||||
### 모니터링 & 트레이싱 플랫폼
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="AgentOps" icon="paperclip" href="/ko/observability/agentops">
|
||||
에이전트 개발 및 운영을 위한 세션 리플레이, 메트릭, 모니터링 제공.
|
||||
</Card>
|
||||
|
||||
<Card title="LangDB" icon="database" href="/ko/observability/langdb">
|
||||
자동 에이전트 상호작용 캡처를 포함한 CrewAI 워크플로의 엔드-투-엔드 트레이싱.
|
||||
|
||||
docs/pt-BR/api-reference/inputs.mdx (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: "GET /inputs"
|
||||
description: "Obter entradas necessárias para sua crew"
|
||||
openapi: "/enterprise-api.pt-BR.yaml GET /inputs"
|
||||
---
|
||||
|
||||
|
||||
docs/pt-BR/api-reference/kickoff.mdx (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: "POST /kickoff"
|
||||
description: "Iniciar a execução da crew"
|
||||
openapi: "/enterprise-api.pt-BR.yaml POST /kickoff"
|
||||
---
|
||||
|
||||
|
||||
docs/pt-BR/api-reference/status.mdx (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: "GET /status/{kickoff_id}"
|
||||
description: "Obter o status da execução"
|
||||
openapi: "/enterprise-api.pt-BR.yaml GET /status/{kickoff_id}"
|
||||
---
|
||||
|
||||
|
||||
@@ -177,14 +177,7 @@ class MyCustomCrew:
|
||||
# Sua implementação do crew...
|
||||
```
|
||||
|
||||
É exatamente assim que o `agentops_listener` integrado do CrewAI é registrado. No código-fonte do CrewAI, você encontrará:
|
||||
|
||||
```python
|
||||
# src/crewai/utilities/events/third_party/__init__.py
|
||||
from .agentops_listener import agentops_listener
|
||||
```
|
||||
|
||||
Isso garante que o `agentops_listener` seja carregado quando o pacote `crewai.utilities.events` for importado.
|
||||
É assim que listeners de eventos de terceiros são registrados no código do CrewAI.
|
||||
|
||||
## Tipos de Eventos Disponíveis
|
||||
|
||||
@@ -269,77 +262,6 @@ A estrutura do objeto de evento depende do tipo do evento, mas todos herdam de `
|
||||
|
||||
Campos adicionais variam pelo tipo de evento. Por exemplo, `CrewKickoffCompletedEvent` inclui os campos `crew_name` e `output`.
|
||||
|
||||
## Exemplo Real: Integração com AgentOps
|
||||
|
||||
O CrewAI inclui um exemplo de integração com [AgentOps](https://github.com/AgentOps-AI/agentops), uma plataforma de monitoramento e observabilidade para agentes de IA. Veja como é implementado:
|
||||
|
||||
```python
|
||||
from typing import Optional
|
||||
|
||||
from crewai.utilities.events import (
|
||||
CrewKickoffCompletedEvent,
|
||||
ToolUsageErrorEvent,
|
||||
ToolUsageStartedEvent,
|
||||
)
|
||||
from crewai.utilities.events.base_event_listener import BaseEventListener
|
||||
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
|
||||
from crewai.utilities.events.task_events import TaskEvaluationEvent
|
||||
|
||||
try:
|
||||
import agentops
|
||||
AGENTOPS_INSTALLED = True
|
||||
except ImportError:
|
||||
AGENTOPS_INSTALLED = False
|
||||
|
||||
class AgentOpsListener(BaseEventListener):
|
||||
tool_event: Optional["agentops.ToolEvent"] = None
|
||||
session: Optional["agentops.Session"] = None
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
def setup_listeners(self, crewai_event_bus):
|
||||
if not AGENTOPS_INSTALLED:
|
||||
return
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffStartedEvent)
|
||||
def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
|
||||
self.session = agentops.init()
|
||||
for agent in source.agents:
|
||||
if self.session:
|
||||
self.session.create_agent(
|
||||
name=agent.role,
|
||||
agent_id=str(agent.id),
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffCompletedEvent)
|
||||
def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
|
||||
if self.session:
|
||||
self.session.end_session(
|
||||
end_state="Success",
|
||||
end_state_reason="Finished Execution",
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(ToolUsageStartedEvent)
|
||||
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
|
||||
self.tool_event = agentops.ToolEvent(name=event.tool_name)
|
||||
if self.session:
|
||||
self.session.record(self.tool_event)
|
||||
|
||||
@crewai_event_bus.on(ToolUsageErrorEvent)
|
||||
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
|
||||
agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
|
||||
```
|
||||
|
||||
Esse listener inicializa uma sessão do AgentOps quando um Crew inicia, cadastra agentes no AgentOps, rastreia o uso de ferramentas e finaliza a sessão quando o Crew é concluído.
|
||||
|
||||
O listener AgentOps é registrado no sistema de eventos do CrewAI via importação em `src/crewai/utilities/events/third_party/__init__.py`:
|
||||
|
||||
```python
|
||||
from .agentops_listener import agentops_listener
|
||||
```
|
||||
|
||||
Isso garante que o `agentops_listener` seja carregado quando o pacote `crewai.utilities.events` for importado.
|
||||
|
||||
## Uso Avançado: Handlers Escopados
|
||||
|
||||
|
||||
@@ -1,126 +0,0 @@
|
||||
---
|
||||
title: Integração com AgentOps
|
||||
description: Entendendo e registrando a performance do seu agente com AgentOps.
|
||||
icon: paperclip
|
||||
---
|
||||
|
||||
# Introdução
|
||||
|
||||
Observabilidade é um aspecto fundamental no desenvolvimento e implantação de agentes de IA conversacional. Ela permite que desenvolvedores compreendam como seus agentes estão performando,
|
||||
como eles estão interagindo com os usuários e como utilizam ferramentas externas e APIs.
|
||||
AgentOps é um produto independente do CrewAI que fornece uma solução completa de observabilidade para agentes.
|
||||
|
||||
## AgentOps
|
||||
|
||||
[AgentOps](https://agentops.ai/?=crew) oferece replay de sessões, métricas e monitoramento para agentes.
|
||||
|
||||
Em um alto nível, o AgentOps oferece a capacidade de monitorar custos, uso de tokens, latência, falhas do agente, estatísticas de sessão e muito mais.
|
||||
Para mais informações, confira o [Repositório do AgentOps](https://github.com/AgentOps-AI/agentops).
|
||||
|
||||
### Visão Geral
|
||||
|
||||
AgentOps fornece monitoramento para agentes em desenvolvimento e produção.
|
||||
Disponibiliza um dashboard para acompanhamento de performance dos agentes, replay de sessões e relatórios personalizados.
|
||||
|
||||
Além disso, o AgentOps traz análises detalhadas das sessões para visualizar interações do agente Crew, chamadas LLM e uso de ferramentas em tempo real.
|
||||
Esse recurso é útil para depuração e entendimento de como os agentes interagem com usuários e entre si.
|
||||
|
||||

|
||||

|
||||

|
||||
|
||||
### Funcionalidades
|
||||
|
||||
- **Gerenciamento e Rastreamento de Custos de LLM**: Acompanhe gastos com provedores de modelos fundamentais.
|
||||
- **Análises de Replay**: Assista gráficos de execução do agente, passo a passo.
|
||||
- **Detecção de Pensamento Recursivo**: Identifique quando agentes entram em loops infinitos.
|
||||
- **Relatórios Personalizados**: Crie análises customizadas sobre a performance dos agentes.
|
||||
- **Dashboard Analítico**: Monitore estatísticas gerais de agentes em desenvolvimento e produção.
|
||||
- **Teste de Modelos Públicos**: Teste seus agentes em benchmarks e rankings.
|
||||
- **Testes Personalizados**: Execute seus agentes em testes específicos de domínio.
|
||||
- **Depuração com Viagem no Tempo**: Reinicie suas sessões a partir de checkpoints.
|
||||
- **Conformidade e Segurança**: Crie registros de auditoria e detecte possíveis ameaças como uso de palavrões e vazamento de dados pessoais.
|
||||
- **Detecção de Prompt Injection**: Identifique possíveis injeções de código e vazamentos de segredos.
|
||||
|
||||
### Utilizando o AgentOps
|
||||
|
||||
<Steps>
|
||||
<Step title="Crie uma Chave de API">
|
||||
Crie uma chave de API de usuário aqui: [Create API Key](https://app.agentops.ai/account)
|
||||
</Step>
|
||||
<Step title="Configure seu Ambiente">
|
||||
Adicione sua chave API nas variáveis de ambiente:
|
||||
```bash
|
||||
AGENTOPS_API_KEY=<YOUR_AGENTOPS_API_KEY>
|
||||
```
|
||||
</Step>
|
||||
<Step title="Instale o AgentOps">
|
||||
Instale o AgentOps com:
|
||||
```bash
|
||||
pip install 'crewai[agentops]'
|
||||
```
|
||||
ou
|
||||
```bash
|
||||
pip install agentops
|
||||
```
|
||||
</Step>
|
||||
<Step title="Inicialize o AgentOps">
|
||||
Antes de utilizar o `Crew` no seu script, inclua estas linhas:
|
||||
|
||||
```python
|
||||
import agentops
|
||||
agentops.init()
|
||||
```
|
||||
|
||||
Isso irá iniciar uma sessão do AgentOps e também rastrear automaticamente os agentes Crew. Para mais detalhes sobre como adaptar sistemas de agentes mais complexos,
|
||||
confira a [documentação do AgentOps](https://docs.agentops.ai) ou participe do [Discord](https://discord.gg/j4f3KbeH).
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
### Exemplos de Crew + AgentOps

<CardGroup cols={3}>
  <Card
    title="Vaga de Emprego"
    color="#F3A78B"
    href="https://github.com/joaomdmoura/crewAI-examples/tree/main/job-posting"
    icon="briefcase"
    iconType="solid"
  >
    Exemplo de um agente Crew que gera vagas de emprego.
  </Card>
  <Card
    title="Validador de Markdown"
    color="#F3A78B"
    href="https://github.com/joaomdmoura/crewAI-examples/tree/main/markdown_validator"
    icon="markdown"
    iconType="solid"
  >
    Exemplo de um agente Crew que valida arquivos Markdown.
  </Card>
  <Card
    title="Post no Instagram"
    color="#F3A78B"
    href="https://github.com/joaomdmoura/crewAI-examples/tree/main/instagram_post"
    icon="square-instagram"
    iconType="brands"
  >
    Exemplo de um agente Crew que gera posts para Instagram.
  </Card>
</CardGroup>

### Mais Informações

Para começar, crie uma [conta AgentOps](https://agentops.ai/?=crew).

Para sugestões de funcionalidades ou relatos de bugs, entre em contato com o time do AgentOps pelo [Repositório do AgentOps](https://github.com/AgentOps-AI/agentops).

#### Links Extras

<a href="https://twitter.com/agentopsai/">🐦 Twitter</a>
<span> • </span>
<a href="https://discord.gg/JHPt4C7r">📢 Discord</a>
<span> • </span>
<a href="https://app.agentops.ai/?=crew">🖇️ Dashboard AgentOps</a>
<span> • </span>
<a href="https://docs.agentops.ai/introduction">📙 Documentação</a>
@@ -21,9 +21,6 @@ A observabilidade é fundamental para entender como seus agentes CrewAI estão d
### Plataformas de Monitoramento e Rastreamento

<CardGroup cols={2}>
  <Card title="AgentOps" icon="paperclip" href="/pt-BR/observability/agentops">
    Replays de sessões, métricas e monitoramento para desenvolvimento e produção de agentes.
  </Card>

  <Card title="LangDB" icon="database" href="/pt-BR/observability/langdb">
    Rastreamento ponta a ponta para fluxos de trabalho CrewAI com captura automática de interações de agentes.

@@ -48,11 +48,10 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"

[project.optional-dependencies]
tools = ["crewai-tools~=0.62.0"]
tools = ["crewai-tools~=0.62.1"]
embeddings = [
    "tiktoken~=0.8.0"
]
agentops = ["agentops==0.3.18"]
pdfplumber = [
    "pdfplumber>=0.11.4",
]
@@ -99,6 +98,11 @@ exclude = ["cli/templates"]
[tool.bandit]
exclude_dirs = ["src/crewai/cli/templates"]

[tool.pytest.ini_options]
markers = [
    "telemetry: mark test as a telemetry test (don't mock telemetry)",
]

# PyTorch index configuration, since torch 2.5.0 is not compatible with python 3.13
[[tool.uv.index]]
name = "pytorch-nightly"

@@ -54,7 +54,7 @@ def _track_install_async():

_track_install_async()

__version__ = "0.159.0"
__version__ = "0.165.1"
__all__ = [
    "Agent",
    "Crew",

@@ -1,5 +1,5 @@
import time
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Dict, List

from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
@@ -21,6 +21,7 @@ class CrewAgentExecutorMixin:
    task: "Task"
    iterations: int
    max_iter: int
    messages: List[Dict[str, str]]
    _i18n: I18N
    _printer: Printer = Printer()

@@ -62,6 +63,7 @@ class CrewAgentExecutorMixin:
            value=output.text,
            metadata={
                "description": self.task.description,
                "messages": self.messages,
            },
            agent=self.agent.role,
        )
@@ -127,7 +129,6 @@ class CrewAgentExecutorMixin:
    def _ask_human_input(self, final_answer: str) -> str:
        """Prompt human input with mode-appropriate messaging."""
        event_listener.formatter.pause_live_updates()

        try:
            self._printer.print(
                content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"

@@ -1,9 +1,13 @@
from .utils import TokenManager


class AuthError(Exception):
    pass


def get_auth_token() -> str:
    """Get the authentication token."""
    access_token = TokenManager().get_token()
    if not access_token:
        raise Exception("No token found, make sure you are logged in")
        raise AuthError("No token found, make sure you are logged in")
    return access_token

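With a dedicated exception type, callers can distinguish a missing login from other failures. A minimal sketch of how calling code might handle it (the helper function below is hypothetical; only the import path comes from the module above):

```python
from crewai.cli.authentication.token import AuthError, get_auth_token


def resolve_api_key() -> str:
    """Return the CLI auth token, or an empty key when the user is not logged in."""
    try:
        return get_auth_token()
    except AuthError:
        # Not logged in: degrade gracefully instead of crashing, mirroring how
        # TraceBatchManager falls back to PlusAPI(api_key="") later in this diff.
        return ""
```
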
@@ -18,6 +18,7 @@ class PlusAPI:
|
||||
CREWS_RESOURCE = "/crewai_plus/api/v1/crews"
|
||||
AGENTS_RESOURCE = "/crewai_plus/api/v1/agents"
|
||||
TRACING_RESOURCE = "/crewai_plus/api/v1/tracing"
|
||||
EPHEMERAL_TRACING_RESOURCE = "/crewai_plus/api/v1/tracing/ephemeral"
|
||||
|
||||
def __init__(self, api_key: str) -> None:
|
||||
self.api_key = api_key
|
||||
@@ -124,6 +125,11 @@ class PlusAPI:
|
||||
"POST", f"{self.TRACING_RESOURCE}/batches", json=payload
|
||||
)
|
||||
|
||||
def initialize_ephemeral_trace_batch(self, payload) -> requests.Response:
|
||||
return self._make_request(
|
||||
"POST", f"{self.EPHEMERAL_TRACING_RESOURCE}/batches", json=payload
|
||||
)
|
||||
|
||||
def send_trace_events(self, trace_batch_id: str, payload) -> requests.Response:
|
||||
return self._make_request(
|
||||
"POST",
|
||||
@@ -131,9 +137,27 @@ class PlusAPI:
|
||||
json=payload,
|
||||
)
|
||||
|
||||
def send_ephemeral_trace_events(
|
||||
self, trace_batch_id: str, payload
|
||||
) -> requests.Response:
|
||||
return self._make_request(
|
||||
"POST",
|
||||
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches/{trace_batch_id}/events",
|
||||
json=payload,
|
||||
)
|
||||
|
||||
def finalize_trace_batch(self, trace_batch_id: str, payload) -> requests.Response:
|
||||
return self._make_request(
|
||||
"PATCH",
|
||||
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/finalize",
|
||||
json=payload,
|
||||
)
|
||||
|
||||
def finalize_ephemeral_trace_batch(
|
||||
self, trace_batch_id: str, payload
|
||||
) -> requests.Response:
|
||||
return self._make_request(
|
||||
"PATCH",
|
||||
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches/{trace_batch_id}/finalize",
|
||||
json=payload,
|
||||
)
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.159.0,<1.0.0"
|
||||
"crewai[tools]>=0.165.1,<1.0.0"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.159.0,<1.0.0",
|
||||
"crewai[tools]>=0.165.1,<1.0.0",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.159.0"
|
||||
"crewai[tools]>=0.165.1"
|
||||
]
|
||||
|
||||
[tool.crewai]
|
||||
|
||||
@@ -44,8 +44,9 @@ def migrate_pyproject(input_file, output_file):
|
||||
]
|
||||
new_pyproject["project"]["requires-python"] = poetry_data.get("python")
|
||||
else:
|
||||
# If it's already in the new format, just copy the project section
|
||||
# If it's already in the new format, just copy the project and tool sections
|
||||
new_pyproject["project"] = pyproject_data.get("project", {})
|
||||
new_pyproject["tool"] = pyproject_data.get("tool", {})
|
||||
|
||||
# Migrate or copy dependencies
|
||||
if "dependencies" in new_pyproject["project"]:
|
||||
|
||||
@@ -77,7 +77,9 @@ from crewai.utilities.events.listeners.tracing.trace_listener import (
|
||||
)
|
||||
|
||||
|
||||
from crewai.utilities.events.listeners.tracing.utils import is_tracing_enabled
|
||||
from crewai.utilities.events.listeners.tracing.utils import (
|
||||
is_tracing_enabled,
|
||||
)
|
||||
from crewai.utilities.formatter import (
|
||||
aggregate_raw_outputs_from_task_outputs,
|
||||
aggregate_raw_outputs_from_tasks,
|
||||
@@ -283,8 +285,9 @@ class Crew(FlowTrackable, BaseModel):
|
||||
|
||||
self._cache_handler = CacheHandler()
|
||||
event_listener = EventListener()
|
||||
|
||||
if is_tracing_enabled() or self.tracing:
|
||||
trace_listener = TraceCollectionListener(tracing=self.tracing)
|
||||
trace_listener = TraceCollectionListener()
|
||||
trace_listener.setup_listeners(crewai_event_bus)
|
||||
event_listener.verbose = self.verbose
|
||||
event_listener.formatter.verbose = self.verbose
|
||||
@@ -633,6 +636,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
self._inputs = inputs
|
||||
self._interpolate_inputs(inputs)
|
||||
self._set_tasks_callbacks()
|
||||
self._set_allow_crewai_trigger_context_for_first_task()
|
||||
|
||||
i18n = I18N(prompt_file=self.prompt_file)
|
||||
|
||||
@@ -1502,3 +1506,18 @@ class Crew(FlowTrackable, BaseModel):
|
||||
"""Reset crew and agent knowledge storage."""
|
||||
for ks in knowledges:
|
||||
ks.reset()
|
||||
|
||||
def _set_allow_crewai_trigger_context_for_first_task(self):
|
||||
crewai_trigger_payload = self._inputs and self._inputs.get(
|
||||
"crewai_trigger_payload"
|
||||
)
|
||||
able_to_inject = (
|
||||
self.tasks and self.tasks[0].allow_crewai_trigger_context is None
|
||||
)
|
||||
|
||||
if (
|
||||
self.process == Process.sequential
|
||||
and crewai_trigger_payload
|
||||
and able_to_inject
|
||||
):
|
||||
self.tasks[0].allow_crewai_trigger_context = True
|
||||
|
||||
@@ -38,7 +38,9 @@ from crewai.utilities.events.flow_events import (
|
||||
from crewai.utilities.events.listeners.tracing.trace_listener import (
|
||||
TraceCollectionListener,
|
||||
)
|
||||
from crewai.utilities.events.listeners.tracing.utils import is_tracing_enabled
|
||||
from crewai.utilities.events.listeners.tracing.utils import (
|
||||
is_tracing_enabled,
|
||||
)
|
||||
from crewai.utilities.printer import Printer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -476,8 +478,8 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
# Initialize state with initial values
|
||||
self._state = self._create_initial_state()
|
||||
self.tracing = tracing
|
||||
if is_tracing_enabled() or tracing:
|
||||
trace_listener = TraceCollectionListener(tracing=tracing)
|
||||
if is_tracing_enabled() or self.tracing:
|
||||
trace_listener = TraceCollectionListener()
|
||||
trace_listener.setup_listeners(crewai_event_bus)
|
||||
# Apply any additional kwargs
|
||||
if kwargs:
|
||||
@@ -911,17 +913,52 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
- Triggers execution of any listeners waiting on this start method
|
||||
- Part of the flow's initialization sequence
|
||||
- Skips execution if method was already completed (e.g., after reload)
|
||||
- Automatically injects crewai_trigger_payload if available in flow inputs
|
||||
"""
|
||||
if start_method_name in self._completed_methods:
|
||||
last_output = self._method_outputs[-1] if self._method_outputs else None
|
||||
await self._execute_listeners(start_method_name, last_output)
|
||||
return
|
||||
|
||||
method = self._methods[start_method_name]
|
||||
enhanced_method = self._inject_trigger_payload_for_start_method(method)
|
||||
|
||||
result = await self._execute_method(
|
||||
start_method_name, self._methods[start_method_name]
|
||||
start_method_name, enhanced_method
|
||||
)
|
||||
await self._execute_listeners(start_method_name, result)
|
||||
|
||||
    def _inject_trigger_payload_for_start_method(self, original_method: Callable) -> Callable:
        def prepare_kwargs(*args, **kwargs):
            inputs = baggage.get_baggage("flow_inputs") or {}
            trigger_payload = inputs.get("crewai_trigger_payload")

            sig = inspect.signature(original_method)
            accepts_trigger_payload = "crewai_trigger_payload" in sig.parameters

            if trigger_payload is not None and accepts_trigger_payload:
                kwargs["crewai_trigger_payload"] = trigger_payload
            elif trigger_payload is not None:
                self._log_flow_event(
                    f"Trigger payload available but {original_method.__name__} doesn't accept crewai_trigger_payload parameter",
                    color="yellow"
                )
            return args, kwargs

        if asyncio.iscoroutinefunction(original_method):
            async def enhanced_method(*args, **kwargs):
                args, kwargs = prepare_kwargs(*args, **kwargs)
                return await original_method(*args, **kwargs)
        else:
            def enhanced_method(*args, **kwargs):
                args, kwargs = prepare_kwargs(*args, **kwargs)
                return original_method(*args, **kwargs)

        enhanced_method.__name__ = original_method.__name__
        enhanced_method.__doc__ = original_method.__doc__

        return enhanced_method

    async def _execute_method(
        self, method_name: str, method: Callable, *args: Any, **kwargs: Any
    ) -> Any:

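To make the wrapping behaviour concrete, here is a minimal, hypothetical flow whose start method opts into the injected payload simply by declaring the parameter (the payload fields and topic logic are invented for illustration):

```python
from crewai.flow.flow import Flow, listen, start


class TriggeredFlow(Flow):
    @start()
    def begin(self, crewai_trigger_payload: dict | None = None):
        # When the flow is kicked off with a crewai_trigger_payload in its inputs,
        # the wrapper above injects it here; otherwise the parameter stays None.
        return (crewai_trigger_payload or {}).get("topic", "default topic")

    @listen(begin)
    def report(self, topic):
        return f"Researching: {topic}"


flow = TriggeredFlow()
result = flow.kickoff(inputs={"crewai_trigger_payload": {"topic": "AI agents"}})
```
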
@@ -11,6 +11,7 @@ import chromadb.errors
|
||||
from chromadb.api import ClientAPI
|
||||
from chromadb.api.types import OneOrMany
|
||||
from chromadb.config import Settings
|
||||
import warnings
|
||||
|
||||
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
|
||||
from crewai.rag.embeddings.configurator import EmbeddingConfigurator
|
||||
@@ -85,6 +86,14 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
||||
raise Exception("Collection not initialized")
|
||||
|
||||
def initialize_knowledge_storage(self):
|
||||
# Suppress deprecation warnings from chromadb, which are not relevant to us
|
||||
# TODO: Remove this once we upgrade chromadb to at least 1.0.8.
|
||||
warnings.filterwarnings(
|
||||
"ignore",
|
||||
message=r".*'model_fields'.*is deprecated.*",
|
||||
module=r"^chromadb(\.|$)",
|
||||
)
|
||||
|
||||
self.app = create_persistent_client(
|
||||
path=os.path.join(db_storage_path(), "knowledge"),
|
||||
settings=Settings(allow_reset=True),
|
||||
|
||||
@@ -12,6 +12,7 @@ from crewai.rag.embeddings.configurator import EmbeddingConfigurator
|
||||
from crewai.utilities.chromadb import create_persistent_client
|
||||
from crewai.utilities.constants import MAX_FILE_NAME_LENGTH
|
||||
from crewai.utilities.paths import db_storage_path
|
||||
import warnings
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
@@ -62,6 +63,14 @@ class RAGStorage(BaseRAGStorage):
|
||||
def _initialize_app(self):
|
||||
from chromadb.config import Settings
|
||||
|
||||
# Suppress deprecation warnings from chromadb, which are not relevant to us
|
||||
# TODO: Remove this once we upgrade chromadb to at least 1.0.8.
|
||||
warnings.filterwarnings(
|
||||
"ignore",
|
||||
message=r".*'model_fields'.*is deprecated.*",
|
||||
module=r"^chromadb(\.|$)",
|
||||
)
|
||||
|
||||
self._set_embedder_config()
|
||||
|
||||
self.app = create_persistent_client(
|
||||
|
||||
src/crewai/rag/core/__init__.py (new file)
@@ -0,0 +1 @@
|
||||
"""Core abstract base classes and protocols for RAG systems."""
|
||||
src/crewai/rag/core/base_client.py (new file)
@@ -0,0 +1,433 @@
|
||||
"""Protocol for vector database client implementations."""
|
||||
|
||||
from abc import abstractmethod
|
||||
from typing import Any, Protocol, runtime_checkable, TypedDict, Annotated
|
||||
from typing_extensions import Unpack, Required
|
||||
|
||||
|
||||
from crewai.rag.types import (
|
||||
EmbeddingFunction,
|
||||
BaseRecord,
|
||||
SearchResult,
|
||||
)
|
||||
|
||||
|
||||
class BaseCollectionParams(TypedDict):
|
||||
"""Base parameters for collection operations.
|
||||
|
||||
Attributes:
|
||||
collection_name: The name of the collection/index to operate on.
|
||||
"""
|
||||
|
||||
collection_name: Required[
|
||||
Annotated[
|
||||
str,
|
||||
"Name of the collection/index. Implementations may have specific constraints (e.g., character limits, allowed characters, case sensitivity).",
|
||||
]
|
||||
]
|
||||
|
||||
|
||||
class BaseCollectionAddParams(BaseCollectionParams):
|
||||
"""Parameters for adding documents to a collection.
|
||||
|
||||
Extends BaseCollectionParams with document-specific fields.
|
||||
|
||||
Attributes:
|
||||
collection_name: The name of the collection to add documents to.
|
||||
documents: List of BaseRecord dictionaries containing document data.
|
||||
"""
|
||||
|
||||
documents: list[BaseRecord]
|
||||
|
||||
|
||||
class BaseCollectionSearchParams(BaseCollectionParams, total=False):
|
||||
"""Parameters for searching within a collection.
|
||||
|
||||
Extends BaseCollectionParams with search-specific optional fields.
|
||||
All fields except collection_name and query are optional.
|
||||
|
||||
Attributes:
|
||||
query: The text query to search for (required).
|
||||
limit: Maximum number of results to return.
|
||||
metadata_filter: Filter results by metadata fields.
|
||||
score_threshold: Minimum similarity score for results (0-1).
|
||||
"""
|
||||
|
||||
query: Required[str]
|
||||
limit: int
|
||||
metadata_filter: dict[str, Any]
|
||||
score_threshold: float
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class BaseClient(Protocol):
|
||||
"""Protocol for vector store client implementations.
|
||||
|
||||
This protocol defines the interface that all vector store client implementations
|
||||
must follow. It provides a consistent API for storing and retrieving
|
||||
documents with their vector embeddings across different vector database
|
||||
backends (e.g., Qdrant, ChromaDB, Weaviate). Implementing classes should
|
||||
handle connection management, data persistence, and vector similarity
|
||||
search operations specific to their backend.
|
||||
|
||||
Implementation Guidelines:
|
||||
Implementations should accept BaseClientParams in their constructor to allow
|
||||
passing pre-configured client instances:
|
||||
|
||||
class MyVectorClient:
|
||||
def __init__(self, client: Any | None = None, **kwargs):
|
||||
if client:
|
||||
self.client = client
|
||||
else:
|
||||
self.client = self._create_default_client(**kwargs)
|
||||
|
||||
Notes:
|
||||
This protocol replaces the former BaseRAGStorage abstraction,
|
||||
providing a cleaner interface for vector store operations.
|
||||
|
||||
Attributes:
|
||||
embedding_function: Callable that takes a list of text strings
|
||||
and returns a list of embedding vectors. Implementations
|
||||
should always provide a default embedding function.
|
||||
client: The underlying vector database client instance. This could be
|
||||
passed via BaseClientParams during initialization or created internally.
|
||||
"""
|
||||
|
||||
client: Any
|
||||
embedding_function: EmbeddingFunction
|
||||
|
||||
@abstractmethod
|
||||
def create_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
|
||||
"""Create a new collection/index in the vector database.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: The name of the collection to create. Must be unique within
|
||||
the vector database instance.
|
||||
|
||||
Raises:
|
||||
ValueError: If collection name already exists.
|
||||
ConnectionError: If unable to connect to the vector database backend.
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
async def acreate_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
|
||||
"""Create a new collection/index in the vector database asynchronously.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: The name of the collection to create. Must be unique within
|
||||
the vector database instance.
|
||||
|
||||
Raises:
|
||||
ValueError: If collection name already exists.
|
||||
ConnectionError: If unable to connect to the vector database backend.
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
def get_or_create_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> Any:
|
||||
"""Get an existing collection or create it if it doesn't exist.
|
||||
|
||||
This method provides a convenient way to ensure a collection exists
|
||||
without having to check for its existence first.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: The name of the collection to get or create.
|
||||
|
||||
Returns:
|
||||
A collection object whose type depends on the backend implementation.
|
||||
This could be a collection reference, ID, or client object.
|
||||
|
||||
Raises:
|
||||
ValueError: If unable to create the collection.
|
||||
ConnectionError: If unable to connect to the vector database backend.
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
async def aget_or_create_collection(
|
||||
self, **kwargs: Unpack[BaseCollectionParams]
|
||||
) -> Any:
|
||||
"""Get an existing collection or create it if it doesn't exist asynchronously.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: The name of the collection to get or create.
|
||||
|
||||
Returns:
|
||||
A collection object whose type depends on the backend implementation.
|
||||
|
||||
Raises:
|
||||
ValueError: If unable to create the collection.
|
||||
ConnectionError: If unable to connect to the vector database backend.
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
def add_documents(self, **kwargs: Unpack[BaseCollectionAddParams]) -> None:
|
||||
"""Add documents with their embeddings to a collection.
|
||||
|
||||
This method performs an upsert operation - if a document with the same ID
|
||||
already exists, it will be updated with the new content and metadata.
|
||||
|
||||
Implementations should handle embedding generation internally based on
|
||||
the configured embedding function.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: The name of the collection to add documents to.
|
||||
documents: List of BaseRecord dicts containing:
|
||||
- content: The text content (required)
|
||||
- doc_id: Optional unique identifier (auto-generated from content hash if missing)
|
||||
- metadata: Optional metadata dictionary
|
||||
Embeddings will be generated automatically.
|
||||
|
||||
Raises:
|
||||
ValueError: If collection doesn't exist or documents list is empty.
|
||||
TypeError: If documents are not BaseRecord dict instances.
|
||||
ConnectionError: If unable to connect to the vector database backend.
|
||||
|
||||
Example:
|
||||
>>> from crewai.rag.chromadb.client import ChromaDBClient
|
||||
>>> from crewai.rag.types import BaseRecord
|
||||
>>> client = ChromaDBClient()
|
||||
>>>
|
||||
>>> records: list[BaseRecord] = [
|
||||
... {
|
||||
... "content": "Machine learning basics",
|
||||
... "metadata": {"source": "file3", "topic": "ML"}
|
||||
... },
|
||||
... {
|
||||
... "doc_id": "custom_id",
|
||||
... "content": "Deep learning fundamentals",
|
||||
... "metadata": {"source": "file4", "topic": "DL"}
|
||||
... }
|
||||
... ]
|
||||
>>> client.add_documents(collection_name="my_docs", documents=records)
|
||||
>>>
|
||||
>>> records_with_id: list[BaseRecord] = [
|
||||
... {
|
||||
... "doc_id": "nlp_001",
|
||||
... "content": "Advanced NLP techniques",
|
||||
... "metadata": {"source": "file5", "topic": "NLP"}
|
||||
... }
|
||||
... ]
|
||||
>>> client.add_documents(collection_name="my_docs", documents=records_with_id)
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
async def aadd_documents(self, **kwargs: Unpack[BaseCollectionAddParams]) -> None:
|
||||
"""Add documents with their embeddings to a collection asynchronously.
|
||||
|
||||
Implementations should handle embedding generation internally based on
|
||||
the configured embedding function.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: The name of the collection to add documents to.
|
||||
documents: List of BaseRecord dicts containing:
|
||||
- content: The text content (required)
|
||||
- doc_id: Optional unique identifier (auto-generated from content hash if missing)
|
||||
- metadata: Optional metadata dictionary
|
||||
Embeddings will be generated automatically.
|
||||
|
||||
Raises:
|
||||
ValueError: If collection doesn't exist or documents list is empty.
|
||||
TypeError: If documents are not BaseRecord dict instances.
|
||||
ConnectionError: If unable to connect to the vector database backend.
|
||||
|
||||
Example:
|
||||
>>> import asyncio
|
||||
>>> from crewai.rag.chromadb.client import ChromaDBClient
|
||||
>>> from crewai.rag.types import BaseRecord
|
||||
>>>
|
||||
>>> async def add_documents():
|
||||
... client = ChromaDBClient()
|
||||
...
|
||||
... records: list[BaseRecord] = [
|
||||
... {
|
||||
... "doc_id": "doc2",
|
||||
... "content": "Async operations in Python",
|
||||
... "metadata": {"source": "file2", "topic": "async"}
|
||||
... }
|
||||
... ]
|
||||
... await client.aadd_documents(collection_name="my_docs", documents=records)
|
||||
...
|
||||
>>> asyncio.run(add_documents())
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
def search(
|
||||
self, **kwargs: Unpack[BaseCollectionSearchParams]
|
||||
) -> list[SearchResult]:
|
||||
"""Search for similar documents using a query.
|
||||
|
||||
Performs a vector similarity search to find the most similar documents
|
||||
to the provided query.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: The name of the collection to search in.
|
||||
query: The text query to search for. The implementation handles
|
||||
embedding generation internally.
|
||||
limit: Maximum number of results to return. Defaults to 10.
|
||||
metadata_filter: Optional metadata filter to apply to the search. The exact
|
||||
format depends on the backend, but typically supports equality
|
||||
and range queries on metadata fields.
|
||||
score_threshold: Optional minimum similarity score threshold. Only
|
||||
results with scores >= this threshold will be returned. The
|
||||
score interpretation depends on the distance metric used.
|
||||
|
||||
Returns:
|
||||
A list of SearchResult dictionaries ordered by similarity score in
|
||||
descending order. Each result contains:
|
||||
- id: Document ID
|
||||
- content: Document text content
|
||||
- metadata: Document metadata
|
||||
- score: Similarity score (0-1, higher is better)
|
||||
|
||||
Raises:
|
||||
ValueError: If collection doesn't exist.
|
||||
ConnectionError: If unable to connect to the vector database backend.
|
||||
|
||||
Example:
|
||||
>>> from crewai.rag.chromadb.client import ChromaDBClient
|
||||
>>> client = ChromaDBClient()
|
||||
>>>
|
||||
>>> results = client.search(
|
||||
... collection_name="my_docs",
|
||||
... query="What is machine learning?",
|
||||
... limit=5,
|
||||
... metadata_filter={"source": "file1"},
|
||||
... score_threshold=0.7
|
||||
... )
|
||||
>>> for result in results:
|
||||
... print(f"{result['id']}: {result['score']:.2f}")
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
async def asearch(
|
||||
self, **kwargs: Unpack[BaseCollectionSearchParams]
|
||||
) -> list[SearchResult]:
|
||||
"""Search for similar documents using a query asynchronously.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: The name of the collection to search in.
|
||||
query: The text query to search for. The implementation handles
|
||||
embedding generation internally.
|
||||
limit: Maximum number of results to return. Defaults to 10.
|
||||
metadata_filter: Optional metadata filter to apply to the search.
|
||||
score_threshold: Optional minimum similarity score threshold.
|
||||
|
||||
Returns:
|
||||
A list of SearchResult dictionaries ordered by similarity score.
|
||||
|
||||
Raises:
|
||||
ValueError: If collection doesn't exist.
|
||||
ConnectionError: If unable to connect to the vector database backend.
|
||||
|
||||
Example:
|
||||
>>> import asyncio
|
||||
>>> from crewai.rag.chromadb.client import ChromaDBClient
|
||||
>>>
|
||||
>>> async def search_documents():
|
||||
... client = ChromaDBClient()
|
||||
... results = await client.asearch(
|
||||
... collection_name="my_docs",
|
||||
... query="Python programming best practices",
|
||||
... limit=5,
|
||||
... metadata_filter={"source": "file1"},
|
||||
... score_threshold=0.7
|
||||
... )
|
||||
... for result in results:
|
||||
... print(f"{result['id']}: {result['score']:.2f}")
|
||||
...
|
||||
>>> asyncio.run(search_documents())
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
def delete_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
|
||||
"""Delete a collection and all its data.
|
||||
|
||||
This operation is irreversible and will permanently remove all documents,
|
||||
embeddings, and metadata associated with the collection.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: The name of the collection to delete.
|
||||
|
||||
Raises:
|
||||
ValueError: If the collection doesn't exist.
|
||||
ConnectionError: If unable to connect to the vector database backend.
|
||||
|
||||
Example:
|
||||
>>> from crewai.rag.chromadb.client import ChromaDBClient
|
||||
>>> client = ChromaDBClient()
|
||||
>>> client.delete_collection(collection_name="old_docs")
|
||||
>>> print("Collection 'old_docs' deleted successfully")
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
async def adelete_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
|
||||
"""Delete a collection and all its data asynchronously.
|
||||
|
||||
Keyword Args:
|
||||
collection_name: The name of the collection to delete.
|
||||
|
||||
Raises:
|
||||
ValueError: If the collection doesn't exist.
|
||||
ConnectionError: If unable to connect to the vector database backend.
|
||||
|
||||
Example:
|
||||
>>> import asyncio
|
||||
>>> from crewai.rag.chromadb.client import ChromaDBClient
|
||||
>>>
|
||||
>>> async def delete_old_collection():
|
||||
... client = ChromaDBClient()
|
||||
... await client.adelete_collection(collection_name="old_docs")
|
||||
... print("Collection 'old_docs' deleted successfully")
|
||||
...
|
||||
>>> asyncio.run(delete_old_collection())
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
def reset(self) -> None:
|
||||
"""Reset the vector database by deleting all collections and data.
|
||||
|
||||
This method provides a way to completely clear the vector database,
|
||||
removing all collections and their contents. Use with caution as
|
||||
this operation is irreversible.
|
||||
|
||||
Raises:
|
||||
ConnectionError: If unable to connect to the vector database backend.
|
||||
PermissionError: If the operation is not allowed by the backend.
|
||||
|
||||
Example:
|
||||
>>> from crewai.rag.chromadb.client import ChromaDBClient
|
||||
>>> client = ChromaDBClient()
|
||||
>>> client.reset()
|
||||
>>> print("Vector database completely reset - all data deleted")
|
||||
"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
async def areset(self) -> None:
|
||||
"""Reset the vector database by deleting all collections and data asynchronously.
|
||||
|
||||
Raises:
|
||||
ConnectionError: If unable to connect to the vector database backend.
|
||||
PermissionError: If the operation is not allowed by the backend.
|
||||
|
||||
Example:
|
||||
>>> import asyncio
|
||||
>>> from crewai.rag.chromadb.client import ChromaDBClient
|
||||
>>>
|
||||
>>> async def reset_database():
|
||||
... client = ChromaDBClient()
|
||||
... await client.areset()
|
||||
... print("Vector database completely reset - all data deleted")
|
||||
...
|
||||
>>> asyncio.run(reset_database())
|
||||
"""
|
||||
...
|
||||
src/crewai/rag/core/base_provider.py (new file)
@@ -0,0 +1,30 @@
"""Base provider protocol for vector database client creation."""

from abc import ABC
from typing import Any, Protocol, runtime_checkable, Union
from pydantic import BaseModel, Field

from crewai.rag.types import EmbeddingFunction
from crewai.rag.embeddings.types import EmbeddingOptions


class BaseProviderOptions(BaseModel, ABC):
    """Base configuration for all provider options."""

    client_type: str = Field(..., description="Type of client to create")
    embedding_config: Union[EmbeddingOptions, EmbeddingFunction, None] = Field(
        default=None,
        description="Embedding configuration - either options for built-in providers or a custom function",
    )
    options: Any = Field(
        default=None, description="Additional provider-specific options"
    )


@runtime_checkable
class BaseProvider(Protocol):
    """Protocol for vector database client providers."""

    def __call__(self, options: BaseProviderOptions) -> Any:
        """Create and return a configured client instance."""
        ...
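
As a sketch of how this protocol is meant to be satisfied, a hypothetical provider might look like the following (FakeVectorClient and the collection_prefix option are invented purely for illustration):

```python
from typing import Any

from crewai.rag.core.base_provider import BaseProvider, BaseProviderOptions


class FakeVectorClient:
    """Stand-in for a real vector database client."""

    def __init__(self, collection_prefix: str = "crewai") -> None:
        self.collection_prefix = collection_prefix


class FakeProvider:
    """A callable that builds a client, satisfying the BaseProvider protocol."""

    def __call__(self, options: BaseProviderOptions) -> Any:
        extra = options.options or {}  # assumes provider-specific options arrive as a dict
        return FakeVectorClient(collection_prefix=extra.get("collection_prefix", "crewai"))


assert isinstance(FakeProvider(), BaseProvider)  # runtime_checkable allows isinstance checks
```
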
src/crewai/rag/embeddings/factory.py (new file)
@@ -0,0 +1,148 @@
|
||||
"""Minimal embedding function factory for CrewAI."""
|
||||
|
||||
import os
|
||||
|
||||
from chromadb import EmbeddingFunction
|
||||
from chromadb.utils.embedding_functions.amazon_bedrock_embedding_function import (
|
||||
AmazonBedrockEmbeddingFunction,
|
||||
)
|
||||
from chromadb.utils.embedding_functions.cohere_embedding_function import (
|
||||
CohereEmbeddingFunction,
|
||||
)
|
||||
from chromadb.utils.embedding_functions.google_embedding_function import (
|
||||
GooglePalmEmbeddingFunction,
|
||||
GoogleGenerativeAiEmbeddingFunction,
|
||||
GoogleVertexEmbeddingFunction,
|
||||
)
|
||||
from chromadb.utils.embedding_functions.huggingface_embedding_function import (
|
||||
HuggingFaceEmbeddingFunction,
|
||||
)
|
||||
from chromadb.utils.embedding_functions.instructor_embedding_function import (
|
||||
InstructorEmbeddingFunction,
|
||||
)
|
||||
from chromadb.utils.embedding_functions.jina_embedding_function import (
|
||||
JinaEmbeddingFunction,
|
||||
)
|
||||
from chromadb.utils.embedding_functions.ollama_embedding_function import (
|
||||
OllamaEmbeddingFunction,
|
||||
)
|
||||
from chromadb.utils.embedding_functions.onnx_mini_lm_l6_v2 import ONNXMiniLM_L6_V2
|
||||
from chromadb.utils.embedding_functions.open_clip_embedding_function import (
|
||||
OpenCLIPEmbeddingFunction,
|
||||
)
|
||||
from chromadb.utils.embedding_functions.openai_embedding_function import (
|
||||
OpenAIEmbeddingFunction,
|
||||
)
|
||||
from chromadb.utils.embedding_functions.roboflow_embedding_function import (
|
||||
RoboflowEmbeddingFunction,
|
||||
)
|
||||
from chromadb.utils.embedding_functions.sentence_transformer_embedding_function import (
|
||||
SentenceTransformerEmbeddingFunction,
|
||||
)
|
||||
from chromadb.utils.embedding_functions.text2vec_embedding_function import (
|
||||
Text2VecEmbeddingFunction,
|
||||
)
|
||||
|
||||
from crewai.rag.embeddings.types import EmbeddingOptions
|
||||
|
||||
|
||||
def get_embedding_function(
|
||||
config: EmbeddingOptions | dict | None = None,
|
||||
) -> EmbeddingFunction:
|
||||
"""Get embedding function - delegates to ChromaDB.
|
||||
|
||||
Args:
|
||||
config: Optional configuration - either an EmbeddingOptions object or a dict with:
|
||||
- provider: The embedding provider to use (default: "openai")
|
||||
- Any other provider-specific parameters
|
||||
|
||||
Returns:
|
||||
EmbeddingFunction instance ready for use with ChromaDB
|
||||
|
||||
Supported providers:
|
||||
- openai: OpenAI embeddings (default)
|
||||
- cohere: Cohere embeddings
|
||||
- ollama: Ollama local embeddings
|
||||
- huggingface: HuggingFace embeddings
|
||||
- sentence-transformer: Local sentence transformers
|
||||
- instructor: Instructor embeddings for specialized tasks
|
||||
- google-palm: Google PaLM embeddings
|
||||
- google-generativeai: Google Generative AI embeddings
|
||||
- google-vertex: Google Vertex AI embeddings
|
||||
- amazon-bedrock: AWS Bedrock embeddings
|
||||
- jina: Jina AI embeddings
|
||||
- roboflow: Roboflow embeddings for vision tasks
|
||||
- openclip: OpenCLIP embeddings for multimodal tasks
|
||||
- text2vec: Text2Vec embeddings
|
||||
- onnx: ONNX MiniLM-L6-v2 (no API key needed, included with ChromaDB)
|
||||
|
||||
Examples:
|
||||
# Use default OpenAI with retry logic
|
||||
>>> embedder = get_embedding_function()
|
||||
|
||||
# Use Cohere with dict
|
||||
>>> embedder = get_embedding_function({
|
||||
... "provider": "cohere",
|
||||
... "api_key": "your-key",
|
||||
... "model_name": "embed-english-v3.0"
|
||||
... })
|
||||
|
||||
# Use with EmbeddingOptions
|
||||
>>> embedder = get_embedding_function(
|
||||
... EmbeddingOptions(provider="sentence-transformer", model_name="all-MiniLM-L6-v2")
|
||||
... )
|
||||
|
||||
# Use local sentence transformers (no API key needed)
|
||||
>>> embedder = get_embedding_function({
|
||||
... "provider": "sentence-transformer",
|
||||
... "model_name": "all-MiniLM-L6-v2"
|
||||
... })
|
||||
|
||||
# Use Ollama for local embeddings
|
||||
>>> embedder = get_embedding_function({
|
||||
... "provider": "ollama",
|
||||
... "model_name": "nomic-embed-text"
|
||||
... })
|
||||
|
||||
# Use ONNX (no API key needed)
|
||||
>>> embedder = get_embedding_function({
|
||||
... "provider": "onnx"
|
||||
... })
|
||||
"""
|
||||
if config is None:
|
||||
return OpenAIEmbeddingFunction(
|
||||
api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
|
||||
)
|
||||
|
||||
# Handle EmbeddingOptions object
|
||||
if isinstance(config, EmbeddingOptions):
|
||||
config_dict = config.model_dump(exclude_none=True)
|
||||
else:
|
||||
config_dict = config.copy()
|
||||
|
||||
provider = config_dict.pop("provider", "openai")
|
||||
|
||||
embedding_functions = {
|
||||
"openai": OpenAIEmbeddingFunction,
|
||||
"cohere": CohereEmbeddingFunction,
|
||||
"ollama": OllamaEmbeddingFunction,
|
||||
"huggingface": HuggingFaceEmbeddingFunction,
|
||||
"sentence-transformer": SentenceTransformerEmbeddingFunction,
|
||||
"instructor": InstructorEmbeddingFunction,
|
||||
"google-palm": GooglePalmEmbeddingFunction,
|
||||
"google-generativeai": GoogleGenerativeAiEmbeddingFunction,
|
||||
"google-vertex": GoogleVertexEmbeddingFunction,
|
||||
"amazon-bedrock": AmazonBedrockEmbeddingFunction,
|
||||
"jina": JinaEmbeddingFunction,
|
||||
"roboflow": RoboflowEmbeddingFunction,
|
||||
"openclip": OpenCLIPEmbeddingFunction,
|
||||
"text2vec": Text2VecEmbeddingFunction,
|
||||
"onnx": ONNXMiniLM_L6_V2,
|
||||
}
|
||||
|
||||
if provider not in embedding_functions:
|
||||
raise ValueError(
|
||||
f"Unsupported provider: {provider}. "
|
||||
f"Available providers: {list(embedding_functions.keys())}"
|
||||
)
|
||||
return embedding_functions[provider](**config_dict)
|
||||
src/crewai/rag/embeddings/types.py (new file)
@@ -0,0 +1,62 @@
|
||||
"""Type definitions for the embeddings module."""
|
||||
|
||||
from typing import Literal
|
||||
from pydantic import BaseModel, Field, SecretStr
|
||||
|
||||
from crewai.rag.types import EmbeddingFunction
|
||||
|
||||
|
||||
EmbeddingProvider = Literal[
|
||||
"openai",
|
||||
"cohere",
|
||||
"ollama",
|
||||
"huggingface",
|
||||
"sentence-transformer",
|
||||
"instructor",
|
||||
"google-palm",
|
||||
"google-generativeai",
|
||||
"google-vertex",
|
||||
"amazon-bedrock",
|
||||
"jina",
|
||||
"roboflow",
|
||||
"openclip",
|
||||
"text2vec",
|
||||
"onnx",
|
||||
]
|
||||
"""Supported embedding providers.
|
||||
|
||||
These correspond to the embedding functions available in ChromaDB's
|
||||
embedding_functions module. Each provider has specific requirements
|
||||
and configuration options.
|
||||
"""
|
||||
|
||||
|
||||
class EmbeddingOptions(BaseModel):
|
||||
"""Configuration options for embedding providers.
|
||||
|
||||
Generic attributes that can be passed to get_embedding_function
|
||||
to configure various embedding providers.
|
||||
"""
|
||||
|
||||
provider: EmbeddingProvider = Field(
|
||||
..., description="Embedding provider name (e.g., 'openai', 'cohere', 'onnx')"
|
||||
)
|
||||
model_name: str | None = Field(
|
||||
default=None, description="Model name for the embedding provider"
|
||||
)
|
||||
api_key: SecretStr | None = Field(
|
||||
default=None, description="API key for the embedding provider"
|
||||
)
|
||||
|
||||
|
||||
class EmbeddingConfig(BaseModel):
|
||||
"""Configuration wrapper for embedding functions.
|
||||
|
||||
Accepts either a pre-configured EmbeddingFunction or EmbeddingOptions
|
||||
to create one. This provides flexibility in how embeddings are configured.
|
||||
|
||||
Attributes:
|
||||
function: Either a callable EmbeddingFunction or EmbeddingOptions to create one
|
||||
"""
|
||||
|
||||
function: EmbeddingFunction | EmbeddingOptions
|
||||
src/crewai/rag/types.py (new file)
@@ -0,0 +1,50 @@
|
||||
"""Type definitions for RAG (Retrieval-Augmented Generation) systems."""
|
||||
|
||||
from collections.abc import Callable, Mapping
|
||||
from typing import TypeAlias, TypedDict, Any
|
||||
|
||||
from typing_extensions import Required
|
||||
|
||||
|
||||
class BaseRecord(TypedDict, total=False):
|
||||
"""A typed dictionary representing a document record.
|
||||
|
||||
Attributes:
|
||||
doc_id: Optional unique identifier for the document. If not provided,
|
||||
a content-based ID will be generated using SHA256 hash.
|
||||
content: The text content of the document (required)
|
||||
metadata: Optional metadata associated with the document
|
||||
"""
|
||||
|
||||
doc_id: str
|
||||
content: Required[str]
|
||||
metadata: (
|
||||
Mapping[str, str | int | float | bool]
|
||||
| list[Mapping[str, str | int | float | bool]]
|
||||
)
|
||||
|
||||
|
||||
DenseVector: TypeAlias = list[float]
|
||||
IntVector: TypeAlias = list[int]
|
||||
|
||||
EmbeddingFunction: TypeAlias = Callable[..., Any]
|
||||
|
||||
|
||||
class SearchResult(TypedDict):
|
||||
"""Standard search result format for vector store queries.
|
||||
|
||||
This provides a consistent interface for search results across different
|
||||
vector store implementations. Each implementation should convert their
|
||||
native result format to this standard format.
|
||||
|
||||
Attributes:
|
||||
id: Unique identifier of the document
|
||||
content: The text content of the document
|
||||
metadata: Optional metadata associated with the document
|
||||
score: Similarity score (higher is better, typically between 0 and 1)
|
||||
"""
|
||||
|
||||
id: str
|
||||
content: str
|
||||
metadata: dict[str, Any]
|
||||
score: float
|
||||
@@ -72,6 +72,10 @@ class Task(BaseModel):
        output_pydantic: Pydantic model for task output.
        security_config: Security configuration including fingerprinting.
        tools: List of tools/resources limited for task execution.
        allow_crewai_trigger_context: Optional flag to control crewai_trigger_payload injection.
            None (default): Auto-inject for first task only.
            True: Always inject trigger payload for this task.
            False: Never inject trigger payload, even for first task.
    """

    __hash__ = object.__hash__  # type: ignore
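
A rough usage sketch of this flag, assuming a sequential crew that is kicked off with a trigger payload in its inputs (the agent, task, and payload contents are placeholders):

```python
from crewai import Agent, Crew, Process, Task

analyst = Agent(role="Analyst", goal="Summarize triggers", backstory="Detail oriented.")

first = Task(
    description="Summarize the incoming trigger.",
    expected_output="A short summary.",
    agent=analyst,
    # None (default) would also inject here because it is the first task;
    # set False to opt out, or True to force injection on any task.
    allow_crewai_trigger_context=True,
)

crew = Crew(agents=[analyst], tasks=[first], process=Process.sequential)
crew.kickoff(inputs={"crewai_trigger_payload": {"event": "signup", "user_id": 42}})
# The task prompt then ends with: "Trigger Payload: {'event': 'signup', 'user_id': 42}"
```
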
@@ -163,6 +167,10 @@ class Task(BaseModel):
|
||||
end_time: Optional[datetime.datetime] = Field(
|
||||
default=None, description="End time of the task execution"
|
||||
)
|
||||
allow_crewai_trigger_context: Optional[bool] = Field(
|
||||
default=None,
|
||||
description="Whether this task should append 'Trigger Payload: {crewai_trigger_payload}' to the task description when crewai_trigger_payload exists in crew inputs.",
|
||||
)
|
||||
model_config = {"arbitrary_types_allowed": True}
|
||||
|
||||
@field_validator("guardrail")
|
||||
@@ -548,12 +556,23 @@ class Task(BaseModel):
|
||||
str: The formatted prompt string containing the task description,
|
||||
expected output, and optional markdown formatting instructions.
|
||||
"""
|
||||
tasks_slices = [self.description]
|
||||
description = self.description
|
||||
|
||||
should_inject = self.allow_crewai_trigger_context
|
||||
|
||||
if should_inject and self.agent:
|
||||
crew = getattr(self.agent, 'crew', None)
|
||||
if crew and hasattr(crew, '_inputs') and crew._inputs:
|
||||
trigger_payload = crew._inputs.get("crewai_trigger_payload")
|
||||
if trigger_payload is not None:
|
||||
description += f"\n\nTrigger Payload: {trigger_payload}"
|
||||
|
||||
tasks_slices = [description]
|
||||
|
||||
output = self.i18n.slice("expected_output").format(
|
||||
expected_output=self.expected_output
|
||||
)
|
||||
tasks_slices = [self.description, output]
|
||||
tasks_slices = [description, output]
|
||||
|
||||
if self.markdown:
|
||||
markdown_instruction = """Your final answer MUST be formatted in Markdown syntax.
|
||||
|
||||
@@ -14,12 +14,14 @@ from pydantic import BaseModel as PydanticBaseModel
|
||||
|
||||
from crewai.tools.structured_tool import CrewStructuredTool
|
||||
|
||||
|
||||
class EnvVar(BaseModel):
|
||||
name: str
|
||||
description: str
|
||||
required: bool = True
|
||||
default: Optional[str] = None
|
||||
|
||||
|
||||
class BaseTool(BaseModel, ABC):
|
||||
class _ArgsSchemaPlaceholder(PydanticBaseModel):
|
||||
pass
|
||||
@@ -108,7 +110,7 @@ class BaseTool(BaseModel, ABC):
|
||||
def to_structured_tool(self) -> CrewStructuredTool:
|
||||
"""Convert this tool to a CrewStructuredTool instance."""
|
||||
self._set_args_schema()
|
||||
return CrewStructuredTool(
|
||||
structured_tool = CrewStructuredTool(
|
||||
name=self.name,
|
||||
description=self.description,
|
||||
args_schema=self.args_schema,
|
||||
@@ -117,6 +119,8 @@ class BaseTool(BaseModel, ABC):
|
||||
max_usage_count=self.max_usage_count,
|
||||
current_usage_count=self.current_usage_count,
|
||||
)
|
||||
structured_tool._original_tool = self
|
||||
return structured_tool
|
||||
|
||||
@classmethod
|
||||
def from_langchain(cls, tool: Any) -> "BaseTool":
|
||||
@@ -276,7 +280,9 @@ def to_langchain(
    return [t.to_structured_tool() if isinstance(t, BaseTool) else t for t in tools]


def tool(*args, result_as_answer: bool = False, max_usage_count: int | None = None) -> Callable:
def tool(
    *args, result_as_answer: bool = False, max_usage_count: int | None = None
) -> Callable:
    """
    Decorator to create a tool from a function.

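For context, a small sketch of how the decorator's keyword arguments are meant to be used (the tool itself is illustrative):

```python
from crewai.tools import tool


@tool("Word Counter", result_as_answer=False, max_usage_count=3)
def word_counter(text: str) -> int:
    """Count the words in a piece of text."""
    return len(text.split())


# After three invocations the usage limit is reached; the counter lives on the
# structured tool and is kept in sync with the original tool, as shown below.
```
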
@@ -10,6 +10,11 @@ from pydantic import BaseModel, Field, create_model
|
||||
|
||||
from crewai.utilities.logger import Logger
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
|
||||
|
||||
class CrewStructuredTool:
|
||||
"""A structured tool that can operate on any number of inputs.
|
||||
@@ -18,6 +23,8 @@ class CrewStructuredTool:
|
||||
that integrates better with CrewAI's ecosystem.
|
||||
"""
|
||||
|
||||
_original_tool: BaseTool | None = None
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
@@ -47,6 +54,7 @@ class CrewStructuredTool:
|
||||
self.result_as_answer = result_as_answer
|
||||
self.max_usage_count = max_usage_count
|
||||
self.current_usage_count = current_usage_count
|
||||
self._original_tool = None
|
||||
|
||||
# Validate the function signature matches the schema
|
||||
self._validate_function_signature()
|
||||
@@ -219,6 +227,8 @@ class CrewStructuredTool:
|
||||
"""
|
||||
parsed_args = self._parse_args(input)
|
||||
|
||||
self._increment_usage_count()
|
||||
|
||||
if inspect.iscoroutinefunction(self.func):
|
||||
return await self.func(**parsed_args, **kwargs)
|
||||
else:
|
||||
@@ -242,6 +252,8 @@ class CrewStructuredTool:
|
||||
"""Main method for tool execution."""
|
||||
parsed_args = self._parse_args(input)
|
||||
|
||||
self._increment_usage_count()
|
||||
|
||||
if inspect.iscoroutinefunction(self.func):
|
||||
result = asyncio.run(self.func(**parsed_args, **kwargs))
|
||||
return result
|
||||
@@ -253,6 +265,12 @@ class CrewStructuredTool:
|
||||
|
||||
return result
|
||||
|
||||
def _increment_usage_count(self) -> None:
|
||||
"""Increment the usage count."""
|
||||
self.current_usage_count += 1
|
||||
if self._original_tool is not None:
|
||||
self._original_tool.current_usage_count = self.current_usage_count
|
||||
|
||||
@property
|
||||
def args(self) -> dict:
|
||||
"""Get the tool's input arguments schema."""
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
import os
|
||||
import re
|
||||
import portalocker
|
||||
from chromadb import PersistentClient
|
||||
from hashlib import md5
|
||||
from typing import Optional
|
||||
|
||||
from crewai.utilities.paths import db_storage_path
|
||||
|
||||
MIN_COLLECTION_LENGTH = 3
|
||||
MAX_COLLECTION_LENGTH = 63
|
||||
@@ -27,7 +28,9 @@ def is_ipv4_pattern(name: str) -> bool:
|
||||
return bool(IPV4_PATTERN.match(name))
|
||||
|
||||
|
||||
def sanitize_collection_name(name: Optional[str], max_collection_length: int = MAX_COLLECTION_LENGTH) -> str:
|
||||
def sanitize_collection_name(
|
||||
name: Optional[str], max_collection_length: int = MAX_COLLECTION_LENGTH
|
||||
) -> str:
|
||||
"""
|
||||
Sanitize a collection name to meet ChromaDB requirements:
|
||||
1. 3-63 characters long
|
||||
@@ -72,7 +75,8 @@ def create_persistent_client(path: str, **kwargs):
|
||||
concurrent creations. Works for both multi-threads and multi-processes
|
||||
environments.
|
||||
"""
|
||||
lockfile = f"chromadb-{md5(path.encode(), usedforsecurity=False).hexdigest()}.lock"
|
||||
lock_id = md5(path.encode(), usedforsecurity=False).hexdigest()
|
||||
lockfile = os.path.join(db_storage_path(), f"chromadb-{lock_id}.lock")
|
||||
with portalocker.Lock(lockfile):
|
||||
client = PersistentClient(path=path, **kwargs)
|
||||
|
||||
|
||||
@@ -67,11 +67,9 @@ from .memory_events import (
|
||||
|
||||
# events
|
||||
from .event_listener import EventListener
|
||||
from .third_party.agentops_listener import agentops_listener
|
||||
|
||||
__all__ = [
|
||||
"EventListener",
|
||||
"agentops_listener",
|
||||
"CrewAIEventsBus",
|
||||
"crewai_event_bus",
|
||||
"AgentExecutionStartedEvent",
|
||||
@@ -105,7 +103,6 @@ __all__ = [
|
||||
"MemoryRetrievalStartedEvent",
|
||||
"MemoryRetrievalCompletedEvent",
|
||||
"EventListener",
|
||||
"agentops_listener",
|
||||
"CrewKickoffStartedEvent",
|
||||
"CrewKickoffCompletedEvent",
|
||||
"CrewKickoffFailedEvent",
|
||||
|
||||
@@ -161,8 +161,10 @@ class EventListener(BaseEventListener):
|
||||
def on_task_started(source, event: TaskStartedEvent):
|
||||
span = self._telemetry.task_started(crew=source.agent.crew, task=source)
|
||||
self.execution_spans[source] = span
|
||||
# Pass both task ID and task name (if set)
|
||||
task_name = source.name if hasattr(source, 'name') and source.name else None
|
||||
self.formatter.create_task_branch(
|
||||
self.formatter.current_crew_tree, source.id
|
||||
self.formatter.current_crew_tree, source.id, task_name
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(TaskCompletedEvent)
|
||||
@@ -173,11 +175,14 @@ class EventListener(BaseEventListener):
|
||||
self._telemetry.task_ended(span, source, source.agent.crew)
|
||||
self.execution_spans[source] = None
|
||||
|
||||
# Pass task name if it exists
|
||||
task_name = source.name if hasattr(source, 'name') and source.name else None
|
||||
self.formatter.update_task_status(
|
||||
self.formatter.current_crew_tree,
|
||||
source.id,
|
||||
source.agent.role,
|
||||
"completed",
|
||||
task_name
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(TaskFailedEvent)
|
||||
@@ -188,11 +193,14 @@ class EventListener(BaseEventListener):
|
||||
self._telemetry.task_ended(span, source, source.agent.crew)
|
||||
self.execution_spans[source] = None
|
||||
|
||||
# Pass task name if it exists
|
||||
task_name = source.name if hasattr(source, 'name') and source.name else None
|
||||
self.formatter.update_task_status(
|
||||
self.formatter.current_crew_tree,
|
||||
source.id,
|
||||
source.agent.role,
|
||||
"failed",
|
||||
task_name
|
||||
)
|
||||
|
||||
# ----------- AGENT EVENTS -----------
|
||||
|
||||
@@ -4,7 +4,7 @@ from typing import Dict, List, Any, Optional
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
from crewai.utilities.constants import CREWAI_BASE_URL
|
||||
from crewai.cli.authentication.token import get_auth_token
|
||||
from crewai.cli.authentication.token import AuthError, get_auth_token
|
||||
|
||||
from crewai.cli.version import get_crewai_version
|
||||
from crewai.cli.plus_api import PlusAPI
|
||||
@@ -40,30 +40,42 @@ class TraceBatch:
|
||||
class TraceBatchManager:
|
||||
"""Single responsibility: Manage batches and event buffering"""
|
||||
|
||||
is_current_batch_ephemeral: bool = False
|
||||
|
||||
def __init__(self):
|
||||
self.plus_api = PlusAPI(api_key=get_auth_token())
|
||||
try:
|
||||
self.plus_api = PlusAPI(api_key=get_auth_token())
|
||||
except AuthError:
|
||||
self.plus_api = PlusAPI(api_key="")
|
||||
|
||||
self.trace_batch_id: Optional[str] = None # Backend ID
|
||||
self.current_batch: Optional[TraceBatch] = None
|
||||
self.event_buffer: List[TraceEvent] = []
|
||||
self.execution_start_times: Dict[str, datetime] = {}
|
||||
|
||||
def initialize_batch(
|
||||
self, user_context: Dict[str, str], execution_metadata: Dict[str, Any]
|
||||
self,
|
||||
user_context: Dict[str, str],
|
||||
execution_metadata: Dict[str, Any],
|
||||
use_ephemeral: bool = False,
|
||||
) -> TraceBatch:
|
||||
"""Initialize a new trace batch"""
|
||||
self.current_batch = TraceBatch(
|
||||
user_context=user_context, execution_metadata=execution_metadata
|
||||
)
|
||||
self.event_buffer.clear()
|
||||
self.is_current_batch_ephemeral = use_ephemeral
|
||||
|
||||
self.record_start_time("execution")
|
||||
|
||||
self._initialize_backend_batch(user_context, execution_metadata)
|
||||
self._initialize_backend_batch(user_context, execution_metadata, use_ephemeral)
|
||||
|
||||
return self.current_batch
|
||||
|
||||
def _initialize_backend_batch(
|
||||
self, user_context: Dict[str, str], execution_metadata: Dict[str, Any]
|
||||
self,
|
||||
user_context: Dict[str, str],
|
||||
execution_metadata: Dict[str, Any],
|
||||
use_ephemeral: bool = False,
|
||||
):
|
||||
"""Send batch initialization to backend"""
|
||||
|
||||
@@ -74,6 +86,7 @@ class TraceBatchManager:
|
||||
payload = {
|
||||
"trace_id": self.current_batch.batch_id,
|
||||
"execution_type": execution_metadata.get("execution_type", "crew"),
|
||||
"user_identifier": execution_metadata.get("user_context", None),
|
||||
"execution_context": {
|
||||
"crew_fingerprint": execution_metadata.get("crew_fingerprint"),
|
||||
"crew_name": execution_metadata.get("crew_name", None),
|
||||
@@ -91,12 +104,22 @@ class TraceBatchManager:
|
||||
"execution_started_at": datetime.now(timezone.utc).isoformat(),
|
||||
},
|
||||
}
|
||||
if use_ephemeral:
|
||||
payload["ephemeral_trace_id"] = self.current_batch.batch_id
|
||||
|
||||
response = self.plus_api.initialize_trace_batch(payload)
|
||||
response = (
|
||||
self.plus_api.initialize_ephemeral_trace_batch(payload)
|
||||
if use_ephemeral
|
||||
else self.plus_api.initialize_trace_batch(payload)
|
||||
)
|
||||
|
||||
if response.status_code == 201 or response.status_code == 200:
|
||||
response_data = response.json()
|
||||
self.trace_batch_id = response_data["trace_id"]
|
||||
self.trace_batch_id = (
|
||||
response_data["trace_id"]
|
||||
if not use_ephemeral
|
||||
else response_data["ephemeral_trace_id"]
|
||||
)
|
||||
console = Console()
|
||||
panel = Panel(
|
||||
f"✅ Trace batch initialized with session ID: {self.trace_batch_id}",
|
||||
@@ -134,7 +157,11 @@ class TraceBatchManager:
|
||||
if not self.trace_batch_id:
|
||||
raise Exception("❌ Trace batch ID not found")
|
||||
|
||||
response = self.plus_api.send_trace_events(self.trace_batch_id, payload)
|
||||
response = (
|
||||
self.plus_api.send_ephemeral_trace_events(self.trace_batch_id, payload)
|
||||
if self.is_current_batch_ephemeral
|
||||
else self.plus_api.send_trace_events(self.trace_batch_id, payload)
|
||||
)
|
||||
|
||||
if response.status_code == 200 or response.status_code == 201:
|
||||
self.event_buffer.clear()
|
||||
@@ -153,7 +180,6 @@ class TraceBatchManager:
|
||||
|
||||
if self.event_buffer:
|
||||
self._send_events_to_backend()
|
||||
|
||||
self._finalize_backend_batch()
|
||||
|
||||
self.current_batch.events = self.event_buffer.copy()
|
||||
@@ -163,6 +189,7 @@ class TraceBatchManager:
|
||||
self.current_batch = None
|
||||
self.event_buffer.clear()
|
||||
self.trace_batch_id = None
|
||||
self.is_current_batch_ephemeral = False
|
||||
|
||||
self._cleanup_batch_data()
|
||||
|
||||
@@ -182,12 +209,24 @@ class TraceBatchManager:
|
||||
"final_event_count": total_events,
|
||||
}
|
||||
|
||||
response = self.plus_api.finalize_trace_batch(self.trace_batch_id, payload)
|
||||
response = (
|
||||
self.plus_api.finalize_ephemeral_trace_batch(
|
||||
self.trace_batch_id, payload
|
||||
)
|
||||
if self.is_current_batch_ephemeral
|
||||
else self.plus_api.finalize_trace_batch(self.trace_batch_id, payload)
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
access_code = response.json().get("access_code", None)
|
||||
console = Console()
|
||||
return_link = (
|
||||
f"{CREWAI_BASE_URL}/crewai_plus/trace_batches/{self.trace_batch_id}"
|
||||
if not self.is_current_batch_ephemeral and access_code is None
|
||||
else f"{CREWAI_BASE_URL}/crewai_plus/ephemeral_trace_batches/{self.trace_batch_id}?access_code={access_code}"
|
||||
)
|
||||
panel = Panel(
|
||||
f"✅ Trace batch finalized with session ID: {self.trace_batch_id}. View here: {CREWAI_BASE_URL}/crewai_plus/trace_batches/{self.trace_batch_id}",
|
||||
f"✅ Trace batch finalized with session ID: {self.trace_batch_id}. View here: {return_link} {f', Access Code: {access_code}' if access_code else ''}",
|
||||
title="Trace Batch Finalization",
|
||||
border_style="green",
|
||||
)
|
||||
|
||||
@@ -13,7 +13,6 @@ from crewai.utilities.events.agent_events import (
AgentExecutionErrorEvent,
)
from crewai.utilities.events.listeners.tracing.types import TraceEvent
from crewai.utilities.events.listeners.tracing.utils import is_tracing_enabled
from crewai.utilities.events.reasoning_events import (
AgentReasoningStartedEvent,
AgentReasoningCompletedEvent,
@@ -67,7 +66,7 @@ from crewai.utilities.events.memory_events import (
MemorySaveFailedEvent,
)

from crewai.cli.authentication.token import get_auth_token
from crewai.cli.authentication.token import AuthError, get_auth_token
from crewai.cli.version import get_crewai_version


@@ -76,13 +75,12 @@ class TraceCollectionListener(BaseEventListener):
Trace collection listener that orchestrates trace collection
"""

trace_enabled: Optional[bool] = False
complex_events = ["task_started", "llm_call_started", "llm_call_completed"]

_instance = None
_initialized = False

def __new__(cls, batch_manager=None, tracing: Optional[bool] = False):
def __new__(cls, batch_manager=None):
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
@@ -90,25 +88,22 @@ class TraceCollectionListener(BaseEventListener):
def __init__(
self,
batch_manager: Optional[TraceBatchManager] = None,
tracing: Optional[bool] = False,
):
if self._initialized:
return

super().__init__()
self.batch_manager = batch_manager or TraceBatchManager()
self.tracing = tracing or False
self.trace_enabled = self._check_trace_enabled()
self._initialized = True

def _check_trace_enabled(self) -> bool:
def _check_authenticated(self) -> bool:
"""Check if tracing should be enabled"""
auth_token = get_auth_token()
if not auth_token:
try:
res = bool(get_auth_token())
return res
except AuthError:
return False

return is_tracing_enabled() or self.tracing

def _get_user_context(self) -> Dict[str, str]:
"""Extract user context for tracing"""
return {
@@ -120,8 +115,6 @@ class TraceCollectionListener(BaseEventListener):

def setup_listeners(self, crewai_event_bus):
"""Setup event listeners - delegates to specific handlers"""
if not self.trace_enabled:
return

self._register_flow_event_handlers(crewai_event_bus)
self._register_context_event_handlers(crewai_event_bus)
@@ -167,7 +160,7 @@ class TraceCollectionListener(BaseEventListener):
@event_bus.on(CrewKickoffStartedEvent)
def on_crew_started(source, event):
if not self.batch_manager.is_batch_initialized():
self._initialize_batch(source, event)
self._initialize_crew_batch(source, event)
self._handle_trace_event("crew_kickoff_started", source, event)

@event_bus.on(CrewKickoffCompletedEvent)
@@ -287,7 +280,7 @@ class TraceCollectionListener(BaseEventListener):
def on_agent_reasoning_failed(source, event):
self._handle_action_event("agent_reasoning_failed", source, event)

def _initialize_batch(self, source: Any, event: Any):
def _initialize_crew_batch(self, source: Any, event: Any):
"""Initialize trace batch"""
user_context = self._get_user_context()
execution_metadata = {
@@ -296,7 +289,7 @@ class TraceCollectionListener(BaseEventListener):
"crewai_version": get_crewai_version(),
}

self.batch_manager.initialize_batch(user_context, execution_metadata)
self._initialize_batch(user_context, execution_metadata)

def _initialize_flow_batch(self, source: Any, event: Any):
"""Initialize trace batch for Flow execution"""
@@ -308,7 +301,20 @@ class TraceCollectionListener(BaseEventListener):
"execution_type": "flow",
}

self.batch_manager.initialize_batch(user_context, execution_metadata)
self._initialize_batch(user_context, execution_metadata)

def _initialize_batch(
self, user_context: Dict[str, str], execution_metadata: Dict[str, Any]
):
"""Initialize trace batch if ephemeral"""
if not self._check_authenticated():
self.batch_manager.initialize_batch(
user_context, execution_metadata, use_ephemeral=True
)
else:
self.batch_manager.initialize_batch(
user_context, execution_metadata, use_ephemeral=False
)

def _handle_trace_event(self, event_type: str, source: Any, event: Any):
"""Generic handler for context end events"""
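In short, the listener now decides between the authenticated and ephemeral paths from the CLI token. A condensed sketch of that wiring (the helper name below is illustrative; AuthError and get_auth_token come from crewai.cli.authentication.token, and batch_manager stands in for an existing TraceBatchManager):

from crewai.cli.authentication.token import AuthError, get_auth_token

def has_cli_auth() -> bool:
    # Mirrors _check_authenticated: a missing token or an AuthError
    # sends the run down the ephemeral trace path.
    try:
        return bool(get_auth_token())
    except AuthError:
        return False

# user_context / execution_metadata are the dicts the listener builds per run.
batch_manager.initialize_batch(user_context, execution_metadata, use_ephemeral=not has_cli_auth())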
@@ -1,5 +1,153 @@
import os
import platform
import uuid
import hashlib
import subprocess
import getpass
from pathlib import Path
from datetime import datetime
import re
import json

import click

from crewai.utilities.paths import db_storage_path


def is_tracing_enabled() -> bool:
return os.getenv("CREWAI_TRACING_ENABLED", "false").lower() == "true"


def on_first_execution_tracing_confirmation() -> bool:
if _is_test_environment():
return False

if is_first_execution():
mark_first_execution_done()
return click.confirm(
"This is the first execution of CrewAI. Do you want to enable tracing?",
default=True,
show_default=True,
)
return False


def _is_test_environment() -> bool:
"""Detect if we're running in a test environment."""
return os.environ.get("CREWAI_TESTING", "").lower() == "true"


def _get_machine_id() -> str:
"""Stable, privacy-preserving machine fingerprint (cross-platform)."""
parts = []

try:
mac = ":".join(
["{:02x}".format((uuid.getnode() >> b) & 0xFF) for b in range(0, 12, 2)][
::-1
]
)
parts.append(mac)
except Exception:
pass

sysname = platform.system()
parts.append(sysname)

try:
if sysname == "Darwin":
res = subprocess.run(
["system_profiler", "SPHardwareDataType"],
capture_output=True,
text=True,
timeout=2,
)
m = re.search(r"Hardware UUID:\s*([A-Fa-f0-9\-]+)", res.stdout)
if m:
parts.append(m.group(1))
elif sysname == "Linux":
try:
parts.append(Path("/etc/machine-id").read_text().strip())
except Exception:
parts.append(Path("/sys/class/dmi/id/product_uuid").read_text().strip())
elif sysname == "Windows":
res = subprocess.run(
["wmic", "csproduct", "get", "UUID"],
capture_output=True,
text=True,
timeout=2,
)
lines = [line.strip() for line in res.stdout.splitlines() if line.strip()]
if len(lines) >= 2:
parts.append(lines[1])
except Exception:
pass

return hashlib.sha256("".join(parts).encode()).hexdigest()


def _user_data_file() -> Path:
base = Path(db_storage_path())
base.mkdir(parents=True, exist_ok=True)
return base / ".crewai_user.json"


def _load_user_data() -> dict:
p = _user_data_file()
if p.exists():
try:
return json.loads(p.read_text())
except Exception:
pass
return {}


def _save_user_data(data: dict) -> None:
try:
p = _user_data_file()
p.write_text(json.dumps(data, indent=2))
except Exception:
pass


def get_user_id() -> str:
"""Stable, anonymized user identifier with caching."""
data = _load_user_data()

if "user_id" in data:
return data["user_id"]

try:
username = getpass.getuser()
except Exception:
username = "unknown"

seed = f"{username}|{_get_machine_id()}"
uid = hashlib.sha256(seed.encode()).hexdigest()

data["user_id"] = uid
_save_user_data(data)
return uid


def is_first_execution() -> bool:
"""True if this is the first execution for this user."""
data = _load_user_data()
return not data.get("first_execution_done", False)


def mark_first_execution_done() -> None:
"""Mark that the first execution has been completed."""
data = _load_user_data()
if data.get("first_execution_done", False):
return

data.update(
{
"first_execution_done": True,
"first_execution_at": datetime.now().timestamp(),
"user_id": get_user_id(),
"machine_id": _get_machine_id(),
}
)
_save_user_data(data)
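A short usage sketch of these helpers (the module path is inferred from the listener's is_tracing_enabled import; the other names are defined in the same new file, and the example values are illustrative):

import os
from crewai.utilities.events.listeners.tracing.utils import (
    get_user_id,
    is_first_execution,
    is_tracing_enabled,
    on_first_execution_tracing_confirmation,
)

os.environ["CREWAI_TRACING_ENABLED"] = "true"  # explicit opt-in switch
assert is_tracing_enabled()
print(get_user_id())  # sha256 of "<username>|<machine fingerprint>", cached in .crewai_user.json
if is_first_execution():
    # Interactive click.confirm prompt; returns False when CREWAI_TESTING=true.
    on_first_execution_tracing_confirmation()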
@@ -1 +0,0 @@
from .agentops_listener import agentops_listener

@@ -1,67 +0,0 @@
from typing import Optional

from crewai.utilities.events import (
CrewKickoffCompletedEvent,
ToolUsageErrorEvent,
ToolUsageStartedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
from crewai.utilities.events.task_events import TaskEvaluationEvent

try:
import agentops

AGENTOPS_INSTALLED = True
except ImportError:
AGENTOPS_INSTALLED = False


class AgentOpsListener(BaseEventListener):
tool_event: Optional["agentops.ToolEvent"] = None
session: Optional["agentops.Session"] = None

def __init__(self):
super().__init__()

def setup_listeners(self, crewai_event_bus):
if not AGENTOPS_INSTALLED:
return

@crewai_event_bus.on(CrewKickoffStartedEvent)
def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
self.session = agentops.init()
for agent in source.agents:
if self.session:
self.session.create_agent(
name=agent.role,
agent_id=str(agent.id),
)

@crewai_event_bus.on(CrewKickoffCompletedEvent)
def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
if self.session:
self.session.end_session(
end_state="Success",
end_state_reason="Finished Execution",
)

@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_usage_started(source, event: ToolUsageStartedEvent):
self.tool_event = agentops.ToolEvent(name=event.tool_name)
if self.session:
self.session.record(self.tool_event)

@crewai_event_bus.on(ToolUsageErrorEvent)
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)

@crewai_event_bus.on(TaskEvaluationEvent)
def on_task_evaluation(source, event: TaskEvaluationEvent):
if self.session:
self.session.create_agent(
name="Task Evaluator", agent_id=str(source.original_agent.id)
)


agentops_listener = AgentOpsListener()
@@ -220,14 +220,22 @@ class ConsoleFormatter:
return tree

def create_task_branch(
self, crew_tree: Optional[Tree], task_id: str
self, crew_tree: Optional[Tree], task_id: str, task_name: Optional[str] = None
) -> Optional[Tree]:
"""Create and initialize a task branch."""
if not self.verbose:
return None

task_content = Text()
task_content.append(f"📋 Task: {task_id}", style="yellow bold")

# Display task name if available, otherwise just the ID
if task_name:
task_content.append("📋 Task: ", style="yellow bold")
task_content.append(f"{task_name}", style="yellow bold")
task_content.append(f" (ID: {task_id})", style="yellow dim")
else:
task_content.append(f"📋 Task: {task_id}", style="yellow bold")

task_content.append("\nStatus: ", style="white")
task_content.append("Executing Task...", style="yellow dim")

@@ -251,6 +259,7 @@ class ConsoleFormatter:
task_id: str,
agent_role: str,
status: str = "completed",
task_name: Optional[str] = None,
) -> None:
"""Update task status in the tree."""
if not self.verbose or crew_tree is None:
@@ -270,8 +279,13 @@ class ConsoleFormatter:
if str(task_id) in str(branch.label):
# Build label without introducing stray blank lines
task_content = Text()
# First line: Task ID
task_content.append(f"📋 Task: {task_id}", style=f"{style} bold")
# First line: Task ID/name
if task_name:
task_content.append("📋 Task: ", style=f"{style} bold")
task_content.append(f"{task_name}", style=f"{style} bold")
task_content.append(f" (ID: {task_id})", style=f"{style} dim")
else:
task_content.append(f"📋 Task: {task_id}", style=f"{style} bold")

# Second line: Assigned to
task_content.append("\nAssigned to: ", style="white")
@@ -285,8 +299,9 @@ class ConsoleFormatter:
break

# Show status panel
display_name = task_name if task_name else str(task_id)
content = self.create_status_content(
f"Task {status.title()}", str(task_id), style, Agent=agent_role
f"Task {status.title()}", display_name, style, Agent=agent_role
)
self.print_panel(content, panel_title, style)
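A small sketch of the new task_name argument (ConsoleFormatter construction is omitted because its constructor is not shown in this diff; `formatter` stands in for a verbose instance, and the values are placeholders):

from rich.tree import Tree

crew_tree = Tree("Crew Execution")
branch = formatter.create_task_branch(
    crew_tree,
    task_id="d41d8cd9",               # placeholder id
    task_name="Summarize findings",   # rendered as "📋 Task: Summarize findings (ID: d41d8cd9)"
)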
@@ -21,7 +21,7 @@ from crewai.utilities import RPMController
from crewai.utilities.errors import AgentRepositoryError
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolUsageFinishedEvent

from crewai.process import Process

def test_agent_llm_creation_with_env_vars():
# Store original environment variables
@@ -1209,6 +1209,181 @@ Thought:<|eot_id|>
assert mock_format_prompt.return_value == expected_prompt


@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_allow_crewai_trigger_context():
from crewai import Crew

agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory"
)

task = Task(
description="Analyze the data",
expected_output="Analysis report",
agent=agent,
allow_crewai_trigger_context=True
)
crew = Crew(agents=[agent], tasks=[task])
crew.kickoff({"crewai_trigger_payload": "Important context data"})

prompt = task.prompt()

assert "Analyze the data" in prompt
assert "Trigger Payload: Important context data" in prompt


@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_without_allow_crewai_trigger_context():
from crewai import Crew

agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory"
)

task = Task(
description="Analyze the data",
expected_output="Analysis report",
agent=agent,
allow_crewai_trigger_context=False
)

crew = Crew(agents=[agent], tasks=[task])
crew.kickoff({"crewai_trigger_payload": "Important context data"})

prompt = task.prompt()

assert "Analyze the data" in prompt
assert "Trigger Payload:" not in prompt
assert "Important context data" not in prompt


@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_allow_crewai_trigger_context_no_payload():
from crewai import Crew

agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory"
)

task = Task(
description="Analyze the data",
expected_output="Analysis report",
agent=agent,
allow_crewai_trigger_context=True
)

crew = Crew(agents=[agent], tasks=[task])
crew.kickoff({"other_input": "other data"})


prompt = task.prompt()

assert "Analyze the data" in prompt
assert "Trigger Payload:" not in prompt


@pytest.mark.vcr(filter_headers=["authorization"])
def test_do_not_allow_crewai_trigger_context_for_first_task_hierarchical():
from crewai import Crew

agent1 = Agent(role="First Agent", goal="First goal", backstory="First backstory")
agent2 = Agent(role="Second Agent", goal="Second goal", backstory="Second backstory")

first_task = Task(
description="Process initial data",
expected_output="Initial analysis",
agent=agent1,
)


crew = Crew(
agents=[agent1, agent2],
tasks=[first_task],
process=Process.hierarchical,
manager_llm="gpt-4o"
)

crew.kickoff({"crewai_trigger_payload": "Initial context data"})

first_prompt = first_task.prompt()
assert "Process initial data" in first_prompt
assert "Trigger Payload: Initial context data" not in first_prompt


@pytest.mark.vcr(filter_headers=["authorization"])
def test_first_task_auto_inject_trigger():
from crewai import Crew

agent1 = Agent(role="First Agent", goal="First goal", backstory="First backstory")
agent2 = Agent(role="Second Agent", goal="Second goal", backstory="Second backstory")

first_task = Task(
description="Process initial data",
expected_output="Initial analysis",
agent=agent1,
)

second_task = Task(
description="Process secondary data",
expected_output="Secondary analysis",
agent=agent2,
)

crew = Crew(
agents=[agent1, agent2],
tasks=[first_task, second_task]
)
crew.kickoff({"crewai_trigger_payload": "Initial context data"})

first_prompt = first_task.prompt()
assert "Process initial data" in first_prompt
assert "Trigger Payload: Initial context data" in first_prompt

second_prompt = second_task.prompt()
assert "Process secondary data" in second_prompt
assert "Trigger Payload:" not in second_prompt

@pytest.mark.vcr(filter_headers=["authorization"])
def test_ensure_first_task_allow_crewai_trigger_context_is_false_does_not_inject():
from crewai import Crew

agent1 = Agent(role="First Agent", goal="First goal", backstory="First backstory")
agent2 = Agent(role="Second Agent", goal="Second goal", backstory="Second backstory")

first_task = Task(
description="Process initial data",
expected_output="Initial analysis",
agent=agent1,
allow_crewai_trigger_context=False
)

second_task = Task(
description="Process secondary data",
expected_output="Secondary analysis",
agent=agent2,
allow_crewai_trigger_context=True
)

crew = Crew(
agents=[agent1, agent2],
tasks=[first_task, second_task]
)
crew.kickoff({"crewai_trigger_payload": "Context data"})

first_prompt = first_task.prompt()
assert "Trigger Payload: Context data" not in first_prompt

second_prompt = second_task.prompt()
assert "Trigger Payload: Context data" in second_prompt

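The behaviour these tests pin down, as a minimal sketch (LLM configuration and credentials are omitted; the values are illustrative):

from crewai import Agent, Crew, Task

agent = Agent(role="Analyst", goal="Analyze incoming data", backstory="Data analyst")
task = Task(
    description="Analyze the data",
    expected_output="Analysis report",
    agent=agent,
    allow_crewai_trigger_context=True,  # opt this task into receiving the trigger payload
)
crew = Crew(agents=[agent], tasks=[task])
crew.kickoff({"crewai_trigger_payload": "Important context data"})
# task.prompt() now contains "Trigger Payload: Important context data";
# without the flag (or for tasks after the first), the payload is not injected.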
@patch("crewai.agent.CrewTrainingHandler")
|
||||
def test_agent_training_handler(crew_training_handler):
|
||||
task_prompt = "What is 1 + 1?"
|
||||
@@ -1896,7 +2071,7 @@ def test_agent_with_knowledge_sources_generate_search_query():
|
||||
assert "red" in result.raw.lower()
|
||||
|
||||
|
||||
@pytest.mark.vcr(record_mode='none', filter_headers=["authorization"])
|
||||
@pytest.mark.vcr(record_mode="none", filter_headers=["authorization"])
|
||||
def test_agent_with_knowledge_with_no_crewai_knowledge():
|
||||
mock_knowledge = MagicMock(spec=Knowledge)
|
||||
|
||||
@@ -1904,8 +2079,11 @@ def test_agent_with_knowledge_with_no_crewai_knowledge():
|
||||
role="Information Agent",
|
||||
goal="Provide information based on knowledge sources",
|
||||
backstory="You have access to specific knowledge sources.",
|
||||
llm=LLM(model="openrouter/openai/gpt-4o-mini",api_key=os.getenv('OPENROUTER_API_KEY')),
|
||||
knowledge=mock_knowledge
|
||||
llm=LLM(
|
||||
model="openrouter/openai/gpt-4o-mini",
|
||||
api_key=os.getenv("OPENROUTER_API_KEY"),
|
||||
),
|
||||
knowledge=mock_knowledge,
|
||||
)
|
||||
|
||||
# Create a task that requires the agent to use the knowledge
|
||||
@@ -1920,7 +2098,7 @@ def test_agent_with_knowledge_with_no_crewai_knowledge():
|
||||
mock_knowledge.query.assert_called_once()
|
||||
|
||||
|
||||
@pytest.mark.vcr(record_mode='none', filter_headers=["authorization"])
|
||||
@pytest.mark.vcr(record_mode="none", filter_headers=["authorization"])
|
||||
def test_agent_with_only_crewai_knowledge():
|
||||
mock_knowledge = MagicMock(spec=Knowledge)
|
||||
|
||||
@@ -1928,33 +2106,10 @@ def test_agent_with_only_crewai_knowledge():
|
||||
role="Information Agent",
|
||||
goal="Provide information based on knowledge sources",
|
||||
backstory="You have access to specific knowledge sources.",
|
||||
llm=LLM(model="openrouter/openai/gpt-4o-mini",api_key=os.getenv('OPENROUTER_API_KEY'))
|
||||
)
|
||||
|
||||
# Create a task that requires the agent to use the knowledge
|
||||
task = Task(
|
||||
description="What is Vidit's favorite color?",
|
||||
expected_output="Vidit's favorclearite color.",
|
||||
agent=agent
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task],knowledge=mock_knowledge)
|
||||
crew.kickoff()
|
||||
mock_knowledge.query.assert_called_once()
|
||||
|
||||
|
||||
@pytest.mark.vcr(record_mode='none', filter_headers=["authorization"])
|
||||
def test_agent_knowledege_with_crewai_knowledge():
|
||||
crew_knowledge = MagicMock(spec=Knowledge)
|
||||
agent_knowledge = MagicMock(spec=Knowledge)
|
||||
|
||||
|
||||
agent = Agent(
|
||||
role="Information Agent",
|
||||
goal="Provide information based on knowledge sources",
|
||||
backstory="You have access to specific knowledge sources.",
|
||||
llm=LLM(model="openrouter/openai/gpt-4o-mini",api_key=os.getenv('OPENROUTER_API_KEY')),
|
||||
knowledge=agent_knowledge
|
||||
llm=LLM(
|
||||
model="openrouter/openai/gpt-4o-mini",
|
||||
api_key=os.getenv("OPENROUTER_API_KEY"),
|
||||
),
|
||||
)
|
||||
|
||||
# Create a task that requires the agent to use the knowledge
|
||||
@@ -1964,7 +2119,35 @@ def test_agent_knowledege_with_crewai_knowledge():
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent],tasks=[task],knowledge=crew_knowledge)
|
||||
crew = Crew(agents=[agent], tasks=[task], knowledge=mock_knowledge)
|
||||
crew.kickoff()
|
||||
mock_knowledge.query.assert_called_once()
|
||||
|
||||
|
||||
@pytest.mark.vcr(record_mode="none", filter_headers=["authorization"])
|
||||
def test_agent_knowledege_with_crewai_knowledge():
|
||||
crew_knowledge = MagicMock(spec=Knowledge)
|
||||
agent_knowledge = MagicMock(spec=Knowledge)
|
||||
|
||||
agent = Agent(
|
||||
role="Information Agent",
|
||||
goal="Provide information based on knowledge sources",
|
||||
backstory="You have access to specific knowledge sources.",
|
||||
llm=LLM(
|
||||
model="openrouter/openai/gpt-4o-mini",
|
||||
api_key=os.getenv("OPENROUTER_API_KEY"),
|
||||
),
|
||||
knowledge=agent_knowledge,
|
||||
)
|
||||
|
||||
# Create a task that requires the agent to use the knowledge
|
||||
task = Task(
|
||||
description="What is Vidit's favorite color?",
|
||||
expected_output="Vidit's favorclearite color.",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task], knowledge=crew_knowledge)
|
||||
crew.kickoff()
|
||||
agent_knowledge.query.assert_called_once()
|
||||
crew_knowledge.query.assert_called_once()
|
||||
@@ -2164,7 +2347,12 @@ def mock_get_auth_token():
|
||||
|
||||
@patch("crewai.cli.plus_api.PlusAPI.get_agent")
|
||||
def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
|
||||
from crewai_tools import SerperDevTool, XMLSearchTool, CSVSearchTool, EnterpriseActionTool
|
||||
from crewai_tools import (
|
||||
SerperDevTool,
|
||||
XMLSearchTool,
|
||||
CSVSearchTool,
|
||||
EnterpriseActionTool,
|
||||
)
|
||||
|
||||
mock_get_response = MagicMock()
|
||||
mock_get_response.status_code = 200
|
||||
@@ -2173,12 +2361,23 @@ def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
|
||||
"goal": "test goal",
|
||||
"backstory": "test backstory",
|
||||
"tools": [
|
||||
{"module": "crewai_tools", "name": "SerperDevTool", "init_params": {"n_results": 30}},
|
||||
{"module": "crewai_tools", "name": "XMLSearchTool", "init_params": {"summarize": True}},
|
||||
{
|
||||
"module": "crewai_tools",
|
||||
"name": "SerperDevTool",
|
||||
"init_params": {"n_results": "30"},
|
||||
},
|
||||
{
|
||||
"module": "crewai_tools",
|
||||
"name": "XMLSearchTool",
|
||||
"init_params": {"summarize": "true"},
|
||||
},
|
||||
{"module": "crewai_tools", "name": "CSVSearchTool", "init_params": {}},
|
||||
|
||||
# using a tools that returns a list of BaseTools
|
||||
{"module": "crewai_tools", "name": "CrewaiEnterpriseTools", "init_params": {"actions_list": [], "enterprise_token": "test_key"}},
|
||||
{
|
||||
"module": "crewai_tools",
|
||||
"name": "CrewaiEnterpriseTools",
|
||||
"init_params": {"actions_list": [], "enterprise_token": "test_key"},
|
||||
},
|
||||
],
|
||||
}
|
||||
mock_get_agent.return_value = mock_get_response
|
||||
@@ -2221,7 +2420,9 @@ def test_agent_from_repository_override_attributes(mock_get_agent, mock_get_auth
|
||||
"role": "test role",
|
||||
"goal": "test goal",
|
||||
"backstory": "test backstory",
|
||||
"tools": [{"name": "SerperDevTool", "module": "crewai_tools", "init_params": {}}],
|
||||
"tools": [
|
||||
{"name": "SerperDevTool", "module": "crewai_tools", "init_params": {}}
|
||||
],
|
||||
}
|
||||
mock_get_agent.return_value = mock_get_response
|
||||
agent = Agent(from_repository="test_agent", role="Custom Role")
|
||||
|
||||
@@ -0,0 +1,126 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
|
||||
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
|
||||
["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '825'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJdi9swEHz3r1j0HBc7ZydXvx1HC0evhT6UUtrDKNLa1lXWqpJ8aTjy
|
||||
34vsXOz0A/pi8M7OaGZ3nxMApiSrgImOB9Fbnd4Why/v79HeePeD37/Zfdx8evf5+rY4fNjdPbJV
|
||||
ZNDuEUV4Yb0S1FuNQZGZYOGQB4yq+bYsr7J1nhcj0JNEHWmtDWlBaa+MStfZukizbZpfn9gdKYGe
|
||||
VfA1AQB4Hr/Rp5H4k1WQrV4qPXrPW2TVuQmAOdKxwrj3ygduAlvNoCAT0IzW78DQHgQ30KonBA5t
|
||||
tA3c+D06gG/mrTJcw834X0GHWhPsyWm5FHTYDJ7HUGbQegFwYyjwOJQxysMJOZ7Na2qto53/jcoa
|
||||
ZZTvaofck4lGfSDLRvSYADyMQxoucjPrqLehDvQdx+fycjvpsXk3C/TqBAYKXC/q29NoL/VqiYEr
|
||||
7RdjZoKLDuVMnXfCB6loASSL1H+6+Zv2lFyZ9n/kZ0AItAFlbR1KJS4Tz20O4+n+q+085dEw8+ie
|
||||
lMA6KHRxExIbPujpoJg/+IB93SjTorNOTVfV2LrcZLzZYFm+Zskx+QUAAP//AwB1vYZ+YwMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 96fc9f29dea3cf1f-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 15 Aug 2025 23:55:15 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=oA9oTa3cE0ZaEUDRf0hCpnarSAQKzrVUhl6qDS4j09w-1755302115-1.0.1.1-gUUDl4ZqvBQkg7244DTwOmSiDUT2z_AiQu0P1xUaABjaufSpZuIlI5G0H7OSnW.ldypvpxjj45NGWesJ62M_2U7r20tHz_gMmDFw6D5ZiNc;
|
||||
path=/; expires=Sat, 16-Aug-25 00:25:15 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=ICenEGMmOE5jaOjwD30bAOwrF8.XRbSIKTBl1EyWs0o-1755302115700-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '735'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '753'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999830'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999827'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_212fde9d945a462ba0d89ea856131dce
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,123 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
|
||||
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
|
||||
["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '825'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=oA9oTa3cE0ZaEUDRf0hCpnarSAQKzrVUhl6qDS4j09w-1755302115-1.0.1.1-gUUDl4ZqvBQkg7244DTwOmSiDUT2z_AiQu0P1xUaABjaufSpZuIlI5G0H7OSnW.ldypvpxjj45NGWesJ62M_2U7r20tHz_gMmDFw6D5ZiNc;
|
||||
_cfuvid=ICenEGMmOE5jaOjwD30bAOwrF8.XRbSIKTBl1EyWs0o-1755302115700-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLLbtswELzrKxY8W4VkW3ajW1GgQA7JJUGLog0EmlxJrCkuQ1J2g8D/
|
||||
XlByLLkPoBcB2tkZzuzuawLAlGQlMNHyIDqr04/rl6/3z+6hu8vq/cOX5936cRPE3i8Px88rtogM
|
||||
2v1AEd5Y7wR1VmNQZEZYOOQBo2q+LYpVtszzYgA6kqgjrbEhXVPaKaPSZbZcp9k2zd+f2S0pgZ6V
|
||||
8C0BAHgdvtGnkfiTlZAt3iodes8bZOWlCYA50rHCuPfKB24CW0ygIBPQDNZvwdARBDfQqAMChyba
|
||||
Bm78ER3Ad/NJGa7hw/BfQotaExzJaTkXdFj3nsdQptd6BnBjKPA4lCHK0xk5Xcxraqyjnf+Nympl
|
||||
lG8rh9yTiUZ9IMsG9JQAPA1D6q9yM+uos6EKtMfhubzYjnps2s0MXZ3BQIHrWX17Hu21XiUxcKX9
|
||||
bMxMcNGinKjTTngvFc2AZJb6Tzd/0x6TK9P8j/wECIE2oKysQ6nEdeKpzWE83X+1XaY8GGYe3UEJ
|
||||
rIJCFzchsea9Hg+K+RcfsKtqZRp01qnxqmpbFZuM1xssihuWnJJfAAAA//8DAFSowWRjAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 96fc9f301bf7cf1f-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 15 Aug 2025 23:55:16 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '685'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '711'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999827'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999827'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_3f0ec42447374a76a22a4cdb9f336279
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,501 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Crew Manager. You
|
||||
are a seasoned manager with a knack for getting the best out of your team.\nYou
|
||||
are also known for your ability to delegate work to the right people, and to
|
||||
ask the right questions to get the best out of your team.\nEven though you don''t
|
||||
perform tasks by yourself, you have a lot of experience in the field, which
|
||||
allows you to properly evaluate the work of your team members.\nYour personal
|
||||
goal is: Manage the team to complete the task in the best way possible.\nYou
|
||||
ONLY have access to the following tools, and should NEVER make up tools that
|
||||
are not listed here:\n\nTool Name: Delegate work to coworker\nTool Arguments:
|
||||
{''task'': {''description'': ''The task to delegate'', ''type'': ''str''}, ''context'':
|
||||
{''description'': ''The context for the task'', ''type'': ''str''}, ''coworker'':
|
||||
{''description'': ''The role/name of the coworker to delegate to'', ''type'':
|
||||
''str''}}\nTool Description: Delegate a specific task to one of the following
|
||||
coworkers: First Agent\nThe input to this tool should be the coworker, the task
|
||||
you want them to do, and ALL necessary context to execute the task, they know
|
||||
nothing about the task, so share absolutely everything you know, don''t reference
|
||||
things but instead explain them.\nTool Name: Ask question to coworker\nTool
|
||||
Arguments: {''question'': {''description'': ''The question to ask'', ''type'':
|
||||
''str''}, ''context'': {''description'': ''The context for the question'', ''type'':
|
||||
''str''}, ''coworker'': {''description'': ''The role/name of the coworker to
|
||||
ask'', ''type'': ''str''}}\nTool Description: Ask a specific question to one
|
||||
of the following coworkers: First Agent\nThe input to this tool should be the
|
||||
coworker, the question you have for them, and ALL necessary context to ask the
|
||||
question properly, they know nothing about the question, so share absolutely
|
||||
everything you know, don''t reference things but instead explain them.\n\nIMPORTANT:
|
||||
Use the following format in your response:\n\n```\nThought: you should always
|
||||
think about what to do\nAction: the action to take, only one name of [Delegate
|
||||
work to coworker, Ask question to coworker], just the name, exactly as it''s
|
||||
written.\nAction Input: the input to the action, just a simple JSON object,
|
||||
enclosed in curly braces, using \" to wrap keys and values.\nObservation: the
|
||||
result of the action\n```\n\nOnce all necessary information is gathered, return
|
||||
the following format:\n\n```\nThought: I now know the final answer\nFinal Answer:
|
||||
the final answer to the original input question\n```"}, {"role": "user", "content":
|
||||
"\nCurrent Task: Process initial data\n\nThis is the expected criteria for your
|
||||
final answer: Initial analysis\nyou MUST return the actual complete content
|
||||
as the final answer, not a summary.\n\nBegin! This is VERY important to you,
|
||||
use the tools available and give your best Final Answer, your job depends on
|
||||
it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2921'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.12
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFTJbtswEL37KwY824aT2E7qWxcUSE9Fa6CHujDG5EiahhoK5MiOG+Tf
|
||||
C0re0qZAL4LAN2+WN8vTAMCwMwswtkK1deNH7+dXd5+0lTl/mZYPzd1mOvHNuzhffvt6szTDzAib
|
||||
n2T1yBrbUDeelIP0sI2EStnr1e1sNptNbmZvOqAOjnymlY2OpmF0PbmejiZ3o8n8QKwCW0pmAd8H
|
||||
AABP3TenKI4ezQImw+NLTSlhSWZxMgIwMfj8YjAlToqiZngGbRAl6bJeVqEtK13APQiRAw3gyFOJ
|
||||
SqAVgWJ6gFBAE4OllFjK7pmFldGDQ8XMyW8fOSaFtyWJ5ieS1EaCHUGFWwIErULMwQDFAVrbxhwE
|
||||
Bf0+cRrDPezY+xxpy66LXsOOtQL0vgsglFPAuAdHiuxTDnNQPNtz6tOloiCrvCW/H69kJW9tbsgC
|
||||
PhwL24X40HPzH8WjCdxL0+oCnlYmO1qZBazM577yFyWvzBBWvYyP2pstj2KxbIPfUuor+/WqYon0
|
||||
JEwkS7wlN4ZlroDF+tZRAusJ5cjOrCFYVCpD5M4pKxQhnvQbAjsS5WKfQZQ9aCRxCUKEBlUpShp2
|
||||
0qe2rvHgJPsuWBxLmXICBGVAD9xJe+hbTiRCK45inqRsmydiV6GecoPsI6eX+u7K/lQwS+Ky0gSa
|
||||
CRYFNgQu4k6giKEG1vFRzkM3Oj0vpmllni+nN1LRJszLI633FwCKBMXcyG5vfhyQ59Om+FA2MWzS
|
||||
H1RTsHCq1pEwBclbkTQ0pkOfBwA/uo1sXyyZaWKoG11reKAu3PzquvdnzjfgjF7dTA+oBkV/Bm6n
|
||||
8+ErDteHCb9YamPRVuTO1PMFwNZxuAAGF2X/nc5rvvvSWcr/cX8GrKVGya2bSI7ty5LPZpHyjfyX
|
||||
2UnmLmGTKG7Z0lqZYm6FowJb358vk/ZJqV4XLCXFJnJ/w4pmPZ1vimJCE3tnBs+D3wAAAP//AwBY
|
||||
9uEVzAUAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 97144bd22eb41abc-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 18 Aug 2025 20:52:42 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=dCMuu_IT8i5kyJB9_ugQhudGYphCvJlfXMZwJgOuB8Y-1755550362-1.0.1.1-VyrRrYT2JzvUYUjT9T5uCe31rJR0Q_FicsTyAJZYdj0j8anm6ZdVD7QhtUW0OjVK_8F82E4cVt8Uf5shMfmUm3Gf.EMuBA1AgSAUrzsHEy4;
|
||||
path=/; expires=Mon, 18-Aug-25 21:22:42 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=YeODa6MF5ug3OZUV6ob1dSrBKCM8BXbKkS77TIihYoE-1755550362828-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '3236'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '3253'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '29999308'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999308'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 1ms
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 1ms
|
||||
x-request-id:
|
||||
- req_08aa9de2797d4fee93003bdc7fc19156
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are First Agent. First
|
||||
backstory\nYour personal goal is: First goal\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
|
||||
Process initial data\n\nThis is the expected criteria for your final answer:
|
||||
Your best answer to your coworker asking you this, accounting for the context
|
||||
shared.\nyou MUST return the actual complete content as the final answer, not
|
||||
a summary.\n\nThis is the context you''re working with:\nThe task involves analyzing
|
||||
the initial data set we have received. This includes cleaning the data, categorizing
|
||||
it for analysis, identifying any trends or patterns, and summarizing the findings.
|
||||
The goal is to have a clear understanding of what the data indicates and any
|
||||
initial insights that can be drawn from it.\n\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1262'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=dCMuu_IT8i5kyJB9_ugQhudGYphCvJlfXMZwJgOuB8Y-1755550362-1.0.1.1-VyrRrYT2JzvUYUjT9T5uCe31rJR0Q_FicsTyAJZYdj0j8anm6ZdVD7QhtUW0OjVK_8F82E4cVt8Uf5shMfmUm3Gf.EMuBA1AgSAUrzsHEy4;
|
||||
_cfuvid=YeODa6MF5ug3OZUV6ob1dSrBKCM8BXbKkS77TIihYoE-1755550362828-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.12
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFbfbxRHDH7PX+HuE6DLKSEkQN6AChUhoUpFULVBkTPj3XUzO96MZ+9y
|
||||
RfzvlWfubi+USn3Jj/HaY3+f/Y2/HgE07JtLaFyP2Q1jOH5zcfriwx+v36RP3affN+9buetUT27o
|
||||
4uMvp5+bhXnIzV/k8s5r6WQYA2WWWM0uEWayqKfPz8/Pz0/OLs6KYRBPwdy6MR8/k+OBIx8/PXn6
|
||||
7Pjk+fHpi613L+xIm0v48wgA4Gv5aXlGT/fNJZwsdicDqWJHzeX+I4AmSbCTBlVZM8bcLGajk5gp
|
||||
ltTfQZQ1OIzQ8YoAobO0AaOuKQFcxbccMcCr8v8lfBQYkzhShdwTcOTMGMBjRlDKQG1LLvOKwmYB
|
||||
a4I1hwCthCBrUFpRwgC3tAHNNCpkAYo6JbJPXSCMC3CYqZPEf9MCMGLY1D886DQMaOcgU4KWo+fY
|
||||
KRjsiXqKWm5dXsWreLqEJ09+tpzeWFCO3ZMnl3AVAeAY3nLSPCeXaMW0LtVYFVZEKwkGVuXYLYCj
|
||||
k2gYUswLkASUkkSSSYFiTky63EX+vA3ZY/SBdiFghWEihZsNEOeeEvAwTtksuacBblDJg0TQKSWZ
|
||||
SlkVUEmQaJCVHSRykrzCuqdEEMkowGTllqtfec/WehgOgTfiPSZvoO1wdRhghYnxJtAhA/sq3QYe
|
||||
0bJbLqrFLscQIIhDuwEiDqSAiUBHCoF8wU5xIFjj5nEh4OlMwI7O4nxAwwe6P2BhZn3PBHDMAokC
|
||||
rTBmUOoGitn6DnN1QvalF0qbKM9EfOxZgeNKwooUuiTTuAd1FLYoe9SdDIP96jGhy5RYMztdgE6u
|
||||
B1TwNEiXcOzLaeaBYKTE4rV0A8ZNaeiRUitpwOhsKjw7zJIUHr3/9Z0+tjINsFbcVFpCYoHpzGD6
|
||||
mCj60uG/Ys6UIrzzFDO37L7H7DPnvuBTZoWq1wydLxXOoG5zAgRPGTkUhwqVEc/1mg1ky0BLsLGm
|
||||
oMtDJEuwLZxQC9CMuSCFAbJIqN4r1gnDlutyxxSdrMj6ONTDnkcLmHuOe6aX+8kJIreAucKsZNO1
|
||||
T3kBTtIuDjihtmXH1hJVH4wJ5S4W4GIGmXJgStuGGXADie4mNhqmVOcwrkgzdyViIeSZEfLbVmis
|
||||
zrdbmXmgH99NmSkQB9POKlEbkPahRq17dv0ORhcmT3AjuYc7Q8uQXFnTKHd9rkDeTRjzzjJQTuwe
|
||||
UrK7SXuZggeKDkedAuY6P9aRW1a3LDP5RYEoSrbR3zNdweNhDNt+U0s/96S0L2D5ncBhUAEvbrKJ
|
||||
LFEDDyXd2b1OWxXuriPNNgGoRVx3BCRSwuR64PaBpF3F1xvYvadVJ5XqmzHDHiWDxLCBHsvDZTOR
|
||||
YIqeUtG9MmctrI39A00po2lyPOVax5hkxZ4AXRFQw2bPRWmd8jhO9omRGuk+11SWlYoHD9A8YFWA
|
||||
KdmZWm9IYA+tCXzt4HarHriXbqD7MUiqZkngybGyxOMBb62a+tpaT2gdKNNeKVmNSWwZMQW8iu9a
|
||||
2Mi0xSVu4G6yNi/UWP7k9wS4gGkvNKaG9vmIKVcSWXev/QLGQKgEgTIMBLdR1j8drhSJ2knR1po4
|
||||
hXBgwGgdV663ZebL1vJtv74E6cYkN/qda9NyZO2vrWkk2qqiWcamWL8dAXwpa9L0YPNpxiTDmK+z
|
||||
3FK57unZ8xqvmbez2Xr2cmfNkjHMhouzZ4sfBLyuMqoHm1bj0PXkZ9d5LcPJsxwYjg7K/nc6P4pd
|
||||
S+fY/Z/ws8E5GjP56zGRZ/ew5PmzRNYw//XZHuaScGN9zI6uM1MyKjy1OIW6Uza60UzDdcuxozQm
|
||||
rotlO16fX5xge0Hn5y+bo29H/wAAAP//AwCE+a2iZgsAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 97144be7eaa81abc-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 18 Aug 2025 20:52:47 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '4424'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '4473'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999717'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999717'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_5bf23819c1214732aa87a90207bc0d31
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Crew Manager. You
|
||||
are a seasoned manager with a knack for getting the best out of your team.\nYou
|
||||
are also known for your ability to delegate work to the right people, and to
|
||||
ask the right questions to get the best out of your team.\nEven though you don''t
|
||||
perform tasks by yourself, you have a lot of experience in the field, which
|
||||
allows you to properly evaluate the work of your team members.\nYour personal
|
||||
goal is: Manage the team to complete the task in the best way possible.\nYou
|
||||
ONLY have access to the following tools, and should NEVER make up tools that
|
||||
are not listed here:\n\nTool Name: Delegate work to coworker\nTool Arguments:
|
||||
{''task'': {''description'': ''The task to delegate'', ''type'': ''str''}, ''context'':
|
||||
{''description'': ''The context for the task'', ''type'': ''str''}, ''coworker'':
|
||||
{''description'': ''The role/name of the coworker to delegate to'', ''type'':
|
||||
''str''}}\nTool Description: Delegate a specific task to one of the following
|
||||
coworkers: First Agent\nThe input to this tool should be the coworker, the task
|
||||
you want them to do, and ALL necessary context to execute the task, they know
|
||||
nothing about the task, so share absolutely everything you know, don''t reference
|
||||
things but instead explain them.\nTool Name: Ask question to coworker\nTool
|
||||
Arguments: {''question'': {''description'': ''The question to ask'', ''type'':
|
||||
''str''}, ''context'': {''description'': ''The context for the question'', ''type'':
|
||||
''str''}, ''coworker'': {''description'': ''The role/name of the coworker to
|
||||
ask'', ''type'': ''str''}}\nTool Description: Ask a specific question to one
|
||||
of the following coworkers: First Agent\nThe input to this tool should be the
|
||||
coworker, the question you have for them, and ALL necessary context to ask the
|
||||
question properly, they know nothing about the question, so share absolutely
|
||||
everything you know, don''t reference things but instead explain them.\n\nIMPORTANT:
|
||||
Use the following format in your response:\n\n```\nThought: you should always
|
||||
think about what to do\nAction: the action to take, only one name of [Delegate
|
||||
work to coworker, Ask question to coworker], just the name, exactly as it''s
|
||||
written.\nAction Input: the input to the action, just a simple JSON object,
|
||||
enclosed in curly braces, using \" to wrap keys and values.\nObservation: the
|
||||
result of the action\n```\n\nOnce all necessary information is gathered, return
|
||||
the following format:\n\n```\nThought: I now know the final answer\nFinal Answer:
|
||||
the final answer to the original input question\n```"}, {"role": "user", "content":
|
||||
"\nCurrent Task: Process initial data\n\nThis is the expected criteria for your
|
||||
final answer: Initial analysis\nyou MUST return the actual complete content
|
||||
as the final answer, not a summary.\n\nBegin! This is VERY important to you,
|
||||
use the tools available and give your best Final Answer, your job depends on
|
||||
it!\n\nThought:"}, {"role": "assistant", "content": "Thought: I need to delegate
|
||||
the task of processing the initial data to the First Agent to ensure we have
|
||||
a thorough and accurate analysis. I will provide them with all the necessary
|
||||
details to complete this task effectively.\n\nAction: Delegate work to coworker\nAction
|
||||
Input: {\"task\": \"Process initial data\", \"context\": \"The task involves
|
||||
analyzing the initial data set we have received. This includes cleaning the
|
||||
data, categorizing it for analysis, identifying any trends or patterns, and
|
||||
summarizing the findings. The goal is to have a clear understanding of what
|
||||
the data indicates and any initial insights that can be drawn from it.\", \"coworker\":
|
||||
\"First Agent\"}\nObservation: To process the initial data set effectively,
|
||||
we will follow several key steps to ensure we clean, categorize, analyze, and
|
||||
summarize our findings comprehensively.\n\n1. **Data Cleaning**: \n - First,
|
||||
we will review the dataset for missing, inconsistent, or erroneous entries.
|
||||
\n - We will handle missing values by either imputing them based on surrounding
|
||||
data or removing records where necessary.\n - Additionally, we will standardize
|
||||
categorical variables to ensure consistency (e.g., ensuring all location names
|
||||
are spelled the same way).\n\n2. **Data Categorization**: \n - Next, we will
|
||||
categorize the data into relevant segments that will aid our analysis. \n -
|
||||
This involves grouping data points based on common characteristics, such as
|
||||
demographics, time periods, or any key performance indicators (KPIs) we are
|
||||
focusing on.\n\n3. **Trend and Pattern Identification**: \n - With the cleaned
|
||||
and categorized data, we will perform a detailed analysis to identify trends
|
||||
and patterns.\n - This will involve using statistical tools and visualizations
|
||||
to uncover relationships within the data. We will look at time series analysis,
|
||||
correlation coefficients, and any significant outliers that may require further
|
||||
investigation.\n\n4. **Summarizing Findings**: \n - Finally, we will compile
|
||||
a summary of our findings which will include both qualitative insights and quantitative
|
||||
metrics.\n - This summary should encapsulate the key trends identified, any
|
||||
notable patterns, and implications of these findings.\n - We will also document
|
||||
any limitations of the data and suggest areas for further research if necessary.\n\nBy
|
||||
completing these steps, we will not only have a clear understanding of what
|
||||
the data indicates but also provide actionable insights that can guide our next
|
||||
steps. This comprehensive analysis will serve as a solid foundation for any
|
||||
additional exploration or decision-making initiatives related to our project.
|
||||
\n\nIf you have any questions or need further clarification on any part of this
|
||||
process, please let me know!"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5714'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=dCMuu_IT8i5kyJB9_ugQhudGYphCvJlfXMZwJgOuB8Y-1755550362-1.0.1.1-VyrRrYT2JzvUYUjT9T5uCe31rJR0Q_FicsTyAJZYdj0j8anm6ZdVD7QhtUW0OjVK_8F82E4cVt8Uf5shMfmUm3Gf.EMuBA1AgSAUrzsHEy4;
|
||||
_cfuvid=YeODa6MF5ug3OZUV6ob1dSrBKCM8BXbKkS77TIihYoE-1755550362828-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.12
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFbbbhw3DH33VxDzlATrxTq+pX5LUwQJChRBayRA68ChJc4Ma404ETW7
|
||||
3gT594LS3pymQF/2IooUeQ55pK9HAA375goa12N2wxiOX12cvPj9t/dv4sOf79z1m8vFKvbdl+fy
|
||||
YfXlvTQz85C7v8nlrdfcyTAGyiyxml0izGRRTy7Pz8/PF6cXl8UwiKdgbt2Yj8/k+Pni+dnx4sXx
|
||||
4mLj2As70uYK/joCAPhaPi3F6OmhuYLFbLsykCp21FztNgE0SYKtNKjKmjHmZrY3OomZYsn606dP
|
||||
N/G6l6nr8xW8hSgruLeP3BO0HDEARl1Ruomvy7+X5d8VXAuMSRyplq0cOTMG8JgRlDJQ25LLvKSw
|
||||
nsGKYMUhQCshyAqUlpQwwD2tQTONClmAok6JbKsLhHEGDjN1kvgLzQAjhnX94UGnYUBbB5mSJek5
|
||||
dgqGfaKeopZT5zfxJp7M4dmzXyynVxaUY/fs2RXcRAA4htecNO+TS7RkqoVbFVZEKwkGVuXYzYCj
|
||||
k2hoUswzkASUkkSSSYFiTkw630b+sAnZY/SBtiFgiWEihbs1EOeeEvAwTtksuacB7lDJg0TQKSWZ
|
||||
SlkVUEmQaJClLSRykrzCqqdEEMkowGTllqNfes/WfxgOgbcW8Ji8gbbF1WGAJSbGu0CHDOyqdGt4
|
||||
QvNuPqsWOxxDgCAO7QSIOJACJgIdKQTyBTvFgWCF66eFgOd7ArZ0FucDGn6jhwMW9qzvmACOWSBR
|
||||
oCXGDErdQDFb32GuTsi+9EJpE+U9Edc9K3BcSliSQpdkGnegjsIWZYe6k2Gwrx4TukyJNbPTGejk
|
||||
ekAFT4N0Cce+rGYeCEZKLF5LN2Bcl4YeKbWSBozOpsKzwyxJ4cmv797qUyvTAGvFTaUlJBaYTg2m
|
||||
60TRlw5/hzlTivDWU8zcsvsesw+c+4JPmRWqXnvofKlwD+omJ0DwlJFDcahQGfFcj1lDtgy0BBtr
|
||||
Cjo/RLIE28AJtQDNmAtSGCCLhOq9ZJ0wbLguZ0zRyZKsj0Nd7Hm0gLnnuGN6vpucIHIPmCvMSjZd
|
||||
u5Rn4CRt44ATalt2bC2x0QfuYgEtZpApB6a0aZYB15Do88RGwZTqDMYlaeauRCtknBkZf2xExmp8
|
||||
vZGYR9rx3YSZ+nAgwI08rUHaR/pUBCRMZajvJPfw2RAy9JbWKMpdnyt4nyeMeWsZKCd2j2nYnqC9
|
||||
TMEDRYejTgFznRnrwg2TG2aZ/Kw0aJRs475jtwLGwxg2PaaWdu5JaZf4/DtRw6ACXtxkU1iiBh5K
|
||||
unv3OmFVrLuONFvXoxZB3QKfSAmT64HbRzJ2E39ew/YirdqoVO+JPdxRMkgMa+hxaaDbHCSYoqdU
|
||||
tK7MVgsrY/1AR8o4mgRPudYxJlmyJ0BXRNOw2XFRWsZhhG6yLUZmpIdcU5lXKh5dOvuhqqJLydbU
|
||||
ekICe2hN1GvXthvFwJ1cAz2MQVI1SwJPjpUlHg94b9XUG9Z6QusQmd5KyWpMYq8QU71ynx/e9Yna
|
||||
SdGeGnEK4cCA0ZqhkGavjI8by7fduyJINya50+9cm5Yja39rfEq0N4RmGZti/XYE8LG8X6ZHT5Jm
|
||||
TDKM+TbLPZXjTk5OX9SAzf7JtDefXv60sWbJGA78zk8uZz8IeVt1TQ8eQY1D15Pf++5fTDh5lgPD
|
||||
0UHh/87nR7Fr8Ry7/xN+b3COxkz+dkzk2T2ueb8tkbH5X9t2QJeEG2sydnSbmZKR4anFKdTnXqNr
|
||||
zTTcthw7SmPi+uZrx9uzi7u2XdDCvWiOvh39AwAA//8DAIF0yI38CgAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 97144c04e89a1abc-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 18 Aug 2025 20:52:50 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '2974'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '2999'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '29998628'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29998627'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 2ms
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 2ms
|
||||
x-request-id:
|
||||
- req_c0cd67fc9b9342a7bd649b1458724745
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
File diff suppressed because one or more lines are too long
@@ -0,0 +1,296 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are First Agent. First
backstory\nYour personal goal is: First goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\nFinal Answer: Your final answer must be the great and
the most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
Process initial data\n\nThis is the expected criteria for your final answer:
Initial analysis\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '831'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=PslIVDqXn7jd_NXBGdSU5kVFvzwCchKPRVe9LpQVdQA-1736351415895-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.12
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFVNbxw3DL37VxBzyWV3YTtZ2/EtLerCQNH2kCAFmmDBlTgzjDXURKR2
|
||||
vQny3wtpNp7Nx6EXr0eUqPceH6nPZwAN++YWGtejuWEMy1+vLm6Gi93rF9f4x+/h7V26/zi8fPv3
|
||||
xT/34c1vzaKciNsP5OzrqZWLwxjIOMoUdonQqGS9uF6v1+vz5zc3NTBET6Ec60ZbvojLgYWXl+eX
|
||||
L5bn18uLm+PpPrIjbW7h3zMAgM/1b8Epnh6bWzhffF0ZSBU7am6fNgE0KYay0qAqq6FYs5iDLoqR
|
||||
VOj3IHEPDgU63hEgdAU2oOieEsA7uWPBAK/q9y287glY2BgDoGA4KCvEFqwn8GgILLsYdqSgtKOE
|
||||
AVxiY4cB1GjUFdxxUlvAnmDIasCexLg91Awac3L0TcIFkGhOLB1Yj1bWD4CJIFFg3AYCFF8+aIdi
|
||||
YLGenErDuznXmGJZWsFf4ugEroKLIZAz8hXUSKmNaQCEsdwwsGA6AD1i+a8Ut1zhenIP0MYE6FxO
|
||||
6A4VxdEBJKS6gBDjQ4Fdt8kBBlYt3zsMueBK4FldohHFMenqnbyTP+lx0sahURcTfzrFKhZhIBSW
|
||||
rs0BlLqBxHQBOI7hUHJvUdmBGhrrpPpA1kevBbXmYcCa8oEO0BJaTqVQ2fWAWjMvYCDP5bfwKUZd
|
||||
weuetci3Y08KLMpdbzqhqdhYLfE2V3GqDCRWKm8kniq304JWnq+857IfQzgsYMeaMfCnqu8MqGe1
|
||||
2CUcdAHb+AhjiIVsTKAOzShNK9UNx2YrNLdUY1k8peL86o4pdc+jVohjPS8Ke7aeZQZXDK50RATI
|
||||
XqGnMALLk1OrFROJL1iyBaakk15jLF1VWyMRVtYuiqMklfRdTtZTGmKiWmNUJdW5vsUobApZccuB
|
||||
7VBuRe8TTcapHTKS45YdfMykk1xo0KP47xuFDTBwd+R42gPPFLqIQVfwy9R2JH6qEOsPzV2R7jkE
|
||||
6LHOBxcIE8QdpR3T/rSyzxS0CNNZP6m8J3wovUC6gC6zL9hyseIek1coQgDL0tNofRkchVF3NEFp
|
||||
Gv8hq1WLgxB58lWiNhffTpIde5ejrOBNMB7QqDiqUmljFo+TzeZhpWST5mrY0WnGumXqmjFFV4FX
|
||||
Hp4cK0dZDlg7etKojpfV6VhN1GbFMtolh3ASQJFoE7Ey0N8fI1+eRniI3ZjiVr872rQsrP2muClK
|
||||
GddqcWxq9MsZwPv6VORvpn8zpjiMtrH4QPW6i/V6ytfML9QcvXx+fYxaNAxz4PnLy8VPEm48GXLQ
|
||||
k9emceh68vPR+WnC7DmeBM5OaP8I52e5J+os3f9JPweco9HIb8ZEnt23lOdtiT7Uyf/zbU8yV8CN
|
||||
Fsc72hhTKqXw1GIO07va6EGNhk3L0lEaE0+Paztu1lfn2F7Rev2yOfty9h8AAAD//wMAaw+BEmoI
|
||||
AAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 97144c8758cd1abc-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 18 Aug 2025 20:53:12 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=VDTNVbhdzLyVi3fpAyOvoFppI0NEm6YkT9eWIm1wnrs-1755550392-1.0.1.1-vfYBbcAz.yp6ATfVycTWX6tFDJ.1yb_ghwed7t5GOMhNlsFeYYNGz4uupfWMnhc4QLK4UNXIeZGeGKJ.me4S240xKk6FUEu3F5tEAvhPnCM;
|
||||
path=/; expires=Mon, 18-Aug-25 21:23:12 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=FFe5KuJ6P4BUXOoz57aqNdKwRoz64NOw_EhuSGirJWc-1755550392539-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '4008'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '4027'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999825'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999825'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_f287350aa2ac4662b9a5e01e85cc221f
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
body: '{"messages": [{"role": "system", "content": "You are Second Agent. Second
backstory\nYour personal goal is: Second goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\nFinal Answer: Your final answer must be the great and
the most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
Process secondary data\n\nTrigger Payload: Context data\n\nThis is the expected
criteria for your final answer: Secondary analysis\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nThis is the context
you''re working with:\nThe initial analysis of the data involves several critical
steps. First, we must identify the sources of the data, ensuring that they are
reliable and relevant to the objectives of the project. Once the data is collected,
we perform a preliminary examination to check for accuracy and completeness,
looking for any missing values or discrepancies.\n\nNext, we categorize the
data into meaningful segments, applying basic statistical methods to summarize
key features such as mean, median, and mode. This provides insights into the
distribution and central tendencies of the data.\n\nAdditionally, visualizations
such as histograms, box plots, or scatter plots are created to better understand
relationships and patterns within the data. These visual aids help in identifying
trends, outliers, and potential areas of concern.\n\nFurthermore, we assess
the data for its usability in addressing the specific questions at hand, ensuring
that it aligns with the project''s goals. By the end of this initial analysis,
we will have a clear overview of the data''s strengths and weaknesses, guiding
us towards more in-depth investigations or adjustments needed for future data
collection. Ultimately, this foundational analysis sets the stage for future
analytical processes and decision-making initiatives.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2214'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=FFe5KuJ6P4BUXOoz57aqNdKwRoz64NOw_EhuSGirJWc-1755550392539-0.0.1.1-604800000;
|
||||
__cf_bm=VDTNVbhdzLyVi3fpAyOvoFppI0NEm6YkT9eWIm1wnrs-1755550392-1.0.1.1-vfYBbcAz.yp6ATfVycTWX6tFDJ.1yb_ghwed7t5GOMhNlsFeYYNGz4uupfWMnhc4QLK4UNXIeZGeGKJ.me4S240xKk6FUEu3F5tEAvhPnCM
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.12
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFZNbxw5Dr37VxB9yaVt2JlpO/EtM0Awe5hd7GIWGGAzMGiJVcVYJSmk
|
||||
1O3OIP99QKn6w94c5lLoFiWKfHx84p8XACv2q3tYuQmLm3O4/Pn25t2XX/9T7z7+evP77vdNdP++
|
||||
peH6brz95W1ere1EevxMrhxOXbk050CFU+xmJ4SFzOvN3Waz2Vz/8P5tM8zJU7BjYy6XP6bLmSNf
|
||||
vr1+++Pl9d3lzbvl9JTYka7u4X8XAAB/tq/FGT09r+7hen1YmUkVR1rdHzcBrCQFW1mhKmvBWFbr
|
||||
k9GlWCi20P8BMe3AYYSRtwQIo4UNGHVHAvApfuSIAT60//fw20TAkQtjAIwY9soKaYAyEXgsCBy3
|
||||
KWxJQWlLggGccGGHAbRQ1iv4yKJlDTuCuWoB9hQLD/vmQVMVRy8croGiVuE4Qpmw2PoeUAiEAuNj
|
||||
IMDo7Q9tMRYoqZ3speHtyVeWZEtX8NvEehalk1o48td+AYET8vzIgcvejhK6aQlrDahKqoedM5Up
|
||||
+RTSyKRQlTwMSToKLoVgAaS4bvG5FAeW+VUWgccIOy5T8yekhOImQJ716lP8FP8VHZ0hqwe35Bt+
|
||||
mWRIMgNCNjBmjih7oGe0X3a3oeEmck8tMHSuCrr9ElAjK0VSXTCZCaN9C7saUtWwh5DSk8Xcjsc9
|
||||
zNzT32KopGvwNQd2WAgoFmFbMgRYnVDG6AyYlq9LNXjQJ9pZmjUUvYKfA2E8gNkybMCwghYcqeUr
|
||||
1RnT7P4jDTgWGsXqwxFSlYWH1DH7Jz13dllYYxL+eg5hLKnlyXEcagClcaZY1KLeojQ+LREbxTJJ
|
||||
4UidVnbTsURfKqkhfMBucdRRxxDSTlvQdjPmDpKZ0gCPqOwsxcLaG6MTSe0SrfOMLeYn2sNAWKqQ
|
||||
XsFPe3AYXA1YTvTDuIaZPONCMtOVlvqIHFuNWn9wVB6noj37hgVrEX6sPVpjg5UPAxSKnnrZXrTg
|
||||
bmJrAxJrGVRAGFKNvudkec5JaOHU88t6fPCebRuGsO91abIIW9aKgb82HwpajfgKE2tJo+Csa3hM
|
||||
z5BDKtrTU4elkPQlA4tCdezNWW+f0H1NnLUdyG1/1NZiHI/5WM1IDyEAslfIAfeAsOWCAUw7jVwH
|
||||
bWqQC0Vv/K4lMMkSU06mo00MhbCh5lJ0JFYSo4EdrS1ao61koXKiY0ONY6lsUhX2DbCPVcpEYraG
|
||||
Vxed46E3JjW4CBRHQO/lTJQ0k+OB3Ymh1lUTRv9a+ZowHJvbpMgYfJKjRTDfKIwJQ0f0TFYNf+tI
|
||||
ajEKQeoAi3HNoJ+u4EOXjJPzuRmFvlQWWnARokbVctanx3dAIPM2lRed3psGc5aEbmqY/dSfD7IQ
|
||||
h64hrx+phuWOQ4AJ2ztnbBWaKKq9e2lLsmXanfP+jSmRUBzL1NPfET6ZYNKh7Wv0JPa6egOyeR8r
|
||||
+yW1HYrXQ5EvPeUyGfxWl3GhvUHvP1ctXYYikV+ekaFa679+TYCGIYmp539D4RkLWVe1hE8def40
|
||||
K5XOna6pZ57PsMySXEurZenJsXKKlzM+db01JFvV10BzTjuShdaNLFauwB4GwZl2SZ6sqo+Vg4ea
|
||||
TWDUoPcUtvalTNKVyApKzzkkOWrjsUPPhxWhoSrawBRrCGcGjDF1yW1j0h+L5dtxMAppzJIe9dXR
|
||||
1cCRdXqwjk3RhiAtKa+a9dsFwB9tAKsvZqpVljTn8lDSE7Xrfni/6f5Wp7nvzPrudrGWVDCcDHfv
|
||||
btbfcfjgqSAHPZvhVg7dRP509DTwYfWczgwXZ2n/fzjf891T5zj+Hfcng3OUC/mHbEOSe5nyaZvQ
|
||||
5zakfH/bEeYW8MoeFXb0UJjESuFpwBr6tLrSvRaaHwaOo2kn95F1yA+b22scbmmzeb+6+HbxFwAA
|
||||
AP//AwAAHGphwAsAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 97144ca1b97b1abc-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 18 Aug 2025 20:53:21 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '8604'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '8628'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999482'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999485'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_545a8ffcdf954433b9059a5b35dddf20
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
437
tests/cassettes/test_first_task_auto_inject_trigger.yaml
Normal file
File diff suppressed because one or more lines are too long
845
tests/cassettes/test_max_usage_count_is_respected.yaml
Normal file
@@ -0,0 +1,845 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1452'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//hFPRjtMwEHzPV6z83J5ySVvu8nYCIZ1ACKSCEOSUOs4mMefYlr2Boqr/
|
||||
jpy0TQ7uxIsV7exMZmftQwTAZMUyYKLlJDqrlq837zbXn/flx6/fVPIp/fI+3ZbaxVv568ObR7YI
|
||||
DFP+QEFn1pUwnVVI0ugRFg45YVC9frVeb5I0WacD0JkKVaA1lpYrs0ziZLWMb5bx5kRsjRToWQbf
|
||||
IwCAw3AGi7rCPcsgXpwrHXrPG2TZpQmAOaNChXHvpSeuiS0mUBhNqAfXu90u19vW9E1LGdyDRqyA
|
||||
DAiuFFCLIAkdJ6kbIGPGUi2dJyDZ4VWu70SYNpv6itB3rsO9tj1lcMiZDF8F4Z5ylkHO3g4qJ5rR
|
||||
OTvOLTqse89DQrpXagZwrQ0NjCGchxNyvMShTGOdKf1fVFZLLX1bOOTe6DC6J2PZgB4jgIch9v5J
|
||||
ksw601kqyDzi8Lvk9nbUY9OiJzRdn0AyxNWsnq4Wz+gVFRKXys8WxwQXLVYTddoy7ytpZkA0m/pf
|
||||
N89pj5OPG/qv/AQIgZawKqzDSoqnE09tDsM7eKntkvJgmHl0P6XAgiS6sIkKa96r8Yoy/9sTdkUt
|
||||
dYPOOjne09oWq01Z1zHG4oZFx+gPAAAA//8DAHvlcCKwAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 971b3f72effa6897-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 19 Aug 2025 17:07:35 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
|
||||
path=/; expires=Tue, 19-Aug-25 17:37:35 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '2564'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '2751'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '29999674'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999674'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_d654df1116aa42ca8ee7d10b4b424303
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
|
||||
are an agent that iterates a given number of times\nYour personal goal is: Call
|
||||
the iterating tool 5 times\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
|
||||
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
|
||||
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
|
||||
the following format in your response:\n\n```\nThought: you should always think
|
||||
about what to do\nAction: the action to take, only one name of [iterating_tool],
|
||||
just the name, exactly as it''s written.\nAction Input: the input to the action,
|
||||
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
|
||||
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
|
||||
is gathered, return the following format:\n\n```\nThought: I now know the final
|
||||
answer\nFinal Answer: the final answer to the original input question\n```"},
|
||||
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
|
||||
is the expected criteria for your final answer: A list of the iterations\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
|
||||
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
|
||||
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
|
||||
iteration"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1673'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
|
||||
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4RTTW/bMAy9+1cQOidFlg8v8G1LB6zosbvNhaNItK3NFjWJ7loE+e+D7CR2uw67
|
||||
CAIf3xP5SB0TAGG0yECoWrJqXTPfpffp2tWqu93u7m+Xu+3Dl6/m96+X+pB+VmIWGXT4gYovrBtF
|
||||
rWuQDdkBVh4lY1T98HGzSZer5SbtgZY0NpFWOZ6vab5cLNfzxXa+SM/EmozCIDL4ngAAHPszlmg1
|
||||
PosMFrNLpMUQZIUiuyYBCE9NjAgZggksLYvZCCqyjLaver/f5/ZbTV1VcwZ3UMsnhHMXqIFrhNL4
|
||||
wGAYvYyNgbQaLEaQwHlS52tMDajIaiCLN7n9pGJ6dmHaqmCi5hKHO+s6zuCYCxNvBeMz5yKDXDwM
|
||||
KtcXc3GaVu+x7IKM5tmuaSaAtJa4Z/S+PZ6R09Wphirn6RDeUEVprAl14VEGstGVwOREj54SgMd+
|
||||
It0rk4Xz1DoumH5i/9xqvRn0xLgDE3R7BplYNtP4avaOXqGRpWnCZKZCSVWjHqnjAshOG5oAyaTr
|
||||
v6t5T3vofBjRf+VHQCl0jLpwHrVRrzse0zzGL/KvtKvLfcEioH8yCgs26OMkNJaya4btFeElMLZF
|
||||
aWyF3nkzrHDpinV6KMsFLtRWJKfkDwAAAP//AwAMYddzywMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 971b3f84cf146897-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 19 Aug 2025 17:07:38 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1900'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '2551'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999629'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_f4181fe581264993ac5c6deba4f1c287
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
|
||||
are an agent that iterates a given number of times\nYour personal goal is: Call
|
||||
the iterating tool 5 times\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
|
||||
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
|
||||
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
|
||||
the following format in your response:\n\n```\nThought: you should always think
|
||||
about what to do\nAction: the action to take, only one name of [iterating_tool],
|
||||
just the name, exactly as it''s written.\nAction Input: the input to the action,
|
||||
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
|
||||
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
|
||||
is gathered, return the following format:\n\n```\nThought: I now know the final
|
||||
answer\nFinal Answer: the final answer to the original input question\n```"},
|
||||
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
|
||||
is the expected criteria for your final answer: A list of the iterations\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
|
||||
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
|
||||
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
|
||||
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
|
||||
the first iteration and need to proceed to the second one.\nAction: iterating_tool\nAction
|
||||
Input: {\"input_text\": \"Second iteration\"}\nObservation: Iteration 0: Second
|
||||
iteration"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1922'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
|
||||
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//hFNNb9swDL37VxA6J0XmfCz1bRswNBh2GXrYsBSOItG2ElsUJLrIUOS/
|
||||
D7KT2N067CIIfHxP5CP1kgAIo0UGQlWSVePq6afVl9Xq9HAI3z6uj8eHz6os9OGrvt98T9sfYhIZ
|
||||
tD+g4ivrTlHjamRDtoeVR8kYVd+9Xy5X6TxdrjugIY11pJWOpwuaprN0MZ2tp7PVhViRURhEBj8T
|
||||
AICX7owlWo0nkcFsco00GIIsUWS3JADhqY4RIUMwgaVlMRlARZbRdlXvdrutfayoLSvOYAOVfEa4
|
||||
dIEauEIIqMhqMIxexs5AWg0WI0rgPKnLNeZyZbwGsni3tR9UzM6uRFvmTFRf47CxruUMXrbCxFvO
|
||||
eOKtyGArHjuR23tbcR4X77Fog4ze2bauR4C0lrhjdLY9XZDzzaiaSudpH/6gisJYE6rcowxkoymB
|
||||
yYkOPScAT91A2lceC+epcZwzHbF7bn6/6PXEsAIjdH0BmVjWQ3wxTydv6OUaWZo6jEYqlFQV6oE6
|
||||
zF+22tAISEZd/13NW9p95/2E/is/AEqhY9S586iNet3xkOYx/pB/pd1c7goWAf2zUZizQR8nobGQ
|
||||
bd0vrwi/AmOTF8aW6J03/QYXLl+s9kUxw5lai+Sc/AYAAP//AwDpY7tZygMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 971b3f958e746897-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 19 Aug 2025 17:07:39 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '890'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '906'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '29999577'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999577'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_b3632e88268747218e4cc4cc08d87bca
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
|
||||
are an agent that iterates a given number of times\nYour personal goal is: Call
|
||||
the iterating tool 5 times\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
|
||||
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
|
||||
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
|
||||
the following format in your response:\n\n```\nThought: you should always think
|
||||
about what to do\nAction: the action to take, only one name of [iterating_tool],
|
||||
just the name, exactly as it''s written.\nAction Input: the input to the action,
|
||||
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
|
||||
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
|
||||
is gathered, return the following format:\n\n```\nThought: I now know the final
|
||||
answer\nFinal Answer: the final answer to the original input question\n```"},
|
||||
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
|
||||
is the expected criteria for your final answer: A list of the iterations\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
|
||||
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
|
||||
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
|
||||
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
|
||||
the first iteration and need to proceed to the second one.\nAction: iterating_tool\nAction
|
||||
Input: {\"input_text\": \"Second iteration\"}\nObservation: Iteration 0: Second
|
||||
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
|
||||
the second iteration and need to proceed to the third one.\nAction: iterating_tool\nAction
|
||||
Input: {\"input_text\": \"Third iteration\"}\nObservation: Iteration 0: Third
|
||||
iteration\n\n\nYou ONLY have access to the following tools, and should NEVER
|
||||
make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool Arguments:
|
||||
{''input_text'': {''description'': None, ''type'': ''str''}}\nTool Description:
|
||||
A tool that iterates a given number of times\n\nIMPORTANT: Use the following
|
||||
format in your response:\n\n```\nThought: you should always think about what
|
||||
to do\nAction: the action to take, only one name of [iterating_tool], just the
|
||||
name, exactly as it''s written.\nAction Input: the input to the action, just
|
||||
a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
|
||||
the result of the action\n```\n\nOnce all necessary information is gathered,
|
||||
return the following format:\n\n```\nThought: I now know the final answer\nFinal
|
||||
Answer: the final answer to the original input question\n```"}], "model": "gpt-4o",
|
||||
"stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '3018'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
|
||||
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//hFPBbtswDL37Kwidk8JzErfxbdgwtNh1GwrMhaPItK3MFgWJLloE+fdB
|
||||
dhK7W4ddBIGP74l8pI4RgNClyECoRrLqbLv8lH5Nb8k9Hu7T5PP37f361R1Wj1us6IeVYhEYtD+g
|
||||
4gvrRlFnW2RNZoSVQ8kYVD/cbjZpsko22wHoqMQ20GrLyzUtkzhZL+O7ZZyeiQ1phV5k8DMCADgO
|
||||
ZyjRlPgiMogXl0iH3ssaRXZNAhCO2hAR0nvtWRoWiwlUZBjNUPVut8vNt4b6uuEMHqCRzwjnLrAE
|
||||
bhC40a4EzehkaAykKcFgAAmsI3W+htSKescNkMGb3HxUIT27ME1dMFF7icODsT1ncMyFDreC8YVz
|
||||
kUEuvowq1xdzcZpX77DqvQzmmb5tZ4A0hnhgDL49nZHT1amWauto7/+gikob7ZvCofRkgiueyYoB
|
||||
PUUAT8NE+jcmC+uos1ww/cLhuTRJRz0x7cCEru7OIBPLdsZK14t39IoSWerWz2YqlFQNlhN1WgDZ
|
||||
l5pmQDTr+u9q3tMeOx9H9F/5CVAKLWNZWIelVm87ntIchi/yr7Sry0PBwqN71goL1ujCJEqsZN+O
|
||||
2yv8q2fsikqbGp11elzhyhbrdF9VMcbqTkSn6DcAAAD//wMACjMtRssDAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 971b3f9bbef86897-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 19 Aug 2025 17:07:40 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1182'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1208'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '29999320'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999320'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 1ms
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 1ms
|
||||
x-request-id:
|
||||
- req_7fc641fabc634f29ae085ef176071402
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
|
||||
are an agent that iterates a given number of times\nYour personal goal is: Call
|
||||
the iterating tool 5 times\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
|
||||
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
|
||||
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
|
||||
the following format in your response:\n\n```\nThought: you should always think
|
||||
about what to do\nAction: the action to take, only one name of [iterating_tool],
|
||||
just the name, exactly as it''s written.\nAction Input: the input to the action,
|
||||
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
|
||||
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
|
||||
is gathered, return the following format:\n\n```\nThought: I now know the final
|
||||
answer\nFinal Answer: the final answer to the original input question\n```"},
|
||||
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
|
||||
is the expected criteria for your final answer: A list of the iterations\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
|
||||
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
|
||||
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
|
||||
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
|
||||
the first iteration and need to proceed to the second one.\nAction: iterating_tool\nAction
|
||||
Input: {\"input_text\": \"Second iteration\"}\nObservation: Iteration 0: Second
|
||||
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
|
||||
the second iteration and need to proceed to the third one.\nAction: iterating_tool\nAction
|
||||
Input: {\"input_text\": \"Third iteration\"}\nObservation: Iteration 0: Third
|
||||
iteration\n\n\nYou ONLY have access to the following tools, and should NEVER
|
||||
make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool Arguments:
|
||||
{''input_text'': {''description'': None, ''type'': ''str''}}\nTool Description:
|
||||
A tool that iterates a given number of times\n\nIMPORTANT: Use the following
|
||||
format in your response:\n\n```\nThought: you should always think about what
|
||||
to do\nAction: the action to take, only one name of [iterating_tool], just the
|
||||
name, exactly as it''s written.\nAction Input: the input to the action, just
|
||||
a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
|
||||
the result of the action\n```\n\nOnce all necessary information is gathered,
|
||||
return the following format:\n\n```\nThought: I now know the final answer\nFinal
|
||||
Answer: the final answer to the original input question\n```"}, {"role": "assistant",
|
||||
"content": "```\nThought: I have completed the third iteration and need to proceed
|
||||
to the fourth one.\nAction: iterating_tool\nAction Input: {\"input_text\": \"Fourth
|
||||
iteration\"}\nObservation: Iteration 0: Fourth iteration"}], "model": "gpt-4o",
|
||||
"stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '3267'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
|
||||
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4RTTW/bMAy9+1cQOieFmyZO5ts2bECxDwztgB3mwlFk2tZmi4JEFx2C/PdBchKn
|
||||
W4ddBIGP74l8pPYJgNCVyEGoVrLqbTd/m33INvJ+0NnnT/ff3tTZR72483eSv7xLScwCg3Y/UPGJ
|
||||
daWotx2yJjPCyqFkDKrX69UqW9wssjQCPVXYBVpjeb6k+SJdLOfpZp5mR2JLWqEXOXxPAAD28Qwl
|
||||
mgqfRA5RJkZ69F42KPJzEoBw1IWIkN5rz9KwmE2gIsNoYtXb7bYwX1sampZzuIVWPiIcu8AKuEWo
|
||||
aXDcgmZ0MnQG0lRgMKAE1pE6XmOurrkFMnhVmNcqZOcnomlKJupOcbg1duAc9oXQ4VYyPnEhcijE
|
||||
+yhyfq8Qh8viHdaDl8E7M3TdBSCNIY6MaNvDETmcjeqosY52/g+qqLXRvi0dSk8mmOKZrIjoIQF4
|
||||
iAMZnnksrKPecsn0E+Nz2Xo16olpBSb05tURZGLZTfH19XL2gl5ZIUvd+YuRCiVVi9VEneYvh0rT
|
||||
BZBcdP13NS9pj52PE/qv/AQohZaxKq3DSqvnHU9pDsMP+Vfa2eVYsPDoHrXCkjW6MIkKazl04/IK
|
||||
/8sz9mWtTYPOOj1ucG3LZbar6xRTtRHJIfkNAAD//wMANy12ZMoDAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 971b3fa3ea2f6897-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 19 Aug 2025 17:07:41 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '780'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '878'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '29999268'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999268'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 1ms
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 1ms
|
||||
x-request-id:
|
||||
- req_603a6c645bac468888838d21c64db11f
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
|
||||
are an agent that iterates a given number of times\nYour personal goal is: Call
|
||||
the iterating tool 5 times\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
|
||||
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
|
||||
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
|
||||
the following format in your response:\n\n```\nThought: you should always think
|
||||
about what to do\nAction: the action to take, only one name of [iterating_tool],
|
||||
just the name, exactly as it''s written.\nAction Input: the input to the action,
|
||||
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
|
||||
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
|
||||
is gathered, return the following format:\n\n```\nThought: I now know the final
|
||||
answer\nFinal Answer: the final answer to the original input question\n```"},
|
||||
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
|
||||
is the expected criteria for your final answer: A list of the iterations\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
|
||||
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
|
||||
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
|
||||
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
|
||||
the first iteration and need to proceed to the second one.\nAction: iterating_tool\nAction
|
||||
Input: {\"input_text\": \"Second iteration\"}\nObservation: Iteration 0: Second
|
||||
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
|
||||
the second iteration and need to proceed to the third one.\nAction: iterating_tool\nAction
|
||||
Input: {\"input_text\": \"Third iteration\"}\nObservation: Iteration 0: Third
|
||||
iteration\n\n\nYou ONLY have access to the following tools, and should NEVER
|
||||
make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool Arguments:
|
||||
{''input_text'': {''description'': None, ''type'': ''str''}}\nTool Description:
|
||||
A tool that iterates a given number of times\n\nIMPORTANT: Use the following
|
||||
format in your response:\n\n```\nThought: you should always think about what
|
||||
to do\nAction: the action to take, only one name of [iterating_tool], just the
|
||||
name, exactly as it''s written.\nAction Input: the input to the action, just
|
||||
a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
|
||||
the result of the action\n```\n\nOnce all necessary information is gathered,
|
||||
return the following format:\n\n```\nThought: I now know the final answer\nFinal
|
||||
Answer: the final answer to the original input question\n```"}, {"role": "assistant",
|
||||
"content": "```\nThought: I have completed the third iteration and need to proceed
|
||||
to the fourth one.\nAction: iterating_tool\nAction Input: {\"input_text\": \"Fourth
|
||||
iteration\"}\nObservation: Iteration 0: Fourth iteration"}, {"role": "assistant",
|
||||
"content": "```\nThought: I have completed the fourth iteration and need to
|
||||
proceed to the fifth one.\nAction: iterating_tool\nAction Input: {\"input_text\":
|
||||
\"Fifth iteration\"}\nObservation: Iteration 0: Fifth iteration"}], "model":
|
||||
"gpt-4o", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '3514'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
|
||||
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4xTy26jMBTd8xVXXpOK0IRE7KqRInVazaZZTROBY1/AU2NT26QzqvLvI0MSSNuJ
|
||||
ZsPC58E99/EeABDBSQqEVdSxupGTb8lDcne7pt8Xb/r14XX5tJiufiT1z8f944smoVfo3S9k7qS6
|
||||
YbpuJDqhVQ8zg9Shd50u5vMkvo2TuANqzVF6Wdm4yUxP4iieTaLlJEqOwkoLhpak8BwAALx3X1+i
|
||||
4vibpBCFp5caraUlkvRMAiBGS/9CqLXCOqocCQeQaeVQdVXneb5R60q3ZeVSuIeK7hGOKZADlRLm
|
||||
IBwa6kPZm41aCUUl3Cn7hiaF5w25P6EQpbASxrpBsCEhfGA8IdOKX6WsK2GuM1a6Na66ThHFJWO7
|
||||
UXmej/tgsGgt9WNQrZQjgCqlXZ/YT2B7RA7nnktdNkbv7AcpKYQStsoMUquV7691uiEdeggAtt1s
|
||||
24txkcbounGZ0y/Y/W4Rz3s/MmzTgM6TI+i0o3KkWk7DL/wyjo4KaUfbQRhlFfJBOqwSbbnQIyAY
|
||||
pf5czVfefXKhyv+xHwDGsHHIs8YgF+wy8UAz6I/tX7Rzl7uCiUWzFwwzJ9D4SXAsaCv7OyD2j3VY
|
||||
Z4VQJZrGiP4YiiabJbuiiDBiSxIcgr8AAAD//wMAnPTcwxUEAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 971b3faa6ba46897-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 19 Aug 2025 17:07:43 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1392'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1841'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999217'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 1ms
|
||||
x-request-id:
|
||||
- req_19dc255cec9d4763be7d5f597c80e936
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
228
tests/cassettes/test_task_allow_crewai_trigger_context.yaml
Normal file
@@ -0,0 +1,228 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Analyze the data\n\nTrigger
Payload: Important context data\n\nThis is the expected criteria for your final
answer: Analysis report\nyou MUST return the actual complete content as the
final answer, not a summary.\n\nBegin! This is VERY important to you, use the
tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '865'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=FFe5KuJ6P4BUXOoz57aqNdKwRoz64NOw_EhuSGirJWc-1755550392539-0.0.1.1-604800000;
|
||||
__cf_bm=VDTNVbhdzLyVi3fpAyOvoFppI0NEm6YkT9eWIm1wnrs-1755550392-1.0.1.1-vfYBbcAz.yp6ATfVycTWX6tFDJ.1yb_ghwed7t5GOMhNlsFeYYNGz4uupfWMnhc4QLK4UNXIeZGeGKJ.me4S240xKk6FUEu3F5tEAvhPnCM
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.12
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
body:
string: 'upstream connect error or disconnect/reset before headers. reset reason:
connection termination'
headers:
|
||||
CF-RAY:
|
||||
- 97144cd97d521abc-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '95'
|
||||
Content-Type:
|
||||
- text/plain
|
||||
Date:
|
||||
- Mon, 18 Aug 2025 20:53:22 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
status:
|
||||
code: 503
|
||||
message: Service Unavailable
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
|
||||
personal goal is: test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Analyze the data\n\nTrigger
|
||||
Payload: Important context data\n\nThis is the expected criteria for your final
|
||||
answer: Analysis report\nyou MUST return the actual complete content as the
|
||||
final answer, not a summary.\n\nBegin! This is VERY important to you, use the
|
||||
tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
|
||||
"model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '865'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=FFe5KuJ6P4BUXOoz57aqNdKwRoz64NOw_EhuSGirJWc-1755550392539-0.0.1.1-604800000;
|
||||
__cf_bm=VDTNVbhdzLyVi3fpAyOvoFppI0NEm6YkT9eWIm1wnrs-1755550392-1.0.1.1-vfYBbcAz.yp6ATfVycTWX6tFDJ.1yb_ghwed7t5GOMhNlsFeYYNGz4uupfWMnhc4QLK4UNXIeZGeGKJ.me4S240xKk6FUEu3F5tEAvhPnCM
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '1'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.12
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4xXTW8cNxK961cUBtBFmBEkRfKHbrLiAI6xcBLvYRe7gVFDVneXxSZbLPaMx0H+
|
||||
e1Bkf42kLPZiWM1hseq9V6/IP04AVmxXt7AyDSbTdm5z/+ry7cW/Ht99vfmIPz/uPt73b0MMP3/6
|
||||
d/fxx3erte4I269k0rjr3IS2c5Q4+LJsImEijXr5+ubm5ubi+uIqL7TBktNtdZc212HTsufN1cXV
|
||||
9ebi9ebyzbC7CWxIVrfwnxMAgD/yv5qnt/RtdQsX6/FLSyJY0+p2+hHAKganX1YowpLQp9V6XjTB
|
||||
J/I59Q/gwx4Meqh5R4BQa9qAXvYUAf7rf2KPDu7y37f64ezszqM7CAv8Rl2I6exMP+vCB59isL1R
|
||||
EMrXfzYEXR+7IAShgtSwQMy7gAVSgC6GHVs9WPGL1JCXnMh4Ri/sa0gNQc76WwKLCcd9FtjnxRS5
|
||||
rilChwcX0J7DuwPQN1Rsh+0dxcSefIIdRsatIwH0FtiST1wd8u8ieSvrozyR25ypJcc7irBD1+tu
|
||||
YC9cN0kgNZgyhOyrEFuwZFg4+E2LDxq1i8GQCMl5gelHLeDTjuKOaT/jpHUJJa1TORMFTFMNvUBL
|
||||
KbIRMME5MoksBM0FQToyXLHR8jjYNZBXIJV2XwPqchKQ3jSAAoJadcV1H0nWYHpJoaUI5GusqSWf
|
||||
1hmT0FFEZREdUFWxYfLmcA4f6bBAj71xvaVbreryHM7OPufwWt7t2Rn8I/jUuMPxoSBU6zlkYXtQ
|
||||
ZFQuYDBRHSJniK401P2Y2vsptRxzwCGS0+ZSXqYi2CeKmMUn6yE5BWFPW+FEsGPhJGuQYBgdtGQZ
|
||||
QQOXDVp3RWS3aB5AedRcftBcPi3QeD+hoelMnRCqI8xGuuYkcEcRa4JI0gUvBIlbWkPVu4qd0ywg
|
||||
YqIhj0gS+mgI0LlgctSldsZji3YW0P9CUTNHb6isAcAGVFyFhNw67C0r4AIIXRBO2m9Z+UVU2iwO
|
||||
JUEV+giPPcZEUdaw59QA+vwjdE6Li4SlsS9vTrUTI+3I93Sej2y4bkjSpitJDZ2gfAtgpLHxmKxK
|
||||
85dBCncZgPGvd+vc9pG3fcoNGuDVxWm2kpDQlbLOhzo/E0qGP0s00wp7igRhKxR3ZMcaQLj22jXo
|
||||
04CMdPxAYPuop/x6vQZM5dQisiY4tpgF24YcOfPxd1JdgH//vMcmeUgT9oDgQxoMZQCU/TPR9p32
|
||||
y9XNafZJjCWt7GuRdtkjigMU7BeHqawg+GPZdw5T1jg0KIBOAkQW8nrIDxen61Ekiob0Ru2r6h20
|
||||
GB8ofzTYdsi1n8Cf6jzuoSwJdIo5Jpaq9OeQVAVvbk4HUkxo2+Chi8iidhHi5A6PPTpOhyyMqduV
|
||||
UTaUafgfXbpg4s7PU+VJv84elznRwQDc6owpGLJ/qVP3DZsGGtzN1GVW3mpRkSp16nH6UFXpXzvy
|
||||
JPl49jvyKcQDtOgHrkYs714yi4LJwu0ee1bHLAlYGhOoYmjh+g00oY95cF1dl/+vc0c6nVmaFflG
|
||||
fULPVaNaINt3efA9HVtq/j+pLnw9OE+xFhPaLfsMZSlszKR01hFrC11KX9ck4/iclSVJ4a21MnWJ
|
||||
CbjJGL1qZ0/OFeHk6Y+xpgTYWyVRRbGBpR4WXJao6qt7PU0xrVCSDlNvoQ3lyKyGNIpsnP2H526U
|
||||
TW5ByzOVS05msqaF28jkxNlVM+i9V6HrVMOOEzr+nnt3sh2NMOMz2FVH+DB0/8jVfdDJI4t7GMvi
|
||||
TuUtRTFB57HC50l1o+oKPrHv87VqR5JG8c/sUFWFOJJmo/LyErmKpUbmVqtSnak0WmSfsFzJFLij
|
||||
HtRbqsVos2ZHDb6I6zl8njHIiTgaOmYYcMPFbM/OAflsrprNVi+T2n/PAJ7R56HR6hj2ed7Zp06g
|
||||
kao+9ZEGsH8jNS/ytsycsTfelwYbxEn2Jfc8orBoYyAyJxG6xC1/JztOVk17OHkD9wNb0AbPKeRA
|
||||
5pkNpzBdcVX6WCKPyl7UlmN+yLznMsk0PrhQH/KGLaWUbee5bekRI7WF12dmmZHKIvyba/5w2R4a
|
||||
TaAw/eSeHaDu9bFQ0J/pHC/cOidHaU/X/vGGw22HJqkMxw4ywEo46rJkFQw3m26+Rp0v302Rql5Q
|
||||
326+d26xgF5neGZfX2y/Dyt/Tm80F+ouhq082bqq2LM0X2K2B32PSQrdKq/+eQLwe34L9kfPu5Wa
|
||||
QZe+pPBA+bjLV1cl3mp+gs6rN9NqvjHNC6+vrtcvBPxiKSE7WTwnVwZNQ3beOr891XHDYuFkUfbz
|
||||
dF6KXUpnX/8/4ecFY6hLZL90kSyb45Lnn0X6ml9LL//sBP4CAAD//4IGM9jBStAsEV+SmVoEioqU
|
||||
1LTE0hxIx1mpuLK4JDU3Pi0zLz21qKAoE9J7TiuINzVMSbIwSUxLTFLiquUCAAAA//8DANr6751L
|
||||
EAAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 97144ce12be51abc-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 18 Aug 2025 20:53:29 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '6350'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '6385'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999820'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999820'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_633dd1e17cb44249af3d9408f3d3c21b
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,156 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
|
||||
personal goal is: test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Analyze the data\n\nThis
|
||||
is the expected criteria for your final answer: Analysis report\nyou MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '822'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=wu1mwFBixM_Cn8wLLh.nRacWi8OMVBrEyBNuF_Htz6I-1743463498282-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.12
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//fFfbjhtHDn33VxAC5mXQEmY8lh3Mm9e3DHa9NpzZC3YdBFQ31V2Z6mKn
|
||||
WCVZCfLvC7JKLY2d7IsAdXfxcnh4yPrtCcDCdYtbWLQDpnac/PLV8+vv/n1P7+7yx+f/eeOHXQ7/
|
||||
/P6uf0fxYfqwaPQEb36mNh1PrVoeJ0/JcSiv20iYSK1ev1iv1+urmxc39mLkjrwe66e0fMbL0QW3
|
||||
fHr19Nny6sXy+rt6emDXkixu4b9PAAB+s1+NM3T0ZXELV83xyUgi2NPidv4IYBHZ65MFijhJGNKi
|
||||
Ob1sOSQKFvodBN5DiwF6tyNA6DVswCB7igCfw1sX0MNL+38Ln8PncHn5MqA/iBP4RBPHdHlZHl+v
|
||||
4C6kyF1uFYbLSz1/PziBaN/BFEkoJAEERSvSQEHM7dEgbyENBB0m/cR7ahN1wDuK9tyjJPglY0wU
|
||||
V3A/EPSMvpxycjLjBBJDR9GMWzS48QQuiOuHJA24jkJy2wOkSKGTBjB04MKW4wgdtU4ch+WIDy70
|
||||
MEVuSYQEthxhm1OOBJIiJuodyapk/3QFrzXsDzuKO0f7Y/o1G6FUknZqaMw+uckTTBhxpERRwIXW
|
||||
504dCnp15vocSRposyQeKUJHI/cRp8G10mhYijS0GgdHRzWLno4foYfOSYpukxWCFfyVDrDD6BSM
|
||||
Ctev1FXPdKuJLOHy8gfz/7b4v7y8hXtO6GtYkXYUMjWAO4rYE6SIQQrIsEOvr3JwSUDYd6ti8dUx
|
||||
hddnKajllz010FPoKDbguUU1U/KYcmwHFAVkQwPuHMdq7WPN/NWcuZr6SFHLh6HVmkcWgc5ttxQp
|
||||
PAbpBPTIksyXJ2XWxFP2GI/ISnX37hzQ12eAqteC1fStb8WZs+LVOw5HltyUIrx1QQOQQpIT5vfG
|
||||
RrX7OQAsq3UXVEyEOtgc4Hp9YUTCSJ2yXPtiirQzZ7U3Gti7NABC4GTEl8k9EHQ5atZ6YmDvOjyA
|
||||
EAqHVfGmZP3TqgJ6YYgsZFFcXTSAqUBRAhlcP1A8sRX3GCmQiCGcMPakX44YHyhpHC2OE7q+YvP/
|
||||
aHIKb4puxHg4edmgELQcVOZMPxTZnesyegHsqYOn6+XNswawbTkHc6xt/OzqwnTDmF2pZr2snt7S
|
||||
iJ5mJwKc04StRf/o+eYAT68uwAXTKPQeZCIrbbX0IXgXCGTgabLWzrGnrtG+T65VtvkD4Mihhxw3
|
||||
GE62G8tEGyL0gCCD2yZIvMfYKbF7p4FT6LGnkUKaMfyqOQ7nfTFD+UaVNXJwrQCNpDEBStFYwq5U
|
||||
p56vVLq5uqi1AxkwqphW9EwXar7f80hFhTB2FOZWAsE9oEqr4eFCVZPNAdYXpmXjVDNVLtNei3Oi
|
||||
ykltLRx1rYIv1NfcrVmMy1rOo9NvCS8grg9u61oMyR9gwyzKykeSa2VdX8yQPhKAuzpDZizf488c
|
||||
j9WjkKx2JBO1Dr0/GE4Dwd85psFkRuH5F0mCV4ySGiXHXltIo9QeknQUWVVGiqaJNcdPOaIHVDlo
|
||||
YD847W1r8cbOoKcOckg4TdTBxDrmHfrGRkcwFBUQDso6WF/M0jJX5CvaBaLO+mXuX84pErZDFbRn
|
||||
K/hELY8jhc4CPdO093P9fij1O6iyvQmD6eSRw6cq03bLMUn1pU8OnEP/1eQrhJwoWrGdDrC60JSx
|
||||
EXlkC2QF/0hOPwBhrQWM1DmEyWPShijj/NRBj4pmiqILRKXPWQDzFgE9poFUhreRx3lx+aYT33Kb
|
||||
TdM/EeoaIZWQf9gpwDqydDaopmoTxqPK0479riwkZGOtJVM8jTWrZKzgVQ0bNjl0vi4vpRc4gq2k
|
||||
1k5nnVLBUieRtDeSkiFRJEl/3AFvvkwYpM6/17Qjz5MqVO3RVhEOsNWsC9kgzqyFiQKlSmm4Cy45
|
||||
THQaBZDQea6jTfcB/yhbK/CR1bqv+awvooVPlVibWHigy2ZLp+HCqv5Zx6Qtau85uMQKteZxp9v7
|
||||
aCTSkW17gC1uVY7qEqh+HogmnY/tg6E/YOhNMOeRVLeVEm7VzLJnruBvVAesbV9J+ZyYvRnGnHjE
|
||||
RGcMm/u/NKEbyR++XlBrJ66t/K3PMi/fH8pQaow+p+247L4qpmdiCH3kvTbWDK9Gb0oDkX7JznrB
|
||||
jdZEiXT463daxr8cADsu+q2e4lEQVFhn5S5RcOwxuF9LTnrvOFbtOEDPVqnmJE8zuILJybYsJmVP
|
||||
1FVaUVNgbOEfWffzs8xmNbUMK1zPV/BysmH95ahYRot0XI47La3KkfM+z9JZBkWx9KeredWh8/X8
|
||||
cJ7Yqvh7T2ngjj33B8hShfb87qMYWRVq6Sz08xuVUNxpsHql2nKuEozeOGXmsq7WegPsjtVpc7SV
|
||||
2GPopMWpqlB29kW93hSACyvbwdGOSvVy0vndwUZ7W/uh3ILdjmR1fsWMtM2Ces0N2fuzFxh0JzXj
|
||||
ern9sb75fb7Oeu6nyBv56uhi64KT4ado0qVXV0k8Lezt708AfrRrc350E16UteKnxA9k7q7X62Jv
|
||||
UW7r/wMAAP//jFi9DoIhDNx5DGaHbxDi9zSEtEVr/CHANzj47gYwFiOD85XLHSXAtcoQ1Jr9G23/
|
||||
GgEOy7qbEDqkelvlIXlr8HAilKUS0/2GfB8ANdj+lTPj7tb5dvyHXgAAioXQxUTI8G1ZyhKdW9ie
|
||||
l322uQnW9dgxkCtMqbYCKfjt0mcMOj9yoasLXF/umLgPGkJ0xi4+WDJm1eqpXgAAAP//AwCGkEKG
|
||||
dhEAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 97144c27cad01abc-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 18 Aug 2025 20:53:07 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=gumItH7ZRtD4GgE2NL8KJd5b0g0ukzMySphsV0ru1LE-1755550387-1.0.1.1-iwCn2q9kDpJVTaZu1Swtv1kYCiM39NBeviV1R9awG4XHHMKnojkbu6T7jh_Z3UxfNbluVCsI6RMKj.2rEPp1IcH63gHUQdJfHF71CdCZ3Uc;
|
||||
path=/; expires=Mon, 18-Aug-25 21:23:07 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=d7iU8FXLKWOoICtn52jYIApBpBp20kALP6yQjOvXHvQ-1755550387858-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '14516'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '14596'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999830'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999827'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_3c1af5f5590a4b76b33f3fbf7d3a3288
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,154 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
|
||||
personal goal is: test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Analyze the data\n\nThis
|
||||
is the expected criteria for your final answer: Analysis report\nyou MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '822'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=aoRHJvKio8gVXmGaYpzTzdGuWwkBsDAyAKAVwm6QUbE-1743465392324-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.93.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.93.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.12
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFddcxu3Dn3Pr8BoJi8eSWM5luP6zbFbN7d1m0n81nQyEAntIuaSe0lQ
|
||||
itLpf++A+6G1rztzX+zVcgEeAAeH4F+vAGZsZ1cwMzWKaVq3uLlY/XD5n7uffzvLFw+r28tV+nT3
|
||||
7vdV/hDuf8bZXC3C5isZGayWJjStI+Hgu2UTCYXU6+rter1en56vTstCEyw5NataWZyHRcOeF2en
|
||||
Z+eL07eL1WVvXQc2lGZX8McrAIC/yl/F6S19m11B8VXeNJQSVjS7Gj8CmMXg9M0MU+Ik6GU2Py6a
|
||||
4IV8gf4efNiDQQ8V7wgQKoUN6NOeIsBn/xN7dHBdfl/BZ//Zn5xce3SHxAk+UhuinJx0r1dLeO8l
|
||||
BpuNpuHk5EodPNScIJYPAblJIAFQHXwnkJqgjWHHlixYFIREAuwhREtRv6RvEtEINISefbXNDtgn
|
||||
rmpJIDVKgc5+G2IDSSIKVWzAkuHEwadlh+xsCbfq/ZaSidw+QUfdxiZ4zVSCsIUmO+HWEewwMm4c
|
||||
pTmwNy5b9hVssoAPAo4bFrKKMqGjBFuuctRvTU4SGopgqQlVxLZmk+ZQ0fADXY8ZFcoc0FsQbigJ
|
||||
Nm2BIBF9wpLHtIQf0dRAXuJBkyM9Zs1VpDZSIi8JELLn/2aa2s4BnQt7hb0NETTMpo1Uk0+l3EMh
|
||||
wxbaHE2NqURINe44RAg7UqPUkuEtk4WWIgfbZ/XNEu5J6mCDC9VhktDRLSe10EDJQi6+k6BwkpKD
|
||||
plgnSNnUgAnsUJ4dHb/TfIYYyZVcjb67pEWqIiUtNZS20h2UX8lQFGQPg12quU2wIdkT+WNde17s
|
||||
OGV0/L3bQkJwCTASoEsBsrDj712h2bnc0Qwe6QASydtUkLQoQnFk3PkSfmKvfEl9Yj77BZycfCpM
|
||||
eSh2/QKA5uwJhSDVYa+J58rzlg16gdzuMdpuy64wMk11V5k57FlqaAkfe4/BmByj5sXm8q8Oji0e
|
||||
IBGmQq774KVeBL9o9AGqGPZSg8aYAHcUsSILq/Xr3rfuWnNVU5J+i0hG+9UqN2/JULOhuBwCvhl6
|
||||
4XbSC2Pk17CJhI827L1y8MXGgUg7Qtf3u27f4NcQWQ4T2lKCPUVdsgQbbRPLO7ZZ7UoAZ+vFm/M5
|
||||
oDEhexka4vz0dem2IOie9dy1tayP6Nzh2NJJg8xxg14Jgkl1QyJvcq8EF1OHJT3zgsVg2RNVpIKv
|
||||
oI20pUjeUMHRdqqp8JTOG3YshzGHd1PhGMR3zOFHqkaYgzRAylVFSSZJ+y1EqfdaNQUOIcuxNYPU
|
||||
FLWZNPS+zthXd8IHjcwTRneAs/VrOBD2EqFPSyhKb0J2FjYEKNO8CMaK9LnB+EglFwabFrnyCRxm
|
||||
b+qOQAWvIhyjf6CmDREdvO+F/8ge/0TBJvVTNS1qU4d9n4PSExORq0OOCdBx5buA9Zi02mIbVSpK
|
||||
qftkPsif0RSPErI6gw/3pfPfwIf7+ZBw9Rxa4Ub5pIo+lLcJhVcQ4pHl5CusqCGvZx4LoyrfoCDr
|
||||
JdxMdO9Z2adLJtB2y4bLKdCTjUaihcRFUF9WwhEZOtDU7ViYOkXryr8LLjc0cmKyl6b8dHm57guv
|
||||
muCGg5mAtWiCSnA9Uceq03YbYsEJNvKuFEk36qO+WMJHMqFpyNsO7hP5fBhodD86vBloNNLiHSbl
|
||||
tJ+qyDg1zEGQXYhPqDhMDqVcpjuoQnfQFt1QCdFGyO3z9m/wAAcmZyHlTRm0GB1EkjycBQr7d2UE
|
||||
fyf4JME8wq+0I3fEe8c76jZjS166k7bLvpI2/YtylyKx1zlT431O7Tlg28bQRu7YoBs73RjQfs1J
|
||||
mkKXVA/9WpRTAjREAm3QAVFDsdQUMuRYUXpZkX781qJPx5kK4EaHKUsFtcYVe4nSk14FbphiRk2a
|
||||
A0vJ5YZgQ56UYtpBAdjvtK2qEgQ37DC+XDgtjLcUe1UraltKpPOZcJOduujUrCfbW20xnevS04Hw
|
||||
eKh2s2mCTlV0ZHhh/NyiUb1W991Up8NsP4EuGnwsULwtfCfYZsmRjiLT49VOKXCEfQ45QRM8S4iD
|
||||
MVpspev3sJ3GvRnZjoKLsoc/gtyzc5pUE3PJKHtokL3ORkWDQ9OSdAKBdodesBp78XIJ121LKig0
|
||||
sHUxvPoG11dwS9pMZKEba8os9VAGq2ffvruCO+VLR9qbGlUC3g/DlCL5hQ7jxPTM+OZqOmfCu3Fi
|
||||
UGn9NJknx3tJ0Yv+OPqXeXerZzlCDJucBLYh93pTnB5vEq1D74cahJYi9mLJjZKDui5ScewpHWKF
|
||||
vp8kl9NLV6RtTqgXP5+dmyyg96GrbLnu/dmv/D1e8Fyo2hg26ZnpbMueU/0lFjnQy1yS0M7K6t+v
|
||||
AP4sF8n85G44U71v5YuERyrbrdbrzt/seH89rl6s3vSrZaA5Lry9uJy/4PCLLYRIk7vozKAe7UfT
|
||||
48UVs+UwWXg1Cft/4bzkuwudffX/uD8uGEOtkP3S/gMAAP//KkpNyUxG9TJCWVEqqH+PSxk8mMEO
|
||||
VipOLSrLTE6NL8lMLQJFRUpqWmJpDqTXrVRcWVySmhuflpmXnlpUUJQJ6XqnFcSbmhkkppmlmppa
|
||||
KnHVcgEAAAD//wMABbo03YgQAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 97144d0daeb11abc-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 18 Aug 2025 20:53:43 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=UW4fV15_S2h9VQ58d_nhU200TOxc3Tjdd_QFUBY6B80-1755550423-1.0.1.1-.oSX43E.zjFk61gbEHMacZh5c8ndmynl75bstCvKcohtwVY6oLpdBWnO2lTUFXpzvGaGsbuYt55OUo_Hmi228z97Nm4cDdOT84lhfStAcms;
|
||||
path=/; expires=Mon, 18-Aug-25 21:23:43 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=dg9d3YnyfwVQNRGWo64PZ6mtqIOlYEozligD5ggvZFc-1755550423708-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '13654'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '13673'
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999827'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999827'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_169cd22058fb418f90f12e041c0880a9
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -2,6 +2,7 @@
import os
import tempfile
from pathlib import Path
from unittest.mock import Mock, patch

import pytest
from dotenv import load_dotenv
@@ -34,14 +35,130 @@ def setup_test_environment():
            f"Test storage directory {storage_dir} is not writable: {e}"
        )

    # Set environment variable to point to the test storage directory
    os.environ["CREWAI_STORAGE_DIR"] = str(storage_dir)

    os.environ["CREWAI_TESTING"] = "true"
    yield

    os.environ.pop("CREWAI_TESTING", None)
    # Cleanup is handled automatically when tempfile context exits


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "telemetry: mark test as a telemetry test (don't mock telemetry)"
    )


@pytest.fixture(autouse=True)
def auto_mock_telemetry(request):
    if request.node.get_closest_marker("telemetry"):
        telemetry_env = {
            key: value
            for key, value in os.environ.items()
            if key not in ["CREWAI_DISABLE_TELEMETRY", "OTEL_SDK_DISABLED"]
        }
        with patch.dict(os.environ, telemetry_env, clear=True):
            yield
        return

    if "telemetry" in str(request.fspath):
        telemetry_env = {
            key: value
            for key, value in os.environ.items()
            if key not in ["CREWAI_DISABLE_TELEMETRY", "OTEL_SDK_DISABLED"]
        }
        with patch.dict(os.environ, telemetry_env, clear=True):
            yield
        return

    with patch.dict(
        os.environ, {"CREWAI_DISABLE_TELEMETRY": "true", "OTEL_SDK_DISABLED": "true"}
    ):
        with patch("crewai.telemetry.Telemetry") as mock_telemetry_class:
            mock_instance = create_mock_telemetry_instance()
            mock_telemetry_class.return_value = mock_instance

            with (
                patch(
                    "crewai.utilities.events.event_listener.Telemetry",
                    mock_telemetry_class,
                ),
                patch("crewai.tools.tool_usage.Telemetry", mock_telemetry_class),
                patch("crewai.cli.command.Telemetry", mock_telemetry_class),
                patch("crewai.cli.create_flow.Telemetry", mock_telemetry_class),
            ):
                yield mock_instance


def create_mock_telemetry_instance():
    mock_instance = Mock()

    mock_instance.ready = False
    mock_instance.trace_set = False
    mock_instance._initialized = True

    mock_instance._is_telemetry_disabled.return_value = True
    mock_instance._should_execute_telemetry.return_value = False

    telemetry_methods = [
        "set_tracer",
        "crew_creation",
        "task_started",
        "task_ended",
        "tool_usage",
        "tool_repeated_usage",
        "tool_usage_error",
        "crew_execution_span",
        "end_crew",
        "flow_creation_span",
        "flow_execution_span",
        "individual_test_result_span",
        "test_execution_span",
        "deploy_signup_error_span",
        "start_deployment_span",
        "create_crew_deployment_span",
        "get_crew_logs_span",
        "remove_crew_span",
        "flow_plotting_span",
        "_add_attribute",
        "_safe_telemetry_operation",
    ]

    for method in telemetry_methods:
        setattr(mock_instance, method, Mock(return_value=None))

    mock_instance.task_started.return_value = None

    return mock_instance


@pytest.fixture
def mock_opentelemetry_components():
    with (
        patch("opentelemetry.trace.get_tracer") as mock_get_tracer,
        patch("opentelemetry.trace.set_tracer_provider") as mock_set_provider,
        patch("opentelemetry.baggage.set_baggage") as mock_set_baggage,
        patch("opentelemetry.baggage.get_baggage") as mock_get_baggage,
        patch("opentelemetry.context.attach") as mock_attach,
        patch("opentelemetry.context.detach") as mock_detach,
    ):
        mock_tracer = Mock()
        mock_span = Mock()
        mock_tracer.start_span.return_value = mock_span
        mock_get_tracer.return_value = mock_tracer

        yield {
            "get_tracer": mock_get_tracer,
            "set_tracer_provider": mock_set_provider,
            "tracer": mock_tracer,
            "span": mock_span,
            "set_baggage": mock_set_baggage,
            "get_baggage": mock_get_baggage,
            "attach": mock_attach,
            "detach": mock_detach,
        }


@pytest.fixture(scope="module")
def vcr_config(request) -> dict:
    return {

@@ -1,5 +1,4 @@
"""Test Agent creation and execution basic functionality."""

import hashlib
import json
from concurrent.futures import Future
@@ -51,7 +50,7 @@ from crewai.utilities.events.memory_events import (
    MemoryRetrievalStartedEvent,
    MemoryRetrievalCompletedEvent,
)

from crewai.memory.external.external_memory import ExternalMemory

@pytest.fixture
def ceo():
@@ -312,7 +311,6 @@ def test_crew_creation(researcher, writer):
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_sync_task_execution(researcher, writer):
|
||||
from unittest.mock import patch
|
||||
|
||||
tasks = [
|
||||
Task(
|
||||
@@ -961,7 +959,6 @@ def test_cache_hitting_between_agents(researcher, writer, ceo):
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_api_calls_throttling(capsys):
|
||||
from unittest.mock import patch
|
||||
|
||||
from crewai.tools import tool
|
||||
|
||||
@@ -1396,7 +1393,6 @@ def test_kickoff_for_each_invalid_input():
|
||||
|
||||
def test_kickoff_for_each_error_handling():
|
||||
"""Tests error handling in kickoff_for_each when kickoff raises an error."""
|
||||
from unittest.mock import patch
|
||||
|
||||
inputs = [
|
||||
{"topic": "dog"},
|
||||
@@ -1433,7 +1429,6 @@ def test_kickoff_for_each_error_handling():
|
||||
@pytest.mark.asyncio
|
||||
async def test_kickoff_async_basic_functionality_and_output():
|
||||
"""Tests the basic functionality and output of kickoff_async."""
|
||||
from unittest.mock import patch
|
||||
|
||||
inputs = {"topic": "dog"}
|
||||
|
||||
@@ -1540,7 +1535,6 @@ async def test_async_kickoff_for_each_async_empty_input():
|
||||
|
||||
|
||||
def test_set_agents_step_callback():
|
||||
from unittest.mock import patch
|
||||
|
||||
researcher_agent = Agent(
|
||||
role="Researcher",
|
||||
@@ -1570,7 +1564,6 @@ def test_set_agents_step_callback():
|
||||
|
||||
|
||||
def test_dont_set_agents_step_callback_if_already_set():
|
||||
from unittest.mock import patch
|
||||
|
||||
def agent_callback(_):
|
||||
pass
|
||||
@@ -2035,7 +2028,6 @@ def test_crew_inputs_interpolate_both_agents_and_tasks():
|
||||
|
||||
|
||||
def test_crew_inputs_interpolate_both_agents_and_tasks_diff():
|
||||
from unittest.mock import patch
|
||||
|
||||
agent = Agent(
|
||||
role="{topic} Researcher",
|
||||
@@ -2068,7 +2060,6 @@ def test_crew_inputs_interpolate_both_agents_and_tasks_diff():
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_crew_does_not_interpolate_without_inputs():
|
||||
from unittest.mock import patch
|
||||
|
||||
agent = Agent(
|
||||
role="{topic} Researcher",
|
||||
@@ -2203,7 +2194,6 @@ def test_task_same_callback_both_on_task_and_crew():
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_tools_with_custom_caching():
|
||||
from unittest.mock import patch
|
||||
|
||||
from crewai.tools import tool
|
||||
|
||||
@@ -2484,7 +2474,6 @@ def test_multiple_conditional_tasks(researcher, writer):
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_using_contextual_memory():
|
||||
from unittest.mock import patch
|
||||
|
||||
math_researcher = Agent(
|
||||
role="Researcher",
|
||||
@@ -2583,7 +2572,6 @@ def test_memory_events_are_emitted():
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_using_contextual_memory_with_long_term_memory():
|
||||
from unittest.mock import patch
|
||||
|
||||
math_researcher = Agent(
|
||||
role="Researcher",
|
||||
@@ -2614,7 +2602,6 @@ def test_using_contextual_memory_with_long_term_memory():
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_warning_long_term_memory_without_entity_memory():
|
||||
from unittest.mock import patch
|
||||
|
||||
math_researcher = Agent(
|
||||
role="Researcher",
|
||||
@@ -2651,7 +2638,6 @@ def test_warning_long_term_memory_without_entity_memory():
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_long_term_memory_with_memory_flag():
|
||||
from unittest.mock import patch
|
||||
|
||||
math_researcher = Agent(
|
||||
role="Researcher",
|
||||
@@ -2686,7 +2672,6 @@ def test_long_term_memory_with_memory_flag():
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_using_contextual_memory_with_short_term_memory():
|
||||
from unittest.mock import patch
|
||||
|
||||
math_researcher = Agent(
|
||||
role="Researcher",
|
||||
@@ -2717,7 +2702,6 @@ def test_using_contextual_memory_with_short_term_memory():
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_disabled_memory_using_contextual_memory():
|
||||
from unittest.mock import patch
|
||||
|
||||
math_researcher = Agent(
|
||||
role="Researcher",
|
||||
@@ -2845,7 +2829,6 @@ def test_crew_output_file_validation_failures():
|
||||
|
||||
|
||||
def test_manager_agent(researcher, writer):
|
||||
from unittest.mock import patch
|
||||
|
||||
task = Task(
|
||||
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
|
||||
@@ -4752,3 +4735,43 @@ def test_default_crew_name(researcher, writer):
|
||||
],
|
||||
)
|
||||
assert crew.name == "crew"
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_ensure_exchanged_messages_are_propagated_to_external_memory():
|
||||
external_memory = ExternalMemory(storage=MagicMock())
|
||||
|
||||
math_researcher = Agent(
|
||||
role="Researcher",
|
||||
goal="You research about math.",
|
||||
backstory="You're an expert in research and you love to learn new things.",
|
||||
allow_delegation=False,
|
||||
)
|
||||
|
||||
task1 = Task(
|
||||
description="Research a topic to teach a kid aged 6 about math.",
|
||||
expected_output="A topic, explanation, angle, and examples.",
|
||||
agent=math_researcher,
|
||||
)
|
||||
|
||||
crew = Crew(
|
||||
agents=[math_researcher],
|
||||
tasks=[task1],
|
||||
external_memory=external_memory,
|
||||
)
|
||||
|
||||
with patch.object(
|
||||
ExternalMemory, "save", return_value=None
|
||||
) as external_memory_save:
|
||||
crew.kickoff()
|
||||
|
||||
expected_messages = [
|
||||
{'role': 'system', 'content': "You are Researcher. You're an expert in research and you love to learn new things.\nYour personal goal is: You research about math.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},
|
||||
{'role': 'user', 'content': '\nCurrent Task: Research a topic to teach a kid aged 6 about math.\n\nThis is the expected criteria for your final answer: A topic, explanation, angle, and examples.\nyou MUST return the actual complete content as the final answer, not a summary.\n\n# Useful context: \nExternal memories:\n\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:'},
|
||||
{'role': 'assistant', 'content': 'I now can give a great answer \nFinal Answer: \n\n**Topic: Understanding Shapes (Geometry)**\n\n**Explanation:** \nShapes are everywhere around us! They are the special forms that we can see in everyday objects. Teaching a 6-year-old about shapes is not only fun but also a way to help them think about the world around them and develop their spatial awareness. We will focus on basic shapes: circle, square, triangle, and rectangle. Understanding these shapes helps kids recognize and describe their environment.\n\n**Angle:** \nLet’s make learning about shapes an adventure! We can turn it into a treasure hunt where the child has to find objects around the house or outside that match the shapes we learn. This hands-on approach helps make the learning stick!\n\n**Examples:** \n1. **Circle:** \n - Explanation: A circle is round and has no corners. It looks like a wheel or a cookie! \n - Activity: Find objects that are circles, such as a clock, a dinner plate, or a ball. Draw a big circle on a paper and then try to draw smaller circles inside it.\n\n2. **Square:** \n - Explanation: A square has four equal sides and four corners. It looks like a box! \n - Activity: Look for squares in books, in windows, or in building blocks. Try to build a tall tower using square blocks!\n\n3. **Triangle:** \n - Explanation: A triangle has three sides and three corners. It looks like a slice of pizza or a roof! \n - Activity: Use crayons to draw a big triangle and then find things that are shaped like a triangle, like a slice of cheese or a traffic sign.\n\n4. **Rectangle:** \n - Explanation: A rectangle has four sides but only opposite sides are equal. It’s like a stretched square! \n - Activity: Search for rectangles, such as a book cover or a door. You can cut out rectangles from colored paper and create a collage!\n\nBy relating the shapes to fun activities and using real-world examples, we not only make learning more enjoyable but also help the child better remember and understand the concept of shapes in math. This foundation forms the basis of their future learning in geometry!'}
|
||||
]
|
||||
external_memory_save.assert_called_once_with(
|
||||
value=ANY,
|
||||
metadata={"description": ANY, "messages": expected_messages},
|
||||
agent=ANY,
|
||||
)
|
||||
|
||||
@@ -497,6 +497,131 @@ def test_unstructured_flow_event_emission():
    assert isinstance(received_events[6].timestamp, datetime)


def test_flow_trigger_payload_injection():
    captured_payload = []

    class TriggerFlow(Flow):
        @start()
        def start_method(self, crewai_trigger_payload=None):
            captured_payload.append(crewai_trigger_payload)
            return "started"

        @listen(start_method)
        def second_method(self):
            captured_payload.append("no_parameter")
            return "finished"

    flow = TriggerFlow()

    test_payload = "This is important trigger data"
    flow.kickoff(inputs={"crewai_trigger_payload": test_payload})

    assert captured_payload == [test_payload, "no_parameter"]


def test_flow_trigger_payload_injection_multiple_starts():
    captured_payloads = []

    class MultiStartFlow(Flow):
        @start()
        def start_method_1(self, crewai_trigger_payload=None):
            captured_payloads.append(("start_1", crewai_trigger_payload))
            return "start_1_done"

        @start()
        def start_method_2(self, crewai_trigger_payload=None):
            captured_payloads.append(("start_2", crewai_trigger_payload))
            return "start_2_done"

    flow = MultiStartFlow()

    test_payload = "Multiple start trigger data"
    flow.kickoff(inputs={"crewai_trigger_payload": test_payload})

    assert captured_payloads == [("start_1", test_payload), ("start_2", test_payload)]


def test_flow_without_trigger_payload():
    captured_payload = []

    class NormalFlow(Flow):
        @start()
        def start_method(self, crewai_trigger_payload=None):
            captured_payload.append(crewai_trigger_payload)
            return "no_trigger"

    flow = NormalFlow()

    flow.kickoff(inputs={"other_data": "some value"})

    assert captured_payload[0] is None


def test_flow_trigger_payload_with_structured_state():
    class TriggerState(BaseModel):
        id: str = "test"
        message: str = ""

    class StructuredFlow(Flow[TriggerState]):
        @start()
        def start_method(self, crewai_trigger_payload=None):
            return crewai_trigger_payload

    flow = StructuredFlow()

    test_payload = "Structured state trigger data"
    result = flow.kickoff(inputs={"crewai_trigger_payload": test_payload})

    assert result == test_payload


def test_flow_start_method_without_trigger_parameter():
    execution_order = []

    class FlowWithoutParameter(Flow):
        @start()
        def start_without_param(self):
            execution_order.append("start_executed")
            return "started"

        @listen(start_without_param)
        def second_method(self):
            execution_order.append("second_executed")
            return "finished"

    flow = FlowWithoutParameter()

    result = flow.kickoff(inputs={"crewai_trigger_payload": "some data"})

    assert execution_order == ["start_executed", "second_executed"]
    assert result == "finished"


def test_async_flow_with_trigger_payload():
    captured_payload = []

    class AsyncTriggerFlow(Flow):
        @start()
        async def async_start_method(self, crewai_trigger_payload=None):
            captured_payload.append(crewai_trigger_payload)
            await asyncio.sleep(0.01)
            return "async_started"

        @listen(async_start_method)
        async def async_second_method(self, result):
            captured_payload.append(result)
            await asyncio.sleep(0.01)
            return "async_finished"

    flow = AsyncTriggerFlow()

    test_payload = "Async trigger data"
    result = asyncio.run(flow.kickoff_async(inputs={"crewai_trigger_payload": test_payload}))

    assert captured_payload == [test_payload, "async_started"]
    assert result == "async_finished"


def test_structured_flow_event_emission():
    """Test that the correct events are emitted during structured flow
    execution with all fields validated."""

@@ -39,6 +39,7 @@ def test_short_term_memory_search_events(short_term_memory):
|
||||
events = defaultdict(list)
|
||||
|
||||
with crewai_event_bus.scoped_handlers():
|
||||
|
||||
@crewai_event_bus.on(MemoryQueryStartedEvent)
|
||||
def on_search_started(source, event):
|
||||
events["MemoryQueryStartedEvent"].append(event)
|
||||
@@ -59,33 +60,34 @@ def test_short_term_memory_search_events(short_term_memory):
|
||||
assert len(events["MemoryQueryFailedEvent"]) == 0
|
||||
|
||||
assert dict(events["MemoryQueryStartedEvent"][0]) == {
|
||||
'timestamp': ANY,
|
||||
'type': 'memory_query_started',
|
||||
'source_fingerprint': None,
|
||||
'source_type': 'short_term_memory',
|
||||
'fingerprint_metadata': None,
|
||||
'query': 'test value',
|
||||
'limit': 3,
|
||||
'score_threshold': 0.35
|
||||
"timestamp": ANY,
|
||||
"type": "memory_query_started",
|
||||
"source_fingerprint": None,
|
||||
"source_type": "short_term_memory",
|
||||
"fingerprint_metadata": None,
|
||||
"query": "test value",
|
||||
"limit": 3,
|
||||
"score_threshold": 0.35,
|
||||
}
|
||||
|
||||
assert dict(events["MemoryQueryCompletedEvent"][0]) == {
|
||||
'timestamp': ANY,
|
||||
'type': 'memory_query_completed',
|
||||
'source_fingerprint': None,
|
||||
'source_type': 'short_term_memory',
|
||||
'fingerprint_metadata': None,
|
||||
'query': 'test value',
|
||||
'results': [],
|
||||
'limit': 3,
|
||||
'score_threshold': 0.35,
|
||||
'query_time_ms': ANY
|
||||
"timestamp": ANY,
|
||||
"type": "memory_query_completed",
|
||||
"source_fingerprint": None,
|
||||
"source_type": "short_term_memory",
|
||||
"fingerprint_metadata": None,
|
||||
"query": "test value",
|
||||
"results": [],
|
||||
"limit": 3,
|
||||
"score_threshold": 0.35,
|
||||
"query_time_ms": ANY,
|
||||
}
|
||||
|
||||
|
||||
def test_short_term_memory_save_events(short_term_memory):
|
||||
events = defaultdict(list)
|
||||
with crewai_event_bus.scoped_handlers():
|
||||
|
||||
@crewai_event_bus.on(MemorySaveStartedEvent)
|
||||
def on_save_started(source, event):
|
||||
events["MemorySaveStartedEvent"].append(event)
|
||||
@@ -105,28 +107,29 @@ def test_short_term_memory_save_events(short_term_memory):
|
||||
assert len(events["MemorySaveFailedEvent"]) == 0
|
||||
|
||||
assert dict(events["MemorySaveStartedEvent"][0]) == {
|
||||
'timestamp': ANY,
|
||||
'type': 'memory_save_started',
|
||||
'source_fingerprint': None,
|
||||
'source_type': 'short_term_memory',
|
||||
'fingerprint_metadata': None,
|
||||
'value': 'test value',
|
||||
'metadata': {'task': 'test_task'},
|
||||
'agent_role': "test_agent"
|
||||
"timestamp": ANY,
|
||||
"type": "memory_save_started",
|
||||
"source_fingerprint": None,
|
||||
"source_type": "short_term_memory",
|
||||
"fingerprint_metadata": None,
|
||||
"value": "test value",
|
||||
"metadata": {"task": "test_task"},
|
||||
"agent_role": "test_agent",
|
||||
}
|
||||
|
||||
assert dict(events["MemorySaveCompletedEvent"][0]) == {
|
||||
'timestamp': ANY,
|
||||
'type': 'memory_save_completed',
|
||||
'source_fingerprint': None,
|
||||
'source_type': 'short_term_memory',
|
||||
'fingerprint_metadata': None,
|
||||
'value': 'test value',
|
||||
'metadata': {'task': 'test_task', 'agent': 'test_agent'},
|
||||
'agent_role': "test_agent",
|
||||
'save_time_ms': ANY
|
||||
"timestamp": ANY,
|
||||
"type": "memory_save_completed",
|
||||
"source_fingerprint": None,
|
||||
"source_type": "short_term_memory",
|
||||
"fingerprint_metadata": None,
|
||||
"value": "test value",
|
||||
"metadata": {"task": "test_task", "agent": "test_agent"},
|
||||
"agent_role": "test_agent",
|
||||
"save_time_ms": ANY,
|
||||
}
|
||||
|
||||
|
||||
def test_save_and_search(short_term_memory):
|
||||
memory = ShortTermMemoryItem(
|
||||
data="""test value test value test value test value test value test value
|
||||
|
||||
@@ -1,4 +1,5 @@
import os
import threading
from unittest.mock import patch

import pytest
@@ -11,12 +12,16 @@ from opentelemetry import trace

@pytest.fixture(autouse=True)
def cleanup_telemetry():
    """Automatically clean up Telemetry singleton between tests."""
    Telemetry._instance = None
    if hasattr(Telemetry, "_lock"):
        Telemetry._lock = threading.Lock()
    yield
    Telemetry._instance = None
    if hasattr(Telemetry, "_lock"):
        Telemetry._lock = threading.Lock()


@pytest.mark.telemetry
|
||||
@pytest.mark.parametrize(
|
||||
"env_var,value,expected_ready",
|
||||
[
|
||||
@@ -36,6 +41,7 @@ def test_telemetry_environment_variables(env_var, value, expected_ready):
|
||||
assert telemetry.ready is expected_ready
|
||||
|
||||
|
||||
@pytest.mark.telemetry
|
||||
def test_telemetry_enabled_by_default():
|
||||
"""Test that telemetry is enabled by default."""
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
@@ -44,6 +50,7 @@ def test_telemetry_enabled_by_default():
|
||||
assert telemetry.ready is True
|
||||
|
||||
|
||||
@pytest.mark.telemetry
|
||||
@patch("crewai.telemetry.telemetry.logger.error")
|
||||
@patch(
|
||||
"opentelemetry.exporter.otlp.proto.http.trace_exporter.OTLPSpanExporter.export",
|
||||
@@ -76,6 +83,7 @@ def test_telemetry_fails_due_connect_timeout(export_mock, logger_mock):
|
||||
logger_mock.assert_called_once_with(error)
|
||||
|
||||
|
||||
@pytest.mark.telemetry
|
||||
def test_telemetry_singleton_pattern():
|
||||
"""Test that Telemetry uses the singleton pattern correctly."""
|
||||
Telemetry._instance = None
|
||||
|
||||
@@ -14,14 +14,18 @@ def cleanup_telemetry():
|
||||
Telemetry._instance = None
|
||||
|
||||
|
||||
@pytest.mark.parametrize("env_var,value,expected_ready", [
|
||||
("OTEL_SDK_DISABLED", "true", False),
|
||||
("OTEL_SDK_DISABLED", "TRUE", False),
|
||||
("CREWAI_DISABLE_TELEMETRY", "true", False),
|
||||
("CREWAI_DISABLE_TELEMETRY", "TRUE", False),
|
||||
("OTEL_SDK_DISABLED", "false", True),
|
||||
("CREWAI_DISABLE_TELEMETRY", "false", True),
|
||||
])
|
||||
@pytest.mark.telemetry
|
||||
@pytest.mark.parametrize(
|
||||
"env_var,value,expected_ready",
|
||||
[
|
||||
("OTEL_SDK_DISABLED", "true", False),
|
||||
("OTEL_SDK_DISABLED", "TRUE", False),
|
||||
("CREWAI_DISABLE_TELEMETRY", "true", False),
|
||||
("CREWAI_DISABLE_TELEMETRY", "TRUE", False),
|
||||
("OTEL_SDK_DISABLED", "false", True),
|
||||
("CREWAI_DISABLE_TELEMETRY", "false", True),
|
||||
],
|
||||
)
|
||||
def test_telemetry_environment_variables(env_var, value, expected_ready):
|
||||
"""Test telemetry state with different environment variable configurations."""
|
||||
with patch.dict(os.environ, {env_var: value}):
|
||||
@@ -30,6 +34,7 @@ def test_telemetry_environment_variables(env_var, value, expected_ready):
|
||||
assert telemetry.ready is expected_ready
|
||||
|
||||
|
||||
@pytest.mark.telemetry
|
||||
def test_telemetry_enabled_by_default():
|
||||
"""Test that telemetry is enabled by default."""
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
@@ -38,57 +43,60 @@ def test_telemetry_enabled_by_default():
|
||||
assert telemetry.ready is True
|
||||
|
||||
|
||||
@pytest.mark.telemetry
|
||||
def test_telemetry_disable_after_singleton_creation():
|
||||
"""Test that telemetry operations are disabled when env var is set after singleton creation."""
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
with patch("crewai.telemetry.telemetry.TracerProvider"):
|
||||
telemetry = Telemetry()
|
||||
assert telemetry.ready is True
|
||||
|
||||
|
||||
mock_operation = MagicMock()
|
||||
telemetry._safe_telemetry_operation(mock_operation)
|
||||
mock_operation.assert_called_once()
|
||||
|
||||
|
||||
mock_operation.reset_mock()
|
||||
|
||||
os.environ['CREWAI_DISABLE_TELEMETRY'] = 'true'
|
||||
|
||||
|
||||
os.environ["CREWAI_DISABLE_TELEMETRY"] = "true"
|
||||
|
||||
telemetry._safe_telemetry_operation(mock_operation)
|
||||
mock_operation.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.telemetry
|
||||
def test_telemetry_disable_with_multiple_instances():
|
||||
"""Test that multiple telemetry instances respect dynamically changed env vars."""
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
with patch("crewai.telemetry.telemetry.TracerProvider"):
|
||||
telemetry1 = Telemetry()
|
||||
assert telemetry1.ready is True
|
||||
|
||||
os.environ['CREWAI_DISABLE_TELEMETRY'] = 'true'
|
||||
|
||||
|
||||
os.environ["CREWAI_DISABLE_TELEMETRY"] = "true"
|
||||
|
||||
telemetry2 = Telemetry()
|
||||
assert telemetry2 is telemetry1
|
||||
assert telemetry2.ready is True
|
||||
|
||||
|
||||
mock_operation = MagicMock()
|
||||
telemetry2._safe_telemetry_operation(mock_operation)
|
||||
mock_operation.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.telemetry
|
||||
def test_telemetry_otel_sdk_disabled_after_creation():
|
||||
"""Test that OTEL_SDK_DISABLED also works when set after singleton creation."""
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
with patch("crewai.telemetry.telemetry.TracerProvider"):
|
||||
telemetry = Telemetry()
|
||||
assert telemetry.ready is True
|
||||
|
||||
|
||||
mock_operation = MagicMock()
|
||||
telemetry._safe_telemetry_operation(mock_operation)
|
||||
mock_operation.assert_called_once()
|
||||
|
||||
|
||||
mock_operation.reset_mock()
|
||||
|
||||
os.environ['OTEL_SDK_DISABLED'] = 'true'
|
||||
|
||||
|
||||
os.environ["OTEL_SDK_DISABLED"] = "true"
|
||||
|
||||
telemetry._safe_telemetry_operation(mock_operation)
|
||||
mock_operation.assert_not_called()
|
||||
|
||||
@@ -1,9 +1,12 @@
import asyncio
import inspect
import unittest
from typing import Any, Callable, Dict, List
from typing import Callable
from unittest.mock import patch

import pytest

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task
from crewai.tools import BaseTool, tool


@@ -111,25 +114,26 @@ def test_result_as_answer_in_tool_decorator():
|
||||
def my_tool_with_result_as_answer(question: str) -> str:
|
||||
"""This tool will return its result as the final answer."""
|
||||
return question
|
||||
|
||||
|
||||
assert my_tool_with_result_as_answer.result_as_answer is True
|
||||
|
||||
|
||||
converted_tool = my_tool_with_result_as_answer.to_structured_tool()
|
||||
assert converted_tool.result_as_answer is True
|
||||
|
||||
|
||||
@tool("Tool with default result_as_answer")
|
||||
def my_tool_with_default(question: str) -> str:
|
||||
"""This tool uses the default result_as_answer value."""
|
||||
return question
|
||||
|
||||
|
||||
assert my_tool_with_default.result_as_answer is False
|
||||
|
||||
|
||||
converted_tool = my_tool_with_default.to_structured_tool()
|
||||
assert converted_tool.result_as_answer is False
|
||||
|
||||
|
||||
class SyncTool(BaseTool):
|
||||
"""Test implementation with a synchronous _run method"""
|
||||
|
||||
name: str = "sync_tool"
|
||||
description: str = "A synchronous tool for testing"
|
||||
|
||||
@@ -140,6 +144,7 @@ class SyncTool(BaseTool):
|
||||
|
||||
class AsyncTool(BaseTool):
|
||||
"""Test implementation with an asynchronous _run method"""
|
||||
|
||||
name: str = "async_tool"
|
||||
description: str = "An asynchronous tool for testing"
|
||||
|
||||
@@ -174,7 +179,7 @@ def test_run_calls_asyncio_run_for_async_tools():
|
||||
"""Test that asyncio.run is called when using async tools."""
|
||||
async_tool = AsyncTool()
|
||||
|
||||
with patch('asyncio.run') as mock_run:
|
||||
with patch("asyncio.run") as mock_run:
|
||||
mock_run.return_value = "Processed test asynchronously"
|
||||
async_result = async_tool.run(input_text="test")
|
||||
|
||||
@@ -186,9 +191,43 @@ def test_run_does_not_call_asyncio_run_for_sync_tools():
|
||||
"""Test that asyncio.run is NOT called when using sync tools."""
|
||||
sync_tool = SyncTool()
|
||||
|
||||
with patch('asyncio.run') as mock_run:
|
||||
with patch("asyncio.run") as mock_run:
|
||||
sync_result = sync_tool.run(input_text="test")
|
||||
|
||||
mock_run.assert_not_called()
|
||||
assert sync_result == "Processed test synchronously"
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
def test_max_usage_count_is_respected():
    class IteratingTool(BaseTool):
        name: str = "iterating_tool"
        description: str = "A tool that iterates a given number of times"

        def _run(self, input_text: str):
            return f"Iteration {input_text}"

    tool = IteratingTool(max_usage_count=5)

    agent = Agent(
        role="Iterating Agent",
        goal="Call the iterating tool 5 times",
        backstory="You are an agent that iterates a given number of times",
        tools=[tool],
    )

    task = Task(
        description="Call the iterating tool 5 times",
        expected_output="A list of the iterations",
        agent=agent,
    )

    crew = Crew(
        agents=[agent],
        tasks=[task],
        verbose=True,
    )

    crew.kickoff()
    assert tool.max_usage_count == 5
    assert tool.current_usage_count == 5

@@ -2,8 +2,9 @@ import os
import pytest
from unittest.mock import patch, MagicMock

# Remove the module-level patch

from crewai import Agent, Task, Crew
from crewai.flow.flow import Flow, start
from crewai.utilities.events.listeners.tracing.trace_listener import (
    TraceCollectionListener,
)
@@ -284,29 +285,110 @@ class TestTraceListenerSetup:
|
||||
f"Found {len(trace_handlers)} trace handlers when tracing should be disabled"
|
||||
)
|
||||
|
||||
def test_trace_listener_setup_correctly(self):
|
||||
def test_trace_listener_setup_correctly_for_crew(self):
|
||||
"""Test that trace listener is set up correctly when enabled"""
|
||||
|
||||
with patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}):
|
||||
trace_listener = TraceCollectionListener()
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
llm="gpt-4o-mini",
|
||||
)
|
||||
task = Task(
|
||||
description="Say hello to the world",
|
||||
expected_output="hello world",
|
||||
agent=agent,
|
||||
)
|
||||
with patch.object(
|
||||
TraceCollectionListener, "setup_listeners"
|
||||
) as mock_listener_setup:
|
||||
Crew(agents=[agent], tasks=[task], verbose=True)
|
||||
assert mock_listener_setup.call_count >= 1
|
||||
|
||||
assert trace_listener.trace_enabled is True
|
||||
assert trace_listener.batch_manager is not None
|
||||
def test_trace_listener_setup_correctly_for_flow(self):
|
||||
"""Test that trace listener is set up correctly when enabled"""
|
||||
|
||||
with patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}):
|
||||
|
||||
class FlowExample(Flow):
|
||||
@start()
|
||||
def start(self):
|
||||
pass
|
||||
|
||||
with patch.object(
|
||||
TraceCollectionListener, "setup_listeners"
|
||||
) as mock_listener_setup:
|
||||
FlowExample()
|
||||
assert mock_listener_setup.call_count >= 1
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_trace_listener_setup_correctly_with_tracing_flag(self):
|
||||
"""Test that trace listener is set up correctly when enabled"""
|
||||
agent = Agent(role="Test Agent", goal="Test goal", backstory="Test backstory")
|
||||
task = Task(
|
||||
description="Say hello to the world",
|
||||
expected_output="hello world",
|
||||
agent=agent,
|
||||
)
|
||||
crew = Crew(agents=[agent], tasks=[task], verbose=True, tracing=True)
|
||||
crew.kickoff()
|
||||
trace_listener = TraceCollectionListener(tracing=True)
|
||||
assert trace_listener.trace_enabled is True
|
||||
assert trace_listener.batch_manager is not None
|
||||
def test_trace_listener_ephemeral_batch(self):
|
||||
"""Test that trace listener properly handles ephemeral batches"""
|
||||
with (
|
||||
patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}),
|
||||
patch(
|
||||
"crewai.utilities.events.listeners.tracing.trace_listener.TraceCollectionListener._check_authenticated",
|
||||
return_value=False,
|
||||
),
|
||||
):
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
llm="gpt-4o-mini",
|
||||
)
|
||||
task = Task(
|
||||
description="Say hello to the world",
|
||||
expected_output="hello world",
|
||||
agent=agent,
|
||||
)
|
||||
crew = Crew(agents=[agent], tasks=[task], tracing=True)
|
||||
|
||||
with patch.object(TraceBatchManager, "initialize_batch") as mock_initialize:
|
||||
crew.kickoff()
|
||||
|
||||
assert mock_initialize.call_count >= 1
|
||||
assert mock_initialize.call_args_list[0][1]["use_ephemeral"] is True
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_trace_listener_with_authenticated_user(self):
|
||||
"""Test that trace listener properly handles authenticated batches"""
|
||||
with (
|
||||
patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}),
|
||||
patch(
|
||||
"crewai.utilities.events.listeners.tracing.trace_batch_manager.PlusAPI"
|
||||
) as mock_plus_api_class,
|
||||
):
|
||||
mock_plus_api_instance = MagicMock()
|
||||
mock_plus_api_class.return_value = mock_plus_api_instance
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
backstory="Test backstory",
|
||||
llm="gpt-4o-mini",
|
||||
)
|
||||
task = Task(
|
||||
description="Say hello to the world",
|
||||
expected_output="hello world",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
with (
|
||||
patch.object(TraceBatchManager, "initialize_batch") as mock_initialize,
|
||||
patch.object(
|
||||
TraceBatchManager, "finalize_batch"
|
||||
) as mock_finalize_backend_batch,
|
||||
):
|
||||
crew = Crew(agents=[agent], tasks=[task], tracing=True)
|
||||
crew.kickoff()
|
||||
|
||||
mock_plus_api_class.assert_called_with(api_key="mock_token_12345")
|
||||
|
||||
assert mock_initialize.call_count >= 1
|
||||
mock_finalize_backend_batch.assert_called_with()
|
||||
assert mock_finalize_backend_batch.call_count >= 1
|
||||
|
||||
# Helper method to ensure cleanup
|
||||
def teardown_method(self):
|
||||
|
||||
55
uv.lock
generated
@@ -1,5 +1,5 @@
version = 1
revision = 2
revision = 3
requires-python = ">=3.10, <3.14"
resolution-markers = [
    "python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
@@ -34,22 +34,6 @@ resolution-markers = [
    "(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version < '3.11' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')",
]

[[package]]
name = "agentops"
version = "0.3.18"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "packaging" },
    { name = "psutil" },
    { name = "pyyaml" },
    { name = "requests" },
    { name = "termcolor" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c5/52/46bb2f29b9e5f2e1d8b124296b7794934a9048de635d9e7d6a95e791ad7b/agentops-0.3.18.tar.gz", hash = "sha256:4d509754df7be52579597cc9f53939c5218131a0379463e0ff6f6f40cde9fcc4", size = 55394, upload-time = "2024-11-19T19:06:21.306Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/97/8d/bd4cad95dad722dc2d3e4179feab1058ef846828c0e15e51e8bfaea373ee/agentops-0.3.18-py3-none-any.whl", hash = "sha256:8b48d8a1662f276653430fd541c77fa4f9a15a43e881b518ff88ea56925afcf7", size = 58032, upload-time = "2024-11-19T19:06:19.068Z" },
]

[[package]]
name = "aiohappyeyeballs"
version = "2.6.1"
@@ -744,9 +728,6 @@ dependencies = [
]

[package.optional-dependencies]
agentops = [
    { name = "agentops" },
]
aisuite = [
    { name = "aisuite" },
]
@@ -792,13 +773,12 @@ dev = [

[package.metadata]
requires-dist = [
    { name = "agentops", marker = "extra == 'agentops'", specifier = "==0.3.18" },
    { name = "aisuite", marker = "extra == 'aisuite'", specifier = ">=0.1.10" },
    { name = "appdirs", specifier = ">=1.4.4" },
    { name = "blinker", specifier = ">=1.9.0" },
    { name = "chromadb", specifier = ">=0.5.23" },
    { name = "click", specifier = ">=8.1.7" },
    { name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.62.0" },
    { name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.62.1" },
    { name = "docling", marker = "extra == 'docling'", specifier = ">=2.12.0" },
    { name = "instructor", specifier = ">=1.3.3" },
    { name = "json-repair", specifier = "==0.25.2" },
@@ -828,7 +808,7 @@ requires-dist = [
    { name = "tomli-w", specifier = ">=1.1.0" },
    { name = "uv", specifier = ">=0.4.25" },
]
provides-extras = ["agentops", "aisuite", "docling", "embeddings", "mem0", "openpyxl", "pandas", "pdfplumber", "tools"]
provides-extras = ["aisuite", "docling", "embeddings", "mem0", "openpyxl", "pandas", "pdfplumber", "tools"]

[package.metadata.requires-dev]
dev = [
@@ -850,7 +830,7 @@ dev = [

[[package]]
name = "crewai-tools"
version = "0.62.0"
version = "0.62.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "chromadb" },
@@ -868,9 +848,9 @@ dependencies = [
    { name = "stagehand" },
    { name = "tiktoken" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e9/24/0287edbaa33c52b5541a564d92d3324d4839a76a4b023540c0fd5c7ee330/crewai_tools-0.62.0.tar.gz", hash = "sha256:71a24c173677f108516e1cde286e476e9aeb60da78d911bec0f15caa3c6af15a", size = 1059534, upload-time = "2025-08-13T21:13:49.879Z" }
sdist = { url = "https://files.pythonhosted.org/packages/e3/4b/23cd61a07105b54be8d1185e3b861b1bb2ac6050264acc060d91b22da171/crewai_tools-0.62.1.tar.gz", hash = "sha256:1819d09189e815a28f6744b6cffde703b9e9e438ef5f066c5b4dcdd75cc3c6ad", size = 1059792, upload-time = "2025-08-19T01:08:02.567Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/a9/16/cf26f4eaf1edc2f62d0c9cb1ec97f573f8d7401663da047c52b3ce4c4628/crewai_tools-0.62.0-py3-none-any.whl", hash = "sha256:b5e7035563cb00601431286b1c56933966acea1f220052cd33e1d4ee35590017", size = 677008, upload-time = "2025-08-13T21:13:46.903Z" },
    { url = "https://files.pythonhosted.org/packages/34/87/a6e8d5dff4718795d806cd54bdb1cb94c637d93951d00e87c0b55d431870/crewai_tools-0.62.1-py3-none-any.whl", hash = "sha256:d8333315c8bf35bdb939b22e5b555acf9357ae676b992832f96f63de21670871", size = 677041, upload-time = "2025-08-19T01:08:00.891Z" },
]

[[package]]
@@ -4013,20 +3993,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/7e/cc/7e77861000a0691aeea8f4566e5d3aa716f2b1dece4a24439437e41d3d25/protobuf-5.29.5-py3-none-any.whl", hash = "sha256:6cf42630262c59b2d8de33954443d94b746c952b01434fc58a417fdbd2e84bd5", size = 172823, upload-time = "2025-05-28T23:51:58.157Z" },
]

[[package]]
name = "psutil"
version = "5.9.8"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/90/c7/6dc0a455d111f68ee43f27793971cf03fe29b6ef972042549db29eec39a2/psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c", size = 503247, upload-time = "2024-01-19T20:47:09.517Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/e7/e3/07ae864a636d70a8a6f58da27cb1179192f1140d5d1da10886ade9405797/psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81", size = 248702, upload-time = "2024-01-19T20:47:36.303Z" },
    { url = "https://files.pythonhosted.org/packages/b3/bd/28c5f553667116b2598b9cc55908ec435cb7f77a34f2bff3e3ca765b0f78/psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421", size = 285242, upload-time = "2024-01-19T20:47:39.65Z" },
    { url = "https://files.pythonhosted.org/packages/c5/4f/0e22aaa246f96d6ac87fe5ebb9c5a693fbe8877f537a1022527c47ca43c5/psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4", size = 288191, upload-time = "2024-01-19T20:47:43.078Z" },
    { url = "https://files.pythonhosted.org/packages/6e/f5/2aa3a4acdc1e5940b59d421742356f133185667dd190b166dbcfcf5d7b43/psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0", size = 251252, upload-time = "2024-01-19T20:47:52.88Z" },
    { url = "https://files.pythonhosted.org/packages/93/52/3e39d26feae7df0aa0fd510b14012c3678b36ed068f7d78b8d8784d61f0e/psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf", size = 255090, upload-time = "2024-01-19T20:47:56.019Z" },
    { url = "https://files.pythonhosted.org/packages/05/33/2d74d588408caedd065c2497bdb5ef83ce6082db01289a1e1147f6639802/psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8", size = 249898, upload-time = "2024-01-19T20:47:59.238Z" },
]

[[package]]
name = "ptyprocess"
version = "0.7.0"
@@ -5502,15 +5468,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" },
]

[[package]]
name = "termcolor"
version = "2.4.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/10/56/d7d66a84f96d804155f6ff2873d065368b25a07222a6fd51c4f24ef6d764/termcolor-2.4.0.tar.gz", hash = "sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a", size = 12664, upload-time = "2023-12-01T11:04:51.66Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/d9/5f/8c716e47b3a50cbd7c146f45881e11d9414def768b7cd9c5e6650ec2a80a/termcolor-2.4.0-py3-none-any.whl", hash = "sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63", size = 7719, upload-time = "2023-12-01T11:04:50.019Z" },
]

[[package]]
name = "tifffile"
version = "2025.5.10"